
RDMA subsystem updates for 5.4


Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull RDMA subsystem updates from Jason Gunthorpe:
 "This cycle mainly saw lots of bug fixes and clean up code across the
  core code and several drivers, few new functional changes were made.

   - Many cleanup and bug fixes for hns

   - Various small bug fixes and cleanups in hfi1, mlx5, usnic, qed,
     bnxt_re, efa

   - Share the query_port code between all the iWarp drivers

   - General rework and cleanup of the ODP MR umem code to fit better
     with the mmu notifier get/put scheme

   - Support rdma netlink in non init_net name spaces

   - mlx5 support for XRC devx and DC ODP"
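
A note on the "rdma netlink in non init_net name spaces" item above: in the hunks
further down, the change shows up as the netlink send helpers growing an explicit
struct net * argument. Below is a minimal hedged sketch of a handler replying in
the namespace its request arrived on; the function name is hypothetical, but the
rdma_nl_unicast() signature matches the core/netlink.c and core/nldev.c hunks.

/*
 * Hedged sketch, not part of the merge: the rdma_nl_* senders now take an
 * explicit struct net *, so a handler can answer in the namespace the
 * request came from rather than hard-coding init_net.  The function name is
 * hypothetical; rdma_nl_unicast()'s signature matches the hunks below.
 */
#include <linux/netlink.h>
#include <net/sock.h>
#include <rdma/rdma_netlink.h>

static int example_reply_in_request_netns(struct sk_buff *req_skb,
					   struct sk_buff *reply)
{
	/* Namespace of the socket the request arrived on. */
	struct net *net = sock_net(req_skb->sk);

	/* Unicast the prepared reply back to the requesting port id. */
	return rdma_nl_unicast(net, reply, NETLINK_CB(req_skb).portid);
}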

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (99 commits)
  RDMA: Fix double-free in srq creation error flow
  RDMA/efa: Fix incorrect error print
  IB/mlx5: Free mpi in mp_slave mode
  IB/mlx5: Use the original address for the page during free_pages
  RDMA/bnxt_re: Fix spelling mistake "missin_resp" -> "missing_resp"
  RDMA/hns: Package operations of rq inline buffer into separate functions
  RDMA/hns: Optimize cmd init and mode selection for hip08
  IB/hfi1: Define variables as unsigned long to fix KASAN warning
  IB/{rdmavt, hfi1, qib}: Add a counter for credit waits
  IB/hfi1: Add traces for TID RDMA READ
  RDMA/siw: Relax from kmap_atomic() use in TX path
  IB/iser: Support up to 16MB data transfer in a single command
  RDMA/siw: Fix page address mapping in TX path
  RDMA: Fix goto target to release the allocated memory
  RDMA/usnic: Avoid overly large buffers on stack
  RDMA/odp: Add missing cast for 32 bit
  RDMA/hns: Use devm_platform_ioremap_resource() to simplify code
  Documentation/infiniband: update name of some functions
  RDMA/cma: Fix false error message
  RDMA/hns: Fix wrong assignment of qp_access_flags
  ...
Linus Torvalds 2019-09-21 10:26:24 -07:00
commit 018c6837f3
99 changed files with 2746 additions and 1927 deletions

View File

@@ -29,10 +29,10 @@ Sleeping and interrupt context
 The corresponding functions exported to upper level protocol
 consumers:
 
-  - ib_create_ah
-  - ib_modify_ah
-  - ib_query_ah
-  - ib_destroy_ah
+  - rdma_create_ah
+  - rdma_modify_ah
+  - rdma_query_ah
+  - rdma_destroy_ah
   - ib_post_send
   - ib_post_recv
   - ib_req_notify_cq

View File

@@ -183,7 +183,7 @@ static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr,
 	/* Repair the nlmsg header length */
 	nlmsg_end(skb, nlh);
 
-	rdma_nl_multicast(skb, RDMA_NL_GROUP_LS, GFP_KERNEL);
+	rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, GFP_KERNEL);
 
 	/* Make the request retry, so when we get the response from userspace
 	 * we will have something.

View File

@@ -810,6 +810,7 @@ static void release_gid_table(struct ib_device *device,
 	if (leak)
 		return;
 
+	mutex_destroy(&table->lock);
 	kfree(table->data_vec);
 	kfree(table);
 }

View File

@@ -3046,7 +3046,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 		if (status)
 			pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
 					     status);
-	} else {
+	} else if (status) {
 		pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
 	}

View File

@@ -342,12 +342,18 @@ static struct configfs_subsystem cma_subsys = {
 
 int __init cma_configfs_init(void)
 {
+	int ret;
+
 	config_group_init(&cma_subsys.su_group);
 	mutex_init(&cma_subsys.su_mutex);
-	return configfs_register_subsystem(&cma_subsys);
+	ret = configfs_register_subsystem(&cma_subsys);
+	if (ret)
+		mutex_destroy(&cma_subsys.su_mutex);
+	return ret;
 }
 
 void __exit cma_configfs_exit(void)
 {
 	configfs_unregister_subsystem(&cma_subsys);
+	mutex_destroy(&cma_subsys.su_mutex);
 }

View File

@ -36,6 +36,8 @@
#include <linux/list.h> #include <linux/list.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/cgroup_rdma.h> #include <linux/cgroup_rdma.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <rdma/ib_verbs.h> #include <rdma/ib_verbs.h>
#include <rdma/opa_addr.h> #include <rdma/opa_addr.h>
@ -54,8 +56,26 @@ struct pkey_index_qp_list {
struct list_head qp_list; struct list_head qp_list;
}; };
/**
* struct rdma_dev_net - rdma net namespace metadata for a net
* @nl_sock: Pointer to netlink socket
* @net: Pointer to owner net namespace
* @id: xarray id to identify the net namespace.
*/
struct rdma_dev_net {
struct sock *nl_sock;
possible_net_t net;
u32 id;
};
extern const struct attribute_group ib_dev_attr_group; extern const struct attribute_group ib_dev_attr_group;
extern bool ib_devices_shared_netns; extern bool ib_devices_shared_netns;
extern unsigned int rdma_dev_net_id;
static inline struct rdma_dev_net *rdma_net_to_dev_net(struct net *net)
{
return net_generic(net, rdma_dev_net_id);
}
int ib_device_register_sysfs(struct ib_device *device); int ib_device_register_sysfs(struct ib_device *device);
void ib_device_unregister_sysfs(struct ib_device *device); void ib_device_unregister_sysfs(struct ib_device *device);
@ -179,7 +199,6 @@ void ib_mad_cleanup(void);
int ib_sa_init(void); int ib_sa_init(void);
void ib_sa_cleanup(void); void ib_sa_cleanup(void);
int rdma_nl_init(void);
void rdma_nl_exit(void); void rdma_nl_exit(void);
int ib_nl_handle_resolve_resp(struct sk_buff *skb, int ib_nl_handle_resolve_resp(struct sk_buff *skb,
@ -365,4 +384,7 @@ void ib_port_unregister_module_stat(struct kobject *kobj);
int ib_device_set_netns_put(struct sk_buff *skb, int ib_device_set_netns_put(struct sk_buff *skb,
struct ib_device *dev, u32 ns_fd); struct ib_device *dev, u32 ns_fd);
int rdma_nl_net_init(struct rdma_dev_net *rnet);
void rdma_nl_net_exit(struct rdma_dev_net *rnet);
#endif /* _CORE_PRIV_H */ #endif /* _CORE_PRIV_H */

View File

@@ -599,7 +599,7 @@ int rdma_counter_get_mode(struct ib_device *dev, u8 port,
 void rdma_counter_init(struct ib_device *dev)
 {
 	struct rdma_port_counter *port_counter;
-	u32 port;
+	u32 port, i;
 
 	if (!dev->port_data)
 		return;
@@ -620,13 +620,12 @@ void rdma_counter_init(struct ib_device *dev)
 	return;
 
 fail:
-	rdma_for_each_port(dev, port) {
+	for (i = port; i >= rdma_start_port(dev); i--) {
 		port_counter = &dev->port_data[port].port_counter;
 		kfree(port_counter->hstats);
 		port_counter->hstats = NULL;
+		mutex_destroy(&port_counter->lock);
 	}
-	return;
 }
 
 void rdma_counter_release(struct ib_device *dev)
@@ -637,5 +636,6 @@ void rdma_counter_release(struct ib_device *dev)
 	rdma_for_each_port(dev, port) {
 		port_counter = &dev->port_data[port].port_counter;
 		kfree(port_counter->hstats);
+		mutex_destroy(&port_counter->lock);
 	}
 }

View File

@ -252,6 +252,34 @@ out_free_cq:
} }
EXPORT_SYMBOL(__ib_alloc_cq_user); EXPORT_SYMBOL(__ib_alloc_cq_user);
/**
* __ib_alloc_cq_any - allocate a completion queue
* @dev: device to allocate the CQ for
* @private: driver private data, accessible from cq->cq_context
* @nr_cqe: number of CQEs to allocate
* @poll_ctx: context to poll the CQ from
* @caller: module owner name
*
* Attempt to spread ULP Completion Queues over each device's interrupt
* vectors. A simple best-effort mechanism is used.
*/
struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
int nr_cqe, enum ib_poll_context poll_ctx,
const char *caller)
{
static atomic_t counter;
int comp_vector = 0;
if (dev->num_comp_vectors > 1)
comp_vector =
atomic_inc_return(&counter) %
min_t(int, dev->num_comp_vectors, num_online_cpus());
return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
caller, NULL);
}
EXPORT_SYMBOL(__ib_alloc_cq_any);
/** /**
* ib_free_cq_user - free a completion queue * ib_free_cq_user - free a completion queue
* @cq: completion queue to free. * @cq: completion queue to free.

View File

@ -39,7 +39,6 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include <net/net_namespace.h> #include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/security.h> #include <linux/security.h>
#include <linux/notifier.h> #include <linux/notifier.h>
#include <linux/hashtable.h> #include <linux/hashtable.h>
@ -111,17 +110,7 @@ static void ib_client_put(struct ib_client *client)
*/ */
#define CLIENT_DATA_REGISTERED XA_MARK_1 #define CLIENT_DATA_REGISTERED XA_MARK_1
/** unsigned int rdma_dev_net_id;
* struct rdma_dev_net - rdma net namespace metadata for a net
* @net: Pointer to owner net namespace
* @id: xarray id to identify the net namespace.
*/
struct rdma_dev_net {
possible_net_t net;
u32 id;
};
static unsigned int rdma_dev_net_id;
/* /*
* A list of net namespaces is maintained in an xarray. This is necessary * A list of net namespaces is maintained in an xarray. This is necessary
@ -514,6 +503,9 @@ static void ib_device_release(struct device *device)
rcu_head); rcu_head);
} }
mutex_destroy(&dev->unregistration_lock);
mutex_destroy(&dev->compat_devs_mutex);
xa_destroy(&dev->compat_devs); xa_destroy(&dev->compat_devs);
xa_destroy(&dev->client_data); xa_destroy(&dev->client_data);
kfree_rcu(dev, rcu_head); kfree_rcu(dev, rcu_head);
@ -1060,7 +1052,7 @@ int rdma_compatdev_set(u8 enable)
static void rdma_dev_exit_net(struct net *net) static void rdma_dev_exit_net(struct net *net)
{ {
struct rdma_dev_net *rnet = net_generic(net, rdma_dev_net_id); struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
struct ib_device *dev; struct ib_device *dev;
unsigned long index; unsigned long index;
int ret; int ret;
@ -1094,25 +1086,32 @@ static void rdma_dev_exit_net(struct net *net)
} }
up_read(&devices_rwsem); up_read(&devices_rwsem);
rdma_nl_net_exit(rnet);
xa_erase(&rdma_nets, rnet->id); xa_erase(&rdma_nets, rnet->id);
} }
static __net_init int rdma_dev_init_net(struct net *net) static __net_init int rdma_dev_init_net(struct net *net)
{ {
struct rdma_dev_net *rnet = net_generic(net, rdma_dev_net_id); struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
unsigned long index; unsigned long index;
struct ib_device *dev; struct ib_device *dev;
int ret; int ret;
write_pnet(&rnet->net, net);
ret = rdma_nl_net_init(rnet);
if (ret)
return ret;
/* No need to create any compat devices in default init_net. */ /* No need to create any compat devices in default init_net. */
if (net_eq(net, &init_net)) if (net_eq(net, &init_net))
return 0; return 0;
write_pnet(&rnet->net, net);
ret = xa_alloc(&rdma_nets, &rnet->id, rnet, xa_limit_32b, GFP_KERNEL); ret = xa_alloc(&rdma_nets, &rnet->id, rnet, xa_limit_32b, GFP_KERNEL);
if (ret) if (ret) {
rdma_nl_net_exit(rnet);
return ret; return ret;
}
down_read(&devices_rwsem); down_read(&devices_rwsem);
xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) { xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
@ -1974,6 +1973,74 @@ void ib_dispatch_event(struct ib_event *event)
} }
EXPORT_SYMBOL(ib_dispatch_event); EXPORT_SYMBOL(ib_dispatch_event);
static int iw_query_port(struct ib_device *device,
u8 port_num,
struct ib_port_attr *port_attr)
{
struct in_device *inetdev;
struct net_device *netdev;
int err;
memset(port_attr, 0, sizeof(*port_attr));
netdev = ib_device_get_netdev(device, port_num);
if (!netdev)
return -ENODEV;
dev_put(netdev);
port_attr->max_mtu = IB_MTU_4096;
port_attr->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
if (!netif_carrier_ok(netdev)) {
port_attr->state = IB_PORT_DOWN;
port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
} else {
inetdev = in_dev_get(netdev);
if (inetdev && inetdev->ifa_list) {
port_attr->state = IB_PORT_ACTIVE;
port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
in_dev_put(inetdev);
} else {
port_attr->state = IB_PORT_INIT;
port_attr->phys_state =
IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING;
}
}
err = device->ops.query_port(device, port_num, port_attr);
if (err)
return err;
return 0;
}
static int __ib_query_port(struct ib_device *device,
u8 port_num,
struct ib_port_attr *port_attr)
{
union ib_gid gid = {};
int err;
memset(port_attr, 0, sizeof(*port_attr));
err = device->ops.query_port(device, port_num, port_attr);
if (err || port_attr->subnet_prefix)
return err;
if (rdma_port_get_link_layer(device, port_num) !=
IB_LINK_LAYER_INFINIBAND)
return 0;
err = device->ops.query_gid(device, port_num, 0, &gid);
if (err)
return err;
port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
return 0;
}
/** /**
* ib_query_port - Query IB port attributes * ib_query_port - Query IB port attributes
* @device:Device to query * @device:Device to query
@ -1987,26 +2054,13 @@ int ib_query_port(struct ib_device *device,
u8 port_num, u8 port_num,
struct ib_port_attr *port_attr) struct ib_port_attr *port_attr)
{ {
union ib_gid gid;
int err;
if (!rdma_is_port_valid(device, port_num)) if (!rdma_is_port_valid(device, port_num))
return -EINVAL; return -EINVAL;
memset(port_attr, 0, sizeof(*port_attr)); if (rdma_protocol_iwarp(device, port_num))
err = device->ops.query_port(device, port_num, port_attr); return iw_query_port(device, port_num, port_attr);
if (err || port_attr->subnet_prefix) else
return err; return __ib_query_port(device, port_num, port_attr);
if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
return 0;
err = device->ops.query_gid(device, port_num, 0, &gid);
if (err)
return err;
port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
return 0;
} }
EXPORT_SYMBOL(ib_query_port); EXPORT_SYMBOL(ib_query_port);
@ -2661,12 +2715,6 @@ static int __init ib_core_init(void)
goto err_comp_unbound; goto err_comp_unbound;
} }
ret = rdma_nl_init();
if (ret) {
pr_warn("Couldn't init IB netlink interface: err %d\n", ret);
goto err_sysfs;
}
ret = addr_init(); ret = addr_init();
if (ret) { if (ret) {
pr_warn("Could't init IB address resolution\n"); pr_warn("Could't init IB address resolution\n");
@ -2712,8 +2760,6 @@ err_mad:
err_addr: err_addr:
addr_cleanup(); addr_cleanup();
err_ibnl: err_ibnl:
rdma_nl_exit();
err_sysfs:
class_unregister(&ib_class); class_unregister(&ib_class);
err_comp_unbound: err_comp_unbound:
destroy_workqueue(ib_comp_unbound_wq); destroy_workqueue(ib_comp_unbound_wq);
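
The hunks above hook rdma_nl_net_init()/rdma_nl_net_exit() into the per-namespace
init and exit paths. For orientation, here is a hedged sketch of the conventional
pernet_operations wiring that such callbacks rely on, written as it would sit
inside core/device.c; the ops structure name and the registration call are
illustrative and not copied from the diff, only the callback names,
rdma_dev_net_id, and struct rdma_dev_net appear in the hunks.

/*
 * Hedged sketch of the usual per-net wiring behind rdma_dev_init_net() and
 * rdma_dev_exit_net().  Each new network namespace then gets its own
 * struct rdma_dev_net (and, after this series, its own NETLINK_RDMA socket).
 */
#include <linux/init.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

static struct pernet_operations example_rdma_pernet_ops = {
	.init = rdma_dev_init_net,
	.exit = rdma_dev_exit_net,
	.id = &rdma_dev_net_id,
	.size = sizeof(struct rdma_dev_net),
};

/* Called once from core init; illustrative registration call. */
static int __init example_register_rdma_pernet(void)
{
	return register_pernet_device(&example_rdma_pernet_ops);
}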

View File

@@ -148,13 +148,6 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 			hlist_del_init(&fmr->cache_node);
 			fmr->remap_count = 0;
 			list_add_tail(&fmr->fmr->list, &fmr_list);
-
-#ifdef DEBUG
-			if (fmr->ref_count !=0) {
-				pr_warn(PFX "Unmapping FMR 0x%08x with ref count %d\n",
-					fmr, fmr->ref_count);
-			}
-#endif
 		}
 
 	list_splice_init(&pool->dirty_list, &unmap_list);
@@ -496,12 +489,6 @@ void ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
 		}
 	}
 
-#ifdef DEBUG
-	if (fmr->ref_count < 0)
-		pr_warn(PFX "FMR %p has ref count %d < 0\n",
-			fmr, fmr->ref_count);
-#endif
-
 	spin_unlock_irqrestore(&pool->pool_lock, flags);
 }
 EXPORT_SYMBOL(ib_fmr_pool_unmap);

View File

@ -112,7 +112,7 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n", pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n",
__func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name); __func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name);
ret = rdma_nl_multicast(skb, RDMA_NL_GROUP_IWPM, GFP_KERNEL); ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_IWPM, GFP_KERNEL);
if (ret) { if (ret) {
skb = NULL; /* skb is freed in the netlink send-op handling */ skb = NULL; /* skb is freed in the netlink send-op handling */
iwpm_user_pid = IWPM_PID_UNAVAILABLE; iwpm_user_pid = IWPM_PID_UNAVAILABLE;
@ -124,8 +124,7 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
return ret; return ret;
pid_query_error: pid_query_error:
pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client); pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
if (skb) dev_kfree_skb(skb);
dev_kfree_skb(skb);
if (nlmsg_request) if (nlmsg_request)
iwpm_free_nlmsg_request(&nlmsg_request->kref); iwpm_free_nlmsg_request(&nlmsg_request->kref);
return ret; return ret;
@ -202,7 +201,7 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
nlmsg_end(skb, nlh); nlmsg_end(skb, nlh);
nlmsg_request->req_buffer = pm_msg; nlmsg_request->req_buffer = pm_msg;
ret = rdma_nl_unicast_wait(skb, iwpm_user_pid); ret = rdma_nl_unicast_wait(&init_net, skb, iwpm_user_pid);
if (ret) { if (ret) {
skb = NULL; /* skb is freed in the netlink send-op handling */ skb = NULL; /* skb is freed in the netlink send-op handling */
iwpm_user_pid = IWPM_PID_UNDEFINED; iwpm_user_pid = IWPM_PID_UNDEFINED;
@ -214,8 +213,7 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
add_mapping_error: add_mapping_error:
pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client); pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
add_mapping_error_nowarn: add_mapping_error_nowarn:
if (skb) dev_kfree_skb(skb);
dev_kfree_skb(skb);
if (nlmsg_request) if (nlmsg_request)
iwpm_free_nlmsg_request(&nlmsg_request->kref); iwpm_free_nlmsg_request(&nlmsg_request->kref);
return ret; return ret;
@ -297,7 +295,7 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
nlmsg_end(skb, nlh); nlmsg_end(skb, nlh);
nlmsg_request->req_buffer = pm_msg; nlmsg_request->req_buffer = pm_msg;
ret = rdma_nl_unicast_wait(skb, iwpm_user_pid); ret = rdma_nl_unicast_wait(&init_net, skb, iwpm_user_pid);
if (ret) { if (ret) {
skb = NULL; /* skb is freed in the netlink send-op handling */ skb = NULL; /* skb is freed in the netlink send-op handling */
err_str = "Unable to send a nlmsg"; err_str = "Unable to send a nlmsg";
@ -308,8 +306,7 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
query_mapping_error: query_mapping_error:
pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client); pr_info("%s: %s (client = %d)\n", __func__, err_str, nl_client);
query_mapping_error_nowarn: query_mapping_error_nowarn:
if (skb) dev_kfree_skb(skb);
dev_kfree_skb(skb);
if (nlmsg_request) if (nlmsg_request)
iwpm_free_nlmsg_request(&nlmsg_request->kref); iwpm_free_nlmsg_request(&nlmsg_request->kref);
return ret; return ret;
@ -364,7 +361,7 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
nlmsg_end(skb, nlh); nlmsg_end(skb, nlh);
ret = rdma_nl_unicast_wait(skb, iwpm_user_pid); ret = rdma_nl_unicast_wait(&init_net, skb, iwpm_user_pid);
if (ret) { if (ret) {
skb = NULL; /* skb is freed in the netlink send-op handling */ skb = NULL; /* skb is freed in the netlink send-op handling */
iwpm_user_pid = IWPM_PID_UNDEFINED; iwpm_user_pid = IWPM_PID_UNDEFINED;

View File

@ -645,7 +645,7 @@ static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
nlmsg_end(skb, nlh); nlmsg_end(skb, nlh);
ret = rdma_nl_unicast(skb, iwpm_pid); ret = rdma_nl_unicast(&init_net, skb, iwpm_pid);
if (ret) { if (ret) {
skb = NULL; skb = NULL;
err_str = "Unable to send a nlmsg"; err_str = "Unable to send a nlmsg";
@ -655,8 +655,7 @@ static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
return 0; return 0;
mapinfo_num_error: mapinfo_num_error:
pr_info("%s: %s\n", __func__, err_str); pr_info("%s: %s\n", __func__, err_str);
if (skb) dev_kfree_skb(skb);
dev_kfree_skb(skb);
return ret; return ret;
} }
@ -674,7 +673,7 @@ static int send_nlmsg_done(struct sk_buff *skb, u8 nl_client, int iwpm_pid)
return -ENOMEM; return -ENOMEM;
} }
nlh->nlmsg_type = NLMSG_DONE; nlh->nlmsg_type = NLMSG_DONE;
ret = rdma_nl_unicast(skb, iwpm_pid); ret = rdma_nl_unicast(&init_net, skb, iwpm_pid);
if (ret) if (ret)
pr_warn("%s Unable to send a nlmsg\n", __func__); pr_warn("%s Unable to send a nlmsg\n", __func__);
return ret; return ret;
@ -778,8 +777,7 @@ send_mapping_info_unlock:
send_mapping_info_exit: send_mapping_info_exit:
if (ret) { if (ret) {
pr_warn("%s: %s (ret = %d)\n", __func__, err_str, ret); pr_warn("%s: %s (ret = %d)\n", __func__, err_str, ret);
if (skb) dev_kfree_skb(skb);
dev_kfree_skb(skb);
return ret; return ret;
} }
send_nlmsg_done(skb, nl_client, iwpm_pid); send_nlmsg_done(skb, nl_client, iwpm_pid);
@ -824,7 +822,7 @@ int iwpm_send_hello(u8 nl_client, int iwpm_pid, u16 abi_version)
goto hello_num_error; goto hello_num_error;
nlmsg_end(skb, nlh); nlmsg_end(skb, nlh);
ret = rdma_nl_unicast(skb, iwpm_pid); ret = rdma_nl_unicast(&init_net, skb, iwpm_pid);
if (ret) { if (ret) {
skb = NULL; skb = NULL;
err_str = "Unable to send a nlmsg"; err_str = "Unable to send a nlmsg";
@ -834,7 +832,6 @@ int iwpm_send_hello(u8 nl_client, int iwpm_pid, u16 abi_version)
return 0; return 0;
hello_num_error: hello_num_error:
pr_info("%s: %s\n", __func__, err_str); pr_info("%s: %s\n", __func__, err_str);
if (skb) dev_kfree_skb(skb);
dev_kfree_skb(skb);
return ret; return ret;
} }

View File

@ -36,20 +36,22 @@
#include <linux/export.h> #include <linux/export.h>
#include <net/netlink.h> #include <net/netlink.h>
#include <net/net_namespace.h> #include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/sock.h> #include <net/sock.h>
#include <rdma/rdma_netlink.h> #include <rdma/rdma_netlink.h>
#include <linux/module.h> #include <linux/module.h>
#include "core_priv.h" #include "core_priv.h"
static DEFINE_MUTEX(rdma_nl_mutex); static DEFINE_MUTEX(rdma_nl_mutex);
static struct sock *nls;
static struct { static struct {
const struct rdma_nl_cbs *cb_table; const struct rdma_nl_cbs *cb_table;
} rdma_nl_types[RDMA_NL_NUM_CLIENTS]; } rdma_nl_types[RDMA_NL_NUM_CLIENTS];
bool rdma_nl_chk_listeners(unsigned int group) bool rdma_nl_chk_listeners(unsigned int group)
{ {
return netlink_has_listeners(nls, group); struct rdma_dev_net *rnet = rdma_net_to_dev_net(&init_net);
return netlink_has_listeners(rnet->nl_sock, group);
} }
EXPORT_SYMBOL(rdma_nl_chk_listeners); EXPORT_SYMBOL(rdma_nl_chk_listeners);
@ -73,13 +75,21 @@ static bool is_nl_msg_valid(unsigned int type, unsigned int op)
return (op < max_num_ops[type]) ? true : false; return (op < max_num_ops[type]) ? true : false;
} }
static bool is_nl_valid(unsigned int type, unsigned int op) static bool
is_nl_valid(const struct sk_buff *skb, unsigned int type, unsigned int op)
{ {
const struct rdma_nl_cbs *cb_table; const struct rdma_nl_cbs *cb_table;
if (!is_nl_msg_valid(type, op)) if (!is_nl_msg_valid(type, op))
return false; return false;
/*
* Currently only NLDEV client is supporting netlink commands in
* non init_net net namespace.
*/
if (sock_net(skb->sk) != &init_net && type != RDMA_NL_NLDEV)
return false;
if (!rdma_nl_types[type].cb_table) { if (!rdma_nl_types[type].cb_table) {
mutex_unlock(&rdma_nl_mutex); mutex_unlock(&rdma_nl_mutex);
request_module("rdma-netlink-subsys-%d", type); request_module("rdma-netlink-subsys-%d", type);
@ -161,7 +171,7 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
unsigned int op = RDMA_NL_GET_OP(type); unsigned int op = RDMA_NL_GET_OP(type);
const struct rdma_nl_cbs *cb_table; const struct rdma_nl_cbs *cb_table;
if (!is_nl_valid(index, op)) if (!is_nl_valid(skb, index, op))
return -EINVAL; return -EINVAL;
cb_table = rdma_nl_types[index].cb_table; cb_table = rdma_nl_types[index].cb_table;
@ -185,7 +195,7 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
.dump = cb_table[op].dump, .dump = cb_table[op].dump,
}; };
if (c.dump) if (c.dump)
return netlink_dump_start(nls, skb, nlh, &c); return netlink_dump_start(skb->sk, skb, nlh, &c);
return -EINVAL; return -EINVAL;
} }
@ -258,52 +268,65 @@ static void rdma_nl_rcv(struct sk_buff *skb)
mutex_unlock(&rdma_nl_mutex); mutex_unlock(&rdma_nl_mutex);
} }
int rdma_nl_unicast(struct sk_buff *skb, u32 pid) int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid)
{ {
struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
int err; int err;
err = netlink_unicast(nls, skb, pid, MSG_DONTWAIT); err = netlink_unicast(rnet->nl_sock, skb, pid, MSG_DONTWAIT);
return (err < 0) ? err : 0; return (err < 0) ? err : 0;
} }
EXPORT_SYMBOL(rdma_nl_unicast); EXPORT_SYMBOL(rdma_nl_unicast);
int rdma_nl_unicast_wait(struct sk_buff *skb, __u32 pid) int rdma_nl_unicast_wait(struct net *net, struct sk_buff *skb, __u32 pid)
{ {
struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
int err; int err;
err = netlink_unicast(nls, skb, pid, 0); err = netlink_unicast(rnet->nl_sock, skb, pid, 0);
return (err < 0) ? err : 0; return (err < 0) ? err : 0;
} }
EXPORT_SYMBOL(rdma_nl_unicast_wait); EXPORT_SYMBOL(rdma_nl_unicast_wait);
int rdma_nl_multicast(struct sk_buff *skb, unsigned int group, gfp_t flags) int rdma_nl_multicast(struct net *net, struct sk_buff *skb,
unsigned int group, gfp_t flags)
{ {
return nlmsg_multicast(nls, skb, 0, group, flags); struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
return nlmsg_multicast(rnet->nl_sock, skb, 0, group, flags);
} }
EXPORT_SYMBOL(rdma_nl_multicast); EXPORT_SYMBOL(rdma_nl_multicast);
int __init rdma_nl_init(void)
{
struct netlink_kernel_cfg cfg = {
.input = rdma_nl_rcv,
};
nls = netlink_kernel_create(&init_net, NETLINK_RDMA, &cfg);
if (!nls)
return -ENOMEM;
nls->sk_sndtimeo = 10 * HZ;
return 0;
}
void rdma_nl_exit(void) void rdma_nl_exit(void)
{ {
int idx; int idx;
for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++) for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
rdma_nl_unregister(idx); WARN(rdma_nl_types[idx].cb_table,
"Netlink client %d wasn't released prior to unloading %s\n",
idx, KBUILD_MODNAME);
}
netlink_kernel_release(nls); int rdma_nl_net_init(struct rdma_dev_net *rnet)
{
struct net *net = read_pnet(&rnet->net);
struct netlink_kernel_cfg cfg = {
.input = rdma_nl_rcv,
};
struct sock *nls;
nls = netlink_kernel_create(net, NETLINK_RDMA, &cfg);
if (!nls)
return -ENOMEM;
nls->sk_sndtimeo = 10 * HZ;
rnet->nl_sock = nls;
return 0;
}
void rdma_nl_net_exit(struct rdma_dev_net *rnet)
{
netlink_kernel_release(rnet->nl_sock);
} }
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_RDMA); MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_RDMA);

View File

@ -831,7 +831,7 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
nlmsg_end(msg, nlh); nlmsg_end(msg, nlh);
ib_device_put(device); ib_device_put(device);
return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
err_free: err_free:
nlmsg_free(msg); nlmsg_free(msg);
@ -971,7 +971,7 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
nlmsg_end(msg, nlh); nlmsg_end(msg, nlh);
ib_device_put(device); ib_device_put(device);
return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
err_free: err_free:
nlmsg_free(msg); nlmsg_free(msg);
@ -1073,7 +1073,7 @@ static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
nlmsg_end(msg, nlh); nlmsg_end(msg, nlh);
ib_device_put(device); ib_device_put(device);
return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
err_free: err_free:
nlmsg_free(msg); nlmsg_free(msg);
@ -1250,7 +1250,7 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
nlmsg_end(msg, nlh); nlmsg_end(msg, nlh);
ib_device_put(device); ib_device_put(device);
return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
err_free: err_free:
nlmsg_free(msg); nlmsg_free(msg);
@ -1595,7 +1595,7 @@ static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh,
put_device(data.cdev); put_device(data.cdev);
if (ibdev) if (ibdev)
ib_device_put(ibdev); ib_device_put(ibdev);
return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
out_data: out_data:
put_device(data.cdev); put_device(data.cdev);
@ -1635,7 +1635,7 @@ static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
return err; return err;
} }
nlmsg_end(msg, nlh); nlmsg_end(msg, nlh);
return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
} }
static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
@ -1733,7 +1733,7 @@ static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
nlmsg_end(msg, nlh); nlmsg_end(msg, nlh);
ib_device_put(device); ib_device_put(device);
return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
err_fill: err_fill:
rdma_counter_unbind_qpn(device, port, qpn, cntn); rdma_counter_unbind_qpn(device, port, qpn, cntn);
@ -1801,7 +1801,7 @@ static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
nlmsg_end(msg, nlh); nlmsg_end(msg, nlh);
ib_device_put(device); ib_device_put(device);
return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
err_fill: err_fill:
rdma_counter_bind_qpn(device, port, qpn, cntn); rdma_counter_bind_qpn(device, port, qpn, cntn);
@ -1892,7 +1892,7 @@ static int stat_get_doit_default_counter(struct sk_buff *skb,
mutex_unlock(&stats->lock); mutex_unlock(&stats->lock);
nlmsg_end(msg, nlh); nlmsg_end(msg, nlh);
ib_device_put(device); ib_device_put(device);
return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
err_table: err_table:
nla_nest_cancel(msg, table_attr); nla_nest_cancel(msg, table_attr);
@ -1964,7 +1964,7 @@ static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
nlmsg_end(msg, nlh); nlmsg_end(msg, nlh);
ib_device_put(device); ib_device_put(device);
return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
err_msg: err_msg:
nlmsg_free(msg); nlmsg_free(msg);

View File

@@ -860,7 +860,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
 	/* Repair the nlmsg header length */
 	nlmsg_end(skb, nlh);
 
-	return rdma_nl_multicast(skb, RDMA_NL_GROUP_LS, gfp_mask);
+	return rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_mask);
 }
 
 static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)

View File

@@ -289,6 +289,24 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
 		       ib_width_enum_to_int(attr.active_width), speed);
 }
 
+static const char *phys_state_to_str(enum ib_port_phys_state phys_state)
+{
+	static const char * phys_state_str[] = {
+		"<unknown>",
+		"Sleep",
+		"Polling",
+		"Disabled",
+		"PortConfigurationTraining",
+		"LinkUp",
+		"LinkErrorRecovery",
+		"Phy Test",
+	};
+
+	if (phys_state < ARRAY_SIZE(phys_state_str))
+		return phys_state_str[phys_state];
+	return "<unknown>";
+}
+
 static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
 			       char *buf)
 {
@@ -300,16 +318,8 @@ static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
 	if (ret)
 		return ret;
 
-	switch (attr.phys_state) {
-	case 1: return sprintf(buf, "1: Sleep\n");
-	case 2: return sprintf(buf, "2: Polling\n");
-	case 3: return sprintf(buf, "3: Disabled\n");
-	case 4: return sprintf(buf, "4: PortConfigurationTraining\n");
-	case 5: return sprintf(buf, "5: LinkUp\n");
-	case 6: return sprintf(buf, "6: LinkErrorRecovery\n");
-	case 7: return sprintf(buf, "7: Phy Test\n");
-	default: return sprintf(buf, "%d: <unknown>\n", attr.phys_state);
-	}
+	return sprintf(buf, "%d: %s\n", attr.phys_state,
+		       phys_state_to_str(attr.phys_state));
 }
 
 static ssize_t link_layer_show(struct ib_port *p, struct port_attribute *unused,
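
The new phys_state_to_str() above decodes the ib_port_phys_state values that
drivers are being converted to (see the bnxt_re hunk further down). Below is a
hedged sketch of how a provider's query_port path can report state with the enum
instead of magic numbers; the helper and its argument are illustrative.

/*
 * Hedged sketch, not from the diff: report link state symbolically with the
 * ib_port_phys_state enum; the enum values used here appear in the hunks.
 */
#include <rdma/ib_verbs.h>

static void example_fill_port_phys_state(struct ib_port_attr *attr,
					 bool carrier_ok)
{
	if (carrier_ok) {
		attr->state = IB_PORT_ACTIVE;
		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	} else {
		attr->state = IB_PORT_DOWN;
		attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
	}
}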

View File

@@ -218,7 +218,7 @@ static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp)
 		umem_odp->interval_tree.start =
 			ALIGN_DOWN(umem_odp->umem.address, page_size);
 		if (check_add_overflow(umem_odp->umem.address,
-				       umem_odp->umem.length,
+				       (unsigned long)umem_odp->umem.length,
 				       &umem_odp->interval_tree.last))
 			return -EOVERFLOW;
 		umem_odp->interval_tree.last =

View File

@@ -1042,7 +1042,7 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
 		ib_unregister_mad_agent(file->agent[i]);
 
 	mutex_unlock(&file->port->file_mutex);
+	mutex_destroy(&file->mutex);
 	kfree(file);
 	return 0;
 }

View File

@@ -3479,7 +3479,8 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
 
 err_copy:
 	ib_destroy_srq_user(srq, uverbs_get_cleared_udata(attrs));
+	/* It was released in ib_destroy_srq_user */
+	srq = NULL;
 err_free:
 	kfree(srq);
 err_put:

View File

@@ -120,6 +120,8 @@ static void ib_uverbs_release_dev(struct device *device)
 
 	uverbs_destroy_api(dev->uapi);
 	cleanup_srcu_struct(&dev->disassociate_srcu);
+	mutex_destroy(&dev->lists_mutex);
+	mutex_destroy(&dev->xrcd_tree_mutex);
 	kfree(dev);
 }
 
@@ -212,6 +214,8 @@ void ib_uverbs_release_file(struct kref *ref)
 	if (file->disassociate_page)
 		__free_pages(file->disassociate_page, 0);
+	mutex_destroy(&file->umap_lock);
+	mutex_destroy(&file->ucontext_lock);
 	kfree(file);
 }

View File

@@ -2259,6 +2259,7 @@ int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
 		if (ret)
 			return ret;
 	}
+	mutex_destroy(&xrcd->tgt_qp_mutex);
 
 	return xrcd->device->ops.dealloc_xrcd(xrcd, udata);
 }

View File

@@ -74,7 +74,7 @@ static const char * const bnxt_re_stat_name[] = {
 	[BNXT_RE_SEQ_ERR_NAKS_RCVD] = "seq_err_naks_rcvd",
 	[BNXT_RE_MAX_RETRY_EXCEEDED] = "max_retry_exceeded",
 	[BNXT_RE_RNR_NAKS_RCVD] = "rnr_naks_rcvd",
-	[BNXT_RE_MISSING_RESP] = "missin_resp",
+	[BNXT_RE_MISSING_RESP] = "missing_resp",
 	[BNXT_RE_UNRECOVERABLE_ERR] = "unrecoverable_err",
 	[BNXT_RE_BAD_RESP_ERR] = "bad_resp_err",
 	[BNXT_RE_LOCAL_QP_OP_ERR] = "local_qp_op_err",

View File

@@ -220,10 +220,10 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
 
 	if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
 		port_attr->state = IB_PORT_ACTIVE;
-		port_attr->phys_state = 5;
+		port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
 	} else {
 		port_attr->state = IB_PORT_DOWN;
-		port_attr->phys_state = 3;
+		port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
 	}
 	port_attr->max_mtu = IB_MTU_4096;
 	port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
@@ -1398,7 +1398,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
 			dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!");
 			bnxt_qplib_destroy_srq(&rdev->qplib_res,
 					       &srq->qplib_srq);
-			goto exit;
+			goto fail;
 		}
 	}
 	if (nq)

View File

@@ -1473,7 +1473,6 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
 			 &rdev->active_width);
 	set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
 	bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE);
-	bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE);
 
 	return 0;
 
 free_sctx:

View File

@ -991,33 +991,8 @@ static int iwch_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
static int iwch_query_port(struct ib_device *ibdev, static int iwch_query_port(struct ib_device *ibdev,
u8 port, struct ib_port_attr *props) u8 port, struct ib_port_attr *props)
{ {
struct iwch_dev *dev;
struct net_device *netdev;
struct in_device *inetdev;
pr_debug("%s ibdev %p\n", __func__, ibdev); pr_debug("%s ibdev %p\n", __func__, ibdev);
dev = to_iwch_dev(ibdev);
netdev = dev->rdev.port_info.lldevs[port-1];
/* props being zeroed by the caller, avoid zeroing it here */
props->max_mtu = IB_MTU_4096;
props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
if (!netif_carrier_ok(netdev))
props->state = IB_PORT_DOWN;
else {
inetdev = in_dev_get(netdev);
if (inetdev) {
if (inetdev->ifa_list)
props->state = IB_PORT_ACTIVE;
else
props->state = IB_PORT_INIT;
in_dev_put(inetdev);
} else
props->state = IB_PORT_INIT;
}
props->port_cap_flags = props->port_cap_flags =
IB_PORT_CM_SUP | IB_PORT_CM_SUP |
IB_PORT_SNMP_TUNNEL_SUP | IB_PORT_SNMP_TUNNEL_SUP |
@ -1273,8 +1248,24 @@ static const struct ib_device_ops iwch_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ucontext, iwch_ucontext, ibucontext), INIT_RDMA_OBJ_SIZE(ib_ucontext, iwch_ucontext, ibucontext),
}; };
static int set_netdevs(struct ib_device *ib_dev, struct cxio_rdev *rdev)
{
int ret;
int i;
for (i = 0; i < rdev->port_info.nports; i++) {
ret = ib_device_set_netdev(ib_dev, rdev->port_info.lldevs[i],
i + 1);
if (ret)
return ret;
}
return 0;
}
int iwch_register_device(struct iwch_dev *dev) int iwch_register_device(struct iwch_dev *dev)
{ {
int err;
pr_debug("%s iwch_dev %p\n", __func__, dev); pr_debug("%s iwch_dev %p\n", __func__, dev);
memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid)); memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6); memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
@ -1315,6 +1306,10 @@ int iwch_register_device(struct iwch_dev *dev)
rdma_set_device_sysfs_group(&dev->ibdev, &iwch_attr_group); rdma_set_device_sysfs_group(&dev->ibdev, &iwch_attr_group);
ib_set_device_ops(&dev->ibdev, &iwch_dev_ops); ib_set_device_ops(&dev->ibdev, &iwch_dev_ops);
err = set_netdevs(&dev->ibdev, &dev->rdev);
if (err)
return err;
return ib_register_device(&dev->ibdev, "cxgb3_%d"); return ib_register_device(&dev->ibdev, "cxgb3_%d");
} }

View File

@ -305,32 +305,8 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
static int c4iw_query_port(struct ib_device *ibdev, u8 port, static int c4iw_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props) struct ib_port_attr *props)
{ {
struct c4iw_dev *dev;
struct net_device *netdev;
struct in_device *inetdev;
pr_debug("ibdev %p\n", ibdev); pr_debug("ibdev %p\n", ibdev);
dev = to_c4iw_dev(ibdev);
netdev = dev->rdev.lldi.ports[port-1];
/* props being zeroed by the caller, avoid zeroing it here */
props->max_mtu = IB_MTU_4096;
props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
if (!netif_carrier_ok(netdev))
props->state = IB_PORT_DOWN;
else {
inetdev = in_dev_get(netdev);
if (inetdev) {
if (inetdev->ifa_list)
props->state = IB_PORT_ACTIVE;
else
props->state = IB_PORT_INIT;
in_dev_put(inetdev);
} else
props->state = IB_PORT_INIT;
}
props->port_cap_flags = props->port_cap_flags =
IB_PORT_CM_SUP | IB_PORT_CM_SUP |
IB_PORT_SNMP_TUNNEL_SUP | IB_PORT_SNMP_TUNNEL_SUP |

View File

@@ -156,5 +156,8 @@ int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 		  int qp_attr_mask, struct ib_udata *udata);
 enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
 					 u8 port_num);
+struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num);
+int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+		     u8 port_num, int index);
 
 #endif /* _EFA_H_ */

View File

@ -109,17 +109,19 @@ static u32 efa_com_reg_read32(struct efa_com_dev *edev, u16 offset)
} while (time_is_after_jiffies(exp_time)); } while (time_is_after_jiffies(exp_time));
if (read_resp->req_id != mmio_read->seq_num) { if (read_resp->req_id != mmio_read->seq_num) {
ibdev_err(edev->efa_dev, ibdev_err_ratelimited(
"Reading register timed out. expected: req id[%u] offset[%#x] actual: req id[%u] offset[%#x]\n", edev->efa_dev,
mmio_read->seq_num, offset, read_resp->req_id, "Reading register timed out. expected: req id[%u] offset[%#x] actual: req id[%u] offset[%#x]\n",
read_resp->reg_off); mmio_read->seq_num, offset, read_resp->req_id,
read_resp->reg_off);
err = EFA_MMIO_READ_INVALID; err = EFA_MMIO_READ_INVALID;
goto out; goto out;
} }
if (read_resp->reg_off != offset) { if (read_resp->reg_off != offset) {
ibdev_err(edev->efa_dev, ibdev_err_ratelimited(
"Reading register failed: wrong offset provided\n"); edev->efa_dev,
"Reading register failed: wrong offset provided\n");
err = EFA_MMIO_READ_INVALID; err = EFA_MMIO_READ_INVALID;
goto out; goto out;
} }
@ -293,9 +295,10 @@ static struct efa_comp_ctx *efa_com_get_comp_ctx(struct efa_com_admin_queue *aq,
u16 ctx_id = cmd_id & (aq->depth - 1); u16 ctx_id = cmd_id & (aq->depth - 1);
if (aq->comp_ctx[ctx_id].occupied && capture) { if (aq->comp_ctx[ctx_id].occupied && capture) {
ibdev_err(aq->efa_dev, ibdev_err_ratelimited(
"Completion context for command_id %#x is occupied\n", aq->efa_dev,
cmd_id); "Completion context for command_id %#x is occupied\n",
cmd_id);
return NULL; return NULL;
} }
@ -401,7 +404,7 @@ static struct efa_comp_ctx *efa_com_submit_admin_cmd(struct efa_com_admin_queue
spin_lock(&aq->sq.lock); spin_lock(&aq->sq.lock);
if (!test_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state)) { if (!test_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state)) {
ibdev_err(aq->efa_dev, "Admin queue is closed\n"); ibdev_err_ratelimited(aq->efa_dev, "Admin queue is closed\n");
spin_unlock(&aq->sq.lock); spin_unlock(&aq->sq.lock);
return ERR_PTR(-ENODEV); return ERR_PTR(-ENODEV);
} }
@ -519,8 +522,9 @@ static int efa_com_wait_and_process_admin_cq_polling(struct efa_comp_ctx *comp_c
break; break;
if (time_is_before_jiffies(timeout)) { if (time_is_before_jiffies(timeout)) {
ibdev_err(aq->efa_dev, ibdev_err_ratelimited(
"Wait for completion (polling) timeout\n"); aq->efa_dev,
"Wait for completion (polling) timeout\n");
/* EFA didn't have any completion */ /* EFA didn't have any completion */
atomic64_inc(&aq->stats.no_completion); atomic64_inc(&aq->stats.no_completion);
@ -561,17 +565,19 @@ static int efa_com_wait_and_process_admin_cq_interrupts(struct efa_comp_ctx *com
atomic64_inc(&aq->stats.no_completion); atomic64_inc(&aq->stats.no_completion);
if (comp_ctx->status == EFA_CMD_COMPLETED) if (comp_ctx->status == EFA_CMD_COMPLETED)
ibdev_err(aq->efa_dev, ibdev_err_ratelimited(
"The device sent a completion but the driver didn't receive any MSI-X interrupt for admin cmd %s(%d) status %d (ctx: 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n", aq->efa_dev,
efa_com_cmd_str(comp_ctx->cmd_opcode), "The device sent a completion but the driver didn't receive any MSI-X interrupt for admin cmd %s(%d) status %d (ctx: 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
comp_ctx->cmd_opcode, comp_ctx->status, efa_com_cmd_str(comp_ctx->cmd_opcode),
comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc); comp_ctx->cmd_opcode, comp_ctx->status,
comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
else else
ibdev_err(aq->efa_dev, ibdev_err_ratelimited(
"The device didn't send any completion for admin cmd %s(%d) status %d (ctx 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n", aq->efa_dev,
efa_com_cmd_str(comp_ctx->cmd_opcode), "The device didn't send any completion for admin cmd %s(%d) status %d (ctx 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
comp_ctx->cmd_opcode, comp_ctx->status, efa_com_cmd_str(comp_ctx->cmd_opcode),
comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc); comp_ctx->cmd_opcode, comp_ctx->status,
comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state); clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
err = -ETIME; err = -ETIME;
@ -633,10 +639,11 @@ int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
cmd->aq_common_descriptor.opcode); cmd->aq_common_descriptor.opcode);
comp_ctx = efa_com_submit_admin_cmd(aq, cmd, cmd_size, comp, comp_size); comp_ctx = efa_com_submit_admin_cmd(aq, cmd, cmd_size, comp, comp_size);
if (IS_ERR(comp_ctx)) { if (IS_ERR(comp_ctx)) {
ibdev_err(aq->efa_dev, ibdev_err_ratelimited(
"Failed to submit command %s (opcode %u) err %ld\n", aq->efa_dev,
efa_com_cmd_str(cmd->aq_common_descriptor.opcode), "Failed to submit command %s (opcode %u) err %ld\n",
cmd->aq_common_descriptor.opcode, PTR_ERR(comp_ctx)); efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
cmd->aq_common_descriptor.opcode, PTR_ERR(comp_ctx));
up(&aq->avail_cmds); up(&aq->avail_cmds);
return PTR_ERR(comp_ctx); return PTR_ERR(comp_ctx);
@ -644,11 +651,12 @@ int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
err = efa_com_wait_and_process_admin_cq(comp_ctx, aq); err = efa_com_wait_and_process_admin_cq(comp_ctx, aq);
if (err) if (err)
ibdev_err(aq->efa_dev, ibdev_err_ratelimited(
"Failed to process command %s (opcode %u) comp_status %d err %d\n", aq->efa_dev,
efa_com_cmd_str(cmd->aq_common_descriptor.opcode), "Failed to process command %s (opcode %u) comp_status %d err %d\n",
cmd->aq_common_descriptor.opcode, efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
comp_ctx->comp_status, err); cmd->aq_common_descriptor.opcode, comp_ctx->comp_status,
err);
up(&aq->avail_cmds); up(&aq->avail_cmds);

View File

@ -44,7 +44,8 @@ int efa_com_create_qp(struct efa_com_dev *edev,
(struct efa_admin_acq_entry *)&cmd_completion, (struct efa_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion)); sizeof(cmd_completion));
if (err) { if (err) {
ibdev_err(edev->efa_dev, "Failed to create qp [%d]\n", err); ibdev_err_ratelimited(edev->efa_dev,
"Failed to create qp [%d]\n", err);
return err; return err;
} }
@ -82,9 +83,10 @@ int efa_com_modify_qp(struct efa_com_dev *edev,
(struct efa_admin_acq_entry *)&resp, (struct efa_admin_acq_entry *)&resp,
sizeof(resp)); sizeof(resp));
if (err) { if (err) {
ibdev_err(edev->efa_dev, ibdev_err_ratelimited(
"Failed to modify qp-%u modify_mask[%#x] [%d]\n", edev->efa_dev,
cmd.qp_handle, cmd.modify_mask, err); "Failed to modify qp-%u modify_mask[%#x] [%d]\n",
cmd.qp_handle, cmd.modify_mask, err);
return err; return err;
} }
@ -109,8 +111,9 @@ int efa_com_query_qp(struct efa_com_dev *edev,
(struct efa_admin_acq_entry *)&resp, (struct efa_admin_acq_entry *)&resp,
sizeof(resp)); sizeof(resp));
if (err) { if (err) {
ibdev_err(edev->efa_dev, "Failed to query qp-%u [%d]\n", ibdev_err_ratelimited(edev->efa_dev,
cmd.qp_handle, err); "Failed to query qp-%u [%d]\n",
cmd.qp_handle, err);
return err; return err;
} }
@ -139,8 +142,9 @@ int efa_com_destroy_qp(struct efa_com_dev *edev,
(struct efa_admin_acq_entry *)&cmd_completion, (struct efa_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion)); sizeof(cmd_completion));
if (err) { if (err) {
ibdev_err(edev->efa_dev, "Failed to destroy qp-%u [%d]\n", ibdev_err_ratelimited(edev->efa_dev,
qp_cmd.qp_handle, err); "Failed to destroy qp-%u [%d]\n",
qp_cmd.qp_handle, err);
return err; return err;
} }
@ -173,7 +177,8 @@ int efa_com_create_cq(struct efa_com_dev *edev,
(struct efa_admin_acq_entry *)&cmd_completion, (struct efa_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion)); sizeof(cmd_completion));
if (err) { if (err) {
ibdev_err(edev->efa_dev, "Failed to create cq[%d]\n", err); ibdev_err_ratelimited(edev->efa_dev,
"Failed to create cq[%d]\n", err);
return err; return err;
} }
@ -201,8 +206,9 @@ int efa_com_destroy_cq(struct efa_com_dev *edev,
sizeof(destroy_resp)); sizeof(destroy_resp));
if (err) { if (err) {
ibdev_err(edev->efa_dev, "Failed to destroy CQ-%u [%d]\n", ibdev_err_ratelimited(edev->efa_dev,
params->cq_idx, err); "Failed to destroy CQ-%u [%d]\n",
params->cq_idx, err);
return err; return err;
} }
@ -250,7 +256,8 @@ int efa_com_register_mr(struct efa_com_dev *edev,
(struct efa_admin_acq_entry *)&cmd_completion, (struct efa_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion)); sizeof(cmd_completion));
if (err) { if (err) {
ibdev_err(edev->efa_dev, "Failed to register mr [%d]\n", err); ibdev_err_ratelimited(edev->efa_dev,
"Failed to register mr [%d]\n", err);
return err; return err;
} }
@ -277,9 +284,9 @@ int efa_com_dereg_mr(struct efa_com_dev *edev,
(struct efa_admin_acq_entry *)&cmd_completion, (struct efa_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion)); sizeof(cmd_completion));
if (err) { if (err) {
ibdev_err(edev->efa_dev, ibdev_err_ratelimited(edev->efa_dev,
"Failed to de-register mr(lkey-%u) [%d]\n", "Failed to de-register mr(lkey-%u) [%d]\n",
mr_cmd.l_key, err); mr_cmd.l_key, err);
return err; return err;
} }
@ -306,8 +313,9 @@ int efa_com_create_ah(struct efa_com_dev *edev,
(struct efa_admin_acq_entry *)&cmd_completion, (struct efa_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion)); sizeof(cmd_completion));
if (err) { if (err) {
ibdev_err(edev->efa_dev, "Failed to create ah for %pI6 [%d]\n", ibdev_err_ratelimited(edev->efa_dev,
ah_cmd.dest_addr, err); "Failed to create ah for %pI6 [%d]\n",
ah_cmd.dest_addr, err);
return err; return err;
} }
@ -334,8 +342,9 @@ int efa_com_destroy_ah(struct efa_com_dev *edev,
(struct efa_admin_acq_entry *)&cmd_completion, (struct efa_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion)); sizeof(cmd_completion));
if (err) { if (err) {
ibdev_err(edev->efa_dev, "Failed to destroy ah-%d pd-%d [%d]\n", ibdev_err_ratelimited(edev->efa_dev,
ah_cmd.ah, ah_cmd.pd, err); "Failed to destroy ah-%d pd-%d [%d]\n",
ah_cmd.ah, ah_cmd.pd, err);
return err; return err;
} }
@ -367,8 +376,9 @@ static int efa_com_get_feature_ex(struct efa_com_dev *edev,
int err; int err;
if (!efa_com_check_supported_feature_id(edev, feature_id)) { if (!efa_com_check_supported_feature_id(edev, feature_id)) {
ibdev_err(edev->efa_dev, "Feature %d isn't supported\n", ibdev_err_ratelimited(edev->efa_dev,
feature_id); "Feature %d isn't supported\n",
feature_id);
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
@ -396,9 +406,10 @@ static int efa_com_get_feature_ex(struct efa_com_dev *edev,
sizeof(*get_resp)); sizeof(*get_resp));
if (err) { if (err) {
ibdev_err(edev->efa_dev, ibdev_err_ratelimited(
"Failed to submit get_feature command %d [%d]\n", edev->efa_dev,
feature_id, err); "Failed to submit get_feature command %d [%d]\n",
feature_id, err);
return err; return err;
} }
@ -421,8 +432,9 @@ int efa_com_get_network_attr(struct efa_com_dev *edev,
err = efa_com_get_feature(edev, &resp, err = efa_com_get_feature(edev, &resp,
EFA_ADMIN_NETWORK_ATTR); EFA_ADMIN_NETWORK_ATTR);
if (err) { if (err) {
ibdev_err(edev->efa_dev, ibdev_err_ratelimited(edev->efa_dev,
"Failed to get network attributes %d\n", err); "Failed to get network attributes %d\n",
err);
return err; return err;
} }
@ -441,8 +453,9 @@ int efa_com_get_device_attr(struct efa_com_dev *edev,
err = efa_com_get_feature(edev, &resp, EFA_ADMIN_DEVICE_ATTR); err = efa_com_get_feature(edev, &resp, EFA_ADMIN_DEVICE_ATTR);
if (err) { if (err) {
ibdev_err(edev->efa_dev, "Failed to get device attributes %d\n", ibdev_err_ratelimited(edev->efa_dev,
err); "Failed to get device attributes %d\n",
err);
return err; return err;
} }
@ -456,9 +469,10 @@ int efa_com_get_device_attr(struct efa_com_dev *edev,
result->db_bar = resp.u.device_attr.db_bar; result->db_bar = resp.u.device_attr.db_bar;
if (result->admin_api_version < 1) { if (result->admin_api_version < 1) {
ibdev_err(edev->efa_dev, ibdev_err_ratelimited(
"Failed to get device attr api version [%u < 1]\n", edev->efa_dev,
result->admin_api_version); "Failed to get device attr api version [%u < 1]\n",
result->admin_api_version);
return -EINVAL; return -EINVAL;
} }
@ -466,8 +480,9 @@ int efa_com_get_device_attr(struct efa_com_dev *edev,
err = efa_com_get_feature(edev, &resp, err = efa_com_get_feature(edev, &resp,
EFA_ADMIN_QUEUE_ATTR); EFA_ADMIN_QUEUE_ATTR);
if (err) { if (err) {
ibdev_err(edev->efa_dev, ibdev_err_ratelimited(edev->efa_dev,
"Failed to get network attributes %d\n", err); "Failed to get queue attributes %d\n",
err);
return err; return err;
} }
@ -497,7 +512,8 @@ int efa_com_get_hw_hints(struct efa_com_dev *edev,
err = efa_com_get_feature(edev, &resp, EFA_ADMIN_HW_HINTS); err = efa_com_get_feature(edev, &resp, EFA_ADMIN_HW_HINTS);
if (err) { if (err) {
ibdev_err(edev->efa_dev, "Failed to get hw hints %d\n", err); ibdev_err_ratelimited(edev->efa_dev,
"Failed to get hw hints %d\n", err);
return err; return err;
} }
@ -520,8 +536,9 @@ static int efa_com_set_feature_ex(struct efa_com_dev *edev,
int err; int err;
if (!efa_com_check_supported_feature_id(edev, feature_id)) { if (!efa_com_check_supported_feature_id(edev, feature_id)) {
ibdev_err(edev->efa_dev, "Feature %d isn't supported\n", ibdev_err_ratelimited(edev->efa_dev,
feature_id); "Feature %d isn't supported\n",
feature_id);
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
@ -545,9 +562,10 @@ static int efa_com_set_feature_ex(struct efa_com_dev *edev,
sizeof(*set_resp)); sizeof(*set_resp));
if (err) { if (err) {
ibdev_err(edev->efa_dev, ibdev_err_ratelimited(
"Failed to submit set_feature command %d error: %d\n", edev->efa_dev,
feature_id, err); "Failed to submit set_feature command %d error: %d\n",
feature_id, err);
return err; return err;
} }
@ -574,8 +592,9 @@ int efa_com_set_aenq_config(struct efa_com_dev *edev, u32 groups)
err = efa_com_get_feature(edev, &get_resp, EFA_ADMIN_AENQ_CONFIG); err = efa_com_get_feature(edev, &get_resp, EFA_ADMIN_AENQ_CONFIG);
if (err) { if (err) {
ibdev_err(edev->efa_dev, "Failed to get aenq attributes: %d\n", ibdev_err_ratelimited(edev->efa_dev,
err); "Failed to get aenq attributes: %d\n",
err);
return err; return err;
} }
@ -585,9 +604,10 @@ int efa_com_set_aenq_config(struct efa_com_dev *edev, u32 groups)
get_resp.u.aenq.enabled_groups); get_resp.u.aenq.enabled_groups);
if ((get_resp.u.aenq.supported_groups & groups) != groups) { if ((get_resp.u.aenq.supported_groups & groups) != groups) {
ibdev_err(edev->efa_dev, ibdev_err_ratelimited(
"Trying to set unsupported aenq groups[%#x] supported[%#x]\n", edev->efa_dev,
groups, get_resp.u.aenq.supported_groups); "Trying to set unsupported aenq groups[%#x] supported[%#x]\n",
groups, get_resp.u.aenq.supported_groups);
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
@ -595,8 +615,9 @@ int efa_com_set_aenq_config(struct efa_com_dev *edev, u32 groups)
err = efa_com_set_feature(edev, &set_resp, &cmd, err = efa_com_set_feature(edev, &set_resp, &cmd,
EFA_ADMIN_AENQ_CONFIG); EFA_ADMIN_AENQ_CONFIG);
if (err) { if (err) {
ibdev_err(edev->efa_dev, "Failed to set aenq attributes: %d\n", ibdev_err_ratelimited(edev->efa_dev,
err); "Failed to set aenq attributes: %d\n",
err);
return err; return err;
} }
@ -619,7 +640,8 @@ int efa_com_alloc_pd(struct efa_com_dev *edev,
(struct efa_admin_acq_entry *)&resp, (struct efa_admin_acq_entry *)&resp,
sizeof(resp)); sizeof(resp));
if (err) { if (err) {
ibdev_err(edev->efa_dev, "Failed to allocate pd[%d]\n", err); ibdev_err_ratelimited(edev->efa_dev,
"Failed to allocate pd[%d]\n", err);
return err; return err;
} }
@ -645,8 +667,9 @@ int efa_com_dealloc_pd(struct efa_com_dev *edev,
(struct efa_admin_acq_entry *)&resp, (struct efa_admin_acq_entry *)&resp,
sizeof(resp)); sizeof(resp));
if (err) { if (err) {
ibdev_err(edev->efa_dev, "Failed to deallocate pd-%u [%d]\n", ibdev_err_ratelimited(edev->efa_dev,
cmd.pd, err); "Failed to deallocate pd-%u [%d]\n",
cmd.pd, err);
return err; return err;
} }
@ -669,7 +692,8 @@ int efa_com_alloc_uar(struct efa_com_dev *edev,
(struct efa_admin_acq_entry *)&resp, (struct efa_admin_acq_entry *)&resp,
sizeof(resp)); sizeof(resp));
if (err) { if (err) {
ibdev_err(edev->efa_dev, "Failed to allocate uar[%d]\n", err); ibdev_err_ratelimited(edev->efa_dev,
"Failed to allocate uar[%d]\n", err);
return err; return err;
} }
@ -695,10 +719,47 @@ int efa_com_dealloc_uar(struct efa_com_dev *edev,
(struct efa_admin_acq_entry *)&resp, (struct efa_admin_acq_entry *)&resp,
sizeof(resp)); sizeof(resp));
if (err) { if (err) {
ibdev_err(edev->efa_dev, "Failed to deallocate uar-%u [%d]\n", ibdev_err_ratelimited(edev->efa_dev,
cmd.uar, err); "Failed to deallocate uar-%u [%d]\n",
cmd.uar, err);
return err; return err;
} }
return 0; return 0;
} }
int efa_com_get_stats(struct efa_com_dev *edev,
struct efa_com_get_stats_params *params,
union efa_com_get_stats_result *result)
{
struct efa_com_admin_queue *aq = &edev->aq;
struct efa_admin_aq_get_stats_cmd cmd = {};
struct efa_admin_acq_get_stats_resp resp;
int err;
cmd.aq_common_descriptor.opcode = EFA_ADMIN_GET_STATS;
cmd.type = params->type;
cmd.scope = params->scope;
cmd.scope_modifier = params->scope_modifier;
err = efa_com_cmd_exec(aq,
(struct efa_admin_aq_entry *)&cmd,
sizeof(cmd),
(struct efa_admin_acq_entry *)&resp,
sizeof(resp));
if (err) {
ibdev_err_ratelimited(
edev->efa_dev,
"Failed to get stats type-%u scope-%u.%u [%d]\n",
cmd.type, cmd.scope, cmd.scope_modifier, err);
return err;
}
result->basic_stats.tx_bytes = resp.basic_stats.tx_bytes;
result->basic_stats.tx_pkts = resp.basic_stats.tx_pkts;
result->basic_stats.rx_bytes = resp.basic_stats.rx_bytes;
result->basic_stats.rx_pkts = resp.basic_stats.rx_pkts;
result->basic_stats.rx_drops = resp.basic_stats.rx_drops;
return 0;
}
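
Most of the efa_com_cmd.c hunks above simply swap ibdev_err() for ibdev_err_ratelimited() so that a device whose admin queue keeps failing cannot flood the kernel log. The ratelimited variant is a drop-in replacement; a minimal sketch of the pattern follows, where my_cmd_exec() and the surrounding context are hypothetical and only ibdev_err_ratelimited() is the real helper the diff switches to.

#include <rdma/ib_verbs.h>

static int my_cmd_exec(struct ib_device *ibdev, void *cmd); /* hypothetical */

static int my_submit_cmd(struct ib_device *ibdev, void *cmd)
{
	int err;

	err = my_cmd_exec(ibdev, cmd);
	if (err) {
		/* Rate limited: repeated failures no longer spam dmesg */
		ibdev_err_ratelimited(ibdev,
				      "admin command failed: %d\n", err);
		return err;
	}

	return 0;
}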


@ -225,6 +225,26 @@ struct efa_com_dealloc_uar_params {
u16 uarn; u16 uarn;
}; };
struct efa_com_get_stats_params {
/* see enum efa_admin_get_stats_type */
u8 type;
/* see enum efa_admin_get_stats_scope */
u8 scope;
u16 scope_modifier;
};
struct efa_com_basic_stats {
u64 tx_bytes;
u64 tx_pkts;
u64 rx_bytes;
u64 rx_pkts;
u64 rx_drops;
};
union efa_com_get_stats_result {
struct efa_com_basic_stats basic_stats;
};
void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low); void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low);
int efa_com_create_qp(struct efa_com_dev *edev, int efa_com_create_qp(struct efa_com_dev *edev,
struct efa_com_create_qp_params *params, struct efa_com_create_qp_params *params,
@ -266,5 +286,8 @@ int efa_com_alloc_uar(struct efa_com_dev *edev,
struct efa_com_alloc_uar_result *result); struct efa_com_alloc_uar_result *result);
int efa_com_dealloc_uar(struct efa_com_dev *edev, int efa_com_dealloc_uar(struct efa_com_dev *edev,
struct efa_com_dealloc_uar_params *params); struct efa_com_dealloc_uar_params *params);
int efa_com_get_stats(struct efa_com_dev *edev,
struct efa_com_get_stats_params *params,
union efa_com_get_stats_result *result);
#endif /* _EFA_COM_CMD_H_ */ #endif /* _EFA_COM_CMD_H_ */


@ -201,6 +201,7 @@ static const struct ib_device_ops efa_dev_ops = {
.driver_id = RDMA_DRIVER_EFA, .driver_id = RDMA_DRIVER_EFA,
.uverbs_abi_ver = EFA_UVERBS_ABI_VERSION, .uverbs_abi_ver = EFA_UVERBS_ABI_VERSION,
.alloc_hw_stats = efa_alloc_hw_stats,
.alloc_pd = efa_alloc_pd, .alloc_pd = efa_alloc_pd,
.alloc_ucontext = efa_alloc_ucontext, .alloc_ucontext = efa_alloc_ucontext,
.create_ah = efa_create_ah, .create_ah = efa_create_ah,
@ -212,6 +213,7 @@ static const struct ib_device_ops efa_dev_ops = {
.destroy_ah = efa_destroy_ah, .destroy_ah = efa_destroy_ah,
.destroy_cq = efa_destroy_cq, .destroy_cq = efa_destroy_cq,
.destroy_qp = efa_destroy_qp, .destroy_qp = efa_destroy_qp,
.get_hw_stats = efa_get_hw_stats,
.get_link_layer = efa_port_link_layer, .get_link_layer = efa_port_link_layer,
.get_port_immutable = efa_get_port_immutable, .get_port_immutable = efa_get_port_immutable,
.mmap = efa_mmap, .mmap = efa_mmap,


@ -41,6 +41,33 @@ static inline u64 get_mmap_key(const struct efa_mmap_entry *efa)
((u64)efa->mmap_page << PAGE_SHIFT); ((u64)efa->mmap_page << PAGE_SHIFT);
} }
#define EFA_DEFINE_STATS(op) \
op(EFA_TX_BYTES, "tx_bytes") \
op(EFA_TX_PKTS, "tx_pkts") \
op(EFA_RX_BYTES, "rx_bytes") \
op(EFA_RX_PKTS, "rx_pkts") \
op(EFA_RX_DROPS, "rx_drops") \
op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
op(EFA_COMPLETED_CMDS, "completed_cmds") \
op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \
op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \
op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \
op(EFA_CREATE_QP_ERR, "create_qp_err") \
op(EFA_REG_MR_ERR, "reg_mr_err") \
op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \
op(EFA_CREATE_AH_ERR, "create_ah_err")
#define EFA_STATS_ENUM(ename, name) ename,
#define EFA_STATS_STR(ename, name) [ename] = name,
enum efa_hw_stats {
EFA_DEFINE_STATS(EFA_STATS_ENUM)
};
static const char *const efa_stats_names[] = {
EFA_DEFINE_STATS(EFA_STATS_STR)
};
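
The EFA_DEFINE_STATS block above is the classic X-macro idiom: one master list of (enum, string) pairs is expanded twice, once through EFA_STATS_ENUM to produce enum efa_hw_stats and once through EFA_STATS_STR to produce the matching efa_stats_names[] table, so the counter indices and their sysfs names cannot drift apart. A reduced, self-contained illustration of the same idiom (the DEMO_* names are hypothetical, not part of the driver):

#include <stdio.h>

/* One master list of (enum name, display name) pairs */
#define DEMO_DEFINE_STATS(op) \
	op(DEMO_TX_BYTES, "tx_bytes") \
	op(DEMO_RX_BYTES, "rx_bytes")

#define DEMO_STATS_ENUM(ename, name) ename,
#define DEMO_STATS_STR(ename, name) [ename] = name,

enum demo_stats {
	DEMO_DEFINE_STATS(DEMO_STATS_ENUM) /* DEMO_TX_BYTES = 0, DEMO_RX_BYTES = 1 */
	DEMO_NUM_STATS
};

static const char *const demo_stats_names[] = {
	DEMO_DEFINE_STATS(DEMO_STATS_STR) /* [0] = "tx_bytes", [1] = "rx_bytes" */
};

int main(void)
{
	/* Index and name stay in sync because both come from one list */
	printf("%d: %s\n", DEMO_RX_BYTES, demo_stats_names[DEMO_RX_BYTES]);
	return 0;
}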
#define EFA_CHUNK_PAYLOAD_SHIFT 12 #define EFA_CHUNK_PAYLOAD_SHIFT 12
#define EFA_CHUNK_PAYLOAD_SIZE BIT(EFA_CHUNK_PAYLOAD_SHIFT) #define EFA_CHUNK_PAYLOAD_SIZE BIT(EFA_CHUNK_PAYLOAD_SHIFT)
#define EFA_CHUNK_PAYLOAD_PTR_SIZE 8 #define EFA_CHUNK_PAYLOAD_PTR_SIZE 8
@ -121,7 +148,7 @@ static inline struct efa_ah *to_eah(struct ib_ah *ibah)
} }
#define field_avail(x, fld, sz) (offsetof(typeof(x), fld) + \ #define field_avail(x, fld, sz) (offsetof(typeof(x), fld) + \
sizeof(((typeof(x) *)0)->fld) <= (sz)) FIELD_SIZEOF(typeof(x), fld) <= (sz))
#define is_reserved_cleared(reserved) \ #define is_reserved_cleared(reserved) \
!memchr_inv(reserved, 0, sizeof(reserved)) !memchr_inv(reserved, 0, sizeof(reserved))
@ -306,7 +333,7 @@ int efa_query_port(struct ib_device *ibdev, u8 port,
props->lmc = 1; props->lmc = 1;
props->state = IB_PORT_ACTIVE; props->state = IB_PORT_ACTIVE;
props->phys_state = 5; props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
props->gid_tbl_len = 1; props->gid_tbl_len = 1;
props->pkey_tbl_len = 1; props->pkey_tbl_len = 1;
props->active_speed = IB_SPEED_EDR; props->active_speed = IB_SPEED_EDR;
@ -1473,14 +1500,12 @@ int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
ibdev_dbg(&dev->ibdev, "Deregister mr[%d]\n", ibmr->lkey); ibdev_dbg(&dev->ibdev, "Deregister mr[%d]\n", ibmr->lkey);
if (mr->umem) { params.l_key = mr->ibmr.lkey;
params.l_key = mr->ibmr.lkey; err = efa_com_dereg_mr(&dev->edev, &params);
err = efa_com_dereg_mr(&dev->edev, &params); if (err)
if (err) return err;
return err;
}
ib_umem_release(mr->umem);
ib_umem_release(mr->umem);
kfree(mr); kfree(mr);
return 0; return 0;
@ -1727,6 +1752,54 @@ void efa_destroy_ah(struct ib_ah *ibah, u32 flags)
efa_ah_destroy(dev, ah); efa_ah_destroy(dev, ah);
} }
struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num)
{
return rdma_alloc_hw_stats_struct(efa_stats_names,
ARRAY_SIZE(efa_stats_names),
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
u8 port_num, int index)
{
struct efa_com_get_stats_params params = {};
union efa_com_get_stats_result result;
struct efa_dev *dev = to_edev(ibdev);
struct efa_com_basic_stats *bs;
struct efa_com_stats_admin *as;
struct efa_stats *s;
int err;
params.type = EFA_ADMIN_GET_STATS_TYPE_BASIC;
params.scope = EFA_ADMIN_GET_STATS_SCOPE_ALL;
err = efa_com_get_stats(&dev->edev, &params, &result);
if (err)
return err;
bs = &result.basic_stats;
stats->value[EFA_TX_BYTES] = bs->tx_bytes;
stats->value[EFA_TX_PKTS] = bs->tx_pkts;
stats->value[EFA_RX_BYTES] = bs->rx_bytes;
stats->value[EFA_RX_PKTS] = bs->rx_pkts;
stats->value[EFA_RX_DROPS] = bs->rx_drops;
as = &dev->edev.aq.stats;
stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd);
stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion);
s = &dev->stats;
stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->sw_stats.alloc_pd_err);
stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->sw_stats.create_qp_err);
stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->sw_stats.reg_mr_err);
stats->value[EFA_ALLOC_UCONTEXT_ERR] = atomic64_read(&s->sw_stats.alloc_ucontext_err);
stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->sw_stats.create_ah_err);
return ARRAY_SIZE(efa_stats_names);
}
enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev, enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
u8 port_num) u8 port_num)
{ {


@ -4101,6 +4101,7 @@ def_access_ibp_counter(rc_dupreq);
def_access_ibp_counter(rdma_seq); def_access_ibp_counter(rdma_seq);
def_access_ibp_counter(unaligned); def_access_ibp_counter(unaligned);
def_access_ibp_counter(seq_naks); def_access_ibp_counter(seq_naks);
def_access_ibp_counter(rc_crwaits);
static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = { static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH), [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
@ -5119,6 +5120,7 @@ static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq), [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned), [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks), [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
[C_SW_IBP_RC_CRWAITS] = SW_IBP_CNTR(RcCrWait, rc_crwaits),
[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL, [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
access_sw_cpu_rc_acks), access_sw_cpu_rc_acks),
[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL, [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,


@ -1245,6 +1245,7 @@ enum {
C_SW_IBP_RDMA_SEQ, C_SW_IBP_RDMA_SEQ,
C_SW_IBP_UNALIGNED, C_SW_IBP_UNALIGNED,
C_SW_IBP_SEQ_NAK, C_SW_IBP_SEQ_NAK,
C_SW_IBP_RC_CRWAITS,
C_SW_CPU_RC_ACKS, C_SW_CPU_RC_ACKS,
C_SW_CPU_RC_QACKS, C_SW_CPU_RC_QACKS,
C_SW_CPU_RC_DELAYED_COMP, C_SW_CPU_RC_DELAYED_COMP,


@ -2326,7 +2326,7 @@ struct opa_port_status_req {
__be32 vl_select_mask; __be32 vl_select_mask;
}; };
#define VL_MASK_ALL 0x000080ff #define VL_MASK_ALL 0x00000000000080ffUL
struct opa_port_status_rsp { struct opa_port_status_rsp {
__u8 port_num; __u8 port_num;
@ -2625,15 +2625,14 @@ static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp,
} }
static void a0_portstatus(struct hfi1_pportdata *ppd, static void a0_portstatus(struct hfi1_pportdata *ppd,
struct opa_port_status_rsp *rsp, u32 vl_select_mask) struct opa_port_status_rsp *rsp)
{ {
if (!is_bx(ppd->dd)) { if (!is_bx(ppd->dd)) {
unsigned long vl; unsigned long vl;
u64 sum_vl_xmit_wait = 0; u64 sum_vl_xmit_wait = 0;
u32 vl_all_mask = VL_MASK_ALL; unsigned long vl_all_mask = VL_MASK_ALL;
for_each_set_bit(vl, (unsigned long *)&(vl_all_mask), for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) {
8 * sizeof(vl_all_mask)) {
u64 tmp = sum_vl_xmit_wait + u64 tmp = sum_vl_xmit_wait +
read_port_cntr(ppd, C_TX_WAIT_VL, read_port_cntr(ppd, C_TX_WAIT_VL,
idx_from_vl(vl)); idx_from_vl(vl));
@ -2730,12 +2729,12 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
(struct opa_port_status_req *)pmp->data; (struct opa_port_status_req *)pmp->data;
struct hfi1_devdata *dd = dd_from_ibdev(ibdev); struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
struct opa_port_status_rsp *rsp; struct opa_port_status_rsp *rsp;
u32 vl_select_mask = be32_to_cpu(req->vl_select_mask); unsigned long vl_select_mask = be32_to_cpu(req->vl_select_mask);
unsigned long vl; unsigned long vl;
size_t response_data_size; size_t response_data_size;
u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24; u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
u8 port_num = req->port_num; u8 port_num = req->port_num;
u8 num_vls = hweight32(vl_select_mask); u8 num_vls = hweight64(vl_select_mask);
struct _vls_pctrs *vlinfo; struct _vls_pctrs *vlinfo;
struct hfi1_ibport *ibp = to_iport(ibdev, port); struct hfi1_ibport *ibp = to_iport(ibdev, port);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
@ -2770,7 +2769,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
hfi1_read_link_quality(dd, &rsp->link_quality_indicator); hfi1_read_link_quality(dd, &rsp->link_quality_indicator);
rsp->vl_select_mask = cpu_to_be32(vl_select_mask); rsp->vl_select_mask = cpu_to_be32((u32)vl_select_mask);
rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS, rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
CNTR_INVALID_VL)); CNTR_INVALID_VL));
rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS, rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
@ -2841,8 +2840,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
* So in the for_each_set_bit() loop below, we don't need * So in the for_each_set_bit() loop below, we don't need
* any additional checks for vl. * any additional checks for vl.
*/ */
for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
8 * sizeof(vl_select_mask)) {
memset(vlinfo, 0, sizeof(*vlinfo)); memset(vlinfo, 0, sizeof(*vlinfo));
tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl)); tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl));
@ -2883,7 +2881,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
vfi++; vfi++;
} }
a0_portstatus(ppd, rsp, vl_select_mask); a0_portstatus(ppd, rsp);
if (resp_len) if (resp_len)
*resp_len += response_data_size; *resp_len += response_data_size;
@ -2930,16 +2928,14 @@ static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
return error_counter_summary; return error_counter_summary;
} }
static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp, static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp)
u32 vl_select_mask)
{ {
if (!is_bx(ppd->dd)) { if (!is_bx(ppd->dd)) {
unsigned long vl; unsigned long vl;
u64 sum_vl_xmit_wait = 0; u64 sum_vl_xmit_wait = 0;
u32 vl_all_mask = VL_MASK_ALL; unsigned long vl_all_mask = VL_MASK_ALL;
for_each_set_bit(vl, (unsigned long *)&(vl_all_mask), for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) {
8 * sizeof(vl_all_mask)) {
u64 tmp = sum_vl_xmit_wait + u64 tmp = sum_vl_xmit_wait +
read_port_cntr(ppd, C_TX_WAIT_VL, read_port_cntr(ppd, C_TX_WAIT_VL,
idx_from_vl(vl)); idx_from_vl(vl));
@ -2994,7 +2990,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
u64 port_mask; u64 port_mask;
u8 port_num; u8 port_num;
unsigned long vl; unsigned long vl;
u32 vl_select_mask; unsigned long vl_select_mask;
int vfi; int vfi;
u16 link_width; u16 link_width;
u16 link_speed; u16 link_speed;
@ -3071,8 +3067,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
* So in the for_each_set_bit() loop below, we don't need * So in the for_each_set_bit() loop below, we don't need
* any additional checks for vl. * any additional checks for vl.
*/ */
for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
8 * sizeof(req->vl_select_mask)) {
memset(vlinfo, 0, sizeof(*vlinfo)); memset(vlinfo, 0, sizeof(*vlinfo));
rsp->vls[vfi].port_vl_xmit_data = rsp->vls[vfi].port_vl_xmit_data =
@ -3120,7 +3115,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
vfi++; vfi++;
} }
a0_datacounters(ppd, rsp, vl_select_mask); a0_datacounters(ppd, rsp);
if (resp_len) if (resp_len)
*resp_len += response_data_size; *resp_len += response_data_size;
@ -3215,7 +3210,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
struct _vls_ectrs *vlinfo; struct _vls_ectrs *vlinfo;
unsigned long vl; unsigned long vl;
u64 port_mask, tmp; u64 port_mask, tmp;
u32 vl_select_mask; unsigned long vl_select_mask;
int vfi; int vfi;
req = (struct opa_port_error_counters64_msg *)pmp->data; req = (struct opa_port_error_counters64_msg *)pmp->data;
@ -3273,8 +3268,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
vlinfo = &rsp->vls[0]; vlinfo = &rsp->vls[0];
vfi = 0; vfi = 0;
vl_select_mask = be32_to_cpu(req->vl_select_mask); vl_select_mask = be32_to_cpu(req->vl_select_mask);
for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
8 * sizeof(req->vl_select_mask)) {
memset(vlinfo, 0, sizeof(*vlinfo)); memset(vlinfo, 0, sizeof(*vlinfo));
rsp->vls[vfi].port_vl_xmit_discards = rsp->vls[vfi].port_vl_xmit_discards =
cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL, cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
@ -3485,7 +3479,7 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24; u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
u64 portn = be64_to_cpu(req->port_select_mask[3]); u64 portn = be64_to_cpu(req->port_select_mask[3]);
u32 counter_select = be32_to_cpu(req->counter_select_mask); u32 counter_select = be32_to_cpu(req->counter_select_mask);
u32 vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */ unsigned long vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */
unsigned long vl; unsigned long vl;
if ((nports != 1) || (portn != 1 << port)) { if ((nports != 1) || (portn != 1 << port)) {
@ -3579,8 +3573,7 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
if (counter_select & CS_UNCORRECTABLE_ERRORS) if (counter_select & CS_UNCORRECTABLE_ERRORS)
write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0); write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0);
for_each_set_bit(vl, (unsigned long *)&(vl_select_mask), for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
8 * sizeof(vl_select_mask)) {
if (counter_select & CS_PORT_XMIT_DATA) if (counter_select & CS_PORT_XMIT_DATA)
write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0); write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0);
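
The mad.c changes above re-type vl_select_mask and vl_all_mask as unsigned long (and widen VL_MASK_ALL to a UL constant) because for_each_set_bit() walks arrays of unsigned long; casting the address of a 32-bit variable to unsigned long * makes the helper read 64 bits on 64-bit kernels, which is the stack over-read KASAN flags. A small sketch of the safe pattern, assuming only the generic bitmap helpers and a caller-supplied per-VL counter array large enough for any set bit:

#include <linux/bitops.h>
#include <linux/types.h>

static u64 sum_selected(const u64 *per_vl, u32 mask32)
{
	/* Widen to unsigned long so for_each_set_bit() never reads
	 * past the end of the variable on 64-bit architectures.
	 */
	unsigned long mask = mask32;
	unsigned long vl;
	u64 sum = 0;

	for_each_set_bit(vl, &mask, BITS_PER_LONG)
		sum += per_vl[vl];

	return sum;
}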


@ -595,11 +595,8 @@ check_s_state:
case IB_WR_SEND_WITH_IMM: case IB_WR_SEND_WITH_IMM:
case IB_WR_SEND_WITH_INV: case IB_WR_SEND_WITH_INV:
/* If no credit, return. */ /* If no credit, return. */
if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) && if (!rvt_rc_credit_avail(qp, wqe))
rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
goto bail; goto bail;
}
if (len > pmtu) { if (len > pmtu) {
qp->s_state = OP(SEND_FIRST); qp->s_state = OP(SEND_FIRST);
len = pmtu; len = pmtu;
@ -632,11 +629,8 @@ check_s_state:
goto no_flow_control; goto no_flow_control;
case IB_WR_RDMA_WRITE_WITH_IMM: case IB_WR_RDMA_WRITE_WITH_IMM:
/* If no credit, return. */ /* If no credit, return. */
if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) && if (!rvt_rc_credit_avail(qp, wqe))
rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
goto bail; goto bail;
}
no_flow_control: no_flow_control:
put_ib_reth_vaddr( put_ib_reth_vaddr(
wqe->rdma_wr.remote_addr, wqe->rdma_wr.remote_addr,
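
The two hunks above replace the open-coded RC credit check with rvt_rc_credit_avail(), a helper added to rdmavt in this series together with a credit-wait counter. Based purely on the logic it replaces, the helper presumably behaves like the sketch below: succeed when credits are unlimited or the WQE's SSN is still within the advertised limit, otherwise mark the QP as waiting for SSN credit (the real helper also bumps the new counter, which is not shown here).

#include <rdma/rdmavt_qp.h>

/* Sketch reconstructed from the removed open-coded test; not the
 * actual rdmavt implementation.
 */
static inline bool rvt_rc_credit_avail_sketch(struct rvt_qp *qp,
					      struct rvt_swqe *wqe)
{
	if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
	    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
		qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
		return false;
	}

	return true;
}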
@ -1483,6 +1477,11 @@ static void update_num_rd_atomic(struct rvt_qp *qp, u32 psn,
req->ack_pending = cur_seg - req->comp_seg; req->ack_pending = cur_seg - req->comp_seg;
priv->pending_tid_r_segs += req->ack_pending; priv->pending_tid_r_segs += req->ack_pending;
qp->s_num_rd_atomic += req->ack_pending; qp->s_num_rd_atomic += req->ack_pending;
trace_hfi1_tid_req_update_num_rd_atomic(qp, 0,
wqe->wr.opcode,
wqe->psn,
wqe->lpsn,
req);
} else { } else {
priv->pending_tid_r_segs += req->total_segs; priv->pending_tid_r_segs += req->total_segs;
qp->s_num_rd_atomic += req->total_segs; qp->s_num_rd_atomic += req->total_segs;


@ -2646,6 +2646,9 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
u32 fpsn; u32 fpsn;
lockdep_assert_held(&qp->r_lock); lockdep_assert_held(&qp->r_lock);
trace_hfi1_rsp_read_kdeth_eflags(qp, ibpsn);
trace_hfi1_sender_read_kdeth_eflags(qp);
trace_hfi1_tid_read_sender_kdeth_eflags(qp, 0);
spin_lock(&qp->s_lock); spin_lock(&qp->s_lock);
/* If the psn is out of valid range, drop the packet */ /* If the psn is out of valid range, drop the packet */
if (cmp_psn(ibpsn, qp->s_last_psn) < 0 || if (cmp_psn(ibpsn, qp->s_last_psn) < 0 ||
@ -2710,6 +2713,8 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
goto s_unlock; goto s_unlock;
req = wqe_to_tid_req(wqe); req = wqe_to_tid_req(wqe);
trace_hfi1_tid_req_read_kdeth_eflags(qp, 0, wqe->wr.opcode, wqe->psn,
wqe->lpsn, req);
switch (rcv_type) { switch (rcv_type) {
case RHF_RCV_TYPE_EXPECTED: case RHF_RCV_TYPE_EXPECTED:
switch (rte) { switch (rte) {
@ -2724,6 +2729,9 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
* packets that could be still in the fabric. * packets that could be still in the fabric.
*/ */
flow = &req->flows[req->clear_tail]; flow = &req->flows[req->clear_tail];
trace_hfi1_tid_flow_read_kdeth_eflags(qp,
req->clear_tail,
flow);
if (priv->s_flags & HFI1_R_TID_SW_PSN) { if (priv->s_flags & HFI1_R_TID_SW_PSN) {
diff = cmp_psn(psn, diff = cmp_psn(psn,
flow->flow_state.r_next_psn); flow->flow_state.r_next_psn);


@ -627,6 +627,12 @@ DEFINE_EVENT(/* event */
TP_ARGS(qp, index, flow) TP_ARGS(qp, index, flow)
); );
DEFINE_EVENT(/* event */
hfi1_tid_flow_template, hfi1_tid_flow_read_kdeth_eflags,
TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
TP_ARGS(qp, index, flow)
);
DECLARE_EVENT_CLASS(/* tid_node */ DECLARE_EVENT_CLASS(/* tid_node */
hfi1_tid_node_template, hfi1_tid_node_template,
TP_PROTO(struct rvt_qp *qp, const char *msg, u32 index, u32 base, TP_PROTO(struct rvt_qp *qp, const char *msg, u32 index, u32 base,
@ -851,6 +857,12 @@ DEFINE_EVENT(/* event */
TP_ARGS(qp, psn) TP_ARGS(qp, psn)
); );
DEFINE_EVENT(/* event */
hfi1_responder_info_template, hfi1_rsp_read_kdeth_eflags,
TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn)
);
DECLARE_EVENT_CLASS(/* sender_info */ DECLARE_EVENT_CLASS(/* sender_info */
hfi1_sender_info_template, hfi1_sender_info_template,
TP_PROTO(struct rvt_qp *qp), TP_PROTO(struct rvt_qp *qp),
@ -955,6 +967,12 @@ DEFINE_EVENT(/* event */
TP_ARGS(qp) TP_ARGS(qp)
); );
DEFINE_EVENT(/* event */
hfi1_sender_info_template, hfi1_sender_read_kdeth_eflags,
TP_PROTO(struct rvt_qp *qp),
TP_ARGS(qp)
);
DECLARE_EVENT_CLASS(/* tid_read_sender */ DECLARE_EVENT_CLASS(/* tid_read_sender */
hfi1_tid_read_sender_template, hfi1_tid_read_sender_template,
TP_PROTO(struct rvt_qp *qp, char newreq), TP_PROTO(struct rvt_qp *qp, char newreq),
@ -1015,6 +1033,12 @@ DEFINE_EVENT(/* event */
TP_ARGS(qp, newreq) TP_ARGS(qp, newreq)
); );
DEFINE_EVENT(/* event */
hfi1_tid_read_sender_template, hfi1_tid_read_sender_kdeth_eflags,
TP_PROTO(struct rvt_qp *qp, char newreq),
TP_ARGS(qp, newreq)
);
DECLARE_EVENT_CLASS(/* tid_rdma_request */ DECLARE_EVENT_CLASS(/* tid_rdma_request */
hfi1_tid_rdma_request_template, hfi1_tid_rdma_request_template,
TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
@ -1215,6 +1239,13 @@ DEFINE_EVENT(/* event */
TP_ARGS(qp, newreq, opcode, psn, lpsn, req) TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
); );
DEFINE_EVENT(/* event */
hfi1_tid_rdma_request_template, hfi1_tid_req_read_kdeth_eflags,
TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
struct tid_rdma_request *req),
TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);
DEFINE_EVENT(/* event */ DEFINE_EVENT(/* event */
hfi1_tid_rdma_request_template, hfi1_tid_req_make_rc_ack_write, hfi1_tid_rdma_request_template, hfi1_tid_req_make_rc_ack_write,
TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
@ -1229,6 +1260,13 @@ DEFINE_EVENT(/* event */
TP_ARGS(qp, newreq, opcode, psn, lpsn, req) TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
); );
DEFINE_EVENT(/* event */
hfi1_tid_rdma_request_template, hfi1_tid_req_update_num_rd_atomic,
TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
struct tid_rdma_request *req),
TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);
DECLARE_EVENT_CLASS(/* rc_rcv_err */ DECLARE_EVENT_CLASS(/* rc_rcv_err */
hfi1_rc_rcv_err_template, hfi1_rc_rcv_err_template,
TP_PROTO(struct rvt_qp *qp, u32 opcode, u32 psn, int diff), TP_PROTO(struct rvt_qp *qp, u32 opcode, u32 psn, int diff),


@ -110,12 +110,6 @@ enum pkt_q_sdma_state {
SDMA_PKT_Q_DEFERRED, SDMA_PKT_Q_DEFERRED,
}; };
/*
* Maximum retry attempts to submit a TX request
* before putting the process to sleep.
*/
#define MAX_DEFER_RETRY_COUNT 1
#define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */ #define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */
#define SDMA_DBG(req, fmt, ...) \ #define SDMA_DBG(req, fmt, ...) \


@ -874,16 +874,17 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
else else
pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT); pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
pbc = create_pbc(ppd, pbc = create_pbc(ppd,
pbc, pbc,
qp->srate_mbps, qp->srate_mbps,
vl, vl,
plen); plen);
/* Update HCRC based on packet opcode */ if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
pbc = update_hcrc(ps->opcode, pbc); pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
else
/* Update HCRC based on packet opcode */
pbc = update_hcrc(ps->opcode, pbc);
} }
tx->wqe = qp->s_wqe; tx->wqe = qp->s_wqe;
ret = build_verbs_tx_desc(tx->sde, len, tx, ahg_info, pbc); ret = build_verbs_tx_desc(tx->sde, len, tx, ahg_info, pbc);
@ -1030,12 +1031,12 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
else else
pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT); pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen);
if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode))) if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
pbc = hfi1_fault_tx(qp, ps->opcode, pbc); pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen); else
/* Update HCRC based on packet opcode */
/* Update HCRC based on packet opcode */ pbc = update_hcrc(ps->opcode, pbc);
pbc = update_hcrc(ps->opcode, pbc);
} }
if (cb) if (cb)
iowait_pio_inc(&priv->s_iowait); iowait_pio_inc(&priv->s_iowait);


@ -8,8 +8,6 @@ config INFINIBAND_HNS
is used in Hisilicon Hip06 and more further ICT SoC based on is used in Hisilicon Hip06 and more further ICT SoC based on
platform device. platform device.
To compile HIP06 or HIP08 driver as module, choose M here.
config INFINIBAND_HNS_HIP06 config INFINIBAND_HNS_HIP06
tristate "Hisilicon Hip06 Family RoCE support" tristate "Hisilicon Hip06 Family RoCE support"
depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET
@ -17,15 +15,9 @@ config INFINIBAND_HNS_HIP06
RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and
Hip07 SoC. These RoCE engines are platform devices. Hip07 SoC. These RoCE engines are platform devices.
To compile this driver, choose Y here: if INFINIBAND_HNS is m, this
module will be called hns-roce-hw-v1
config INFINIBAND_HNS_HIP08 config INFINIBAND_HNS_HIP08
tristate "Hisilicon Hip08 Family RoCE support" tristate "Hisilicon Hip08 Family RoCE support"
depends on INFINIBAND_HNS && PCI && HNS3 depends on INFINIBAND_HNS && PCI && HNS3
---help--- ---help---
RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC. RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC.
The RoCE engine is a PCI device. The RoCE engine is a PCI device.
To compile this driver, choose Y here: if INFINIBAND_HNS is m, this
module will be called hns-roce-hw-v2.


@ -66,11 +66,9 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
HNS_ROCE_VLAN_SL_SHIFT; HNS_ROCE_VLAN_SL_SHIFT;
} }
ah->av.port_pd = cpu_to_le32(to_hr_pd(ibah->pd)->pdn | ah->av.port = rdma_ah_get_port_num(ah_attr);
(rdma_ah_get_port_num(ah_attr) <<
HNS_ROCE_PORT_NUM_SHIFT));
ah->av.gid_index = grh->sgid_index; ah->av.gid_index = grh->sgid_index;
ah->av.vlan = cpu_to_le16(vlan_tag); ah->av.vlan = vlan_tag;
ah->av.vlan_en = vlan_en; ah->av.vlan_en = vlan_en;
dev_dbg(dev, "gid_index = 0x%x,vlan = 0x%x\n", ah->av.gid_index, dev_dbg(dev, "gid_index = 0x%x,vlan = 0x%x\n", ah->av.gid_index,
ah->av.vlan); ah->av.vlan);
@ -79,8 +77,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
ah->av.stat_rate = IB_RATE_10_GBPS; ah->av.stat_rate = IB_RATE_10_GBPS;
memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE); memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE);
ah->av.sl_tclass_flowlabel = cpu_to_le32(rdma_ah_get_sl(ah_attr) << ah->av.sl = rdma_ah_get_sl(ah_attr);
HNS_ROCE_SL_SHIFT);
return 0; return 0;
} }
@ -91,17 +88,11 @@ int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
memset(ah_attr, 0, sizeof(*ah_attr)); memset(ah_attr, 0, sizeof(*ah_attr));
rdma_ah_set_sl(ah_attr, (le32_to_cpu(ah->av.sl_tclass_flowlabel) >> rdma_ah_set_sl(ah_attr, ah->av.sl);
HNS_ROCE_SL_SHIFT)); rdma_ah_set_port_num(ah_attr, ah->av.port);
rdma_ah_set_port_num(ah_attr, (le32_to_cpu(ah->av.port_pd) >>
HNS_ROCE_PORT_NUM_SHIFT));
rdma_ah_set_static_rate(ah_attr, ah->av.stat_rate); rdma_ah_set_static_rate(ah_attr, ah->av.stat_rate);
rdma_ah_set_grh(ah_attr, NULL, rdma_ah_set_grh(ah_attr, NULL, ah->av.flowlabel,
(le32_to_cpu(ah->av.sl_tclass_flowlabel) & ah->av.gid_index, ah->av.hop_limit, ah->av.tclass);
HNS_ROCE_FLOW_LABEL_MASK), ah->av.gid_index,
ah->av.hop_limit,
(le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
HNS_ROCE_TCLASS_SHIFT));
rdma_ah_set_dgid_raw(ah_attr, ah->av.dgid); rdma_ah_set_dgid_raw(ah_attr, ah->av.dgid);
return 0; return 0;


@ -211,7 +211,6 @@ int hns_roce_cmd_init(struct hns_roce_dev *hr_dev)
mutex_init(&hr_dev->cmd.hcr_mutex); mutex_init(&hr_dev->cmd.hcr_mutex);
sema_init(&hr_dev->cmd.poll_sem, 1); sema_init(&hr_dev->cmd.poll_sem, 1);
hr_dev->cmd.use_events = 0; hr_dev->cmd.use_events = 0;
hr_dev->cmd.toggle = 1;
hr_dev->cmd.max_cmds = CMD_MAX_NUM; hr_dev->cmd.max_cmds = CMD_MAX_NUM;
hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", dev, hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", dev,
HNS_ROCE_MAILBOX_SIZE, HNS_ROCE_MAILBOX_SIZE,
@ -252,23 +251,15 @@ int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
hr_cmd->token_mask = CMD_TOKEN_MASK; hr_cmd->token_mask = CMD_TOKEN_MASK;
hr_cmd->use_events = 1; hr_cmd->use_events = 1;
down(&hr_cmd->poll_sem);
return 0; return 0;
} }
void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev) void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev)
{ {
struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd; struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd;
int i;
hr_cmd->use_events = 0;
for (i = 0; i < hr_cmd->max_cmds; ++i)
down(&hr_cmd->event_sem);
kfree(hr_cmd->context); kfree(hr_cmd->context);
up(&hr_cmd->poll_sem); hr_cmd->use_events = 0;
} }
struct hns_roce_cmd_mailbox struct hns_roce_cmd_mailbox


@ -83,7 +83,6 @@ static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent, static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
struct hns_roce_mtt *hr_mtt, struct hns_roce_mtt *hr_mtt,
struct hns_roce_uar *hr_uar,
struct hns_roce_cq *hr_cq, int vector) struct hns_roce_cq *hr_cq, int vector)
{ {
struct hns_roce_cmd_mailbox *mailbox; struct hns_roce_cmd_mailbox *mailbox;
@ -154,7 +153,6 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
hr_cq->cons_index = 0; hr_cq->cons_index = 0;
hr_cq->arm_sn = 1; hr_cq->arm_sn = 1;
hr_cq->uar = hr_uar;
atomic_set(&hr_cq->refcount, 1); atomic_set(&hr_cq->refcount, 1);
init_completion(&hr_cq->free); init_completion(&hr_cq->free);
@ -298,21 +296,127 @@ static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,
&buf->hr_buf); &buf->hr_buf);
} }
static int create_user_cq(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq,
struct ib_udata *udata,
struct hns_roce_ib_create_cq_resp *resp,
int cq_entries)
{
struct hns_roce_ib_create_cq ucmd;
struct device *dev = hr_dev->dev;
int ret;
struct hns_roce_ucontext *context = rdma_udata_to_drv_context(
udata, struct hns_roce_ucontext, ibucontext);
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
dev_err(dev, "Failed to copy_from_udata.\n");
return -EFAULT;
}
/* Get user space address, write it into mtt table */
ret = hns_roce_ib_get_cq_umem(hr_dev, udata, &hr_cq->hr_buf,
&hr_cq->umem, ucmd.buf_addr,
cq_entries);
if (ret) {
dev_err(dev, "Failed to get_cq_umem.\n");
return ret;
}
if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
(udata->outlen >= sizeof(*resp))) {
ret = hns_roce_db_map_user(context, udata, ucmd.db_addr,
&hr_cq->db);
if (ret) {
dev_err(dev, "cq record doorbell map failed!\n");
goto err_mtt;
}
hr_cq->db_en = 1;
resp->cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB;
}
return 0;
err_mtt:
hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
ib_umem_release(hr_cq->umem);
return ret;
}
static int create_kernel_cq(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, int cq_entries)
{
struct device *dev = hr_dev->dev;
struct hns_roce_uar *uar;
int ret;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
if (ret)
return ret;
hr_cq->set_ci_db = hr_cq->db.db_record;
*hr_cq->set_ci_db = 0;
hr_cq->db_en = 1;
}
/* Init mtt table and write buff address to mtt table */
ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf, cq_entries);
if (ret) {
dev_err(dev, "Failed to alloc_cq_buf.\n");
goto err_db;
}
uar = &hr_dev->priv_uar;
hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
DB_REG_OFFSET * uar->index;
return 0;
err_db:
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
hns_roce_free_db(hr_dev, &hr_cq->db);
return ret;
}
static void destroy_user_cq(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq,
struct ib_udata *udata,
struct hns_roce_ib_create_cq_resp *resp)
{
struct hns_roce_ucontext *context = rdma_udata_to_drv_context(
udata, struct hns_roce_ucontext, ibucontext);
if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
(udata->outlen >= sizeof(*resp)))
hns_roce_db_unmap_user(context, &hr_cq->db);
hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
ib_umem_release(hr_cq->umem);
}
static void destroy_kernel_cq(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq)
{
hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, hr_cq->ib_cq.cqe);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
hns_roce_free_db(hr_dev, &hr_cq->db);
}
int hns_roce_ib_create_cq(struct ib_cq *ib_cq, int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
const struct ib_cq_init_attr *attr, const struct ib_cq_init_attr *attr,
struct ib_udata *udata) struct ib_udata *udata)
{ {
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device); struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
struct hns_roce_ib_create_cq ucmd;
struct hns_roce_ib_create_cq_resp resp = {}; struct hns_roce_ib_create_cq_resp resp = {};
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq); struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
struct hns_roce_uar *uar = NULL;
int vector = attr->comp_vector; int vector = attr->comp_vector;
int cq_entries = attr->cqe; int cq_entries = attr->cqe;
int ret; int ret;
struct hns_roce_ucontext *context = rdma_udata_to_drv_context(
udata, struct hns_roce_ucontext, ibucontext);
if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) { if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n", dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n",
@ -328,61 +432,21 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
spin_lock_init(&hr_cq->lock); spin_lock_init(&hr_cq->lock);
if (udata) { if (udata) {
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { ret = create_user_cq(hr_dev, hr_cq, udata, &resp, cq_entries);
dev_err(dev, "Failed to copy_from_udata.\n");
ret = -EFAULT;
goto err_cq;
}
/* Get user space address, write it into mtt table */
ret = hns_roce_ib_get_cq_umem(hr_dev, udata, &hr_cq->hr_buf,
&hr_cq->umem, ucmd.buf_addr,
cq_entries);
if (ret) { if (ret) {
dev_err(dev, "Failed to get_cq_umem.\n"); dev_err(dev, "Create cq failed in user mode!\n");
goto err_cq; goto err_cq;
} }
if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
(udata->outlen >= sizeof(resp))) {
ret = hns_roce_db_map_user(context, udata, ucmd.db_addr,
&hr_cq->db);
if (ret) {
dev_err(dev, "cq record doorbell map failed!\n");
goto err_mtt;
}
hr_cq->db_en = 1;
resp.cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB;
}
/* Get user space parameters */
uar = &context->uar;
} else { } else {
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) { ret = create_kernel_cq(hr_dev, hr_cq, cq_entries);
ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
if (ret)
goto err_cq;
hr_cq->set_ci_db = hr_cq->db.db_record;
*hr_cq->set_ci_db = 0;
hr_cq->db_en = 1;
}
/* Init mmt table and write buff address to mtt table */
ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf,
cq_entries);
if (ret) { if (ret) {
dev_err(dev, "Failed to alloc_cq_buf.\n"); dev_err(dev, "Create cq failed in kernel mode!\n");
goto err_db; goto err_cq;
} }
uar = &hr_dev->priv_uar;
hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
DB_REG_OFFSET * uar->index;
} }
/* Allocate cq index, fill cq_context */ /* Allocate cq index, fill cq_context */
ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt, uar, ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt,
hr_cq, vector); hr_cq, vector);
if (ret) { if (ret) {
dev_err(dev, "Creat CQ .Failed to cq_alloc.\n"); dev_err(dev, "Creat CQ .Failed to cq_alloc.\n");
@ -416,20 +480,10 @@ err_cqc:
hns_roce_free_cq(hr_dev, hr_cq); hns_roce_free_cq(hr_dev, hr_cq);
err_dbmap: err_dbmap:
if (udata && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && if (udata)
(udata->outlen >= sizeof(resp))) destroy_user_cq(hr_dev, hr_cq, udata, &resp);
hns_roce_db_unmap_user(context, &hr_cq->db); else
destroy_kernel_cq(hr_dev, hr_cq);
err_mtt:
hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
ib_umem_release(hr_cq->umem);
if (!udata)
hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
hr_cq->ib_cq.cqe);
err_db:
if (!udata && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
hns_roce_free_db(hr_dev, &hr_cq->db);
err_cq: err_cq:
return ret; return ret;


@ -84,7 +84,6 @@
#define HNS_ROCE_CEQ_ENTRY_SIZE 0x4 #define HNS_ROCE_CEQ_ENTRY_SIZE 0x4
#define HNS_ROCE_AEQ_ENTRY_SIZE 0x10 #define HNS_ROCE_AEQ_ENTRY_SIZE 0x10
/* 4G/4K = 1M */
#define HNS_ROCE_SL_SHIFT 28 #define HNS_ROCE_SL_SHIFT 28
#define HNS_ROCE_TCLASS_SHIFT 20 #define HNS_ROCE_TCLASS_SHIFT 20
#define HNS_ROCE_FLOW_LABEL_MASK 0xfffff #define HNS_ROCE_FLOW_LABEL_MASK 0xfffff
@ -128,6 +127,11 @@
#define HNS_ROCE_IDX_QUE_ENTRY_SZ 4 #define HNS_ROCE_IDX_QUE_ENTRY_SZ 4
#define SRQ_DB_REG 0x230 #define SRQ_DB_REG 0x230
/* The chip implementation of the consumer index is calculated
* according to twice the actual EQ depth
*/
#define EQ_DEPTH_COEFF 2
enum { enum {
HNS_ROCE_SUPPORT_RQ_RECORD_DB = 1 << 0, HNS_ROCE_SUPPORT_RQ_RECORD_DB = 1 << 0,
HNS_ROCE_SUPPORT_SQ_RECORD_DB = 1 << 1, HNS_ROCE_SUPPORT_SQ_RECORD_DB = 1 << 1,
@ -322,7 +326,7 @@ struct hns_roce_hem_table {
unsigned long num_hem; unsigned long num_hem;
/* HEM entry record obj total num */ /* HEM entry record obj total num */
unsigned long num_obj; unsigned long num_obj;
/*Single obj size */ /* Single obj size */
unsigned long obj_size; unsigned long obj_size;
unsigned long table_chunk_size; unsigned long table_chunk_size;
int lowmem; int lowmem;
@ -343,7 +347,7 @@ struct hns_roce_mtt {
struct hns_roce_buf_region { struct hns_roce_buf_region {
int offset; /* page offset */ int offset; /* page offset */
u32 count; /* page count*/ u32 count; /* page count */
int hopnum; /* addressing hop num */ int hopnum; /* addressing hop num */
}; };
@ -384,25 +388,25 @@ struct hns_roce_mr {
u64 size; /* Address range of MR */ u64 size; /* Address range of MR */
u32 key; /* Key of MR */ u32 key; /* Key of MR */
u32 pd; /* PD num of MR */ u32 pd; /* PD num of MR */
u32 access;/* Access permission of MR */ u32 access; /* Access permission of MR */
u32 npages; u32 npages;
int enabled; /* MR's active status */ int enabled; /* MR's active status */
int type; /* MR's register type */ int type; /* MR's register type */
u64 *pbl_buf;/* MR's PBL space */ u64 *pbl_buf; /* MR's PBL space */
dma_addr_t pbl_dma_addr; /* MR's PBL space PA */ dma_addr_t pbl_dma_addr; /* MR's PBL space PA */
u32 pbl_size;/* PA number in the PBL */ u32 pbl_size; /* PA number in the PBL */
u64 pbl_ba;/* page table address */ u64 pbl_ba; /* page table address */
u32 l0_chunk_last_num;/* L0 last number */ u32 l0_chunk_last_num; /* L0 last number */
u32 l1_chunk_last_num;/* L1 last number */ u32 l1_chunk_last_num; /* L1 last number */
u64 **pbl_bt_l2;/* PBL BT L2 */ u64 **pbl_bt_l2; /* PBL BT L2 */
u64 **pbl_bt_l1;/* PBL BT L1 */ u64 **pbl_bt_l1; /* PBL BT L1 */
u64 *pbl_bt_l0;/* PBL BT L0 */ u64 *pbl_bt_l0; /* PBL BT L0 */
dma_addr_t *pbl_l2_dma_addr;/* PBL BT L2 dma addr */ dma_addr_t *pbl_l2_dma_addr; /* PBL BT L2 dma addr */
dma_addr_t *pbl_l1_dma_addr;/* PBL BT L1 dma addr */ dma_addr_t *pbl_l1_dma_addr; /* PBL BT L1 dma addr */
dma_addr_t pbl_l0_dma_addr;/* PBL BT L0 dma addr */ dma_addr_t pbl_l0_dma_addr; /* PBL BT L0 dma addr */
u32 pbl_ba_pg_sz;/* BT chunk page size */ u32 pbl_ba_pg_sz; /* BT chunk page size */
u32 pbl_buf_pg_sz;/* buf chunk page size */ u32 pbl_buf_pg_sz; /* buf chunk page size */
u32 pbl_hop_num;/* multi-hop number */ u32 pbl_hop_num; /* multi-hop number */
}; };
struct hns_roce_mr_table { struct hns_roce_mr_table {
@ -425,16 +429,16 @@ struct hns_roce_wq {
u32 max_post; u32 max_post;
int max_gs; int max_gs;
int offset; int offset;
int wqe_shift;/* WQE size */ int wqe_shift; /* WQE size */
u32 head; u32 head;
u32 tail; u32 tail;
void __iomem *db_reg_l; void __iomem *db_reg_l;
}; };
struct hns_roce_sge { struct hns_roce_sge {
int sge_cnt; /* SGE num */ int sge_cnt; /* SGE num */
int offset; int offset;
int sge_shift;/* SGE size */ int sge_shift; /* SGE size */
}; };
struct hns_roce_buf_list { struct hns_roce_buf_list {
@ -569,14 +573,16 @@ struct hns_roce_raq_table {
}; };
struct hns_roce_av { struct hns_roce_av {
__le32 port_pd; u8 port;
u8 gid_index; u8 gid_index;
u8 stat_rate; u8 stat_rate;
u8 hop_limit; u8 hop_limit;
__le32 sl_tclass_flowlabel; u32 flowlabel;
u8 sl;
u8 tclass;
u8 dgid[HNS_ROCE_GID_SIZE]; u8 dgid[HNS_ROCE_GID_SIZE];
u8 mac[ETH_ALEN]; u8 mac[ETH_ALEN];
__le16 vlan; u16 vlan;
bool vlan_en; bool vlan_en;
}; };
@ -618,7 +624,6 @@ struct hns_roce_cmdq {
* close device, switch into poll mode(non event mode) * close device, switch into poll mode(non event mode)
*/ */
u8 use_events; u8 use_events;
u8 toggle;
}; };
struct hns_roce_cmd_mailbox { struct hns_roce_cmd_mailbox {
@ -652,10 +657,8 @@ struct hns_roce_qp {
u8 rdb_en; u8 rdb_en;
u8 sdb_en; u8 sdb_en;
u32 doorbell_qpn; u32 doorbell_qpn;
__le32 sq_signal_bits; u32 sq_signal_bits;
u32 sq_next_wqe; u32 sq_next_wqe;
int sq_max_wqes_per_wr;
int sq_spare_wqes;
struct hns_roce_wq sq; struct hns_roce_wq sq;
struct ib_umem *umem; struct ib_umem *umem;
@ -709,7 +712,7 @@ enum {
}; };
struct hns_roce_ceqe { struct hns_roce_ceqe {
u32 comp; __le32 comp;
}; };
struct hns_roce_aeqe { struct hns_roce_aeqe {
@ -752,7 +755,7 @@ struct hns_roce_eq {
struct hns_roce_dev *hr_dev; struct hns_roce_dev *hr_dev;
void __iomem *doorbell; void __iomem *doorbell;
int type_flag;/* Aeq:1 ceq:0 */ int type_flag; /* Aeq:1 ceq:0 */
int eqn; int eqn;
u32 entries; u32 entries;
int log_entries; int log_entries;
@ -798,22 +801,22 @@ struct hns_roce_caps {
int local_ca_ack_delay; int local_ca_ack_delay;
int num_uars; int num_uars;
u32 phy_num_uars; u32 phy_num_uars;
u32 max_sq_sg; /* 2 */ u32 max_sq_sg;
u32 max_sq_inline; /* 32 */ u32 max_sq_inline;
u32 max_rq_sg; /* 2 */ u32 max_rq_sg;
u32 max_extend_sg; u32 max_extend_sg;
int num_qps; /* 256k */ int num_qps;
int reserved_qps; int reserved_qps;
int num_qpc_timer; int num_qpc_timer;
int num_cqc_timer; int num_cqc_timer;
u32 max_srq_sg; u32 max_srq_sg;
int num_srqs; int num_srqs;
u32 max_wqes; /* 16k */ u32 max_wqes;
u32 max_srqs; u32 max_srqs;
u32 max_srq_wrs; u32 max_srq_wrs;
u32 max_srq_sges; u32 max_srq_sges;
u32 max_sq_desc_sz; /* 64 */ u32 max_sq_desc_sz;
u32 max_rq_desc_sz; /* 64 */ u32 max_rq_desc_sz;
u32 max_srq_desc_sz; u32 max_srq_desc_sz;
int max_qp_init_rdma; int max_qp_init_rdma;
int max_qp_dest_rdma; int max_qp_dest_rdma;
@ -824,7 +827,7 @@ struct hns_roce_caps {
int reserved_cqs; int reserved_cqs;
int reserved_srqs; int reserved_srqs;
u32 max_srqwqes; u32 max_srqwqes;
int num_aeq_vectors; /* 1 */ int num_aeq_vectors;
int num_comp_vectors; int num_comp_vectors;
int num_other_vectors; int num_other_vectors;
int num_mtpts; int num_mtpts;
@ -905,7 +908,7 @@ struct hns_roce_caps {
u32 sl_num; u32 sl_num;
u32 tsq_buf_pg_sz; u32 tsq_buf_pg_sz;
u32 tpq_buf_pg_sz; u32 tpq_buf_pg_sz;
u32 chunk_sz; /* chunk size in non multihop mode*/ u32 chunk_sz; /* chunk size in non multihop mode */
u64 flags; u64 flags;
}; };
@ -991,16 +994,6 @@ struct hns_roce_hw {
const struct ib_device_ops *hns_roce_dev_srq_ops; const struct ib_device_ops *hns_roce_dev_srq_ops;
}; };
enum hns_phy_state {
HNS_ROCE_PHY_SLEEP = 1,
HNS_ROCE_PHY_POLLING = 2,
HNS_ROCE_PHY_DISABLED = 3,
HNS_ROCE_PHY_TRAINING = 4,
HNS_ROCE_PHY_LINKUP = 5,
HNS_ROCE_PHY_LINKERR = 6,
HNS_ROCE_PHY_TEST = 7
};
struct hns_roce_dev { struct hns_roce_dev {
struct ib_device ib_dev; struct ib_device ib_dev;
struct platform_device *pdev; struct platform_device *pdev;
@ -1045,8 +1038,8 @@ struct hns_roce_dev {
int loop_idc; int loop_idc;
u32 sdb_offset; u32 sdb_offset;
u32 odb_offset; u32 odb_offset;
dma_addr_t tptr_dma_addr; /*only for hw v1*/ dma_addr_t tptr_dma_addr; /* only for hw v1 */
u32 tptr_size; /*only for hw v1*/ u32 tptr_size; /* only for hw v1 */
const struct hns_roce_hw *hw; const struct hns_roce_hw *hw;
void *priv; void *priv;
struct workqueue_struct *irq_workq; struct workqueue_struct *irq_workq;

View File

@ -41,29 +41,57 @@
bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type) bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
{ {
if ((hr_dev->caps.qpc_hop_num && type == HEM_TYPE_QPC) || int hop_num = 0;
(hr_dev->caps.mpt_hop_num && type == HEM_TYPE_MTPT) ||
(hr_dev->caps.cqc_hop_num && type == HEM_TYPE_CQC) ||
(hr_dev->caps.srqc_hop_num && type == HEM_TYPE_SRQC) ||
(hr_dev->caps.sccc_hop_num && type == HEM_TYPE_SCCC) ||
(hr_dev->caps.qpc_timer_hop_num && type == HEM_TYPE_QPC_TIMER) ||
(hr_dev->caps.cqc_timer_hop_num && type == HEM_TYPE_CQC_TIMER) ||
(hr_dev->caps.cqe_hop_num && type == HEM_TYPE_CQE) ||
(hr_dev->caps.mtt_hop_num && type == HEM_TYPE_MTT) ||
(hr_dev->caps.srqwqe_hop_num && type == HEM_TYPE_SRQWQE) ||
(hr_dev->caps.idx_hop_num && type == HEM_TYPE_IDX))
return true;
return false; switch (type) {
case HEM_TYPE_QPC:
hop_num = hr_dev->caps.qpc_hop_num;
break;
case HEM_TYPE_MTPT:
hop_num = hr_dev->caps.mpt_hop_num;
break;
case HEM_TYPE_CQC:
hop_num = hr_dev->caps.cqc_hop_num;
break;
case HEM_TYPE_SRQC:
hop_num = hr_dev->caps.srqc_hop_num;
break;
case HEM_TYPE_SCCC:
hop_num = hr_dev->caps.sccc_hop_num;
break;
case HEM_TYPE_QPC_TIMER:
hop_num = hr_dev->caps.qpc_timer_hop_num;
break;
case HEM_TYPE_CQC_TIMER:
hop_num = hr_dev->caps.cqc_timer_hop_num;
break;
case HEM_TYPE_CQE:
hop_num = hr_dev->caps.cqe_hop_num;
break;
case HEM_TYPE_MTT:
hop_num = hr_dev->caps.mtt_hop_num;
break;
case HEM_TYPE_SRQWQE:
hop_num = hr_dev->caps.srqwqe_hop_num;
break;
case HEM_TYPE_IDX:
hop_num = hr_dev->caps.idx_hop_num;
break;
default:
return false;
}
return hop_num ? true : false;
} }
static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 start_idx, static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 start_idx,
u32 bt_chunk_num) u32 bt_chunk_num, u64 hem_max_num)
{ {
int i; u64 check_max_num = start_idx + bt_chunk_num;
u64 i;
for (i = 0; i < bt_chunk_num; i++) for (i = start_idx; (i < check_max_num) && (i < hem_max_num); i++)
if (hem[start_idx + i]) if (hem[i])
return false; return false;
return true; return true;
@ -92,17 +120,13 @@ static int hns_roce_get_bt_num(u32 table_type, u32 hop_num)
return 0; return 0;
} }
int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev, static int get_hem_table_config(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long *obj, struct hns_roce_hem_mhop *mhop,
struct hns_roce_hem_mhop *mhop) u32 type)
{ {
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
u32 chunk_ba_num;
u32 table_idx;
u32 bt_num;
u32 chunk_size;
switch (table->type) { switch (type) {
case HEM_TYPE_QPC: case HEM_TYPE_QPC:
mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
+ PAGE_SHIFT); + PAGE_SHIFT);
@ -193,10 +217,26 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
break; break;
default: default:
dev_err(dev, "Table %d not support multi-hop addressing!\n", dev_err(dev, "Table %d not support multi-hop addressing!\n",
table->type); type);
return -EINVAL; return -EINVAL;
} }
return 0;
}
int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long *obj,
struct hns_roce_hem_mhop *mhop)
{
struct device *dev = hr_dev->dev;
u32 chunk_ba_num;
u32 table_idx;
u32 bt_num;
u32 chunk_size;
if (get_hem_table_config(hr_dev, mhop, table->type))
return -EINVAL;
if (!obj) if (!obj)
return 0; return 0;
@ -324,13 +364,13 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
{ {
spinlock_t *lock = &hr_dev->bt_cmd_lock; spinlock_t *lock = &hr_dev->bt_cmd_lock;
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
unsigned long end = 0; long end;
unsigned long flags; unsigned long flags;
struct hns_roce_hem_iter iter; struct hns_roce_hem_iter iter;
void __iomem *bt_cmd; void __iomem *bt_cmd;
u32 bt_cmd_h_val = 0; __le32 bt_cmd_val[2];
u32 bt_cmd_val[2]; __le32 bt_cmd_h = 0;
u32 bt_cmd_l = 0; __le32 bt_cmd_l = 0;
u64 bt_ba = 0; u64 bt_ba = 0;
int ret = 0; int ret = 0;
@ -340,30 +380,20 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
switch (table->type) { switch (table->type) {
case HEM_TYPE_QPC: case HEM_TYPE_QPC:
roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
break;
case HEM_TYPE_MTPT: case HEM_TYPE_MTPT:
roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
HEM_TYPE_MTPT);
break;
case HEM_TYPE_CQC: case HEM_TYPE_CQC:
roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
break;
case HEM_TYPE_SRQC: case HEM_TYPE_SRQC:
roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type);
HEM_TYPE_SRQC);
break; break;
default: default:
return ret; return ret;
} }
roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj); ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0); roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1); roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
/* Currently iter only a chunk */ /* Currently iter only a chunk */
for (hns_roce_hem_first(table->hem[i], &iter); for (hns_roce_hem_first(table->hem[i], &iter);
@ -375,7 +405,7 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG; bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
end = HW_SYNC_TIMEOUT_MSECS; end = HW_SYNC_TIMEOUT_MSECS;
while (end) { while (end > 0) {
if (!(readl(bt_cmd) >> BT_CMD_SYNC_SHIFT)) if (!(readl(bt_cmd) >> BT_CMD_SYNC_SHIFT))
break; break;
@ -389,13 +419,13 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
return -EBUSY; return -EBUSY;
} }
bt_cmd_l = (u32)bt_ba; bt_cmd_l = cpu_to_le32(bt_ba);
roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M, roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S,
bt_ba >> BT_BA_SHIFT); bt_ba >> BT_BA_SHIFT);
bt_cmd_val[0] = bt_cmd_l; bt_cmd_val[0] = bt_cmd_l;
bt_cmd_val[1] = bt_cmd_h_val; bt_cmd_val[1] = bt_cmd_h;
hns_roce_write64_k(bt_cmd_val, hns_roce_write64_k(bt_cmd_val,
hr_dev->reg_base + ROCEE_BT_CMD_L_REG); hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
spin_unlock_irqrestore(lock, flags); spin_unlock_irqrestore(lock, flags);
@ -457,6 +487,12 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
return -EINVAL; return -EINVAL;
} }
if (unlikely(hem_idx >= table->num_hem)) {
dev_err(dev, "Table %d exceed hem limt idx = %llu,max = %lu!\n",
table->type, hem_idx, table->num_hem);
return -EINVAL;
}
mutex_lock(&table->mutex); mutex_lock(&table->mutex);
if (table->hem[hem_idx]) { if (table->hem[hem_idx]) {
@ -693,7 +729,7 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
if (check_whether_bt_num_2(table->type, hop_num)) { if (check_whether_bt_num_2(table->type, hop_num)) {
start_idx = mhop.l0_idx * chunk_ba_num; start_idx = mhop.l0_idx * chunk_ba_num;
if (hns_roce_check_hem_null(table->hem, start_idx, if (hns_roce_check_hem_null(table->hem, start_idx,
chunk_ba_num)) { chunk_ba_num, table->num_hem)) {
if (table->type < HEM_TYPE_MTT && if (table->type < HEM_TYPE_MTT &&
hr_dev->hw->clear_hem(hr_dev, table, obj, 0)) hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
dev_warn(dev, "Clear HEM base address failed.\n"); dev_warn(dev, "Clear HEM base address failed.\n");
@ -707,7 +743,7 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
start_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num + start_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
mhop.l1_idx * chunk_ba_num; mhop.l1_idx * chunk_ba_num;
if (hns_roce_check_hem_null(table->hem, start_idx, if (hns_roce_check_hem_null(table->hem, start_idx,
chunk_ba_num)) { chunk_ba_num, table->num_hem)) {
if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1)) if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
dev_warn(dev, "Clear HEM base address failed.\n"); dev_warn(dev, "Clear HEM base address failed.\n");
@ -791,7 +827,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
} else { } else {
u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */ u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */
hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop); if (hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop))
goto out;
/* mtt mhop */ /* mtt mhop */
i = mhop.l0_idx; i = mhop.l0_idx;
j = mhop.l1_idx; j = mhop.l1_idx;
@ -840,11 +877,13 @@ int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
{ {
struct hns_roce_hem_mhop mhop; struct hns_roce_hem_mhop mhop;
unsigned long inc = table->table_chunk_size / table->obj_size; unsigned long inc = table->table_chunk_size / table->obj_size;
unsigned long i; unsigned long i = 0;
int ret; int ret;
if (hns_roce_check_whether_mhop(hr_dev, table->type)) { if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop); ret = hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
if (ret)
goto fail;
inc = mhop.bt_chunk_size / table->obj_size; inc = mhop.bt_chunk_size / table->obj_size;
} }
@ -874,7 +913,8 @@ void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
unsigned long i; unsigned long i;
if (hns_roce_check_whether_mhop(hr_dev, table->type)) { if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop); if (hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop))
return;
inc = mhop.bt_chunk_size / table->obj_size; inc = mhop.bt_chunk_size / table->obj_size;
} }
@ -887,7 +927,6 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
unsigned long obj_size, unsigned long nobj, unsigned long obj_size, unsigned long nobj,
int use_lowmem) int use_lowmem)
{ {
struct device *dev = hr_dev->dev;
unsigned long obj_per_chunk; unsigned long obj_per_chunk;
unsigned long num_hem; unsigned long num_hem;
@ -900,99 +939,21 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
if (!table->hem) if (!table->hem)
return -ENOMEM; return -ENOMEM;
} else { } else {
struct hns_roce_hem_mhop mhop = {};
unsigned long buf_chunk_size; unsigned long buf_chunk_size;
unsigned long bt_chunk_size; unsigned long bt_chunk_size;
unsigned long bt_chunk_num; unsigned long bt_chunk_num;
unsigned long num_bt_l0 = 0; unsigned long num_bt_l0 = 0;
u32 hop_num; u32 hop_num;
switch (type) { if (get_hem_table_config(hr_dev, &mhop, type))
case HEM_TYPE_QPC:
buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
+ PAGE_SHIFT);
num_bt_l0 = hr_dev->caps.qpc_bt_num;
hop_num = hr_dev->caps.qpc_hop_num;
break;
case HEM_TYPE_MTPT:
buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
+ PAGE_SHIFT);
num_bt_l0 = hr_dev->caps.mpt_bt_num;
hop_num = hr_dev->caps.mpt_hop_num;
break;
case HEM_TYPE_CQC:
buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
+ PAGE_SHIFT);
num_bt_l0 = hr_dev->caps.cqc_bt_num;
hop_num = hr_dev->caps.cqc_hop_num;
break;
case HEM_TYPE_SCCC:
buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz
+ PAGE_SHIFT);
num_bt_l0 = hr_dev->caps.sccc_bt_num;
hop_num = hr_dev->caps.sccc_hop_num;
break;
case HEM_TYPE_QPC_TIMER:
buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
+ PAGE_SHIFT);
num_bt_l0 = hr_dev->caps.qpc_timer_bt_num;
hop_num = hr_dev->caps.qpc_timer_hop_num;
break;
case HEM_TYPE_CQC_TIMER:
buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
+ PAGE_SHIFT);
num_bt_l0 = hr_dev->caps.cqc_timer_bt_num;
hop_num = hr_dev->caps.cqc_timer_hop_num;
break;
case HEM_TYPE_SRQC:
buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
+ PAGE_SHIFT);
num_bt_l0 = hr_dev->caps.srqc_bt_num;
hop_num = hr_dev->caps.srqc_hop_num;
break;
case HEM_TYPE_MTT:
buf_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = buf_chunk_size;
hop_num = hr_dev->caps.mtt_hop_num;
break;
case HEM_TYPE_CQE:
buf_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = buf_chunk_size;
hop_num = hr_dev->caps.cqe_hop_num;
break;
case HEM_TYPE_SRQWQE:
buf_chunk_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = buf_chunk_size;
hop_num = hr_dev->caps.srqwqe_hop_num;
break;
case HEM_TYPE_IDX:
buf_chunk_size = 1 << (hr_dev->caps.idx_ba_pg_sz
+ PAGE_SHIFT);
bt_chunk_size = buf_chunk_size;
hop_num = hr_dev->caps.idx_hop_num;
break;
default:
dev_err(dev,
"Table %d not support to init hem table here!\n",
type);
return -EINVAL; return -EINVAL;
}
buf_chunk_size = mhop.buf_chunk_size;
bt_chunk_size = mhop.bt_chunk_size;
num_bt_l0 = mhop.ba_l0_num;
hop_num = mhop.hop_num;
obj_per_chunk = buf_chunk_size / obj_size; obj_per_chunk = buf_chunk_size / obj_size;
num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk; num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
bt_chunk_num = bt_chunk_size / BA_BYTE_LEN; bt_chunk_num = bt_chunk_size / BA_BYTE_LEN;
@ -1075,7 +1036,8 @@ static void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev,
int i; int i;
u64 obj; u64 obj;
hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop); if (hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop))
return;
buf_chunk_size = table->type < HEM_TYPE_MTT ? mhop.buf_chunk_size : buf_chunk_size = table->type < HEM_TYPE_MTT ? mhop.buf_chunk_size :
mhop.bt_chunk_size; mhop.bt_chunk_size;
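
The long per-type switch that used to live in hns_roce_init_hem_table() is replaced by a call to get_hem_table_config(), which fills the shared struct hns_roce_hem_mhop so the same lookup can also serve hns_roce_calc_hem_mhop(). The helper's body is not visible in this part of the diff; the following is a condensed sketch of its likely shape, reconstructed from the removed switch, with only the QPC case spelled out:

/* Sketch only: one place maps a HEM table type to its chunk sizes and hop
 * count by filling the mhop descriptor. The remaining types follow the
 * same pattern as the switch removed above.
 */
static int get_hem_table_config(struct hns_roce_dev *hr_dev,
				struct hns_roce_hem_mhop *mhop, u32 type)
{
	switch (type) {
	case HEM_TYPE_QPC:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz +
					     PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz +
					    PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.qpc_bt_num;
		mhop->hop_num = hr_dev->caps.qpc_hop_num;
		break;
	/* ... MTPT, CQC, SCCC, QPC/CQC timers, SRQC, MTT, CQE, SRQWQE, IDX ... */
	default:
		dev_err(hr_dev->dev,
			"Table %d not support to init hem table here!\n", type);
		return -EINVAL;
	}

	return 0;
}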


@ -102,9 +102,9 @@ struct hns_roce_hem_mhop {
u32 buf_chunk_size; u32 buf_chunk_size;
u32 bt_chunk_size; u32 bt_chunk_size;
u32 ba_l0_num; u32 ba_l0_num;
u32 l0_idx;/* level 0 base address table index */ u32 l0_idx; /* level 0 base address table index */
u32 l1_idx;/* level 1 base address table index */ u32 l1_idx; /* level 1 base address table index */
u32 l2_idx;/* level 2 base address table index */ u32 l2_idx; /* level 2 base address table index */
}; };
void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem); void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem);


@ -73,7 +73,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
int ps_opcode = 0, i = 0; int ps_opcode = 0, i = 0;
unsigned long flags = 0; unsigned long flags = 0;
void *wqe = NULL; void *wqe = NULL;
u32 doorbell[2]; __le32 doorbell[2];
int nreq = 0; int nreq = 0;
u32 ind = 0; u32 ind = 0;
int ret = 0; int ret = 0;
@ -175,13 +175,11 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
roce_set_field(ud_sq_wqe->u32_36, roce_set_field(ud_sq_wqe->u32_36,
UD_SEND_WQE_U32_36_FLOW_LABEL_M, UD_SEND_WQE_U32_36_FLOW_LABEL_M,
UD_SEND_WQE_U32_36_FLOW_LABEL_S, UD_SEND_WQE_U32_36_FLOW_LABEL_S,
ah->av.sl_tclass_flowlabel & ah->av.flowlabel);
HNS_ROCE_FLOW_LABEL_MASK);
roce_set_field(ud_sq_wqe->u32_36, roce_set_field(ud_sq_wqe->u32_36,
UD_SEND_WQE_U32_36_PRIORITY_M, UD_SEND_WQE_U32_36_PRIORITY_M,
UD_SEND_WQE_U32_36_PRIORITY_S, UD_SEND_WQE_U32_36_PRIORITY_S,
le32_to_cpu(ah->av.sl_tclass_flowlabel) >> ah->av.sl);
HNS_ROCE_SL_SHIFT);
roce_set_field(ud_sq_wqe->u32_36, roce_set_field(ud_sq_wqe->u32_36,
UD_SEND_WQE_U32_36_SGID_INDEX_M, UD_SEND_WQE_U32_36_SGID_INDEX_M,
UD_SEND_WQE_U32_36_SGID_INDEX_S, UD_SEND_WQE_U32_36_SGID_INDEX_S,
@ -195,8 +193,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
roce_set_field(ud_sq_wqe->u32_40, roce_set_field(ud_sq_wqe->u32_40,
UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M, UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M,
UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S, UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S,
ah->av.sl_tclass_flowlabel >> ah->av.tclass);
HNS_ROCE_TCLASS_SHIFT);
memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN); memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN);
@ -335,10 +332,10 @@ out:
SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn); SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1); roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);
doorbell[0] = le32_to_cpu(sq_db.u32_4); doorbell[0] = sq_db.u32_4;
doorbell[1] = le32_to_cpu(sq_db.u32_8); doorbell[1] = sq_db.u32_8;
hns_roce_write64_k((__le32 *)doorbell, qp->sq.db_reg_l); hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
qp->sq_next_wqe = ind; qp->sq_next_wqe = ind;
} }
@ -363,7 +360,7 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct device *dev = &hr_dev->pdev->dev; struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_rq_db rq_db; struct hns_roce_rq_db rq_db;
uint32_t doorbell[2] = {0}; __le32 doorbell[2] = {0};
spin_lock_irqsave(&hr_qp->rq.lock, flags); spin_lock_irqsave(&hr_qp->rq.lock, flags);
ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1); ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);
@ -437,11 +434,10 @@ out:
roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S, roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
1); 1);
doorbell[0] = le32_to_cpu(rq_db.u32_4); doorbell[0] = rq_db.u32_4;
doorbell[1] = le32_to_cpu(rq_db.u32_8); doorbell[1] = rq_db.u32_8;
hns_roce_write64_k((__le32 *)doorbell, hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
hr_qp->rq.db_reg_l);
} }
} }
spin_unlock_irqrestore(&hr_qp->rq.lock, flags); spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
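
Throughout hns_roce_hw_v1.c the doorbell scratch arrays change from u32 to __le32 and the le32_to_cpu()/cast round trips disappear: the db structs already hold little-endian words and hns_roce_write64_k() takes __le32, so the values can flow through untouched. A minimal sketch of the resulting send-queue path, using the field names from the diff:

/* Sketch: the doorbell words stay __le32 from the db struct to the MMIO
 * write, with no byte-order conversion in between.
 */
static void post_sq_doorbell(struct hns_roce_qp *qp,
			     const struct hns_roce_sq_db *sq_db)
{
	__le32 doorbell[2];

	doorbell[0] = sq_db->u32_4;
	doorbell[1] = sq_db->u32_8;

	hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
}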
@ -715,7 +711,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
struct ib_cq *cq; struct ib_cq *cq;
struct ib_pd *pd; struct ib_pd *pd;
union ib_gid dgid; union ib_gid dgid;
u64 subnet_prefix; __be64 subnet_prefix;
int attr_mask = 0; int attr_mask = 0;
int ret; int ret;
int i, j; int i, j;
@ -971,7 +967,7 @@ static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
struct hns_roce_free_mr *free_mr; struct hns_roce_free_mr *free_mr;
struct hns_roce_v1_priv *priv; struct hns_roce_v1_priv *priv;
struct completion comp; struct completion comp;
unsigned long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS; long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS;
priv = (struct hns_roce_v1_priv *)hr_dev->priv; priv = (struct hns_roce_v1_priv *)hr_dev->priv;
free_mr = &priv->free_mr; free_mr = &priv->free_mr;
@ -991,7 +987,7 @@ static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
queue_work(free_mr->free_mr_wq, &(lp_qp_work->work)); queue_work(free_mr->free_mr_wq, &(lp_qp_work->work));
while (end) { while (end > 0) {
if (try_wait_for_completion(&comp)) if (try_wait_for_completion(&comp))
return 0; return 0;
msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE); msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE);
@ -1109,7 +1105,7 @@ static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
struct hns_roce_free_mr *free_mr; struct hns_roce_free_mr *free_mr;
struct hns_roce_v1_priv *priv; struct hns_roce_v1_priv *priv;
struct completion comp; struct completion comp;
unsigned long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS; long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS;
unsigned long start = jiffies; unsigned long start = jiffies;
int npages; int npages;
int ret = 0; int ret = 0;
@ -1139,7 +1135,7 @@ static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
queue_work(free_mr->free_mr_wq, &(mr_work->work)); queue_work(free_mr->free_mr_wq, &(mr_work->work));
while (end) { while (end > 0) {
if (try_wait_for_completion(&comp)) if (try_wait_for_completion(&comp))
goto free_mr; goto free_mr;
msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE); msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
@ -2165,7 +2161,7 @@ static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
{ {
struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
u32 notification_flag; u32 notification_flag;
__le32 doorbell[2]; __le32 doorbell[2] = {};
notification_flag = (flags & IB_CQ_SOLICITED_MASK) == notification_flag = (flags & IB_CQ_SOLICITED_MASK) ==
IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL; IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL;
@ -2430,7 +2426,8 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
{ {
struct device *dev = &hr_dev->pdev->dev; struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_v1_priv *priv; struct hns_roce_v1_priv *priv;
unsigned long end = 0, flags = 0; unsigned long flags = 0;
long end = HW_SYNC_TIMEOUT_MSECS;
__le32 bt_cmd_val[2] = {0}; __le32 bt_cmd_val[2] = {0};
void __iomem *bt_cmd; void __iomem *bt_cmd;
u64 bt_ba = 0; u64 bt_ba = 0;
@ -2439,18 +2436,12 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
switch (table->type) { switch (table->type) {
case HEM_TYPE_QPC: case HEM_TYPE_QPC:
roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
bt_ba = priv->bt_table.qpc_buf.map >> 12; bt_ba = priv->bt_table.qpc_buf.map >> 12;
break; break;
case HEM_TYPE_MTPT: case HEM_TYPE_MTPT:
roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_MTPT);
bt_ba = priv->bt_table.mtpt_buf.map >> 12; bt_ba = priv->bt_table.mtpt_buf.map >> 12;
break; break;
case HEM_TYPE_CQC: case HEM_TYPE_CQC:
roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
bt_ba = priv->bt_table.cqc_buf.map >> 12; bt_ba = priv->bt_table.cqc_buf.map >> 12;
break; break;
case HEM_TYPE_SRQC: case HEM_TYPE_SRQC:
@ -2459,6 +2450,8 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
default: default:
return 0; return 0;
} }
roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type);
roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M, roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj); ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0); roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
@ -2468,7 +2461,6 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG; bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
end = HW_SYNC_TIMEOUT_MSECS;
while (1) { while (1) {
if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) { if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
if (!end) { if (!end) {
@ -2484,7 +2476,7 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
end -= HW_SYNC_SLEEP_TIME_INTERVAL; end -= HW_SYNC_SLEEP_TIME_INTERVAL;
} }
bt_cmd_val[0] = (__le32)bt_ba; bt_cmd_val[0] = cpu_to_le32(bt_ba);
roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M, roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32); ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32);
hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG); hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
@ -2627,7 +2619,7 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port); QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port);
roce_set_bit(context->qp1c_bytes_16, roce_set_bit(context->qp1c_bytes_16,
QP1C_BYTES_16_SIGNALING_TYPE_S, QP1C_BYTES_16_SIGNALING_TYPE_S,
le32_to_cpu(hr_qp->sq_signal_bits)); hr_qp->sq_signal_bits);
roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S, roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S,
1); 1);
roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S, roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
@ -2933,7 +2925,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
1); 1);
roce_set_bit(context->qpc_bytes_32, roce_set_bit(context->qpc_bytes_32,
QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S, QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S,
le32_to_cpu(hr_qp->sq_signal_bits)); hr_qp->sq_signal_bits);
port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) :
hr_qp->port; hr_qp->port;
@ -3578,7 +3570,7 @@ static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148, qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148,
QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M, QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S); QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S);
qp_attr->rnr_retry = (u8)context->rnr_retry; qp_attr->rnr_retry = (u8)le32_to_cpu(context->rnr_retry);
done: done:
qp_attr->cur_qp_state = qp_attr->qp_state; qp_attr->cur_qp_state = qp_attr->qp_state;
@ -4021,7 +4013,8 @@ static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev,
++eq->cons_index; ++eq->cons_index;
ceqes_found = 1; ceqes_found = 1;
if (eq->cons_index > 2 * hr_dev->caps.ceqe_depth - 1) { if (eq->cons_index >
EQ_DEPTH_COEFF * hr_dev->caps.ceqe_depth - 1) {
dev_warn(&eq->hr_dev->pdev->dev, dev_warn(&eq->hr_dev->pdev->dev,
"cons_index overflow, set back to 0.\n"); "cons_index overflow, set back to 0.\n");
eq->cons_index = 0; eq->cons_index = 0;
@ -4518,7 +4511,6 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
struct platform_device *pdev = NULL; struct platform_device *pdev = NULL;
struct net_device *netdev = NULL; struct net_device *netdev = NULL;
struct device_node *net_node; struct device_node *net_node;
struct resource *res;
int port_cnt = 0; int port_cnt = 0;
u8 phy_port; u8 phy_port;
int ret; int ret;
@ -4557,8 +4549,7 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
} }
/* get the mapped register base address */ /* get the mapped register base address */
res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0); hr_dev->reg_base = devm_platform_ioremap_resource(hr_dev->pdev, 0);
hr_dev->reg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(hr_dev->reg_base)) if (IS_ERR(hr_dev->reg_base))
return PTR_ERR(hr_dev->reg_base); return PTR_ERR(hr_dev->reg_base);
@ -4633,10 +4624,8 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
/* fetch the interrupt numbers */ /* fetch the interrupt numbers */
for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) { for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) {
hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i); hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
if (hr_dev->irq[i] <= 0) { if (hr_dev->irq[i] <= 0)
dev_err(dev, "platform get of irq[=%d] failed!\n", i);
return -EINVAL; return -EINVAL;
}
} }
return 0; return 0;
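
hns_roce_get_cfg() now uses devm_platform_ioremap_resource(), which folds platform_get_resource() plus devm_ioremap_resource() into one call, and drops its own error print for platform_get_irq() failures since the core already reports those. A condensed sketch of the resulting resource setup, under the names used by the hunks above (the function name here is illustrative):

/* Sketch: map register BAR 0 and fetch the IRQs with the consolidated
 * helpers; the caller only needs to propagate failures.
 */
static int example_get_resources(struct hns_roce_dev *hr_dev)
{
	int i;

	hr_dev->reg_base = devm_platform_ioremap_resource(hr_dev->pdev, 0);
	if (IS_ERR(hr_dev->reg_base))
		return PTR_ERR(hr_dev->reg_base);

	for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) {
		hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
		if (hr_dev->irq[i] <= 0)
			return -EINVAL;
	}

	return 0;
}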

File diff suppressed because it is too large

@ -96,7 +96,10 @@
#define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2 #define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2
#define HNS_ROCE_V2_RSV_QPS 8 #define HNS_ROCE_V2_RSV_QPS 8
#define HNS_ROCE_V2_HW_RST_TIMEOUT 1000 #define HNS_ROCE_V2_HW_RST_TIMEOUT 1000
#define HNS_ROCE_V2_HW_RST_UNINT_DELAY 100
#define HNS_ROCE_V2_HW_RST_COMPLETION_WAIT 20
#define HNS_ROCE_CONTEXT_HOP_NUM 1 #define HNS_ROCE_CONTEXT_HOP_NUM 1
#define HNS_ROCE_SCCC_HOP_NUM 1 #define HNS_ROCE_SCCC_HOP_NUM 1
@ -126,8 +129,6 @@
#define HNS_ROCE_CMD_FLAG_ERR_INTR BIT(HNS_ROCE_CMD_FLAG_ERR_INTR_SHIFT) #define HNS_ROCE_CMD_FLAG_ERR_INTR BIT(HNS_ROCE_CMD_FLAG_ERR_INTR_SHIFT)
#define HNS_ROCE_CMQ_DESC_NUM_S 3 #define HNS_ROCE_CMQ_DESC_NUM_S 3
#define HNS_ROCE_CMQ_EN_B 16
#define HNS_ROCE_CMQ_ENABLE BIT(HNS_ROCE_CMQ_EN_B)
#define HNS_ROCE_CMQ_SCC_CLR_DONE_CNT 5 #define HNS_ROCE_CMQ_SCC_CLR_DONE_CNT 5


@ -262,7 +262,8 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
props->state = (netif_running(net_dev) && netif_carrier_ok(net_dev)) ? props->state = (netif_running(net_dev) && netif_carrier_ok(net_dev)) ?
IB_PORT_ACTIVE : IB_PORT_DOWN; IB_PORT_ACTIVE : IB_PORT_DOWN;
props->phys_state = (props->state == IB_PORT_ACTIVE) ? props->phys_state = (props->state == IB_PORT_ACTIVE) ?
HNS_ROCE_PHY_LINKUP : HNS_ROCE_PHY_DISABLED; IB_PORT_PHYS_STATE_LINK_UP :
IB_PORT_PHYS_STATE_DISABLED;
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags); spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
@ -901,6 +902,7 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
goto error_failed_cmd_init; goto error_failed_cmd_init;
} }
/* EQ depends on poll mode, event mode depends on EQ */
ret = hr_dev->hw->init_eq(hr_dev); ret = hr_dev->hw->init_eq(hr_dev);
if (ret) { if (ret) {
dev_err(dev, "eq init failed!\n"); dev_err(dev, "eq init failed!\n");
@ -910,8 +912,9 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
if (hr_dev->cmd_mod) { if (hr_dev->cmd_mod) {
ret = hns_roce_cmd_use_events(hr_dev); ret = hns_roce_cmd_use_events(hr_dev);
if (ret) { if (ret) {
dev_err(dev, "Switch to event-driven cmd failed!\n"); dev_warn(dev,
goto error_failed_use_event; "Cmd event mode failed, set back to poll!\n");
hns_roce_cmd_use_polling(hr_dev);
} }
} }
@ -954,8 +957,6 @@ error_failed_setup_hca:
error_failed_init_hem: error_failed_init_hem:
if (hr_dev->cmd_mod) if (hr_dev->cmd_mod)
hns_roce_cmd_use_polling(hr_dev); hns_roce_cmd_use_polling(hr_dev);
error_failed_use_event:
hr_dev->hw->cleanup_eq(hr_dev); hr_dev->hw->cleanup_eq(hr_dev);
error_failed_eq_table: error_failed_eq_table:
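
hns_roce_init() no longer treats a failure to switch the command path to event (interrupt) mode as fatal: it warns and stays in polling mode, which also lets the error_failed_use_event unwind label go away. Condensed, the new hunk reads roughly as follows:

/* Event-mode command completion is an optimization, so failure to enable
 * it only produces a warning and the driver keeps polling.
 */
if (hr_dev->cmd_mod) {
	ret = hns_roce_cmd_use_events(hr_dev);
	if (ret) {
		dev_warn(dev, "Cmd event mode failed, set back to poll!\n");
		hns_roce_cmd_use_polling(hr_dev);
	}
}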


@ -347,49 +347,178 @@ static void hns_roce_loop_free(struct hns_roce_dev *hr_dev,
mr->pbl_bt_l0 = NULL; mr->pbl_bt_l0 = NULL;
mr->pbl_l0_dma_addr = 0; mr->pbl_l0_dma_addr = 0;
} }
static int pbl_1hop_alloc(struct hns_roce_dev *hr_dev, int npages,
struct hns_roce_mr *mr, u32 pbl_bt_sz)
{
struct device *dev = hr_dev->dev;
if (npages > pbl_bt_sz / 8) {
dev_err(dev, "npages %d is larger than buf_pg_sz!",
npages);
return -EINVAL;
}
mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
&(mr->pbl_dma_addr),
GFP_KERNEL);
if (!mr->pbl_buf)
return -ENOMEM;
mr->pbl_size = npages;
mr->pbl_ba = mr->pbl_dma_addr;
mr->pbl_hop_num = 1;
mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
return 0;
}
static int pbl_2hop_alloc(struct hns_roce_dev *hr_dev, int npages,
struct hns_roce_mr *mr, u32 pbl_bt_sz)
{
struct device *dev = hr_dev->dev;
int npages_allocated;
u64 pbl_last_bt_num;
u64 pbl_bt_cnt = 0;
u64 size;
int i;
pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
/* alloc L1 BT */
for (i = 0; i < pbl_bt_sz / 8; i++) {
if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
size = pbl_bt_sz;
} else {
npages_allocated = i * (pbl_bt_sz / 8);
size = (npages - npages_allocated) * 8;
}
mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size,
&(mr->pbl_l1_dma_addr[i]),
GFP_KERNEL);
if (!mr->pbl_bt_l1[i]) {
hns_roce_loop_free(hr_dev, mr, 1, i, 0);
return -ENOMEM;
}
*(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
pbl_bt_cnt++;
if (pbl_bt_cnt >= pbl_last_bt_num)
break;
}
mr->l0_chunk_last_num = i + 1;
return 0;
}
static int pbl_3hop_alloc(struct hns_roce_dev *hr_dev, int npages,
struct hns_roce_mr *mr, u32 pbl_bt_sz)
{
struct device *dev = hr_dev->dev;
int mr_alloc_done = 0;
int npages_allocated;
u64 pbl_last_bt_num;
u64 pbl_bt_cnt = 0;
u64 bt_idx;
u64 size;
int i;
int j = 0;
pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num,
sizeof(*mr->pbl_l2_dma_addr),
GFP_KERNEL);
if (!mr->pbl_l2_dma_addr)
return -ENOMEM;
mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num,
sizeof(*mr->pbl_bt_l2),
GFP_KERNEL);
if (!mr->pbl_bt_l2)
goto err_kcalloc_bt_l2;
/* alloc L1, L2 BT */
for (i = 0; i < pbl_bt_sz / 8; i++) {
mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz,
&(mr->pbl_l1_dma_addr[i]),
GFP_KERNEL);
if (!mr->pbl_bt_l1[i]) {
hns_roce_loop_free(hr_dev, mr, 1, i, 0);
goto err_dma_alloc_l0;
}
*(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
for (j = 0; j < pbl_bt_sz / 8; j++) {
bt_idx = i * pbl_bt_sz / 8 + j;
if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
size = pbl_bt_sz;
} else {
npages_allocated = bt_idx *
(pbl_bt_sz / 8);
size = (npages - npages_allocated) * 8;
}
mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent(
dev, size,
&(mr->pbl_l2_dma_addr[bt_idx]),
GFP_KERNEL);
if (!mr->pbl_bt_l2[bt_idx]) {
hns_roce_loop_free(hr_dev, mr, 2, i, j);
goto err_dma_alloc_l0;
}
*(mr->pbl_bt_l1[i] + j) =
mr->pbl_l2_dma_addr[bt_idx];
pbl_bt_cnt++;
if (pbl_bt_cnt >= pbl_last_bt_num) {
mr_alloc_done = 1;
break;
}
}
if (mr_alloc_done)
break;
}
mr->l0_chunk_last_num = i + 1;
mr->l1_chunk_last_num = j + 1;
return 0;
err_dma_alloc_l0:
kfree(mr->pbl_bt_l2);
mr->pbl_bt_l2 = NULL;
err_kcalloc_bt_l2:
kfree(mr->pbl_l2_dma_addr);
mr->pbl_l2_dma_addr = NULL;
return -ENOMEM;
}
/* PBL multi hop addressing */ /* PBL multi hop addressing */
static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages, static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
struct hns_roce_mr *mr) struct hns_roce_mr *mr)
{ {
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
int mr_alloc_done = 0;
int npages_allocated;
int i = 0, j = 0;
u32 pbl_bt_sz; u32 pbl_bt_sz;
u32 mhop_num; u32 mhop_num;
u64 pbl_last_bt_num;
u64 pbl_bt_cnt = 0;
u64 bt_idx;
u64 size;
mhop_num = (mr->type == MR_TYPE_FRMR ? 1 : hr_dev->caps.pbl_hop_num); mhop_num = (mr->type == MR_TYPE_FRMR ? 1 : hr_dev->caps.pbl_hop_num);
pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT); pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
if (mhop_num == HNS_ROCE_HOP_NUM_0) if (mhop_num == HNS_ROCE_HOP_NUM_0)
return 0; return 0;
/* hop_num = 1 */ if (mhop_num == 1)
if (mhop_num == 1) { return pbl_1hop_alloc(hr_dev, npages, mr, pbl_bt_sz);
if (npages > pbl_bt_sz / 8) {
dev_err(dev, "npages %d is larger than buf_pg_sz!",
npages);
return -EINVAL;
}
mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
&(mr->pbl_dma_addr),
GFP_KERNEL);
if (!mr->pbl_buf)
return -ENOMEM;
mr->pbl_size = npages;
mr->pbl_ba = mr->pbl_dma_addr;
mr->pbl_hop_num = mhop_num;
mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
return 0;
}
mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8, mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8,
sizeof(*mr->pbl_l1_dma_addr), sizeof(*mr->pbl_l1_dma_addr),
@ -402,100 +531,23 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
if (!mr->pbl_bt_l1) if (!mr->pbl_bt_l1)
goto err_kcalloc_bt_l1; goto err_kcalloc_bt_l1;
if (mhop_num == 3) {
mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num,
sizeof(*mr->pbl_l2_dma_addr),
GFP_KERNEL);
if (!mr->pbl_l2_dma_addr)
goto err_kcalloc_l2_dma;
mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num,
sizeof(*mr->pbl_bt_l2),
GFP_KERNEL);
if (!mr->pbl_bt_l2)
goto err_kcalloc_bt_l2;
}
/* alloc L0 BT */ /* alloc L0 BT */
mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz,
&(mr->pbl_l0_dma_addr), &(mr->pbl_l0_dma_addr),
GFP_KERNEL); GFP_KERNEL);
if (!mr->pbl_bt_l0) if (!mr->pbl_bt_l0)
goto err_dma_alloc_l0; goto err_kcalloc_l2_dma;
if (mhop_num == 2) { if (mhop_num == 2) {
/* alloc L1 BT */ if (pbl_2hop_alloc(hr_dev, npages, mr, pbl_bt_sz))
for (i = 0; i < pbl_bt_sz / 8; i++) { goto err_kcalloc_l2_dma;
if (pbl_bt_cnt + 1 < pbl_last_bt_num) { }
size = pbl_bt_sz;
} else { if (mhop_num == 3) {
npages_allocated = i * (pbl_bt_sz / 8); if (pbl_3hop_alloc(hr_dev, npages, mr, pbl_bt_sz))
size = (npages - npages_allocated) * 8; goto err_kcalloc_l2_dma;
}
mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size,
&(mr->pbl_l1_dma_addr[i]),
GFP_KERNEL);
if (!mr->pbl_bt_l1[i]) {
hns_roce_loop_free(hr_dev, mr, 1, i, 0);
goto err_dma_alloc_l0;
}
*(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
pbl_bt_cnt++;
if (pbl_bt_cnt >= pbl_last_bt_num)
break;
}
} else if (mhop_num == 3) {
/* alloc L1, L2 BT */
for (i = 0; i < pbl_bt_sz / 8; i++) {
mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz,
&(mr->pbl_l1_dma_addr[i]),
GFP_KERNEL);
if (!mr->pbl_bt_l1[i]) {
hns_roce_loop_free(hr_dev, mr, 1, i, 0);
goto err_dma_alloc_l0;
}
*(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
for (j = 0; j < pbl_bt_sz / 8; j++) {
bt_idx = i * pbl_bt_sz / 8 + j;
if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
size = pbl_bt_sz;
} else {
npages_allocated = bt_idx *
(pbl_bt_sz / 8);
size = (npages - npages_allocated) * 8;
}
mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent(
dev, size,
&(mr->pbl_l2_dma_addr[bt_idx]),
GFP_KERNEL);
if (!mr->pbl_bt_l2[bt_idx]) {
hns_roce_loop_free(hr_dev, mr, 2, i, j);
goto err_dma_alloc_l0;
}
*(mr->pbl_bt_l1[i] + j) =
mr->pbl_l2_dma_addr[bt_idx];
pbl_bt_cnt++;
if (pbl_bt_cnt >= pbl_last_bt_num) {
mr_alloc_done = 1;
break;
}
}
if (mr_alloc_done)
break;
}
} }
mr->l0_chunk_last_num = i + 1;
if (mhop_num == 3)
mr->l1_chunk_last_num = j + 1;
mr->pbl_size = npages; mr->pbl_size = npages;
mr->pbl_ba = mr->pbl_l0_dma_addr; mr->pbl_ba = mr->pbl_l0_dma_addr;
@ -505,14 +557,6 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
return 0; return 0;
err_dma_alloc_l0:
kfree(mr->pbl_bt_l2);
mr->pbl_bt_l2 = NULL;
err_kcalloc_bt_l2:
kfree(mr->pbl_l2_dma_addr);
mr->pbl_l2_dma_addr = NULL;
err_kcalloc_l2_dma: err_kcalloc_l2_dma:
kfree(mr->pbl_bt_l1); kfree(mr->pbl_bt_l1);
mr->pbl_bt_l1 = NULL; mr->pbl_bt_l1 = NULL;
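
The body of hns_roce_mhop_alloc() is split so that the hop-specific loops live in pbl_1hop_alloc(), pbl_2hop_alloc() and pbl_3hop_alloc(), and each helper unwinds only what it allocated itself. Condensed from the hunks above, the top-level flow becomes roughly:

/* Condensed sketch of the dispatch; the L1 bookkeeping arrays and the L0
 * table are allocated before this point, and err_kcalloc_l2_dma unwinds
 * them on failure.
 */
mhop_num = (mr->type == MR_TYPE_FRMR) ? 1 : hr_dev->caps.pbl_hop_num;
pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);

if (mhop_num == HNS_ROCE_HOP_NUM_0)
	return 0;
if (mhop_num == 1)
	return pbl_1hop_alloc(hr_dev, npages, mr, pbl_bt_sz);

if (mhop_num == 2 && pbl_2hop_alloc(hr_dev, npages, mr, pbl_bt_sz))
	goto err_kcalloc_l2_dma;
if (mhop_num == 3 && pbl_3hop_alloc(hr_dev, npages, mr, pbl_bt_sz))
	goto err_kcalloc_l2_dma;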
@ -1161,6 +1205,83 @@ err_free:
return ERR_PTR(ret); return ERR_PTR(ret);
} }
static int rereg_mr_trans(struct ib_mr *ibmr, int flags,
u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
struct hns_roce_cmd_mailbox *mailbox,
u32 pdn, struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
struct hns_roce_mr *mr = to_hr_mr(ibmr);
struct device *dev = hr_dev->dev;
int npages;
int ret;
if (mr->size != ~0ULL) {
npages = ib_umem_page_count(mr->umem);
if (hr_dev->caps.pbl_hop_num)
hns_roce_mhop_free(hr_dev, mr);
else
dma_free_coherent(dev, npages * 8,
mr->pbl_buf, mr->pbl_dma_addr);
}
ib_umem_release(mr->umem);
mr->umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
if (IS_ERR(mr->umem)) {
ret = PTR_ERR(mr->umem);
mr->umem = NULL;
return -ENOMEM;
}
npages = ib_umem_page_count(mr->umem);
if (hr_dev->caps.pbl_hop_num) {
ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
if (ret)
goto release_umem;
} else {
mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
&(mr->pbl_dma_addr),
GFP_KERNEL);
if (!mr->pbl_buf) {
ret = -ENOMEM;
goto release_umem;
}
}
ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
mr_access_flags, virt_addr,
length, mailbox->buf);
if (ret)
goto release_umem;
ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
if (ret) {
if (mr->size != ~0ULL) {
npages = ib_umem_page_count(mr->umem);
if (hr_dev->caps.pbl_hop_num)
hns_roce_mhop_free(hr_dev, mr);
else
dma_free_coherent(dev, npages * 8,
mr->pbl_buf,
mr->pbl_dma_addr);
}
goto release_umem;
}
return 0;
release_umem:
ib_umem_release(mr->umem);
return ret;
}
int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length, int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
u64 virt_addr, int mr_access_flags, struct ib_pd *pd, u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
struct ib_udata *udata) struct ib_udata *udata)
@ -1171,7 +1292,6 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
unsigned long mtpt_idx; unsigned long mtpt_idx;
u32 pdn = 0; u32 pdn = 0;
int npages;
int ret; int ret;
if (!mr->enabled) if (!mr->enabled)
@ -1198,73 +1318,25 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
pdn = to_hr_pd(pd)->pdn; pdn = to_hr_pd(pd)->pdn;
if (flags & IB_MR_REREG_TRANS) { if (flags & IB_MR_REREG_TRANS) {
if (mr->size != ~0ULL) { ret = rereg_mr_trans(ibmr, flags,
npages = ib_umem_page_count(mr->umem); start, length,
virt_addr, mr_access_flags,
if (hr_dev->caps.pbl_hop_num) mailbox, pdn, udata);
hns_roce_mhop_free(hr_dev, mr); if (ret)
else
dma_free_coherent(dev, npages * 8, mr->pbl_buf,
mr->pbl_dma_addr);
}
ib_umem_release(mr->umem);
mr->umem =
ib_umem_get(udata, start, length, mr_access_flags, 0);
if (IS_ERR(mr->umem)) {
ret = PTR_ERR(mr->umem);
mr->umem = NULL;
goto free_cmd_mbox; goto free_cmd_mbox;
} } else {
npages = ib_umem_page_count(mr->umem); ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
mr_access_flags, virt_addr,
if (hr_dev->caps.pbl_hop_num) { length, mailbox->buf);
ret = hns_roce_mhop_alloc(hr_dev, npages, mr); if (ret)
if (ret)
goto release_umem;
} else {
mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
&(mr->pbl_dma_addr),
GFP_KERNEL);
if (!mr->pbl_buf) {
ret = -ENOMEM;
goto release_umem;
}
}
}
ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
mr_access_flags, virt_addr,
length, mailbox->buf);
if (ret) {
if (flags & IB_MR_REREG_TRANS)
goto release_umem;
else
goto free_cmd_mbox; goto free_cmd_mbox;
} }
if (flags & IB_MR_REREG_TRANS) {
ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
if (ret) {
if (mr->size != ~0ULL) {
npages = ib_umem_page_count(mr->umem);
if (hr_dev->caps.pbl_hop_num)
hns_roce_mhop_free(hr_dev, mr);
else
dma_free_coherent(dev, npages * 8,
mr->pbl_buf,
mr->pbl_dma_addr);
}
goto release_umem;
}
}
ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, mtpt_idx); ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, mtpt_idx);
if (ret) { if (ret) {
dev_err(dev, "SW2HW_MPT failed (%d)\n", ret); dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
goto release_umem; ib_umem_release(mr->umem);
goto free_cmd_mbox;
} }
mr->enabled = 1; mr->enabled = 1;
@ -1275,9 +1347,6 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
return 0; return 0;
release_umem:
ib_umem_release(mr->umem);
free_cmd_mbox: free_cmd_mbox:
hns_roce_free_cmd_mailbox(hr_dev, mailbox); hns_roce_free_cmd_mailbox(hr_dev, mailbox);
@ -1357,7 +1426,7 @@ static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr)
{ {
struct hns_roce_mr *mr = to_hr_mr(ibmr); struct hns_roce_mr *mr = to_hr_mr(ibmr);
mr->pbl_buf[mr->npages++] = cpu_to_le64(addr); mr->pbl_buf[mr->npages++] = addr;
return 0; return 0;
} }
@ -1528,10 +1597,9 @@ static int hns_roce_write_mtr(struct hns_roce_dev *hr_dev,
/* Save page addr, low 12 bits : 0 */ /* Save page addr, low 12 bits : 0 */
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
mtts[i] = cpu_to_le64(bufs[npage] >> mtts[i] = bufs[npage] >> PAGE_ADDR_SHIFT;
PAGE_ADDR_SHIFT);
else else
mtts[i] = cpu_to_le64(bufs[npage]); mtts[i] = bufs[npage];
npage++; npage++;
} }


@ -324,31 +324,46 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
return 0; return 0;
} }
static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
struct ib_qp_cap *cap, struct ib_qp_cap *cap,
struct hns_roce_qp *hr_qp, struct hns_roce_ib_create_qp *ucmd)
struct hns_roce_ib_create_qp *ucmd)
{ {
u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz); u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
u8 max_sq_stride = ilog2(roundup_sq_stride); u8 max_sq_stride = ilog2(roundup_sq_stride);
u32 ex_sge_num;
u32 page_size;
u32 max_cnt;
/* Sanity check SQ size before proceeding */ /* Sanity check SQ size before proceeding */
if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes || if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
ucmd->log_sq_stride > max_sq_stride || ucmd->log_sq_stride > max_sq_stride ||
ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) { ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
dev_err(hr_dev->dev, "check SQ size error!\n"); ibdev_err(&hr_dev->ib_dev, "check SQ size error!\n");
return -EINVAL; return -EINVAL;
} }
if (cap->max_send_sge > hr_dev->caps.max_sq_sg) { if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
dev_err(hr_dev->dev, "SQ sge error! max_send_sge=%d\n", ibdev_err(&hr_dev->ib_dev, "SQ sge error! max_send_sge=%d\n",
cap->max_send_sge); cap->max_send_sge);
return -EINVAL; return -EINVAL;
} }
return 0;
}
static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
struct ib_qp_cap *cap,
struct hns_roce_qp *hr_qp,
struct hns_roce_ib_create_qp *ucmd)
{
u32 ex_sge_num;
u32 page_size;
u32 max_cnt;
int ret;
ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
if (ret) {
ibdev_err(&hr_dev->ib_dev, "Sanity check sq size failed\n");
return ret;
}
hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count; hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
hr_qp->sq.wqe_shift = ucmd->log_sq_stride; hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
@ -501,43 +516,10 @@ static int calc_wqe_bt_page_shift(struct hns_roce_dev *hr_dev,
return bt_pg_shift - PAGE_SHIFT; return bt_pg_shift - PAGE_SHIFT;
} }
static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, static int set_extend_sge_param(struct hns_roce_dev *hr_dev,
struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp)
struct hns_roce_qp *hr_qp)
{ {
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
u32 page_size;
u32 max_cnt;
int size;
if (cap->max_send_wr > hr_dev->caps.max_wqes ||
cap->max_send_sge > hr_dev->caps.max_sq_sg ||
cap->max_inline_data > hr_dev->caps.max_sq_inline) {
dev_err(dev, "SQ WR or sge or inline data error!\n");
return -EINVAL;
}
hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
hr_qp->sq_max_wqes_per_wr = 1;
hr_qp->sq_spare_wqes = 0;
if (hr_dev->caps.min_wqes)
max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
else
max_cnt = cap->max_send_wr;
hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n");
return -EINVAL;
}
/* Get data_seg numbers */
max_cnt = max(1U, cap->max_send_sge);
if (hr_dev->caps.max_sq_sg <= 2)
hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
else
hr_qp->sq.max_gs = max_cnt;
if (hr_qp->sq.max_gs > 2) { if (hr_qp->sq.max_gs > 2) {
hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt * hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
@ -560,6 +542,52 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
} }
} }
return 0;
}
static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
struct ib_qp_cap *cap,
struct hns_roce_qp *hr_qp)
{
struct device *dev = hr_dev->dev;
u32 page_size;
u32 max_cnt;
int size;
int ret;
if (cap->max_send_wr > hr_dev->caps.max_wqes ||
cap->max_send_sge > hr_dev->caps.max_sq_sg ||
cap->max_inline_data > hr_dev->caps.max_sq_inline) {
dev_err(dev, "SQ WR or sge or inline data error!\n");
return -EINVAL;
}
hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
if (hr_dev->caps.min_wqes)
max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
else
max_cnt = cap->max_send_wr;
hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n");
return -EINVAL;
}
/* Get data_seg numbers */
max_cnt = max(1U, cap->max_send_sge);
if (hr_dev->caps.max_sq_sg <= 2)
hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
else
hr_qp->sq.max_gs = max_cnt;
ret = set_extend_sge_param(hr_dev, hr_qp);
if (ret) {
dev_err(dev, "set extend sge parameters fail\n");
return ret;
}
/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */ /* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT); page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
hr_qp->sq.offset = 0; hr_qp->sq.offset = 0;
@ -607,13 +635,57 @@ static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
return 1; return 1;
} }
static int alloc_rq_inline_buf(struct hns_roce_qp *hr_qp,
struct ib_qp_init_attr *init_attr)
{
u32 max_recv_sge = init_attr->cap.max_recv_sge;
struct hns_roce_rinl_wqe *wqe_list;
u32 wqe_cnt = hr_qp->rq.wqe_cnt;
int i;
/* allocate recv inline buf */
wqe_list = kcalloc(wqe_cnt, sizeof(struct hns_roce_rinl_wqe),
GFP_KERNEL);
if (!wqe_list)
goto err;
/* Allocate a continuous buffer for all inline sge we need */
wqe_list[0].sg_list = kcalloc(wqe_cnt, (max_recv_sge *
sizeof(struct hns_roce_rinl_sge)),
GFP_KERNEL);
if (!wqe_list[0].sg_list)
goto err_wqe_list;
/* Assign buffers of sg_list to each inline wqe */
for (i = 1; i < wqe_cnt; i++)
wqe_list[i].sg_list = &wqe_list[0].sg_list[i * max_recv_sge];
hr_qp->rq_inl_buf.wqe_list = wqe_list;
hr_qp->rq_inl_buf.wqe_cnt = wqe_cnt;
return 0;
err_wqe_list:
kfree(wqe_list);
err:
return -ENOMEM;
}
static void free_rq_inline_buf(struct hns_roce_qp *hr_qp)
{
kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
kfree(hr_qp->rq_inl_buf.wqe_list);
}
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
struct ib_pd *ib_pd, struct ib_pd *ib_pd,
struct ib_qp_init_attr *init_attr, struct ib_qp_init_attr *init_attr,
struct ib_udata *udata, unsigned long sqpn, struct ib_udata *udata, unsigned long sqpn,
struct hns_roce_qp *hr_qp) struct hns_roce_qp *hr_qp)
{ {
dma_addr_t *buf_list[ARRAY_SIZE(hr_qp->regions)] = { 0 }; dma_addr_t *buf_list[ARRAY_SIZE(hr_qp->regions)] = { NULL };
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
struct hns_roce_ib_create_qp ucmd; struct hns_roce_ib_create_qp ucmd;
struct hns_roce_ib_create_qp_resp resp = {}; struct hns_roce_ib_create_qp_resp resp = {};
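
The receive-inline bookkeeping moves into alloc_rq_inline_buf()/free_rq_inline_buf(). The layout is worth noting: one kcalloc() for the per-WQE descriptors plus a single contiguous kcalloc() for every scatter entry, with each WQE pointing at its own max_recv_sge-sized slice, so teardown is just two kfree() calls. Condensed from the new helper, with error unwinding elided:

/* One contiguous buffer holds the sge slots for every inline WQE. */
wqe_list = kcalloc(wqe_cnt, sizeof(struct hns_roce_rinl_wqe), GFP_KERNEL);

wqe_list[0].sg_list = kcalloc(wqe_cnt,
			      max_recv_sge * sizeof(struct hns_roce_rinl_sge),
			      GFP_KERNEL);

/* Each WQE gets a max_recv_sge-sized window into that buffer. */
for (i = 1; i < wqe_cnt; i++)
	wqe_list[i].sg_list = &wqe_list[0].sg_list[i * max_recv_sge];

hr_qp->rq_inl_buf.wqe_list = wqe_list;
hr_qp->rq_inl_buf.wqe_cnt = wqe_cnt;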
@ -635,9 +707,9 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
hr_qp->ibqp.qp_type = init_attr->qp_type; hr_qp->ibqp.qp_type = init_attr->qp_type;
if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_ALL_WR); hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
else else
hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_REQ_WR); hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;
ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, udata, ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, udata,
hns_roce_qp_has_rq(init_attr), hr_qp); hns_roce_qp_has_rq(init_attr), hr_qp);
@ -648,33 +720,11 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) && if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
hns_roce_qp_has_rq(init_attr)) { hns_roce_qp_has_rq(init_attr)) {
/* allocate recv inline buf */ ret = alloc_rq_inline_buf(hr_qp, init_attr);
hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt, if (ret) {
sizeof(struct hns_roce_rinl_wqe), dev_err(dev, "allocate receive inline buffer failed\n");
GFP_KERNEL);
if (!hr_qp->rq_inl_buf.wqe_list) {
ret = -ENOMEM;
goto err_out; goto err_out;
} }
hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt;
/* Firstly, allocate a list of sge space buffer */
hr_qp->rq_inl_buf.wqe_list[0].sg_list =
kcalloc(hr_qp->rq_inl_buf.wqe_cnt,
init_attr->cap.max_recv_sge *
sizeof(struct hns_roce_rinl_sge),
GFP_KERNEL);
if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) {
ret = -ENOMEM;
goto err_wqe_list;
}
for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++)
/* Secondly, reallocate the buffer */
hr_qp->rq_inl_buf.wqe_list[i].sg_list =
&hr_qp->rq_inl_buf.wqe_list[0].sg_list[i *
init_attr->cap.max_recv_sge];
} }
page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz; page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
@ -682,14 +732,14 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
dev_err(dev, "ib_copy_from_udata error for create qp\n"); dev_err(dev, "ib_copy_from_udata error for create qp\n");
ret = -EFAULT; ret = -EFAULT;
goto err_rq_sge_list; goto err_alloc_rq_inline_buf;
} }
ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
&ucmd); &ucmd);
if (ret) { if (ret) {
dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n"); dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
goto err_rq_sge_list; goto err_alloc_rq_inline_buf;
} }
hr_qp->umem = ib_umem_get(udata, ucmd.buf_addr, hr_qp->umem = ib_umem_get(udata, ucmd.buf_addr,
@ -697,7 +747,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
if (IS_ERR(hr_qp->umem)) { if (IS_ERR(hr_qp->umem)) {
dev_err(dev, "ib_umem_get error for create qp\n"); dev_err(dev, "ib_umem_get error for create qp\n");
ret = PTR_ERR(hr_qp->umem); ret = PTR_ERR(hr_qp->umem);
goto err_rq_sge_list; goto err_alloc_rq_inline_buf;
} }
hr_qp->region_cnt = split_wqe_buf_region(hr_dev, hr_qp, hr_qp->region_cnt = split_wqe_buf_region(hr_dev, hr_qp,
hr_qp->regions, ARRAY_SIZE(hr_qp->regions), hr_qp->regions, ARRAY_SIZE(hr_qp->regions),
@ -758,13 +808,13 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) { IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
dev_err(dev, "init_attr->create_flags error!\n"); dev_err(dev, "init_attr->create_flags error!\n");
ret = -EINVAL; ret = -EINVAL;
goto err_rq_sge_list; goto err_alloc_rq_inline_buf;
} }
if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) { if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
dev_err(dev, "init_attr->create_flags error!\n"); dev_err(dev, "init_attr->create_flags error!\n");
ret = -EINVAL; ret = -EINVAL;
goto err_rq_sge_list; goto err_alloc_rq_inline_buf;
} }
/* Set SQ size */ /* Set SQ size */
@ -772,7 +822,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
hr_qp); hr_qp);
if (ret) { if (ret) {
dev_err(dev, "hns_roce_set_kernel_sq_size error!\n"); dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
goto err_rq_sge_list; goto err_alloc_rq_inline_buf;
} }
/* QP doorbell register address */ /* QP doorbell register address */
@ -786,7 +836,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0); ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
if (ret) { if (ret) {
dev_err(dev, "rq record doorbell alloc failed!\n"); dev_err(dev, "rq record doorbell alloc failed!\n");
goto err_rq_sge_list; goto err_alloc_rq_inline_buf;
} }
*hr_qp->rdb.db_record = 0; *hr_qp->rdb.db_record = 0;
hr_qp->rdb_en = 1; hr_qp->rdb_en = 1;
@ -826,11 +876,18 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
hr_qp->sq.wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), hr_qp->sq.wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64),
GFP_KERNEL); GFP_KERNEL);
hr_qp->rq.wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), if (ZERO_OR_NULL_PTR(hr_qp->sq.wrid)) {
GFP_KERNEL);
if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
ret = -ENOMEM; ret = -ENOMEM;
goto err_wrid; goto err_get_bufs;
}
if (hr_qp->rq.wqe_cnt) {
hr_qp->rq.wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64),
GFP_KERNEL);
if (ZERO_OR_NULL_PTR(hr_qp->rq.wrid)) {
ret = -ENOMEM;
goto err_sq_wrid;
}
} }
} }
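
The wrid allocation now guards against two subtleties: kcalloc() with a zero count returns ZERO_SIZE_PTR, which passes a plain NULL test but must never be dereferenced, and a QP whose receive queue has no entries should not allocate rq.wrid at all. A minimal sketch of the pattern (the real code jumps to its unwind labels rather than returning directly):

hr_qp->sq.wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL);
if (ZERO_OR_NULL_PTR(hr_qp->sq.wrid))
	return -ENOMEM;		/* kcalloc(0, ...) also lands here */

if (hr_qp->rq.wqe_cnt) {
	hr_qp->rq.wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(hr_qp->rq.wrid))
		return -ENOMEM;
}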
@ -875,7 +932,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
if (sqpn) if (sqpn)
hr_qp->doorbell_qpn = 1; hr_qp->doorbell_qpn = 1;
else else
hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn); hr_qp->doorbell_qpn = (u32)hr_qp->qpn;
if (udata) { if (udata) {
ret = ib_copy_to_udata(udata, &resp, ret = ib_copy_to_udata(udata, &resp,
@ -916,8 +973,8 @@ err_wrid:
hns_roce_qp_has_rq(init_attr)) hns_roce_qp_has_rq(init_attr))
hns_roce_db_unmap_user(uctx, &hr_qp->rdb); hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
} else { } else {
kfree(hr_qp->sq.wrid); if (hr_qp->rq.wqe_cnt)
kfree(hr_qp->rq.wrid); kfree(hr_qp->rq.wrid);
} }
err_sq_dbmap: err_sq_dbmap:
@ -928,6 +985,10 @@ err_sq_dbmap:
hns_roce_qp_has_sq(init_attr)) hns_roce_qp_has_sq(init_attr))
hns_roce_db_unmap_user(uctx, &hr_qp->sdb); hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
err_sq_wrid:
if (!udata)
kfree(hr_qp->sq.wrid);
err_get_bufs: err_get_bufs:
hns_roce_free_buf_list(buf_list, hr_qp->region_cnt); hns_roce_free_buf_list(buf_list, hr_qp->region_cnt);
@ -941,13 +1002,10 @@ err_db:
(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
hns_roce_free_db(hr_dev, &hr_qp->rdb); hns_roce_free_db(hr_dev, &hr_qp->rdb);
err_rq_sge_list: err_alloc_rq_inline_buf:
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list); hns_roce_qp_has_rq(init_attr))
free_rq_inline_buf(hr_qp);
err_wqe_list:
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
kfree(hr_qp->rq_inl_buf.wqe_list);
err_out: err_out:
return ret; return ret;
@ -958,7 +1016,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
struct ib_udata *udata) struct ib_udata *udata)
{ {
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct device *dev = hr_dev->dev; struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_sqp *hr_sqp; struct hns_roce_sqp *hr_sqp;
struct hns_roce_qp *hr_qp; struct hns_roce_qp *hr_qp;
int ret; int ret;
@ -972,7 +1030,8 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0, ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
hr_qp); hr_qp);
if (ret) { if (ret) {
dev_err(dev, "Create RC QP failed\n"); ibdev_err(ibdev, "Create RC QP 0x%06lx failed(%d)\n",
hr_qp->qpn, ret);
kfree(hr_qp); kfree(hr_qp);
return ERR_PTR(ret); return ERR_PTR(ret);
} }
@ -984,7 +1043,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
case IB_QPT_GSI: { case IB_QPT_GSI: {
/* Userspace is not allowed to create special QPs: */ /* Userspace is not allowed to create special QPs: */
if (udata) { if (udata) {
dev_err(dev, "not support usr space GSI\n"); ibdev_err(ibdev, "not support usr space GSI\n");
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
@ -1006,7 +1065,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
hr_qp->ibqp.qp_num, hr_qp); hr_qp->ibqp.qp_num, hr_qp);
if (ret) { if (ret) {
dev_err(dev, "Create GSI QP failed!\n"); ibdev_err(ibdev, "Create GSI QP failed!\n");
kfree(hr_sqp); kfree(hr_sqp);
return ERR_PTR(ret); return ERR_PTR(ret);
} }
@ -1014,7 +1073,8 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
break; break;
} }
default:{ default:{
dev_err(dev, "not support QP type %d\n", init_attr->qp_type); ibdev_err(ibdev, "not support QP type %d\n",
init_attr->qp_type);
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
} }
@ -1040,23 +1100,88 @@ int to_hr_qp_type(int qp_type)
return transport_type; return transport_type;
} }
static int check_mtu_validate(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp,
struct ib_qp_attr *attr, int attr_mask)
{
enum ib_mtu active_mtu;
int p;
p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);
if ((hr_dev->caps.max_mtu >= IB_MTU_2048 &&
attr->path_mtu > hr_dev->caps.max_mtu) ||
attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) {
ibdev_err(&hr_dev->ib_dev,
"attr path_mtu(%d)invalid while modify qp",
attr->path_mtu);
return -EINVAL;
}
return 0;
}
static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
int p;
if ((attr_mask & IB_QP_PORT) &&
(attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
ibdev_err(&hr_dev->ib_dev,
"attr port_num invalid.attr->port_num=%d\n",
attr->port_num);
return -EINVAL;
}
if (attr_mask & IB_QP_PKEY_INDEX) {
p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
ibdev_err(&hr_dev->ib_dev,
"attr pkey_index invalid.attr->pkey_index=%d\n",
attr->pkey_index);
return -EINVAL;
}
}
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
ibdev_err(&hr_dev->ib_dev,
"attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
attr->max_rd_atomic);
return -EINVAL;
}
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
ibdev_err(&hr_dev->ib_dev,
"attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n",
attr->max_dest_rd_atomic);
return -EINVAL;
}
if (attr_mask & IB_QP_PATH_MTU)
return check_mtu_validate(hr_dev, hr_qp, attr, attr_mask);
return 0;
}
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata) int attr_mask, struct ib_udata *udata)
{ {
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
enum ib_qp_state cur_state, new_state; enum ib_qp_state cur_state, new_state;
struct device *dev = hr_dev->dev;
int ret = -EINVAL; int ret = -EINVAL;
int p;
enum ib_mtu active_mtu;
mutex_lock(&hr_qp->mutex); mutex_lock(&hr_qp->mutex);
cur_state = attr_mask & IB_QP_CUR_STATE ? cur_state = attr_mask & IB_QP_CUR_STATE ?
attr->cur_qp_state : (enum ib_qp_state)hr_qp->state; attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
new_state = attr_mask & IB_QP_STATE ? new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
attr->qp_state : cur_state;
if (ibqp->uobject && if (ibqp->uobject &&
(attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) { (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
@ -1066,67 +1191,27 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (hr_qp->rdb_en == 1) if (hr_qp->rdb_en == 1)
hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr); hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
} else { } else {
dev_warn(dev, "flush cqe is not supported in userspace!\n"); ibdev_warn(&hr_dev->ib_dev,
"flush cqe is not supported in userspace!\n");
goto out; goto out;
} }
} }
if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
attr_mask)) { attr_mask)) {
dev_err(dev, "ib_modify_qp_is_ok failed\n"); ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n");
goto out; goto out;
} }
if ((attr_mask & IB_QP_PORT) && ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask);
(attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) { if (ret)
dev_err(dev, "attr port_num invalid.attr->port_num=%d\n",
attr->port_num);
goto out; goto out;
}
if (attr_mask & IB_QP_PKEY_INDEX) {
p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
dev_err(dev, "attr pkey_index invalid.attr->pkey_index=%d\n",
attr->pkey_index);
goto out;
}
}
if (attr_mask & IB_QP_PATH_MTU) {
p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);
if ((hr_dev->caps.max_mtu == IB_MTU_4096 &&
attr->path_mtu > IB_MTU_4096) ||
(hr_dev->caps.max_mtu == IB_MTU_2048 &&
attr->path_mtu > IB_MTU_2048) ||
attr->path_mtu < IB_MTU_256 ||
attr->path_mtu > active_mtu) {
dev_err(dev, "attr path_mtu(%d)invalid while modify qp",
attr->path_mtu);
goto out;
}
}
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
attr->max_rd_atomic);
goto out;
}
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
dev_err(dev, "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n",
attr->max_dest_rd_atomic);
goto out;
}
if (cur_state == new_state && cur_state == IB_QPS_RESET) { if (cur_state == new_state && cur_state == IB_QPS_RESET) {
if (hr_dev->caps.min_wqes) { if (hr_dev->caps.min_wqes) {
ret = -EPERM; ret = -EPERM;
dev_err(dev, "cur_state=%d new_state=%d\n", cur_state, ibdev_err(&hr_dev->ib_dev,
"cur_state=%d new_state=%d\n", cur_state,
new_state); new_state);
} else { } else {
ret = 0; ret = 0;

View File

@ -175,6 +175,76 @@ static void hns_roce_srq_free(struct hns_roce_dev *hr_dev,
hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR); hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
} }
static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
int srq_buf_size)
{
struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
struct hns_roce_ib_create_srq ucmd;
u32 page_shift;
u32 npages;
int ret;
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
return -EFAULT;
srq->umem = ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0, 0);
if (IS_ERR(srq->umem))
return PTR_ERR(srq->umem);
npages = (ib_umem_page_count(srq->umem) +
(1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
(1 << hr_dev->caps.srqwqe_buf_pg_sz);
page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
ret = hns_roce_mtt_init(hr_dev, npages, page_shift, &srq->mtt);
if (ret)
goto err_user_buf;
ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem);
if (ret)
goto err_user_srq_mtt;
/* config index queue BA */
srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr,
srq->idx_que.buf_size, 0, 0);
if (IS_ERR(srq->idx_que.umem)) {
dev_err(hr_dev->dev, "ib_umem_get error for index queue\n");
ret = PTR_ERR(srq->idx_que.umem);
goto err_user_srq_mtt;
}
ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(srq->idx_que.umem),
PAGE_SHIFT, &srq->idx_que.mtt);
if (ret) {
dev_err(hr_dev->dev, "hns_roce_mtt_init error for idx que\n");
goto err_user_idx_mtt;
}
ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->idx_que.mtt,
srq->idx_que.umem);
if (ret) {
dev_err(hr_dev->dev,
"hns_roce_ib_umem_write_mtt error for idx que\n");
goto err_user_idx_buf;
}
return 0;
err_user_idx_buf:
hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
err_user_idx_mtt:
ib_umem_release(srq->idx_que.umem);
err_user_srq_mtt:
hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
err_user_buf:
ib_umem_release(srq->umem);
return ret;
}
static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq, static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
u32 page_shift) u32 page_shift)
{ {
@ -196,6 +266,93 @@ static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
return 0; return 0;
} }
static int create_kernel_srq(struct hns_roce_srq *srq, int srq_buf_size)
{
struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
u32 page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
int ret;
if (hns_roce_buf_alloc(hr_dev, srq_buf_size, (1 << page_shift) * 2,
&srq->buf, page_shift))
return -ENOMEM;
srq->head = 0;
srq->tail = srq->max - 1;
ret = hns_roce_mtt_init(hr_dev, srq->buf.npages, srq->buf.page_shift,
&srq->mtt);
if (ret)
goto err_kernel_buf;
ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, &srq->buf);
if (ret)
goto err_kernel_srq_mtt;
page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
ret = hns_roce_create_idx_que(srq->ibsrq.pd, srq, page_shift);
if (ret) {
dev_err(hr_dev->dev, "Create idx queue fail(%d)!\n", ret);
goto err_kernel_srq_mtt;
}
/* Init mtt table for idx_que */
ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages,
srq->idx_que.idx_buf.page_shift,
&srq->idx_que.mtt);
if (ret)
goto err_kernel_create_idx;
/* Write buffer address into the mtt table */
ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt,
&srq->idx_que.idx_buf);
if (ret)
goto err_kernel_idx_buf;
srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
if (!srq->wrid) {
ret = -ENOMEM;
goto err_kernel_idx_buf;
}
return 0;
err_kernel_idx_buf:
hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
err_kernel_create_idx:
hns_roce_buf_free(hr_dev, srq->idx_que.buf_size,
&srq->idx_que.idx_buf);
kfree(srq->idx_que.bitmap);
err_kernel_srq_mtt:
hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
err_kernel_buf:
hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
return ret;
}
static void destroy_user_srq(struct hns_roce_dev *hr_dev,
struct hns_roce_srq *srq)
{
hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
ib_umem_release(srq->idx_que.umem);
hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
ib_umem_release(srq->umem);
}
static void destroy_kernel_srq(struct hns_roce_dev *hr_dev,
struct hns_roce_srq *srq, int srq_buf_size)
{
kvfree(srq->wrid);
hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
hns_roce_buf_free(hr_dev, srq->idx_que.buf_size, &srq->idx_que.idx_buf);
kfree(srq->idx_que.bitmap);
hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
}
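The create_*/destroy_* helpers added above follow the usual kernel unwind pattern: each acquired resource has a matching label, and a failure jumps to the label that releases everything acquired so far, in reverse order. A stripped-down illustration of the pattern (generic names, and do_configure() is an assumed helper, not driver code):

/* Generic illustration of the goto-unwind pattern used by the helpers above. */
static int setup_two_resources(struct device *dev)
{
	void *a, *b;
	int ret;

	a = kzalloc(64, GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	b = kzalloc(64, GFP_KERNEL);
	if (!b) {
		ret = -ENOMEM;
		goto err_free_a;	/* undo only what was already set up */
	}

	ret = do_configure(dev, a, b);	/* assumed helper for the example */
	if (ret)
		goto err_free_b;

	return 0;

err_free_b:
	kfree(b);
err_free_a:
	kfree(a);
	return ret;
}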
int hns_roce_create_srq(struct ib_srq *ib_srq, int hns_roce_create_srq(struct ib_srq *ib_srq,
struct ib_srq_init_attr *srq_init_attr, struct ib_srq_init_attr *srq_init_attr,
struct ib_udata *udata) struct ib_udata *udata)
@ -205,9 +362,7 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
struct hns_roce_srq *srq = to_hr_srq(ib_srq); struct hns_roce_srq *srq = to_hr_srq(ib_srq);
int srq_desc_size; int srq_desc_size;
int srq_buf_size; int srq_buf_size;
u32 page_shift;
int ret = 0; int ret = 0;
u32 npages;
u32 cqn; u32 cqn;
/* Check the actual SRQ wqe and SRQ sge num */ /* Check the actual SRQ wqe and SRQ sge num */
@ -233,115 +388,16 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX; srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;
if (udata) { if (udata) {
struct hns_roce_ib_create_srq ucmd; ret = create_user_srq(srq, udata, srq_buf_size);
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
return -EFAULT;
srq->umem =
ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0, 0);
if (IS_ERR(srq->umem))
return PTR_ERR(srq->umem);
if (hr_dev->caps.srqwqe_buf_pg_sz) {
npages = (ib_umem_page_count(srq->umem) +
(1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
(1 << hr_dev->caps.srqwqe_buf_pg_sz);
page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
ret = hns_roce_mtt_init(hr_dev, npages,
page_shift,
&srq->mtt);
} else
ret = hns_roce_mtt_init(hr_dev,
ib_umem_page_count(srq->umem),
PAGE_SHIFT, &srq->mtt);
if (ret)
goto err_buf;
ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem);
if (ret)
goto err_srq_mtt;
/* config index queue BA */
srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr,
srq->idx_que.buf_size, 0, 0);
if (IS_ERR(srq->idx_que.umem)) {
dev_err(hr_dev->dev,
"ib_umem_get error for index queue\n");
ret = PTR_ERR(srq->idx_que.umem);
goto err_srq_mtt;
}
if (hr_dev->caps.idx_buf_pg_sz) {
npages = (ib_umem_page_count(srq->idx_que.umem) +
(1 << hr_dev->caps.idx_buf_pg_sz) - 1) /
(1 << hr_dev->caps.idx_buf_pg_sz);
page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
ret = hns_roce_mtt_init(hr_dev, npages,
page_shift, &srq->idx_que.mtt);
} else {
ret = hns_roce_mtt_init(
hr_dev, ib_umem_page_count(srq->idx_que.umem),
PAGE_SHIFT, &srq->idx_que.mtt);
}
if (ret) { if (ret) {
dev_err(hr_dev->dev, dev_err(hr_dev->dev, "Create user srq failed\n");
"hns_roce_mtt_init error for idx que\n"); goto err_srq;
goto err_idx_mtt;
}
ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->idx_que.mtt,
srq->idx_que.umem);
if (ret) {
dev_err(hr_dev->dev,
"hns_roce_ib_umem_write_mtt error for idx que\n");
goto err_idx_buf;
} }
} else { } else {
page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz; ret = create_kernel_srq(srq, srq_buf_size);
if (hns_roce_buf_alloc(hr_dev, srq_buf_size,
(1 << page_shift) * 2, &srq->buf,
page_shift))
return -ENOMEM;
srq->head = 0;
srq->tail = srq->max - 1;
ret = hns_roce_mtt_init(hr_dev, srq->buf.npages,
srq->buf.page_shift, &srq->mtt);
if (ret)
goto err_buf;
ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, &srq->buf);
if (ret)
goto err_srq_mtt;
page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
ret = hns_roce_create_idx_que(ib_srq->pd, srq, page_shift);
if (ret) { if (ret) {
dev_err(hr_dev->dev, "Create idx queue fail(%d)!\n", dev_err(hr_dev->dev, "Create kernel srq failed\n");
ret); goto err_srq;
goto err_srq_mtt;
}
/* Init mtt table for idx_que */
ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages,
srq->idx_que.idx_buf.page_shift,
&srq->idx_que.mtt);
if (ret)
goto err_create_idx;
/* Write buffer address into the mtt table */
ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt,
&srq->idx_que.idx_buf);
if (ret)
goto err_idx_buf;
srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
if (!srq->wrid) {
ret = -ENOMEM;
goto err_idx_buf;
} }
} }
@ -356,7 +412,6 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
goto err_wrid; goto err_wrid;
srq->event = hns_roce_ib_srq_event; srq->event = hns_roce_ib_srq_event;
srq->ibsrq.ext.xrc.srq_num = srq->srqn;
resp.srqn = srq->srqn; resp.srqn = srq->srqn;
if (udata) { if (udata) {
@ -373,27 +428,12 @@ err_srqc_alloc:
hns_roce_srq_free(hr_dev, srq); hns_roce_srq_free(hr_dev, srq);
err_wrid: err_wrid:
kvfree(srq->wrid); if (udata)
destroy_user_srq(hr_dev, srq);
err_idx_buf: else
hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt); destroy_kernel_srq(hr_dev, srq, srq_buf_size);
err_idx_mtt:
ib_umem_release(srq->idx_que.umem);
err_create_idx:
hns_roce_buf_free(hr_dev, srq->idx_que.buf_size,
&srq->idx_que.idx_buf);
bitmap_free(srq->idx_que.bitmap);
err_srq_mtt:
hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
err_buf:
ib_umem_release(srq->umem);
if (!udata)
hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
err_srq:
return ret; return ret;
} }

View File

@@ -97,18 +97,7 @@ static int i40iw_query_port(struct ib_device *ibdev,
 			    u8 port,
 			    struct ib_port_attr *props)
 {
-	struct i40iw_device *iwdev = to_iwdev(ibdev);
-	struct net_device *netdev = iwdev->netdev;
-
-	/* props being zeroed by the caller, avoid zeroing it here */
-
-	props->max_mtu = IB_MTU_4096;
-	props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
 	props->lid = 1;
-	if (netif_carrier_ok(iwdev->netdev))
-		props->state = IB_PORT_ACTIVE;
-	else
-		props->state = IB_PORT_DOWN;
 	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
 				IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
 	props->gid_tbl_len = 1;

View File

@@ -734,7 +734,8 @@ out:
 
 static u8 state_to_phys_state(enum ib_port_state state)
 {
-	return state == IB_PORT_ACTIVE ? 5 : 3;
+	return state == IB_PORT_ACTIVE ?
+		IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
 }
 
 static int eth_link_query_port(struct ib_device *ibdev, u8 port,
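This hunk, and several later ones in this merge, replace raw phys_state numbers with named IB_PORT_PHYS_STATE_* constants. The enum itself is not part of this excerpt; a sketch of its assumed shape, listing only the members and values visible in these hunks (the old magic numbers encoded the IBTA PortPhysicalState values directly):

/* Assumed shape of the new ib_verbs.h enum; only members seen in this merge
 * are shown, the full definition presumably has more states.
 */
enum ib_port_phys_state {
	IB_PORT_PHYS_STATE_POLLING = 2,
	IB_PORT_PHYS_STATE_DISABLED = 3,
	IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
	IB_PORT_PHYS_STATE_LINK_UP = 5,
};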

View File

@@ -377,6 +377,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start,
 	 * again
 	 */
 	if (!ib_access_writable(access_flags)) {
+		unsigned long untagged_start = untagged_addr(start);
 		struct vm_area_struct *vma;
 
 		down_read(&current->mm->mmap_sem);
@@ -385,9 +386,9 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start,
 		 * cover the memory, but for now it requires a single vma to
 		 * entirely cover the MR to support RO mappings.
 		 */
-		vma = find_vma(current->mm, start);
-		if (vma && vma->vm_end >= start + length &&
-		    vma->vm_start <= start) {
+		vma = find_vma(current->mm, untagged_start);
+		if (vma && vma->vm_end >= untagged_start + length &&
+		    vma->vm_start <= untagged_start) {
 			if (vma->vm_flags & VM_WRITE)
 				access_flags |= IB_ACCESS_LOCAL_WRITE;
 		} else {
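The point of the change above: with arm64 pointer tagging, a user-supplied address can carry tag bits in its top byte, so comparing it directly against VMA bounds fails even when the mapping is fine. A stripped-down sketch of the check, derived from the hunk (not the driver's actual helper; the caller is assumed to hold mmap_sem):

/* Sketch derived from the hunk above. */
static bool range_covered_by_one_vma(struct mm_struct *mm, u64 start, u64 length)
{
	unsigned long untagged_start = untagged_addr(start);	/* strip tag bits */
	struct vm_area_struct *vma = find_vma(mm, untagged_start);

	return vma && vma->vm_start <= untagged_start &&
	       vma->vm_end >= untagged_start + length;
}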

View File

@@ -325,7 +325,7 @@ static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
 }
 
 static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
-		       bool is_user, int has_rq, struct mlx4_ib_qp *qp,
+		       bool is_user, bool has_rq, struct mlx4_ib_qp *qp,
 		       u32 inl_recv_sz)
 {
 	/* Sanity check RQ size before proceeding */
@@ -506,10 +506,10 @@ static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
 	kfree(qp->sqp_proxy_rcv);
 }
 
-static int qp_has_rq(struct ib_qp_init_attr *attr)
+static bool qp_has_rq(struct ib_qp_init_attr *attr)
 {
 	if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT)
-		return 0;
+		return false;
 
 	return !attr->srq;
 }
@ -855,12 +855,143 @@ static void mlx4_ib_release_wqn(struct mlx4_ib_ucontext *context,
mutex_unlock(&context->wqn_ranges_mutex); mutex_unlock(&context->wqn_ranges_mutex);
} }
static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
enum mlx4_ib_source_type src, struct ib_udata *udata, struct mlx4_ib_qp *qp)
struct ib_qp_init_attr *init_attr, {
struct mlx4_ib_dev *dev = to_mdev(pd->device);
int qpn;
int err;
struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
udata, struct mlx4_ib_ucontext, ibucontext);
struct mlx4_ib_cq *mcq;
unsigned long flags;
int range_size;
struct mlx4_ib_create_wq wq;
size_t copy_len;
int shift;
int n;
qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET;
mutex_init(&qp->mutex);
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
INIT_LIST_HEAD(&qp->gid_list);
INIT_LIST_HEAD(&qp->steering_rules);
qp->state = IB_QPS_RESET;
copy_len = min(sizeof(struct mlx4_ib_create_wq), udata->inlen);
if (ib_copy_from_udata(&wq, udata, copy_len)) {
err = -EFAULT;
goto err;
}
if (wq.comp_mask || wq.reserved[0] || wq.reserved[1] ||
wq.reserved[2]) {
pr_debug("user command isn't supported\n");
err = -EOPNOTSUPP;
goto err;
}
if (wq.log_range_size > ilog2(dev->dev->caps.max_rss_tbl_sz)) {
pr_debug("WQN range size must be equal or smaller than %d\n",
dev->dev->caps.max_rss_tbl_sz);
err = -EOPNOTSUPP;
goto err;
}
range_size = 1 << wq.log_range_size;
if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS)
qp->flags |= MLX4_IB_QP_SCATTER_FCS;
err = set_rq_size(dev, &init_attr->cap, true, true, qp, qp->inl_recv_sz);
if (err)
goto err;
qp->sq_no_prefetch = 1;
qp->sq.wqe_cnt = 1;
qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE;
qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
(qp->sq.wqe_cnt << qp->sq.wqe_shift);
qp->umem = ib_umem_get(udata, wq.buf_addr, qp->buf_size, 0, 0);
if (IS_ERR(qp->umem)) {
err = PTR_ERR(qp->umem);
goto err;
}
n = ib_umem_page_count(qp->umem);
shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
if (err)
goto err_buf;
err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
if (err)
goto err_mtt;
err = mlx4_ib_db_map_user(udata, wq.db_addr, &qp->db);
if (err)
goto err_mtt;
qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;
err = mlx4_ib_alloc_wqn(context, qp, range_size, &qpn);
if (err)
goto err_wrid;
err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
if (err)
goto err_qpn;
/*
* Hardware wants QPN written in big-endian order (after
* shifting) for send doorbell. Precompute this value to save
* a little bit when posting sends.
*/
qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
qp->mqp.event = mlx4_ib_wq_event;
spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
to_mcq(init_attr->recv_cq));
/* Maintain device to QPs access, needed for further handling
* via reset flow
*/
list_add_tail(&qp->qps_list, &dev->qp_list);
/* Maintain CQ to QPs access, needed for further handling
* via reset flow
*/
mcq = to_mcq(init_attr->send_cq);
list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
mcq = to_mcq(init_attr->recv_cq);
list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
to_mcq(init_attr->recv_cq));
spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
return 0;
err_qpn:
mlx4_ib_release_wqn(context, qp, 0);
err_wrid:
mlx4_ib_db_unmap_user(context, &qp->db);
err_mtt:
mlx4_mtt_cleanup(dev->dev, &qp->mtt);
err_buf:
ib_umem_release(qp->umem);
err:
return err;
}
static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
struct ib_udata *udata, int sqpn, struct ib_udata *udata, int sqpn,
struct mlx4_ib_qp **caller_qp) struct mlx4_ib_qp **caller_qp)
{ {
struct mlx4_ib_dev *dev = to_mdev(pd->device);
int qpn; int qpn;
int err; int err;
struct mlx4_ib_sqp *sqp = NULL; struct mlx4_ib_sqp *sqp = NULL;
@ -870,7 +1001,6 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type; enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
struct mlx4_ib_cq *mcq; struct mlx4_ib_cq *mcq;
unsigned long flags; unsigned long flags;
int range_size = 0;
/* When tunneling special qps, we use a plain UD qp */ /* When tunneling special qps, we use a plain UD qp */
if (sqpn) { if (sqpn) {
@ -921,15 +1051,13 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (!sqp) if (!sqp)
return -ENOMEM; return -ENOMEM;
qp = &sqp->qp; qp = &sqp->qp;
qp->pri.vid = 0xFFFF;
qp->alt.vid = 0xFFFF;
} else { } else {
qp = kzalloc(sizeof(struct mlx4_ib_qp), GFP_KERNEL); qp = kzalloc(sizeof(struct mlx4_ib_qp), GFP_KERNEL);
if (!qp) if (!qp)
return -ENOMEM; return -ENOMEM;
qp->pri.vid = 0xFFFF;
qp->alt.vid = 0xFFFF;
} }
qp->pri.vid = 0xFFFF;
qp->alt.vid = 0xFFFF;
} else } else
qp = *caller_qp; qp = *caller_qp;
@ -941,48 +1069,24 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
INIT_LIST_HEAD(&qp->gid_list); INIT_LIST_HEAD(&qp->gid_list);
INIT_LIST_HEAD(&qp->steering_rules); INIT_LIST_HEAD(&qp->steering_rules);
qp->state = IB_QPS_RESET; qp->state = IB_QPS_RESET;
if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
if (udata) { if (udata) {
union { struct mlx4_ib_create_qp ucmd;
struct mlx4_ib_create_qp qp;
struct mlx4_ib_create_wq wq;
} ucmd;
size_t copy_len; size_t copy_len;
int shift; int shift;
int n; int n;
copy_len = (src == MLX4_IB_QP_SRC) ? copy_len = sizeof(struct mlx4_ib_create_qp);
sizeof(struct mlx4_ib_create_qp) :
min(sizeof(struct mlx4_ib_create_wq), udata->inlen);
if (ib_copy_from_udata(&ucmd, udata, copy_len)) { if (ib_copy_from_udata(&ucmd, udata, copy_len)) {
err = -EFAULT; err = -EFAULT;
goto err; goto err;
} }
if (src == MLX4_IB_RWQ_SRC) { qp->inl_recv_sz = ucmd.inl_recv_sz;
if (ucmd.wq.comp_mask || ucmd.wq.reserved[0] ||
ucmd.wq.reserved[1] || ucmd.wq.reserved[2]) {
pr_debug("user command isn't supported\n");
err = -EOPNOTSUPP;
goto err;
}
if (ucmd.wq.log_range_size >
ilog2(dev->dev->caps.max_rss_tbl_sz)) {
pr_debug("WQN range size must be equal or smaller than %d\n",
dev->dev->caps.max_rss_tbl_sz);
err = -EOPNOTSUPP;
goto err;
}
range_size = 1 << ucmd.wq.log_range_size;
} else {
qp->inl_recv_sz = ucmd.qp.inl_recv_sz;
}
if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) { if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) {
if (!(dev->dev->caps.flags & if (!(dev->dev->caps.flags &
@ -1000,30 +1104,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (err) if (err)
goto err; goto err;
if (src == MLX4_IB_QP_SRC) { qp->sq_no_prefetch = ucmd.sq_no_prefetch;
qp->sq_no_prefetch = ucmd.qp.sq_no_prefetch;
err = set_user_sq_size(dev, qp, err = set_user_sq_size(dev, qp, &ucmd);
(struct mlx4_ib_create_qp *) if (err)
&ucmd); goto err;
if (err)
goto err;
} else {
qp->sq_no_prefetch = 1;
qp->sq.wqe_cnt = 1;
qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE;
/* Allocated buffer expects to have at least that SQ
* size.
*/
qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
(qp->sq.wqe_cnt << qp->sq.wqe_shift);
}
qp->umem = qp->umem =
ib_umem_get(udata, ib_umem_get(udata, ucmd.buf_addr, qp->buf_size, 0, 0);
(src == MLX4_IB_QP_SRC) ? ucmd.qp.buf_addr :
ucmd.wq.buf_addr,
qp->buf_size, 0, 0);
if (IS_ERR(qp->umem)) { if (IS_ERR(qp->umem)) {
err = PTR_ERR(qp->umem); err = PTR_ERR(qp->umem);
goto err; goto err;
@ -1041,11 +1129,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err_mtt; goto err_mtt;
if (qp_has_rq(init_attr)) { if (qp_has_rq(init_attr)) {
err = mlx4_ib_db_map_user(udata, err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &qp->db);
(src == MLX4_IB_QP_SRC) ?
ucmd.qp.db_addr :
ucmd.wq.db_addr,
&qp->db);
if (err) if (err)
goto err_mtt; goto err_mtt;
} }
@ -1115,10 +1199,6 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err_wrid; goto err_wrid;
} }
} }
} else if (src == MLX4_IB_RWQ_SRC) {
err = mlx4_ib_alloc_wqn(context, qp, range_size, &qpn);
if (err)
goto err_wrid;
} else { } else {
/* Raw packet QPNs may not have bits 6,7 set in their qp_num; /* Raw packet QPNs may not have bits 6,7 set in their qp_num;
* otherwise, the WQE BlueFlame setup flow wrongly causes * otherwise, the WQE BlueFlame setup flow wrongly causes
@ -1157,8 +1237,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
*/ */
qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
qp->mqp.event = (src == MLX4_IB_QP_SRC) ? mlx4_ib_qp_event : qp->mqp.event = mlx4_ib_qp_event;
mlx4_ib_wq_event;
if (!*caller_qp) if (!*caller_qp)
*caller_qp = qp; *caller_qp = qp;
@ -1186,8 +1265,6 @@ err_qpn:
if (!sqpn) { if (!sqpn) {
if (qp->flags & MLX4_IB_QP_NETIF) if (qp->flags & MLX4_IB_QP_NETIF)
mlx4_ib_steer_qp_free(dev, qpn, 1); mlx4_ib_steer_qp_free(dev, qpn, 1);
else if (src == MLX4_IB_RWQ_SRC)
mlx4_ib_release_wqn(context, qp, 0);
else else
mlx4_qp_release_range(dev->dev, qpn, 1); mlx4_qp_release_range(dev->dev, qpn, 1);
} }
@ -1518,8 +1595,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
/* fall through */ /* fall through */
case IB_QPT_UD: case IB_QPT_UD:
{ {
err = create_qp_common(to_mdev(pd->device), pd, MLX4_IB_QP_SRC, err = create_qp_common(pd, init_attr, udata, 0, &qp);
init_attr, udata, 0, &qp);
if (err) { if (err) {
kfree(qp); kfree(qp);
return ERR_PTR(err); return ERR_PTR(err);
@ -1549,8 +1625,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
sqpn = get_sqp_num(to_mdev(pd->device), init_attr); sqpn = get_sqp_num(to_mdev(pd->device), init_attr);
} }
err = create_qp_common(to_mdev(pd->device), pd, MLX4_IB_QP_SRC, err = create_qp_common(pd, init_attr, udata, sqpn, &qp);
init_attr, udata, sqpn, &qp);
if (err) if (err)
return ERR_PTR(err); return ERR_PTR(err);
@ -4047,8 +4122,8 @@ struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr, struct ib_wq_init_attr *init_attr,
struct ib_udata *udata) struct ib_udata *udata)
{ {
struct mlx4_ib_dev *dev; struct mlx4_dev *dev = to_mdev(pd->device)->dev;
struct ib_qp_init_attr ib_qp_init_attr; struct ib_qp_init_attr ib_qp_init_attr = {};
struct mlx4_ib_qp *qp; struct mlx4_ib_qp *qp;
struct mlx4_ib_create_wq ucmd; struct mlx4_ib_create_wq ucmd;
int err, required_cmd_sz; int err, required_cmd_sz;
@ -4073,14 +4148,13 @@ struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
if (udata->outlen) if (udata->outlen)
return ERR_PTR(-EOPNOTSUPP); return ERR_PTR(-EOPNOTSUPP);
dev = to_mdev(pd->device);
if (init_attr->wq_type != IB_WQT_RQ) { if (init_attr->wq_type != IB_WQT_RQ) {
pr_debug("unsupported wq type %d\n", init_attr->wq_type); pr_debug("unsupported wq type %d\n", init_attr->wq_type);
return ERR_PTR(-EOPNOTSUPP); return ERR_PTR(-EOPNOTSUPP);
} }
if (init_attr->create_flags & ~IB_WQ_FLAGS_SCATTER_FCS) { if (init_attr->create_flags & ~IB_WQ_FLAGS_SCATTER_FCS ||
!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
pr_debug("unsupported create_flags %u\n", pr_debug("unsupported create_flags %u\n",
init_attr->create_flags); init_attr->create_flags);
return ERR_PTR(-EOPNOTSUPP); return ERR_PTR(-EOPNOTSUPP);
@ -4093,7 +4167,6 @@ struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
qp->pri.vid = 0xFFFF; qp->pri.vid = 0xFFFF;
qp->alt.vid = 0xFFFF; qp->alt.vid = 0xFFFF;
memset(&ib_qp_init_attr, 0, sizeof(ib_qp_init_attr));
ib_qp_init_attr.qp_context = init_attr->wq_context; ib_qp_init_attr.qp_context = init_attr->wq_context;
ib_qp_init_attr.qp_type = IB_QPT_RAW_PACKET; ib_qp_init_attr.qp_type = IB_QPT_RAW_PACKET;
ib_qp_init_attr.cap.max_recv_wr = init_attr->max_wr; ib_qp_init_attr.cap.max_recv_wr = init_attr->max_wr;
@ -4104,8 +4177,7 @@ struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS) if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS)
ib_qp_init_attr.create_flags |= IB_QP_CREATE_SCATTER_FCS; ib_qp_init_attr.create_flags |= IB_QP_CREATE_SCATTER_FCS;
err = create_qp_common(dev, pd, MLX4_IB_RWQ_SRC, &ib_qp_init_attr, err = create_rq(pd, &ib_qp_init_attr, udata, qp);
udata, 0, &qp);
if (err) { if (err) {
kfree(qp); kfree(qp);
return ERR_PTR(err); return ERR_PTR(err);

View File

@ -233,6 +233,8 @@ static bool is_legacy_obj_event_num(u16 event_num)
case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
case MLX5_EVENT_TYPE_DCT_DRAINED: case MLX5_EVENT_TYPE_DCT_DRAINED:
case MLX5_EVENT_TYPE_COMP: case MLX5_EVENT_TYPE_COMP:
case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
case MLX5_EVENT_TYPE_XRQ_ERROR:
return true; return true;
default: default:
return false; return false;
@ -315,8 +317,10 @@ static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe)
case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
return eqe->data.qp_srq.type; return eqe->data.qp_srq.type;
case MLX5_EVENT_TYPE_CQ_ERROR: case MLX5_EVENT_TYPE_CQ_ERROR:
case MLX5_EVENT_TYPE_XRQ_ERROR:
return 0; return 0;
case MLX5_EVENT_TYPE_DCT_DRAINED: case MLX5_EVENT_TYPE_DCT_DRAINED:
case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
return MLX5_EVENT_QUEUE_TYPE_DCT; return MLX5_EVENT_QUEUE_TYPE_DCT;
default: default:
return MLX5_GET(affiliated_event_header, &eqe->data, obj_type); return MLX5_GET(affiliated_event_header, &eqe->data, obj_type);
@ -542,6 +546,8 @@ static u64 devx_get_obj_id(const void *in)
break; break;
case MLX5_CMD_OP_ARM_XRQ: case MLX5_CMD_OP_ARM_XRQ:
case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY: case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
case MLX5_CMD_OP_MODIFY_XRQ:
obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ, obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
MLX5_GET(arm_xrq_in, in, xrqn)); MLX5_GET(arm_xrq_in, in, xrqn));
break; break;
@ -776,6 +782,14 @@ static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
return true; return true;
return false; return false;
} }
case MLX5_CMD_OP_CREATE_PSV:
{
u8 num_psv = MLX5_GET(create_psv_in, in, num_psv);
if (num_psv == 1)
return true;
return false;
}
default: default:
return false; return false;
} }
@ -810,6 +824,8 @@ static bool devx_is_obj_modify_cmd(const void *in)
case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION: case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
case MLX5_CMD_OP_ARM_XRQ: case MLX5_CMD_OP_ARM_XRQ:
case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY: case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
case MLX5_CMD_OP_MODIFY_XRQ:
return true; return true;
case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
{ {
@ -1216,6 +1232,12 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
case MLX5_CMD_OP_ALLOC_XRCD: case MLX5_CMD_OP_ALLOC_XRCD:
MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD); MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
break; break;
case MLX5_CMD_OP_CREATE_PSV:
MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
MLX5_CMD_OP_DESTROY_PSV);
MLX5_SET(destroy_psv_in, din, psvn,
MLX5_GET(create_psv_out, out, psv0_index));
break;
default: default:
/* The entry must match to one of the devx_is_obj_create_cmd */ /* The entry must match to one of the devx_is_obj_create_cmd */
WARN_ON(true); WARN_ON(true);
@ -2286,7 +2308,11 @@ static u32 devx_get_obj_id_from_event(unsigned long event_type, void *data)
case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
break; break;
case MLX5_EVENT_TYPE_XRQ_ERROR:
obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff;
break;
case MLX5_EVENT_TYPE_DCT_DRAINED: case MLX5_EVENT_TYPE_DCT_DRAINED:
case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff; obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
break; break;
case MLX5_EVENT_TYPE_CQ_ERROR: case MLX5_EVENT_TYPE_CQ_ERROR:

View File

@ -32,6 +32,9 @@ mlx5_ib_ft_type_to_namespace(enum mlx5_ib_uapi_flow_table_type table_type,
case MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB: case MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB:
*namespace = MLX5_FLOW_NAMESPACE_FDB; *namespace = MLX5_FLOW_NAMESPACE_FDB;
break; break;
case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX:
*namespace = MLX5_FLOW_NAMESPACE_RDMA_RX;
break;
default: default:
return -EINVAL; return -EINVAL;
} }
@ -101,6 +104,11 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && !dest_devx) if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && !dest_devx)
return -EINVAL; return -EINVAL;
/* Allow only DEVX object or QP as dest when inserting to RDMA_RX */
if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) &&
((!dest_devx && !dest_qp) || (dest_devx && dest_qp)))
return -EINVAL;
if (dest_devx) { if (dest_devx) {
devx_obj = uverbs_attr_get_obj( devx_obj = uverbs_attr_get_obj(
attrs, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX); attrs, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
@ -112,8 +120,9 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
*/ */
if (!mlx5_ib_devx_is_flow_dest(devx_obj, &dest_id, &dest_type)) if (!mlx5_ib_devx_is_flow_dest(devx_obj, &dest_id, &dest_type))
return -EINVAL; return -EINVAL;
/* Allow only flow table as dest when inserting to FDB */ /* Allow only flow table as dest when inserting to FDB or RDMA_RX */
if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB ||
fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) &&
dest_type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) dest_type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
return -EINVAL; return -EINVAL;
} else if (dest_qp) { } else if (dest_qp) {

View File

@@ -535,7 +535,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
 	props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
 	props->pkey_tbl_len = 1;
 	props->state = IB_PORT_DOWN;
-	props->phys_state = 3;
+	props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
 
 	mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
 	props->qkey_viol_cntr = qkey_viol_cntr;
@@ -561,7 +561,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
 
 	if (netif_running(ndev) && netif_carrier_ok(ndev)) {
 		props->state = IB_PORT_ACTIVE;
-		props->phys_state = 5;
+		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
 	}
 
 	ndev_ib_mtu = iboe_get_mtu(ndev->mtu);
@ -3971,6 +3971,11 @@ _get_flow_table(struct mlx5_ib_dev *dev,
esw_encap) esw_encap)
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
priority = FDB_BYPASS_PATH; priority = FDB_BYPASS_PATH;
} else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) {
max_table_size =
BIT(MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev,
log_max_ft_size));
priority = fs_matcher->priority;
} }
max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES); max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES);
@ -3985,6 +3990,8 @@ _get_flow_table(struct mlx5_ib_dev *dev,
prio = &dev->flow_db->egress_prios[priority]; prio = &dev->flow_db->egress_prios[priority];
else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB) else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB)
prio = &dev->flow_db->fdb; prio = &dev->flow_db->fdb;
else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX)
prio = &dev->flow_db->rdma_rx[priority];
if (!prio) if (!prio)
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
@ -5326,11 +5333,21 @@ static const struct mlx5_ib_counter ext_ppcnt_cnts[] = {
INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated), INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated),
}; };
static bool is_mdev_switchdev_mode(const struct mlx5_core_dev *mdev)
{
return MLX5_ESWITCH_MANAGER(mdev) &&
mlx5_ib_eswitch_mode(mdev->priv.eswitch) ==
MLX5_ESWITCH_OFFLOADS;
}
static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev) static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
{ {
int num_cnt_ports;
int i; int i;
for (i = 0; i < dev->num_ports; i++) { num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
for (i = 0; i < num_cnt_ports; i++) {
if (dev->port[i].cnts.set_id_valid) if (dev->port[i].cnts.set_id_valid)
mlx5_core_dealloc_q_counter(dev->mdev, mlx5_core_dealloc_q_counter(dev->mdev,
dev->port[i].cnts.set_id); dev->port[i].cnts.set_id);
@ -5432,13 +5449,15 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev) static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
{ {
int num_cnt_ports;
int err = 0; int err = 0;
int i; int i;
bool is_shared; bool is_shared;
is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0; is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0;
num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
for (i = 0; i < dev->num_ports; i++) { for (i = 0; i < num_cnt_ports; i++) {
err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts); err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts);
if (err) if (err)
goto err_alloc; goto err_alloc;
@ -5458,7 +5477,6 @@ static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
} }
dev->port[i].cnts.set_id_valid = true; dev->port[i].cnts.set_id_valid = true;
} }
return 0; return 0;
err_alloc: err_alloc:
@ -5466,25 +5484,50 @@ err_alloc:
return err; return err;
} }
static const struct mlx5_ib_counters *get_counters(struct mlx5_ib_dev *dev,
u8 port_num)
{
return is_mdev_switchdev_mode(dev->mdev) ? &dev->port[0].cnts :
&dev->port[port_num].cnts;
}
/**
* mlx5_ib_get_counters_id - Returns counters id to use for device+port
* @dev: Pointer to mlx5 IB device
* @port_num: Zero based port number
*
* mlx5_ib_get_counters_id() Returns counters set id to use for given
* device port combination in switchdev and non switchdev mode of the
* parent device.
*/
u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num)
{
const struct mlx5_ib_counters *cnts = get_counters(dev, port_num);
return cnts->set_id;
}
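The kernel-doc above describes the new helper; the qp.c hunks later in this merge switch their callers over to it instead of reaching into dev->port[].cnts directly. A representative caller following that pattern (the wrapper function name here is made up for illustration):

/* Hypothetical wrapper showing the caller pattern used later in qp.c:
 * resolve the counter set id for a 0-based port, then program it into the
 * RQ context.
 */
static void set_rq_counter(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp, void *rqc)
{
	u16 set_id = mlx5_ib_get_counters_id(dev, mqp->port - 1);

	MLX5_SET(rqc, rqc, counter_set_id, set_id);
}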
static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev, static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
u8 port_num) u8 port_num)
{ {
struct mlx5_ib_dev *dev = to_mdev(ibdev); struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_ib_port *port = &dev->port[port_num - 1]; const struct mlx5_ib_counters *cnts;
bool is_switchdev = is_mdev_switchdev_mode(dev->mdev);
/* We support only per port stats */ if ((is_switchdev && port_num) || (!is_switchdev && !port_num))
if (port_num == 0)
return NULL; return NULL;
return rdma_alloc_hw_stats_struct(port->cnts.names, cnts = get_counters(dev, port_num - 1);
port->cnts.num_q_counters +
port->cnts.num_cong_counters + return rdma_alloc_hw_stats_struct(cnts->names,
port->cnts.num_ext_ppcnt_counters, cnts->num_q_counters +
cnts->num_cong_counters +
cnts->num_ext_ppcnt_counters,
RDMA_HW_STATS_DEFAULT_LIFESPAN); RDMA_HW_STATS_DEFAULT_LIFESPAN);
} }
static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev, static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
struct mlx5_ib_port *port, const struct mlx5_ib_counters *cnts,
struct rdma_hw_stats *stats, struct rdma_hw_stats *stats,
u16 set_id) u16 set_id)
{ {
@ -5501,8 +5544,8 @@ static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
if (ret) if (ret)
goto free; goto free;
for (i = 0; i < port->cnts.num_q_counters; i++) { for (i = 0; i < cnts->num_q_counters; i++) {
val = *(__be32 *)(out + port->cnts.offsets[i]); val = *(__be32 *)(out + cnts->offsets[i]);
stats->value[i] = (u64)be32_to_cpu(val); stats->value[i] = (u64)be32_to_cpu(val);
} }
@ -5512,10 +5555,10 @@ free:
} }
static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev, static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev,
struct mlx5_ib_port *port, const struct mlx5_ib_counters *cnts,
struct rdma_hw_stats *stats) struct rdma_hw_stats *stats)
{ {
int offset = port->cnts.num_q_counters + port->cnts.num_cong_counters; int offset = cnts->num_q_counters + cnts->num_cong_counters;
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
int ret, i; int ret, i;
void *out; void *out;
@ -5528,12 +5571,10 @@ static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev,
if (ret) if (ret)
goto free; goto free;
for (i = 0; i < port->cnts.num_ext_ppcnt_counters; i++) { for (i = 0; i < cnts->num_ext_ppcnt_counters; i++)
stats->value[i + offset] = stats->value[i + offset] =
be64_to_cpup((__be64 *)(out + be64_to_cpup((__be64 *)(out +
port->cnts.offsets[i + offset])); cnts->offsets[i + offset]));
}
free: free:
kvfree(out); kvfree(out);
return ret; return ret;
@ -5544,7 +5585,7 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
u8 port_num, int index) u8 port_num, int index)
{ {
struct mlx5_ib_dev *dev = to_mdev(ibdev); struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_ib_port *port = &dev->port[port_num - 1]; const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1);
struct mlx5_core_dev *mdev; struct mlx5_core_dev *mdev;
int ret, num_counters; int ret, num_counters;
u8 mdev_port_num; u8 mdev_port_num;
@ -5552,18 +5593,17 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
if (!stats) if (!stats)
return -EINVAL; return -EINVAL;
num_counters = port->cnts.num_q_counters + num_counters = cnts->num_q_counters +
port->cnts.num_cong_counters + cnts->num_cong_counters +
port->cnts.num_ext_ppcnt_counters; cnts->num_ext_ppcnt_counters;
/* q_counters are per IB device, query the master mdev */ /* q_counters are per IB device, query the master mdev */
ret = mlx5_ib_query_q_counters(dev->mdev, port, stats, ret = mlx5_ib_query_q_counters(dev->mdev, cnts, stats, cnts->set_id);
port->cnts.set_id);
if (ret) if (ret)
return ret; return ret;
if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) { if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
ret = mlx5_ib_query_ext_ppcnt_counters(dev, port, stats); ret = mlx5_ib_query_ext_ppcnt_counters(dev, cnts, stats);
if (ret) if (ret)
return ret; return ret;
} }
@ -5580,10 +5620,10 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
} }
ret = mlx5_lag_query_cong_counters(dev->mdev, ret = mlx5_lag_query_cong_counters(dev->mdev,
stats->value + stats->value +
port->cnts.num_q_counters, cnts->num_q_counters,
port->cnts.num_cong_counters, cnts->num_cong_counters,
port->cnts.offsets + cnts->offsets +
port->cnts.num_q_counters); cnts->num_q_counters);
mlx5_ib_put_native_port_mdev(dev, port_num); mlx5_ib_put_native_port_mdev(dev, port_num);
if (ret) if (ret)
@ -5598,20 +5638,22 @@ static struct rdma_hw_stats *
mlx5_ib_counter_alloc_stats(struct rdma_counter *counter) mlx5_ib_counter_alloc_stats(struct rdma_counter *counter)
{ {
struct mlx5_ib_dev *dev = to_mdev(counter->device); struct mlx5_ib_dev *dev = to_mdev(counter->device);
struct mlx5_ib_port *port = &dev->port[counter->port - 1]; const struct mlx5_ib_counters *cnts =
get_counters(dev, counter->port - 1);
/* Q counters are in the beginning of all counters */ /* Q counters are in the beginning of all counters */
return rdma_alloc_hw_stats_struct(port->cnts.names, return rdma_alloc_hw_stats_struct(cnts->names,
port->cnts.num_q_counters, cnts->num_q_counters,
RDMA_HW_STATS_DEFAULT_LIFESPAN); RDMA_HW_STATS_DEFAULT_LIFESPAN);
} }
static int mlx5_ib_counter_update_stats(struct rdma_counter *counter) static int mlx5_ib_counter_update_stats(struct rdma_counter *counter)
{ {
struct mlx5_ib_dev *dev = to_mdev(counter->device); struct mlx5_ib_dev *dev = to_mdev(counter->device);
struct mlx5_ib_port *port = &dev->port[counter->port - 1]; const struct mlx5_ib_counters *cnts =
get_counters(dev, counter->port - 1);
return mlx5_ib_query_q_counters(dev->mdev, port, return mlx5_ib_query_q_counters(dev->mdev, cnts,
counter->stats, counter->id); counter->stats, counter->id);
} }
@ -5788,7 +5830,6 @@ static void init_delay_drop(struct mlx5_ib_dev *dev)
mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n"); mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n");
} }
/* The mlx5_ib_multiport_mutex should be held when calling this function */
static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
struct mlx5_ib_multiport_info *mpi) struct mlx5_ib_multiport_info *mpi)
{ {
@ -5798,6 +5839,8 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
int err; int err;
int i; int i;
lockdep_assert_held(&mlx5_ib_multiport_mutex);
mlx5_ib_cleanup_cong_debugfs(ibdev, port_num); mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);
spin_lock(&port->mp.mpi_lock); spin_lock(&port->mp.mpi_lock);
@ -5847,13 +5890,14 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN; ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN;
} }
/* The mlx5_ib_multiport_mutex should be held when calling this function */
static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev, static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
struct mlx5_ib_multiport_info *mpi) struct mlx5_ib_multiport_info *mpi)
{ {
u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1; u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
int err; int err;
lockdep_assert_held(&mlx5_ib_multiport_mutex);
spin_lock(&ibdev->port[port_num].mp.mpi_lock); spin_lock(&ibdev->port[port_num].mp.mpi_lock);
if (ibdev->port[port_num].mp.mpi) { if (ibdev->port[port_num].mp.mpi) {
mlx5_ib_dbg(ibdev, "port %d already affiliated.\n", mlx5_ib_dbg(ibdev, "port %d already affiliated.\n",
@ -6882,7 +6926,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->port = kcalloc(num_ports, sizeof(*dev->port), dev->port = kcalloc(num_ports, sizeof(*dev->port),
GFP_KERNEL); GFP_KERNEL);
if (!dev->port) { if (!dev->port) {
ib_dealloc_device((struct ib_device *)dev); ib_dealloc_device(&dev->ib_dev);
return NULL; return NULL;
} }
@ -6909,6 +6953,7 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
mlx5_ib_unbind_slave_port(mpi->ibdev, mpi); mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
list_del(&mpi->list); list_del(&mpi->list);
mutex_unlock(&mlx5_ib_multiport_mutex); mutex_unlock(&mlx5_ib_multiport_mutex);
kfree(mpi);
return; return;
} }

View File

@ -200,6 +200,7 @@ struct mlx5_ib_flow_db {
struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS]; struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS];
struct mlx5_ib_flow_prio egress[MLX5_IB_NUM_EGRESS_FTS]; struct mlx5_ib_flow_prio egress[MLX5_IB_NUM_EGRESS_FTS];
struct mlx5_ib_flow_prio fdb; struct mlx5_ib_flow_prio fdb;
struct mlx5_ib_flow_prio rdma_rx[MLX5_IB_NUM_FLOW_FT];
struct mlx5_flow_table *lag_demux_ft; struct mlx5_flow_table *lag_demux_ft;
/* Protect flow steering bypass flow tables /* Protect flow steering bypass flow tables
* when add/del flow rules. * when add/del flow rules.
@ -1476,6 +1477,7 @@ int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
bool dyn_bfreg); bool dyn_bfreg);
int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter); int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u8 port_num);
static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev, static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
bool do_modify_atomic) bool do_modify_atomic)

View File

@ -982,17 +982,6 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
return ret < 0 ? ret : npages; return ret < 0 ? ret : npages;
} }
static const u32 mlx5_ib_odp_opcode_cap[] = {
[MLX5_OPCODE_SEND] = IB_ODP_SUPPORT_SEND,
[MLX5_OPCODE_SEND_IMM] = IB_ODP_SUPPORT_SEND,
[MLX5_OPCODE_SEND_INVAL] = IB_ODP_SUPPORT_SEND,
[MLX5_OPCODE_RDMA_WRITE] = IB_ODP_SUPPORT_WRITE,
[MLX5_OPCODE_RDMA_WRITE_IMM] = IB_ODP_SUPPORT_WRITE,
[MLX5_OPCODE_RDMA_READ] = IB_ODP_SUPPORT_READ,
[MLX5_OPCODE_ATOMIC_CS] = IB_ODP_SUPPORT_ATOMIC,
[MLX5_OPCODE_ATOMIC_FA] = IB_ODP_SUPPORT_ATOMIC,
};
/* /*
* Parse initiator WQE. Advances the wqe pointer to point at the * Parse initiator WQE. Advances the wqe pointer to point at the
* scatter-gather list, and set wqe_end to the end of the WQE. * scatter-gather list, and set wqe_end to the end of the WQE.
@ -1003,12 +992,8 @@ static int mlx5_ib_mr_initiator_pfault_handler(
{ {
struct mlx5_wqe_ctrl_seg *ctrl = *wqe; struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
u16 wqe_index = pfault->wqe.wqe_index; u16 wqe_index = pfault->wqe.wqe_index;
u32 transport_caps;
struct mlx5_base_av *av; struct mlx5_base_av *av;
unsigned ds, opcode; unsigned ds, opcode;
#if defined(DEBUG)
u32 ctrl_wqe_index, ctrl_qpn;
#endif
u32 qpn = qp->trans_qp.base.mqp.qpn; u32 qpn = qp->trans_qp.base.mqp.qpn;
ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK; ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
@ -1024,58 +1009,17 @@ static int mlx5_ib_mr_initiator_pfault_handler(
return -EFAULT; return -EFAULT;
} }
#if defined(DEBUG)
ctrl_wqe_index = (be32_to_cpu(ctrl->opmod_idx_opcode) &
MLX5_WQE_CTRL_WQE_INDEX_MASK) >>
MLX5_WQE_CTRL_WQE_INDEX_SHIFT;
if (wqe_index != ctrl_wqe_index) {
mlx5_ib_err(dev, "Got WQE with invalid wqe_index. wqe_index=0x%x, qpn=0x%x ctrl->wqe_index=0x%x\n",
wqe_index, qpn,
ctrl_wqe_index);
return -EFAULT;
}
ctrl_qpn = (be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_QPN_MASK) >>
MLX5_WQE_CTRL_QPN_SHIFT;
if (qpn != ctrl_qpn) {
mlx5_ib_err(dev, "Got WQE with incorrect QP number. wqe_index=0x%x, qpn=0x%x ctrl->qpn=0x%x\n",
wqe_index, qpn,
ctrl_qpn);
return -EFAULT;
}
#endif /* DEBUG */
*wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS; *wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS;
*wqe += sizeof(*ctrl); *wqe += sizeof(*ctrl);
opcode = be32_to_cpu(ctrl->opmod_idx_opcode) & opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
MLX5_WQE_CTRL_OPCODE_MASK; MLX5_WQE_CTRL_OPCODE_MASK;
switch (qp->ibqp.qp_type) { if (qp->ibqp.qp_type == IB_QPT_XRC_INI)
case IB_QPT_XRC_INI:
*wqe += sizeof(struct mlx5_wqe_xrc_seg); *wqe += sizeof(struct mlx5_wqe_xrc_seg);
transport_caps = dev->odp_caps.per_transport_caps.xrc_odp_caps;
break;
case IB_QPT_RC:
transport_caps = dev->odp_caps.per_transport_caps.rc_odp_caps;
break;
case IB_QPT_UD:
transport_caps = dev->odp_caps.per_transport_caps.ud_odp_caps;
break;
default:
mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport 0x%x\n",
qp->ibqp.qp_type);
return -EFAULT;
}
if (unlikely(opcode >= ARRAY_SIZE(mlx5_ib_odp_opcode_cap) || if (qp->ibqp.qp_type == IB_QPT_UD ||
!(transport_caps & mlx5_ib_odp_opcode_cap[opcode]))) { qp->qp_sub_type == MLX5_IB_QPT_DCI) {
mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode 0x%x\n",
opcode);
return -EFAULT;
}
if (qp->ibqp.qp_type == IB_QPT_UD) {
av = *wqe; av = *wqe;
if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV)) if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
*wqe += sizeof(struct mlx5_av); *wqe += sizeof(struct mlx5_av);
@ -1138,19 +1082,6 @@ static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev,
return -EFAULT; return -EFAULT;
} }
switch (qp->ibqp.qp_type) {
case IB_QPT_RC:
if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
IB_ODP_SUPPORT_RECV))
goto invalid_transport_or_opcode;
break;
default:
invalid_transport_or_opcode:
mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport. transport: 0x%x\n",
qp->ibqp.qp_type);
return -EFAULT;
}
*wqe_end = wqe + wqe_size; *wqe_end = wqe + wqe_size;
return 0; return 0;
@ -1200,7 +1131,7 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
{ {
bool sq = pfault->type & MLX5_PFAULT_REQUESTOR; bool sq = pfault->type & MLX5_PFAULT_REQUESTOR;
u16 wqe_index = pfault->wqe.wqe_index; u16 wqe_index = pfault->wqe.wqe_index;
void *wqe = NULL, *wqe_end = NULL; void *wqe, *wqe_start = NULL, *wqe_end = NULL;
u32 bytes_mapped, total_wqe_bytes; u32 bytes_mapped, total_wqe_bytes;
struct mlx5_core_rsc_common *res; struct mlx5_core_rsc_common *res;
int resume_with_error = 1; int resume_with_error = 1;
@ -1221,12 +1152,13 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
goto resolve_page_fault; goto resolve_page_fault;
} }
wqe = (void *)__get_free_page(GFP_KERNEL); wqe_start = (void *)__get_free_page(GFP_KERNEL);
if (!wqe) { if (!wqe_start) {
mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n"); mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
goto resolve_page_fault; goto resolve_page_fault;
} }
wqe = wqe_start;
qp = (res->res == MLX5_RES_QP) ? res_to_qp(res) : NULL; qp = (res->res == MLX5_RES_QP) ? res_to_qp(res) : NULL;
if (qp && sq) { if (qp && sq) {
ret = mlx5_ib_read_user_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE, ret = mlx5_ib_read_user_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,
@ -1281,7 +1213,7 @@ resolve_page_fault:
pfault->wqe.wq_num, resume_with_error, pfault->wqe.wq_num, resume_with_error,
pfault->type); pfault->type);
mlx5_core_res_put(res); mlx5_core_res_put(res);
free_page((unsigned long)wqe); free_page((unsigned long)wqe_start);
} }
static int pages_in_range(u64 address, u32 length) static int pages_in_range(u64 address, u32 length)

View File

@ -3386,19 +3386,16 @@ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
struct mlx5_ib_dev *dev = to_mdev(qp->device); struct mlx5_ib_dev *dev = to_mdev(qp->device);
struct mlx5_ib_qp *mqp = to_mqp(qp); struct mlx5_ib_qp *mqp = to_mqp(qp);
struct mlx5_qp_context context = {}; struct mlx5_qp_context context = {};
struct mlx5_ib_port *mibport = NULL;
struct mlx5_ib_qp_base *base; struct mlx5_ib_qp_base *base;
u32 set_id; u32 set_id;
if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id)) if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id))
return 0; return 0;
if (counter) { if (counter)
set_id = counter->id; set_id = counter->id;
} else { else
mibport = &dev->port[mqp->port - 1]; set_id = mlx5_ib_get_counters_id(dev, mqp->port - 1);
set_id = mibport->cnts.set_id;
}
base = &mqp->trans_qp.base; base = &mqp->trans_qp.base;
context.qp_counter_set_usr_page &= cpu_to_be32(0xffffff); context.qp_counter_set_usr_page &= cpu_to_be32(0xffffff);
@ -3459,7 +3456,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
struct mlx5_ib_cq *send_cq, *recv_cq; struct mlx5_ib_cq *send_cq, *recv_cq;
struct mlx5_qp_context *context; struct mlx5_qp_context *context;
struct mlx5_ib_pd *pd; struct mlx5_ib_pd *pd;
struct mlx5_ib_port *mibport = NULL;
enum mlx5_qp_state mlx5_cur, mlx5_new; enum mlx5_qp_state mlx5_cur, mlx5_new;
enum mlx5_qp_optpar optpar; enum mlx5_qp_optpar optpar;
u32 set_id = 0; u32 set_id = 0;
@ -3624,11 +3620,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (qp->flags & MLX5_IB_QP_UNDERLAY) if (qp->flags & MLX5_IB_QP_UNDERLAY)
port_num = 0; port_num = 0;
mibport = &dev->port[port_num];
if (ibqp->counter) if (ibqp->counter)
set_id = ibqp->counter->id; set_id = ibqp->counter->id;
else else
set_id = mibport->cnts.set_id; set_id = mlx5_ib_get_counters_id(dev, port_num);
context->qp_counter_set_usr_page |= context->qp_counter_set_usr_page |=
cpu_to_be32(set_id << 24); cpu_to_be32(set_id << 24);
} }
@ -3817,6 +3812,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry); dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
u16 set_id;
required |= IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT; required |= IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;
if (!is_valid_mask(attr_mask, required, 0)) if (!is_valid_mask(attr_mask, required, 0))
return -EINVAL; return -EINVAL;
@ -3843,7 +3840,9 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
} }
MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index); MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index);
MLX5_SET(dctc, dctc, port, attr->port_num); MLX5_SET(dctc, dctc, port, attr->port_num);
MLX5_SET(dctc, dctc, counter_set_id, dev->port[attr->port_num - 1].cnts.set_id);
set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1);
MLX5_SET(dctc, dctc, counter_set_id, set_id);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
struct mlx5_ib_modify_qp_resp resp = {}; struct mlx5_ib_modify_qp_resp resp = {};
@ -6345,11 +6344,13 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
} }
if (curr_wq_state == IB_WQS_RESET && wq_state == IB_WQS_RDY) { if (curr_wq_state == IB_WQS_RESET && wq_state == IB_WQS_RDY) {
u16 set_id;
set_id = mlx5_ib_get_counters_id(dev, 0);
if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) { if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
MLX5_SET64(modify_rq_in, in, modify_bitmask, MLX5_SET64(modify_rq_in, in, modify_bitmask,
MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID); MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
MLX5_SET(rqc, rqc, counter_set_id, MLX5_SET(rqc, rqc, counter_set_id, set_id);
dev->port->cnts.set_id);
} else } else
dev_info_once( dev_info_once(
&dev->ib_dev.dev, &dev->ib_dev.dev,

View File

@@ -163,10 +163,10 @@ int ocrdma_query_port(struct ib_device *ibdev,
 	netdev = dev->nic_info.netdev;
 	if (netif_running(netdev) && netif_oper_up(netdev)) {
 		port_state = IB_PORT_ACTIVE;
-		props->phys_state = 5;
+		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
 	} else {
 		port_state = IB_PORT_DOWN;
-		props->phys_state = 3;
+		props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
 	}
 	props->max_mtu = IB_MTU_4096;
 	props->active_mtu = iboe_get_mtu(netdev->mtu);

View File

@@ -826,7 +826,7 @@ static int qedr_init_hw(struct qedr_dev *dev)
 	if (rc)
 		goto out;
 
-	dev->db_addr = (void __iomem *)(uintptr_t)out_params.dpi_addr;
+	dev->db_addr = out_params.dpi_addr;
 	dev->db_phys_addr = out_params.dpi_phys_addr;
 	dev->db_size = out_params.dpi_size;
 	dev->dpi = out_params.dpi;

View File

@@ -229,7 +229,7 @@ struct qedr_ucontext {
 	struct ib_ucontext ibucontext;
 	struct qedr_dev *dev;
 	struct qedr_pd *pd;
-	u64 dpi_addr;
+	void __iomem *dpi_addr;
 	u64 dpi_phys_addr;
 	u32 dpi_size;
 	u16 dpi;

View File

@@ -221,10 +221,10 @@ int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
 	/* *attr being zeroed by the caller, avoid zeroing it here */
 	if (rdma_port->port_state == QED_RDMA_PORT_UP) {
 		attr->state = IB_PORT_ACTIVE;
-		attr->phys_state = 5;
+		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
 	} else {
 		attr->state = IB_PORT_DOWN;
-		attr->phys_state = 3;
+		attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
 	}
 	attr->max_mtu = IB_MTU_4096;
 	attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
@@ -2451,7 +2451,6 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 	struct qedr_dev *dev = qp->dev;
 	struct ib_qp_attr attr;
 	int attr_mask = 0;
-	int rc = 0;
 
 	DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
 		 qp, qp->qp_type);
@@ -2496,7 +2495,7 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 		xa_erase_irq(&dev->qps, qp->qp_id);
 		kfree(qp);
 	}
-	return rc;
+	return 0;
 }
int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags, int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags,

View File

@@ -1789,7 +1789,6 @@ static void unlock_expected_tids(struct qib_ctxtdata *rcd)
 
 static int qib_close(struct inode *in, struct file *fp)
 {
-	int ret = 0;
 	struct qib_filedata *fd;
 	struct qib_ctxtdata *rcd;
 	struct qib_devdata *dd;
@@ -1873,7 +1872,7 @@ static int qib_close(struct inode *in, struct file *fp)
 
 bail:
 	kfree(fd);
-	return ret;
+	return 0;
 }
static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo) static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo)

View File

@@ -313,11 +313,8 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
 		case IB_WR_SEND:
 		case IB_WR_SEND_WITH_IMM:
 			/* If no credit, return. */
-			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
-			    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
-				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
+			if (!rvt_rc_credit_avail(qp, wqe))
 				goto bail;
-			}
 			if (len > pmtu) {
 				qp->s_state = OP(SEND_FIRST);
 				len = pmtu;
@@ -344,11 +341,8 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
 			goto no_flow_control;
 		case IB_WR_RDMA_WRITE_WITH_IMM:
 			/* If no credit, return. */
-			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
-			    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
-				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
+			if (!rvt_rc_credit_avail(qp, wqe))
 				goto bail;
-			}
 no_flow_control:
 			ohdr->u.rc.reth.vaddr =
 				cpu_to_be64(wqe->rdma_wr.remote_addr);
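Both open-coded credit checks above collapse into rvt_rc_credit_avail(). Its body is not part of this excerpt; judging from the code it replaces (and from the rc_crwaits counter added in the qib sysfs hunk below), the helper presumably looks roughly like this:

/* Assumed shape, inferred from the removed qib code; the real rdmavt helper
 * may also bump a credit-wait counter (see the new rc_crwaits attribute).
 */
static inline bool rvt_rc_credit_avail(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
	if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
	    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
		qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
		return false;
	}
	return true;
}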

View File

@@ -436,6 +436,7 @@ QIB_DIAGC_ATTR(dmawait);
 QIB_DIAGC_ATTR(unaligned);
 QIB_DIAGC_ATTR(rc_dupreq);
 QIB_DIAGC_ATTR(rc_seqnak);
+QIB_DIAGC_ATTR(rc_crwaits);
 
 static struct attribute *diagc_default_attributes[] = {
 	&qib_diagc_attr_rc_resends.attr,
@@ -453,6 +454,7 @@ static struct attribute *diagc_default_attributes[] = {
 	&qib_diagc_attr_unaligned.attr,
 	&qib_diagc_attr_rc_dupreq.attr,
 	&qib_diagc_attr_rc_seqnak.attr,
+	&qib_diagc_attr_rc_crwaits.attr,
 	NULL
 };


@@ -89,9 +89,15 @@ static void usnic_ib_dump_vf(struct usnic_ib_vf *vf, char *buf, int buf_sz)

 void usnic_ib_log_vf(struct usnic_ib_vf *vf)
 {
-	char buf[1000];
+	char *buf = kzalloc(1000, GFP_KERNEL);

-	usnic_ib_dump_vf(vf, buf, sizeof(buf));
+	if (!buf)
+		return;
+
+	usnic_ib_dump_vf(vf, buf, 1000);
 	usnic_dbg("%s\n", buf);
+	kfree(buf);
 }

 /* Start of netdev section */


@@ -194,7 +194,7 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
 		return ERR_CAST(dev_list);
 	for (i = 0; dev_list[i]; i++) {
 		dev = dev_list[i];
-		vf = pci_get_drvdata(to_pci_dev(dev));
+		vf = dev_get_drvdata(dev);
 		spin_lock(&vf->lock);
 		vnic = vf->vnic;
 		if (!usnic_vnic_check_room(vnic, res_spec)) {
@@ -356,13 +356,14 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port,

 	if (!us_ibdev->ufdev->link_up) {
 		props->state = IB_PORT_DOWN;
-		props->phys_state = 3;
+		props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
 	} else if (!us_ibdev->ufdev->inaddr) {
 		props->state = IB_PORT_INIT;
-		props->phys_state = 4;
+		props->phys_state =
+			IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING;
 	} else {
 		props->state = IB_PORT_ACTIVE;
-		props->phys_state = 5;
+		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
 	}

 	props->port_cap_flags = 0;


@@ -65,10 +65,6 @@
  */
 #define RXE_UVERBS_ABI_VERSION 2

-#define RDMA_LINK_PHYS_STATE_LINK_UP (5)
-#define RDMA_LINK_PHYS_STATE_DISABLED (3)
-#define RDMA_LINK_PHYS_STATE_POLLING (2)
-
 #define RXE_ROCE_V2_SPORT (0xc000)

 static inline u32 rxe_crc32(struct rxe_dev *rxe,


@@ -154,7 +154,7 @@ enum rxe_port_param {
 	RXE_PORT_ACTIVE_WIDTH = IB_WIDTH_1X,
 	RXE_PORT_ACTIVE_SPEED = 1,
 	RXE_PORT_PKEY_TBL_LEN = 64,
-	RXE_PORT_PHYS_STATE = 2,
+	RXE_PORT_PHYS_STATE = IB_PORT_PHYS_STATE_POLLING,
 	RXE_PORT_SUBNET_PREFIX = 0xfe80000000000000ULL,
 };


@@ -69,11 +69,11 @@ static int rxe_query_port(struct ib_device *dev,
 			      &attr->active_width);

 	if (attr->state == IB_PORT_ACTIVE)
-		attr->phys_state = RDMA_LINK_PHYS_STATE_LINK_UP;
+		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
 	else if (dev_get_flags(rxe->ndev) & IFF_UP)
-		attr->phys_state = RDMA_LINK_PHYS_STATE_POLLING;
+		attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
 	else
-		attr->phys_state = RDMA_LINK_PHYS_STATE_DISABLED;
+		attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;

 	mutex_unlock(&rxe->usdev_lock);


@@ -76,16 +76,15 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)
 			if (unlikely(!p))
 				return -EFAULT;

-			buffer = kmap_atomic(p);
+			buffer = kmap(p);

 			if (likely(PAGE_SIZE - off >= bytes)) {
 				memcpy(paddr, buffer + off, bytes);
-				kunmap_atomic(buffer);
 			} else {
 				unsigned long part = bytes - (PAGE_SIZE - off);

 				memcpy(paddr, buffer + off, part);
-				kunmap_atomic(buffer);
+				kunmap(p);

 				if (!mem->is_pbl)
 					p = siw_get_upage(mem->umem,
@@ -97,11 +96,10 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)
 				if (unlikely(!p))
 					return -EFAULT;

-				buffer = kmap_atomic(p);
-				memcpy(paddr + part, buffer,
-				       bytes - part);
-				kunmap_atomic(buffer);
+				buffer = kmap(p);
+				memcpy(paddr + part, buffer, bytes - part);
 			}
+			kunmap(p);
 		}
 	}
 	return (int)bytes;
@@ -518,11 +516,12 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 						c_tx->mpa_crc_hd,
 						iov[seg].iov_base,
 						plen);
-				} else if (do_crc)
-					crypto_shash_update(
-						c_tx->mpa_crc_hd,
-						page_address(p) + fp_off,
-						plen);
+				} else if (do_crc) {
+					crypto_shash_update(c_tx->mpa_crc_hd,
+							    kmap(p) + fp_off,
+							    plen);
+					kunmap(p);
+				}
 			} else {
 				u64 va = sge->laddr + sge_off;
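
Note on the kmap() conversion above: a kmap_atomic() mapping must be released before anything that can sleep, while a plain kmap() mapping may be held across sleeping calls when running in process context, as this TX path does. A minimal sketch of the relaxed pattern (a hypothetical helper, not code from this series; kmap()/kunmap() come from linux/highmem.h):

	static int copy_page_chunk(struct page *p, unsigned int off,
				   void *dst, unsigned int len)
	{
		void *src = kmap(p);	/* may be held across sleeping calls */

		memcpy(dst, src + off, len);
		kunmap(p);
		return len;
	}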


@@ -206,7 +206,8 @@ int siw_query_port(struct ib_device *base_dev, u8 port,
 	attr->gid_tbl_len = 1;
 	attr->max_msg_sz = -1;
 	attr->max_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
-	attr->phys_state = sdev->state == IB_PORT_ACTIVE ? 5 : 3;
+	attr->phys_state = sdev->state == IB_PORT_ACTIVE ?
+		IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
 	attr->pkey_tbl_len = 1;
 	attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
 	attr->state = sdev->state;


@@ -102,9 +102,10 @@

 /* Default support is 512KB I/O size */
 #define ISER_DEF_MAX_SECTORS 1024
-#define ISCSI_ISER_DEF_SG_TABLESIZE ((ISER_DEF_MAX_SECTORS * 512) >> SHIFT_4K)
-/* Maximum support is 8MB I/O size */
-#define ISCSI_ISER_MAX_SG_TABLESIZE ((16384 * 512) >> SHIFT_4K)
+#define ISCSI_ISER_DEF_SG_TABLESIZE \
+	((ISER_DEF_MAX_SECTORS * SECTOR_SIZE) >> SHIFT_4K)
+/* Maximum support is 16MB I/O size */
+#define ISCSI_ISER_MAX_SG_TABLESIZE ((32768 * SECTOR_SIZE) >> SHIFT_4K)

 #define ISER_DEF_XMIT_CMDS_DEFAULT 512
 #if ISCSI_DEF_XMIT_CMDS_MAX > ISER_DEF_XMIT_CMDS_DEFAULT
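
The updated comment can be checked from the macro arithmetic (assuming SECTOR_SIZE is 512 and SHIFT_4K is 12, i.e. 4 KiB pages): (32768 * 512) >> 12 = 4096 scatter-gather entries, and 4096 entries * 4 KiB = 16 MiB per command. The old limit, (16384 * 512) >> 12 = 2048 entries, worked out to the 8 MiB mentioned in the removed comment.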


@@ -1767,8 +1767,8 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
 		goto out;

 retry:
-	ch->cq = ib_alloc_cq(sdev->device, ch, ch->rq_size + sq_size,
-			0 /* XXX: spread CQs */, IB_POLL_WORKQUEUE);
+	ch->cq = ib_alloc_cq_any(sdev->device, ch, ch->rq_size + sq_size,
+				 IB_POLL_WORKQUEUE);
 	if (IS_ERR(ch->cq)) {
 		ret = PTR_ERR(ch->cq);
 		pr_err("failed to create CQ cqe= %d ret= %d\n",


@@ -188,8 +188,7 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
 	/* new rate limit */
 	err = mlx5_set_pp_rate_limit_cmd(dev, entry->index, rl);
 	if (err) {
-		mlx5_core_err(dev, "Failed configuring rate limit(err %d): \
-			      rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
+		mlx5_core_err(dev, "Failed configuring rate limit(err %d): rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
 			      err, rl->rate, rl->max_burst_sz,
 			      rl->typical_pkt_sz);
 		goto out;
@@ -218,8 +217,7 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl)
 	mutex_lock(&table->rl_lock);
 	entry = find_rl_entry(table, rl);
 	if (!entry || !entry->refcount) {
-		mlx5_core_warn(dev, "Rate %u, max_burst_sz %u typical_pkt_sz %u \
-			       are not configured\n",
+		mlx5_core_warn(dev, "Rate %u, max_burst_sz %u typical_pkt_sz %u are not configured\n",
 			       rl->rate, rl->max_burst_sz, rl->typical_pkt_sz);
 		goto out;
 	}


@@ -798,9 +798,8 @@ static int qed_rdma_add_user(void *rdma_cxt,
 	/* Calculate the corresponding DPI address */
 	dpi_start_offset = p_hwfn->dpi_start_offset;

-	out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells +
-				     dpi_start_offset +
-				     ((out_params->dpi) * p_hwfn->dpi_size));
+	out_params->dpi_addr = p_hwfn->doorbells + dpi_start_offset +
+			       out_params->dpi * p_hwfn->dpi_size;

 	out_params->dpi_phys_addr = p_hwfn->db_phys_addr +
 				    dpi_start_offset +


@@ -1654,15 +1654,17 @@ static struct smbd_connection *_smbd_get_connection(

 	info->send_cq = NULL;
 	info->recv_cq = NULL;
-	info->send_cq = ib_alloc_cq(info->id->device, info,
-			info->send_credit_target, 0, IB_POLL_SOFTIRQ);
+	info->send_cq =
+		ib_alloc_cq_any(info->id->device, info,
+				info->send_credit_target, IB_POLL_SOFTIRQ);
 	if (IS_ERR(info->send_cq)) {
 		info->send_cq = NULL;
 		goto alloc_cq_failed;
 	}

-	info->recv_cq = ib_alloc_cq(info->id->device, info,
-			info->receive_credit_max, 0, IB_POLL_SOFTIRQ);
+	info->recv_cq =
+		ib_alloc_cq_any(info->id->device, info,
+				info->receive_credit_max, IB_POLL_SOFTIRQ);
 	if (IS_ERR(info->recv_cq)) {
 		info->recv_cq = NULL;
 		goto alloc_cq_failed;


@@ -881,12 +881,6 @@ header-test- += net/xdp.h
 header-test- += net/xdp_priv.h
 header-test- += pcmcia/cistpl.h
 header-test- += pcmcia/ds.h
-header-test- += rdma/ib.h
-header-test- += rdma/iw_portmap.h
-header-test- += rdma/opa_port_info.h
-header-test- += rdma/rdmavt_cq.h
-header-test- += rdma/restrack.h
-header-test- += rdma/signature.h
 header-test- += rdma/tid_rdma_defs.h
 header-test- += scsi/fc/fc_encaps.h
 header-test- += scsi/fc/fc_fc2.h
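
These rdma/*.h entries come off the header-test exclusion list because the headers are being made self-contained: with the missing includes added in the rdma header hunks that follow (linux/types.h, linux/socket.h, rdma/opa_smi.h and friends), a translation unit containing nothing but, say,

	#include <rdma/signature.h>

now has to build on its own, which is what the kernel's header compile test checks.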


@@ -328,6 +328,7 @@ enum mlx5_event {
 	MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
 	MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16,
 	MLX5_EVENT_TYPE_TEMP_WARN_EVENT = 0x17,
+	MLX5_EVENT_TYPE_XRQ_ERROR = 0x18,
 	MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
 	MLX5_EVENT_TYPE_GENERAL_EVENT = 0x22,
 	MLX5_EVENT_TYPE_MONITOR_COUNTER = 0x24,
@@ -345,6 +346,7 @@ enum mlx5_event {
 	MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe,

 	MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c,
+	MLX5_EVENT_TYPE_DCT_KEY_VIOLATION = 0x1d,

 	MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
 	MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21,
@@ -584,6 +586,12 @@ struct mlx5_eqe_cq_err {
 	u8 syndrome;
 };

+struct mlx5_eqe_xrq_err {
+	__be32 reserved1[5];
+	__be32 type_xrqn;
+	__be32 reserved2;
+};
+
 struct mlx5_eqe_port_state {
 	u8 reserved0[8];
 	u8 port;
@@ -698,6 +706,7 @@ union ev_data {
 	struct mlx5_eqe_pps pps;
 	struct mlx5_eqe_dct dct;
 	struct mlx5_eqe_temp_warning temp_warning;
+	struct mlx5_eqe_xrq_err xrq_err;
 } __packed;

 struct mlx5_eqe {


@@ -225,7 +225,7 @@ struct qed_rdma_start_in_params {

 struct qed_rdma_add_user_out_params {
 	u16 dpi;
-	u64 dpi_addr;
+	void __iomem *dpi_addr;
 	u64 dpi_phys_addr;
 	u32 dpi_size;
 	u16 wid_count;
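
With dpi_addr carried as void __iomem * rather than a u64, the consumer can hand the pointer straight to the MMIO accessors and sparse can flag plain dereferences. A hedged sketch of a doorbell write (db_offset and db_val are made-up names, not from this series):

	void __iomem *db = out_params.dpi_addr + db_offset;

	writel(db_val, db);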


@@ -36,6 +36,8 @@
 #include <linux/types.h>
 #include <linux/sched.h>
 #include <linux/cred.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>

 struct ib_addr {
 	union {


@@ -98,15 +98,54 @@ void ibdev_info(const struct ib_device *ibdev, const char *format, ...);
 #if defined(CONFIG_DYNAMIC_DEBUG)
 #define ibdev_dbg(__dev, format, args...) \
 	dynamic_ibdev_dbg(__dev, format, ##args)
-#elif defined(DEBUG)
-#define ibdev_dbg(__dev, format, args...) \
-	ibdev_printk(KERN_DEBUG, __dev, format, ##args)
 #else
 __printf(2, 3) __cold
 static inline
 void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
 #endif

+#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...)	\
+do {								\
+	static DEFINE_RATELIMIT_STATE(_rs,			\
+				      DEFAULT_RATELIMIT_INTERVAL,	\
+				      DEFAULT_RATELIMIT_BURST);	\
+	if (__ratelimit(&_rs))					\
+		ibdev_level(ibdev, fmt, ##__VA_ARGS__);		\
+} while (0)
+
+#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
+	ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
+#define ibdev_alert_ratelimited(ibdev, fmt, ...) \
+	ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
+#define ibdev_crit_ratelimited(ibdev, fmt, ...) \
+	ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
+#define ibdev_err_ratelimited(ibdev, fmt, ...) \
+	ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
+#define ibdev_warn_ratelimited(ibdev, fmt, ...) \
+	ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
+#define ibdev_notice_ratelimited(ibdev, fmt, ...) \
+	ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
+#define ibdev_info_ratelimited(ibdev, fmt, ...) \
+	ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+/* descriptor check is first to prevent flooding with "callbacks suppressed" */
+#define ibdev_dbg_ratelimited(ibdev, fmt, ...)			\
+do {								\
+	static DEFINE_RATELIMIT_STATE(_rs,			\
+				      DEFAULT_RATELIMIT_INTERVAL,	\
+				      DEFAULT_RATELIMIT_BURST);	\
+	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);		\
+	if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs))	\
+		__dynamic_ibdev_dbg(&descriptor, ibdev, fmt,	\
+				    ##__VA_ARGS__);		\
+} while (0)
+#else
+__printf(2, 3) __cold
+static inline
+void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
+#endif
+
 union ib_gid {
 	u8 raw[16];
 	struct {
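
The new macros mirror the dev_<level>_ratelimited() family: once the default rate limit trips, further messages from a hot path are dropped rather than flooding the log. A sketch of a driver call site (the device field, format string and variables are illustrative only):

	ibdev_warn_ratelimited(&dev->ib_dev,
			       "CQE error, qpn 0x%x syndrome 0x%x\n",
			       qpn, syndrome);
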
@@ -451,6 +490,16 @@ enum ib_port_state {
 	IB_PORT_ACTIVE_DEFER = 5
 };

+enum ib_port_phys_state {
+	IB_PORT_PHYS_STATE_SLEEP = 1,
+	IB_PORT_PHYS_STATE_POLLING = 2,
+	IB_PORT_PHYS_STATE_DISABLED = 3,
+	IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
+	IB_PORT_PHYS_STATE_LINK_UP = 5,
+	IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
+	IB_PORT_PHYS_STATE_PHY_TEST = 7,
+};
+
 enum ib_port_width {
 	IB_WIDTH_1X = 1,
 	IB_WIDTH_2X = 16,
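
The numeric values follow the PortPhysicalState field of the InfiniBand specification, which is why the driver hunks above could swap bare 2/3/4/5 for these names without changing behaviour. A hypothetical helper (not part of this series) mapping the enum to readable names:

	static const char *ib_phys_state_name(enum ib_port_phys_state s)
	{
		static const char * const names[] = {
			[IB_PORT_PHYS_STATE_SLEEP] = "Sleep",
			[IB_PORT_PHYS_STATE_POLLING] = "Polling",
			[IB_PORT_PHYS_STATE_DISABLED] = "Disabled",
			[IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING] = "PortConfigurationTraining",
			[IB_PORT_PHYS_STATE_LINK_UP] = "LinkUp",
			[IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY] = "LinkErrorRecovery",
			[IB_PORT_PHYS_STATE_PHY_TEST] = "PhyTest",
		};

		/* ARRAY_SIZE() comes from linux/kernel.h */
		if (s < ARRAY_SIZE(names) && names[s])
			return names[s];
		return "<unknown>";
	}
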
@@ -3710,6 +3759,25 @@ static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
 			      NULL);
 }

+struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
+				int nr_cqe, enum ib_poll_context poll_ctx,
+				const char *caller);
+
+/**
+ * ib_alloc_cq_any: Allocate kernel CQ
+ * @dev: The IB device
+ * @private: Private data attached to the CQE
+ * @nr_cqe: Number of CQEs in the CQ
+ * @poll_ctx: Context used for polling the CQ
+ */
+static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
+					    void *private, int nr_cqe,
+					    enum ib_poll_context poll_ctx)
+{
+	return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
+				 KBUILD_MODNAME);
+}
+
 /**
  * ib_free_cq_user - Free kernel/user CQ
  * @cq: The CQ to free
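
ib_alloc_cq_any() drops the comp_vector argument and records KBUILD_MODNAME as the caller, so the core can spread CQs across the available completion vectors itself. The ULP conversions later in this diff all follow the same shape, roughly (dev, priv and nr_cqe are placeholder names):

	/* before: the caller had to pick a completion vector */
	cq = ib_alloc_cq(dev, priv, nr_cqe, 0, IB_POLL_WORKQUEUE);

	/* after: the core picks the vector itself */
	cq = ib_alloc_cq_any(dev, priv, nr_cqe, IB_POLL_WORKQUEUE);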


@@ -33,6 +33,9 @@
 #ifndef _IW_PORTMAP_H
 #define _IW_PORTMAP_H

+#include <linux/socket.h>
+#include <linux/netlink.h>
+
 #define IWPM_ULIBNAME_SIZE	32
 #define IWPM_DEVNAME_SIZE	32
 #define IWPM_IFNAME_SIZE	16


@@ -33,6 +33,8 @@
 #if !defined(OPA_PORT_INFO_H)
 #define OPA_PORT_INFO_H

+#include <rdma/opa_smi.h>
+
 #define OPA_PORT_LINK_MODE_NOP	0		/* No change */
 #define OPA_PORT_LINK_MODE_OPA	4		/* Port mode is OPA */


@@ -76,28 +76,32 @@ int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,

 /**
  * Send the supplied skb to a specific userspace PID.
+ * @net: Net namespace in which to send the skb
  * @skb: The netlink skb
  * @pid: Userspace netlink process ID
  * Returns 0 on success or a negative error code.
  */
-int rdma_nl_unicast(struct sk_buff *skb, u32 pid);
+int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid);

 /**
  * Send, with wait/1 retry, the supplied skb to a specific userspace PID.
+ * @net: Net namespace in which to send the skb
  * @skb: The netlink skb
  * @pid: Userspace netlink process ID
  * Returns 0 on success or a negative error code.
  */
-int rdma_nl_unicast_wait(struct sk_buff *skb, __u32 pid);
+int rdma_nl_unicast_wait(struct net *net, struct sk_buff *skb, __u32 pid);

 /**
  * Send the supplied skb to a netlink group.
+ * @net: Net namespace in which to send the skb
  * @skb: The netlink skb
  * @group: Netlink group ID
  * @flags: allocation flags
  * Returns 0 on success or a negative error code.
  */
-int rdma_nl_multicast(struct sk_buff *skb, unsigned int group, gfp_t flags);
+int rdma_nl_multicast(struct net *net, struct sk_buff *skb,
+		      unsigned int group, gfp_t flags);

 /**
  * Check if there are any listeners to the netlink group
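
Every sender now names the target network namespace explicitly; for a reply to a netlink request that is simply the namespace of the requesting socket. A sketch of how a caller adapts (the surrounding code is hypothetical, but sock_net() and NETLINK_CB() are the standard netlink helpers):

	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);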


@@ -116,6 +116,7 @@ struct rvt_ibport {
 	u64 n_unaligned;
 	u64 n_rc_dupreq;
 	u64 n_rc_seqnak;
+	u64 n_rc_crwaits;
 	u16 pkey_violations;
 	u16 qkey_violations;
 	u16 mkey_violations;


@@ -53,6 +53,7 @@
 #include <linux/kthread.h>
 #include <rdma/ib_user_verbs.h>
+#include <rdma/ib_verbs.h>

 /*
  * Define an ib_cq_notify value that is not valid so we know when CQ


@@ -973,6 +973,41 @@ static inline void rvt_free_rq(struct rvt_rq *rq)
 	rq->wq = NULL;
 }

+/**
+ * rvt_to_iport - Get the ibport pointer
+ * @qp: the qp pointer
+ *
+ * This function returns the ibport pointer from the qp pointer.
+ */
+static inline struct rvt_ibport *rvt_to_iport(struct rvt_qp *qp)
+{
+	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+
+	return rdi->ports[qp->port_num - 1];
+}
+
+/**
+ * rvt_rc_credit_avail - Check if there are enough RC credits for the request
+ * @qp: the qp
+ * @wqe: the request
+ *
+ * This function returns false when there are not enough credits for the given
+ * request and true otherwise.
+ */
+static inline bool rvt_rc_credit_avail(struct rvt_qp *qp, struct rvt_swqe *wqe)
+{
+	lockdep_assert_held(&qp->s_lock);
+	if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
+	    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
+		struct rvt_ibport *rvp = rvt_to_iport(qp);
+
+		qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
+		rvp->n_rc_crwaits++;
+		return false;
+	}
+	return true;
+}
+
 struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
 				     u64 v,
 				     void (*cb)(struct rvt_qp *qp, u64 v));
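
Callers must hold qp->s_lock (the helper asserts it). When credits are exhausted it both sets RVT_S_WAIT_SSN_CREDIT and bumps the new n_rc_crwaits counter, so a provider's RC request builder shrinks to a single branch, as in the qib hunks earlier in this diff:

	case IB_WR_SEND:
		/* If no credit, return. */
		if (!rvt_rc_credit_avail(qp, wqe))
			goto bail;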


@@ -6,6 +6,8 @@
 #ifndef _RDMA_SIGNATURE_H_
 #define _RDMA_SIGNATURE_H_

+#include <linux/types.h>
+
 enum ib_signature_prot_cap {
 	IB_PROT_T10DIF_TYPE_1 = 1,
 	IB_PROT_T10DIF_TYPE_2 = 1 << 1,


@@ -43,6 +43,7 @@ enum mlx5_ib_uapi_flow_table_type {
 	MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX = 0x0,
 	MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX = 0x1,
 	MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB = 0x2,
+	MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX = 0x3,
 };

 enum mlx5_ib_uapi_flow_action_packet_reformat_type {


@@ -685,9 +685,9 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
 		goto error;

 	/* Create the Completion Queue */
-	rdma->cq = ib_alloc_cq(rdma->cm_id->device, client,
-			opts.sq_depth + opts.rq_depth + 1,
-			0, IB_POLL_SOFTIRQ);
+	rdma->cq = ib_alloc_cq_any(rdma->cm_id->device, client,
+				   opts.sq_depth + opts.rq_depth + 1,
+				   IB_POLL_SOFTIRQ);
 	if (IS_ERR(rdma->cq))
 		goto error;


@@ -454,14 +454,14 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 		dprintk("svcrdma: error creating PD for connect request\n");
 		goto errout;
 	}
-	newxprt->sc_sq_cq = ib_alloc_cq(dev, newxprt, newxprt->sc_sq_depth,
-					0, IB_POLL_WORKQUEUE);
+	newxprt->sc_sq_cq = ib_alloc_cq_any(dev, newxprt, newxprt->sc_sq_depth,
+					    IB_POLL_WORKQUEUE);
 	if (IS_ERR(newxprt->sc_sq_cq)) {
 		dprintk("svcrdma: error creating SQ CQ for connect request\n");
 		goto errout;
 	}
-	newxprt->sc_rq_cq = ib_alloc_cq(dev, newxprt, rq_depth,
-					0, IB_POLL_WORKQUEUE);
+	newxprt->sc_rq_cq =
+		ib_alloc_cq_any(dev, newxprt, rq_depth, IB_POLL_WORKQUEUE);
 	if (IS_ERR(newxprt->sc_rq_cq)) {
 		dprintk("svcrdma: error creating RQ CQ for connect request\n");
 		goto errout;


@@ -521,18 +521,17 @@ int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
 	init_waitqueue_head(&ep->rep_connect_wait);
 	ep->rep_receive_count = 0;

-	sendcq = ib_alloc_cq(ia->ri_id->device, NULL,
-			     ep->rep_attr.cap.max_send_wr + 1,
-			     ia->ri_id->device->num_comp_vectors > 1 ? 1 : 0,
-			     IB_POLL_WORKQUEUE);
+	sendcq = ib_alloc_cq_any(ia->ri_id->device, NULL,
+				 ep->rep_attr.cap.max_send_wr + 1,
+				 IB_POLL_WORKQUEUE);
 	if (IS_ERR(sendcq)) {
 		rc = PTR_ERR(sendcq);
 		goto out1;
 	}

-	recvcq = ib_alloc_cq(ia->ri_id->device, NULL,
-			     ep->rep_attr.cap.max_recv_wr + 1,
-			     0, IB_POLL_WORKQUEUE);
+	recvcq = ib_alloc_cq_any(ia->ri_id->device, NULL,
+				 ep->rep_attr.cap.max_recv_wr + 1,
+				 IB_POLL_WORKQUEUE);
 	if (IS_ERR(recvcq)) {
 		rc = PTR_ERR(recvcq);
 		goto out2;