Second RDMA 5.7 rc pull request
A few minor bug fixes for user visible defects, and one regression:

- Various bugs from static checkers and syzkaller
- Add missing error checking in mlx4
- Prevent RTNL lock recursion in i40iw
- Fix segfault in cxgb4 in peer abort cases
- Fix a regression added in 5.7 where the IB_EVENT_DEVICE_FATAL could be
  lost, and wasn't delivered to all the FDs

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEfB7FMLh+8QxL+6i3OG33FX4gmxoFAl6+5V8ACgkQOG33FX4g
mxo/Zw//QlrhDSM4DG4hZsFhFX3i/vT7xbxkNTp/U1+Vue9cKcUlotNOzQ79lQur
/12QEiDffLZNsy2jCD1nxf/rEo9NDc8iSHPcryQ3aGa2K3XJ3QQ2ZhHBzfFU9GVP
GQYlGt9uB4t6LEcU4P/ZCdf1nmkfYFvNDfmveVWrPLasABK7WqxCBXaqbLed+0ca
lGhFBGLdGNJqyK2BkPCtXr9XYTjzZhW0lJqMuex0YD7cIAFfn+qbzvLQheBy/mhH
o9n28GQbIgpNXvYz2HvUkTfiwDLylFaNVBYVctnm10cbNtLHv2J0bBQQb2ZaXUAM
xt54AQ2QAMHKC2h6sUqyDAmWKfPEOnxZ9LycYDa0ZaIvm/uK/jiEOXE9XbQeiOKF
eHiyuDg0EB4AVcYlNSm7roHdbh3rAluHerGe4Kv3efF/b1Zt2mtTW7q/XuROfSeT
WBnALyl7RurnSG0HY9hyUMak65JwolgqPdBDpzRjqIF5jeKW0nn8MsbGTPGWlcyN
zLu5IQn/vv+sfISav1cVMRoijboe85+3SOVtMBQOJ2m7EkdzRyRSQh3oG1556n2y
oqseNSQFKNFA1p4hEeTf8BVEqVX3gj50ykj+2g1HyxH3sB+iE3+EctnHNhfd3IKI
n6e/67Nm0ub/WM1OXeuXBmW0NvsTt0aPBfsp4s+oq3eICD4YbFE=
=R5BJ
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "A few minor bug fixes for user visible defects, and one regression:

  - Various bugs from static checkers and syzkaller

  - Add missing error checking in mlx4

  - Prevent RTNL lock recursion in i40iw

  - Fix segfault in cxgb4 in peer abort cases

  - Fix a regression added in 5.7 where the IB_EVENT_DEVICE_FATAL could
    be lost, and wasn't delivered to all the FDs"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/uverbs: Move IB_EVENT_DEVICE_FATAL to destroy_uobj
  RDMA/uverbs: Do not discard the IB_EVENT_DEVICE_FATAL event
  RDMA/iw_cxgb4: Fix incorrect function parameters
  RDMA/core: Fix double put of resource
  IB/core: Fix potential NULL pointer dereference in pkey cache
  IB/hfi1: Fix another case where pq is left on waitlist
  IB/i40iw: Remove bogus call to netdev_master_upper_dev_get()
  IB/mlx4: Test return value of calls to ib_get_cached_pkey
  RDMA/rxe: Always return ERR_PTR from rxe_create_mmap_info()
  i40iw: Fix error handling in i40iw_manage_arp_cache()
commit d5dfe4f1b4
@@ -1553,8 +1553,11 @@ int ib_cache_setup_one(struct ib_device *device)
         if (err)
                 return err;
 
-        rdma_for_each_port (device, p)
-                ib_cache_update(device, p, true);
+        rdma_for_each_port (device, p) {
+                err = ib_cache_update(device, p, true);
+                if (err)
+                        return err;
+        }
 
         return 0;
 }
@@ -1292,11 +1292,10 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
         has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);
 
         ret = fill_func(msg, has_cap_net_admin, res, port);
-
-        rdma_restrack_put(res);
         if (ret)
                 goto err_free;
 
+        rdma_restrack_put(res);
         nlmsg_end(msg, nlh);
         ib_device_put(device);
         return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
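The defect here is a reference dropped twice: the old code put the restrack reference before checking `ret`, and the error path reached through `err_free` drops it again during unwind. A minimal user-space model of that pattern, with hypothetical names rather than the restrack API:

#include <assert.h>

struct tracked { int refcount; };

static void track_put(struct tracked *t)
{
        assert(t->refcount > 0);        /* a double put trips this assert */
        t->refcount--;
}

static int doit(struct tracked *t, int fill_ret)
{
        /* Buggy shape (before the fix):
         *        track_put(t);
         *        if (fill_ret)
         *                goto err;     // error path calls track_put() again
         */
        if (fill_ret)
                goto err;
        track_put(t);                   /* success path: put exactly once */
        return 0;
err:
        track_put(t);                   /* error unwind owns the reference */
        return fill_ret;
}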
@@ -459,7 +459,8 @@ alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
         struct ib_uobject *uobj;
         struct file *filp;
 
-        if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release))
+        if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release &&
+                    fd_type->fops->release != &uverbs_async_event_release))
                 return ERR_PTR(-EINVAL);
 
         new_fd = get_unused_fd_flags(O_CLOEXEC);
@@ -219,6 +219,7 @@ void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue);
 void ib_uverbs_init_async_event_file(struct ib_uverbs_async_event_file *ev_file);
 void ib_uverbs_free_event_queue(struct ib_uverbs_event_queue *event_queue);
 void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res);
+int uverbs_async_event_release(struct inode *inode, struct file *filp);
 
 int ib_alloc_ucontext(struct uverbs_attr_bundle *attrs);
 int ib_init_ucontext(struct uverbs_attr_bundle *attrs);
@@ -227,6 +228,9 @@ void ib_uverbs_release_ucq(struct ib_uverbs_completion_event_file *ev_file,
                            struct ib_ucq_object *uobj);
 void ib_uverbs_release_uevent(struct ib_uevent_object *uobj);
 void ib_uverbs_release_file(struct kref *ref);
+void ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
+                             __u64 element, __u64 event,
+                             struct list_head *obj_list, u32 *counter);
 
 void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context);
 void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr);
@@ -346,7 +346,7 @@ const struct file_operations uverbs_async_event_fops = {
         .owner = THIS_MODULE,
         .read = ib_uverbs_async_event_read,
         .poll = ib_uverbs_async_event_poll,
-        .release = uverbs_uobject_fd_release,
+        .release = uverbs_async_event_release,
         .fasync = ib_uverbs_async_event_fasync,
         .llseek = no_llseek,
 };
@@ -386,10 +386,9 @@ void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
         kill_fasync(&ev_queue->async_queue, SIGIO, POLL_IN);
 }
 
-static void
-ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
-                        __u64 element, __u64 event, struct list_head *obj_list,
-                        u32 *counter)
+void ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
+                             __u64 element, __u64 event,
+                             struct list_head *obj_list, u32 *counter)
 {
         struct ib_uverbs_event *entry;
         unsigned long flags;
@@ -1187,9 +1186,6 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
          */
         mutex_unlock(&uverbs_dev->lists_mutex);
 
-        ib_uverbs_async_handler(READ_ONCE(file->async_file), 0,
-                                IB_EVENT_DEVICE_FATAL, NULL, NULL);
-
         uverbs_destroy_ufile_hw(file, RDMA_REMOVE_DRIVER_REMOVE);
         kref_put(&file->ref, ib_uverbs_release_file);
 
@@ -26,10 +26,38 @@ static int uverbs_async_event_destroy_uobj(struct ib_uobject *uobj,
                 container_of(uobj, struct ib_uverbs_async_event_file, uobj);
 
         ib_unregister_event_handler(&event_file->event_handler);
-        ib_uverbs_free_event_queue(&event_file->ev_queue);
+
+        if (why == RDMA_REMOVE_DRIVER_REMOVE)
+                ib_uverbs_async_handler(event_file, 0, IB_EVENT_DEVICE_FATAL,
+                                        NULL, NULL);
         return 0;
 }
 
+int uverbs_async_event_release(struct inode *inode, struct file *filp)
+{
+        struct ib_uverbs_async_event_file *event_file;
+        struct ib_uobject *uobj = filp->private_data;
+        int ret;
+
+        if (!uobj)
+                return uverbs_uobject_fd_release(inode, filp);
+
+        event_file =
+                container_of(uobj, struct ib_uverbs_async_event_file, uobj);
+
+        /*
+         * The async event FD has to deliver IB_EVENT_DEVICE_FATAL even after
+         * disassociation, so cleaning the event list must only happen after
+         * release. The user knows it has reached the end of the event stream
+         * when it sees IB_EVENT_DEVICE_FATAL.
+         */
+        uverbs_uobject_get(uobj);
+        ret = uverbs_uobject_fd_release(inode, filp);
+        ib_uverbs_free_event_queue(&event_file->ev_queue);
+        uverbs_uobject_put(uobj);
+        return ret;
+}
+
 DECLARE_UVERBS_NAMED_METHOD(
         UVERBS_METHOD_ASYNC_EVENT_ALLOC,
         UVERBS_ATTR_FD(UVERBS_ATTR_ASYNC_EVENT_ALLOC_FD_HANDLE,
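The ordering comment above is about what userspace observes. A sketch of a consumer draining the FD, assuming the event layout of struct ib_uverbs_async_event_desc from the uverbs ABI and that IB_EVENT_DEVICE_FATAL has value 8 in the ib_event_type enum (both assumptions worth checking against the headers):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Mirrors struct ib_uverbs_async_event_desc from <rdma/ib_user_verbs.h>. */
struct async_event_desc {
        uint64_t element;
        uint32_t event_type;
        uint32_t reserved;
};

#define EVENT_DEVICE_FATAL 8    /* assumed value of IB_EVENT_DEVICE_FATAL */

/* Drain the async event FD; after disassociation the queue must still
 * hold IB_EVENT_DEVICE_FATAL, which marks the end of the stream. */
static void drain_async_events(int fd)
{
        struct async_event_desc ev;

        while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
                printf("event %u on element %llu\n", ev.event_type,
                       (unsigned long long)ev.element);
                if (ev.event_type == EVENT_DEVICE_FATAL)
                        break;
        }
}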
@@ -2891,8 +2891,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
                         srqidx = ABORT_RSS_SRQIDX_G(
                                         be32_to_cpu(req->srqidx_status));
                         if (srqidx) {
-                                complete_cached_srq_buffers(ep,
-                                                            req->srqidx_status);
+                                complete_cached_srq_buffers(ep, srqidx);
                         } else {
                                 /* Hold ep ref until finish_peer_abort() */
                                 c4iw_get_ep(&ep->com);
@@ -3878,8 +3877,8 @@ static int read_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
                 return 0;
         }
 
-        ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_W,
+        ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_M,
                                           TCB_RQ_START_S);
 cleanup:
         pr_debug("ep %p tid %u %016x\n", ep, ep->hwtid, ep->srqe_idx);
 
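The cxgb4 typo passed the word index (TCB_RQ_START_W) where the mask (TCB_RQ_START_M) belongs, which type-checks but extracts garbage. A sketch of the _W/_M/_S extraction convention with hypothetical names, not the driver's t4_tcb_get_field32() itself:

#include <stdint.h>

/* Hypothetical field definition: word index, mask, shift. */
#define RQ_START_W      3               /* which 32-bit word holds the field */
#define RQ_START_M      0xffffU         /* mask selecting the field bits */
#define RQ_START_S      8               /* bit offset within the word */

static uint32_t get_field32(const uint32_t *words, unsigned int w,
                            uint32_t m, unsigned int s)
{
        return (words[w] >> s) & m;
}

/* get_field32(tcb, RQ_START_W, RQ_START_W, RQ_START_S) compiles fine but
 * masks with the word index (3) instead of 0xffff, yielding a bogus value. */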
@@ -589,10 +589,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
 
         set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
         pq->state = SDMA_PKT_Q_ACTIVE;
-        /* Send the first N packets in the request to buy us some time */
-        ret = user_sdma_send_pkts(req, pcount);
-        if (unlikely(ret < 0 && ret != -EBUSY))
-                goto free_req;
 
         /*
          * This is a somewhat blocking send implementation.
@@ -1987,7 +1987,6 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
         struct rtable *rt;
         struct neighbour *neigh;
         int rc = arpindex;
-        struct net_device *netdev = iwdev->netdev;
         __be32 dst_ipaddr = htonl(dst_ip);
         __be32 src_ipaddr = htonl(src_ip);
 
@@ -1997,9 +1996,6 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
                 return rc;
         }
 
-        if (netif_is_bond_slave(netdev))
-                netdev = netdev_master_upper_dev_get(netdev);
-
         neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
 
         rcu_read_lock();
@@ -2065,7 +2061,6 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
 {
         struct neighbour *neigh;
         int rc = arpindex;
-        struct net_device *netdev = iwdev->netdev;
         struct dst_entry *dst;
         struct sockaddr_in6 dst_addr;
         struct sockaddr_in6 src_addr;
 
@@ -2086,9 +2081,6 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
                 return rc;
         }
 
-        if (netif_is_bond_slave(netdev))
-                netdev = netdev_master_upper_dev_get(netdev);
-
         neigh = dst_neigh_lookup(dst, dst_addr.sin6_addr.in6_u.u6_addr32);
 
         rcu_read_lock();
@@ -534,7 +534,7 @@ void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
         int arp_index;
 
         arp_index = i40iw_arp_table(iwdev, ip_addr, ipv4, mac_addr, action);
-        if (arp_index == -1)
+        if (arp_index < 0)
                 return;
         cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
         if (!cqp_request)
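The `== -1` test assumed "not found" is the only failure mode, but an index-allocating lookup can fail with other negative values too, so `< 0` is the correct guard. A hedged illustration with stand-in names, not the driver's code:

#include <errno.h>

/* Returns a valid index (>= 0) or a negative value on failure. */
static int arp_table_lookup(int found, int table_full)
{
        if (table_full)
                return -ENOMEM;         /* negative, but not -1 */
        if (!found)
                return -1;              /* classic "not found" */
        return 7;                       /* a valid slot */
}

static void manage_cache(int found, int table_full)
{
        int arp_index = arp_table_lookup(found, table_full);

        if (arp_index < 0)              /* "== -1" would let -ENOMEM through */
                return;
        /* ... use arp_index ... */
}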
@@ -2891,6 +2891,7 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
         int send_size;
         int header_size;
         int spc;
+        int err;
         int i;
 
         if (wr->wr.opcode != IB_WR_SEND)
@@ -2925,7 +2926,9 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
 
         sqp->ud_header.lrh.virtual_lane = 0;
         sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
-        ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
+        err = ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
+        if (err)
+                return err;
         sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
         if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
                 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
@@ -3212,9 +3215,14 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr,
         }
         sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
         if (!sqp->qp.ibqp.qp_num)
-                ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
+                err = ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index,
+                                         &pkey);
         else
-                ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey);
+                err = ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index,
+                                         &pkey);
+        if (err)
+                return err;
+
         sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
         sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
         sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
@@ -151,7 +151,7 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size,
 
         ip = kmalloc(sizeof(*ip), GFP_KERNEL);
         if (!ip)
-                return NULL;
+                return ERR_PTR(-ENOMEM);
 
         size = PAGE_ALIGN(size);
 
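Returning ERR_PTR(-ENOMEM) instead of NULL lets callers recover the precise errno from the pointer itself. A user-space model of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers (simplified; the real ones live in <linux/err.h>):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095  /* errnos occupy the top page of the address space */

static void *err_ptr(long error) { return (void *)error; }
static long ptr_err(const void *ptr) { return (long)ptr; }
static int is_err(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *create_info(size_t size)
{
        void *ip = malloc(size);

        if (!ip)
                return err_ptr(-ENOMEM);        /* was "return NULL" pre-fix */
        return ip;
}

int main(void)
{
        void *ip = create_info(64);

        if (is_err(ip))
                return (int)-ptr_err(ip);       /* propagate the real errno */
        free(ip);
        return 0;
}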
@@ -45,12 +45,15 @@ int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
 
         if (outbuf) {
                 ip = rxe_create_mmap_info(rxe, buf_size, udata, buf);
-                if (!ip)
+                if (IS_ERR(ip)) {
+                        err = PTR_ERR(ip);
                         goto err1;
+                }
 
-                err = copy_to_user(outbuf, &ip->info, sizeof(ip->info));
-                if (err)
+                if (copy_to_user(outbuf, &ip->info, sizeof(ip->info))) {
+                        err = -EFAULT;
                         goto err2;
+                }
 
                 spin_lock_bh(&rxe->pending_lock);
                 list_add(&ip->pending_mmaps, &rxe->pending_mmaps);
@@ -64,7 +67,7 @@ int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
 err2:
         kfree(ip);
 err1:
-        return -EINVAL;
+        return err;
 }
 
 inline void rxe_queue_reset(struct rxe_queue *q)
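Two distinct bugs fall out of this hunk: copy_to_user() returns the number of bytes it failed to copy, never a negative errno, so its result cannot be returned or tested as an error code; and the shared error labels must hand back the real cause instead of a blanket -EINVAL. A kernel-style sketch of the corrected shape, illustrative rather than the rxe code:

/* copy_to_user() returns 0 on success or the number of uncopied bytes,
 * so a nonzero result must be translated into -EFAULT by hand. */
static int publish_info(struct mminfo __user *outbuf,
                        const struct mminfo *info)
{
        if (copy_to_user(outbuf, info, sizeof(*info)))
                return -EFAULT;         /* never "return copy_to_user(...)" */
        return 0;
}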