From 6e04b103568983bd699fac96b80a9b96ede68118 Mon Sep 17 00:00:00 2001
From: Devesh Sharma
Date: Fri, 25 May 2018 12:01:21 -0400
Subject: [PATCH 1/3] RDMA/bnxt_re: Fix broken RoCE driver due to recent L2 driver changes

The recent changes in Broadcom's ethernet driver (L2 driver) broke RoCE
functionality in terms of MSIx vector allocation and de-allocation. The
L2 driver may initiate MSIx vector reallocation depending upon requests
coming from the administrator. In such cases the L2 driver needs to
free up all the previously allocated MSIx vectors and then
reallocate/initialize them. If the RoCE driver is loaded while this
reshuffling is attempted, the kernel crashes, because the RoCE driver
would still be holding the MSIx vectors that the L2 driver tries to
free.

Change the RoCE driver to fix the crashes described above. As part of
the solution, the L2 driver tells the RoCE driver to release its MSIx
vectors whenever there is a need. When the RoCE driver gets that
message, it syncs up with all running tasklets and IRQ handlers and
releases the vectors. The L2 driver later sends another message to the
RoCE driver to resume the MSIx vectors. The L2 driver guarantees that
the RoCE vectors do not change during the reshuffling.

Fixes: ec86f14ea506 ("bnxt_en: Add ULP calls to stop and restart IRQs.")
Fixes: 08654eb213a8 ("bnxt_en: Change IRQ assignment for RDMA driver.")
Signed-off-by: Devesh Sharma
Signed-off-by: Jason Gunthorpe
---
 drivers/infiniband/hw/bnxt_re/main.c       | 55 ++++++++++++-
 drivers/infiniband/hw/bnxt_re/qplib_fp.c   | 96 ++++++++++++++--------
 drivers/infiniband/hw/bnxt_re/qplib_fp.h   |  3 +
 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 61 ++++++++++----
 drivers/infiniband/hw/bnxt_re/qplib_rcfw.h |  3 +
 5 files changed, 164 insertions(+), 54 deletions(-)

diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index f6c739ec8b62..20b9f31052bf 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -185,12 +185,65 @@ static void bnxt_re_shutdown(void *p)
 	bnxt_re_ib_unreg(rdev, false);
 }
 
+static void bnxt_re_stop_irq(void *handle)
+{
+	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
+	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
+	struct bnxt_qplib_nq *nq;
+	int indx;
+
+	for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
+		nq = &rdev->nq[indx - 1];
+		bnxt_qplib_nq_stop_irq(nq, false);
+	}
+
+	bnxt_qplib_rcfw_stop_irq(rcfw, false);
+}
+
+static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
+{
+	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
+	struct bnxt_msix_entry *msix_ent = rdev->msix_entries;
+	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
+	struct bnxt_qplib_nq *nq;
+	int indx, rc;
+
+	if (!ent) {
+		/* Not setting the f/w timeout bit in rcfw.
+		 * During the driver unload the first command
+		 * to f/w will timeout and that will set the
+		 * timeout bit.
+		 */
+		dev_err(rdev_to_dev(rdev), "Failed to re-start IRQs\n");
+		return;
+	}
+
+	/* Vectors may change after restart, so update with new vectors
+	 * in device structure.
+	 */
+	for (indx = 0; indx < rdev->num_msix; indx++)
+		rdev->msix_entries[indx].vector = ent[indx].vector;
+
+	bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
+				  false);
+	for (indx = BNXT_RE_NQ_IDX ; indx < rdev->num_msix; indx++) {
+		nq = &rdev->nq[indx - 1];
+		rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
+					     msix_ent[indx].vector, false);
+		if (rc)
+			dev_warn(rdev_to_dev(rdev),
+				 "Failed to reinit NQ index %d\n", indx - 1);
+	}
+}
+
 static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
 	.ulp_async_notifier = NULL,
 	.ulp_stop = bnxt_re_stop,
 	.ulp_start = bnxt_re_start,
 	.ulp_sriov_config = bnxt_re_sriov_config,
-	.ulp_shutdown = bnxt_re_shutdown
+	.ulp_shutdown = bnxt_re_shutdown,
+	.ulp_irq_stop = bnxt_re_stop_irq,
+	.ulp_irq_restart = bnxt_re_start_irq
 };
 
 /* RoCE -> Net driver */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 3a78faba8d91..50d8f1fc98d5 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -336,22 +336,32 @@ static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
 	return IRQ_HANDLED;
 }
 
+void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
+{
+	tasklet_disable(&nq->worker);
+	/* Mask h/w interrupt */
+	NQ_DB(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
+	/* Sync with last running IRQ handler */
+	synchronize_irq(nq->vector);
+	if (kill)
+		tasklet_kill(&nq->worker);
+	if (nq->requested) {
+		irq_set_affinity_hint(nq->vector, NULL);
+		free_irq(nq->vector, nq);
+		nq->requested = false;
+	}
+}
+
 void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
 {
 	if (nq->cqn_wq) {
 		destroy_workqueue(nq->cqn_wq);
 		nq->cqn_wq = NULL;
 	}
 
-	/* Make sure the HW is stopped! */
-	synchronize_irq(nq->vector);
-	tasklet_disable(&nq->worker);
-	tasklet_kill(&nq->worker);
-	if (nq->requested) {
-		irq_set_affinity_hint(nq->vector, NULL);
-		free_irq(nq->vector, nq);
-		nq->requested = false;
-	}
+	/* Make sure the HW is stopped! */
+	bnxt_qplib_nq_stop_irq(nq, true);
+
 	if (nq->bar_reg_iomem)
 		iounmap(nq->bar_reg_iomem);
 	nq->bar_reg_iomem = NULL;
@@ -361,6 +371,40 @@ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
 	nq->vector = 0;
 }
 
+int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
+			    int msix_vector, bool need_init)
+{
+	int rc;
+
+	if (nq->requested)
+		return -EFAULT;
+
+	nq->vector = msix_vector;
+	if (need_init)
+		tasklet_init(&nq->worker, bnxt_qplib_service_nq,
+			     (unsigned long)nq);
+	else
+		tasklet_enable(&nq->worker);
+
+	snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
+	rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
+	if (rc)
+		return rc;
+
+	cpumask_clear(&nq->mask);
+	cpumask_set_cpu(nq_indx, &nq->mask);
+	rc = irq_set_affinity_hint(nq->vector, &nq->mask);
+	if (rc) {
+		dev_warn(&nq->pdev->dev,
+			 "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
+			 nq->vector, nq_indx);
+	}
+	nq->requested = true;
+	NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
+
+	return rc;
+}
+
 int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
 			 int nq_idx, int msix_vector, int bar_reg_offset,
 			 int (*cqn_handler)(struct bnxt_qplib_nq *nq,
@@ -372,41 +416,17 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
 	resource_size_t nq_base;
 	int rc = -1;
 
-	nq->pdev = pdev;
-	nq->vector = msix_vector;
 	if (cqn_handler)
 		nq->cqn_handler = cqn_handler;
 	if (srqn_handler)
 		nq->srqn_handler = srqn_handler;
-	tasklet_init(&nq->worker, bnxt_qplib_service_nq, (unsigned long)nq);
-
 	/* Have a task to schedule CQ notifiers in post send case */
 	nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
 	if (!nq->cqn_wq)
-		goto fail;
+		return -ENOMEM;
 
-	nq->requested = false;
-	memset(nq->name, 0, 32);
-	sprintf(nq->name, "bnxt_qplib_nq-%d", nq_idx);
-	rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
-	if (rc) {
-		dev_err(&nq->pdev->dev,
-			"Failed to request IRQ for NQ: %#x", rc);
-		goto fail;
-	}
-
-	cpumask_clear(&nq->mask);
-	cpumask_set_cpu(nq_idx, &nq->mask);
-	rc = irq_set_affinity_hint(nq->vector, &nq->mask);
-	if (rc) {
-		dev_warn(&nq->pdev->dev,
-			 "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
-			 nq->vector, nq_idx);
-	}
-
-	nq->requested = true;
 	nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
 	nq->bar_reg_off = bar_reg_offset;
 	nq_base = pci_resource_start(pdev, nq->bar_reg);
@@ -419,7 +439,13 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
 		rc = -ENOMEM;
 		goto fail;
 	}
-	NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
+
+	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
+	if (rc) {
+		dev_err(&nq->pdev->dev,
+			"QPLIB: Failed to request irq for nq-idx %d", nq_idx);
+		goto fail;
+	}
 
 	return 0;
 fail:
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index ade9f13c0fd1..72352ca80ace 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -467,7 +467,10 @@ struct bnxt_qplib_nq_work {
 	struct bnxt_qplib_cq *cq;
 };
 
+void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill);
 void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
+int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
+			    int msix_vector, bool need_init);
 int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
 			 int nq_idx, int msix_vector, int bar_reg_offset,
 			 int (*cqn_handler)(struct bnxt_qplib_nq *nq,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 80027a494730..2852d350ada1 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -582,19 +582,29 @@ fail:
 	return -ENOMEM;
 }
 
-void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
+void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
 {
-	unsigned long indx;
-
-	/* Make sure the HW channel is stopped! */
-	synchronize_irq(rcfw->vector);
 	tasklet_disable(&rcfw->worker);
-	tasklet_kill(&rcfw->worker);
+	/* Mask h/w interrupts */
+	CREQ_DB(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
+		rcfw->creq.max_elements);
+	/* Sync with last running IRQ-handler */
+	synchronize_irq(rcfw->vector);
+	if (kill)
+		tasklet_kill(&rcfw->worker);
 
 	if (rcfw->requested) {
 		free_irq(rcfw->vector, rcfw);
 		rcfw->requested = false;
 	}
+}
+
+void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
+{
+	unsigned long indx;
+
+	bnxt_qplib_rcfw_stop_irq(rcfw, true);
+
 	if (rcfw->cmdq_bar_reg_iomem)
 		iounmap(rcfw->cmdq_bar_reg_iomem);
 	rcfw->cmdq_bar_reg_iomem = NULL;
@@ -614,6 +624,31 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
 	rcfw->vector = 0;
 }
 
+int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
+			      bool need_init)
+{
+	int rc;
+
+	if (rcfw->requested)
+		return -EFAULT;
+
+	rcfw->vector = msix_vector;
+	if (need_init)
+		tasklet_init(&rcfw->worker,
+			     bnxt_qplib_service_creq, (unsigned long)rcfw);
+	else
+		tasklet_enable(&rcfw->worker);
+	rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
+			 "bnxt_qplib_creq", rcfw);
+	if (rc)
+		return rc;
+	rcfw->requested = true;
+	CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
+		      rcfw->creq.max_elements);
+
+	return 0;
+}
+
 int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
 				   struct bnxt_qplib_rcfw *rcfw,
 				   int msix_vector,
@@ -675,27 +710,17 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
 	rcfw->creq_qp_event_processed = 0;
 	rcfw->creq_func_event_processed = 0;
 
-	rcfw->vector = msix_vector;
 	if (aeq_handler)
 		rcfw->aeq_handler = aeq_handler;
+	init_waitqueue_head(&rcfw->waitq);
 
-	tasklet_init(&rcfw->worker, bnxt_qplib_service_creq,
-		     (unsigned long)rcfw);
-
-	rcfw->requested = false;
-	rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
-			 "bnxt_qplib_creq", rcfw);
+	rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);
 	if (rc) {
 		dev_err(&rcfw->pdev->dev,
 			"QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc);
 		bnxt_qplib_disable_rcfw_channel(rcfw);
 		return rc;
 	}
-	rcfw->requested = true;
-
-	init_waitqueue_head(&rcfw->waitq);
-
-	CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, 0, rcfw->creq.max_elements);
 
 	init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
 	init.cmdq_size_cmdq_lvl = cpu_to_le16(
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index c7cce2e4185e..46416dfe8830 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -195,7 +195,10 @@ struct bnxt_qplib_rcfw {
 void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
 int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
 				  struct bnxt_qplib_rcfw *rcfw, int qp_tbl_sz);
+void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill);
 void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
+int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
+			      bool need_init);
 int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
 				   struct bnxt_qplib_rcfw *rcfw,
 				   int msix_vector,

From 533d1daea8d8a389b37207ad7b50c4e750969231 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Fri, 25 May 2018 23:29:59 +0200
Subject: [PATCH 2/3] IB: Revert "remove redundant INFINIBAND kconfig dependencies"

Several subsystems depend on INFINIBAND_ADDR_TRANS, which in turn
depends on INFINIBAND. However, with CONFIG_INFINIBAND=m, this leads to
a link error when another driver using it is built-in. The
INFINIBAND_ADDR_TRANS dependency is insufficient here as this is a
'bool' symbol that does not force anything to be a module in turn.

fs/cifs/smbdirect.o: In function `smbd_disconnect_rdma_work':
smbdirect.c:(.text+0x1e4): undefined reference to `rdma_disconnect'
net/9p/trans_rdma.o: In function `rdma_request':
trans_rdma.c:(.text+0x7bc): undefined reference to `rdma_disconnect'
net/9p/trans_rdma.o: In function `rdma_destroy_trans':
trans_rdma.c:(.text+0x830): undefined reference to `ib_destroy_qp'
trans_rdma.c:(.text+0x858): undefined reference to `ib_dealloc_pd'

Fixes: 9533b292a7ac ("IB: remove redundant INFINIBAND kconfig dependencies")
Signed-off-by: Arnd Bergmann
Acked-by: Greg Thelen
Signed-off-by: Jason Gunthorpe
---
 drivers/infiniband/ulp/srpt/Kconfig | 2 +-
 drivers/nvme/host/Kconfig           | 2 +-
 drivers/nvme/target/Kconfig         | 2 +-
 drivers/staging/lustre/lnet/Kconfig | 2 +-
 fs/cifs/Kconfig                     | 2 +-
 net/9p/Kconfig                      | 2 +-
 net/rds/Kconfig                     | 2 +-
 net/sunrpc/Kconfig                  | 2 +-
 8 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/drivers/infiniband/ulp/srpt/Kconfig b/drivers/infiniband/ulp/srpt/Kconfig
index 25bf6955b6d0..fb8b7182f05e 100644
--- a/drivers/infiniband/ulp/srpt/Kconfig
+++ b/drivers/infiniband/ulp/srpt/Kconfig
@@ -1,6 +1,6 @@
 config INFINIBAND_SRPT
 	tristate "InfiniBand SCSI RDMA Protocol target support"
-	depends on INFINIBAND_ADDR_TRANS && TARGET_CORE
+	depends on INFINIBAND && INFINIBAND_ADDR_TRANS && TARGET_CORE
 	---help---
 
 	  Support for the SCSI RDMA Protocol (SRP) Target driver. The
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index dbb7464c018c..88a8b5916624 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -27,7 +27,7 @@ config NVME_FABRICS
 
 config NVME_RDMA
 	tristate "NVM Express over Fabrics RDMA host driver"
-	depends on INFINIBAND_ADDR_TRANS && BLOCK
+	depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK
 	select NVME_CORE
 	select NVME_FABRICS
 	select SG_POOL
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 7595664ee753..3c7b61ddb0d1 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -27,7 +27,7 @@ config NVME_TARGET_LOOP
 
 config NVME_TARGET_RDMA
 	tristate "NVMe over Fabrics RDMA target support"
-	depends on INFINIBAND_ADDR_TRANS
+	depends on INFINIBAND && INFINIBAND_ADDR_TRANS
 	depends on NVME_TARGET
 	select SGL_ALLOC
 	help
diff --git a/drivers/staging/lustre/lnet/Kconfig b/drivers/staging/lustre/lnet/Kconfig
index f3b1ad4bd3dc..ad049e6f24e4 100644
--- a/drivers/staging/lustre/lnet/Kconfig
+++ b/drivers/staging/lustre/lnet/Kconfig
@@ -34,7 +34,7 @@ config LNET_SELFTEST
 
 config LNET_XPRT_IB
 	tristate "LNET infiniband support"
-	depends on LNET && PCI && INFINIBAND_ADDR_TRANS
+	depends on LNET && PCI && INFINIBAND && INFINIBAND_ADDR_TRANS
 	default LNET && INFINIBAND
 	help
 	  This option allows the LNET users to use infiniband as an
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index d61e2de8d0eb..5f132d59dfc2 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -197,7 +197,7 @@ config CIFS_SMB311
 
 config CIFS_SMB_DIRECT
 	bool "SMB Direct support (Experimental)"
-	depends on CIFS=m && INFINIBAND_ADDR_TRANS || CIFS=y && INFINIBAND_ADDR_TRANS=y
+	depends on CIFS=m && INFINIBAND && INFINIBAND_ADDR_TRANS || CIFS=y && INFINIBAND=y && INFINIBAND_ADDR_TRANS=y
 	help
 	  Enables SMB Direct experimental support for SMB 3.0, 3.02 and 3.1.1.
 	  SMB Direct allows transferring SMB packets over RDMA. If unsure,
diff --git a/net/9p/Kconfig b/net/9p/Kconfig
index 46c39f7da444..e6014e0e51f7 100644
--- a/net/9p/Kconfig
+++ b/net/9p/Kconfig
@@ -32,7 +32,7 @@ config NET_9P_XEN
 
 
 config NET_9P_RDMA
-	depends on INET && INFINIBAND_ADDR_TRANS
+	depends on INET && INFINIBAND && INFINIBAND_ADDR_TRANS
 	tristate "9P RDMA Transport (Experimental)"
 	help
 	  This builds support for an RDMA transport.
diff --git a/net/rds/Kconfig b/net/rds/Kconfig
index 1a31502ee7db..bffde4b46c5d 100644
--- a/net/rds/Kconfig
+++ b/net/rds/Kconfig
@@ -8,7 +8,7 @@ config RDS
 
 config RDS_RDMA
 	tristate "RDS over Infiniband"
-	depends on RDS && INFINIBAND_ADDR_TRANS
+	depends on RDS && INFINIBAND && INFINIBAND_ADDR_TRANS
 	---help---
 	  Allow RDS to use Infiniband as a transport.
 	  This transport supports RDMA operations.
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
index 6358e5271070..ac09ca803296 100644
--- a/net/sunrpc/Kconfig
+++ b/net/sunrpc/Kconfig
@@ -50,7 +50,7 @@ config SUNRPC_DEBUG
 
 config SUNRPC_XPRT_RDMA
 	tristate "RPC-over-RDMA transport"
-	depends on SUNRPC && INFINIBAND_ADDR_TRANS
+	depends on SUNRPC && INFINIBAND && INFINIBAND_ADDR_TRANS
 	default SUNRPC && INFINIBAND
 	select SG_POOL
 	help

From a840c93ca7582bb6c88df2345a33f979b7a67874 Mon Sep 17 00:00:00 2001
From: Parav Pandit
Date: Sun, 27 May 2018 14:49:16 +0300
Subject: [PATCH 3/3] IB/core: Fix error code for invalid GID entry

When a GID entry is invalid, EAGAIN is returned. This is an incorrect
error code; there is nothing that will make this GID entry valid again
in bounded time.

Some user space tools fail incorrectly if EAGAIN is returned here, and
this represents a small ABI change from earlier kernels.

The first patch in the Fixes list makes entries that were valid before
become invalid, allowing this code to trigger, while the second patch
in the Fixes list introduced the wrong EAGAIN.

Therefore, revert the return result to EINVAL, which matches the
historical expectations of the ibv_query_gid_type() API of the
libibverbs user space library.

Cc:
Fixes: 598ff6bae689 ("IB/core: Refactor GID modify code for RoCE")
Fixes: 03db3a2d81e6 ("IB/core: Add RoCE GID table management")
Reviewed-by: Daniel Jurgens
Signed-off-by: Parav Pandit
Signed-off-by: Leon Romanovsky
Signed-off-by: Jason Gunthorpe
---
 drivers/infiniband/core/cache.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index fb2d347f760f..ecc55e98ddd3 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -502,7 +502,7 @@ static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
 		return -EINVAL;
 
 	if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
-		return -EAGAIN;
+		return -EINVAL;
 
 	memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
 	if (attr) {