1
0
Fork 0

i40iw: remove support for ib_get_vector_affinity

Devices that do not use managed affinity cannot export a vector
affinity, as the consumer relies on having a static mapping it can map to
upper-layer affinity (e.g. software queues). If the driver allows the user to
set the device IRQ affinity, then the affinitization of long-term
existing entities is not relevant.

For example, nvme-rdma controllers queue-irq affinitization is determined
at init time so if the irq affinity changes over time, we are no longer
aligned.

Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Acked-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
hifive-unleashed-5.1
Sagi Grimberg 2018-11-01 13:08:07 -07:00 committed by Jason Gunthorpe
parent 9afc97c29b
commit 759ace7832
1 changed files with 0 additions and 20 deletions

View File

@ -2721,25 +2721,6 @@ static int i40iw_query_pkey(struct ib_device *ibdev,
return 0;
}
/**
 * i40iw_get_vector_affinity - report IRQ affinity mask
 * @ibdev: IB device
 * @comp_vector: completion vector index
 *
 * Returns the affinity mask of the MSI-X IRQ backing @comp_vector.
 */
static const struct cpumask *i40iw_get_vector_affinity(struct ib_device *ibdev,
						       int comp_vector)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	/* When MSI-X vectors are not shared, completion vectors start at
	 * table index 1, so skip entry 0.
	 */
	int idx = iwdev->msix_shared ? comp_vector : comp_vector + 1;

	return irq_get_affinity_mask(iwdev->iw_msixtbl[idx].irq);
}
/**
* i40iw_init_rdma_device - initialization of iwarp device
* @iwdev: iwarp device
@ -2832,7 +2813,6 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev
iwibdev->ibdev.req_notify_cq = i40iw_req_notify_cq;
iwibdev->ibdev.post_send = i40iw_post_send;
iwibdev->ibdev.post_recv = i40iw_post_recv;
iwibdev->ibdev.get_vector_affinity = i40iw_get_vector_affinity;
return iwibdev;
}