1
0
Fork 0

net/smc: immediate termination for SMCR link groups

If the SMC module is unloaded or an IB device is thrown away, the
immediate link group freeing introduced for SMCD is exploited for SMCR
as well. That means SMCR-specifics are added to smc_conn_kill().

Signed-off-by: Ursula Braun <ubraun@linux.ibm.com>
Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
alistair/sunxi64-5.5-dsi
Ursula Braun 2019-11-14 13:02:47 +01:00 committed by David S. Miller
parent 6a37ad3da5
commit 0b29ec6436
4 changed files with 46 additions and 22 deletions

View File

@ -566,6 +566,10 @@ static void smc_lgr_cleanup(struct smc_link_group *lgr)
struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
wake_up(&lnk->wr_reg_wait);
if (lnk->state != SMC_LNK_INACTIVE) {
smc_link_send_delete(lnk, false);
smc_llc_link_inactive(lnk);
}
}
}
@ -638,14 +642,16 @@ void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
if (!lgr->is_smcd &&
lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev &&
lgr->lnk[SMC_SINGLE_LINK].ibport == ibport)
lgr->lnk[SMC_SINGLE_LINK].ibport == ibport) {
list_move(&lgr->list, &lgr_free_list);
lgr->freeing = 1;
}
}
spin_unlock_bh(&smc_lgr_list.lock);
list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
list_del_init(&lgr->list);
__smc_lgr_terminate(lgr, true);
__smc_lgr_terminate(lgr, false);
}
}
@ -695,6 +701,36 @@ void smc_smcd_terminate_all(struct smcd_dev *smcd)
wait_event(smcd->lgrs_deleted, !atomic_read(&smcd->lgr_cnt));
}
/* Called when an SMCR device is removed or the smc module is unloaded.
* If smcibdev is given, all SMCR link groups using this device are terminated.
* If smcibdev is NULL, all SMCR link groups are terminated.
*/
void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
{
struct smc_link_group *lgr, *lg;
LIST_HEAD(lgr_free_list);
/* Collect the affected link groups onto a private list under the
 * global list lock, so termination itself can run without holding it.
 */
spin_lock_bh(&smc_lgr_list.lock);
if (!smcibdev) {
/* Module unload case: take over every SMCR link group at once. */
list_splice_init(&smc_lgr_list.list, &lgr_free_list);
list_for_each_entry(lgr, &lgr_free_list, list)
lgr->freeing = 1;
} else {
/* Device removal: move only link groups using this IB device.
 * _safe variant because list_move() unlinks the current entry.
 */
list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) {
if (lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev) {
list_move(&lgr->list, &lgr_free_list);
/* freeing=1 also stops smc_lgr_terminate_sched() from
 * scheduling concurrent terminate work for this lgr.
 */
lgr->freeing = 1;
}
}
}
spin_unlock_bh(&smc_lgr_list.lock);
/* Terminate each collected group outside the lock; second argument
 * false presumably selects the "not peer-initiated/soft" termination
 * path — NOTE(review): confirm against __smc_lgr_terminate().
 */
list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
list_del_init(&lgr->list);
__smc_lgr_terminate(lgr, false);
}
}
/* Determine vlan of internal TCP socket.
* @vlan_id: address to store the determined vlan id into
*/
@ -1215,32 +1251,16 @@ static void smc_core_going_away(void)
/* Clean up all SMC link groups */
static void smc_lgrs_shutdown(void)
{
struct smc_link_group *lgr, *lg;
LIST_HEAD(lgr_freeing_list);
struct smcd_dev *smcd;
smc_core_going_away();
spin_lock_bh(&smc_lgr_list.lock);
list_splice_init(&smc_lgr_list.list, &lgr_freeing_list);
spin_unlock_bh(&smc_lgr_list.lock);
smc_smcr_terminate_all(NULL);
spin_lock(&smcd_dev_list.lock);
list_for_each_entry(smcd, &smcd_dev_list.list, list)
smc_smcd_terminate_all(smcd);
spin_unlock(&smcd_dev_list.lock);
list_for_each_entry_safe(lgr, lg, &lgr_freeing_list, list) {
list_del_init(&lgr->list);
if (!lgr->is_smcd) {
struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
smc_link_send_delete(&lgr->lnk[SMC_SINGLE_LINK], false);
smc_llc_link_inactive(lnk);
}
cancel_delayed_work_sync(&lgr->free_work);
smc_lgr_free(lgr); /* free link group */
}
}
/* Called (from smc_exit) when module is removed */

View File

@ -287,7 +287,7 @@ static inline struct smc_connection *smc_lgr_find_conn(
static inline void smc_lgr_terminate_sched(struct smc_link_group *lgr)
{
if (!lgr->terminating)
if (!lgr->terminating && !lgr->freeing)
schedule_work(&lgr->terminate_work);
}
@ -301,6 +301,7 @@ void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport);
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid,
unsigned short vlan);
void smc_smcd_terminate_all(struct smcd_dev *dev);
void smc_smcr_terminate_all(struct smc_ib_device *smcibdev);
int smc_buf_create(struct smc_sock *smc, bool is_smcd);
int smc_uncompress_bufsize(u8 compressed);
int smc_rmb_rtoken_handling(struct smc_connection *conn,

View File

@ -565,7 +565,7 @@ static void smc_ib_add_dev(struct ib_device *ibdev)
schedule_work(&smcibdev->port_event_work);
}
/* callback function for ib_register_client() */
/* callback function for ib_unregister_client() */
static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
{
struct smc_ib_device *smcibdev;
@ -575,6 +575,7 @@ static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
spin_lock(&smc_ib_devices.lock);
list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
spin_unlock(&smc_ib_devices.lock);
smc_smcr_terminate_all(smcibdev);
smc_ib_cleanup_per_ibdev(smcibdev);
ib_unregister_event_handler(&smcibdev->event_handler);
kfree(smcibdev);

View File

@ -698,9 +698,11 @@ int smc_llc_do_confirm_rkey(struct smc_link *link,
int smc_llc_do_delete_rkey(struct smc_link *link,
struct smc_buf_desc *rmb_desc)
{
int rc;
int rc = 0;
mutex_lock(&link->llc_delete_rkey_mutex);
if (link->state != SMC_LNK_ACTIVE)
goto out;
reinit_completion(&link->llc_delete_rkey);
rc = smc_llc_send_delete_rkey(link, rmb_desc);
if (rc)