dm mpath: move trigger_event to system workqueue

The same workqueue is used both for sending uevents and processing queued I/O.
Deadlock has been reported in RHEL5 when sending a uevent was blocked waiting
for the queued I/O to be processed.  Use schedule_work() for the asynchronous
uevents instead.

Signed-off-by: Alasdair G Kergon <agk@redhat.com>
commit fe9cf30eb8
parent 784aae735d
Author: Alasdair G Kergon <agk@redhat.com>
Date:   2009-01-06 03:05:13 +0000
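To make the failure mode concrete, the sketch below illustrates the pattern
the message describes (this is not code from the commit; apart from
kmultipathd, queue_work() and schedule_work(), every name is invented for
the example, and the blocking is modelled with a completion for brevity).
A work item that blocks waiting for another item queued behind it on the
same single-threaded workqueue can never finish; handing the blocking item
to the system workqueue breaks the cycle:

#include <linux/workqueue.h>
#include <linux/completion.h>

/* assume: created with create_singlethread_workqueue("kmpathd") */
static struct workqueue_struct *kmultipathd;

static DECLARE_COMPLETION(io_processed);

static void process_queued_ios(struct work_struct *work)
{
	/* ... dispatch the queued I/O ... */
	complete(&io_processed);
}

static void trigger_event(struct work_struct *work)
{
	/*
	 * Blocks until the queued I/O has been processed.  With both
	 * works on kmultipathd's single worker thread, this wait is
	 * never satisfied: process_queued_ios() sits in the queue
	 * behind the very work that is waiting for it.
	 */
	wait_for_completion(&io_processed);
	/* ... send the uevent ... */
}

static DECLARE_WORK(io_work, process_queued_ios);
static DECLARE_WORK(event_work, trigger_event);

static void deadlocks(void)
{
	queue_work(kmultipathd, &event_work);	/* runs, then blocks */
	queue_work(kmultipathd, &io_work);	/* stuck behind it forever */
}

static void works(void)
{
	schedule_work(&event_work);		/* system workqueue */
	queue_work(kmultipathd, &io_work);	/* free to run */
}

This is exactly the move the patch makes for m->trigger_event below, while
the I/O-processing work keeps kmultipathd to itself.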

--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -889,7 +889,7 @@ static int fail_path(struct pgpath *pgpath)
 	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
 		       pgpath->path.dev->name, m->nr_valid_paths);
 
-	queue_work(kmultipathd, &m->trigger_event);
+	schedule_work(&m->trigger_event);
 	queue_work(kmultipathd, &pgpath->deactivate_path);
 
 out:
@@ -932,7 +932,7 @@ static int reinstate_path(struct pgpath *pgpath)
 	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
 		       pgpath->path.dev->name, m->nr_valid_paths);
 
-	queue_work(kmultipathd, &m->trigger_event);
+	schedule_work(&m->trigger_event);
 
 out:
 	spin_unlock_irqrestore(&m->lock, flags);
@@ -976,7 +976,7 @@ static void bypass_pg(struct multipath *m, struct priority_group *pg,
 
 	spin_unlock_irqrestore(&m->lock, flags);
 
-	queue_work(kmultipathd, &m->trigger_event);
+	schedule_work(&m->trigger_event);
 }
 
 /*
@@ -1006,7 +1006,7 @@ static int switch_pg_num(struct multipath *m, const char *pgstr)
 	}
 	spin_unlock_irqrestore(&m->lock, flags);
 
-	queue_work(kmultipathd, &m->trigger_event);
+	schedule_work(&m->trigger_event);
 
 	return 0;
 }
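For reference, schedule_work() in kernels of this era is simply
queue_work() on the shared keventd workqueue (the "events/*" threads),
roughly:

int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}

so the trigger_event work now shares a queue with the rest of the system's
deferred work but never waits behind kmultipathd's own I/O processing.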