[SCSI] mpt2sas: RESCAN Barrier work is added in case of HBA reset.

Add a cancel_pending_work flag to the fw_event_work structure. The flag is set
during host reset; the work-thread context later checks it and ignores any
event whose cancel_pending_work flag is set.
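
A condensed sketch of the flag's life cycle, pieced together from the
mpt2sas_scsih.c hunks below (not a standalone unit; _scsih_fw_event_free()
and the fw_event_list are the driver's own):

    /* Reset handling: work that has not started yet is cancelled and freed;
     * work that is already running is only flagged. */
    static void
    _scsih_fw_event_cleanup_queue(struct MPT2SAS_ADAPTER *ioc)
    {
            struct fw_event_work *fw_event, *next;

            list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
                    if (cancel_delayed_work(&fw_event->delayed_work)) {
                            _scsih_fw_event_free(ioc, fw_event);
                            continue;
                    }
                    fw_event->cancel_pending_work = 1;
            }
    }

    /* Worker side: a flagged event is freed and otherwise ignored. */
    static void
    _firmware_event_work(struct work_struct *work)
    {
            struct fw_event_work *fw_event = container_of(work,
                struct fw_event_work, delayed_work.work);
            struct MPT2SAS_ADAPTER *ioc = fw_event->ioc;

            if (ioc->remove_host || fw_event->cancel_pending_work) {
                    _scsih_fw_event_free(ioc, fw_event);
                    return;
            }
            /* ... normal event handling ... */
    }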

The rescan after host reset is also reworked. A special task,
MPT2SAS_RESCAN_AFTER_HOST_RESET, is queued at the time of HBA reset and is
treated as a barrier: all work queued after MPT2SAS_RESCAN_AFTER_HOST_RESET
is treated as new work and is served by the callback handler. If host
recovery is still in progress when the RESCAN task runs, it waits for the
shost_recovery_done completion, which is signaled from the HBA-reset-done
context.
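
For reference, a minimal sketch of the barrier itself, condensed from the
mpt2sas_scsih.c hunk below: _scsih_queue_rescan() queues the
MPT2SAS_RESCAN_AFTER_HOST_RESET event from the MPT2_IOC_AFTER_RESET phase,
and the worker handles it as shown here (the completion is signaled by
mpt2sas_base_hard_reset_handler() once the reset finishes):

    /* Inside _firmware_event_work(): the barrier waits for the reset to
     * complete, then re-discovers the topology before newer events run. */
    if (fw_event->event == MPT2SAS_RESCAN_AFTER_HOST_RESET) {
            _scsih_fw_event_free(ioc, fw_event);
            spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
            if (ioc->shost_recovery) {
                    init_completion(&ioc->shost_recovery_done);
                    spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
                        flags);
                    wait_for_completion(&ioc->shost_recovery_done);
            } else
                    spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
                        flags);
            _scsih_search_responding_sas_devices(ioc);
            _scsih_search_responding_raid_devices(ioc);
            _scsih_search_responding_expanders(ioc);
            _scsih_remove_unresponding_sas_devices(ioc);
            return;
    }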

Signed-off-by: Kashyap Desai <kashyap.desai@lsi.com>
Reviewed-by: Eric Moore <eric.moore@lsi.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Kashyap, Desai 2010-03-09 16:31:43 +05:30 committed by James Bottomley
parent 36dd288f0f
commit f1c35e6aea
3 changed files with 90 additions and 80 deletions

drivers/scsi/mpt2sas/mpt2sas_base.c

@@ -3607,6 +3607,8 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
mutex_init(&ioc->ctl_cmds.mutex);
init_completion(&ioc->shost_recovery_done);
for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
ioc->event_masks[i] = -1;
@@ -3811,9 +3813,8 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
ioc->shost_recovery = 0;
complete(&ioc->shost_recovery_done);
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
if (!r)
_base_reset_handler(ioc, MPT2_IOC_RUNNING);
return r;
}

drivers/scsi/mpt2sas/mpt2sas_base.h

@@ -119,7 +119,6 @@
#define MPT2_IOC_PRE_RESET 1 /* prior to host reset */
#define MPT2_IOC_AFTER_RESET 2 /* just after host reset */
#define MPT2_IOC_DONE_RESET 3 /* links re-initialized */
#define MPT2_IOC_RUNNING 4 /* shost running */
/*
* logging format
@@ -603,7 +602,6 @@ struct MPT2SAS_ADAPTER {
/* fw event handler */
char firmware_event_name[20];
struct workqueue_struct *firmware_event_thread;
u8 fw_events_off;
spinlock_t fw_event_lock;
struct list_head fw_event_list;
@@ -611,6 +609,7 @@ struct MPT2SAS_ADAPTER {
int aen_event_read_flag;
u8 broadcast_aen_busy;
u8 shost_recovery;
struct completion shost_recovery_done;
spinlock_t ioc_reset_in_progress_lock;
u8 ioc_link_reset_in_progress;
u8 ignore_loginfos;

drivers/scsi/mpt2sas/mpt2sas_scsih.c

@@ -109,14 +109,16 @@ struct sense_info {
};
#define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF)
/**
* struct fw_event_work - firmware event struct
* @list: link list framework
* @work: work object (ioc->fault_reset_work_q)
* @cancel_pending_work: flag set during reset handling
* @ioc: per adapter object
* @VF_ID: virtual function id
* @VP_ID: virtual port id
* @host_reset_handling: handling events during host reset
* @ignore: flag meaning this event has been marked to ignore
* @event: firmware event MPI2_EVENT_XXX defined in mpt2_ioc.h
* @event_data: reply event data payload follows
@@ -125,11 +127,11 @@ struct sense_info {
*/
struct fw_event_work {
struct list_head list;
struct work_struct work;
u8 cancel_pending_work;
struct delayed_work delayed_work;
struct MPT2SAS_ADAPTER *ioc;
u8 VF_ID;
u8 VP_ID;
u8 host_reset_handling;
u8 ignore;
u16 event;
void *event_data;
@@ -2325,8 +2327,9 @@ _scsih_fw_event_add(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
spin_lock_irqsave(&ioc->fw_event_lock, flags);
list_add_tail(&fw_event->list, &ioc->fw_event_list);
INIT_WORK(&fw_event->work, _firmware_event_work);
queue_work(ioc->firmware_event_thread, &fw_event->work);
INIT_DELAYED_WORK(&fw_event->delayed_work, _firmware_event_work);
queue_delayed_work(ioc->firmware_event_thread,
&fw_event->delayed_work, 0);
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
@@ -2353,62 +2356,55 @@ _scsih_fw_event_free(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
/**
* _scsih_fw_event_add - requeue an event
* _scsih_queue_rescan - queue a topology rescan from user context
* @ioc: per adapter object
* @fw_event: object describing the event
* Context: This function will acquire ioc->fw_event_lock.
*
* Return nothing.
*/
static void
_scsih_fw_event_requeue(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
*fw_event, unsigned long delay)
_scsih_queue_rescan(struct MPT2SAS_ADAPTER *ioc)
{
unsigned long flags;
if (ioc->firmware_event_thread == NULL)
struct fw_event_work *fw_event;
if (ioc->wait_for_port_enable_to_complete)
return;
fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
if (!fw_event)
return;
fw_event->event = MPT2SAS_RESCAN_AFTER_HOST_RESET;
fw_event->ioc = ioc;
_scsih_fw_event_add(ioc, fw_event);
}
/**
* _scsih_fw_event_cleanup_queue - cleanup event queue
* @ioc: per adapter object
*
* Walk the firmware event queue, either killing timers, or waiting
* for outstanding events to complete
*
* Return nothing.
*/
static void
_scsih_fw_event_cleanup_queue(struct MPT2SAS_ADAPTER *ioc)
{
struct fw_event_work *fw_event, *next;
if (list_empty(&ioc->fw_event_list) ||
!ioc->firmware_event_thread || in_interrupt())
return;
spin_lock_irqsave(&ioc->fw_event_lock, flags);
queue_work(ioc->firmware_event_thread, &fw_event->work);
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
if (cancel_delayed_work(&fw_event->delayed_work)) {
_scsih_fw_event_free(ioc, fw_event);
continue;
}
fw_event->cancel_pending_work = 1;
}
}
/**
* _scsih_fw_event_off - turn flag off preventing event handling
* @ioc: per adapter object
*
* Used to prevent handling of firmware events during adapter reset
* driver unload.
*
* Return nothing.
*/
static void
_scsih_fw_event_off(struct MPT2SAS_ADAPTER *ioc)
{
unsigned long flags;
spin_lock_irqsave(&ioc->fw_event_lock, flags);
ioc->fw_events_off = 1;
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
/**
* _scsih_fw_event_on - turn flag on allowing firmware event handling
* @ioc: per adapter object
*
* Returns nothing.
*/
static void
_scsih_fw_event_on(struct MPT2SAS_ADAPTER *ioc)
{
unsigned long flags;
spin_lock_irqsave(&ioc->fw_event_lock, flags);
ioc->fw_events_off = 0;
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
/**
* _scsih_ublock_io_device - set the device state to SDEV_RUNNING
@@ -5694,13 +5690,13 @@ _scsih_search_responding_expanders(struct MPT2SAS_ADAPTER *ioc)
}
/**
* _scsih_remove_unresponding_devices - removing unresponding devices
* _scsih_remove_unresponding_sas_devices - removing unresponding devices
* @ioc: per adapter object
*
* Return nothing.
*/
static void
_scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc)
_scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
{
struct _sas_device *sas_device, *sas_device_next;
struct _sas_node *sas_expander;
@@ -5774,31 +5770,28 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
case MPT2_IOC_PRE_RESET:
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
"MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
_scsih_fw_event_off(ioc);
break;
case MPT2_IOC_AFTER_RESET:
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
"MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
if (ioc->scsih_cmds.status & MPT2_CMD_PENDING) {
ioc->scsih_cmds.status |= MPT2_CMD_RESET;
mpt2sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
complete(&ioc->scsih_cmds.done);
}
if (ioc->tm_cmds.status & MPT2_CMD_PENDING) {
ioc->tm_cmds.status |= MPT2_CMD_RESET;
mpt2sas_base_free_smid(ioc, ioc->tm_cmds.smid);
complete(&ioc->tm_cmds.done);
}
_scsih_fw_event_on(ioc);
_scsih_fw_event_cleanup_queue(ioc);
_scsih_flush_running_cmds(ioc);
_scsih_queue_rescan(ioc);
break;
case MPT2_IOC_DONE_RESET:
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
"MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
_scsih_sas_host_refresh(ioc);
_scsih_search_responding_sas_devices(ioc);
_scsih_search_responding_raid_devices(ioc);
_scsih_search_responding_expanders(ioc);
break;
case MPT2_IOC_RUNNING:
dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
"MPT2_IOC_RUNNING\n", ioc->name, __func__));
_scsih_remove_unresponding_devices(ioc);
break;
}
}
@@ -5815,21 +5808,31 @@ static void
_firmware_event_work(struct work_struct *work)
{
struct fw_event_work *fw_event = container_of(work,
struct fw_event_work, work);
struct fw_event_work, delayed_work.work);
unsigned long flags;
struct MPT2SAS_ADAPTER *ioc = fw_event->ioc;
/* the queue is being flushed so ignore this event */
spin_lock_irqsave(&ioc->fw_event_lock, flags);
if (ioc->fw_events_off || ioc->remove_host) {
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
if (ioc->remove_host || fw_event->cancel_pending_work) {
_scsih_fw_event_free(ioc, fw_event);
return;
}
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
if (ioc->shost_recovery) {
_scsih_fw_event_requeue(ioc, fw_event, 1000);
if (fw_event->event == MPT2SAS_RESCAN_AFTER_HOST_RESET) {
_scsih_fw_event_free(ioc, fw_event);
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
if (ioc->shost_recovery) {
init_completion(&ioc->shost_recovery_done);
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
flags);
wait_for_completion(&ioc->shost_recovery_done);
} else
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
flags);
_scsih_search_responding_sas_devices(ioc);
_scsih_search_responding_raid_devices(ioc);
_scsih_search_responding_expanders(ioc);
_scsih_remove_unresponding_sas_devices(ioc);
return;
}
@@ -5891,16 +5894,11 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
{
struct fw_event_work *fw_event;
Mpi2EventNotificationReply_t *mpi_reply;
unsigned long flags;
u16 event;
/* events turned off due to host reset or driver unloading */
spin_lock_irqsave(&ioc->fw_event_lock, flags);
if (ioc->fw_events_off || ioc->remove_host) {
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
if (ioc->remove_host)
return 1;
}
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
event = le16_to_cpu(mpi_reply->Event);
@@ -6158,6 +6156,18 @@ _scsih_shutdown(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
struct workqueue_struct *wq;
unsigned long flags;
ioc->remove_host = 1;
_scsih_fw_event_cleanup_queue(ioc);
spin_lock_irqsave(&ioc->fw_event_lock, flags);
wq = ioc->firmware_event_thread;
ioc->firmware_event_thread = NULL;
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
if (wq)
destroy_workqueue(wq);
_scsih_ir_shutdown(ioc);
mpt2sas_base_detach(ioc);
@@ -6184,7 +6194,7 @@ _scsih_remove(struct pci_dev *pdev)
unsigned long flags;
ioc->remove_host = 1;
_scsih_fw_event_off(ioc);
_scsih_fw_event_cleanup_queue(ioc);
spin_lock_irqsave(&ioc->fw_event_lock, flags);
wq = ioc->firmware_event_thread;