isci: kill "host quiesce" mechanism

The midlayer is already throttling i/o in the places where host_quiesce
was trying to prevent further i/o to the device.  It's also problematic
in that it holds a lock over GFP_KERNEL allocations.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Author: Dan Williams <dan.j.williams@intel.com>
Date:   2011-03-07 14:47:35 -08:00
parent: 3a97eec6d7
commit: 8acaec1593
4 changed files with 8 additions and 91 deletions
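For context on the locking complaint in the message above: kmalloc(GFP_KERNEL) may sleep while reclaiming memory, and sleeping is forbidden while a spinlock is held (the quiesce lock was taken with spin_lock_irqsave(), so local interrupts were off as well). A minimal, hypothetical sketch of the anti-pattern being removed -- the names here are illustrative, not the driver's code:

	#include <linux/slab.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_quiesce_lock);	/* illustrative only */

	static void *broken_alloc_under_lock(size_t len)
	{
		unsigned long flags;
		void *buf;

		spin_lock_irqsave(&example_quiesce_lock, flags);
		/* BUG: a GFP_KERNEL allocation may sleep, but sleeping is not
		 * allowed here -- a spinlock is held and local interrupts are
		 * disabled.  might_sleep()/lockdep will warn about exactly the
		 * kind of window the old quiesce lock opened around request
		 * submission.
		 */
		buf = kmalloc(len, GFP_KERNEL);
		spin_unlock_irqrestore(&example_quiesce_lock, flags);

		return buf;
	}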


@@ -73,7 +73,6 @@ enum isci_status {
 	isci_ready_for_io = 0x03,
 	isci_stopping = 0x04,
 	isci_stopped = 0x05,
-	isci_host_quiesce = 0x06
 };
 
 /**


@@ -267,36 +267,13 @@ isci_remote_device_alloc(struct isci_host *isci_host, struct isci_port *port)
 	INIT_LIST_HEAD(&isci_device->reqs_in_process);
 	INIT_LIST_HEAD(&isci_device->node);
-	isci_device->host_quiesce = false;
 	spin_lock_init(&isci_device->state_lock);
-	spin_lock_init(&isci_device->host_quiesce_lock);
 	isci_remote_device_change_state(isci_device, isci_freed);
 
 	return isci_device;
 }
 
-/**
- * isci_device_set_host_quiesce_lock_state() - This function sets the host I/O
- *    quiesce lock state for the remote_device object.
- * @isci_device,: This parameter points to the isci_remote_device object
- * @isci_device: This parameter specifies the new quiesce state.
- *
- */
-void isci_device_set_host_quiesce_lock_state(
-	struct isci_remote_device *isci_device,
-	bool lock_state)
-{
-	unsigned long flags;
-
-	dev_dbg(&isci_device->isci_port->isci_host->pdev->dev,
-		"%s: isci_device=%p, lock_state=%d\n",
-		__func__, isci_device, lock_state);
-
-	spin_lock_irqsave(&isci_device->host_quiesce_lock, flags);
-	isci_device->host_quiesce = lock_state;
-	spin_unlock_irqrestore(&isci_device->host_quiesce_lock, flags);
-}
-
 /**
  * isci_remote_device_ready() - This function is called by the scic when the
@@ -314,8 +291,8 @@ void isci_remote_device_ready(struct isci_remote_device *isci_device)
 		"%s: isci_device = %p\n", __func__, isci_device);
 
 	/* device ready is actually a "ready for io" state. */
-	if ((isci_starting == isci_remote_device_get_state(isci_device)) ||
-	    (isci_ready == isci_remote_device_get_state(isci_device))) {
+	if (isci_device->status == isci_starting ||
+	    isci_device->status == isci_ready) {
 		spin_lock_irqsave(&isci_device->isci_port->remote_device_lock,
 				  flags);
 		isci_remote_device_change_state(isci_device, isci_ready_for_io);


@@ -68,8 +68,6 @@ struct isci_remote_device {
 	struct list_head reqs_in_process;
 	struct work_struct stop_work;
 	spinlock_t state_lock;
-	spinlock_t host_quiesce_lock;
-	bool host_quiesce;
 };
 
 static inline struct scic_sds_remote_device *to_sci_dev(struct isci_remote_device *idev)
@@ -84,22 +82,6 @@ static inline struct scic_sds_remote_device *to_sci_dev(struct isci_remote_device *idev)
 
 #define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000
 
-/**
- * This function gets the status of the remote_device object.
- * @isci_device: This parameter points to the isci_remote_device object
- *
- * status of the object as a isci_status enum.
- */
-static inline
-enum isci_status isci_remote_device_get_state(
-	struct isci_remote_device *isci_device)
-{
-	return (isci_device->host_quiesce)
-		? isci_host_quiesce
-		: isci_device->status;
-}
-
 /**
  * isci_dev_from_domain_dev() - This accessor retrieves the remote_device
  *    object reference from the Linux domain_device reference.
@@ -146,10 +128,6 @@ bool isci_device_is_reset_pending(
 void isci_device_clear_reset_pending(
 	struct isci_remote_device *isci_device);
 
-void isci_device_set_host_quiesce_lock_state(
-	struct isci_remote_device *isci_device,
-	bool lock_state);
-
 void isci_remote_device_change_state(
 	struct isci_remote_device *isci_device,
 	enum isci_status status);


@@ -81,7 +81,6 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 	struct isci_request *request = NULL;
 	struct isci_remote_device *device;
 	unsigned long flags;
-	unsigned long quiesce_flags = 0;
 	int ret;
 	enum sci_status status;
@@ -151,21 +150,7 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 		isci_host = isci_host_from_sas_ha(task->dev->port->ha);
 
-		/* check if the controller hasn't started or if the device
-		 * is ready but not accepting IO.
-		 */
-		if (device) {
-			spin_lock_irqsave(&device->host_quiesce_lock,
-					  quiesce_flags);
-		}
-		/* From this point onward, any process that needs to guarantee
-		 * that there is no kernel I/O being started will have to wait
-		 * for the quiesce spinlock.
-		 */
-
-		if ((device && ((isci_remote_device_get_state(device) == isci_ready) ||
-		    (isci_remote_device_get_state(device) == isci_host_quiesce)))) {
+		if (device && device->status == isci_ready) {
 
 			/* Forces a retry from scsi mid layer. */
 			dev_warn(task->dev->port->ha->dev,
@@ -179,8 +164,7 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 			if (device)
 				dev_dbg(task->dev->port->ha->dev,
 					"%s: device->status = 0x%x\n",
-					__func__,
-					isci_remote_device_get_state(device));
+					__func__, device->status);
 
 			/* Indicate QUEUE_FULL so that the scsi midlayer
 			 * retries.
@@ -194,7 +178,7 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 			isci_host_can_dequeue(isci_host, 1);
 		}
 		/* the device is going down... */
-		else if (!device || (isci_ready_for_io != isci_remote_device_get_state(device))) {
+		else if (!device || device->status != isci_ready_for_io) {
 
 			dev_dbg(task->dev->port->ha->dev,
 				"%s: task %p: isci_host->status = %d, "
@@ -207,8 +191,7 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 			if (device)
 				dev_dbg(task->dev->port->ha->dev,
 					"%s: device->status = 0x%x\n",
-					__func__,
-					isci_remote_device_get_state(device));
+					__func__, device->status);
 
 			/* Indicate SAS_TASK_UNDELIVERED, so that the scsi
 			 * midlayer removes the target.
@@ -247,11 +230,6 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
 				isci_host_can_dequeue(isci_host, 1);
 			}
 		}
-		if (device) {
-			spin_unlock_irqrestore(&device->host_quiesce_lock,
-					       quiesce_flags
-					       );
-		}
 		task = list_entry(task->list.next, struct sas_task, list);
 	} while (--num > 0);
 	return 0;
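With the quiesce lock gone, the dispatch path above just reads device->status and, when the device cannot take I/O, completes the task straight back to libsas with a status the midlayer already knows how to handle ("Indicate QUEUE_FULL ... retries", "Indicate SAS_TASK_UNDELIVERED ... removes the target"). A rough sketch of that refusal pattern, with an illustrative helper name rather than the driver's actual code:

	#include <scsi/libsas.h>

	/* Complete a sas_task back to libsas without sending it to hardware.
	 * SAS_QUEUE_FULL asks the SCSI midlayer to retry later;
	 * SAS_TASK_UNDELIVERED + SAS_DEVICE_UNKNOWN tells it the target is
	 * gone.  (Sketch only -- the driver's real refusal paths also update
	 * flags under task->task_state_lock before calling task_done().)
	 */
	static void example_refuse_task(struct sas_task *task, bool device_gone)
	{
		struct task_status_struct *ts = &task->task_status;

		if (device_gone) {
			ts->resp = SAS_TASK_UNDELIVERED;
			ts->stat = SAS_DEVICE_UNKNOWN;
		} else {
			ts->resp = SAS_TASK_COMPLETE;
			ts->stat = SAS_QUEUE_FULL;
		}
		task->task_done(task);	/* hand the result back to libsas */
	}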
@@ -442,14 +420,11 @@ int isci_task_execute_tmf(
 	/* sanity check, return TMF_RESP_FUNC_FAILED
 	 * if the device is not there and ready.
 	 */
-	if (!isci_device ||
-	    ((isci_ready_for_io != isci_remote_device_get_state(isci_device)) &&
-	     (isci_host_quiesce != isci_remote_device_get_state(isci_device)))) {
+	if (!isci_device || isci_device->status != isci_ready_for_io) {
 		dev_dbg(&isci_host->pdev->dev,
 			"%s: isci_device = %p not ready (%d)\n",
 			__func__,
-			isci_device,
-			isci_remote_device_get_state(isci_device));
+			isci_device, isci_device->status);
 		return TMF_RESP_FUNC_FAILED;
 	} else
 		dev_dbg(&isci_host->pdev->dev,
@@ -986,9 +961,6 @@ int isci_task_lu_reset(
 		return TMF_RESP_FUNC_FAILED;
 	}
 
-	/* Stop I/O to the remote device. */
-	isci_device_set_host_quiesce_lock_state(isci_device, true);
-
 	/* Send the task management part of the reset. */
 	if (sas_protocol_ata(domain_device->tproto)) {
 		ret = isci_task_send_lu_reset_sata(
@@ -1004,9 +976,6 @@ int isci_task_lu_reset(
 				isci_device,
 				terminating);
 
-	/* Resume I/O to the remote device. */
-	isci_device_set_host_quiesce_lock_state(isci_device, false);
-
 	return ret;
 }
@@ -1627,9 +1596,6 @@ int isci_bus_reset_handler(struct scsi_cmnd *cmd)
 	if (isci_host != NULL)
 		spin_unlock_irqrestore(&isci_host->scic_lock, flags);
 
-	/* Stop I/O to the remote device. */
-	isci_device_set_host_quiesce_lock_state(isci_dev, true);
-
 	/* Make sure all pending requests are able to be fully terminated. */
 	isci_device_clear_reset_pending(isci_dev);
@@ -1671,8 +1637,5 @@ int isci_bus_reset_handler(struct scsi_cmnd *cmd)
 		"%s: cmd %p, isci_dev %p complete.\n",
 		__func__, cmd, isci_dev);
 
-	/* Resume I/O to the remote device. */
-	isci_device_set_host_quiesce_lock_state(isci_dev, false);
-
 	return TMF_RESP_FUNC_COMPLETE;
 }