
scsi: hisi_sas: Don't lock DQ for complete task sending

Currently we lock the DQ to protect the whole delivery process. This stops
us from building slots for the same queue in parallel and can hurt
performance.

To optimise this, only take the DQ lock for two short periods: when
allocating a slot from the DQ and when delivering a slot to the HW.

This approach is now safe thanks to the previous patches, which ensure that
a slot is always delivered to the HW once it has been allocated.

Signed-off-by: Xiang Chen <chenxiang66@hisilicon.com>
Signed-off-by: John Garry <john.garry@huawei.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Xiang Chen 2018-05-09 23:10:48 +08:00 committed by Martin K. Petersen
parent 3de0026dad
commit fa222db0b0
5 changed files with 96 additions and 41 deletions
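
To make the locking change concrete, here is a minimal, self-contained sketch of the pattern described above. It is not the driver code: demo_dq, demo_slot and the pthread mutex are simplified stand-ins for the driver's DQ, slot and spinlock. The lock is held only while taking a ring entry and while publishing the write pointer, and the pointer is advanced only over the contiguous run of slots already marked ready, which is what keeps the lock-free command building between those two points safe.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

#define QUEUE_SLOTS 512

/* Simplified stand-ins, not the driver's types. */
struct demo_slot {
	int dq_index;				/* ring entry given to this slot */
	bool ready;				/* command fully built? */
};

struct demo_dq {
	pthread_mutex_t lock;
	int wr_point;				/* next free ring entry */
	int hw_wr_ptr;				/* pointer published to the "HW" */
	struct demo_slot *ring[QUEUE_SLOTS];	/* slots in allocation order */
};

/* Allocation: the only work done under the lock on the way in
 * (queue-full handling omitted to keep the sketch short). */
static void demo_alloc_entry(struct demo_dq *dq, struct demo_slot *slot)
{
	pthread_mutex_lock(&dq->lock);
	slot->dq_index = dq->wr_point;
	slot->ready = false;
	dq->ring[slot->dq_index] = slot;
	dq->wr_point = (dq->wr_point + 1) % QUEUE_SLOTS;
	pthread_mutex_unlock(&dq->lock);
}

/* Delivery: mark this slot ready, then advance the published pointer only
 * over the contiguous run of ready slots, so an entry still being built by
 * another submitter is never exposed to the hardware. */
static void demo_start_delivery(struct demo_dq *dq, struct demo_slot *slot)
{
	pthread_mutex_lock(&dq->lock);
	slot->ready = true;
	while (dq->hw_wr_ptr != dq->wr_point &&
	       dq->ring[dq->hw_wr_ptr] && dq->ring[dq->hw_wr_ptr]->ready) {
		dq->ring[dq->hw_wr_ptr] = NULL;
		dq->hw_wr_ptr = (dq->hw_wr_ptr + 1) % QUEUE_SLOTS;
	}
	pthread_mutex_unlock(&dq->lock);
}

/* Per-command path: lock for allocation, build with no lock held, then
 * lock again briefly for delivery, rather than holding the lock across
 * all three steps as before. */
static void demo_submit(struct demo_dq *dq, struct demo_slot *slot)
{
	demo_alloc_entry(dq, slot);
	/* ...build the command descriptor here, DQ lock not held... */
	demo_start_delivery(dq, slot);
}

In the patch itself the same role is played by the new dq->list / slot->delivery list and the slot->ready flag, which the reworked start_delivery_v*_hw() functions walk before a single write of the hardware write pointer.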

drivers/scsi/hisi_sas/hisi_sas.h

@@ -161,7 +161,7 @@ struct hisi_sas_cq {
struct hisi_sas_dq {
struct hisi_hba *hisi_hba;
struct hisi_sas_slot *slot_prep;
struct list_head list;
spinlock_t lock;
int wr_point;
int id;
@@ -181,6 +181,7 @@ struct hisi_sas_device {
struct hisi_sas_slot {
struct list_head entry;
struct list_head delivery;
struct sas_task *task;
struct hisi_sas_port *port;
u64 n_elem;
@@ -190,6 +191,7 @@ struct hisi_sas_slot {
int cmplt_queue_slot;
int idx;
int abort;
int ready;
void *buf;
dma_addr_t buf_dma;
void *cmd_hdr;

drivers/scsi/hisi_sas/hisi_sas_main.c

@@ -307,9 +307,9 @@ out:
task->task_done(task);
}
static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
*dq, int is_tmf, struct hisi_sas_tmf_task *tmf,
int *pass)
static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq *dq,
int is_tmf, struct hisi_sas_tmf_task *tmf,
int *pass)
{
struct hisi_hba *hisi_hba = dq->hisi_hba;
struct domain_device *device = task->dev;
@@ -321,7 +321,8 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
struct device *dev = hisi_hba->dev;
int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
unsigned long flags;
unsigned long flags, flags_dq;
int wr_q_index;
if (!sas_port) {
struct task_status_struct *ts = &task->task_status;
@@ -422,12 +423,18 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
goto err_out_tag;
}
rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
if (rc)
spin_lock_irqsave(&dq->lock, flags_dq);
wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
if (wr_q_index < 0) {
spin_unlock_irqrestore(&dq->lock, flags_dq);
goto err_out_buf;
}
list_add_tail(&slot->delivery, &dq->list);
spin_unlock_irqrestore(&dq->lock, flags_dq);
dlvry_queue = dq->id;
dlvry_queue_slot = dq->wr_point;
dlvry_queue_slot = wr_q_index;
slot->idx = slot_idx;
slot->n_elem = n_elem;
@@ -471,8 +478,8 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
dq->slot_prep = slot;
++(*pass);
slot->ready = 1;
return 0;
@@ -518,11 +525,11 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
return -EINVAL;
/* protect task_prep and start_delivery sequence */
spin_lock_irqsave(&dq->lock, flags);
rc = hisi_sas_task_prep(task, dq, is_tmf, tmf, &pass);
if (rc)
dev_err(dev, "task exec: failed[%d]!\n", rc);
spin_lock_irqsave(&dq->lock, flags);
if (likely(pass))
hisi_hba->hw->start_delivery(dq);
spin_unlock_irqrestore(&dq->lock, flags);
@@ -1503,7 +1510,8 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
struct hisi_sas_cmd_hdr *cmd_hdr_base;
struct hisi_sas_dq *dq = sas_dev->dq;
int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
unsigned long flags, flags_dq;
unsigned long flags, flags_dq = 0;
int wr_q_index;
if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
return -EINVAL;
@@ -1531,16 +1539,18 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
rc = -ENOMEM;
goto err_out_tag;
}
spin_lock_irqsave(&dq->lock, flags_dq);
rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
if (rc) {
rc = -ENOMEM;
wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
if (wr_q_index < 0) {
spin_unlock_irqrestore(&dq->lock, flags_dq);
goto err_out_buf;
}
list_add_tail(&slot->delivery, &dq->list);
spin_unlock_irqrestore(&dq->lock, flags_dq);
dlvry_queue = dq->id;
dlvry_queue_slot = dq->wr_point;
dlvry_queue_slot = wr_q_index;
slot->idx = slot_idx;
slot->n_elem = n_elem;
@@ -1560,18 +1570,16 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
abort_flag, task_tag);
spin_lock_irqsave(&hisi_hba->lock, flags);
list_add_tail(&slot->entry, &sas_dev->list);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
dq->slot_prep = slot;
slot->ready = 1;
/* send abort command to the chip */
spin_lock_irqsave(&dq->lock, flags);
list_add_tail(&slot->entry, &sas_dev->list);
hisi_hba->hw->start_delivery(dq);
spin_unlock_irqrestore(&dq->lock, flags_dq);
spin_unlock_irqrestore(&dq->lock, flags);
return 0;
@@ -1856,6 +1864,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
/* Delivery queue structure */
spin_lock_init(&dq->lock);
INIT_LIST_HEAD(&dq->list);
dq->id = i;
dq->hisi_hba = hisi_hba;

drivers/scsi/hisi_sas/hisi_sas_v1_hw.c

@@ -921,18 +921,33 @@ get_free_slot_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
return -EAGAIN;
}
return 0;
dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
return w;
}
/* DQ lock must be taken here */
static void start_delivery_v1_hw(struct hisi_sas_dq *dq)
{
struct hisi_hba *hisi_hba = dq->hisi_hba;
int dlvry_queue = dq->slot_prep->dlvry_queue;
int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot;
struct hisi_sas_slot *s, *s1;
struct list_head *dq_list;
int dlvry_queue = dq->id;
int wp, count = 0;
dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
dq->wr_point);
dq_list = &dq->list;
list_for_each_entry_safe(s, s1, &dq->list, delivery) {
if (!s->ready)
break;
count++;
wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
list_del(&s->delivery);
}
if (!count)
return;
hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
}
static void prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba,

drivers/scsi/hisi_sas/hisi_sas_v2_hw.c

@@ -1663,23 +1663,38 @@ get_free_slot_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
r = hisi_sas_read32_relaxed(hisi_hba,
DLVRY_Q_0_RD_PTR + (queue * 0x14));
if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
dev_warn(dev, "full queue=%d r=%d w=%d\n\n",
dev_warn(dev, "full queue=%d r=%d w=%d\n",
queue, r, w);
return -EAGAIN;
}
return 0;
dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
return w;
}
/* DQ lock must be taken here */
static void start_delivery_v2_hw(struct hisi_sas_dq *dq)
{
struct hisi_hba *hisi_hba = dq->hisi_hba;
int dlvry_queue = dq->slot_prep->dlvry_queue;
int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot;
struct hisi_sas_slot *s, *s1;
struct list_head *dq_list;
int dlvry_queue = dq->id;
int wp, count = 0;
dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
dq->wr_point);
dq_list = &dq->list;
list_for_each_entry_safe(s, s1, &dq->list, delivery) {
if (!s->ready)
break;
count++;
wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
list_del(&s->delivery);
}
if (!count)
return;
hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
}
static void prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba,

drivers/scsi/hisi_sas/hisi_sas_v3_hw.c

@@ -840,23 +840,37 @@ get_free_slot_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
r = hisi_sas_read32_relaxed(hisi_hba,
DLVRY_Q_0_RD_PTR + (queue * 0x14));
if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
dev_warn(dev, "full queue=%d r=%d w=%d\n\n",
dev_warn(dev, "full queue=%d r=%d w=%d\n",
queue, r, w);
return -EAGAIN;
}
return 0;
dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
return w;
}
static void start_delivery_v3_hw(struct hisi_sas_dq *dq)
{
struct hisi_hba *hisi_hba = dq->hisi_hba;
int dlvry_queue = dq->slot_prep->dlvry_queue;
int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot;
struct hisi_sas_slot *s, *s1;
struct list_head *dq_list;
int dlvry_queue = dq->id;
int wp, count = 0;
dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
dq->wr_point);
dq_list = &dq->list;
list_for_each_entry_safe(s, s1, &dq->list, delivery) {
if (!s->ready)
break;
count++;
wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
list_del(&s->delivery);
}
if (!count)
return;
hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
}
static void prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba,