hpsa: allocate reply queues individually

Now that we can allocate more than 4 reply queues (up to 64),
we shouldn't make them share a single allocation; instead,
allocate each queue's buffer separately.

Signed-off-by: Stephen M. Cameron <scameron@beardog.cce.hp.com>
Reviewed-by: Mike Miller <michael.miller@canonical.com>
Reviewed-by: Scott Teel <scott.teel@hp.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
commit 072b0518b0 (parent f89439bc2e)
Author:    Stephen M. Cameron, 2014-05-29 10:53:07 -05:00
Committer: Christoph Hellwig

 2 files changed, 38 insertions(+), 28 deletions(-)
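For context before the diff: the patch replaces one contiguous DMA-coherent
pool, carved into per-queue slices, with an individual allocation per reply
queue. The sketch below models the old carve-up in user space, with calloc
standing in for pci_alloc_consistent and a hypothetical carve_shared_pool()
helper; it illustrates the layout being removed and is not driver code. Note
that the single backing allocation must cover every queue at once; with the
example parameters here (64 queues of 1024 eight-byte entries) that is
already a 512 KiB contiguous block.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_REPLY_QUEUES 64

/* Mirrors struct reply_queue_buffer from hpsa.h after this patch. */
struct reply_queue_buffer {
        uint64_t *head;         /* ring of 64-bit reply descriptors */
        size_t size;            /* entries in the ring */
        uint8_t wraparound;     /* phase flag; the driver inits it to 1 */
        uint32_t current_entry;
        uintptr_t busaddr;      /* stand-in for dma_addr_t */
};

/*
 * Old scheme (pre-patch): one pool of nqueues * entries descriptors,
 * with each queue's head pointing into a slice of it -- the analog of
 * h->reply_queue[i].head = &h->reply_pool[h->max_commands * i].
 */
static uint64_t *carve_shared_pool(struct reply_queue_buffer *q,
                                   int nqueues, size_t entries)
{
        uint64_t *pool = calloc((size_t)nqueues * entries, sizeof(*pool));

        if (!pool)
                return NULL;
        for (int i = 0; i < nqueues; i++) {
                q[i].head = &pool[entries * i];
                q[i].size = entries;
                q[i].wraparound = 1;
                q[i].current_entry = 0;
                q[i].busaddr = (uintptr_t)q[i].head;
        }
        return pool;    /* the one allocation everything depends on */
}

int main(void)
{
        static struct reply_queue_buffer q[MAX_REPLY_QUEUES];
        uint64_t *pool = carve_shared_pool(q, MAX_REPLY_QUEUES, 1024);

        if (!pool)
                return 1;
        printf("%d queues share one %zu-byte allocation\n",
               MAX_REPLY_QUEUES,
               (size_t)MAX_REPLY_QUEUES * 1024 * sizeof(uint64_t));
        free(pool);
        return 0;
}

The commit message gives no rationale beyond "shouldn't share the same
allocation"; one plausible reading is that a single large contiguous
coherent buffer is harder to satisfy than 64 small ones. The sketches after
the relevant hunks below show the per-queue replacement.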

diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -695,7 +695,7 @@ static inline void addQ(struct list_head *list, struct CommandList *c)
 static inline u32 next_command(struct ctlr_info *h, u8 q)
 {
         u32 a;
-        struct reply_pool *rq = &h->reply_queue[q];
+        struct reply_queue_buffer *rq = &h->reply_queue[q];
         unsigned long flags;
 
         if (h->transMethod & CFGTBL_Trans_io_accel1)
@@ -6707,6 +6707,20 @@ static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
 #endif /* CONFIG_PCI_MSI */
 }
 
+static void hpsa_free_reply_queues(struct ctlr_info *h)
+{
+        int i;
+
+        for (i = 0; i < h->nreply_queues; i++) {
+                if (!h->reply_queue[i].head)
+                        continue;
+                pci_free_consistent(h->pdev, h->reply_queue_size,
+                        h->reply_queue[i].head, h->reply_queue[i].busaddr);
+                h->reply_queue[i].head = NULL;
+                h->reply_queue[i].busaddr = 0;
+        }
+}
+
 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
 {
         hpsa_free_irqs_and_disable_msix(h);
@@ -6714,8 +6728,7 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
         hpsa_free_cmd_pool(h);
         kfree(h->ioaccel1_blockFetchTable);
         kfree(h->blockFetchTable);
-        pci_free_consistent(h->pdev, h->reply_pool_size,
-                h->reply_pool, h->reply_pool_dhandle);
+        hpsa_free_reply_queues(h);
         if (h->vaddr)
                 iounmap(h->vaddr);
         if (h->transtable)
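A detail worth flagging in the new hpsa_free_reply_queues() above: it skips
queues whose head is NULL and clears head/busaddr after freeing, so the
teardown is idempotent and safe to call from any error path, even when only
some of the queues were ever allocated. Continuing the user-space sketch
from above (hypothetical names, calloc/free in place of the PCI DMA
helpers):

/* Analog of hpsa_free_reply_queues(): tolerant of partial allocation
 * and harmless to call twice. Illustrative user-space code only. */
static void free_reply_queues(struct reply_queue_buffer *q, int nqueues)
{
        for (int i = 0; i < nqueues; i++) {
                if (!q[i].head)
                        continue;       /* never allocated, or already freed */
                free(q[i].head);
                q[i].head = NULL;
                q[i].busaddr = 0;
        }
}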
@@ -7164,8 +7177,7 @@ static void hpsa_remove_one(struct pci_dev *pdev)
         pci_free_consistent(h->pdev,
                 h->nr_cmds * sizeof(struct ErrorInfo),
                 h->errinfo_pool, h->errinfo_pool_dhandle);
-        pci_free_consistent(h->pdev, h->reply_pool_size,
-                h->reply_pool, h->reply_pool_dhandle);
+        hpsa_free_reply_queues(h);
         kfree(h->cmd_pool_bits);
         kfree(h->blockFetchTable);
         kfree(h->ioaccel1_blockFetchTable);
@@ -7278,7 +7290,8 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
          */
 
         /* Controller spec: zero out this buffer. */
-        memset(h->reply_pool, 0, h->reply_pool_size);
+        for (i = 0; i < h->nreply_queues; i++)
+                memset(h->reply_queue[i].head, 0, h->reply_queue_size);
 
         bft[7] = SG_ENTRIES_IN_CMD + 4;
         calc_bucket_map(bft, ARRAY_SIZE(bft),
@@ -7294,8 +7307,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
         for (i = 0; i < h->nreply_queues; i++) {
                 writel(0, &h->transtable->RepQAddr[i].upper);
-                writel(h->reply_pool_dhandle +
-                        (h->max_commands * sizeof(u64) * i),
+                writel(h->reply_queue[i].busaddr,
                         &h->transtable->RepQAddr[i].lower);
         }
 
@@ -7343,8 +7355,10 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
                         h->ioaccel1_blockFetchTable);
 
                 /* initialize all reply queue entries to unused */
-                memset(h->reply_pool, (u8) IOACCEL_MODE1_REPLY_UNUSED,
-                        h->reply_pool_size);
+                for (i = 0; i < h->nreply_queues; i++)
+                        memset(h->reply_queue[i].head,
+                                (u8) IOACCEL_MODE1_REPLY_UNUSED,
+                                h->reply_queue_size);
 
                 /* set all the constant fields in the accelerator command
                  * frames once at init time to save CPU cycles later.
@@ -7500,16 +7514,17 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
                 }
         }
 
-        /* TODO, check that this next line h->nreply_queues is correct */
         h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
         hpsa_get_max_perf_mode_cmds(h);
         /* Performant mode ring buffer and supporting data structures */
-        h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues;
-        h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
-                &(h->reply_pool_dhandle));
+        h->reply_queue_size = h->max_commands * sizeof(u64);
 
         for (i = 0; i < h->nreply_queues; i++) {
-                h->reply_queue[i].head = &h->reply_pool[h->max_commands * i];
+                h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
+                                                h->reply_queue_size,
+                                                &(h->reply_queue[i].busaddr));
+                if (!h->reply_queue[i].head)
+                        goto clean_up;
                 h->reply_queue[i].size = h->max_commands;
                 h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
                 h->reply_queue[i].current_entry = 0;
@@ -7518,18 +7533,14 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
 
         /* Need a block fetch table for performant mode */
         h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
                                 sizeof(u32)), GFP_KERNEL);
-
-        if ((h->reply_pool == NULL)
-                || (h->blockFetchTable == NULL))
+        if (!h->blockFetchTable)
                 goto clean_up;
 
         hpsa_enter_performant_mode(h, trans_support);
         return;
 
 clean_up:
-        if (h->reply_pool)
-                pci_free_consistent(h->pdev, h->reply_pool_size,
-                        h->reply_pool, h->reply_pool_dhandle);
+        hpsa_free_reply_queues(h);
         kfree(h->blockFetchTable);
 }
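Taken together, the allocation and clean_up hunks above form a standard
kernel error-handling shape: attempt each per-queue allocation, jump to a
single clean_up label on the first failure, and let the idempotent free
routine release whatever did get allocated, with no bookkeeping about how
far the loop got. A compact user-space rendering, reusing the hypothetical
helpers from the earlier sketches:

/* Per-queue allocation with a single error path, shaped like the
 * patched hpsa_put_ctlr_into_performant_mode(). Demo code only. */
static int setup_reply_queues(struct reply_queue_buffer *q,
                              int nqueues, size_t entries)
{
        for (int i = 0; i < nqueues; i++) {
                q[i].head = calloc(entries, sizeof(uint64_t));
                if (!q[i].head)
                        goto clean_up;
                q[i].size = entries;
                q[i].wraparound = 1;    /* spec: init to 1 */
                q[i].current_entry = 0;
                q[i].busaddr = (uintptr_t)q[i].head;
        }
        return 0;

clean_up:
        free_reply_queues(q, nqueues);  /* frees only what was allocated */
        return -1;
}

To try it, swap carve_shared_pool() in the earlier main() for
setup_reply_queues(q, MAX_REPLY_QUEUES, 1024), paired with
free_reply_queues().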

diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -57,11 +57,12 @@ struct hpsa_scsi_dev_t {
 };
 
-struct reply_pool {
+struct reply_queue_buffer {
         u64 *head;
         size_t size;
         u8 wraparound;
         u32 current_entry;
+        dma_addr_t busaddr;
 };
 
 #pragma pack(1)
@@ -174,11 +175,9 @@ struct ctlr_info {
         /*
          * Performant mode completion buffers
          */
-        u64 *reply_pool;
-        size_t reply_pool_size;
-        struct reply_pool reply_queue[MAX_REPLY_QUEUES];
+        size_t reply_queue_size;
+        struct reply_queue_buffer reply_queue[MAX_REPLY_QUEUES];
         u8 nreply_queues;
-        dma_addr_t reply_pool_dhandle;
         u32 *blockFetchTable;
         u32 *ioaccel1_blockFetchTable;
         u32 *ioaccel2_blockFetchTable;
@@ -392,7 +391,7 @@ static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
 static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
 {
-        struct reply_pool *rq = &h->reply_queue[q];
+        struct reply_queue_buffer *rq = &h->reply_queue[q];
         unsigned long flags, register_value = FIFO_EMPTY;
 
         /* msi auto clears the interrupt pending bit. */
@@ -507,7 +506,7 @@ static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
 static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
 {
         u64 register_value;
-        struct reply_pool *rq = &h->reply_queue[q];
+        struct reply_queue_buffer *rq = &h->reply_queue[q];
         unsigned long flags;
 
         BUG_ON(q >= h->nreply_queues);
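One last note on the renamed structure: the wraparound and current_entry
fields carried through this rename drive the consumer side visible in the
next_command() and SA5_*_completed() context above. The sketch below models
how such a ring is drained; the low-bit phase convention is inferred from
the driver's "spec: init to 1" comment, so treat it as an illustration
rather than the driver's exact logic.

#define RING_EMPTY ((uint64_t)-1)       /* hypothetical "no entry" sentinel */

/* Pop one completed descriptor from a reply ring (user-space model). */
static uint64_t ring_pop(struct reply_queue_buffer *rq)
{
        uint64_t a = rq->head[rq->current_entry];

        /* The producer toggles the descriptor's low bit on each pass
         * around the ring; a mismatch with our wraparound flag means
         * the slot has not been written this pass. */
        if ((a & 1) != rq->wraparound)
                return RING_EMPTY;

        if (++rq->current_entry == rq->size) {
                rq->current_entry = 0;
                rq->wraparound ^= 1;    /* expect the opposite phase next */
        }
        return a;
}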