
Merge branch 'topic/bcm' into for-linus

hifive-unleashed-5.1
Vinod Koul 2017-09-06 21:54:09 +05:30
commit 07e24b8559
1 changed file with 310 additions and 264 deletions


@@ -36,6 +36,7 @@
*/
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/list.h>
@@ -48,7 +49,8 @@
#include "dmaengine.h"
/* SBA command related defines */
/* ====== Driver macros and defines ===== */
#define SBA_TYPE_SHIFT 48
#define SBA_TYPE_MASK GENMASK(1, 0)
#define SBA_TYPE_A 0x0
@@ -82,39 +84,40 @@
#define SBA_CMD_WRITE_BUFFER 0xc
#define SBA_CMD_GALOIS 0xe
#define SBA_MAX_REQ_PER_MBOX_CHANNEL 8192
/* Driver helper macros */
#define to_sba_request(tx) \
container_of(tx, struct sba_request, tx)
#define to_sba_device(dchan) \
container_of(dchan, struct sba_device, dma_chan)
enum sba_request_state {
SBA_REQUEST_STATE_FREE = 1,
SBA_REQUEST_STATE_ALLOCED = 2,
SBA_REQUEST_STATE_PENDING = 3,
SBA_REQUEST_STATE_ACTIVE = 4,
SBA_REQUEST_STATE_RECEIVED = 5,
SBA_REQUEST_STATE_COMPLETED = 6,
SBA_REQUEST_STATE_ABORTED = 7,
/* ===== Driver data structures ===== */
enum sba_request_flags {
SBA_REQUEST_STATE_FREE = 0x001,
SBA_REQUEST_STATE_ALLOCED = 0x002,
SBA_REQUEST_STATE_PENDING = 0x004,
SBA_REQUEST_STATE_ACTIVE = 0x008,
SBA_REQUEST_STATE_ABORTED = 0x010,
SBA_REQUEST_STATE_MASK = 0x0ff,
SBA_REQUEST_FENCE = 0x100,
};
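The old state enum and the separate fence flag are folded into this single flags word: the state lives in the low bits under SBA_REQUEST_STATE_MASK, while SBA_REQUEST_FENCE is an independent bit that survives state transitions. A minimal sketch of the resulting idiom, with illustrative helper names that are not part of the driver:

static void example_set_state(struct sba_request *req, u32 state)
{
	/* clear the previous state bits, keep SBA_REQUEST_FENCE intact */
	req->flags &= ~SBA_REQUEST_STATE_MASK;
	/* state is one of the SBA_REQUEST_STATE_* values */
	req->flags |= state;
}

static bool example_is_fenced(const struct sba_request *req)
{
	return !!(req->flags & SBA_REQUEST_FENCE);
}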
struct sba_request {
/* Global state */
struct list_head node;
struct sba_device *sba;
enum sba_request_state state;
bool fence;
u32 flags;
/* Chained requests management */
struct sba_request *first;
struct list_head next;
unsigned int next_count;
atomic_t next_pending_count;
/* BRCM message data */
void *resp;
dma_addr_t resp_dma;
struct brcm_sba_command *cmds;
struct brcm_message msg;
struct dma_async_tx_descriptor tx;
/* SBA commands */
struct brcm_sba_command cmds[0];
};
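Note that cmds changes from a separately allocated pointer into a zero-length trailing array, so a request and its command slots now come from a single allocation. A sketch of the size computation, mirroring the devm_kzalloc() call that appears later in this diff:

	req = devm_kzalloc(sba->dev,
			   sizeof(*req) +
			   sba->max_cmd_per_req * sizeof(req->cmds[0]),
			   GFP_KERNEL);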
enum sba_version {
@@ -152,19 +155,18 @@ struct sba_device {
void *cmds_base;
dma_addr_t cmds_dma_base;
spinlock_t reqs_lock;
struct sba_request *reqs;
bool reqs_fence;
struct list_head reqs_alloc_list;
struct list_head reqs_pending_list;
struct list_head reqs_active_list;
struct list_head reqs_received_list;
struct list_head reqs_completed_list;
struct list_head reqs_aborted_list;
struct list_head reqs_free_list;
int reqs_free_count;
/* DebugFS directory entries */
struct dentry *root;
struct dentry *stats;
};
/* ====== SBA command helper routines ===== */
/* ====== Command helper routines ===== */
static inline u64 __pure sba_cmd_enc(u64 cmd, u32 val, u32 shift, u32 mask)
{
@@ -196,32 +198,50 @@ static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
((d & SBA_C_MDATA_DNUM_MASK) << SBA_C_MDATA_DNUM_SHIFT);
}
/* ====== Channel resource management routines ===== */
/* ====== General helper routines ===== */
static void sba_peek_mchans(struct sba_device *sba)
{
int mchan_idx;
for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++)
mbox_client_peek_data(sba->mchans[mchan_idx]);
}
static struct sba_request *sba_alloc_request(struct sba_device *sba)
{
bool found = false;
unsigned long flags;
struct sba_request *req = NULL;
spin_lock_irqsave(&sba->reqs_lock, flags);
list_for_each_entry(req, &sba->reqs_free_list, node) {
if (async_tx_test_ack(&req->tx)) {
list_move_tail(&req->node, &sba->reqs_alloc_list);
found = true;
break;
}
}
spin_unlock_irqrestore(&sba->reqs_lock, flags);
req = list_first_entry_or_null(&sba->reqs_free_list,
struct sba_request, node);
if (req) {
list_move_tail(&req->node, &sba->reqs_alloc_list);
req->state = SBA_REQUEST_STATE_ALLOCED;
req->fence = false;
req->first = req;
INIT_LIST_HEAD(&req->next);
req->next_count = 1;
atomic_set(&req->next_pending_count, 1);
sba->reqs_free_count--;
dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
if (!found) {
/*
* We have no more free requests so, we peek
* mailbox channels hoping few active requests
* would have completed which will create more
* room for new requests.
*/
sba_peek_mchans(sba);
return NULL;
}
spin_unlock_irqrestore(&sba->reqs_lock, flags);
req->flags = SBA_REQUEST_STATE_ALLOCED;
req->first = req;
INIT_LIST_HEAD(&req->next);
atomic_set(&req->next_pending_count, 1);
dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
async_tx_ack(&req->tx);
return req;
}
@@ -231,7 +251,8 @@ static void _sba_pending_request(struct sba_device *sba,
struct sba_request *req)
{
lockdep_assert_held(&sba->reqs_lock);
req->state = SBA_REQUEST_STATE_PENDING;
req->flags &= ~SBA_REQUEST_STATE_MASK;
req->flags |= SBA_REQUEST_STATE_PENDING;
list_move_tail(&req->node, &sba->reqs_pending_list);
if (list_empty(&sba->reqs_active_list))
sba->reqs_fence = false;
@@ -246,9 +267,10 @@ static bool _sba_active_request(struct sba_device *sba,
sba->reqs_fence = false;
if (sba->reqs_fence)
return false;
req->state = SBA_REQUEST_STATE_ACTIVE;
req->flags &= ~SBA_REQUEST_STATE_MASK;
req->flags |= SBA_REQUEST_STATE_ACTIVE;
list_move_tail(&req->node, &sba->reqs_active_list);
if (req->fence)
if (req->flags & SBA_REQUEST_FENCE)
sba->reqs_fence = true;
return true;
}
@@ -258,7 +280,8 @@ static void _sba_abort_request(struct sba_device *sba,
struct sba_request *req)
{
lockdep_assert_held(&sba->reqs_lock);
req->state = SBA_REQUEST_STATE_ABORTED;
req->flags &= ~SBA_REQUEST_STATE_MASK;
req->flags |= SBA_REQUEST_STATE_ABORTED;
list_move_tail(&req->node, &sba->reqs_aborted_list);
if (list_empty(&sba->reqs_active_list))
sba->reqs_fence = false;
@@ -269,42 +292,11 @@ static void _sba_free_request(struct sba_device *sba,
struct sba_request *req)
{
lockdep_assert_held(&sba->reqs_lock);
req->state = SBA_REQUEST_STATE_FREE;
req->flags &= ~SBA_REQUEST_STATE_MASK;
req->flags |= SBA_REQUEST_STATE_FREE;
list_move_tail(&req->node, &sba->reqs_free_list);
if (list_empty(&sba->reqs_active_list))
sba->reqs_fence = false;
sba->reqs_free_count++;
}
static void sba_received_request(struct sba_request *req)
{
unsigned long flags;
struct sba_device *sba = req->sba;
spin_lock_irqsave(&sba->reqs_lock, flags);
req->state = SBA_REQUEST_STATE_RECEIVED;
list_move_tail(&req->node, &sba->reqs_received_list);
spin_unlock_irqrestore(&sba->reqs_lock, flags);
}
static void sba_complete_chained_requests(struct sba_request *req)
{
unsigned long flags;
struct sba_request *nreq;
struct sba_device *sba = req->sba;
spin_lock_irqsave(&sba->reqs_lock, flags);
req->state = SBA_REQUEST_STATE_COMPLETED;
list_move_tail(&req->node, &sba->reqs_completed_list);
list_for_each_entry(nreq, &req->next, next) {
nreq->state = SBA_REQUEST_STATE_COMPLETED;
list_move_tail(&nreq->node, &sba->reqs_completed_list);
}
if (list_empty(&sba->reqs_active_list))
sba->reqs_fence = false;
spin_unlock_irqrestore(&sba->reqs_lock, flags);
}
static void sba_free_chained_requests(struct sba_request *req)
@@ -332,8 +324,7 @@ static void sba_chain_request(struct sba_request *first,
list_add_tail(&req->next, &first->next);
req->first = first;
first->next_count++;
atomic_set(&first->next_pending_count, first->next_count);
atomic_inc(&first->next_pending_count);
spin_unlock_irqrestore(&sba->reqs_lock, flags);
}
@@ -349,14 +340,6 @@ static void sba_cleanup_nonpending_requests(struct sba_device *sba)
list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
_sba_free_request(sba, req);
/* Freeup all received request */
list_for_each_entry_safe(req, req1, &sba->reqs_received_list, node)
_sba_free_request(sba, req);
/* Freeup all completed request */
list_for_each_entry_safe(req, req1, &sba->reqs_completed_list, node)
_sba_free_request(sba, req);
/* Set all active requests as aborted */
list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
_sba_abort_request(sba, req);
@@ -383,6 +366,144 @@ static void sba_cleanup_pending_requests(struct sba_device *sba)
spin_unlock_irqrestore(&sba->reqs_lock, flags);
}
static int sba_send_mbox_request(struct sba_device *sba,
struct sba_request *req)
{
int mchans_idx, ret = 0;
/* Select mailbox channel in round-robin fashion */
mchans_idx = atomic_inc_return(&sba->mchans_current);
mchans_idx = mchans_idx % sba->mchans_count;
/* Send message for the request */
req->msg.error = 0;
ret = mbox_send_message(sba->mchans[mchans_idx], &req->msg);
if (ret < 0) {
dev_err(sba->dev, "send message failed with error %d", ret);
return ret;
}
/* Check error returned by mailbox controller */
ret = req->msg.error;
if (ret < 0) {
dev_err(sba->dev, "message error %d", ret);
}
/* Signal txdone for mailbox channel */
mbox_client_txdone(sba->mchans[mchans_idx], ret);
return ret;
}
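The round-robin pick above is just an atomic counter reduced modulo the channel count, so with e.g. 8 mailbox channels successive sends land on indices 1, 2, ..., 7, 0, 1, and so on; equivalently, as a one-liner:

	mchans_idx = atomic_inc_return(&sba->mchans_current) % sba->mchans_count;

Calling mbox_client_txdone() here is what the knows_txdone = true client setting (made in sba_probe() later in this diff) commits the driver to.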
/* Note: Must be called with sba->reqs_lock held */
static void _sba_process_pending_requests(struct sba_device *sba)
{
int ret;
u32 count;
struct sba_request *req;
/*
* Process few pending requests
*
* For now, we process (<number_of_mailbox_channels> * 8)
* number of requests at a time.
*/
count = sba->mchans_count * 8;
while (!list_empty(&sba->reqs_pending_list) && count) {
/* Get the first pending request */
req = list_first_entry(&sba->reqs_pending_list,
struct sba_request, node);
/* Try to make request active */
if (!_sba_active_request(sba, req))
break;
/* Send request to mailbox channel */
ret = sba_send_mbox_request(sba, req);
if (ret < 0) {
_sba_pending_request(sba, req);
break;
}
count--;
}
}
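For example, with 8 mailbox channels one pass drains at most 8 * 8 = 64 pending requests before reqs_lock is released, which bounds the time spent with interrupts disabled.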
static void sba_process_received_request(struct sba_device *sba,
struct sba_request *req)
{
unsigned long flags;
struct dma_async_tx_descriptor *tx;
struct sba_request *nreq, *first = req->first;
/* Process only after all chained requests are received */
if (!atomic_dec_return(&first->next_pending_count)) {
tx = &first->tx;
WARN_ON(tx->cookie < 0);
if (tx->cookie > 0) {
dma_cookie_complete(tx);
dmaengine_desc_get_callback_invoke(tx, NULL);
dma_descriptor_unmap(tx);
tx->callback = NULL;
tx->callback_result = NULL;
}
dma_run_dependencies(tx);
spin_lock_irqsave(&sba->reqs_lock, flags);
/* Free all requests chained to first request */
list_for_each_entry(nreq, &first->next, next)
_sba_free_request(sba, nreq);
INIT_LIST_HEAD(&first->next);
/* Free the first request */
_sba_free_request(sba, first);
/* Process pending requests */
_sba_process_pending_requests(sba);
spin_unlock_irqrestore(&sba->reqs_lock, flags);
}
}
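From a dmaengine client's perspective this is where a submitted cookie completes. A minimal sketch using the generic dmaengine client API (not driver-internal calls):

	cookie = dmaengine_submit(tx);		/* ends up in sba_tx_submit() */
	dma_async_issue_pending(chan);		/* ends up in sba_issue_pending() */
	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
	/* DMA_COMPLETE once the chain has passed through this function */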
static void sba_write_stats_in_seqfile(struct sba_device *sba,
struct seq_file *file)
{
unsigned long flags;
struct sba_request *req;
u32 free_count = 0, alloced_count = 0;
u32 pending_count = 0, active_count = 0, aborted_count = 0;
spin_lock_irqsave(&sba->reqs_lock, flags);
list_for_each_entry(req, &sba->reqs_free_list, node)
if (async_tx_test_ack(&req->tx))
free_count++;
list_for_each_entry(req, &sba->reqs_alloc_list, node)
alloced_count++;
list_for_each_entry(req, &sba->reqs_pending_list, node)
pending_count++;
list_for_each_entry(req, &sba->reqs_active_list, node)
active_count++;
list_for_each_entry(req, &sba->reqs_aborted_list, node)
aborted_count++;
spin_unlock_irqrestore(&sba->reqs_lock, flags);
seq_printf(file, "maximum requests = %d\n", sba->max_req);
seq_printf(file, "free requests = %d\n", free_count);
seq_printf(file, "alloced requests = %d\n", alloced_count);
seq_printf(file, "pending requests = %d\n", pending_count);
seq_printf(file, "active requests = %d\n", active_count);
seq_printf(file, "aborted requests = %d\n", aborted_count);
}
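A hypothetical read of the resulting debugfs stats file, with counts invented purely for illustration:

	maximum requests = 65536
	free requests = 65530
	alloced requests = 1
	pending requests = 3
	active requests = 2
	aborted requests = 0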
/* ====== DMAENGINE callbacks ===== */
static void sba_free_chan_resources(struct dma_chan *dchan)
@@ -403,58 +524,14 @@ static int sba_device_terminate_all(struct dma_chan *dchan)
return 0;
}
static int sba_send_mbox_request(struct sba_device *sba,
struct sba_request *req)
{
int mchans_idx, ret = 0;
/* Select mailbox channel in round-robin fashion */
mchans_idx = atomic_inc_return(&sba->mchans_current);
mchans_idx = mchans_idx % sba->mchans_count;
/* Send message for the request */
req->msg.error = 0;
ret = mbox_send_message(sba->mchans[mchans_idx], &req->msg);
if (ret < 0) {
dev_err(sba->dev, "send message failed with error %d", ret);
return ret;
}
ret = req->msg.error;
if (ret < 0) {
dev_err(sba->dev, "message error %d", ret);
return ret;
}
return 0;
}
static void sba_issue_pending(struct dma_chan *dchan)
{
int ret;
unsigned long flags;
struct sba_request *req, *req1;
struct sba_device *sba = to_sba_device(dchan);
/* Process pending requests */
spin_lock_irqsave(&sba->reqs_lock, flags);
/* Process all pending request */
list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node) {
/* Try to make request active */
if (!_sba_active_request(sba, req))
break;
/* Send request to mailbox channel */
spin_unlock_irqrestore(&sba->reqs_lock, flags);
ret = sba_send_mbox_request(sba, req);
spin_lock_irqsave(&sba->reqs_lock, flags);
/* If something went wrong then keep request pending */
if (ret < 0) {
_sba_pending_request(sba, req);
break;
}
}
_sba_process_pending_requests(sba);
spin_unlock_irqrestore(&sba->reqs_lock, flags);
}
@@ -486,17 +563,15 @@ static enum dma_status sba_tx_status(struct dma_chan *dchan,
dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
int mchan_idx;
enum dma_status ret;
struct sba_device *sba = to_sba_device(dchan);
for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++)
mbox_client_peek_data(sba->mchans[mchan_idx]);
ret = dma_cookie_status(dchan, cookie, txstate);
if (ret == DMA_COMPLETE)
return ret;
sba_peek_mchans(sba);
return dma_cookie_status(dchan, cookie, txstate);
}
@@ -506,6 +581,7 @@ static void sba_fillup_interrupt_msg(struct sba_request *req,
{
u64 cmd;
u32 c_mdata;
dma_addr_t resp_dma = req->tx.phys;
struct brcm_sba_command *cmdsp = cmds;
/* Type-B command to load dummy data into buf0 */
@@ -521,7 +597,7 @@ static void sba_fillup_interrupt_msg(struct sba_request *req,
cmdsp->cmd = cmd;
*cmdsp->cmd_dma = cpu_to_le64(cmd);
cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
cmdsp->data = req->resp_dma;
cmdsp->data = resp_dma;
cmdsp->data_len = req->sba->hw_resp_size;
cmdsp++;
@@ -542,11 +618,11 @@ static void sba_fillup_interrupt_msg(struct sba_request *req,
cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
if (req->sba->hw_resp_size) {
cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
cmdsp->resp = req->resp_dma;
cmdsp->resp = resp_dma;
cmdsp->resp_len = req->sba->hw_resp_size;
}
cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
cmdsp->data = req->resp_dma;
cmdsp->data = resp_dma;
cmdsp->data_len = req->sba->hw_resp_size;
cmdsp++;
@@ -573,7 +649,7 @@ sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags)
* Force fence so that no requests are submitted
* until DMA callback for this request is invoked.
*/
req->fence = true;
req->flags |= SBA_REQUEST_FENCE;
/* Fillup request message */
sba_fillup_interrupt_msg(req, req->cmds, &req->msg);
@@ -593,6 +669,7 @@ static void sba_fillup_memcpy_msg(struct sba_request *req,
{
u64 cmd;
u32 c_mdata;
dma_addr_t resp_dma = req->tx.phys;
struct brcm_sba_command *cmdsp = cmds;
/* Type-B command to load data into buf0 */
@@ -629,7 +706,7 @@ static void sba_fillup_memcpy_msg(struct sba_request *req,
cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
if (req->sba->hw_resp_size) {
cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
cmdsp->resp = req->resp_dma;
cmdsp->resp = resp_dma;
cmdsp->resp_len = req->sba->hw_resp_size;
}
cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
@@ -656,7 +733,8 @@ sba_prep_dma_memcpy_req(struct sba_device *sba,
req = sba_alloc_request(sba);
if (!req)
return NULL;
req->fence = (flags & DMA_PREP_FENCE) ? true : false;
if (flags & DMA_PREP_FENCE)
req->flags |= SBA_REQUEST_FENCE;
/* Fillup request message */
sba_fillup_memcpy_msg(req, req->cmds, &req->msg,
@@ -711,6 +789,7 @@ static void sba_fillup_xor_msg(struct sba_request *req,
u64 cmd;
u32 c_mdata;
unsigned int i;
dma_addr_t resp_dma = req->tx.phys;
struct brcm_sba_command *cmdsp = cmds;
/* Type-B command to load data into buf0 */
@@ -766,7 +845,7 @@ static void sba_fillup_xor_msg(struct sba_request *req,
cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
if (req->sba->hw_resp_size) {
cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
cmdsp->resp = req->resp_dma;
cmdsp->resp = resp_dma;
cmdsp->resp_len = req->sba->hw_resp_size;
}
cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
@@ -782,7 +861,7 @@ static void sba_fillup_xor_msg(struct sba_request *req,
msg->error = 0;
}
struct sba_request *
static struct sba_request *
sba_prep_dma_xor_req(struct sba_device *sba,
dma_addr_t off, dma_addr_t dst, dma_addr_t *src,
u32 src_cnt, size_t len, unsigned long flags)
@@ -793,7 +872,8 @@ sba_prep_dma_xor_req(struct sba_device *sba,
req = sba_alloc_request(sba);
if (!req)
return NULL;
req->fence = (flags & DMA_PREP_FENCE) ? true : false;
if (flags & DMA_PREP_FENCE)
req->flags |= SBA_REQUEST_FENCE;
/* Fillup request message */
sba_fillup_xor_msg(req, req->cmds, &req->msg,
@@ -854,6 +934,7 @@ static void sba_fillup_pq_msg(struct sba_request *req,
u64 cmd;
u32 c_mdata;
unsigned int i;
dma_addr_t resp_dma = req->tx.phys;
struct brcm_sba_command *cmdsp = cmds;
if (pq_continue) {
@@ -947,7 +1028,7 @@ static void sba_fillup_pq_msg(struct sba_request *req,
cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
if (req->sba->hw_resp_size) {
cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
cmdsp->resp = req->resp_dma;
cmdsp->resp = resp_dma;
cmdsp->resp_len = req->sba->hw_resp_size;
}
cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
@@ -974,7 +1055,7 @@ static void sba_fillup_pq_msg(struct sba_request *req,
cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
if (req->sba->hw_resp_size) {
cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
cmdsp->resp = req->resp_dma;
cmdsp->resp = resp_dma;
cmdsp->resp_len = req->sba->hw_resp_size;
}
cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
@@ -991,7 +1072,7 @@ static void sba_fillup_pq_msg(struct sba_request *req,
msg->error = 0;
}
struct sba_request *
static struct sba_request *
sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off,
dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t *src,
u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
@@ -1002,7 +1083,8 @@ sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off,
req = sba_alloc_request(sba);
if (!req)
return NULL;
req->fence = (flags & DMA_PREP_FENCE) ? true : false;
if (flags & DMA_PREP_FENCE)
req->flags |= SBA_REQUEST_FENCE;
/* Fillup request messages */
sba_fillup_pq_msg(req, dmaf_continue(flags),
@@ -1027,6 +1109,7 @@ static void sba_fillup_pq_single_msg(struct sba_request *req,
u64 cmd;
u32 c_mdata;
u8 pos, dpos = raid6_gflog[scf];
dma_addr_t resp_dma = req->tx.phys;
struct brcm_sba_command *cmdsp = cmds;
if (!dst_p)
@@ -1105,7 +1188,7 @@ static void sba_fillup_pq_single_msg(struct sba_request *req,
cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
if (req->sba->hw_resp_size) {
cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
cmdsp->resp = req->resp_dma;
cmdsp->resp = resp_dma;
cmdsp->resp_len = req->sba->hw_resp_size;
}
cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
@@ -1226,7 +1309,7 @@ skip_q_computation:
cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
if (req->sba->hw_resp_size) {
cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
cmdsp->resp = req->resp_dma;
cmdsp->resp = resp_dma;
cmdsp->resp_len = req->sba->hw_resp_size;
}
cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
@@ -1243,7 +1326,7 @@ skip_q:
msg->error = 0;
}
struct sba_request *
static struct sba_request *
sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off,
dma_addr_t *dst_p, dma_addr_t *dst_q,
dma_addr_t src, u8 scf, size_t len,
@@ -1255,7 +1338,8 @@ sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off,
req = sba_alloc_request(sba);
if (!req)
return NULL;
req->fence = (flags & DMA_PREP_FENCE) ? true : false;
if (flags & DMA_PREP_FENCE)
req->flags |= SBA_REQUEST_FENCE;
/* Fillup request messages */
sba_fillup_pq_single_msg(req, dmaf_continue(flags),
@@ -1370,40 +1454,10 @@ fail:
/* ====== Mailbox callbacks ===== */
static void sba_dma_tx_actions(struct sba_request *req)
{
struct dma_async_tx_descriptor *tx = &req->tx;
WARN_ON(tx->cookie < 0);
if (tx->cookie > 0) {
dma_cookie_complete(tx);
/*
* Call the callback (must not sleep or submit new
* operations to this channel)
*/
if (tx->callback)
tx->callback(tx->callback_param);
dma_descriptor_unmap(tx);
}
/* Run dependent operations */
dma_run_dependencies(tx);
/* If waiting for 'ack' then move to completed list */
if (!async_tx_test_ack(&req->tx))
sba_complete_chained_requests(req);
else
sba_free_chained_requests(req);
}
static void sba_receive_message(struct mbox_client *cl, void *msg)
{
unsigned long flags;
struct brcm_message *m = msg;
struct sba_request *req = m->ctx, *req1;
struct sba_request *req = m->ctx;
struct sba_device *sba = req->sba;
/* Error count if message has error */
@@ -1411,52 +1465,37 @@ static void sba_receive_message(struct mbox_client *cl, void *msg)
dev_err(sba->dev, "%s got message with error %d",
dma_chan_name(&sba->dma_chan), m->error);
/* Mark request as received */
sba_received_request(req);
/* Process received request */
sba_process_received_request(sba, req);
}
/* Wait for all chained requests to be completed */
if (atomic_dec_return(&req->first->next_pending_count))
goto done;
/* ====== Debugfs callbacks ====== */
/* Point to first request */
req = req->first;
static int sba_debugfs_stats_show(struct seq_file *file, void *offset)
{
struct platform_device *pdev = to_platform_device(file->private);
struct sba_device *sba = platform_get_drvdata(pdev);
/* Update request */
if (req->state == SBA_REQUEST_STATE_RECEIVED)
sba_dma_tx_actions(req);
else
sba_free_chained_requests(req);
/* Write stats in file */
sba_write_stats_in_seqfile(sba, file);
spin_lock_irqsave(&sba->reqs_lock, flags);
/* Re-check all completed request waiting for 'ack' */
list_for_each_entry_safe(req, req1, &sba->reqs_completed_list, node) {
spin_unlock_irqrestore(&sba->reqs_lock, flags);
sba_dma_tx_actions(req);
spin_lock_irqsave(&sba->reqs_lock, flags);
}
spin_unlock_irqrestore(&sba->reqs_lock, flags);
done:
/* Try to submit pending request */
sba_issue_pending(&sba->dma_chan);
return 0;
}
/* ====== Platform driver routines ===== */
static int sba_prealloc_channel_resources(struct sba_device *sba)
{
int i, j, p, ret = 0;
int i, j, ret = 0;
struct sba_request *req = NULL;
sba->resp_base = dma_alloc_coherent(sba->dma_dev.dev,
sba->resp_base = dma_alloc_coherent(sba->mbox_dev,
sba->max_resp_pool_size,
&sba->resp_dma_base, GFP_KERNEL);
if (!sba->resp_base)
return -ENOMEM;
sba->cmds_base = dma_alloc_coherent(sba->dma_dev.dev,
sba->cmds_base = dma_alloc_coherent(sba->mbox_dev,
sba->max_cmds_pool_size,
&sba->cmds_dma_base, GFP_KERNEL);
if (!sba->cmds_base) {
@@ -1469,36 +1508,23 @@ static int sba_prealloc_channel_resources(struct sba_device *sba)
INIT_LIST_HEAD(&sba->reqs_alloc_list);
INIT_LIST_HEAD(&sba->reqs_pending_list);
INIT_LIST_HEAD(&sba->reqs_active_list);
INIT_LIST_HEAD(&sba->reqs_received_list);
INIT_LIST_HEAD(&sba->reqs_completed_list);
INIT_LIST_HEAD(&sba->reqs_aborted_list);
INIT_LIST_HEAD(&sba->reqs_free_list);
sba->reqs = devm_kcalloc(sba->dev, sba->max_req,
sizeof(*req), GFP_KERNEL);
if (!sba->reqs) {
ret = -ENOMEM;
goto fail_free_cmds_pool;
}
for (i = 0, p = 0; i < sba->max_req; i++) {
req = &sba->reqs[i];
INIT_LIST_HEAD(&req->node);
req->sba = sba;
req->state = SBA_REQUEST_STATE_FREE;
INIT_LIST_HEAD(&req->next);
req->next_count = 1;
atomic_set(&req->next_pending_count, 0);
req->fence = false;
req->resp = sba->resp_base + p;
req->resp_dma = sba->resp_dma_base + p;
p += sba->hw_resp_size;
req->cmds = devm_kcalloc(sba->dev, sba->max_cmd_per_req,
sizeof(*req->cmds), GFP_KERNEL);
if (!req->cmds) {
for (i = 0; i < sba->max_req; i++) {
req = devm_kzalloc(sba->dev,
sizeof(*req) +
sba->max_cmd_per_req * sizeof(req->cmds[0]),
GFP_KERNEL);
if (!req) {
ret = -ENOMEM;
goto fail_free_cmds_pool;
}
INIT_LIST_HEAD(&req->node);
req->sba = sba;
req->flags = SBA_REQUEST_STATE_FREE;
INIT_LIST_HEAD(&req->next);
atomic_set(&req->next_pending_count, 0);
for (j = 0; j < sba->max_cmd_per_req; j++) {
req->cmds[j].cmd = 0;
req->cmds[j].cmd_dma = sba->cmds_base +
@@ -1509,21 +1535,20 @@ static int sba_prealloc_channel_resources(struct sba_device *sba)
}
memset(&req->msg, 0, sizeof(req->msg));
dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
async_tx_ack(&req->tx);
req->tx.tx_submit = sba_tx_submit;
req->tx.phys = req->resp_dma;
req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size;
list_add_tail(&req->node, &sba->reqs_free_list);
}
sba->reqs_free_count = sba->max_req;
return 0;
fail_free_cmds_pool:
dma_free_coherent(sba->dma_dev.dev,
dma_free_coherent(sba->mbox_dev,
sba->max_cmds_pool_size,
sba->cmds_base, sba->cmds_dma_base);
fail_free_resp_pool:
dma_free_coherent(sba->dma_dev.dev,
dma_free_coherent(sba->mbox_dev,
sba->max_resp_pool_size,
sba->resp_base, sba->resp_dma_base);
return ret;
@@ -1532,9 +1557,9 @@ fail_free_resp_pool:
static void sba_freeup_channel_resources(struct sba_device *sba)
{
dmaengine_terminate_all(&sba->dma_chan);
dma_free_coherent(sba->dma_dev.dev, sba->max_cmds_pool_size,
dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size,
sba->cmds_base, sba->cmds_dma_base);
dma_free_coherent(sba->dma_dev.dev, sba->max_resp_pool_size,
dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size,
sba->resp_base, sba->resp_dma_base);
sba->resp_base = NULL;
sba->resp_dma_base = 0;
@@ -1625,6 +1650,13 @@ static int sba_probe(struct platform_device *pdev)
sba->dev = &pdev->dev;
platform_set_drvdata(pdev, sba);
/* Number of channels equals number of mailbox channels */
ret = of_count_phandle_with_args(pdev->dev.of_node,
"mboxes", "#mbox-cells");
if (ret <= 0)
return -ENODEV;
mchans_count = ret;
/* Determine SBA version from DT compatible string */
if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
sba->ver = SBA_VER_1;
@@ -1637,14 +1669,12 @@ static int sba_probe(struct platform_device *pdev)
/* Derived Configuration parameters */
switch (sba->ver) {
case SBA_VER_1:
sba->max_req = 1024;
sba->hw_buf_size = 4096;
sba->hw_resp_size = 8;
sba->max_pq_coefs = 6;
sba->max_pq_srcs = 6;
break;
case SBA_VER_2:
sba->max_req = 1024;
sba->hw_buf_size = 4096;
sba->hw_resp_size = 8;
sba->max_pq_coefs = 30;
@@ -1658,6 +1688,7 @@ static int sba_probe(struct platform_device *pdev)
default:
return -EINVAL;
}
sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL * mchans_count;
sba->max_cmd_per_req = sba->max_pq_srcs + 3;
sba->max_xor_srcs = sba->max_cmd_per_req - 1;
sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
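With SBA_MAX_REQ_PER_MBOX_CHANNEL at 8192, a controller exposing for example 8 mailbox channels now pre-allocates 8192 * 8 = 65536 requests, and with the 8-byte hw_resp_size above the response pool works out to 65536 * 8 bytes = 512 KiB.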
@@ -1668,25 +1699,17 @@ static int sba_probe(struct platform_device *pdev)
sba->client.dev = &pdev->dev;
sba->client.rx_callback = sba_receive_message;
sba->client.tx_block = false;
sba->client.knows_txdone = false;
sba->client.knows_txdone = true;
sba->client.tx_tout = 0;
/* Number of channels equals number of mailbox channels */
ret = of_count_phandle_with_args(pdev->dev.of_node,
"mboxes", "#mbox-cells");
if (ret <= 0)
return -ENODEV;
mchans_count = ret;
sba->mchans_count = 0;
atomic_set(&sba->mchans_current, 0);
/* Allocate mailbox channel array */
sba->mchans = devm_kcalloc(&pdev->dev, sba->mchans_count,
sba->mchans = devm_kcalloc(&pdev->dev, mchans_count,
sizeof(*sba->mchans), GFP_KERNEL);
if (!sba->mchans)
return -ENOMEM;
/* Request mailbox channels */
sba->mchans_count = 0;
for (i = 0; i < mchans_count; i++) {
sba->mchans[i] = mbox_request_channel(&sba->client, i);
if (IS_ERR(sba->mchans[i])) {
@@ -1695,6 +1718,7 @@ static int sba_probe(struct platform_device *pdev)
}
sba->mchans_count++;
}
atomic_set(&sba->mchans_current, 0);
/* Find-out underlying mailbox device */
ret = of_parse_phandle_with_args(pdev->dev.of_node,
@@ -1723,15 +1747,34 @@ static int sba_probe(struct platform_device *pdev)
}
}
/* Register DMA device with linux async framework */
ret = sba_async_register(sba);
if (ret)
goto fail_free_mchans;
/* Prealloc channel resource */
ret = sba_prealloc_channel_resources(sba);
if (ret)
goto fail_async_dev_unreg;
goto fail_free_mchans;
/* Check availability of debugfs */
if (!debugfs_initialized())
goto skip_debugfs;
/* Create debugfs root entry */
sba->root = debugfs_create_dir(dev_name(sba->dev), NULL);
if (IS_ERR_OR_NULL(sba->root)) {
dev_err(sba->dev, "failed to create debugfs root entry\n");
sba->root = NULL;
goto skip_debugfs;
}
/* Create debugfs stats entry */
sba->stats = debugfs_create_devm_seqfile(sba->dev, "stats", sba->root,
sba_debugfs_stats_show);
if (IS_ERR_OR_NULL(sba->stats))
dev_err(sba->dev, "failed to create debugfs stats file\n");
skip_debugfs:
/* Register DMA device with Linux async framework */
ret = sba_async_register(sba);
if (ret)
goto fail_free_resources;
/* Print device info */
dev_info(sba->dev, "%s using SBAv%d and %d mailbox channels",
@@ -1740,8 +1783,9 @@ static int sba_probe(struct platform_device *pdev)
return 0;
fail_async_dev_unreg:
dma_async_device_unregister(&sba->dma_dev);
fail_free_resources:
debugfs_remove_recursive(sba->root);
sba_freeup_channel_resources(sba);
fail_free_mchans:
for (i = 0; i < sba->mchans_count; i++)
mbox_free_channel(sba->mchans[i]);
@@ -1753,10 +1797,12 @@ static int sba_remove(struct platform_device *pdev)
int i;
struct sba_device *sba = platform_get_drvdata(pdev);
sba_freeup_channel_resources(sba);
dma_async_device_unregister(&sba->dma_dev);
debugfs_remove_recursive(sba->root);
sba_freeup_channel_resources(sba);
for (i = 0; i < sba->mchans_count; i++)
mbox_free_channel(sba->mchans[i]);