1
0
Fork 0

amso1100: fold c2_reg_phys_mr into c2_get_dma_mr

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Reviewed-by: Jason Gunthorpe <jgunthorpe@obsidianresearch.com> [core]
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
hifive-unleashed-5.1
Christoph Hellwig 2015-12-23 19:12:51 +01:00 committed by Doug Ledford
parent f64054d850
commit 33f0921934
1 changed file with 14 additions and 57 deletions

View File

@@ -337,43 +337,21 @@ static inline u32 c2_convert_access(int acc)
C2_ACF_LOCAL_READ | C2_ACF_WINDOW_BIND;
}
static struct ib_mr *c2_reg_phys_mr(struct ib_pd *ib_pd,
struct ib_phys_buf *buffer_list,
int num_phys_buf, int acc, u64 * iova_start)
static struct ib_mr *c2_get_dma_mr(struct ib_pd *pd, int acc)
{
struct c2_mr *mr;
u64 *page_list;
u32 total_len;
int err, i, j, k, page_shift, pbl_depth;
const u32 total_len = 0xffffffff; /* AMSO1100 limit */
int err, page_shift, pbl_depth, i;
u64 kva = 0;
pbl_depth = 0;
total_len = 0;
pr_debug("%s:%u\n", __func__, __LINE__);
page_shift = PAGE_SHIFT;
/*
* If there is only 1 buffer we assume this could
* be a map of all phy mem...use a 32k page_shift.
* This is a map of all phy mem...use a 32k page_shift.
*/
if (num_phys_buf == 1)
page_shift += 3;
for (i = 0; i < num_phys_buf; i++) {
if (offset_in_page(buffer_list[i].addr)) {
pr_debug("Unaligned Memory Buffer: 0x%x\n",
(unsigned int) buffer_list[i].addr);
return ERR_PTR(-EINVAL);
}
if (!buffer_list[i].size) {
pr_debug("Invalid Buffer Size\n");
return ERR_PTR(-EINVAL);
}
total_len += buffer_list[i].size;
pbl_depth += ALIGN(buffer_list[i].size,
BIT(page_shift)) >> page_shift;
}
page_shift = PAGE_SHIFT + 3;
pbl_depth = ALIGN(total_len, BIT(page_shift)) >> page_shift;
page_list = vmalloc(sizeof(u64) * pbl_depth);
if (!page_list) {
@@ -382,16 +360,8 @@ static struct ib_mr *c2_reg_phys_mr(struct ib_pd *ib_pd,
return ERR_PTR(-ENOMEM);
}
for (i = 0, j = 0; i < num_phys_buf; i++) {
int naddrs;
naddrs = ALIGN(buffer_list[i].size,
BIT(page_shift)) >> page_shift;
for (k = 0; k < naddrs; k++)
page_list[j++] = (buffer_list[i].addr +
(k << page_shift));
}
for (i = 0; i < pbl_depth; i++)
page_list[i] = (i << page_shift);
mr = kmalloc(sizeof(*mr), GFP_KERNEL);
if (!mr) {
@@ -399,17 +369,17 @@ static struct ib_mr *c2_reg_phys_mr(struct ib_pd *ib_pd,
return ERR_PTR(-ENOMEM);
}
mr->pd = to_c2pd(ib_pd);
mr->pd = to_c2pd(pd);
mr->umem = NULL;
pr_debug("%s - page shift %d, pbl_depth %d, total_len %u, "
"*iova_start %llx, first pa %llx, last pa %llx\n",
__func__, page_shift, pbl_depth, total_len,
(unsigned long long) *iova_start,
(unsigned long long) kva,
(unsigned long long) page_list[0],
(unsigned long long) page_list[pbl_depth-1]);
err = c2_nsmr_register_phys_kern(to_c2dev(ib_pd->device), page_list,
err = c2_nsmr_register_phys_kern(to_c2dev(pd->device), page_list,
BIT(page_shift), pbl_depth,
total_len, 0, iova_start,
total_len, 0, &kva,
c2_convert_access(acc), mr);
vfree(page_list);
if (err) {
@@ -420,19 +390,6 @@ static struct ib_mr *c2_reg_phys_mr(struct ib_pd *ib_pd,
return &mr->ibmr;
}
static struct ib_mr *c2_get_dma_mr(struct ib_pd *pd, int acc)
{
struct ib_phys_buf bl;
u64 kva = 0;
pr_debug("%s:%u\n", __func__, __LINE__);
/* AMSO1100 limit */
bl.size = 0xffffffff;
bl.addr = 0;
return c2_reg_phys_mr(pd, &bl, 1, acc, &kva);
}
static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt, int acc, struct ib_udata *udata)
{