
IB/ehca: Change meaning of hca_cap_mr_pgsize

ehca_shca.hca_cap_mr_pgsize now contains all supported page sizes ORed
together. This makes some checks easier to code and understand, plus
we can return this value verbatim in query_hca(), fixing a problem
with SRP (reported by Anton Blanchard -- thanks!).

Signed-off-by: Joachim Fenkes <fenkes@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Joachim Fenkes 2007-10-16 17:31:14 +02:00 committed by Roland Dreier
parent 8c08d50d4f
commit abc39d3672
4 changed files with 36 additions and 22 deletions
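
Editorial note: the core of the change is that hca_cap_mr_pgsize now holds all supported page sizes ORed together, so a "does the HCA support this size" check collapses to a single bitwise AND instead of a comparison against one privileged size. A minimal standalone sketch of that pattern (illustrative values mirroring the pgsize_map added below; the variable and helper are hypothetical, not code from this patch):

    /* page-size capabilities as an ORed bitmask of byte sizes */
    unsigned long cap_mr_pgsize = 0x1000 | 0x10000 | 0x1000000; /* 4K|64K|16M */

    /* a page size is supported iff its bit is set in the mask */
    static inline int mr_pgsize_supported(unsigned long pgsize)
    {
            return (pgsize & cap_mr_pgsize) != 0;
    }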

drivers/infiniband/hw/ehca/ehca_classes.h

@@ -323,7 +323,6 @@ extern int ehca_static_rate;
 extern int ehca_port_act_time;
 extern int ehca_use_hp_mr;
 extern int ehca_scaling_code;
-extern int ehca_mr_largepage;
 
 struct ipzu_queue_resp {
         u32 qe_size; /* queue entry size */

drivers/infiniband/hw/ehca/ehca_hca.c

@@ -77,6 +77,7 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
         }
 
         memset(props, 0, sizeof(struct ib_device_attr));
+        props->page_size_cap = shca->hca_cap_mr_pgsize;
         props->fw_ver = rblock->hw_ver;
         props->max_mr_size = rblock->max_mr_size;
         props->vendor_id = rblock->vendor_id >> 8;
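
Returning the mask verbatim in props->page_size_cap is what fixes the SRP problem: a consumer can now derive a valid MR page size straight from the attribute. A hedged sketch of how a ULP might pick the largest advertised size (not SRP's actual code; max_page_size is a hypothetical helper):

    #include <linux/types.h>
    #include <linux/bitops.h>

    /* highest set bit in the mask == largest supported page size;
     * returns 0 if the mask is empty */
    static unsigned long max_page_size(u64 page_size_cap)
    {
            return page_size_cap ? 1UL << (fls64(page_size_cap) - 1) : 0;
    }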

drivers/infiniband/hw/ehca/ehca_main.c

@@ -260,13 +260,20 @@ static struct cap_descr {
         { HCA_CAP_MINI_QP, "HCA_CAP_MINI_QP" },
 };
 
-int ehca_sense_attributes(struct ehca_shca *shca)
+static int ehca_sense_attributes(struct ehca_shca *shca)
 {
         int i, ret = 0;
         u64 h_ret;
         struct hipz_query_hca *rblock;
         struct hipz_query_port *port;
+        static const u32 pgsize_map[] = {
+                HCA_CAP_MR_PGSIZE_4K,  0x1000,
+                HCA_CAP_MR_PGSIZE_64K, 0x10000,
+                HCA_CAP_MR_PGSIZE_1M,  0x100000,
+                HCA_CAP_MR_PGSIZE_16M, 0x1000000,
+        };
 
         rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
         if (!rblock) {
                 ehca_gen_err("Cannot allocate rblock memory.");
@@ -329,8 +336,15 @@ int ehca_sense_attributes(struct ehca_shca *shca)
                 if (EHCA_BMASK_GET(hca_cap_descr[i].mask, shca->hca_cap))
                         ehca_gen_dbg(" %s", hca_cap_descr[i].descr);
-        shca->hca_cap_mr_pgsize = rblock->memory_page_size_supported;
+
+        /* translate supported MR page sizes; always support 4K */
+        shca->hca_cap_mr_pgsize = EHCA_PAGESIZE;
+        if (ehca_mr_largepage) { /* support extra sizes only if enabled */
+                for (i = 0; i < ARRAY_SIZE(pgsize_map); i += 2)
+                        if (rblock->memory_page_size_supported & pgsize_map[i])
+                                shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];
+        }
 
         /* query max MTU from first port -- it's the same for all ports */
         port = (struct hipz_query_port *)rblock;
         h_ret = hipz_h_query_port(shca->ipz_hca_handle, 1, port);
         if (h_ret != H_SUCCESS) {
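
A quick worked example of the translation loop above: if the firmware reports 4K, 64K, and 16M support and the ehca_mr_largepage module parameter is set, the resulting mask is 0x1000 | 0x10000 | 0x1000000 = 0x1011000. With ehca_mr_largepage clear, the mask stays at EHCA_PAGESIZE (4K), preserving the old conservative behaviour.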

drivers/infiniband/hw/ehca/ehca_mrmw.c

@@ -79,9 +79,7 @@ static u32 ehca_encode_hwpage_size(u32 pgsize)
 
 static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca)
 {
-        if (shca->hca_cap_mr_pgsize & HCA_CAP_MR_PGSIZE_16M)
-                return EHCA_MR_PGSIZE16M;
-        return EHCA_MR_PGSIZE4K;
+        return 1UL << ilog2(shca->hca_cap_mr_pgsize);
 }
 
 static struct ehca_mr *ehca_mr_new(void)
@@ -288,7 +286,7 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                 container_of(pd->device, struct ehca_shca, ib_device);
         struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
         struct ehca_mr_pginfo pginfo;
-        int ret;
+        int ret, page_shift;
         u32 num_kpages;
         u32 num_hwpages;
         u64 hwpage_size;
@@ -343,19 +341,20 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
         /* determine number of MR pages */
         num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
         /* select proper hw_pgsize */
-        if (ehca_mr_largepage &&
-            (shca->hca_cap_mr_pgsize & HCA_CAP_MR_PGSIZE_16M)) {
-                int page_shift = PAGE_SHIFT;
-                if (e_mr->umem->hugetlb) {
-                        /* determine page_shift, clamp between 4K and 16M */
-                        page_shift = (fls64(length - 1) + 3) & ~3;
-                        page_shift = min(max(page_shift, EHCA_MR_PGSHIFT4K),
-                                         EHCA_MR_PGSHIFT16M);
-                }
-                hwpage_size = 1UL << page_shift;
-        } else
-                hwpage_size = EHCA_MR_PGSIZE4K; /* ehca1 only supports 4k */
-        ehca_dbg(pd->device, "hwpage_size=%lx", hwpage_size);
+        page_shift = PAGE_SHIFT;
+        if (e_mr->umem->hugetlb) {
+                /* determine page_shift, clamp between 4K and 16M */
+                page_shift = (fls64(length - 1) + 3) & ~3;
+                page_shift = min(max(page_shift, EHCA_MR_PGSHIFT4K),
+                                 EHCA_MR_PGSHIFT16M);
+        }
+        hwpage_size = 1UL << page_shift;
+
+        /* now that we have the desired page size, shift until it's
+         * supported, too. 4K is always supported, so this terminates.
+         */
+        while (!(hwpage_size & shca->hca_cap_mr_pgsize))
+                hwpage_size >>= 4;
 
 reg_user_mr_fallback:
         num_hwpages = NUM_CHUNKS((virt % hwpage_size) + length, hwpage_size);
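
The hwpage_size >>= 4 step relies on the supported sizes being spaced a factor of 16 apart (4K, 64K, 1M, 16M). Worked example: if the clamped page_shift yields hwpage_size = 0x100000 (1M) but the capability mask only contains 4K and 16M, the first shift gives 0x10000 (64K, still unsupported) and the second gives 0x1000 (4K), which is always set, so the loop terminates.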
@@ -801,8 +800,9 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
                 ib_fmr = ERR_PTR(-EINVAL);
                 goto alloc_fmr_exit0;
         }
-        hw_pgsize = ehca_get_max_hwpage_size(shca);
-        if ((1 << fmr_attr->page_shift) != hw_pgsize) {
+
+        hw_pgsize = 1 << fmr_attr->page_shift;
+        if (!(hw_pgsize & shca->hca_cap_mr_pgsize)) {
                 ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
                          fmr_attr->page_shift);
                 ib_fmr = ERR_PTR(-EINVAL);
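
Note how the FMR check inverts direction: instead of requiring fmr_attr->page_shift to match the single maximum hardware page size, any size whose bit appears in hca_cap_mr_pgsize is now accepted.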