staging/rdma: remove deprecated ehca driver

This driver was moved to staging for eventual deletion.  Time
to complete that task.

Signed-off-by: Doug Ledford <dledford@redhat.com>
Author: Doug Ledford <dledford@redhat.com>
Date:   2016-02-03 11:08:53 -05:00
parent f5e741b7c7
commit e581d111da
35 changed files with 0 additions and 13582 deletions

MAINTAINERS

@@ -4177,13 +4177,6 @@ W: http://aeschi.ch.eu.org/efs/
S: Orphan
F: fs/efs/
EHCA (IBM GX bus InfiniBand adapter) DRIVER
M: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
M: Christoph Raisch <raisch@de.ibm.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/ehca/
EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
M: Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
L: netdev@vger.kernel.org

drivers/staging/rdma/Kconfig

@@ -22,8 +22,6 @@ menuconfig STAGING_RDMA
# Please keep entries in alphabetic order
if STAGING_RDMA
source "drivers/staging/rdma/ehca/Kconfig"
source "drivers/staging/rdma/hfi1/Kconfig"
source "drivers/staging/rdma/ipath/Kconfig"

drivers/staging/rdma/Makefile

@@ -1,4 +1,3 @@
# Entries for RDMA_STAGING tree
obj-$(CONFIG_INFINIBAND_EHCA) += ehca/
obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
obj-$(CONFIG_INFINIBAND_IPATH) += ipath/

drivers/staging/rdma/ehca/Kconfig

@@ -1,10 +0,0 @@
config INFINIBAND_EHCA
tristate "eHCA support"
depends on IBMEBUS
---help---
This driver supports the deprecated IBM pSeries eHCA InfiniBand
adapter.
To compile the driver as a module, choose M here. The module
will be called ib_ehca.

drivers/staging/rdma/ehca/Makefile

@@ -1,16 +0,0 @@
# Authors: Heiko J Schick <schickhj@de.ibm.com>
# Christoph Raisch <raisch@de.ibm.com>
# Joachim Fenkes <fenkes@de.ibm.com>
#
# Copyright (c) 2005 IBM Corporation
#
# All rights reserved.
#
# This source code is distributed under a dual license of GPL v2.0 and OpenIB BSD.
obj-$(CONFIG_INFINIBAND_EHCA) += ib_ehca.o
ib_ehca-objs = ehca_main.o ehca_hca.o ehca_mcast.o ehca_pd.o ehca_av.o ehca_eq.o \
ehca_cq.o ehca_qp.o ehca_sqp.o ehca_mrmw.o ehca_reqs.o ehca_irq.o \
ehca_uverbs.o ipz_pt_fn.o hcp_if.o hcp_phyp.o

drivers/staging/rdma/ehca/TODO

@@ -1,4 +0,0 @@
9/2015
The ehca driver has been deprecated and moved to drivers/staging/rdma.
It will be removed in the 4.6 merge window.

drivers/staging/rdma/ehca/ehca_av.c

@@ -1,279 +0,0 @@
/*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* address vector functions
*
* Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
* Khadija Souissi <souissik@de.ibm.com>
* Reinhard Ernst <rernst@de.ibm.com>
* Christoph Raisch <raisch@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/slab.h>
#include "ehca_tools.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
static struct kmem_cache *av_cache;
int ehca_calc_ipd(struct ehca_shca *shca, int port,
enum ib_rate path_rate, u32 *ipd)
{
int path = ib_rate_to_mult(path_rate);
int link, ret;
struct ib_port_attr pa;
if (path_rate == IB_RATE_PORT_CURRENT) {
*ipd = 0;
return 0;
}
if (unlikely(path < 0)) {
ehca_err(&shca->ib_device, "Invalid static rate! path_rate=%x",
path_rate);
return -EINVAL;
}
ret = ehca_query_port(&shca->ib_device, port, &pa);
if (unlikely(ret < 0)) {
ehca_err(&shca->ib_device, "Failed to query port ret=%i", ret);
return ret;
}
link = ib_width_enum_to_int(pa.active_width) * pa.active_speed;
if (path >= link)
/* no need to throttle if path faster than link */
*ipd = 0;
else
/* IPD = round((link / path) - 1) */
*ipd = ((link + (path >> 1)) / path) - 1;
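/*
 * Illustrative note (not in the original source): adding half the
 * divisor before dividing rounds to the nearest integer.  E.g. for a
 * 4X SDR link (link = 4 * 1 = 4) and a 2.5 Gbps static rate
 * (path = 1): ipd = ((4 + 0) / 1) - 1 = 3, i.e. three inter-packet
 * delay units, throttling the path to a quarter of the link rate.
 */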
return 0;
}
struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
int ret;
struct ehca_av *av;
struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
ib_device);
av = kmem_cache_alloc(av_cache, GFP_KERNEL);
if (!av) {
ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p",
pd, ah_attr);
return ERR_PTR(-ENOMEM);
}
av->av.sl = ah_attr->sl;
av->av.dlid = ah_attr->dlid;
av->av.slid_path_bits = ah_attr->src_path_bits;
if (ehca_static_rate < 0) {
u32 ipd;
if (ehca_calc_ipd(shca, ah_attr->port_num,
ah_attr->static_rate, &ipd)) {
ret = -EINVAL;
goto create_ah_exit1;
}
av->av.ipd = ipd;
} else
av->av.ipd = ehca_static_rate;
av->av.lnh = ah_attr->ah_flags;
av->av.grh.word_0 = EHCA_BMASK_SET(GRH_IPVERSION_MASK, 6);
av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_TCLASS_MASK,
ah_attr->grh.traffic_class);
av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
ah_attr->grh.flow_label);
av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
ah_attr->grh.hop_limit);
av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1B);
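/*
 * Editorial note: 0x1B is the "next header" value that identifies the
 * GRH payload as IBA transport in the InfiniBand specification.
 */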
/* set sgid in grh.word_1 */
if (ah_attr->ah_flags & IB_AH_GRH) {
int rc;
struct ib_port_attr port_attr;
union ib_gid gid;
memset(&port_attr, 0, sizeof(port_attr));
rc = ehca_query_port(pd->device, ah_attr->port_num,
&port_attr);
if (rc) { /* invalid port number */
ret = -EINVAL;
ehca_err(pd->device, "Invalid port number "
"ehca_query_port() returned %x "
"pd=%p ah_attr=%p", rc, pd, ah_attr);
goto create_ah_exit1;
}
memset(&gid, 0, sizeof(gid));
rc = ehca_query_gid(pd->device,
ah_attr->port_num,
ah_attr->grh.sgid_index, &gid);
if (rc) {
ret = -EINVAL;
ehca_err(pd->device, "Failed to retrieve sgid "
"ehca_query_gid() returned %x "
"pd=%p ah_attr=%p", rc, pd, ah_attr);
goto create_ah_exit1;
}
memcpy(&av->av.grh.word_1, &gid, sizeof(gid));
}
av->av.pmtu = shca->max_mtu;
/* dgid comes in grh.word_3 */
memcpy(&av->av.grh.word_3, &ah_attr->grh.dgid,
sizeof(ah_attr->grh.dgid));
return &av->ib_ah;
create_ah_exit1:
kmem_cache_free(av_cache, av);
return ERR_PTR(ret);
}
int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
struct ehca_av *av;
struct ehca_ud_av new_ehca_av;
struct ehca_shca *shca = container_of(ah->pd->device, struct ehca_shca,
ib_device);
memset(&new_ehca_av, 0, sizeof(new_ehca_av));
new_ehca_av.sl = ah_attr->sl;
new_ehca_av.dlid = ah_attr->dlid;
new_ehca_av.slid_path_bits = ah_attr->src_path_bits;
new_ehca_av.ipd = ah_attr->static_rate;
new_ehca_av.lnh = EHCA_BMASK_SET(GRH_FLAG_MASK,
(ah_attr->ah_flags & IB_AH_GRH) > 0);
new_ehca_av.grh.word_0 = EHCA_BMASK_SET(GRH_TCLASS_MASK,
ah_attr->grh.traffic_class);
new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
ah_attr->grh.flow_label);
new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
ah_attr->grh.hop_limit);
new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1b);
/* set sgid in grh.word_1 */
if (ah_attr->ah_flags & IB_AH_GRH) {
int rc;
struct ib_port_attr port_attr;
union ib_gid gid;
memset(&port_attr, 0, sizeof(port_attr));
rc = ehca_query_port(ah->device, ah_attr->port_num,
&port_attr);
if (rc) { /* invalid port number */
ehca_err(ah->device, "Invalid port number "
"ehca_query_port() returned %x "
"ah=%p ah_attr=%p port_num=%x",
rc, ah, ah_attr, ah_attr->port_num);
return -EINVAL;
}
memset(&gid, 0, sizeof(gid));
rc = ehca_query_gid(ah->device,
ah_attr->port_num,
ah_attr->grh.sgid_index, &gid);
if (rc) {
ehca_err(ah->device, "Failed to retrieve sgid "
"ehca_query_gid() returned %x "
"ah=%p ah_attr=%p port_num=%x "
"sgid_index=%x",
rc, ah, ah_attr, ah_attr->port_num,
ah_attr->grh.sgid_index);
return -EINVAL;
}
memcpy(&new_ehca_av.grh.word_1, &gid, sizeof(gid));
}
new_ehca_av.pmtu = shca->max_mtu;
memcpy(&new_ehca_av.grh.word_3, &ah_attr->grh.dgid,
sizeof(ah_attr->grh.dgid));
av = container_of(ah, struct ehca_av, ib_ah);
av->av = new_ehca_av;
return 0;
}
int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
struct ehca_av *av = container_of(ah, struct ehca_av, ib_ah);
memcpy(&ah_attr->grh.dgid, &av->av.grh.word_3,
sizeof(ah_attr->grh.dgid));
ah_attr->sl = av->av.sl;
ah_attr->dlid = av->av.dlid;
ah_attr->src_path_bits = av->av.slid_path_bits;
ah_attr->static_rate = av->av.ipd;
ah_attr->ah_flags = EHCA_BMASK_GET(GRH_FLAG_MASK, av->av.lnh);
ah_attr->grh.traffic_class = EHCA_BMASK_GET(GRH_TCLASS_MASK,
av->av.grh.word_0);
ah_attr->grh.hop_limit = EHCA_BMASK_GET(GRH_HOPLIMIT_MASK,
av->av.grh.word_0);
ah_attr->grh.flow_label = EHCA_BMASK_GET(GRH_FLOWLABEL_MASK,
av->av.grh.word_0);
return 0;
}
int ehca_destroy_ah(struct ib_ah *ah)
{
kmem_cache_free(av_cache, container_of(ah, struct ehca_av, ib_ah));
return 0;
}
int ehca_init_av_cache(void)
{
av_cache = kmem_cache_create("ehca_cache_av",
sizeof(struct ehca_av), 0,
SLAB_HWCACHE_ALIGN,
NULL);
if (!av_cache)
return -ENOMEM;
return 0;
}
void ehca_cleanup_av_cache(void)
{
kmem_cache_destroy(av_cache);
}

drivers/staging/rdma/ehca/ehca_classes.h

@@ -1,481 +0,0 @@
/*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* Struct definition for eHCA internal structures
*
* Authors: Heiko J Schick <schickhj@de.ibm.com>
* Christoph Raisch <raisch@de.ibm.com>
* Joachim Fenkes <fenkes@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __EHCA_CLASSES_H__
#define __EHCA_CLASSES_H__
struct ehca_module;
struct ehca_qp;
struct ehca_cq;
struct ehca_eq;
struct ehca_mr;
struct ehca_mw;
struct ehca_pd;
struct ehca_av;
#include <linux/wait.h>
#include <linux/mutex.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#ifdef CONFIG_PPC64
#include "ehca_classes_pSeries.h"
#endif
#include "ipz_pt_fn.h"
#include "ehca_qes.h"
#include "ehca_irq.h"
#define EHCA_EQE_CACHE_SIZE 20
#define EHCA_MAX_NUM_QUEUES 0xffff
struct ehca_eqe_cache_entry {
struct ehca_eqe *eqe;
struct ehca_cq *cq;
};
struct ehca_eq {
u32 length;
struct ipz_queue ipz_queue;
struct ipz_eq_handle ipz_eq_handle;
struct work_struct work;
struct h_galpas galpas;
int is_initialized;
struct ehca_pfeq pf;
spinlock_t spinlock;
struct tasklet_struct interrupt_task;
u32 ist;
spinlock_t irq_spinlock;
struct ehca_eqe_cache_entry eqe_cache[EHCA_EQE_CACHE_SIZE];
};
struct ehca_sma_attr {
u16 lid, lmc, sm_sl, sm_lid;
u16 pkey_tbl_len, pkeys[16];
};
struct ehca_sport {
struct ib_cq *ibcq_aqp1;
struct ib_qp *ibqp_sqp[2];
/* lock to serialize modify_qp() calls for sqp in normal
* and irq path (when event PORT_ACTIVE is received first time)
*/
spinlock_t mod_sqp_lock;
enum ib_port_state port_state;
struct ehca_sma_attr saved_attr;
u32 pma_qp_nr;
};
#define HCA_CAP_MR_PGSIZE_4K 0x80000000
#define HCA_CAP_MR_PGSIZE_64K 0x40000000
#define HCA_CAP_MR_PGSIZE_1M 0x20000000
#define HCA_CAP_MR_PGSIZE_16M 0x10000000
struct ehca_shca {
struct ib_device ib_device;
struct platform_device *ofdev;
u8 num_ports;
int hw_level;
struct list_head shca_list;
struct ipz_adapter_handle ipz_hca_handle;
struct ehca_sport sport[2];
struct ehca_eq eq;
struct ehca_eq neq;
struct ehca_mr *maxmr;
struct ehca_pd *pd;
struct h_galpas galpas;
struct mutex modify_mutex;
u64 hca_cap;
/* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */
u32 hca_cap_mr_pgsize;
int max_mtu;
int max_num_qps;
int max_num_cqs;
atomic_t num_cqs;
atomic_t num_qps;
};
struct ehca_pd {
struct ib_pd ib_pd;
struct ipz_pd fw_pd;
/* small queue mgmt */
struct mutex lock;
struct list_head free[2];
struct list_head full[2];
};
enum ehca_ext_qp_type {
EQPT_NORMAL = 0,
EQPT_LLQP = 1,
EQPT_SRQBASE = 2,
EQPT_SRQ = 3,
};
/* struct to cache modify_qp()'s parms for GSI/SMI qp */
struct ehca_mod_qp_parm {
int mask;
struct ib_qp_attr attr;
};
#define EHCA_MOD_QP_PARM_MAX 4
#define QMAP_IDX_MASK 0xFFFFULL
/* struct for tracking if cqes have been reported to the application */
struct ehca_qmap_entry {
u16 app_wr_id;
u8 reported;
u8 cqe_req;
};
struct ehca_queue_map {
struct ehca_qmap_entry *map;
unsigned int entries;
unsigned int tail;
unsigned int left_to_poll;
unsigned int next_wqe_idx; /* Idx to first wqe to be flushed */
};
/* function to calculate the next index for the qmap */
static inline unsigned int next_index(unsigned int cur_index, unsigned int limit)
{
unsigned int temp = cur_index + 1;
return (temp == limit) ? 0 : temp;
}
struct ehca_qp {
union {
struct ib_qp ib_qp;
struct ib_srq ib_srq;
};
u32 qp_type;
enum ehca_ext_qp_type ext_type;
enum ib_qp_state state;
struct ipz_queue ipz_squeue;
struct ehca_queue_map sq_map;
struct ipz_queue ipz_rqueue;
struct ehca_queue_map rq_map;
struct h_galpas galpas;
u32 qkey;
u32 real_qp_num;
u32 token;
spinlock_t spinlock_s;
spinlock_t spinlock_r;
u32 sq_max_inline_data_size;
struct ipz_qp_handle ipz_qp_handle;
struct ehca_pfqp pf;
struct ib_qp_init_attr init_attr;
struct ehca_cq *send_cq;
struct ehca_cq *recv_cq;
unsigned int sqerr_purgeflag;
struct hlist_node list_entries;
/* array to cache modify_qp()'s parms for GSI/SMI qp */
struct ehca_mod_qp_parm *mod_qp_parm;
int mod_qp_parm_idx;
/* mmap counter for resources mapped into user space */
u32 mm_count_squeue;
u32 mm_count_rqueue;
u32 mm_count_galpa;
/* unsolicited ack circumvention */
int unsol_ack_circ;
int mtu_shift;
u32 message_count;
u32 packet_count;
atomic_t nr_events; /* events seen */
wait_queue_head_t wait_completion;
int mig_armed;
struct list_head sq_err_node;
struct list_head rq_err_node;
};
#define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
#define HAS_SQ(qp) (qp->ext_type != EQPT_SRQ)
#define HAS_RQ(qp) (qp->ext_type != EQPT_SRQBASE)
/* must be power of 2 */
#define QP_HASHTAB_LEN 8
struct ehca_cq {
struct ib_cq ib_cq;
struct ipz_queue ipz_queue;
struct h_galpas galpas;
spinlock_t spinlock;
u32 cq_number;
u32 token;
u32 nr_of_entries;
struct ipz_cq_handle ipz_cq_handle;
struct ehca_pfcq pf;
spinlock_t cb_lock;
struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
struct list_head entry;
u32 nr_callbacks; /* #events assigned to cpu by scaling code */
atomic_t nr_events; /* #events seen */
wait_queue_head_t wait_completion;
spinlock_t task_lock;
/* mmap counter for resources mapped into user space */
u32 mm_count_queue;
u32 mm_count_galpa;
struct list_head sqp_err_list;
struct list_head rqp_err_list;
};
enum ehca_mr_flag {
EHCA_MR_FLAG_FMR = 0x80000000, /* FMR, created with ehca_alloc_fmr */
EHCA_MR_FLAG_MAXMR = 0x40000000, /* max-MR */
};
struct ehca_mr {
union {
struct ib_mr ib_mr; /* must always be first in ehca_mr */
struct ib_fmr ib_fmr; /* must always be first in ehca_mr */
} ib;
struct ib_umem *umem;
spinlock_t mrlock;
enum ehca_mr_flag flags;
u32 num_kpages; /* number of kernel pages */
u32 num_hwpages; /* number of hw pages to form MR */
u64 hwpage_size; /* hw page size used for this MR */
int acl; /* ACL (stored here for usage in reregister) */
u64 *start; /* virtual start address (stored here for */
/* usage in reregister) */
u64 size; /* size (stored here for usage in reregister) */
u32 fmr_page_size; /* page size for FMR */
u32 fmr_max_pages; /* max pages for FMR */
u32 fmr_max_maps; /* max outstanding maps for FMR */
u32 fmr_map_cnt; /* map counter for FMR */
/* fw specific data */
struct ipz_mrmw_handle ipz_mr_handle; /* MR handle for h-calls */
struct h_galpas galpas;
};
struct ehca_mw {
struct ib_mw ib_mw; /* gen2 mw, must always be first in ehca_mw */
spinlock_t mwlock;
u8 never_bound; /* indication MW was never bound */
struct ipz_mrmw_handle ipz_mw_handle; /* MW handle for h-calls */
struct h_galpas galpas;
};
enum ehca_mr_pgi_type {
EHCA_MR_PGI_PHYS = 1, /* type of ehca_reg_phys_mr,
* ehca_rereg_phys_mr,
* ehca_reg_internal_maxmr */
EHCA_MR_PGI_USER = 2, /* type of ehca_reg_user_mr */
EHCA_MR_PGI_FMR = 3 /* type of ehca_map_phys_fmr */
};
struct ehca_mr_pginfo {
enum ehca_mr_pgi_type type;
u64 num_kpages;
u64 kpage_cnt;
u64 hwpage_size; /* hw page size used for this MR */
u64 num_hwpages; /* number of hw pages */
u64 hwpage_cnt; /* counter for hw pages */
u64 next_hwpage; /* next hw page in buffer/chunk/listelem */
union {
struct { /* type EHCA_MR_PGI_PHYS section */
u64 addr;
u16 size;
} phy;
struct { /* type EHCA_MR_PGI_USER section */
struct ib_umem *region;
struct scatterlist *next_sg;
u64 next_nmap;
} usr;
struct { /* type EHCA_MR_PGI_FMR section */
u64 fmr_pgsize;
u64 *page_list;
u64 next_listelem;
} fmr;
} u;
};
/* output parameters for MR/FMR hipz calls */
struct ehca_mr_hipzout_parms {
struct ipz_mrmw_handle handle;
u32 lkey;
u32 rkey;
u64 len;
u64 vaddr;
u32 acl;
};
/* output parameters for MW hipz calls */
struct ehca_mw_hipzout_parms {
struct ipz_mrmw_handle handle;
u32 rkey;
};
struct ehca_av {
struct ib_ah ib_ah;
struct ehca_ud_av av;
};
struct ehca_ucontext {
struct ib_ucontext ib_ucontext;
};
int ehca_init_pd_cache(void);
void ehca_cleanup_pd_cache(void);
int ehca_init_cq_cache(void);
void ehca_cleanup_cq_cache(void);
int ehca_init_qp_cache(void);
void ehca_cleanup_qp_cache(void);
int ehca_init_av_cache(void);
void ehca_cleanup_av_cache(void);
int ehca_init_mrmw_cache(void);
void ehca_cleanup_mrmw_cache(void);
int ehca_init_small_qp_cache(void);
void ehca_cleanup_small_qp_cache(void);
extern rwlock_t ehca_qp_idr_lock;
extern rwlock_t ehca_cq_idr_lock;
extern struct idr ehca_qp_idr;
extern struct idr ehca_cq_idr;
extern spinlock_t shca_list_lock;
extern int ehca_static_rate;
extern int ehca_port_act_time;
extern bool ehca_use_hp_mr;
extern bool ehca_scaling_code;
extern int ehca_lock_hcalls;
extern int ehca_nr_ports;
extern int ehca_max_cq;
extern int ehca_max_qp;
struct ipzu_queue_resp {
u32 qe_size; /* queue entry size */
u32 act_nr_of_sg;
u32 queue_length; /* queue length allocated in bytes */
u32 pagesize;
u32 toggle_state;
u32 offset; /* save offset within a page for small_qp */
};
struct ehca_create_cq_resp {
u32 cq_number;
u32 token;
struct ipzu_queue_resp ipz_queue;
u32 fw_handle_ofs;
u32 dummy;
};
struct ehca_create_qp_resp {
u32 qp_num;
u32 token;
u32 qp_type;
u32 ext_type;
u32 qkey;
/* qp_num assigned by ehca: sqp0/1 may have got different numbers */
u32 real_qp_num;
u32 fw_handle_ofs;
u32 dummy;
struct ipzu_queue_resp ipz_squeue;
struct ipzu_queue_resp ipz_rqueue;
};
struct ehca_alloc_cq_parms {
u32 nr_cqe;
u32 act_nr_of_entries;
u32 act_pages;
struct ipz_eq_handle eq_handle;
};
enum ehca_service_type {
ST_RC = 0,
ST_UC = 1,
ST_RD = 2,
ST_UD = 3,
};
enum ehca_ll_comp_flags {
LLQP_SEND_COMP = 0x20,
LLQP_RECV_COMP = 0x40,
LLQP_COMP_MASK = 0x60,
};
struct ehca_alloc_queue_parms {
/* input parameters */
int max_wr;
int max_sge;
int page_size;
int is_small;
/* output parameters */
u16 act_nr_wqes;
u8 act_nr_sges;
u32 queue_size; /* bytes for small queues, pages otherwise */
};
struct ehca_alloc_qp_parms {
struct ehca_alloc_queue_parms squeue;
struct ehca_alloc_queue_parms rqueue;
/* input parameters */
enum ehca_service_type servicetype;
int qp_storage;
int sigtype;
enum ehca_ext_qp_type ext_type;
enum ehca_ll_comp_flags ll_comp_flags;
int ud_av_l_key_ctl;
u32 token;
struct ipz_eq_handle eq_handle;
struct ipz_pd pd;
struct ipz_cq_handle send_cq_handle, recv_cq_handle;
u32 srq_qpn, srq_token, srq_limit;
/* output parameters */
u32 real_qp_num;
struct ipz_qp_handle qp_handle;
struct h_galpas galpas;
};
int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp);
int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int qp_num);
struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);
#endif

drivers/staging/rdma/ehca/ehca_classes_pSeries.h

@@ -1,208 +0,0 @@
/*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* pSeries interface definitions
*
* Authors: Waleri Fomin <fomin@de.ibm.com>
* Christoph Raisch <raisch@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __EHCA_CLASSES_PSERIES_H__
#define __EHCA_CLASSES_PSERIES_H__
#include "hcp_phyp.h"
#include "ipz_pt_fn.h"
struct ehca_pfqp {
struct ipz_qpt sqpt;
struct ipz_qpt rqpt;
};
struct ehca_pfcq {
struct ipz_qpt qpt;
u32 cqnr;
};
struct ehca_pfeq {
struct ipz_qpt qpt;
struct h_galpa galpa;
u32 eqnr;
};
struct ipz_adapter_handle {
u64 handle;
};
struct ipz_cq_handle {
u64 handle;
};
struct ipz_eq_handle {
u64 handle;
};
struct ipz_qp_handle {
u64 handle;
};
struct ipz_mrmw_handle {
u64 handle;
};
struct ipz_pd {
u32 value;
};
struct hcp_modify_qp_control_block {
u32 qkey; /* 00 */
u32 rdd; /* reliable datagram domain */
u32 send_psn; /* 02 */
u32 receive_psn; /* 03 */
u32 prim_phys_port; /* 04 */
u32 alt_phys_port; /* 05 */
u32 prim_p_key_idx; /* 06 */
u32 alt_p_key_idx; /* 07 */
u32 rdma_atomic_ctrl; /* 08 */
u32 qp_state; /* 09 */
u32 reserved_10; /* 10 */
u32 rdma_nr_atomic_resp_res; /* 11 */
u32 path_migration_state; /* 12 */
u32 rdma_atomic_outst_dest_qp; /* 13 */
u32 dest_qp_nr; /* 14 */
u32 min_rnr_nak_timer_field; /* 15 */
u32 service_level; /* 16 */
u32 send_grh_flag; /* 17 */
u32 retry_count; /* 18 */
u32 timeout; /* 19 */
u32 path_mtu; /* 20 */
u32 max_static_rate; /* 21 */
u32 dlid; /* 22 */
u32 rnr_retry_count; /* 23 */
u32 source_path_bits; /* 24 */
u32 traffic_class; /* 25 */
u32 hop_limit; /* 26 */
u32 source_gid_idx; /* 27 */
u32 flow_label; /* 28 */
u32 reserved_29; /* 29 */
union { /* 30 */
u64 dw[2];
u8 byte[16];
} dest_gid;
u32 service_level_al; /* 34 */
u32 send_grh_flag_al; /* 35 */
u32 retry_count_al; /* 36 */
u32 timeout_al; /* 37 */
u32 max_static_rate_al; /* 38 */
u32 dlid_al; /* 39 */
u32 rnr_retry_count_al; /* 40 */
u32 source_path_bits_al; /* 41 */
u32 traffic_class_al; /* 42 */
u32 hop_limit_al; /* 43 */
u32 source_gid_idx_al; /* 44 */
u32 flow_label_al; /* 45 */
u32 reserved_46; /* 46 */
u32 reserved_47; /* 47 */
union { /* 48 */
u64 dw[2];
u8 byte[16];
} dest_gid_al;
u32 max_nr_outst_send_wr; /* 52 */
u32 max_nr_outst_recv_wr; /* 53 */
u32 disable_ete_credit_check; /* 54 */
u32 qp_number; /* 55 */
u64 send_queue_handle; /* 56 */
u64 recv_queue_handle; /* 58 */
u32 actual_nr_sges_in_sq_wqe; /* 60 */
u32 actual_nr_sges_in_rq_wqe; /* 61 */
u32 qp_enable; /* 62 */
u32 curr_srq_limit; /* 63 */
u64 qp_aff_asyn_ev_log_reg; /* 64 */
u64 shared_rq_hndl; /* 66 */
u64 trigg_doorbell_qp_hndl; /* 68 */
u32 reserved_70_127[58]; /* 70 */
};
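/*
 * Editorial note: the numbers in the trailing comments above are word
 * offsets (32-bit words) into the control block; the 16-byte dest_gid
 * union at word 30 is why the next field sits at word 34.
 */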
#define MQPCB_MASK_QKEY EHCA_BMASK_IBM( 0, 0)
#define MQPCB_MASK_SEND_PSN EHCA_BMASK_IBM( 2, 2)
#define MQPCB_MASK_RECEIVE_PSN EHCA_BMASK_IBM( 3, 3)
#define MQPCB_MASK_PRIM_PHYS_PORT EHCA_BMASK_IBM( 4, 4)
#define MQPCB_PRIM_PHYS_PORT EHCA_BMASK_IBM(24, 31)
#define MQPCB_MASK_ALT_PHYS_PORT EHCA_BMASK_IBM( 5, 5)
#define MQPCB_MASK_PRIM_P_KEY_IDX EHCA_BMASK_IBM( 6, 6)
#define MQPCB_PRIM_P_KEY_IDX EHCA_BMASK_IBM(24, 31)
#define MQPCB_MASK_ALT_P_KEY_IDX EHCA_BMASK_IBM( 7, 7)
#define MQPCB_MASK_RDMA_ATOMIC_CTRL EHCA_BMASK_IBM( 8, 8)
#define MQPCB_MASK_QP_STATE EHCA_BMASK_IBM( 9, 9)
#define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES EHCA_BMASK_IBM(11, 11)
#define MQPCB_MASK_PATH_MIGRATION_STATE EHCA_BMASK_IBM(12, 12)
#define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP EHCA_BMASK_IBM(13, 13)
#define MQPCB_MASK_DEST_QP_NR EHCA_BMASK_IBM(14, 14)
#define MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD EHCA_BMASK_IBM(15, 15)
#define MQPCB_MASK_SERVICE_LEVEL EHCA_BMASK_IBM(16, 16)
#define MQPCB_MASK_SEND_GRH_FLAG EHCA_BMASK_IBM(17, 17)
#define MQPCB_MASK_RETRY_COUNT EHCA_BMASK_IBM(18, 18)
#define MQPCB_MASK_TIMEOUT EHCA_BMASK_IBM(19, 19)
#define MQPCB_MASK_PATH_MTU EHCA_BMASK_IBM(20, 20)
#define MQPCB_MASK_MAX_STATIC_RATE EHCA_BMASK_IBM(21, 21)
#define MQPCB_MASK_DLID EHCA_BMASK_IBM(22, 22)
#define MQPCB_MASK_RNR_RETRY_COUNT EHCA_BMASK_IBM(23, 23)
#define MQPCB_MASK_SOURCE_PATH_BITS EHCA_BMASK_IBM(24, 24)
#define MQPCB_MASK_TRAFFIC_CLASS EHCA_BMASK_IBM(25, 25)
#define MQPCB_MASK_HOP_LIMIT EHCA_BMASK_IBM(26, 26)
#define MQPCB_MASK_SOURCE_GID_IDX EHCA_BMASK_IBM(27, 27)
#define MQPCB_MASK_FLOW_LABEL EHCA_BMASK_IBM(28, 28)
#define MQPCB_MASK_DEST_GID EHCA_BMASK_IBM(30, 30)
#define MQPCB_MASK_SERVICE_LEVEL_AL EHCA_BMASK_IBM(31, 31)
#define MQPCB_MASK_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(32, 32)
#define MQPCB_MASK_RETRY_COUNT_AL EHCA_BMASK_IBM(33, 33)
#define MQPCB_MASK_TIMEOUT_AL EHCA_BMASK_IBM(34, 34)
#define MQPCB_MASK_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(35, 35)
#define MQPCB_MASK_DLID_AL EHCA_BMASK_IBM(36, 36)
#define MQPCB_MASK_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(37, 37)
#define MQPCB_MASK_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(38, 38)
#define MQPCB_MASK_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(39, 39)
#define MQPCB_MASK_HOP_LIMIT_AL EHCA_BMASK_IBM(40, 40)
#define MQPCB_MASK_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(41, 41)
#define MQPCB_MASK_FLOW_LABEL_AL EHCA_BMASK_IBM(42, 42)
#define MQPCB_MASK_DEST_GID_AL EHCA_BMASK_IBM(44, 44)
#define MQPCB_MASK_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(45, 45)
#define MQPCB_MASK_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(46, 46)
#define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(47, 47)
#define MQPCB_MASK_QP_ENABLE EHCA_BMASK_IBM(48, 48)
#define MQPCB_MASK_CURR_SRQ_LIMIT EHCA_BMASK_IBM(49, 49)
#define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG EHCA_BMASK_IBM(50, 50)
#define MQPCB_MASK_SHARED_RQ_HNDL EHCA_BMASK_IBM(51, 51)
#endif /* __EHCA_CLASSES_PSERIES_H__ */

drivers/staging/rdma/ehca/ehca_cq.c

@@ -1,397 +0,0 @@
/*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* Completion queue handling
*
* Authors: Waleri Fomin <fomin@de.ibm.com>
* Khadija Souissi <souissi@de.ibm.com>
* Reinhard Ernst <rernst@de.ibm.com>
* Heiko J Schick <schickhj@de.ibm.com>
* Hoang-Nam Nguyen <hnguyen@de.ibm.com>
*
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/slab.h>
#include "ehca_iverbs.h"
#include "ehca_classes.h"
#include "ehca_irq.h"
#include "hcp_if.h"
static struct kmem_cache *cq_cache;
int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp)
{
unsigned int qp_num = qp->real_qp_num;
unsigned int key = qp_num & (QP_HASHTAB_LEN-1);
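/* masking with QP_HASHTAB_LEN - 1 is a cheap modulo; the table
 * length is a power of 2 (see the QP_HASHTAB_LEN definition) */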
unsigned long flags;
spin_lock_irqsave(&cq->spinlock, flags);
hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]);
spin_unlock_irqrestore(&cq->spinlock, flags);
ehca_dbg(cq->ib_cq.device, "cq_num=%x real_qp_num=%x",
cq->cq_number, qp_num);
return 0;
}
int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int real_qp_num)
{
int ret = -EINVAL;
unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
struct hlist_node *iter;
struct ehca_qp *qp;
unsigned long flags;
spin_lock_irqsave(&cq->spinlock, flags);
hlist_for_each(iter, &cq->qp_hashtab[key]) {
qp = hlist_entry(iter, struct ehca_qp, list_entries);
if (qp->real_qp_num == real_qp_num) {
hlist_del(iter);
ehca_dbg(cq->ib_cq.device,
"removed qp from cq .cq_num=%x real_qp_num=%x",
cq->cq_number, real_qp_num);
ret = 0;
break;
}
}
spin_unlock_irqrestore(&cq->spinlock, flags);
if (ret)
ehca_err(cq->ib_cq.device,
"qp not found cq_num=%x real_qp_num=%x",
cq->cq_number, real_qp_num);
return ret;
}
struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
{
struct ehca_qp *ret = NULL;
unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
struct hlist_node *iter;
struct ehca_qp *qp;
hlist_for_each(iter, &cq->qp_hashtab[key]) {
qp = hlist_entry(iter, struct ehca_qp, list_entries);
if (qp->real_qp_num == real_qp_num) {
ret = qp;
break;
}
}
return ret;
}
struct ib_cq *ehca_create_cq(struct ib_device *device,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata)
{
int cqe = attr->cqe;
static const u32 additional_cqe = 20;
struct ib_cq *cq;
struct ehca_cq *my_cq;
struct ehca_shca *shca =
container_of(device, struct ehca_shca, ib_device);
struct ipz_adapter_handle adapter_handle;
struct ehca_alloc_cq_parms param; /* h_call's out parameters */
struct h_galpa gal;
void *vpage;
u32 counter;
u64 rpage, cqx_fec, h_ret;
int rc, i;
unsigned long flags;
if (attr->flags)
return ERR_PTR(-EINVAL);
if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
return ERR_PTR(-EINVAL);
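/*
 * Editorial note: the check above keeps the requested depth plus the
 * 64-entry reserve and the 20 extra error CQEs (additional_cqe) below
 * the 4GB-64 hardware maximum described further down.
 */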
if (!atomic_add_unless(&shca->num_cqs, 1, shca->max_num_cqs)) {
ehca_err(device, "Unable to create CQ, max number of %i "
"CQs reached.", shca->max_num_cqs);
ehca_err(device, "To increase the maximum number of CQs "
"use the number_of_cqs module parameter.\n");
return ERR_PTR(-ENOSPC);
}
my_cq = kmem_cache_zalloc(cq_cache, GFP_KERNEL);
if (!my_cq) {
ehca_err(device, "Out of memory for ehca_cq struct device=%p",
device);
atomic_dec(&shca->num_cqs);
return ERR_PTR(-ENOMEM);
}
memset(&param, 0, sizeof(struct ehca_alloc_cq_parms));
spin_lock_init(&my_cq->spinlock);
spin_lock_init(&my_cq->cb_lock);
spin_lock_init(&my_cq->task_lock);
atomic_set(&my_cq->nr_events, 0);
init_waitqueue_head(&my_cq->wait_completion);
cq = &my_cq->ib_cq;
adapter_handle = shca->ipz_hca_handle;
param.eq_handle = shca->eq.ipz_eq_handle;
idr_preload(GFP_KERNEL);
write_lock_irqsave(&ehca_cq_idr_lock, flags);
rc = idr_alloc(&ehca_cq_idr, my_cq, 0, 0x2000000, GFP_NOWAIT);
write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
idr_preload_end();
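/*
 * Editorial note: idr_preload() pre-allocates idr memory with
 * GFP_KERNEL so the idr_alloc() above can safely use GFP_NOWAIT while
 * the idr write lock is held with interrupts disabled.
 */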
if (rc < 0) {
cq = ERR_PTR(-ENOMEM);
ehca_err(device, "Can't allocate new idr entry. device=%p",
device);
goto create_cq_exit1;
}
my_cq->token = rc;
/*
* The CQ's maximum depth is 4GB-64, but we need an additional 20
* entries as a buffer for receiving error CQEs.
*/
param.nr_cqe = cqe + additional_cqe;
h_ret = hipz_h_alloc_resource_cq(adapter_handle, my_cq, &param);
if (h_ret != H_SUCCESS) {
ehca_err(device, "hipz_h_alloc_resource_cq() failed "
"h_ret=%lli device=%p", h_ret, device);
cq = ERR_PTR(ehca2ib_return_code(h_ret));
goto create_cq_exit2;
}
rc = ipz_queue_ctor(NULL, &my_cq->ipz_queue, param.act_pages,
EHCA_PAGESIZE, sizeof(struct ehca_cqe), 0, 0);
if (!rc) {
ehca_err(device, "ipz_queue_ctor() failed ipz_rc=%i device=%p",
rc, device);
cq = ERR_PTR(-EINVAL);
goto create_cq_exit3;
}
for (counter = 0; counter < param.act_pages; counter++) {
vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
if (!vpage) {
ehca_err(device, "ipz_qpageit_get_inc() "
"returns NULL device=%p", device);
cq = ERR_PTR(-EAGAIN);
goto create_cq_exit4;
}
rpage = __pa(vpage);
h_ret = hipz_h_register_rpage_cq(adapter_handle,
my_cq->ipz_cq_handle,
&my_cq->pf,
0,
0,
rpage,
1,
my_cq->galpas.
kernel);
if (h_ret < H_SUCCESS) {
ehca_err(device, "hipz_h_register_rpage_cq() failed "
"ehca_cq=%p cq_num=%x h_ret=%lli counter=%i "
"act_pages=%i", my_cq, my_cq->cq_number,
h_ret, counter, param.act_pages);
cq = ERR_PTR(-EINVAL);
goto create_cq_exit4;
}
if (counter == (param.act_pages - 1)) {
vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
if ((h_ret != H_SUCCESS) || vpage) {
ehca_err(device, "Registration of pages not "
"complete ehca_cq=%p cq_num=%x "
"h_ret=%lli", my_cq, my_cq->cq_number,
h_ret);
cq = ERR_PTR(-EAGAIN);
goto create_cq_exit4;
}
} else {
if (h_ret != H_PAGE_REGISTERED) {
ehca_err(device, "Registration of page failed "
"ehca_cq=%p cq_num=%x h_ret=%lli "
"counter=%i act_pages=%i",
my_cq, my_cq->cq_number,
h_ret, counter, param.act_pages);
cq = ERR_PTR(-ENOMEM);
goto create_cq_exit4;
}
}
}
ipz_qeit_reset(&my_cq->ipz_queue);
gal = my_cq->galpas.kernel;
cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec));
ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%llx",
my_cq, my_cq->cq_number, cqx_fec);
my_cq->ib_cq.cqe = my_cq->nr_of_entries =
param.act_nr_of_entries - additional_cqe;
my_cq->cq_number = (my_cq->ipz_cq_handle.handle) & 0xffff;
for (i = 0; i < QP_HASHTAB_LEN; i++)
INIT_HLIST_HEAD(&my_cq->qp_hashtab[i]);
INIT_LIST_HEAD(&my_cq->sqp_err_list);
INIT_LIST_HEAD(&my_cq->rqp_err_list);
if (context) {
struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
struct ehca_create_cq_resp resp;
memset(&resp, 0, sizeof(resp));
resp.cq_number = my_cq->cq_number;
resp.token = my_cq->token;
resp.ipz_queue.qe_size = ipz_queue->qe_size;
resp.ipz_queue.act_nr_of_sg = ipz_queue->act_nr_of_sg;
resp.ipz_queue.queue_length = ipz_queue->queue_length;
resp.ipz_queue.pagesize = ipz_queue->pagesize;
resp.ipz_queue.toggle_state = ipz_queue->toggle_state;
resp.fw_handle_ofs = (u32)
(my_cq->galpas.user.fw_handle & (PAGE_SIZE - 1));
if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
ehca_err(device, "Copy to udata failed.");
cq = ERR_PTR(-EFAULT);
goto create_cq_exit4;
}
}
return cq;
create_cq_exit4:
ipz_queue_dtor(NULL, &my_cq->ipz_queue);
create_cq_exit3:
h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
if (h_ret != H_SUCCESS)
ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p "
"cq_num=%x h_ret=%lli", my_cq, my_cq->cq_number, h_ret);
create_cq_exit2:
write_lock_irqsave(&ehca_cq_idr_lock, flags);
idr_remove(&ehca_cq_idr, my_cq->token);
write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
create_cq_exit1:
kmem_cache_free(cq_cache, my_cq);
atomic_dec(&shca->num_cqs);
return cq;
}
int ehca_destroy_cq(struct ib_cq *cq)
{
u64 h_ret;
struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
int cq_num = my_cq->cq_number;
struct ib_device *device = cq->device;
struct ehca_shca *shca = container_of(device, struct ehca_shca,
ib_device);
struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
unsigned long flags;
if (cq->uobject) {
if (my_cq->mm_count_galpa || my_cq->mm_count_queue) {
ehca_err(device, "Resources still referenced in "
"user space cq_num=%x", my_cq->cq_number);
return -EINVAL;
}
}
/*
* remove the CQ from the idr first to make sure
* no more interrupt tasklets will touch this CQ
*/
write_lock_irqsave(&ehca_cq_idr_lock, flags);
idr_remove(&ehca_cq_idr, my_cq->token);
write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
/* now wait until all pending events have completed */
wait_event(my_cq->wait_completion, !atomic_read(&my_cq->nr_events));
/* nobody's using our CQ any longer -- we can destroy it */
h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
if (h_ret == H_R_STATE) {
/* cq in err: read err data and destroy it forcibly */
ehca_dbg(device, "ehca_cq=%p cq_num=%x resource=%llx in err "
"state. Try to delete it forcibly.",
my_cq, cq_num, my_cq->ipz_cq_handle.handle);
ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle);
h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
if (h_ret == H_SUCCESS)
ehca_dbg(device, "cq_num=%x deleted successfully.",
cq_num);
}
if (h_ret != H_SUCCESS) {
ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lli "
"ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num);
return ehca2ib_return_code(h_ret);
}
ipz_queue_dtor(NULL, &my_cq->ipz_queue);
kmem_cache_free(cq_cache, my_cq);
atomic_dec(&shca->num_cqs);
return 0;
}
int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
/* TODO: proper resize needs to be done */
ehca_err(cq->device, "not implemented yet");
return -EFAULT;
}
int ehca_init_cq_cache(void)
{
cq_cache = kmem_cache_create("ehca_cache_cq",
sizeof(struct ehca_cq), 0,
SLAB_HWCACHE_ALIGN,
NULL);
if (!cq_cache)
return -ENOMEM;
return 0;
}
void ehca_cleanup_cq_cache(void)
{
kmem_cache_destroy(cq_cache);
}

drivers/staging/rdma/ehca/ehca_eq.c

@@ -1,189 +0,0 @@
/*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* Event queue handling
*
* Authors: Waleri Fomin <fomin@de.ibm.com>
* Khadija Souissi <souissi@de.ibm.com>
* Reinhard Ernst <rernst@de.ibm.com>
* Heiko J Schick <schickhj@de.ibm.com>
* Hoang-Nam Nguyen <hnguyen@de.ibm.com>
*
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "ehca_classes.h"
#include "ehca_irq.h"
#include "ehca_iverbs.h"
#include "ehca_qes.h"
#include "hcp_if.h"
#include "ipz_pt_fn.h"
int ehca_create_eq(struct ehca_shca *shca,
struct ehca_eq *eq,
const enum ehca_eq_type type, const u32 length)
{
int ret;
u64 h_ret;
u32 nr_pages;
u32 i;
void *vpage;
struct ib_device *ib_dev = &shca->ib_device;
spin_lock_init(&eq->spinlock);
spin_lock_init(&eq->irq_spinlock);
eq->is_initialized = 0;
if (type != EHCA_EQ && type != EHCA_NEQ) {
ehca_err(ib_dev, "Invalid EQ type %x. eq=%p", type, eq);
return -EINVAL;
}
if (!length) {
ehca_err(ib_dev, "EQ length must not be zero. eq=%p", eq);
return -EINVAL;
}
h_ret = hipz_h_alloc_resource_eq(shca->ipz_hca_handle,
&eq->pf,
type,
length,
&eq->ipz_eq_handle,
&eq->length,
&nr_pages, &eq->ist);
if (h_ret != H_SUCCESS) {
ehca_err(ib_dev, "Can't allocate EQ/NEQ. eq=%p", eq);
return -EINVAL;
}
ret = ipz_queue_ctor(NULL, &eq->ipz_queue, nr_pages,
EHCA_PAGESIZE, sizeof(struct ehca_eqe), 0, 0);
if (!ret) {
ehca_err(ib_dev, "Can't allocate EQ pages eq=%p", eq);
goto create_eq_exit1;
}
for (i = 0; i < nr_pages; i++) {
u64 rpage;
vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
if (!vpage)
goto create_eq_exit2;
rpage = __pa(vpage);
h_ret = hipz_h_register_rpage_eq(shca->ipz_hca_handle,
eq->ipz_eq_handle,
&eq->pf,
0, 0, rpage, 1);
if (i == (nr_pages - 1)) {
/* last page */
vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
if (h_ret != H_SUCCESS || vpage)
goto create_eq_exit2;
} else {
if (h_ret != H_PAGE_REGISTERED)
goto create_eq_exit2;
}
}
ipz_qeit_reset(&eq->ipz_queue);
/* register interrupt handlers and initialize work queues */
if (type == EHCA_EQ) {
tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca);
ret = ibmebus_request_irq(eq->ist, ehca_interrupt_eq,
0, "ehca_eq",
(void *)shca);
if (ret < 0)
ehca_err(ib_dev, "Can't map interrupt handler.");
} else if (type == EHCA_NEQ) {
tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca);
ret = ibmebus_request_irq(eq->ist, ehca_interrupt_neq,
0, "ehca_neq",
(void *)shca);
if (ret < 0)
ehca_err(ib_dev, "Can't map interrupt handler.");
}
eq->is_initialized = 1;
return 0;
create_eq_exit2:
ipz_queue_dtor(NULL, &eq->ipz_queue);
create_eq_exit1:
hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
return -EINVAL;
}
void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq)
{
unsigned long flags;
void *eqe;
spin_lock_irqsave(&eq->spinlock, flags);
eqe = ipz_eqit_eq_get_inc_valid(&eq->ipz_queue);
spin_unlock_irqrestore(&eq->spinlock, flags);
return eqe;
}
int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq)
{
unsigned long flags;
u64 h_ret;
ibmebus_free_irq(eq->ist, (void *)shca);
spin_lock_irqsave(&shca_list_lock, flags);
eq->is_initialized = 0;
spin_unlock_irqrestore(&shca_list_lock, flags);
tasklet_kill(&eq->interrupt_task);
h_ret = hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't free EQ resources.");
return -EINVAL;
}
ipz_queue_dtor(NULL, &eq->ipz_queue);
return 0;
}

drivers/staging/rdma/ehca/ehca_hca.c

@@ -1,414 +0,0 @@
/*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* HCA query functions
*
* Authors: Heiko J Schick <schickhj@de.ibm.com>
* Christoph Raisch <raisch@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/gfp.h>
#include "ehca_tools.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
static unsigned int limit_uint(unsigned int value)
{
return min_t(unsigned int, value, INT_MAX);
}
int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
struct ib_udata *uhw)
{
int i, ret = 0;
struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
ib_device);
struct hipz_query_hca *rblock;
static const u32 cap_mapping[] = {
IB_DEVICE_RESIZE_MAX_WR, HCA_CAP_WQE_RESIZE,
IB_DEVICE_BAD_PKEY_CNTR, HCA_CAP_BAD_P_KEY_CTR,
IB_DEVICE_BAD_QKEY_CNTR, HCA_CAP_Q_KEY_VIOL_CTR,
IB_DEVICE_RAW_MULTI, HCA_CAP_RAW_PACKET_MCAST,
IB_DEVICE_AUTO_PATH_MIG, HCA_CAP_AUTO_PATH_MIG,
IB_DEVICE_CHANGE_PHY_PORT, HCA_CAP_SQD_RTS_PORT_CHANGE,
IB_DEVICE_UD_AV_PORT_ENFORCE, HCA_CAP_AH_PORT_NR_CHECK,
IB_DEVICE_CURR_QP_STATE_MOD, HCA_CAP_CUR_QP_STATE_MOD,
IB_DEVICE_SHUTDOWN_PORT, HCA_CAP_SHUTDOWN_PORT,
IB_DEVICE_INIT_TYPE, HCA_CAP_INIT_TYPE,
IB_DEVICE_PORT_ACTIVE_EVENT, HCA_CAP_PORT_ACTIVE_EVENT,
};
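/*
 * cap_mapping holds (IB device capability flag, firmware capability
 * bit) pairs; the translation loop at the end of this function
 * consumes them two at a time.
 */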
if (uhw->inlen || uhw->outlen)
return -EINVAL;
rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
if (!rblock) {
ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
return -ENOMEM;
}
if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't query device properties");
ret = -EINVAL;
goto query_device1;
}
memset(props, 0, sizeof(struct ib_device_attr));
props->page_size_cap = shca->hca_cap_mr_pgsize;
props->fw_ver = rblock->hw_ver;
props->max_mr_size = rblock->max_mr_size;
props->vendor_id = rblock->vendor_id >> 8;
props->vendor_part_id = rblock->vendor_part_id >> 16;
props->hw_ver = rblock->hw_ver;
props->max_qp = limit_uint(rblock->max_qp);
props->max_qp_wr = limit_uint(rblock->max_wqes_wq);
props->max_sge = limit_uint(rblock->max_sge);
props->max_sge_rd = limit_uint(rblock->max_sge_rd);
props->max_cq = limit_uint(rblock->max_cq);
props->max_cqe = limit_uint(rblock->max_cqe);
props->max_mr = limit_uint(rblock->max_mr);
props->max_mw = limit_uint(rblock->max_mw);
props->max_pd = limit_uint(rblock->max_pd);
props->max_ah = limit_uint(rblock->max_ah);
props->max_ee = limit_uint(rblock->max_rd_ee_context);
props->max_rdd = limit_uint(rblock->max_rd_domain);
props->max_fmr = limit_uint(rblock->max_mr);
props->max_qp_rd_atom = limit_uint(rblock->max_rr_qp);
props->max_ee_rd_atom = limit_uint(rblock->max_rr_ee_context);
props->max_res_rd_atom = limit_uint(rblock->max_rr_hca);
props->max_qp_init_rd_atom = limit_uint(rblock->max_act_wqs_qp);
props->max_ee_init_rd_atom = limit_uint(rblock->max_act_wqs_ee_context);
if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
props->max_srq = limit_uint(props->max_qp);
props->max_srq_wr = limit_uint(props->max_qp_wr);
props->max_srq_sge = 3;
}
props->max_pkeys = 16;
/* Some FW versions say 0 here; insert sensible value in that case */
props->local_ca_ack_delay = rblock->local_ca_ack_delay ?
min_t(u8, rblock->local_ca_ack_delay, 255) : 12;
props->max_raw_ipv6_qp = limit_uint(rblock->max_raw_ipv6_qp);
props->max_raw_ethy_qp = limit_uint(rblock->max_raw_ethy_qp);
props->max_mcast_grp = limit_uint(rblock->max_mcast_grp);
props->max_mcast_qp_attach = limit_uint(rblock->max_mcast_qp_attach);
props->max_total_mcast_qp_attach
= limit_uint(rblock->max_total_mcast_qp_attach);
/* translate device capabilities */
props->device_cap_flags = IB_DEVICE_SYS_IMAGE_GUID |
IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_N_NOTIFY_CQ;
for (i = 0; i < ARRAY_SIZE(cap_mapping); i += 2)
if (rblock->hca_cap_indicators & cap_mapping[i + 1])
props->device_cap_flags |= cap_mapping[i];
query_device1:
ehca_free_fw_ctrlblock(rblock);
return ret;
}
static enum ib_mtu map_mtu(struct ehca_shca *shca, u32 fw_mtu)
{
switch (fw_mtu) {
case 0x1:
return IB_MTU_256;
case 0x2:
return IB_MTU_512;
case 0x3:
return IB_MTU_1024;
case 0x4:
return IB_MTU_2048;
case 0x5:
return IB_MTU_4096;
default:
ehca_err(&shca->ib_device, "Unknown MTU size: %x.",
fw_mtu);
return 0;
}
}
static u8 map_number_of_vls(struct ehca_shca *shca, u32 vl_cap)
{
switch (vl_cap) {
case 0x1:
return 1;
case 0x2:
return 2;
case 0x3:
return 4;
case 0x4:
return 8;
case 0x5:
return 15;
default:
ehca_err(&shca->ib_device, "invalid Vl Capability: %x.",
vl_cap);
return 0;
}
}
int ehca_query_port(struct ib_device *ibdev,
u8 port, struct ib_port_attr *props)
{
int ret = 0;
u64 h_ret;
struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
ib_device);
struct hipz_query_port *rblock;
rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
if (!rblock) {
ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
return -ENOMEM;
}
h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't query port properties");
ret = -EINVAL;
goto query_port1;
}
memset(props, 0, sizeof(struct ib_port_attr));
props->active_mtu = props->max_mtu = map_mtu(shca, rblock->max_mtu);
props->port_cap_flags = rblock->capability_mask;
props->gid_tbl_len = rblock->gid_tbl_len;
if (rblock->max_msg_sz)
props->max_msg_sz = rblock->max_msg_sz;
else
props->max_msg_sz = 0x1 << 31;
props->bad_pkey_cntr = rblock->bad_pkey_cntr;
props->qkey_viol_cntr = rblock->qkey_viol_cntr;
props->pkey_tbl_len = rblock->pkey_tbl_len;
props->lid = rblock->lid;
props->sm_lid = rblock->sm_lid;
props->lmc = rblock->lmc;
props->sm_sl = rblock->sm_sl;
props->subnet_timeout = rblock->subnet_timeout;
props->init_type_reply = rblock->init_type_reply;
props->max_vl_num = map_number_of_vls(shca, rblock->vl_cap);
if (rblock->state && rblock->phys_width) {
props->phys_state = rblock->phys_pstate;
props->state = rblock->phys_state;
props->active_width = rblock->phys_width;
props->active_speed = rblock->phys_speed;
} else {
/* old firmware releases don't report physical
* port info, so use default values
*/
props->phys_state = 5;
props->state = rblock->state;
props->active_width = IB_WIDTH_12X;
props->active_speed = IB_SPEED_SDR;
}
query_port1:
ehca_free_fw_ctrlblock(rblock);
return ret;
}
int ehca_query_sma_attr(struct ehca_shca *shca,
u8 port, struct ehca_sma_attr *attr)
{
int ret = 0;
u64 h_ret;
struct hipz_query_port *rblock;
rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
if (!rblock) {
ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
return -ENOMEM;
}
h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't query port properties");
ret = -EINVAL;
goto query_sma_attr1;
}
memset(attr, 0, sizeof(struct ehca_sma_attr));
attr->lid = rblock->lid;
attr->lmc = rblock->lmc;
attr->sm_sl = rblock->sm_sl;
attr->sm_lid = rblock->sm_lid;
attr->pkey_tbl_len = rblock->pkey_tbl_len;
memcpy(attr->pkeys, rblock->pkey_entries, sizeof(attr->pkeys));
query_sma_attr1:
ehca_free_fw_ctrlblock(rblock);
return ret;
}
int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
int ret = 0;
u64 h_ret;
struct ehca_shca *shca;
struct hipz_query_port *rblock;
shca = container_of(ibdev, struct ehca_shca, ib_device);
if (index > 16) {
ehca_err(&shca->ib_device, "Invalid index: %x.", index);
return -EINVAL;
}
rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
if (!rblock) {
ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
return -ENOMEM;
}
h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't query port properties");
ret = -EINVAL;
goto query_pkey1;
}
memcpy(pkey, &rblock->pkey_entries + index, sizeof(u16));
query_pkey1:
ehca_free_fw_ctrlblock(rblock);
return ret;
}
int ehca_query_gid(struct ib_device *ibdev, u8 port,
int index, union ib_gid *gid)
{
int ret = 0;
u64 h_ret;
struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
ib_device);
struct hipz_query_port *rblock;
if (index < 0 || index > 255) {
ehca_err(&shca->ib_device, "Invalid index: %x.", index);
return -EINVAL;
}
rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
if (!rblock) {
ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
return -ENOMEM;
}
h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't query port properties");
ret = -EINVAL;
goto query_gid1;
}
memcpy(&gid->raw[0], &rblock->gid_prefix, sizeof(u64));
memcpy(&gid->raw[8], &rblock->guid_entries[index], sizeof(u64));
query_gid1:
ehca_free_fw_ctrlblock(rblock);
return ret;
}
static const u32 allowed_port_caps = (
IB_PORT_SM | IB_PORT_LED_INFO_SUP | IB_PORT_CM_SUP |
IB_PORT_SNMP_TUNNEL_SUP | IB_PORT_DEVICE_MGMT_SUP |
IB_PORT_VENDOR_CLASS_SUP);
int ehca_modify_port(struct ib_device *ibdev,
u8 port, int port_modify_mask,
struct ib_port_modify *props)
{
int ret = 0;
struct ehca_shca *shca;
struct hipz_query_port *rblock;
u32 cap;
u64 hret;
shca = container_of(ibdev, struct ehca_shca, ib_device);
if ((props->set_port_cap_mask | props->clr_port_cap_mask)
& ~allowed_port_caps) {
ehca_err(&shca->ib_device, "Non-changeable bits set in masks "
"set=%x clr=%x allowed=%x", props->set_port_cap_mask,
props->clr_port_cap_mask, allowed_port_caps);
return -EINVAL;
}
if (mutex_lock_interruptible(&shca->modify_mutex))
return -ERESTARTSYS;
rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
if (!rblock) {
ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
ret = -ENOMEM;
goto modify_port1;
}
hret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
if (hret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't query port properties");
ret = -EINVAL;
goto modify_port2;
}
cap = (rblock->capability_mask | props->set_port_cap_mask)
& ~props->clr_port_cap_mask;
hret = hipz_h_modify_port(shca->ipz_hca_handle, port,
cap, props->init_type, port_modify_mask);
if (hret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Modify port failed h_ret=%lli",
hret);
ret = -EINVAL;
}
modify_port2:
ehca_free_fw_ctrlblock(rblock);
modify_port1:
mutex_unlock(&shca->modify_mutex);
return ret;
}

drivers/staging/rdma/ehca/ehca_irq.c

@@ -1,870 +0,0 @@
/*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* Functions for EQs, NEQs and interrupts
*
* Authors: Heiko J Schick <schickhj@de.ibm.com>
* Khadija Souissi <souissi@de.ibm.com>
* Hoang-Nam Nguyen <hnguyen@de.ibm.com>
* Joachim Fenkes <fenkes@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/slab.h>
#include <linux/smpboot.h>
#include "ehca_classes.h"
#include "ehca_irq.h"
#include "ehca_iverbs.h"
#include "ehca_tools.h"
#include "hcp_if.h"
#include "hipz_fns.h"
#include "ipz_pt_fn.h"
#define EQE_COMPLETION_EVENT EHCA_BMASK_IBM( 1, 1)
#define EQE_CQ_QP_NUMBER EHCA_BMASK_IBM( 8, 31)
#define EQE_EE_IDENTIFIER EHCA_BMASK_IBM( 2, 7)
#define EQE_CQ_NUMBER EHCA_BMASK_IBM( 8, 31)
#define EQE_QP_NUMBER EHCA_BMASK_IBM( 8, 31)
#define EQE_QP_TOKEN EHCA_BMASK_IBM(32, 63)
#define EQE_CQ_TOKEN EHCA_BMASK_IBM(32, 63)
#define NEQE_COMPLETION_EVENT EHCA_BMASK_IBM( 1, 1)
#define NEQE_EVENT_CODE EHCA_BMASK_IBM( 2, 7)
#define NEQE_PORT_NUMBER EHCA_BMASK_IBM( 8, 15)
#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16, 16)
#define NEQE_DISRUPTIVE EHCA_BMASK_IBM(16, 16)
#define NEQE_SPECIFIC_EVENT EHCA_BMASK_IBM(16, 23)
#define ERROR_DATA_LENGTH EHCA_BMASK_IBM(52, 63)
#define ERROR_DATA_TYPE EHCA_BMASK_IBM( 0, 7)
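/*
 * Note: the EHCA_BMASK_IBM(from, to) masks above use IBM bit numbering,
 * i.e. bit 0 is the most significant bit of the 64-bit EQE, so e.g.
 * EQE_QP_TOKEN selects the low-order 32 bits.  EHCA_BMASK_GET()
 * returns the selected field right-aligned.
 */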
static void queue_comp_task(struct ehca_cq *__cq);
static struct ehca_comp_pool *pool;
static inline void comp_event_callback(struct ehca_cq *cq)
{
if (!cq->ib_cq.comp_handler)
return;
spin_lock(&cq->cb_lock);
cq->ib_cq.comp_handler(&cq->ib_cq, cq->ib_cq.cq_context);
spin_unlock(&cq->cb_lock);
return;
}
static void print_error_data(struct ehca_shca *shca, void *data,
u64 *rblock, int length)
{
u64 type = EHCA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
u64 resource = rblock[1];
switch (type) {
case 0x1: /* Queue Pair */
{
struct ehca_qp *qp = (struct ehca_qp *)data;
/* only print error data if AER is set */
if (rblock[6] == 0)
return;
ehca_err(&shca->ib_device,
"QP 0x%x (resource=%llx) has errors.",
qp->ib_qp.qp_num, resource);
break;
}
case 0x4: /* Completion Queue */
{
struct ehca_cq *cq = (struct ehca_cq *)data;
ehca_err(&shca->ib_device,
"CQ 0x%x (resource=%llx) has errors.",
cq->cq_number, resource);
break;
}
default:
ehca_err(&shca->ib_device,
"Unknown error type: %llx on %s.",
type, shca->ib_device.name);
break;
}
ehca_err(&shca->ib_device, "Error data is available: %llx.", resource);
ehca_err(&shca->ib_device, "EHCA ----- error data begin "
"---------------------------------------------------");
ehca_dmp(rblock, length, "resource=%llx", resource);
ehca_err(&shca->ib_device, "EHCA ----- error data end "
"----------------------------------------------------");
return;
}
int ehca_error_data(struct ehca_shca *shca, void *data,
u64 resource)
{
unsigned long ret;
u64 *rblock;
unsigned long block_count;
rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
if (!rblock) {
ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
ret = -ENOMEM;
goto error_data1;
}
/* rblock must be 4K aligned and should be 4K large */
ret = hipz_h_error_data(shca->ipz_hca_handle,
resource,
rblock,
&block_count);
if (ret == H_R_STATE)
ehca_err(&shca->ib_device,
"No error data is available: %llx.", resource);
else if (ret == H_SUCCESS) {
int length;
length = EHCA_BMASK_GET(ERROR_DATA_LENGTH, rblock[0]);
if (length > EHCA_PAGESIZE)
length = EHCA_PAGESIZE;
print_error_data(shca, data, rblock, length);
} else
ehca_err(&shca->ib_device,
"Error data could not be fetched: %llx", resource);
ehca_free_fw_ctrlblock(rblock);
error_data1:
return ret;
}
static void dispatch_qp_event(struct ehca_shca *shca, struct ehca_qp *qp,
enum ib_event_type event_type)
{
struct ib_event event;
/* PATH_MIG without the QP ever having been armed is false alarm */
if (event_type == IB_EVENT_PATH_MIG && !qp->mig_armed)
return;
event.device = &shca->ib_device;
event.event = event_type;
if (qp->ext_type == EQPT_SRQ) {
if (!qp->ib_srq.event_handler)
return;
event.element.srq = &qp->ib_srq;
qp->ib_srq.event_handler(&event, qp->ib_srq.srq_context);
} else {
if (!qp->ib_qp.event_handler)
return;
event.element.qp = &qp->ib_qp;
qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
}
}
static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
enum ib_event_type event_type, int fatal)
{
struct ehca_qp *qp;
u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);
read_lock(&ehca_qp_idr_lock);
qp = idr_find(&ehca_qp_idr, token);
if (qp)
atomic_inc(&qp->nr_events);
read_unlock(&ehca_qp_idr_lock);
if (!qp)
return;
if (fatal)
ehca_error_data(shca, qp, qp->ipz_qp_handle.handle);
dispatch_qp_event(shca, qp, fatal && qp->ext_type == EQPT_SRQ ?
IB_EVENT_SRQ_ERR : event_type);
/*
* eHCA only processes one WQE at a time for SRQ base QPs,
* so the last WQE has been processed as soon as the QP enters
* error state.
*/
if (fatal && qp->ext_type == EQPT_SRQBASE)
dispatch_qp_event(shca, qp, IB_EVENT_QP_LAST_WQE_REACHED);
if (atomic_dec_and_test(&qp->nr_events))
wake_up(&qp->wait_completion);
return;
}
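/*
 * Note the nr_events reference taken under ehca_qp_idr_lock above: a
 * concurrent destroy_qp() waits on wait_completion until nr_events
 * drops to zero, so the QP cannot be freed while an event for it is
 * still being dispatched.
 */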
static void cq_event_callback(struct ehca_shca *shca,
u64 eqe)
{
struct ehca_cq *cq;
u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe);
read_lock(&ehca_cq_idr_lock);
cq = idr_find(&ehca_cq_idr, token);
if (cq)
atomic_inc(&cq->nr_events);
read_unlock(&ehca_cq_idr_lock);
if (!cq)
return;
ehca_error_data(shca, cq, cq->ipz_cq_handle.handle);
if (atomic_dec_and_test(&cq->nr_events))
wake_up(&cq->wait_completion);
return;
}
static void parse_identifier(struct ehca_shca *shca, u64 eqe)
{
u8 identifier = EHCA_BMASK_GET(EQE_EE_IDENTIFIER, eqe);
switch (identifier) {
case 0x02: /* path migrated */
qp_event_callback(shca, eqe, IB_EVENT_PATH_MIG, 0);
break;
case 0x03: /* communication established */
qp_event_callback(shca, eqe, IB_EVENT_COMM_EST, 0);
break;
case 0x04: /* send queue drained */
qp_event_callback(shca, eqe, IB_EVENT_SQ_DRAINED, 0);
break;
case 0x05: /* QP error */
case 0x06: /* QP error */
qp_event_callback(shca, eqe, IB_EVENT_QP_FATAL, 1);
break;
case 0x07: /* CQ error */
case 0x08: /* CQ error */
cq_event_callback(shca, eqe);
break;
case 0x09: /* MRMWPTE error */
ehca_err(&shca->ib_device, "MRMWPTE error.");
break;
case 0x0A: /* port event */
ehca_err(&shca->ib_device, "Port event.");
break;
case 0x0B: /* MR access error */
ehca_err(&shca->ib_device, "MR access error.");
break;
case 0x0C: /* EQ error */
ehca_err(&shca->ib_device, "EQ error.");
break;
case 0x0D: /* P/Q_Key mismatch */
ehca_err(&shca->ib_device, "P/Q_Key mismatch.");
break;
case 0x10: /* sampling complete */
ehca_err(&shca->ib_device, "Sampling complete.");
break;
case 0x11: /* unaffiliated access error */
ehca_err(&shca->ib_device, "Unaffiliated access error.");
break;
case 0x12: /* path migrating */
ehca_err(&shca->ib_device, "Path migrating.");
break;
case 0x13: /* interface trace stopped */
ehca_err(&shca->ib_device, "Interface trace stopped.");
break;
case 0x14: /* first error capture info available */
ehca_info(&shca->ib_device, "First error capture available");
break;
case 0x15: /* SRQ limit reached */
qp_event_callback(shca, eqe, IB_EVENT_SRQ_LIMIT_REACHED, 0);
break;
default:
ehca_err(&shca->ib_device, "Unknown identifier: %x on %s.",
identifier, shca->ib_device.name);
break;
}
return;
}
static void dispatch_port_event(struct ehca_shca *shca, int port_num,
enum ib_event_type type, const char *msg)
{
struct ib_event event;
ehca_info(&shca->ib_device, "port %d %s.", port_num, msg);
event.device = &shca->ib_device;
event.event = type;
event.element.port_num = port_num;
ib_dispatch_event(&event);
}
static void notify_port_conf_change(struct ehca_shca *shca, int port_num)
{
struct ehca_sma_attr new_attr;
struct ehca_sma_attr *old_attr = &shca->sport[port_num - 1].saved_attr;
ehca_query_sma_attr(shca, port_num, &new_attr);
if (new_attr.sm_sl != old_attr->sm_sl ||
new_attr.sm_lid != old_attr->sm_lid)
dispatch_port_event(shca, port_num, IB_EVENT_SM_CHANGE,
"SM changed");
if (new_attr.lid != old_attr->lid ||
new_attr.lmc != old_attr->lmc)
dispatch_port_event(shca, port_num, IB_EVENT_LID_CHANGE,
"LID changed");
if (new_attr.pkey_tbl_len != old_attr->pkey_tbl_len ||
memcmp(new_attr.pkeys, old_attr->pkeys,
sizeof(u16) * new_attr.pkey_tbl_len))
dispatch_port_event(shca, port_num, IB_EVENT_PKEY_CHANGE,
"P_Key changed");
*old_attr = new_attr;
}
/* replay modify_qp for sqps -- return 0 if all is well, 1 if AQP1 destroyed */
static int replay_modify_qp(struct ehca_sport *sport)
{
int aqp1_destroyed;
unsigned long flags;
spin_lock_irqsave(&sport->mod_sqp_lock, flags);
aqp1_destroyed = !sport->ibqp_sqp[IB_QPT_GSI];
if (sport->ibqp_sqp[IB_QPT_SMI])
ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]);
if (!aqp1_destroyed)
ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);
spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
return aqp1_destroyed;
}
static void parse_ec(struct ehca_shca *shca, u64 eqe)
{
u8 ec = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
u8 spec_event;
struct ehca_sport *sport = &shca->sport[port - 1];
switch (ec) {
case 0x30: /* port availability change */
if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
/* only replay modify_qp calls in autodetect mode;
* if AQP1 was destroyed, the port is already down
* again and we can drop the event.
*/
if (ehca_nr_ports < 0)
if (replay_modify_qp(sport))
break;
sport->port_state = IB_PORT_ACTIVE;
dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
"is active");
ehca_query_sma_attr(shca, port, &sport->saved_attr);
} else {
sport->port_state = IB_PORT_DOWN;
dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
"is inactive");
}
break;
case 0x31:
/* port configuration change
* disruptive change is caused by
* LID, PKEY or SM change
*/
if (EHCA_BMASK_GET(NEQE_DISRUPTIVE, eqe)) {
ehca_warn(&shca->ib_device, "disruptive port "
"%d configuration change", port);
sport->port_state = IB_PORT_DOWN;
dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
"is inactive");
sport->port_state = IB_PORT_ACTIVE;
dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
"is active");
ehca_query_sma_attr(shca, port,
&sport->saved_attr);
} else
notify_port_conf_change(shca, port);
break;
case 0x32: /* adapter malfunction */
ehca_err(&shca->ib_device, "Adapter malfunction.");
break;
case 0x33: /* trace stopped */
ehca_err(&shca->ib_device, "Traced stopped.");
break;
case 0x34: /* util async event */
spec_event = EHCA_BMASK_GET(NEQE_SPECIFIC_EVENT, eqe);
if (spec_event == 0x80) /* client reregister required */
dispatch_port_event(shca, port,
IB_EVENT_CLIENT_REREGISTER,
"client reregister req.");
else
ehca_warn(&shca->ib_device, "Unknown util async "
"event %x on port %x", spec_event, port);
break;
default:
ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
ec, shca->ib_device.name);
break;
}
return;
}
static inline void reset_eq_pending(struct ehca_cq *cq)
{
u64 CQx_EP;
struct h_galpa gal = cq->galpas.kernel;
hipz_galpa_store_cq(gal, cqx_ep, 0x0);
CQx_EP = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_ep));
return;
}
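/*
 * The dummy load after the store above apparently acts as a read-back
 * that flushes the MMIO write clearing the CQ's event-pending (EP)
 * bit; the loaded value itself is discarded.
 */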
irqreturn_t ehca_interrupt_neq(int irq, void *dev_id)
{
struct ehca_shca *shca = (struct ehca_shca*)dev_id;
tasklet_hi_schedule(&shca->neq.interrupt_task);
return IRQ_HANDLED;
}
void ehca_tasklet_neq(unsigned long data)
{
struct ehca_shca *shca = (struct ehca_shca*)data;
struct ehca_eqe *eqe;
u64 ret;
eqe = ehca_poll_eq(shca, &shca->neq);
while (eqe) {
if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry))
parse_ec(shca, eqe->entry);
eqe = ehca_poll_eq(shca, &shca->neq);
}
ret = hipz_h_reset_event(shca->ipz_hca_handle,
shca->neq.ipz_eq_handle, 0xFFFFFFFFFFFFFFFFL);
if (ret != H_SUCCESS)
ehca_err(&shca->ib_device, "Can't clear notification events.");
return;
}
irqreturn_t ehca_interrupt_eq(int irq, void *dev_id)
{
struct ehca_shca *shca = (struct ehca_shca*)dev_id;
tasklet_hi_schedule(&shca->eq.interrupt_task);
return IRQ_HANDLED;
}
static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
{
u64 eqe_value;
u32 token;
struct ehca_cq *cq;
eqe_value = eqe->entry;
ehca_dbg(&shca->ib_device, "eqe_value=%llx", eqe_value);
if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
ehca_dbg(&shca->ib_device, "Got completion event");
token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
read_lock(&ehca_cq_idr_lock);
cq = idr_find(&ehca_cq_idr, token);
if (cq)
atomic_inc(&cq->nr_events);
read_unlock(&ehca_cq_idr_lock);
if (cq == NULL) {
ehca_err(&shca->ib_device,
"Invalid eqe for non-existing cq token=%x",
token);
return;
}
reset_eq_pending(cq);
if (ehca_scaling_code)
queue_comp_task(cq);
else {
comp_event_callback(cq);
if (atomic_dec_and_test(&cq->nr_events))
wake_up(&cq->wait_completion);
}
} else {
ehca_dbg(&shca->ib_device, "Got non completion event");
parse_identifier(shca, eqe_value);
}
}
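/*
 * ehca_process_eq() works in two phases: it first drains up to
 * EHCA_EQE_CACHE_SIZE EQEs into eq->eqe_cache under eq->irq_spinlock,
 * taking an nr_events reference on each affected CQ, and only then
 * re-arms the CQs and runs the completion callbacks.  With is_irq == 0
 * it serves as a poll/watchdog path (cf. the "deadman" message below)
 * and issues an explicit EOI.
 */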
void ehca_process_eq(struct ehca_shca *shca, int is_irq)
{
struct ehca_eq *eq = &shca->eq;
struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
u64 eqe_value, ret;
int eqe_cnt, i;
int eq_empty = 0;
spin_lock(&eq->irq_spinlock);
if (is_irq) {
const int max_query_cnt = 100;
int query_cnt = 0;
int int_state = 1;
do {
int_state = hipz_h_query_int_state(
shca->ipz_hca_handle, eq->ist);
query_cnt++;
iosync();
} while (int_state && query_cnt < max_query_cnt);
if (unlikely((query_cnt == max_query_cnt)))
ehca_dbg(&shca->ib_device, "int_state=%x query_cnt=%x",
int_state, query_cnt);
}
/* read out all eqes */
eqe_cnt = 0;
do {
u32 token;
eqe_cache[eqe_cnt].eqe = ehca_poll_eq(shca, eq);
if (!eqe_cache[eqe_cnt].eqe)
break;
eqe_value = eqe_cache[eqe_cnt].eqe->entry;
if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
read_lock(&ehca_cq_idr_lock);
eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
if (eqe_cache[eqe_cnt].cq)
atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
read_unlock(&ehca_cq_idr_lock);
if (!eqe_cache[eqe_cnt].cq) {
ehca_err(&shca->ib_device,
"Invalid eqe for non-existing cq "
"token=%x", token);
continue;
}
} else
eqe_cache[eqe_cnt].cq = NULL;
eqe_cnt++;
} while (eqe_cnt < EHCA_EQE_CACHE_SIZE);
if (!eqe_cnt) {
if (is_irq)
ehca_dbg(&shca->ib_device,
"No eqe found for irq event");
goto unlock_irq_spinlock;
} else if (!is_irq) {
ret = hipz_h_eoi(eq->ist);
if (ret != H_SUCCESS)
ehca_err(&shca->ib_device,
"bad return code EOI -rc = %lld\n", ret);
ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
}
if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
ehca_dbg(&shca->ib_device, "too many eqes for one irq event");
/* enable irq for new packets */
for (i = 0; i < eqe_cnt; i++) {
if (eq->eqe_cache[i].cq)
reset_eq_pending(eq->eqe_cache[i].cq);
}
/* check eq */
spin_lock(&eq->spinlock);
eq_empty = (!ipz_eqit_eq_peek_valid(&shca->eq.ipz_queue));
spin_unlock(&eq->spinlock);
/* call completion handler for cached eqes */
for (i = 0; i < eqe_cnt; i++)
if (eq->eqe_cache[i].cq) {
if (ehca_scaling_code)
queue_comp_task(eq->eqe_cache[i].cq);
else {
struct ehca_cq *cq = eq->eqe_cache[i].cq;
comp_event_callback(cq);
if (atomic_dec_and_test(&cq->nr_events))
wake_up(&cq->wait_completion);
}
} else {
ehca_dbg(&shca->ib_device, "Got non completion event");
parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
}
/* poll eq if not empty */
if (eq_empty)
goto unlock_irq_spinlock;
do {
struct ehca_eqe *eqe;
eqe = ehca_poll_eq(shca, &shca->eq);
if (!eqe)
break;
process_eqe(shca, eqe);
} while (1);
unlock_irq_spinlock:
spin_unlock(&eq->irq_spinlock);
}
void ehca_tasklet_eq(unsigned long data)
{
ehca_process_eq((struct ehca_shca*)data, 1);
}
static int find_next_online_cpu(struct ehca_comp_pool *pool)
{
int cpu;
unsigned long flags;
WARN_ON_ONCE(!in_interrupt());
if (ehca_debug_level >= 3)
ehca_dmp(cpu_online_mask, cpumask_size(), "");
spin_lock_irqsave(&pool->last_cpu_lock, flags);
do {
cpu = cpumask_next(pool->last_cpu, cpu_online_mask);
if (cpu >= nr_cpu_ids)
cpu = cpumask_first(cpu_online_mask);
pool->last_cpu = cpu;
} while (!per_cpu_ptr(pool->cpu_comp_tasks, cpu)->active);
spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
return cpu;
}
static void __queue_comp_task(struct ehca_cq *__cq,
struct ehca_cpu_comp_task *cct,
struct task_struct *thread)
{
unsigned long flags;
spin_lock_irqsave(&cct->task_lock, flags);
spin_lock(&__cq->task_lock);
if (__cq->nr_callbacks == 0) {
__cq->nr_callbacks++;
list_add_tail(&__cq->entry, &cct->cq_list);
cct->cq_jobs++;
wake_up_process(thread);
} else
__cq->nr_callbacks++;
spin_unlock(&__cq->task_lock);
spin_unlock_irqrestore(&cct->task_lock, flags);
}
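/*
 * queue_comp_task() picks CPUs round-robin; if the chosen CPU's
 * completion task already has jobs queued, it advances once more to
 * spread the load before calling __queue_comp_task().
 */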
static void queue_comp_task(struct ehca_cq *__cq)
{
int cpu_id;
struct ehca_cpu_comp_task *cct;
struct task_struct *thread;
int cq_jobs;
unsigned long flags;
cpu_id = find_next_online_cpu(pool);
BUG_ON(!cpu_online(cpu_id));
cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id);
BUG_ON(!cct || !thread);
spin_lock_irqsave(&cct->task_lock, flags);
cq_jobs = cct->cq_jobs;
spin_unlock_irqrestore(&cct->task_lock, flags);
if (cq_jobs > 0) {
cpu_id = find_next_online_cpu(pool);
cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id);
BUG_ON(!cct || !thread);
}
__queue_comp_task(__cq, cct, thread);
}
static void run_comp_task(struct ehca_cpu_comp_task *cct)
{
struct ehca_cq *cq;
while (!list_empty(&cct->cq_list)) {
cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
spin_unlock_irq(&cct->task_lock);
comp_event_callback(cq);
if (atomic_dec_and_test(&cq->nr_events))
wake_up(&cq->wait_completion);
spin_lock_irq(&cct->task_lock);
spin_lock(&cq->task_lock);
cq->nr_callbacks--;
if (!cq->nr_callbacks) {
list_del_init(cct->cq_list.next);
cct->cq_jobs--;
}
spin_unlock(&cq->task_lock);
}
}
static void comp_task_park(unsigned int cpu)
{
struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
struct ehca_cpu_comp_task *target;
struct task_struct *thread;
struct ehca_cq *cq, *tmp;
LIST_HEAD(list);
spin_lock_irq(&cct->task_lock);
cct->cq_jobs = 0;
cct->active = 0;
list_splice_init(&cct->cq_list, &list);
spin_unlock_irq(&cct->task_lock);
cpu = find_next_online_cpu(pool);
target = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu);
spin_lock_irq(&target->task_lock);
list_for_each_entry_safe(cq, tmp, &list, entry) {
list_del(&cq->entry);
__queue_comp_task(cq, target, thread);
}
spin_unlock_irq(&target->task_lock);
}
static void comp_task_stop(unsigned int cpu, bool online)
{
struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
spin_lock_irq(&cct->task_lock);
cct->cq_jobs = 0;
cct->active = 0;
WARN_ON(!list_empty(&cct->cq_list));
spin_unlock_irq(&cct->task_lock);
}
static int comp_task_should_run(unsigned int cpu)
{
struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
return cct->cq_jobs;
}
static void comp_task(unsigned int cpu)
{
struct ehca_cpu_comp_task *cct = this_cpu_ptr(pool->cpu_comp_tasks);
int cql_empty;
spin_lock_irq(&cct->task_lock);
cql_empty = list_empty(&cct->cq_list);
if (!cql_empty) {
__set_current_state(TASK_RUNNING);
run_comp_task(cct);
}
spin_unlock_irq(&cct->task_lock);
}
static struct smp_hotplug_thread comp_pool_threads = {
.thread_should_run = comp_task_should_run,
.thread_fn = comp_task,
.thread_comm = "ehca_comp/%u",
.cleanup = comp_task_stop,
.park = comp_task_park,
};
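/*
 * The smpboot framework spawns one "ehca_comp/%u" thread per CPU and
 * drives the callbacks above: comp_task_should_run() is polled to
 * decide whether comp_task() runs, and comp_task_park() migrates any
 * queued CQs to another online CPU when a CPU is taken offline.
 */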
int ehca_create_comp_pool(void)
{
int cpu, ret = -ENOMEM;
if (!ehca_scaling_code)
return 0;
pool = kzalloc(sizeof(struct ehca_comp_pool), GFP_KERNEL);
if (pool == NULL)
return -ENOMEM;
spin_lock_init(&pool->last_cpu_lock);
pool->last_cpu = cpumask_any(cpu_online_mask);
pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
if (!pool->cpu_comp_tasks)
goto out_pool;
pool->cpu_comp_threads = alloc_percpu(struct task_struct *);
if (!pool->cpu_comp_threads)
goto out_tasks;
for_each_present_cpu(cpu) {
struct ehca_cpu_comp_task *cct;
cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
spin_lock_init(&cct->task_lock);
INIT_LIST_HEAD(&cct->cq_list);
}
comp_pool_threads.store = pool->cpu_comp_threads;
ret = smpboot_register_percpu_thread(&comp_pool_threads);
if (ret)
goto out_threads;
pr_info("eHCA scaling code enabled\n");
return ret;
out_threads:
free_percpu(pool->cpu_comp_threads);
out_tasks:
free_percpu(pool->cpu_comp_tasks);
out_pool:
kfree(pool);
return ret;
}
void ehca_destroy_comp_pool(void)
{
if (!ehca_scaling_code)
return;
smpboot_unregister_percpu_thread(&comp_pool_threads);
free_percpu(pool->cpu_comp_threads);
free_percpu(pool->cpu_comp_tasks);
kfree(pool);
}


@@ -1,77 +0,0 @@
/*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* Function definitions and structs for EQs, NEQs and interrupts
*
* Authors: Heiko J Schick <schickhj@de.ibm.com>
* Khadija Souissi <souissi@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __EHCA_IRQ_H
#define __EHCA_IRQ_H
struct ehca_shca;
#include <linux/interrupt.h>
#include <linux/types.h>
int ehca_error_data(struct ehca_shca *shca, void *data, u64 resource);
irqreturn_t ehca_interrupt_neq(int irq, void *dev_id);
void ehca_tasklet_neq(unsigned long data);
irqreturn_t ehca_interrupt_eq(int irq, void *dev_id);
void ehca_tasklet_eq(unsigned long data);
void ehca_process_eq(struct ehca_shca *shca, int is_irq);
struct ehca_cpu_comp_task {
struct list_head cq_list;
spinlock_t task_lock;
int cq_jobs;
int active;
};
struct ehca_comp_pool {
struct ehca_cpu_comp_task __percpu *cpu_comp_tasks;
struct task_struct * __percpu *cpu_comp_threads;
int last_cpu;
spinlock_t last_cpu_lock;
};
int ehca_create_comp_pool(void);
void ehca_destroy_comp_pool(void);
#endif


@@ -1,202 +0,0 @@
/*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* Function definitions for internal functions
*
* Authors: Heiko J Schick <schickhj@de.ibm.com>
* Dietmar Decker <ddecker@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __EHCA_IVERBS_H__
#define __EHCA_IVERBS_H__
#include "ehca_classes.h"
int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
struct ib_udata *uhw);
int ehca_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props);
enum rdma_protocol_type
ehca_query_protocol(struct ib_device *device, u8 port_num);
int ehca_query_sma_attr(struct ehca_shca *shca, u8 port,
struct ehca_sma_attr *attr);
int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 * pkey);
int ehca_query_gid(struct ib_device *ibdev, u8 port, int index,
union ib_gid *gid);
int ehca_modify_port(struct ib_device *ibdev, u8 port, int port_modify_mask,
struct ib_port_modify *props);
struct ib_pd *ehca_alloc_pd(struct ib_device *device,
struct ib_ucontext *context,
struct ib_udata *udata);
int ehca_dealloc_pd(struct ib_pd *pd);
struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
int ehca_destroy_ah(struct ib_ah *ah);
struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt, int mr_access_flags,
struct ib_udata *udata);
int ehca_dereg_mr(struct ib_mr *mr);
struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
int ehca_dealloc_mw(struct ib_mw *mw);
struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
int mr_access_flags,
struct ib_fmr_attr *fmr_attr);
int ehca_map_phys_fmr(struct ib_fmr *fmr,
u64 *page_list, int list_len, u64 iova);
int ehca_unmap_fmr(struct list_head *fmr_list);
int ehca_dealloc_fmr(struct ib_fmr *fmr);
enum ehca_eq_type {
EHCA_EQ = 0, /* Event Queue */
EHCA_NEQ /* Notification Event Queue */
};
int ehca_create_eq(struct ehca_shca *shca, struct ehca_eq *eq,
enum ehca_eq_type type, const u32 length);
int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq);
void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq);
struct ib_cq *ehca_create_cq(struct ib_device *device,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata);
int ehca_destroy_cq(struct ib_cq *cq);
int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
int ehca_peek_cq(struct ib_cq *cq, int wc_cnt);
int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags);
struct ib_qp *ehca_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
int ehca_destroy_qp(struct ib_qp *qp);
int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
struct ib_udata *udata);
int ehca_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int ehca_post_send(struct ib_qp *qp, struct ib_send_wr *send_wr,
struct ib_send_wr **bad_send_wr);
int ehca_post_recv(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
struct ib_recv_wr **bad_recv_wr);
int ehca_post_srq_recv(struct ib_srq *srq,
struct ib_recv_wr *recv_wr,
struct ib_recv_wr **bad_recv_wr);
struct ib_srq *ehca_create_srq(struct ib_pd *pd,
struct ib_srq_init_attr *init_attr,
struct ib_udata *udata);
int ehca_modify_srq(struct ib_srq *srq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int ehca_destroy_srq(struct ib_srq *srq);
u64 ehca_define_sqp(struct ehca_shca *shca, struct ehca_qp *ibqp,
struct ib_qp_init_attr *qp_init_attr);
int ehca_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
int ehca_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
struct ib_udata *udata);
int ehca_dealloc_ucontext(struct ib_ucontext *context);
int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad_hdr *in, size_t in_mad_size,
struct ib_mad_hdr *out, size_t *out_mad_size,
u16 *out_mad_pkey_index);
void ehca_poll_eqs(unsigned long data);
int ehca_calc_ipd(struct ehca_shca *shca, int port,
enum ib_rate path_rate, u32 *ipd);
void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq);
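/*
 * Firmware control blocks must be 4K aligned (cf. the comment in
 * ehca_error_data()).  With 4K kernel pages get_zeroed_page() gives
 * that directly; the CONFIG_PPC_64K_PAGES variants below presumably
 * use a separate 4K-aligned allocator instead.
 */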
#ifdef CONFIG_PPC_64K_PAGES
void *ehca_alloc_fw_ctrlblock(gfp_t flags);
void ehca_free_fw_ctrlblock(void *ptr);
#else
#define ehca_alloc_fw_ctrlblock(flags) ((void *)get_zeroed_page(flags))
#define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
#endif
void ehca_recover_sqp(struct ib_qp *sqp);
#endif

File diff suppressed because it is too large


@@ -1,131 +0,0 @@
/*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* mcast functions
*
* Authors: Khadija Souissi <souissik@de.ibm.com>
* Waleri Fomin <fomin@de.ibm.com>
* Reinhard Ernst <rernst@de.ibm.com>
* Hoang-Nam Nguyen <hnguyen@de.ibm.com>
* Heiko J Schick <schickhj@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/err.h>
#include "ehca_classes.h"
#include "ehca_tools.h"
#include "ehca_qes.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
#define MAX_MC_LID 0xFFFE
#define MIN_MC_LID 0xC000 /* Multicast limits */
#define EHCA_VALID_MULTICAST_GID(gid) ((gid)[0] == 0xFF)
#define EHCA_VALID_MULTICAST_LID(lid) \
(((lid) >= MIN_MC_LID) && ((lid) <= MAX_MC_LID))
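/*
 * Per the InfiniBand spec, multicast LIDs occupy the range
 * 0xC000..0xFFFE (0xFFFF being the permissive LID), which is what the
 * two macros above enforce.
 */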
int ehca_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
ib_device);
union ib_gid my_gid;
u64 subnet_prefix, interface_id, h_ret;
if (ibqp->qp_type != IB_QPT_UD) {
ehca_err(ibqp->device, "invalid qp_type=%x", ibqp->qp_type);
return -EINVAL;
}
if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) {
ehca_err(ibqp->device, "invalid multicast gid");
return -EINVAL;
} else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) {
ehca_err(ibqp->device, "invalid multicast lid=%x", lid);
return -EINVAL;
}
memcpy(&my_gid, gid->raw, sizeof(union ib_gid));
subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
interface_id = be64_to_cpu(my_gid.global.interface_id);
h_ret = hipz_h_attach_mcqp(shca->ipz_hca_handle,
my_qp->ipz_qp_handle,
my_qp->galpas.kernel,
lid, subnet_prefix, interface_id);
if (h_ret != H_SUCCESS)
ehca_err(ibqp->device,
"ehca_qp=%p qp_num=%x hipz_h_attach_mcqp() failed "
"h_ret=%lli", my_qp, ibqp->qp_num, h_ret);
return ehca2ib_return_code(h_ret);
}
int ehca_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
struct ehca_shca *shca = container_of(ibqp->pd->device,
struct ehca_shca, ib_device);
union ib_gid my_gid;
u64 subnet_prefix, interface_id, h_ret;
if (ibqp->qp_type != IB_QPT_UD) {
ehca_err(ibqp->device, "invalid qp_type %x", ibqp->qp_type);
return -EINVAL;
}
if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) {
ehca_err(ibqp->device, "invalid multicast gid");
return -EINVAL;
} else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) {
ehca_err(ibqp->device, "invalid multicast lid=%x", lid);
return -EINVAL;
}
memcpy(&my_gid, gid->raw, sizeof(union ib_gid));
subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
interface_id = be64_to_cpu(my_gid.global.interface_id);
h_ret = hipz_h_detach_mcqp(shca->ipz_hca_handle,
my_qp->ipz_qp_handle,
my_qp->galpas.kernel,
lid, subnet_prefix, interface_id);
if (h_ret != H_SUCCESS)
ehca_err(ibqp->device,
"ehca_qp=%p qp_num=%x hipz_h_detach_mcqp() failed "
"h_ret=%lli", my_qp, ibqp->qp_num, h_ret);
return ehca2ib_return_code(h_ret);
}

File diff suppressed because it is too large


@@ -1,127 +0,0 @@
/*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* MR/MW declarations and inline functions
*
* Authors: Dietmar Decker <ddecker@de.ibm.com>
* Christoph Raisch <raisch@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EHCA_MRMW_H_
#define _EHCA_MRMW_H_
enum ehca_reg_type {
EHCA_REG_MR,
EHCA_REG_BUSMAP_MR
};
int ehca_reg_mr(struct ehca_shca *shca,
struct ehca_mr *e_mr,
u64 *iova_start,
u64 size,
int acl,
struct ehca_pd *e_pd,
struct ehca_mr_pginfo *pginfo,
u32 *lkey,
u32 *rkey,
enum ehca_reg_type reg_type);
int ehca_reg_mr_rpages(struct ehca_shca *shca,
struct ehca_mr *e_mr,
struct ehca_mr_pginfo *pginfo);
int ehca_rereg_mr(struct ehca_shca *shca,
struct ehca_mr *e_mr,
u64 *iova_start,
u64 size,
int mr_access_flags,
struct ehca_pd *e_pd,
struct ehca_mr_pginfo *pginfo,
u32 *lkey,
u32 *rkey);
int ehca_unmap_one_fmr(struct ehca_shca *shca,
struct ehca_mr *e_fmr);
int ehca_reg_smr(struct ehca_shca *shca,
struct ehca_mr *e_origmr,
struct ehca_mr *e_newmr,
u64 *iova_start,
int acl,
struct ehca_pd *e_pd,
u32 *lkey,
u32 *rkey);
int ehca_reg_internal_maxmr(struct ehca_shca *shca,
struct ehca_pd *e_pd,
struct ehca_mr **maxmr);
int ehca_reg_maxmr(struct ehca_shca *shca,
struct ehca_mr *e_newmr,
u64 *iova_start,
int acl,
struct ehca_pd *e_pd,
u32 *lkey,
u32 *rkey);
int ehca_dereg_internal_maxmr(struct ehca_shca *shca);
int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
u64 *page_list,
int list_len);
int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
u32 number,
u64 *kpage);
int ehca_mr_is_maxmr(u64 size,
u64 *iova_start);
void ehca_mrmw_map_acl(int ib_acl,
u32 *hipz_acl);
void ehca_mrmw_set_pgsize_hipz_acl(u32 pgsize, u32 *hipz_acl);
void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
int *ib_acl);
void ehca_mr_deletenew(struct ehca_mr *mr);
int ehca_create_busmap(void);
void ehca_destroy_busmap(void);
extern struct ib_dma_mapping_ops ehca_dma_mapping_ops;
#endif /*_EHCA_MRMW_H_*/


@@ -1,123 +0,0 @@
/*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* PD functions
*
* Authors: Christoph Raisch <raisch@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/slab.h>
#include "ehca_tools.h"
#include "ehca_iverbs.h"
static struct kmem_cache *pd_cache;
struct ib_pd *ehca_alloc_pd(struct ib_device *device,
struct ib_ucontext *context, struct ib_udata *udata)
{
struct ehca_pd *pd;
int i;
pd = kmem_cache_zalloc(pd_cache, GFP_KERNEL);
if (!pd) {
ehca_err(device, "device=%p context=%p out of memory",
device, context);
return ERR_PTR(-ENOMEM);
}
for (i = 0; i < 2; i++) {
INIT_LIST_HEAD(&pd->free[i]);
INIT_LIST_HEAD(&pd->full[i]);
}
mutex_init(&pd->lock);
/*
* Kernel PD: context == NULL
* User PD:   context != NULL
*/
if (!context) {
/*
* After init, kernel PDs always reuse
* the one created in ehca_shca_reopen()
*/
struct ehca_shca *shca = container_of(device, struct ehca_shca,
ib_device);
pd->fw_pd.value = shca->pd->fw_pd.value;
} else
pd->fw_pd.value = (u64)pd;
return &pd->ib_pd;
}
int ehca_dealloc_pd(struct ib_pd *pd)
{
struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
int i, leftovers = 0;
struct ipz_small_queue_page *page, *tmp;
for (i = 0; i < 2; i++) {
list_splice(&my_pd->full[i], &my_pd->free[i]);
list_for_each_entry_safe(page, tmp, &my_pd->free[i], list) {
leftovers = 1;
free_page(page->page);
kmem_cache_free(small_qp_cache, page);
}
}
if (leftovers)
ehca_warn(pd->device,
"Some small queue pages were not freed");
kmem_cache_free(pd_cache, my_pd);
return 0;
}
int ehca_init_pd_cache(void)
{
pd_cache = kmem_cache_create("ehca_cache_pd",
sizeof(struct ehca_pd), 0,
SLAB_HWCACHE_ALIGN,
NULL);
if (!pd_cache)
return -ENOMEM;
return 0;
}
void ehca_cleanup_pd_cache(void)
{
kmem_cache_destroy(pd_cache);
}


@@ -1,260 +0,0 @@
/*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* Hardware request structures
*
* Authors: Waleri Fomin <fomin@de.ibm.com>
* Reinhard Ernst <rernst@de.ibm.com>
* Christoph Raisch <raisch@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EHCA_QES_H_
#define _EHCA_QES_H_
#include "ehca_tools.h"
/* virtual scatter gather entry to specify remote addresses with length */
struct ehca_vsgentry {
u64 vaddr;
u32 lkey;
u32 length;
};
#define GRH_FLAG_MASK EHCA_BMASK_IBM( 7, 7)
#define GRH_IPVERSION_MASK EHCA_BMASK_IBM( 0, 3)
#define GRH_TCLASS_MASK EHCA_BMASK_IBM( 4, 12)
#define GRH_FLOWLABEL_MASK EHCA_BMASK_IBM(13, 31)
#define GRH_PAYLEN_MASK EHCA_BMASK_IBM(32, 47)
#define GRH_NEXTHEADER_MASK EHCA_BMASK_IBM(48, 55)
#define GRH_HOPLIMIT_MASK EHCA_BMASK_IBM(56, 63)
/*
* Unreliable Datagram Address Vector Format
* see IBTA Vol1 chapter 8.3 Global Routing Header
*/
struct ehca_ud_av {
u8 sl;
u8 lnh;
u16 dlid;
u8 reserved1;
u8 reserved2;
u8 reserved3;
u8 slid_path_bits;
u8 reserved4;
u8 ipd;
u8 reserved5;
u8 pmtu;
u32 reserved6;
u64 reserved7;
union {
struct {
u64 word_0; /* always set to 6 */
/* should be 0x1B for IB transport */
u64 word_1;
u64 word_2;
u64 word_3;
u64 word_4;
} grh;
struct {
u32 wd_0;
u32 wd_1;
/* DWord_1 --> SGID */
u32 sgid_wd3;
u32 sgid_wd2;
u32 sgid_wd1;
u32 sgid_wd0;
/* DWord_3 --> DGID */
u32 dgid_wd3;
u32 dgid_wd2;
u32 dgid_wd1;
u32 dgid_wd0;
} grh_l;
};
};
/* maximum number of sg entries allowed in a WQE */
#define MAX_WQE_SG_ENTRIES 252
#define WQE_OPTYPE_SEND 0x80
#define WQE_OPTYPE_RDMAREAD 0x40
#define WQE_OPTYPE_RDMAWRITE 0x20
#define WQE_OPTYPE_CMPSWAP 0x10
#define WQE_OPTYPE_FETCHADD 0x08
#define WQE_OPTYPE_BIND 0x04
#define WQE_WRFLAG_REQ_SIGNAL_COM 0x80
#define WQE_WRFLAG_FENCE 0x40
#define WQE_WRFLAG_IMM_DATA_PRESENT 0x20
#define WQE_WRFLAG_SOLIC_EVENT 0x10
#define WQEF_CACHE_HINT 0x80
#define WQEF_CACHE_HINT_RD_WR 0x40
#define WQEF_TIMED_WQE 0x20
#define WQEF_PURGE 0x08
#define WQEF_HIGH_NIBBLE 0xF0
#define MW_BIND_ACCESSCTRL_R_WRITE 0x40
#define MW_BIND_ACCESSCTRL_R_READ 0x20
#define MW_BIND_ACCESSCTRL_R_ATOMIC 0x10
struct ehca_wqe {
u64 work_request_id;
u8 optype;
u8 wr_flag;
u16 pkeyi;
u8 wqef;
u8 nr_of_data_seg;
u16 wqe_provided_slid;
u32 destination_qp_number;
u32 resync_psn_sqp;
u32 local_ee_context_qkey;
u32 immediate_data;
union {
struct {
u64 remote_virtual_address;
u32 rkey;
u32 reserved;
u64 atomic_1st_op_dma_len;
u64 atomic_2nd_op;
struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
} nud;
struct {
u64 ehca_ud_av_ptr;
u64 reserved1;
u64 reserved2;
u64 reserved3;
struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
} ud_avp;
struct {
struct ehca_ud_av ud_av;
struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES -
2];
} ud_av;
struct {
u64 reserved0;
u64 reserved1;
u64 reserved2;
u64 reserved3;
struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
} all_rcv;
struct {
u64 reserved;
u32 rkey;
u32 old_rkey;
u64 reserved1;
u64 reserved2;
u64 virtual_address;
u32 reserved3;
u32 length;
u32 reserved4;
u16 reserved5;
u8 reserved6;
u8 lr_ctl;
u32 lkey;
u32 reserved7;
u64 reserved8;
u64 reserved9;
u64 reserved10;
u64 reserved11;
} bind;
struct {
u64 reserved12;
u64 reserved13;
u32 size;
u32 start;
} inline_data;
} u;
};
#define WC_SEND_RECEIVE EHCA_BMASK_IBM(0, 0)
#define WC_IMM_DATA EHCA_BMASK_IBM(1, 1)
#define WC_GRH_PRESENT EHCA_BMASK_IBM(2, 2)
#define WC_SE_BIT EHCA_BMASK_IBM(3, 3)
#define WC_STATUS_ERROR_BIT 0x80000000
#define WC_STATUS_REMOTE_ERROR_FLAGS 0x0000F800
#define WC_STATUS_PURGE_BIT 0x10
#define WC_SEND_RECEIVE_BIT 0x80
struct ehca_cqe {
u64 work_request_id;
u8 optype;
u8 w_completion_flags;
u16 reserved1;
u32 nr_bytes_transferred;
u32 immediate_data;
u32 local_qp_number;
u8 freed_resource_count;
u8 service_level;
u16 wqe_count;
u32 qp_token;
u32 qkey_ee_token;
u32 remote_qp_number;
u16 dlid;
u16 rlid;
u16 reserved2;
u16 pkey_index;
u32 cqe_timestamp;
u32 wqe_timestamp;
u8 wqe_timestamp_valid;
u8 reserved3;
u8 reserved4;
u8 cqe_flags;
u32 status;
};
struct ehca_eqe {
u64 entry;
};
struct ehca_mrte {
u64 starting_va;
u64 length; /* length of memory region in bytes*/
u32 pd;
u8 key_instance;
u8 pagesize;
u8 mr_control;
u8 local_remote_access_ctrl;
u8 reserved[0x20 - 0x18];
u64 at_pointer[4];
};
#endif /*_EHCA_QES_H_*/

File diff suppressed because it is too large


@@ -1,953 +0,0 @@
/*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* post_send/recv, poll_cq, req_notify
*
* Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
* Waleri Fomin <fomin@de.ibm.com>
* Joachim Fenkes <fenkes@de.ibm.com>
* Reinhard Ernst <rernst@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "ehca_classes.h"
#include "ehca_tools.h"
#include "ehca_qes.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
#include "hipz_fns.h"
/* in RC traffic, insert an empty RDMA READ every this many packets */
#define ACK_CIRC_THRESHOLD 2000000
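/*
 * An empty RDMA READ forces the responder to generate a reply that
 * implicitly acknowledges all packets sent so far; ehca_post_send()
 * uses this ("circ wr") to bound the window of unacknowledged RC
 * packets.
 */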
static u64 replace_wr_id(u64 wr_id, u16 idx)
{
u64 ret;
ret = wr_id & ~QMAP_IDX_MASK;
ret |= idx & QMAP_IDX_MASK;
return ret;
}
static u16 get_app_wr_id(u64 wr_id)
{
return wr_id & QMAP_IDX_MASK;
}
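/*
 * The two helpers above multiplex the queue-map index into the low
 * QMAP_IDX_MASK bits of the wr_id handed to hardware; the consumer's
 * original low bits are kept in qmap_entry->app_wr_id and restored
 * when the completion is reported back.
 */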
static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
struct ehca_wqe *wqe_p,
struct ib_recv_wr *recv_wr,
u32 rq_map_idx)
{
u8 cnt_ds;
if (unlikely((recv_wr->num_sge < 0) ||
(recv_wr->num_sge > ipz_rqueue->act_nr_of_sg))) {
ehca_gen_err("Invalid number of WQE SGE. "
"num_sqe=%x max_nr_of_sg=%x",
recv_wr->num_sge, ipz_rqueue->act_nr_of_sg);
return -EINVAL; /* invalid SG list length */
}
/* clear wqe header until sglist */
memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
wqe_p->work_request_id = replace_wr_id(recv_wr->wr_id, rq_map_idx);
wqe_p->nr_of_data_seg = recv_wr->num_sge;
for (cnt_ds = 0; cnt_ds < recv_wr->num_sge; cnt_ds++) {
wqe_p->u.all_rcv.sg_list[cnt_ds].vaddr =
recv_wr->sg_list[cnt_ds].addr;
wqe_p->u.all_rcv.sg_list[cnt_ds].lkey =
recv_wr->sg_list[cnt_ds].lkey;
wqe_p->u.all_rcv.sg_list[cnt_ds].length =
recv_wr->sg_list[cnt_ds].length;
}
if (ehca_debug_level >= 3) {
ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
ipz_rqueue);
ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
}
return 0;
}
#if defined(DEBUG_GSI_SEND_WR)
/* need ib_mad struct */
#include <rdma/ib_mad.h>
static void trace_ud_wr(const struct ib_ud_wr *ud_wr)
{
int idx = 0;
int j;
while (ud_wr) {
struct ib_mad_hdr *mad_hdr = ud_wr->mad_hdr;
struct ib_sge *sge = ud_wr->wr.sg_list;
ehca_gen_dbg("ud_wr#%x wr_id=%lx num_sge=%x "
"send_flags=%x opcode=%x", idx, ud_wr->wr.wr_id,
ud_wr->wr.num_sge, ud_wr->wr.send_flags,
ud_wr->wr.opcode);
if (mad_hdr) {
ehca_gen_dbg("ud_wr#%x mad_hdr base_version=%x "
"mgmt_class=%x class_version=%x method=%x "
"status=%x class_specific=%x tid=%lx "
"attr_id=%x resv=%x attr_mod=%x",
idx, mad_hdr->base_version,
mad_hdr->mgmt_class,
mad_hdr->class_version, mad_hdr->method,
mad_hdr->status, mad_hdr->class_specific,
mad_hdr->tid, mad_hdr->attr_id,
mad_hdr->resv,
mad_hdr->attr_mod);
}
for (j = 0; j < ud_wr->wr.num_sge; j++) {
u8 *data = __va(sge->addr);
ehca_gen_dbg("ud_wr#%x sge#%x addr=%p length=%x "
"lkey=%x",
idx, j, data, sge->length, sge->lkey);
/* assume length is n*16 */
ehca_dmp(data, sge->length, "ud_wr#%x sge#%x",
idx, j);
sge++;
} /* eof for j */
idx++;
/* can't call the ud_wr() helper here: the local variable shadows it */
ud_wr = container_of(ud_wr->wr.next, struct ib_ud_wr, wr);
} /* eof while ud_wr */
}
#endif /* DEBUG_GSI_SEND_WR */
static inline int ehca_write_swqe(struct ehca_qp *qp,
struct ehca_wqe *wqe_p,
struct ib_send_wr *send_wr,
u32 sq_map_idx,
int hidden)
{
u32 idx;
u64 dma_length;
struct ehca_av *my_av;
u32 remote_qkey;
struct ehca_qmap_entry *qmap_entry = &qp->sq_map.map[sq_map_idx];
if (unlikely((send_wr->num_sge < 0) ||
(send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) {
ehca_gen_err("Invalid number of WQE SGE. "
"num_sqe=%x max_nr_of_sg=%x",
send_wr->num_sge, qp->ipz_squeue.act_nr_of_sg);
return -EINVAL; /* invalid SG list length */
}
/* clear wqe header until sglist */
memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
wqe_p->work_request_id = replace_wr_id(send_wr->wr_id, sq_map_idx);
qmap_entry->app_wr_id = get_app_wr_id(send_wr->wr_id);
qmap_entry->reported = 0;
qmap_entry->cqe_req = 0;
switch (send_wr->opcode) {
case IB_WR_SEND:
case IB_WR_SEND_WITH_IMM:
wqe_p->optype = WQE_OPTYPE_SEND;
break;
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
wqe_p->optype = WQE_OPTYPE_RDMAWRITE;
break;
case IB_WR_RDMA_READ:
wqe_p->optype = WQE_OPTYPE_RDMAREAD;
break;
default:
ehca_gen_err("Invalid opcode=%x", send_wr->opcode);
return -EINVAL; /* invalid opcode */
}
wqe_p->wqef = (send_wr->opcode) & WQEF_HIGH_NIBBLE;
wqe_p->wr_flag = 0;
if ((send_wr->send_flags & IB_SEND_SIGNALED ||
qp->init_attr.sq_sig_type == IB_SIGNAL_ALL_WR)
&& !hidden) {
wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;
qmap_entry->cqe_req = 1;
}
if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
/* this might not work as long as HW does not support it */
wqe_p->immediate_data = be32_to_cpu(send_wr->ex.imm_data);
wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT;
}
wqe_p->nr_of_data_seg = send_wr->num_sge;
switch (qp->qp_type) {
case IB_QPT_SMI:
case IB_QPT_GSI:
/* no break is intentional here */
case IB_QPT_UD:
/* IB 1.2 spec C10-15 compliance */
remote_qkey = ud_wr(send_wr)->remote_qkey;
if (remote_qkey & 0x80000000)
remote_qkey = qp->qkey;
wqe_p->destination_qp_number = ud_wr(send_wr)->remote_qpn << 8;
wqe_p->local_ee_context_qkey = remote_qkey;
if (unlikely(!ud_wr(send_wr)->ah)) {
ehca_gen_err("ud_wr(send_wr) is NULL. qp=%p", qp);
return -EINVAL;
}
if (unlikely(ud_wr(send_wr)->remote_qpn == 0)) {
ehca_gen_err("dest QP# is 0. qp=%x", qp->real_qp_num);
return -EINVAL;
}
my_av = container_of(ud_wr(send_wr)->ah, struct ehca_av, ib_ah);
wqe_p->u.ud_av.ud_av = my_av->av;
/*
* omitted check of IB_SEND_INLINE
* since HW does not support it
*/
for (idx = 0; idx < send_wr->num_sge; idx++) {
wqe_p->u.ud_av.sg_list[idx].vaddr =
send_wr->sg_list[idx].addr;
wqe_p->u.ud_av.sg_list[idx].lkey =
send_wr->sg_list[idx].lkey;
wqe_p->u.ud_av.sg_list[idx].length =
send_wr->sg_list[idx].length;
} /* eof for idx */
if (qp->qp_type == IB_QPT_SMI ||
qp->qp_type == IB_QPT_GSI)
wqe_p->u.ud_av.ud_av.pmtu = 1;
if (qp->qp_type == IB_QPT_GSI) {
wqe_p->pkeyi = ud_wr(send_wr)->pkey_index;
#ifdef DEBUG_GSI_SEND_WR
trace_ud_wr(ud_wr(send_wr));
#endif /* DEBUG_GSI_SEND_WR */
}
break;
case IB_QPT_UC:
if (send_wr->send_flags & IB_SEND_FENCE)
wqe_p->wr_flag |= WQE_WRFLAG_FENCE;
/* no break is intentional here */
case IB_QPT_RC:
/* TODO: atomic not implemented */
wqe_p->u.nud.remote_virtual_address =
rdma_wr(send_wr)->remote_addr;
wqe_p->u.nud.rkey = rdma_wr(send_wr)->rkey;
/*
* omitted checking of IB_SEND_INLINE
* since HW does not support it
*/
dma_length = 0;
for (idx = 0; idx < send_wr->num_sge; idx++) {
wqe_p->u.nud.sg_list[idx].vaddr =
send_wr->sg_list[idx].addr;
wqe_p->u.nud.sg_list[idx].lkey =
send_wr->sg_list[idx].lkey;
wqe_p->u.nud.sg_list[idx].length =
send_wr->sg_list[idx].length;
dma_length += send_wr->sg_list[idx].length;
} /* eof idx */
wqe_p->u.nud.atomic_1st_op_dma_len = dma_length;
/* unsolicited ack circumvention */
if (send_wr->opcode == IB_WR_RDMA_READ) {
/* on RDMA read, switch on and reset counters */
qp->message_count = qp->packet_count = 0;
qp->unsol_ack_circ = 1;
} else
/* else estimate #packets */
qp->packet_count += (dma_length >> qp->mtu_shift) + 1;
break;
default:
ehca_gen_err("Invalid qptype=%x", qp->qp_type);
return -EINVAL;
}
if (ehca_debug_level >= 3) {
ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp);
ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
}
return 0;
}
/* map_ib_wc_status converts raw cqe_status to ib_wc_status */
static inline void map_ib_wc_status(u32 cqe_status,
enum ib_wc_status *wc_status)
{
if (unlikely(cqe_status & WC_STATUS_ERROR_BIT)) {
switch (cqe_status & 0x3F) {
case 0x01:
case 0x21:
*wc_status = IB_WC_LOC_LEN_ERR;
break;
case 0x02:
case 0x22:
*wc_status = IB_WC_LOC_QP_OP_ERR;
break;
case 0x03:
case 0x23:
*wc_status = IB_WC_LOC_EEC_OP_ERR;
break;
case 0x04:
case 0x24:
*wc_status = IB_WC_LOC_PROT_ERR;
break;
case 0x05:
case 0x25:
*wc_status = IB_WC_WR_FLUSH_ERR;
break;
case 0x06:
*wc_status = IB_WC_MW_BIND_ERR;
break;
case 0x07: /* remote error - look into bits 20:24 */
switch ((cqe_status
& WC_STATUS_REMOTE_ERROR_FLAGS) >> 11) {
case 0x0:
/*
* PSN Sequence Error!
* couldn't find a matching status!
*/
*wc_status = IB_WC_GENERAL_ERR;
break;
case 0x1:
*wc_status = IB_WC_REM_INV_REQ_ERR;
break;
case 0x2:
*wc_status = IB_WC_REM_ACCESS_ERR;
break;
case 0x3:
*wc_status = IB_WC_REM_OP_ERR;
break;
case 0x4:
*wc_status = IB_WC_REM_INV_RD_REQ_ERR;
break;
}
break;
case 0x08:
*wc_status = IB_WC_RETRY_EXC_ERR;
break;
case 0x09:
*wc_status = IB_WC_RNR_RETRY_EXC_ERR;
break;
case 0x0A:
case 0x2D:
*wc_status = IB_WC_REM_ABORT_ERR;
break;
case 0x0B:
case 0x2E:
*wc_status = IB_WC_INV_EECN_ERR;
break;
case 0x0C:
case 0x2F:
*wc_status = IB_WC_INV_EEC_STATE_ERR;
break;
case 0x0D:
*wc_status = IB_WC_BAD_RESP_ERR;
break;
case 0x10:
/* WQE purged */
*wc_status = IB_WC_WR_FLUSH_ERR;
break;
default:
*wc_status = IB_WC_FATAL_ERR;
}
} else
*wc_status = IB_WC_SUCCESS;
}
static inline int post_one_send(struct ehca_qp *my_qp,
struct ib_send_wr *cur_send_wr,
int hidden)
{
struct ehca_wqe *wqe_p;
int ret;
u32 sq_map_idx;
u64 start_offset = my_qp->ipz_squeue.current_q_offset;
/* get pointer next to free WQE */
wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
if (unlikely(!wqe_p)) {
/* too many posted work requests: queue overflow */
ehca_err(my_qp->ib_qp.device, "Too many posted WQEs "
"qp_num=%x", my_qp->ib_qp.qp_num);
return -ENOMEM;
}
/*
* Get the index of the WQE in the send queue. The same index is used
* for writing into the sq_map.
*/
sq_map_idx = start_offset / my_qp->ipz_squeue.qe_size;
/* write a SEND WQE into the QUEUE */
ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, sq_map_idx, hidden);
/*
* if something failed,
* reset the free entry pointer to the start value
*/
if (unlikely(ret)) {
my_qp->ipz_squeue.current_q_offset = start_offset;
ehca_err(my_qp->ib_qp.device, "Could not write WQE "
"qp_num=%x", my_qp->ib_qp.qp_num);
return -EINVAL;
}
return 0;
}
int ehca_post_send(struct ib_qp *qp,
struct ib_send_wr *send_wr,
struct ib_send_wr **bad_send_wr)
{
struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
int wqe_cnt = 0;
int ret = 0;
unsigned long flags;
/* Reject WR if QP is in RESET, INIT or RTR state */
if (unlikely(my_qp->state < IB_QPS_RTS)) {
ehca_err(qp->device, "Invalid QP state qp_state=%d qpn=%x",
my_qp->state, qp->qp_num);
ret = -EINVAL;
goto out;
}
/* LOCK the QUEUE */
spin_lock_irqsave(&my_qp->spinlock_s, flags);
/* Send an empty extra RDMA read if:
* 1) there has been an RDMA read on this connection before
* 2) no RDMA read occurred for ACK_CIRC_THRESHOLD link packets
* 3) we can be sure that any previous extra RDMA read has been
* processed so we don't overflow the SQ
*/
if (unlikely(my_qp->unsol_ack_circ &&
my_qp->packet_count > ACK_CIRC_THRESHOLD &&
my_qp->message_count > my_qp->init_attr.cap.max_send_wr)) {
/* insert an empty RDMA READ to fix up the remote QP state */
struct ib_send_wr circ_wr;
memset(&circ_wr, 0, sizeof(circ_wr));
circ_wr.opcode = IB_WR_RDMA_READ;
post_one_send(my_qp, &circ_wr, 1); /* ignore retcode */
wqe_cnt++;
ehca_dbg(qp->device, "posted circ wr qp_num=%x", qp->qp_num);
my_qp->message_count = my_qp->packet_count = 0;
}
/* process the list of send requests */
while (send_wr) {
ret = post_one_send(my_qp, send_wr, 0);
if (unlikely(ret))
goto post_send_exit0;
wqe_cnt++;
send_wr = send_wr->next;
}
post_send_exit0:
iosync(); /* serialize GAL register access */
hipz_update_sqa(my_qp, wqe_cnt);
if (unlikely(ret || ehca_debug_level >= 2))
ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
my_qp, qp->qp_num, wqe_cnt, ret);
my_qp->message_count += wqe_cnt;
spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
out:
if (ret)
*bad_send_wr = send_wr;
return ret;
}
static int internal_post_recv(struct ehca_qp *my_qp,
struct ib_device *dev,
struct ib_recv_wr *recv_wr,
struct ib_recv_wr **bad_recv_wr)
{
struct ehca_wqe *wqe_p;
int wqe_cnt = 0;
int ret = 0;
u32 rq_map_idx;
unsigned long flags;
struct ehca_qmap_entry *qmap_entry;
if (unlikely(!HAS_RQ(my_qp))) {
ehca_err(dev, "QP has no RQ ehca_qp=%p qp_num=%x ext_type=%d",
my_qp, my_qp->real_qp_num, my_qp->ext_type);
ret = -ENODEV;
goto out;
}
/* LOCK the QUEUE */
spin_lock_irqsave(&my_qp->spinlock_r, flags);
/* process the list of receive requests */
while (recv_wr) {
u64 start_offset = my_qp->ipz_rqueue.current_q_offset;
/* get pointer to the next free WQE */
wqe_p = ipz_qeit_get_inc(&my_qp->ipz_rqueue);
if (unlikely(!wqe_p)) {
/* too many posted work requests: queue overflow */
ret = -ENOMEM;
ehca_err(dev, "Too many posted WQEs "
"qp_num=%x", my_qp->real_qp_num);
goto post_recv_exit0;
}
/*
* Get the index of the WQE in the recv queue. The same index
* is used for writing into the rq_map.
*/
rq_map_idx = start_offset / my_qp->ipz_rqueue.qe_size;
/* write a RECV WQE into the QUEUE */
ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, recv_wr,
rq_map_idx);
/*
* if something failed,
* reset the free entry pointer to the start value
*/
if (unlikely(ret)) {
my_qp->ipz_rqueue.current_q_offset = start_offset;
ret = -EINVAL;
ehca_err(dev, "Could not write WQE "
"qp_num=%x", my_qp->real_qp_num);
goto post_recv_exit0;
}
qmap_entry = &my_qp->rq_map.map[rq_map_idx];
qmap_entry->app_wr_id = get_app_wr_id(recv_wr->wr_id);
qmap_entry->reported = 0;
qmap_entry->cqe_req = 1;
wqe_cnt++;
recv_wr = recv_wr->next;
} /* end of recv_wr loop */
post_recv_exit0:
iosync(); /* serialize GAL register access */
hipz_update_rqa(my_qp, wqe_cnt);
if (unlikely(ret || ehca_debug_level >= 2))
ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
my_qp, my_qp->real_qp_num, wqe_cnt, ret);
spin_unlock_irqrestore(&my_qp->spinlock_r, flags);
out:
if (ret)
*bad_recv_wr = recv_wr;
return ret;
}
int ehca_post_recv(struct ib_qp *qp,
struct ib_recv_wr *recv_wr,
struct ib_recv_wr **bad_recv_wr)
{
struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
/* Reject WR if QP is in RESET state */
if (unlikely(my_qp->state == IB_QPS_RESET)) {
ehca_err(qp->device, "Invalid QP state qp_state=%d qpn=%x",
my_qp->state, qp->qp_num);
*bad_recv_wr = recv_wr;
return -EINVAL;
}
return internal_post_recv(my_qp, qp->device, recv_wr, bad_recv_wr);
}
int ehca_post_srq_recv(struct ib_srq *srq,
struct ib_recv_wr *recv_wr,
struct ib_recv_wr **bad_recv_wr)
{
return internal_post_recv(container_of(srq, struct ehca_qp, ib_srq),
srq->device, recv_wr, bad_recv_wr);
}
/*
* The ib_wc_opcode table converts an ehca WC opcode to its IB equivalent.
* Each entry stores the IB opcode plus one, because zero marks an invalid
* ehca opcode; a value read from the table must therefore be decremented.
*/
static const u8 ib_wc_opcode[255] = {
[0x01] = IB_WC_RECV+1,
[0x02] = IB_WC_RECV_RDMA_WITH_IMM+1,
[0x08] = IB_WC_FETCH_ADD+1,
[0x10] = IB_WC_COMP_SWAP+1,
[0x20] = IB_WC_RDMA_WRITE+1,
[0x40] = IB_WC_RDMA_READ+1,
[0x80] = IB_WC_SEND+1
};
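/*
* Sketch (hypothetical helper, not part of the original driver): with the
* +1 encoding above, a lookup simply decrements the stored value and
* yields -1 for an invalid ehca opcode.
*/
static inline int ehca_decode_wc_opcode(u8 optype)
{
return (int)ib_wc_opcode[optype] - 1;
}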
/* internal function to poll one entry of cq */
static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
{
int ret = 0, qmap_tail_idx;
struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
struct ehca_cqe *cqe;
struct ehca_qp *my_qp;
struct ehca_qmap_entry *qmap_entry;
struct ehca_queue_map *qmap;
int cqe_count = 0, is_error;
repoll:
cqe = (struct ehca_cqe *)
ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
if (!cqe) {
ret = -EAGAIN;
if (ehca_debug_level >= 3)
ehca_dbg(cq->device, "Completion queue is empty "
"my_cq=%p cq_num=%x", my_cq, my_cq->cq_number);
goto poll_cq_one_exit0;
}
/* prevents loads being reordered across this point */
rmb();
cqe_count++;
if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
struct ehca_qp *qp;
int purgeflag;
unsigned long flags;
qp = ehca_cq_get_qp(my_cq, cqe->local_qp_number);
if (!qp) {
ehca_err(cq->device, "cq_num=%x qp_num=%x "
"could not find qp -> ignore cqe",
my_cq->cq_number, cqe->local_qp_number);
ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
my_cq->cq_number, cqe->local_qp_number);
/* ignore this purged cqe */
goto repoll;
}
spin_lock_irqsave(&qp->spinlock_s, flags);
purgeflag = qp->sqerr_purgeflag;
spin_unlock_irqrestore(&qp->spinlock_s, flags);
if (purgeflag) {
ehca_dbg(cq->device,
"Got CQE with purged bit qp_num=%x src_qp=%x",
cqe->local_qp_number, cqe->remote_qp_number);
if (ehca_debug_level >= 2)
ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
cqe->local_qp_number,
cqe->remote_qp_number);
/*
* ignore this CQE to avoid reporting duplicate CQEs
* for the bad WQE that caused the send queue error,
* and clear the purge flag
*/
qp->sqerr_purgeflag = 0;
goto repoll;
}
}
is_error = cqe->status & WC_STATUS_ERROR_BIT;
/* trace error CQEs if debug_level >= 1, trace all CQEs if >= 3 */
if (unlikely(ehca_debug_level >= 3 || (ehca_debug_level && is_error))) {
ehca_dbg(cq->device,
"Received %sCOMPLETION ehca_cq=%p cq_num=%x -----",
is_error ? "ERROR " : "", my_cq, my_cq->cq_number);
ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
my_cq, my_cq->cq_number);
ehca_dbg(cq->device,
"ehca_cq=%p cq_num=%x -------------------------",
my_cq, my_cq->cq_number);
}
read_lock(&ehca_qp_idr_lock);
my_qp = idr_find(&ehca_qp_idr, cqe->qp_token);
read_unlock(&ehca_qp_idr_lock);
if (!my_qp)
goto repoll;
wc->qp = &my_qp->ib_qp;
qmap_tail_idx = get_app_wr_id(cqe->work_request_id);
if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT))
/* We got a send completion. */
qmap = &my_qp->sq_map;
else
/* We got a receive completion. */
qmap = &my_qp->rq_map;
/* advance the tail pointer */
qmap->tail = qmap_tail_idx;
if (is_error) {
/*
* set left_to_poll to 0 because in error state, we will not
* get any additional CQEs
*/
my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
my_qp->sq_map.entries);
my_qp->sq_map.left_to_poll = 0;
ehca_add_to_err_list(my_qp, 1);
my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
my_qp->rq_map.entries);
my_qp->rq_map.left_to_poll = 0;
if (HAS_RQ(my_qp))
ehca_add_to_err_list(my_qp, 0);
}
qmap_entry = &qmap->map[qmap_tail_idx];
if (qmap_entry->reported) {
ehca_warn(cq->device, "Double cqe on qp_num=%#x",
my_qp->real_qp_num);
/* found a double cqe, discard it and read next one */
goto repoll;
}
wc->wr_id = replace_wr_id(cqe->work_request_id, qmap_entry->app_wr_id);
qmap_entry->reported = 1;
/* if left_to_poll is decremented to 0, add the QP to the error list */
if (qmap->left_to_poll > 0) {
qmap->left_to_poll--;
if ((my_qp->sq_map.left_to_poll == 0) &&
(my_qp->rq_map.left_to_poll == 0)) {
ehca_add_to_err_list(my_qp, 1);
if (HAS_RQ(my_qp))
ehca_add_to_err_list(my_qp, 0);
}
}
/* eval ib_wc_opcode */
wc->opcode = ib_wc_opcode[cqe->optype]-1;
if (unlikely(wc->opcode == -1)) {
ehca_err(cq->device, "Invalid cqe->OPType=%x cqe->status=%x "
"ehca_cq=%p cq_num=%x",
cqe->optype, cqe->status, my_cq, my_cq->cq_number);
/* dump cqe for other infos */
ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
my_cq, my_cq->cq_number);
/* also update the free entry count adder (FECA) so this entry is discarded */
goto repoll;
}
/* eval ib_wc_status */
if (unlikely(is_error)) {
/* complete with errors */
map_ib_wc_status(cqe->status, &wc->status);
wc->vendor_err = wc->status;
} else
wc->status = IB_WC_SUCCESS;
wc->byte_len = cqe->nr_bytes_transferred;
wc->pkey_index = cqe->pkey_index;
wc->slid = cqe->rlid;
wc->dlid_path_bits = cqe->dlid;
wc->src_qp = cqe->remote_qp_number;
/*
* HW has "Immed data present" and "GRH present" in bits 6 and 5.
* SW defines those in bits 1 and 0, so we can just shift and mask.
*/
wc->wc_flags = (cqe->w_completion_flags >> 5) & 3;
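/*
* e.g. (illustration): w_completion_flags = 0x60, i.e. both HW bits set,
* yields wc_flags = (0x60 >> 5) & 3 = 3 = IB_WC_GRH | IB_WC_WITH_IMM
*/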
wc->ex.imm_data = cpu_to_be32(cqe->immediate_data);
wc->sl = cqe->service_level;
poll_cq_one_exit0:
if (cqe_count > 0)
hipz_update_feca(my_cq, cqe_count);
return ret;
}
static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,
struct ib_wc *wc, int num_entries,
struct ipz_queue *ipz_queue, int on_sq)
{
int nr = 0;
struct ehca_wqe *wqe;
u64 offset;
struct ehca_queue_map *qmap;
struct ehca_qmap_entry *qmap_entry;
if (on_sq)
qmap = &my_qp->sq_map;
else
qmap = &my_qp->rq_map;
qmap_entry = &qmap->map[qmap->next_wqe_idx];
while ((nr < num_entries) && (qmap_entry->reported == 0)) {
/* generate flush CQE */
memset(wc, 0, sizeof(*wc));
offset = qmap->next_wqe_idx * ipz_queue->qe_size;
wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset);
if (!wqe) {
ehca_err(cq->device, "Invalid wqe offset=%#llx on "
"qp_num=%#x", offset, my_qp->real_qp_num);
return nr;
}
wc->wr_id = replace_wr_id(wqe->work_request_id,
qmap_entry->app_wr_id);
if (on_sq) {
switch (wqe->optype) {
case WQE_OPTYPE_SEND:
wc->opcode = IB_WC_SEND;
break;
case WQE_OPTYPE_RDMAWRITE:
wc->opcode = IB_WC_RDMA_WRITE;
break;
case WQE_OPTYPE_RDMAREAD:
wc->opcode = IB_WC_RDMA_READ;
break;
default:
ehca_err(cq->device, "Invalid optype=%x",
wqe->optype);
return nr;
}
} else
wc->opcode = IB_WC_RECV;
if (wqe->wr_flag & WQE_WRFLAG_IMM_DATA_PRESENT) {
wc->ex.imm_data = wqe->immediate_data;
wc->wc_flags |= IB_WC_WITH_IMM;
}
wc->status = IB_WC_WR_FLUSH_ERR;
wc->qp = &my_qp->ib_qp;
/* mark as reported and advance next_wqe pointer */
qmap_entry->reported = 1;
qmap->next_wqe_idx = next_index(qmap->next_wqe_idx,
qmap->entries);
qmap_entry = &qmap->map[qmap->next_wqe_idx];
wc++; nr++;
}
return nr;
}
int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
{
struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
int nr;
struct ehca_qp *err_qp;
struct ib_wc *current_wc = wc;
int ret = 0;
unsigned long flags;
int entries_left = num_entries;
if (num_entries < 1) {
ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p "
"cq_num=%x", num_entries, my_cq, my_cq->cq_number);
ret = -EINVAL;
goto poll_cq_exit0;
}
spin_lock_irqsave(&my_cq->spinlock, flags);
/* generate flush cqes for send queues */
list_for_each_entry(err_qp, &my_cq->sqp_err_list, sq_err_node) {
nr = generate_flush_cqes(err_qp, cq, current_wc, entries_left,
&err_qp->ipz_squeue, 1);
entries_left -= nr;
current_wc += nr;
if (entries_left == 0)
break;
}
/* generate flush cqes for receive queues */
list_for_each_entry(err_qp, &my_cq->rqp_err_list, rq_err_node) {
nr = generate_flush_cqes(err_qp, cq, current_wc, entries_left,
&err_qp->ipz_rqueue, 0);
entries_left -= nr;
current_wc += nr;
if (entries_left == 0)
break;
}
for (nr = 0; nr < entries_left; nr++) {
ret = ehca_poll_cq_one(cq, current_wc);
if (ret)
break;
current_wc++;
} /* end of poll loop */
entries_left -= nr;
spin_unlock_irqrestore(&my_cq->spinlock, flags);
if (ret == -EAGAIN || !ret)
ret = num_entries - entries_left;
poll_cq_exit0:
return ret;
}
int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags)
{
struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
int ret = 0;
switch (notify_flags & IB_CQ_SOLICITED_MASK) {
case IB_CQ_SOLICITED:
hipz_set_cqx_n0(my_cq, 1);
break;
case IB_CQ_NEXT_COMP:
hipz_set_cqx_n1(my_cq, 1);
break;
default:
return -EINVAL;
}
if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
unsigned long spl_flags;
spin_lock_irqsave(&my_cq->spinlock, spl_flags);
ret = ipz_qeit_is_valid(&my_cq->ipz_queue);
spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
}
return ret;
}

View file

@ -1,245 +0,0 @@
/*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* SQP functions
*
* Authors: Khadija Souissi <souissi@de.ibm.com>
* Heiko J Schick <schickhj@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <rdma/ib_mad.h>
#include "ehca_classes.h"
#include "ehca_tools.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
#define IB_MAD_STATUS_REDIRECT cpu_to_be16(0x0002)
#define IB_MAD_STATUS_UNSUP_VERSION cpu_to_be16(0x0004)
#define IB_MAD_STATUS_UNSUP_METHOD cpu_to_be16(0x0008)
#define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001)
/**
* ehca_define_sqp - Defines special queue pair 1 (GSI QP). Once the special
* queue pair has been created successfully, the corresponding port becomes
* active.
*
* Defining special queue pair 0 (SMI QP) is not yet supported.
*
* @qp_init_attr: Queue pair init attributes with port and queue pair type
*/
u64 ehca_define_sqp(struct ehca_shca *shca,
struct ehca_qp *ehca_qp,
struct ib_qp_init_attr *qp_init_attr)
{
u32 pma_qp_nr, bma_qp_nr;
u64 ret;
u8 port = qp_init_attr->port_num;
int counter;
shca->sport[port - 1].port_state = IB_PORT_DOWN;
switch (qp_init_attr->qp_type) {
case IB_QPT_SMI:
/* function not supported yet */
break;
case IB_QPT_GSI:
ret = hipz_h_define_aqp1(shca->ipz_hca_handle,
ehca_qp->ipz_qp_handle,
ehca_qp->galpas.kernel,
(u32) qp_init_attr->port_num,
&pma_qp_nr, &bma_qp_nr);
if (ret != H_SUCCESS) {
ehca_err(&shca->ib_device,
"Can't define AQP1 for port %x. h_ret=%lli",
port, ret);
return ret;
}
shca->sport[port - 1].pma_qp_nr = pma_qp_nr;
ehca_dbg(&shca->ib_device, "port=%x pma_qp_nr=%x",
port, pma_qp_nr);
break;
default:
ehca_err(&shca->ib_device, "invalid qp_type=%x",
qp_init_attr->qp_type);
return H_PARAMETER;
}
if (ehca_nr_ports < 0) /* autodetect mode */
return H_SUCCESS;
for (counter = 0;
shca->sport[port - 1].port_state != IB_PORT_ACTIVE &&
counter < ehca_port_act_time;
counter++) {
ehca_dbg(&shca->ib_device, "... wait until port %x is active",
port);
msleep_interruptible(1000);
}
if (counter == ehca_port_act_time) {
ehca_err(&shca->ib_device, "Port %x is not active.", port);
return H_HARDWARE;
}
return H_SUCCESS;
}
struct ib_perf {
struct ib_mad_hdr mad_hdr;
u8 reserved[40];
u8 data[192];
} __attribute__ ((packed));
/* TC/SL/FL packed into 32 bits, as in ClassPortInfo */
struct tcslfl {
u32 tc:8;
u32 sl:4;
u32 fl:20;
} __attribute__ ((packed));
/* IP Version/TC/FL packed into 32 bits, as in GRH */
struct vertcfl {
u32 ver:4;
u32 tc:8;
u32 fl:20;
} __attribute__ ((packed));
static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
const struct ib_perf *in_perf = (const struct ib_perf *)in_mad;
struct ib_perf *out_perf = (struct ib_perf *)out_mad;
struct ib_class_port_info *poi =
(struct ib_class_port_info *)out_perf->data;
struct tcslfl *tcslfl =
(struct tcslfl *)&poi->redirect_tcslfl;
struct ehca_shca *shca =
container_of(ibdev, struct ehca_shca, ib_device);
struct ehca_sport *sport = &shca->sport[port_num - 1];
ehca_dbg(ibdev, "method=%x", in_perf->mad_hdr.method);
*out_mad = *in_mad;
if (in_perf->mad_hdr.class_version != 1) {
ehca_warn(ibdev, "Unsupported class_version=%x",
in_perf->mad_hdr.class_version);
out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_VERSION;
goto perf_reply;
}
switch (in_perf->mad_hdr.method) {
case IB_MGMT_METHOD_GET:
case IB_MGMT_METHOD_SET:
/* set class port info for redirection */
out_perf->mad_hdr.attr_id = IB_PMA_CLASS_PORT_INFO;
out_perf->mad_hdr.status = IB_MAD_STATUS_REDIRECT;
memset(poi, 0, sizeof(*poi));
poi->base_version = 1;
poi->class_version = 1;
poi->resp_time_value = 18;
/* copy local routing information from WC where applicable */
tcslfl->sl = in_wc->sl;
poi->redirect_lid =
sport->saved_attr.lid | in_wc->dlid_path_bits;
poi->redirect_qp = sport->pma_qp_nr;
poi->redirect_qkey = IB_QP1_QKEY;
ehca_query_pkey(ibdev, port_num, in_wc->pkey_index,
&poi->redirect_pkey);
/* if request was globally routed, copy route info */
if (in_grh) {
const struct vertcfl *vertcfl =
(const struct vertcfl *)&in_grh->version_tclass_flow;
memcpy(poi->redirect_gid, in_grh->dgid.raw,
sizeof(poi->redirect_gid));
tcslfl->tc = vertcfl->tc;
tcslfl->fl = vertcfl->fl;
} else
/* else only fill in default GID */
ehca_query_gid(ibdev, port_num, 0,
(union ib_gid *)&poi->redirect_gid);
ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x",
sport->saved_attr.lid, sport->pma_qp_nr);
break;
case IB_MGMT_METHOD_GET_RESP:
return IB_MAD_RESULT_FAILURE;
default:
out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_METHOD;
break;
}
perf_reply:
out_perf->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad_hdr *in, size_t in_mad_size,
struct ib_mad_hdr *out, size_t *out_mad_size,
u16 *out_mad_pkey_index)
{
int ret;
const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out;
if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
*out_mad_size != sizeof(*out_mad)))
return IB_MAD_RESULT_FAILURE;
if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc)
return IB_MAD_RESULT_FAILURE;
/* accept only PMA requests */
if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
return IB_MAD_RESULT_SUCCESS;
ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp);
ret = ehca_process_perf(ibdev, port_num, in_wc, in_grh,
in_mad, out_mad);
return ret;
}

View file

@ -1,155 +0,0 @@
/*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* auxiliary functions
*
* Authors: Christoph Raisch <raisch@de.ibm.com>
* Hoang-Nam Nguyen <hnguyen@de.ibm.com>
* Khadija Souissi <souissik@de.ibm.com>
* Waleri Fomin <fomin@de.ibm.com>
* Heiko J Schick <schickhj@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef EHCA_TOOLS_H
#define EHCA_TOOLS_H
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/atomic.h>
#include <asm/ibmebus.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/hvcall.h>
extern int ehca_debug_level;
#define ehca_dbg(ib_dev, format, arg...) \
do { \
if (unlikely(ehca_debug_level)) \
dev_printk(KERN_DEBUG, (ib_dev)->dma_device, \
"PU%04x EHCA_DBG:%s " format "\n", \
raw_smp_processor_id(), __func__, \
## arg); \
} while (0)
#define ehca_info(ib_dev, format, arg...) \
dev_info((ib_dev)->dma_device, "PU%04x EHCA_INFO:%s " format "\n", \
raw_smp_processor_id(), __func__, ## arg)
#define ehca_warn(ib_dev, format, arg...) \
dev_warn((ib_dev)->dma_device, "PU%04x EHCA_WARN:%s " format "\n", \
raw_smp_processor_id(), __func__, ## arg)
#define ehca_err(ib_dev, format, arg...) \
dev_err((ib_dev)->dma_device, "PU%04x EHCA_ERR:%s " format "\n", \
raw_smp_processor_id(), __func__, ## arg)
/* use this one only if no ib_dev is available */
#define ehca_gen_dbg(format, arg...) \
do { \
if (unlikely(ehca_debug_level)) \
printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format "\n", \
raw_smp_processor_id(), __func__, ## arg); \
} while (0)
#define ehca_gen_warn(format, arg...) \
printk(KERN_INFO "PU%04x EHCA_WARN:%s " format "\n", \
raw_smp_processor_id(), __func__, ## arg)
#define ehca_gen_err(format, arg...) \
printk(KERN_ERR "PU%04x EHCA_ERR:%s " format "\n", \
raw_smp_processor_id(), __func__, ## arg)
/**
* ehca_dmp - printk a memory block whose length is n*8 bytes.
* Each line has the following layout:
* <format string> adr=X ofs=Y <8 bytes hex> <8 bytes hex>
*/
#define ehca_dmp(adr, len, format, args...) \
do { \
unsigned int x; \
unsigned int l = (unsigned int)(len); \
unsigned char *deb = (unsigned char *)(adr); \
for (x = 0; x < l; x += 16) { \
printk(KERN_INFO "EHCA_DMP:%s " format \
" adr=%p ofs=%04x %016llx %016llx\n", \
__func__, ##args, deb, x, \
*((u64 *)&deb[0]), *((u64 *)&deb[8])); \
deb += 16; \
} \
} while (0)
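/*
* usage sketch (arguments hypothetical): dump a 64-byte CQE as four
* 16-byte lines, each prefixed with the given format string:
*
* ehca_dmp(cqe, 64, "cq_num=%x", cq_number);
*/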
/* define a bitmask, little endian version */
#define EHCA_BMASK(pos, length) (((pos) << 16) + (length))
/* define a bitmask, the ibm way... */
#define EHCA_BMASK_IBM(from, to) (((63 - to) << 16) + ((to) - (from) + 1))
/* internal function, don't use */
#define EHCA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)
/* internal function, don't use */
#define EHCA_BMASK_MASK(mask) (~0ULL >> ((64 - (mask)) & 0xffff))
/**
* EHCA_BMASK_SET - return the value shifted and masked according to mask
* variable |= EHCA_BMASK_SET(MY_MASK, 0x4711) ORs the bits into variable
* variable &= ~EHCA_BMASK_SET(MY_MASK, -1) clears the bits covered by the
* mask in variable
*/
#define EHCA_BMASK_SET(mask, value) \
((EHCA_BMASK_MASK(mask) & ((u64)(value))) << EHCA_BMASK_SHIFTPOS(mask))
/**
* EHCA_BMASK_GET - extract a parameter from value by mask
*/
#define EHCA_BMASK_GET(mask, value) \
(EHCA_BMASK_MASK(mask) & (((u64)(value)) >> EHCA_BMASK_SHIFTPOS(mask)))
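/*
* worked example (illustrative, not from the original header): bits 13..15
* in IBM numbering are EHCA_BMASK_IBM(13, 15), i.e. shift position
* 63 - 15 = 48 with a 3-bit wide mask:
*
* u64 reg = EHCA_BMASK_SET(EHCA_BMASK_IBM(13, 15), 0x5); // 0x5ULL << 48
* u64 val = EHCA_BMASK_GET(EHCA_BMASK_IBM(13, 15), reg); // val == 0x5
*/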
/* Converts ehca to ib return code */
int ehca2ib_return_code(u64 ehca_rc);
#endif /* EHCA_TOOLS_H */

View file

@ -1,309 +0,0 @@
/*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* userspace support verbs
*
* Authors: Christoph Raisch <raisch@de.ibm.com>
* Hoang-Nam Nguyen <hnguyen@de.ibm.com>
* Heiko J Schick <schickhj@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/slab.h>
#include "ehca_classes.h"
#include "ehca_iverbs.h"
#include "ehca_mrmw.h"
#include "ehca_tools.h"
#include "hcp_if.h"
struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
struct ib_udata *udata)
{
struct ehca_ucontext *my_context;
my_context = kzalloc(sizeof *my_context, GFP_KERNEL);
if (!my_context) {
ehca_err(device, "Out of memory device=%p", device);
return ERR_PTR(-ENOMEM);
}
return &my_context->ib_ucontext;
}
int ehca_dealloc_ucontext(struct ib_ucontext *context)
{
kfree(container_of(context, struct ehca_ucontext, ib_ucontext));
return 0;
}
static void ehca_mm_open(struct vm_area_struct *vma)
{
u32 *count = (u32 *)vma->vm_private_data;
if (!count) {
ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
vma->vm_start, vma->vm_end);
return;
}
(*count)++;
if (!(*count))
ehca_gen_err("Use count overflow vm_start=%lx vm_end=%lx",
vma->vm_start, vma->vm_end);
ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
vma->vm_start, vma->vm_end, *count);
}
static void ehca_mm_close(struct vm_area_struct *vma)
{
u32 *count = (u32 *)vma->vm_private_data;
if (!count) {
ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
vma->vm_start, vma->vm_end);
return;
}
(*count)--;
ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
vma->vm_start, vma->vm_end, *count);
}
static const struct vm_operations_struct vm_ops = {
.open = ehca_mm_open,
.close = ehca_mm_close,
};
static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
u32 *mm_count)
{
int ret;
u64 vsize, physical;
vsize = vma->vm_end - vma->vm_start;
if (vsize < EHCA_PAGESIZE) {
ehca_gen_err("invalid vsize=%lx", vma->vm_end - vma->vm_start);
return -EINVAL;
}
physical = galpas->user.fw_handle;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
ehca_gen_dbg("vsize=%llx physical=%llx", vsize, physical);
/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT,
vma->vm_page_prot);
if (unlikely(ret)) {
ehca_gen_err("remap_pfn_range() failed ret=%i", ret);
return -ENOMEM;
}
vma->vm_private_data = mm_count;
(*mm_count)++;
vma->vm_ops = &vm_ops;
return 0;
}
static int ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue,
u32 *mm_count)
{
int ret;
u64 start, ofs;
struct page *page;
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
start = vma->vm_start;
for (ofs = 0; ofs < queue->queue_length; ofs += PAGE_SIZE) {
u64 virt_addr = (u64)ipz_qeit_calc(queue, ofs);
page = virt_to_page(virt_addr);
ret = vm_insert_page(vma, start, page);
if (unlikely(ret)) {
ehca_gen_err("vm_insert_page() failed rc=%i", ret);
return ret;
}
start += PAGE_SIZE;
}
vma->vm_private_data = mm_count;
(*mm_count)++;
vma->vm_ops = &vm_ops;
return 0;
}
static int ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq,
u32 rsrc_type)
{
int ret;
switch (rsrc_type) {
case 0: /* galpa fw handle */
ehca_dbg(cq->ib_cq.device, "cq_num=%x fw", cq->cq_number);
ret = ehca_mmap_fw(vma, &cq->galpas, &cq->mm_count_galpa);
if (unlikely(ret)) {
ehca_err(cq->ib_cq.device,
"ehca_mmap_fw() failed rc=%i cq_num=%x",
ret, cq->cq_number);
return ret;
}
break;
case 1: /* cq queue_addr */
ehca_dbg(cq->ib_cq.device, "cq_num=%x queue", cq->cq_number);
ret = ehca_mmap_queue(vma, &cq->ipz_queue, &cq->mm_count_queue);
if (unlikely(ret)) {
ehca_err(cq->ib_cq.device,
"ehca_mmap_queue() failed rc=%i cq_num=%x",
ret, cq->cq_number);
return ret;
}
break;
default:
ehca_err(cq->ib_cq.device, "bad resource type=%x cq_num=%x",
rsrc_type, cq->cq_number);
return -EINVAL;
}
return 0;
}
static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
u32 rsrc_type)
{
int ret;
switch (rsrc_type) {
case 0: /* galpa fw handle */
ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num);
ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa);
if (unlikely(ret)) {
ehca_err(qp->ib_qp.device,
"remap_pfn_range() failed ret=%i qp_num=%x",
ret, qp->ib_qp.qp_num);
return -ENOMEM;
}
break;
case 1: /* qp rqueue_addr */
ehca_dbg(qp->ib_qp.device, "qp_num=%x rq", qp->ib_qp.qp_num);
ret = ehca_mmap_queue(vma, &qp->ipz_rqueue,
&qp->mm_count_rqueue);
if (unlikely(ret)) {
ehca_err(qp->ib_qp.device,
"ehca_mmap_queue(rq) failed rc=%i qp_num=%x",
ret, qp->ib_qp.qp_num);
return ret;
}
break;
case 2: /* qp squeue_addr */
ehca_dbg(qp->ib_qp.device, "qp_num=%x sq", qp->ib_qp.qp_num);
ret = ehca_mmap_queue(vma, &qp->ipz_squeue,
&qp->mm_count_squeue);
if (unlikely(ret)) {
ehca_err(qp->ib_qp.device,
"ehca_mmap_queue(sq) failed rc=%i qp_num=%x",
ret, qp->ib_qp.qp_num);
return ret;
}
break;
default:
ehca_err(qp->ib_qp.device, "bad resource type=%x qp=num=%x",
rsrc_type, qp->ib_qp.qp_num);
return -EINVAL;
}
return 0;
}
int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
u64 fileoffset = vma->vm_pgoff;
u32 idr_handle = fileoffset & 0x1FFFFFF;
u32 q_type = (fileoffset >> 27) & 0x1; /* CQ, QP,... */
u32 rsrc_type = (fileoffset >> 25) & 0x3; /* sq,rq,cmnd_window */
int ret;
struct ehca_cq *cq;
struct ehca_qp *qp;
struct ib_uobject *uobject;
switch (q_type) {
case 0: /* CQ */
read_lock(&ehca_cq_idr_lock);
cq = idr_find(&ehca_cq_idr, idr_handle);
read_unlock(&ehca_cq_idr_lock);
/* make sure this mmap really belongs to the authorized user */
if (!cq)
return -EINVAL;
if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
return -EINVAL;
ret = ehca_mmap_cq(vma, cq, rsrc_type);
if (unlikely(ret)) {
ehca_err(cq->ib_cq.device,
"ehca_mmap_cq() failed rc=%i cq_num=%x",
ret, cq->cq_number);
return ret;
}
break;
case 1: /* QP */
read_lock(&ehca_qp_idr_lock);
qp = idr_find(&ehca_qp_idr, idr_handle);
read_unlock(&ehca_qp_idr_lock);
/* make sure this mmap really belongs to the authorized user */
if (!qp)
return -EINVAL;
uobject = IS_SRQ(qp) ? qp->ib_srq.uobject : qp->ib_qp.uobject;
if (!uobject || uobject->context != context)
return -EINVAL;
ret = ehca_mmap_qp(vma, qp, rsrc_type);
if (unlikely(ret)) {
ehca_err(qp->ib_qp.device,
"ehca_mmap_qp() failed rc=%i qp_num=%x",
ret, qp->ib_qp.qp_num);
return ret;
}
break;
default:
ehca_gen_err("bad queue type %x", q_type);
return -EINVAL;
}
return 0;
}

View file

@ -1,949 +0,0 @@
/*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* Firmware Infiniband Interface code for POWER
*
* Authors: Christoph Raisch <raisch@de.ibm.com>
* Hoang-Nam Nguyen <hnguyen@de.ibm.com>
* Joachim Fenkes <fenkes@de.ibm.com>
* Gerd Bayer <gerd.bayer@de.ibm.com>
* Waleri Fomin <fomin@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <asm/hvcall.h>
#include "ehca_tools.h"
#include "hcp_if.h"
#include "hcp_phyp.h"
#include "hipz_fns.h"
#include "ipz_pt_fn.h"
#define H_ALL_RES_QP_ENHANCED_OPS EHCA_BMASK_IBM(9, 11)
#define H_ALL_RES_QP_PTE_PIN EHCA_BMASK_IBM(12, 12)
#define H_ALL_RES_QP_SERVICE_TYPE EHCA_BMASK_IBM(13, 15)
#define H_ALL_RES_QP_STORAGE EHCA_BMASK_IBM(16, 17)
#define H_ALL_RES_QP_LL_RQ_CQE_POSTING EHCA_BMASK_IBM(18, 18)
#define H_ALL_RES_QP_LL_SQ_CQE_POSTING EHCA_BMASK_IBM(19, 21)
#define H_ALL_RES_QP_SIGNALING_TYPE EHCA_BMASK_IBM(22, 23)
#define H_ALL_RES_QP_UD_AV_LKEY_CTRL EHCA_BMASK_IBM(31, 31)
#define H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE EHCA_BMASK_IBM(32, 35)
#define H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE EHCA_BMASK_IBM(36, 39)
#define H_ALL_RES_QP_RESOURCE_TYPE EHCA_BMASK_IBM(56, 63)
#define H_ALL_RES_QP_MAX_OUTST_SEND_WR EHCA_BMASK_IBM(0, 15)
#define H_ALL_RES_QP_MAX_OUTST_RECV_WR EHCA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_MAX_SEND_SGE EHCA_BMASK_IBM(32, 39)
#define H_ALL_RES_QP_MAX_RECV_SGE EHCA_BMASK_IBM(40, 47)
#define H_ALL_RES_QP_UD_AV_LKEY EHCA_BMASK_IBM(32, 63)
#define H_ALL_RES_QP_SRQ_QP_TOKEN EHCA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_SRQ_QP_HANDLE EHCA_BMASK_IBM(0, 64)
#define H_ALL_RES_QP_SRQ_LIMIT EHCA_BMASK_IBM(48, 63)
#define H_ALL_RES_QP_SRQ_QPN EHCA_BMASK_IBM(40, 63)
#define H_ALL_RES_QP_ACT_OUTST_SEND_WR EHCA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_ACT_OUTST_RECV_WR EHCA_BMASK_IBM(48, 63)
#define H_ALL_RES_QP_ACT_SEND_SGE EHCA_BMASK_IBM(8, 15)
#define H_ALL_RES_QP_ACT_RECV_SGE EHCA_BMASK_IBM(24, 31)
#define H_ALL_RES_QP_SQUEUE_SIZE_PAGES EHCA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_RQUEUE_SIZE_PAGES EHCA_BMASK_IBM(32, 63)
#define H_MP_INIT_TYPE EHCA_BMASK_IBM(44, 47)
#define H_MP_SHUTDOWN EHCA_BMASK_IBM(48, 48)
#define H_MP_RESET_QKEY_CTR EHCA_BMASK_IBM(49, 49)
#define HCALL4_REGS_FORMAT "r4=%lx r5=%lx r6=%lx r7=%lx"
#define HCALL7_REGS_FORMAT HCALL4_REGS_FORMAT " r8=%lx r9=%lx r10=%lx"
#define HCALL9_REGS_FORMAT HCALL7_REGS_FORMAT " r11=%lx r12=%lx"
static DEFINE_SPINLOCK(hcall_lock);
static long ehca_plpar_hcall_norets(unsigned long opcode,
unsigned long arg1,
unsigned long arg2,
unsigned long arg3,
unsigned long arg4,
unsigned long arg5,
unsigned long arg6,
unsigned long arg7)
{
long ret;
int i, sleep_msecs;
unsigned long flags = 0;
if (unlikely(ehca_debug_level >= 2))
ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
for (i = 0; i < 5; i++) {
/* serialize hCalls to work around firmware issue */
if (ehca_lock_hcalls)
spin_lock_irqsave(&hcall_lock, flags);
ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
arg5, arg6, arg7);
if (ehca_lock_hcalls)
spin_unlock_irqrestore(&hcall_lock, flags);
if (H_IS_LONG_BUSY(ret)) {
sleep_msecs = get_longbusy_msecs(ret);
msleep_interruptible(sleep_msecs);
continue;
}
if (ret < H_SUCCESS)
ehca_gen_err("opcode=%lx ret=%li " HCALL7_REGS_FORMAT,
opcode, ret, arg1, arg2, arg3,
arg4, arg5, arg6, arg7);
else
if (unlikely(ehca_debug_level >= 2))
ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);
return ret;
}
return H_BUSY;
}
static long ehca_plpar_hcall9(unsigned long opcode,
unsigned long *outs, /* array of 9 outputs */
unsigned long arg1,
unsigned long arg2,
unsigned long arg3,
unsigned long arg4,
unsigned long arg5,
unsigned long arg6,
unsigned long arg7,
unsigned long arg8,
unsigned long arg9)
{
long ret;
int i, sleep_msecs;
unsigned long flags = 0;
if (unlikely(ehca_debug_level >= 2))
ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
arg1, arg2, arg3, arg4, arg5,
arg6, arg7, arg8, arg9);
for (i = 0; i < 5; i++) {
/* serialize hCalls to work around firmware issue */
if (ehca_lock_hcalls)
spin_lock_irqsave(&hcall_lock, flags);
ret = plpar_hcall9(opcode, outs,
arg1, arg2, arg3, arg4, arg5,
arg6, arg7, arg8, arg9);
if (ehca_lock_hcalls)
spin_unlock_irqrestore(&hcall_lock, flags);
if (H_IS_LONG_BUSY(ret)) {
sleep_msecs = get_longbusy_msecs(ret);
msleep_interruptible(sleep_msecs);
continue;
}
if (ret < H_SUCCESS) {
ehca_gen_err("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT,
opcode, arg1, arg2, arg3, arg4, arg5,
arg6, arg7, arg8, arg9);
ehca_gen_err("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
ret, outs[0], outs[1], outs[2], outs[3],
outs[4], outs[5], outs[6], outs[7],
outs[8]);
} else if (unlikely(ehca_debug_level >= 2))
ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
ret, outs[0], outs[1], outs[2], outs[3],
outs[4], outs[5], outs[6], outs[7],
outs[8]);
return ret;
}
return H_BUSY;
}
u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
struct ehca_pfeq *pfeq,
const u32 neq_control,
const u32 number_of_entries,
struct ipz_eq_handle *eq_handle,
u32 *act_nr_of_entries,
u32 *act_pages,
u32 *eq_ist)
{
u64 ret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
u64 allocate_controls;
/* resource type */
allocate_controls = 3ULL;
/* ISN is associated */
if (neq_control != 1)
allocate_controls = (1ULL << (63 - 7)) | allocate_controls;
else /* notification event queue */
allocate_controls = (1ULL << 63) | allocate_controls;
ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
adapter_handle.handle, /* r4 */
allocate_controls, /* r5 */
number_of_entries, /* r6 */
0, 0, 0, 0, 0, 0);
eq_handle->handle = outs[0];
*act_nr_of_entries = (u32)outs[3];
*act_pages = (u32)outs[4];
*eq_ist = (u32)outs[5];
if (ret == H_NOT_ENOUGH_RESOURCES)
ehca_gen_err("Not enough resource - ret=%lli ", ret);
return ret;
}
u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
struct ipz_eq_handle eq_handle,
const u64 event_mask)
{
return ehca_plpar_hcall_norets(H_RESET_EVENTS,
adapter_handle.handle, /* r4 */
eq_handle.handle, /* r5 */
event_mask, /* r6 */
0, 0, 0, 0);
}
u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
struct ehca_cq *cq,
struct ehca_alloc_cq_parms *param)
{
int rc;
u64 ret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
adapter_handle.handle, /* r4 */
2, /* r5 */
param->eq_handle.handle, /* r6 */
cq->token, /* r7 */
param->nr_cqe, /* r8 */
0, 0, 0, 0);
cq->ipz_cq_handle.handle = outs[0];
param->act_nr_of_entries = (u32)outs[3];
param->act_pages = (u32)outs[4];
if (ret == H_SUCCESS) {
rc = hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
if (rc) {
ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
rc, outs[5]);
ehca_plpar_hcall_norets(H_FREE_RESOURCE,
adapter_handle.handle, /* r4 */
cq->ipz_cq_handle.handle, /* r5 */
0, 0, 0, 0, 0);
ret = H_NO_MEM;
}
}
if (ret == H_NOT_ENOUGH_RESOURCES)
ehca_gen_err("Not enough resources. ret=%lli", ret);
return ret;
}
u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
struct ehca_alloc_qp_parms *parms, int is_user)
{
int rc;
u64 ret;
u64 allocate_controls, max_r10_reg, r11, r12;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
allocate_controls =
EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type)
| EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
| EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
| EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
| EHCA_BMASK_SET(H_ALL_RES_QP_STORAGE, parms->qp_storage)
| EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE,
parms->squeue.page_size)
| EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE,
parms->rqueue.page_size)
| EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
!!(parms->ll_comp_flags & LLQP_RECV_COMP))
| EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
!!(parms->ll_comp_flags & LLQP_SEND_COMP))
| EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
parms->ud_av_l_key_ctl)
| EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);
max_r10_reg =
EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
parms->squeue.max_wr + 1)
| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
parms->rqueue.max_wr + 1)
| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
parms->squeue.max_sge)
| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
parms->rqueue.max_sge);
r11 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QP_TOKEN, parms->srq_token);
if (parms->ext_type == EQPT_SRQ)
r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_LIMIT, parms->srq_limit);
else
r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QPN, parms->srq_qpn);
ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
adapter_handle.handle, /* r4 */
allocate_controls, /* r5 */
parms->send_cq_handle.handle,
parms->recv_cq_handle.handle,
parms->eq_handle.handle,
((u64)parms->token << 32) | parms->pd.value,
max_r10_reg, r11, r12);
parms->qp_handle.handle = outs[0];
parms->real_qp_num = (u32)outs[1];
parms->squeue.act_nr_wqes =
(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
parms->rqueue.act_nr_wqes =
(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
parms->squeue.act_nr_sges =
(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]);
parms->rqueue.act_nr_sges =
(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]);
parms->squeue.queue_size =
(u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]);
parms->rqueue.queue_size =
(u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);
if (ret == H_SUCCESS) {
rc = hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
if (rc) {
ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
rc, outs[6]);
ehca_plpar_hcall_norets(H_FREE_RESOURCE,
adapter_handle.handle, /* r4 */
parms->qp_handle.handle, /* r5 */
0, 0, 0, 0, 0);
ret = H_NO_MEM;
}
}
if (ret == H_NOT_ENOUGH_RESOURCES)
ehca_gen_err("Not enough resources. ret=%lli", ret);
return ret;
}
u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
const u8 port_id,
struct hipz_query_port *query_port_response_block)
{
u64 ret;
u64 r_cb = __pa(query_port_response_block);
if (r_cb & (EHCA_PAGESIZE-1)) {
ehca_gen_err("response block not page aligned");
return H_PARAMETER;
}
ret = ehca_plpar_hcall_norets(H_QUERY_PORT,
adapter_handle.handle, /* r4 */
port_id, /* r5 */
r_cb, /* r6 */
0, 0, 0, 0);
if (ehca_debug_level >= 2)
ehca_dmp(query_port_response_block, 64, "response_block");
return ret;
}
u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
const u8 port_id, const u32 port_cap,
const u8 init_type, const int modify_mask)
{
u64 port_attributes = port_cap;
if (modify_mask & IB_PORT_SHUTDOWN)
port_attributes |= EHCA_BMASK_SET(H_MP_SHUTDOWN, 1);
if (modify_mask & IB_PORT_INIT_TYPE)
port_attributes |= EHCA_BMASK_SET(H_MP_INIT_TYPE, init_type);
if (modify_mask & IB_PORT_RESET_QKEY_CNTR)
port_attributes |= EHCA_BMASK_SET(H_MP_RESET_QKEY_CTR, 1);
return ehca_plpar_hcall_norets(H_MODIFY_PORT,
adapter_handle.handle, /* r4 */
port_id, /* r5 */
port_attributes, /* r6 */
0, 0, 0, 0);
}
u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
struct hipz_query_hca *query_hca_rblock)
{
u64 r_cb = __pa(query_hca_rblock);
if (r_cb & (EHCA_PAGESIZE-1)) {
ehca_gen_err("response_block=%p not page aligned",
query_hca_rblock);
return H_PARAMETER;
}
return ehca_plpar_hcall_norets(H_QUERY_HCA,
adapter_handle.handle, /* r4 */
r_cb, /* r5 */
0, 0, 0, 0, 0);
}
u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
const u8 pagesize,
const u8 queue_type,
const u64 resource_handle,
const u64 logical_address_of_page,
u64 count)
{
return ehca_plpar_hcall_norets(H_REGISTER_RPAGES,
adapter_handle.handle, /* r4 */
(u64)queue_type | ((u64)pagesize) << 8,
/* r5 */
resource_handle, /* r6 */
logical_address_of_page, /* r7 */
count, /* r8 */
0, 0);
}
u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
const struct ipz_eq_handle eq_handle,
struct ehca_pfeq *pfeq,
const u8 pagesize,
const u8 queue_type,
const u64 logical_address_of_page,
const u64 count)
{
if (count != 1) {
ehca_gen_err("Ppage counter=%llx", count);
return H_PARAMETER;
}
return hipz_h_register_rpage(adapter_handle,
pagesize,
queue_type,
eq_handle.handle,
logical_address_of_page, count);
}
u64 hipz_h_query_int_state(const struct ipz_adapter_handle adapter_handle,
u32 ist)
{
u64 ret;
ret = ehca_plpar_hcall_norets(H_QUERY_INT_STATE,
adapter_handle.handle, /* r4 */
ist, /* r5 */
0, 0, 0, 0, 0);
if (ret != H_SUCCESS && ret != H_BUSY)
ehca_gen_err("Could not query interrupt state.");
return ret;
}
u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
const struct ipz_cq_handle cq_handle,
struct ehca_pfcq *pfcq,
const u8 pagesize,
const u8 queue_type,
const u64 logical_address_of_page,
const u64 count,
const struct h_galpa gal)
{
if (count != 1) {
ehca_gen_err("Page counter=%llx", count);
return H_PARAMETER;
}
return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
cq_handle.handle, logical_address_of_page,
count);
}
u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
const struct ipz_qp_handle qp_handle,
struct ehca_pfqp *pfqp,
const u8 pagesize,
const u8 queue_type,
const u64 logical_address_of_page,
const u64 count,
const struct h_galpa galpa)
{
if (count > 1) {
ehca_gen_err("Page counter=%llx", count);
return H_PARAMETER;
}
return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
qp_handle.handle, logical_address_of_page,
count);
}
u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
const struct ipz_qp_handle qp_handle,
struct ehca_pfqp *pfqp,
void **log_addr_next_sq_wqe2processed,
void **log_addr_next_rq_wqe2processed,
int dis_and_get_function_code)
{
u64 ret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
adapter_handle.handle, /* r4 */
dis_and_get_function_code, /* r5 */
qp_handle.handle, /* r6 */
0, 0, 0, 0, 0, 0);
if (log_addr_next_sq_wqe2processed)
*log_addr_next_sq_wqe2processed = (void *)outs[0];
if (log_addr_next_rq_wqe2processed)
*log_addr_next_rq_wqe2processed = (void *)outs[1];
return ret;
}
u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
const struct ipz_qp_handle qp_handle,
struct ehca_pfqp *pfqp,
const u64 update_mask,
struct hcp_modify_qp_control_block *mqpcb,
struct h_galpa gal)
{
u64 ret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_MODIFY_QP, outs,
adapter_handle.handle, /* r4 */
qp_handle.handle, /* r5 */
update_mask, /* r6 */
__pa(mqpcb), /* r7 */
0, 0, 0, 0, 0);
if (ret == H_NOT_ENOUGH_RESOURCES)
ehca_gen_err("Insufficient resources ret=%lli", ret);
return ret;
}
u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
const struct ipz_qp_handle qp_handle,
struct ehca_pfqp *pfqp,
struct hcp_modify_qp_control_block *qqpcb,
struct h_galpa gal)
{
return ehca_plpar_hcall_norets(H_QUERY_QP,
adapter_handle.handle, /* r4 */
qp_handle.handle, /* r5 */
__pa(qqpcb), /* r6 */
0, 0, 0, 0);
}
u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
struct ehca_qp *qp)
{
u64 ret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = hcp_galpas_dtor(&qp->galpas);
if (ret) {
ehca_gen_err("Could not destruct qp->galpas");
return H_RESOURCE;
}
ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
adapter_handle.handle, /* r4 */
/* function code */
1, /* r5 */
qp->ipz_qp_handle.handle, /* r6 */
0, 0, 0, 0, 0, 0);
if (ret == H_HARDWARE)
ehca_gen_err("HCA not operational. ret=%lli", ret);
ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
adapter_handle.handle, /* r4 */
qp->ipz_qp_handle.handle, /* r5 */
0, 0, 0, 0, 0);
if (ret == H_RESOURCE)
ehca_gen_err("Resource still in use. ret=%lli", ret);
return ret;
}
u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
const struct ipz_qp_handle qp_handle,
struct h_galpa gal,
u32 port)
{
return ehca_plpar_hcall_norets(H_DEFINE_AQP0,
adapter_handle.handle, /* r4 */
qp_handle.handle, /* r5 */
port, /* r6 */
0, 0, 0, 0);
}
u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
const struct ipz_qp_handle qp_handle,
struct h_galpa gal,
u32 port, u32 *pma_qp_nr,
u32 *bma_qp_nr)
{
u64 ret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs,
adapter_handle.handle, /* r4 */
qp_handle.handle, /* r5 */
port, /* r6 */
0, 0, 0, 0, 0, 0);
*pma_qp_nr = (u32)outs[0];
*bma_qp_nr = (u32)outs[1];
if (ret == H_ALIAS_EXIST)
ehca_gen_err("AQP1 already exists. ret=%lli", ret);
return ret;
}
u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
const struct ipz_qp_handle qp_handle,
struct h_galpa gal,
u16 mcg_dlid,
u64 subnet_prefix, u64 interface_id)
{
u64 ret;
ret = ehca_plpar_hcall_norets(H_ATTACH_MCQP,
adapter_handle.handle, /* r4 */
qp_handle.handle, /* r5 */
mcg_dlid, /* r6 */
interface_id, /* r7 */
subnet_prefix, /* r8 */
0, 0);
if (ret == H_NOT_ENOUGH_RESOURCES)
ehca_gen_err("Not enough resources. ret=%lli", ret);
return ret;
}
u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
const struct ipz_qp_handle qp_handle,
struct h_galpa gal,
u16 mcg_dlid,
u64 subnet_prefix, u64 interface_id)
{
return ehca_plpar_hcall_norets(H_DETACH_MCQP,
adapter_handle.handle, /* r4 */
qp_handle.handle, /* r5 */
mcg_dlid, /* r6 */
interface_id, /* r7 */
subnet_prefix, /* r8 */
0, 0);
}
u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
struct ehca_cq *cq,
u8 force_flag)
{
u64 ret;
ret = hcp_galpas_dtor(&cq->galpas);
if (ret) {
ehca_gen_err("Could not destruct cp->galpas");
return H_RESOURCE;
}
ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
adapter_handle.handle, /* r4 */
cq->ipz_cq_handle.handle, /* r5 */
force_flag != 0 ? 1L : 0L, /* r6 */
0, 0, 0, 0);
if (ret == H_RESOURCE)
ehca_gen_err("H_FREE_RESOURCE failed ret=%lli ", ret);
return ret;
}
u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
struct ehca_eq *eq)
{
u64 ret;
ret = hcp_galpas_dtor(&eq->galpas);
if (ret) {
ehca_gen_err("Could not destruct eq->galpas");
return H_RESOURCE;
}
ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
adapter_handle.handle, /* r4 */
eq->ipz_eq_handle.handle, /* r5 */
0, 0, 0, 0, 0);
if (ret == H_RESOURCE)
ehca_gen_err("Resource in use. ret=%lli ", ret);
return ret;
}
u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
const struct ehca_mr *mr,
const u64 vaddr,
const u64 length,
const u32 access_ctrl,
const struct ipz_pd pd,
struct ehca_mr_hipzout_parms *outparms)
{
u64 ret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
adapter_handle.handle, /* r4 */
5, /* r5 */
vaddr, /* r6 */
length, /* r7 */
(((u64)access_ctrl) << 32ULL), /* r8 */
pd.value, /* r9 */
0, 0, 0);
outparms->handle.handle = outs[0];
outparms->lkey = (u32)outs[2];
outparms->rkey = (u32)outs[3];
return ret;
}
u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
const struct ehca_mr *mr,
const u8 pagesize,
const u8 queue_type,
const u64 logical_address_of_page,
const u64 count)
{
u64 ret;
if (unlikely(ehca_debug_level >= 3)) {
if (count > 1) {
u64 *kpage;
int i;
kpage = __va(logical_address_of_page);
for (i = 0; i < count; i++)
ehca_gen_dbg("kpage[%d]=%p",
i, (void *)kpage[i]);
} else
ehca_gen_dbg("kpage=%p",
(void *)logical_address_of_page);
}
if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
ehca_gen_err("logical_address_of_page not on a 4k boundary "
"adapter_handle=%llx mr=%p mr_handle=%llx "
"pagesize=%x queue_type=%x "
"logical_address_of_page=%llx count=%llx",
adapter_handle.handle, mr,
mr->ipz_mr_handle.handle, pagesize, queue_type,
logical_address_of_page, count);
ret = H_PARAMETER;
} else
ret = hipz_h_register_rpage(adapter_handle, pagesize,
queue_type,
mr->ipz_mr_handle.handle,
logical_address_of_page, count);
return ret;
}
u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
const struct ehca_mr *mr,
struct ehca_mr_hipzout_parms *outparms)
{
u64 ret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_QUERY_MR, outs,
adapter_handle.handle, /* r4 */
mr->ipz_mr_handle.handle, /* r5 */
0, 0, 0, 0, 0, 0, 0);
outparms->len = outs[0];
outparms->vaddr = outs[1];
outparms->acl = outs[4] >> 32;
outparms->lkey = (u32)(outs[5] >> 32);
outparms->rkey = (u32)(outs[5] & (0xffffffff));
return ret;
}
u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
const struct ehca_mr *mr)
{
return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
adapter_handle.handle, /* r4 */
mr->ipz_mr_handle.handle, /* r5 */
0, 0, 0, 0, 0);
}
u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
const struct ehca_mr *mr,
const u64 vaddr_in,
const u64 length,
const u32 access_ctrl,
const struct ipz_pd pd,
const u64 mr_addr_cb,
struct ehca_mr_hipzout_parms *outparms)
{
u64 ret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs,
adapter_handle.handle, /* r4 */
mr->ipz_mr_handle.handle, /* r5 */
vaddr_in, /* r6 */
length, /* r7 */
/* r8 */
((((u64)access_ctrl) << 32ULL) | pd.value),
mr_addr_cb, /* r9 */
0, 0, 0);
outparms->vaddr = outs[1];
outparms->lkey = (u32)outs[2];
outparms->rkey = (u32)outs[3];
return ret;
}
u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
const struct ehca_mr *mr,
const struct ehca_mr *orig_mr,
const u64 vaddr_in,
const u32 access_ctrl,
const struct ipz_pd pd,
struct ehca_mr_hipzout_parms *outparms)
{
u64 ret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs,
adapter_handle.handle, /* r4 */
orig_mr->ipz_mr_handle.handle, /* r5 */
vaddr_in, /* r6 */
(((u64)access_ctrl) << 32ULL), /* r7 */
pd.value, /* r8 */
0, 0, 0, 0);
outparms->handle.handle = outs[0];
outparms->lkey = (u32)outs[2];
outparms->rkey = (u32)outs[3];
return ret;
}
u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
const struct ehca_mw *mw,
const struct ipz_pd pd,
struct ehca_mw_hipzout_parms *outparms)
{
u64 ret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
adapter_handle.handle, /* r4 */
6, /* r5 */
pd.value, /* r6 */
0, 0, 0, 0, 0, 0);
outparms->handle.handle = outs[0];
outparms->rkey = (u32)outs[3];
return ret;
}
u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
const struct ehca_mw *mw,
struct ehca_mw_hipzout_parms *outparms)
{
u64 ret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_QUERY_MW, outs,
adapter_handle.handle, /* r4 */
mw->ipz_mw_handle.handle, /* r5 */
0, 0, 0, 0, 0, 0, 0);
outparms->rkey = (u32)outs[3];
return ret;
}
u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
const struct ehca_mw *mw)
{
return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
adapter_handle.handle, /* r4 */
mw->ipz_mw_handle.handle, /* r5 */
0, 0, 0, 0, 0);
}
u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
const u64 ressource_handle,
void *rblock,
unsigned long *byte_count)
{
u64 r_cb = __pa(rblock);
if (r_cb & (EHCA_PAGESIZE-1)) {
ehca_gen_err("rblock not page aligned.");
return H_PARAMETER;
}
return ehca_plpar_hcall_norets(H_ERROR_DATA,
adapter_handle.handle,
ressource_handle,
r_cb,
0, 0, 0, 0);
}
u64 hipz_h_eoi(int irq)
{
unsigned long xirr;
iosync();
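	/*
	 * Editorial note, stated as an assumption about the XICS layout:
	 * the XIRR word carries the CPPR in its top byte and the interrupt
	 * source in the low 24 bits, so writing CPPR 0xff ("accept all
	 * interrupts") together with the source performs the EOI.
	 */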
xirr = (0xffULL << 24) | irq;
return plpar_hcall_norets(H_EOI, xirr);
}

View file

@@ -1,265 +0,0 @@
/*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* Firmware Infiniband Interface code for POWER
*
* Authors: Christoph Raisch <raisch@de.ibm.com>
* Hoang-Nam Nguyen <hnguyen@de.ibm.com>
* Gerd Bayer <gerd.bayer@de.ibm.com>
* Waleri Fomin <fomin@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __HCP_IF_H__
#define __HCP_IF_H__
#include "ehca_classes.h"
#include "ehca_tools.h"
#include "hipz_hw.h"
/*
 * hipz_h_alloc_resource_eq allocates EQ resources in HW and FW,
 * initializes the resources, and creates the empty EQPT (ring).
*/
u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
struct ehca_pfeq *pfeq,
const u32 neq_control,
const u32 number_of_entries,
struct ipz_eq_handle *eq_handle,
u32 * act_nr_of_entries,
u32 * act_pages,
u32 * eq_ist);
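/*
 * Editorial usage sketch, not part of the original driver: allocate an
 * event queue with a requested entry count; the firmware reports the
 * actually granted sizes through the out parameters.  All values below
 * are placeholders.
 */
static inline u64 example_alloc_eq(const struct ipz_adapter_handle hca,
				   struct ehca_pfeq *pfeq,
				   struct ipz_eq_handle *eq_handle)
{
	u32 act_entries, act_pages, eq_ist;

	return hipz_h_alloc_resource_eq(hca, pfeq, 0 /* neq_control */,
					1024 /* requested entries */,
					eq_handle, &act_entries,
					&act_pages, &eq_ist);
}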
u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
struct ipz_eq_handle eq_handle,
const u64 event_mask);
/*
 * hipz_h_alloc_resource_cq allocates CQ resources in HW and FW,
 * initializes the resources, and creates the empty CQPT (ring).
*/
u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
struct ehca_cq *cq,
struct ehca_alloc_cq_parms *param);
/*
* hipz_h_alloc_resource_qp allocates QP resources in HW and FW,
 * initializes the resources, and creates the empty QPPTs (2 rings).
*/
u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
struct ehca_alloc_qp_parms *parms, int is_user);
u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
const u8 port_id,
struct hipz_query_port *query_port_response_block);
u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
const u8 port_id, const u32 port_cap,
const u8 init_type, const int modify_mask);
u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
struct hipz_query_hca *query_hca_rblock);
/*
 * hipz_h_register_rpage is the internal helper in hcp_if.h behind all
 * H_REGISTER_RPAGE hcalls.
*/
u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
const u8 pagesize,
const u8 queue_type,
const u64 resource_handle,
const u64 logical_address_of_page,
u64 count);
u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
const struct ipz_eq_handle eq_handle,
struct ehca_pfeq *pfeq,
const u8 pagesize,
const u8 queue_type,
const u64 logical_address_of_page,
const u64 count);
u64 hipz_h_query_int_state(const struct ipz_adapter_handle
hcp_adapter_handle,
u32 ist);
u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
const struct ipz_cq_handle cq_handle,
struct ehca_pfcq *pfcq,
const u8 pagesize,
const u8 queue_type,
const u64 logical_address_of_page,
const u64 count,
const struct h_galpa gal);
u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
const struct ipz_qp_handle qp_handle,
struct ehca_pfqp *pfqp,
const u8 pagesize,
const u8 queue_type,
const u64 logical_address_of_page,
const u64 count,
const struct h_galpa galpa);
u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
const struct ipz_qp_handle qp_handle,
struct ehca_pfqp *pfqp,
void **log_addr_next_sq_wqe_tb_processed,
void **log_addr_next_rq_wqe_tb_processed,
int dis_and_get_function_code);
enum hcall_sigt {
HCALL_SIGT_NO_CQE = 0,
HCALL_SIGT_BY_WQE = 1,
HCALL_SIGT_EVERY = 2
};
u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
const struct ipz_qp_handle qp_handle,
struct ehca_pfqp *pfqp,
const u64 update_mask,
struct hcp_modify_qp_control_block *mqpcb,
struct h_galpa gal);
u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
const struct ipz_qp_handle qp_handle,
struct ehca_pfqp *pfqp,
struct hcp_modify_qp_control_block *qqpcb,
struct h_galpa gal);
u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
struct ehca_qp *qp);
u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
const struct ipz_qp_handle qp_handle,
struct h_galpa gal,
u32 port);
u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
const struct ipz_qp_handle qp_handle,
struct h_galpa gal,
u32 port, u32 * pma_qp_nr,
u32 * bma_qp_nr);
u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
const struct ipz_qp_handle qp_handle,
struct h_galpa gal,
u16 mcg_dlid,
u64 subnet_prefix, u64 interface_id);
u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
const struct ipz_qp_handle qp_handle,
struct h_galpa gal,
u16 mcg_dlid,
u64 subnet_prefix, u64 interface_id);
u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
struct ehca_cq *cq,
u8 force_flag);
u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
struct ehca_eq *eq);
/*
 * hipz_h_alloc_resource_mr allocates MR resources in HW and FW and
 * initializes the resources.
*/
u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
const struct ehca_mr *mr,
const u64 vaddr,
const u64 length,
const u32 access_ctrl,
const struct ipz_pd pd,
struct ehca_mr_hipzout_parms *outparms);
/* hipz_h_register_rpage_mr registers MR resource pages in HW and FW */
u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
const struct ehca_mr *mr,
const u8 pagesize,
const u8 queue_type,
const u64 logical_address_of_page,
const u64 count);
/* hipz_h_query_mr queries MR in HW and FW */
u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
const struct ehca_mr *mr,
struct ehca_mr_hipzout_parms *outparms);
/* hipz_h_free_resource_mr frees MR resources in HW and FW */
u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
const struct ehca_mr *mr);
/* hipz_h_reregister_pmr reregisters MR in HW and FW */
u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
const struct ehca_mr *mr,
const u64 vaddr_in,
const u64 length,
const u32 access_ctrl,
const struct ipz_pd pd,
const u64 mr_addr_cb,
struct ehca_mr_hipzout_parms *outparms);
/* hipz_h_register_smr registers a shared MR in HW and FW */
u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
const struct ehca_mr *mr,
const struct ehca_mr *orig_mr,
const u64 vaddr_in,
const u32 access_ctrl,
const struct ipz_pd pd,
struct ehca_mr_hipzout_parms *outparms);
/*
 * hipz_h_alloc_resource_mw allocates MW resources in HW and FW and
 * initializes the resources.
*/
u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
const struct ehca_mw *mw,
const struct ipz_pd pd,
struct ehca_mw_hipzout_parms *outparms);
/* hipz_h_query_mw queries MW in HW and FW */
u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
const struct ehca_mw *mw,
struct ehca_mw_hipzout_parms *outparms);
/* hipz_h_free_resource_mw frees MW resources in HW and FW */
u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
const struct ehca_mw *mw);
u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
const u64 ressource_handle,
void *rblock,
unsigned long *byte_count);
u64 hipz_h_eoi(int irq);
#endif /* __HCP_IF_H__ */

View file

@@ -1,82 +0,0 @@
/*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* load store abstraction for ehca register access with tracing
*
* Authors: Christoph Raisch <raisch@de.ibm.com>
* Hoang-Nam Nguyen <hnguyen@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "ehca_classes.h"
#include "hipz_hw.h"
u64 hcall_map_page(u64 physaddr)
{
return (u64)ioremap(physaddr, EHCA_PAGESIZE);
}
int hcall_unmap_page(u64 mapaddr)
{
iounmap((volatile void __iomem *) mapaddr);
return 0;
}
int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
u64 paddr_kernel, u64 paddr_user)
{
if (!is_user) {
galpas->kernel.fw_handle = hcall_map_page(paddr_kernel);
if (!galpas->kernel.fw_handle)
return -ENOMEM;
} else
galpas->kernel.fw_handle = 0;
galpas->user.fw_handle = paddr_user;
return 0;
}
int hcp_galpas_dtor(struct h_galpas *galpas)
{
if (galpas->kernel.fw_handle) {
int ret = hcall_unmap_page(galpas->kernel.fw_handle);
if (ret)
return ret;
}
galpas->user.fw_handle = galpas->kernel.fw_handle = 0;
return 0;
}
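/*
 * Editorial usage sketch, not part of the original driver: typical
 * ctor/dtor pairing around firmware-reported register page addresses;
 * both physical addresses are placeholders that a resource-allocation
 * hcall would normally supply.
 */
static inline int example_map_galpas(struct h_galpas *galpas,
				     u64 paddr_kernel, u64 paddr_user)
{
	int ret = hcp_galpas_ctor(galpas, 0 /* kernel mapping */,
				  paddr_kernel, paddr_user);
	if (ret)
		return ret;
	/* ... access registers through galpas->kernel here ... */
	return hcp_galpas_dtor(galpas);
}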

View file

@@ -1,90 +0,0 @@
/*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* Firmware calls
*
* Authors: Christoph Raisch <raisch@de.ibm.com>
* Hoang-Nam Nguyen <hnguyen@de.ibm.com>
* Waleri Fomin <fomin@de.ibm.com>
* Gerd Bayer <gerd.bayer@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __HCP_PHYP_H__
#define __HCP_PHYP_H__
/*
* eHCA page (mapped into memory)
* resource to access eHCA register pages in CPU address space
*/
struct h_galpa {
u64 fw_handle;
	/* for pSeries this is a 64-bit memory address where
	   I/O memory is mapped into CPU address space (kv) */
};
/*
* resource to access eHCA address space registers, all types
*/
struct h_galpas {
	u32 pid;		/* PID of userspace galpa checking */
struct h_galpa user; /* user space accessible resource,
set to 0 if unused */
struct h_galpa kernel; /* kernel space accessible resource,
set to 0 if unused */
};
static inline u64 hipz_galpa_load(struct h_galpa galpa, u32 offset)
{
u64 addr = galpa.fw_handle + offset;
return *(volatile u64 __force *)addr;
}
static inline void hipz_galpa_store(struct h_galpa galpa, u32 offset, u64 value)
{
u64 addr = galpa.fw_handle + offset;
*(volatile u64 __force *)addr = value;
}
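/*
 * Editorial usage sketch, not part of the original driver: a
 * read-modify-write of a 64-bit register at a byte offset within a
 * mapped galpa page; 0x40 is a placeholder offset.
 */
static inline void example_galpa_rmw(struct h_galpa gal)
{
	u64 val = hipz_galpa_load(gal, 0x40);

	hipz_galpa_store(gal, 0x40, val | 1ULL);
}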
int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
u64 paddr_kernel, u64 paddr_user);
int hcp_galpas_dtor(struct h_galpas *galpas);
u64 hcall_map_page(u64 physaddr);
int hcall_unmap_page(u64 mapaddr);
#endif

View file

@@ -1,68 +0,0 @@
/*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* HW abstraction register functions
*
* Authors: Christoph Raisch <raisch@de.ibm.com>
* Reinhard Ernst <rernst@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __HIPZ_FNS_H__
#define __HIPZ_FNS_H__
#include "ehca_classes.h"
#include "hipz_hw.h"
#include "hipz_fns_core.h"
#define hipz_galpa_store_eq(gal, offset, value) \
hipz_galpa_store(gal, EQTEMM_OFFSET(offset), value)
#define hipz_galpa_load_eq(gal, offset) \
hipz_galpa_load(gal, EQTEMM_OFFSET(offset))
#define hipz_galpa_store_qped(gal, offset, value) \
hipz_galpa_store(gal, QPEDMM_OFFSET(offset), value)
#define hipz_galpa_load_qped(gal, offset) \
hipz_galpa_load(gal, QPEDMM_OFFSET(offset))
#define hipz_galpa_store_mrmw(gal, offset, value) \
hipz_galpa_store(gal, MRMWMM_OFFSET(offset), value)
#define hipz_galpa_load_mrmw(gal, offset) \
hipz_galpa_load(gal, MRMWMM_OFFSET(offset))
#endif

View file

@@ -1,100 +0,0 @@
/*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* HW abstraction register functions
*
* Authors: Christoph Raisch <raisch@de.ibm.com>
* Heiko J Schick <schickhj@de.ibm.com>
* Hoang-Nam Nguyen <hnguyen@de.ibm.com>
* Reinhard Ernst <rernst@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __HIPZ_FNS_CORE_H__
#define __HIPZ_FNS_CORE_H__
#include "hcp_phyp.h"
#include "hipz_hw.h"
#define hipz_galpa_store_cq(gal, offset, value) \
hipz_galpa_store(gal, CQTEMM_OFFSET(offset), value)
#define hipz_galpa_load_cq(gal, offset) \
hipz_galpa_load(gal, CQTEMM_OFFSET(offset))
#define hipz_galpa_store_qp(gal, offset, value) \
hipz_galpa_store(gal, QPTEMM_OFFSET(offset), value)
#define hipz_galpa_load_qp(gal, offset) \
hipz_galpa_load(gal, QPTEMM_OFFSET(offset))
static inline void hipz_update_sqa(struct ehca_qp *qp, u16 nr_wqes)
{
/* ringing doorbell :-) */
hipz_galpa_store_qp(qp->galpas.kernel, qpx_sqa,
EHCA_BMASK_SET(QPX_SQADDER, nr_wqes));
}
static inline void hipz_update_rqa(struct ehca_qp *qp, u16 nr_wqes)
{
/* ringing doorbell :-) */
hipz_galpa_store_qp(qp->galpas.kernel, qpx_rqa,
EHCA_BMASK_SET(QPX_RQADDER, nr_wqes));
}
static inline void hipz_update_feca(struct ehca_cq *cq, u32 nr_cqes)
{
hipz_galpa_store_cq(cq->galpas.kernel, cqx_feca,
EHCA_BMASK_SET(CQX_FECADDER, nr_cqes));
}
static inline void hipz_set_cqx_n0(struct ehca_cq *cq, u32 value)
{
u64 cqx_n0_reg;
hipz_galpa_store_cq(cq->galpas.kernel, cqx_n0,
EHCA_BMASK_SET(CQX_N0_GENERATE_SOLICITED_COMP_EVENT,
value));
cqx_n0_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n0);
}
static inline void hipz_set_cqx_n1(struct ehca_cq *cq, u32 value)
{
u64 cqx_n1_reg;
hipz_galpa_store_cq(cq->galpas.kernel, cqx_n1,
EHCA_BMASK_SET(CQX_N1_GENERATE_COMP_EVENT, value));
cqx_n1_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n1);
}
#endif /* __HIPZ_FNS_CORE_H__ */

View file

@@ -1,414 +0,0 @@
/*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* eHCA register definitions
*
* Authors: Waleri Fomin <fomin@de.ibm.com>
* Christoph Raisch <raisch@de.ibm.com>
* Reinhard Ernst <rernst@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __HIPZ_HW_H__
#define __HIPZ_HW_H__
#include "ehca_tools.h"
#define EHCA_MAX_MTU 4
/* QP Table Entry Memory Map */
struct hipz_qptemm {
u64 qpx_hcr;
u64 qpx_c;
u64 qpx_herr;
u64 qpx_aer;
/* 0x20*/
u64 qpx_sqa;
u64 qpx_sqc;
u64 qpx_rqa;
u64 qpx_rqc;
/* 0x40*/
u64 qpx_st;
u64 qpx_pmstate;
u64 qpx_pmfa;
u64 qpx_pkey;
/* 0x60*/
u64 qpx_pkeya;
u64 qpx_pkeyb;
u64 qpx_pkeyc;
u64 qpx_pkeyd;
/* 0x80*/
u64 qpx_qkey;
u64 qpx_dqp;
u64 qpx_dlidp;
u64 qpx_portp;
/* 0xa0*/
u64 qpx_slidp;
u64 qpx_slidpp;
u64 qpx_dlida;
u64 qpx_porta;
/* 0xc0*/
u64 qpx_slida;
u64 qpx_slidpa;
u64 qpx_slvl;
u64 qpx_ipd;
/* 0xe0*/
u64 qpx_mtu;
u64 qpx_lato;
u64 qpx_rlimit;
u64 qpx_rnrlimit;
/* 0x100*/
u64 qpx_t;
u64 qpx_sqhp;
u64 qpx_sqptp;
u64 qpx_nspsn;
/* 0x120*/
u64 qpx_nspsnhwm;
u64 reserved1;
u64 qpx_sdsi;
u64 qpx_sdsbc;
/* 0x140*/
u64 qpx_sqwsize;
u64 qpx_sqwts;
u64 qpx_lsn;
u64 qpx_nssn;
/* 0x160 */
u64 qpx_mor;
u64 qpx_cor;
u64 qpx_sqsize;
u64 qpx_erc;
/* 0x180*/
u64 qpx_rnrrc;
u64 qpx_ernrwt;
u64 qpx_rnrresp;
u64 qpx_lmsna;
/* 0x1a0 */
u64 qpx_sqhpc;
u64 qpx_sqcptp;
u64 qpx_sigt;
u64 qpx_wqecnt;
/* 0x1c0*/
u64 qpx_rqhp;
u64 qpx_rqptp;
u64 qpx_rqsize;
u64 qpx_nrr;
/* 0x1e0*/
u64 qpx_rdmac;
u64 qpx_nrpsn;
u64 qpx_lapsn;
u64 qpx_lcr;
/* 0x200*/
u64 qpx_rwc;
u64 qpx_rwva;
u64 qpx_rdsi;
u64 qpx_rdsbc;
/* 0x220*/
u64 qpx_rqwsize;
u64 qpx_crmsn;
u64 qpx_rdd;
u64 qpx_larpsn;
/* 0x240*/
u64 qpx_pd;
u64 qpx_scqn;
u64 qpx_rcqn;
u64 qpx_aeqn;
/* 0x260*/
u64 qpx_aaelog;
u64 qpx_ram;
u64 qpx_rdmaqe0;
u64 qpx_rdmaqe1;
/* 0x280*/
u64 qpx_rdmaqe2;
u64 qpx_rdmaqe3;
u64 qpx_nrpsnhwm;
/* 0x298*/
u64 reserved[(0x400 - 0x298) / 8];
/* 0x400 extended data */
u64 reserved_ext[(0x500 - 0x400) / 8];
/* 0x500 */
u64 reserved2[(0x1000 - 0x500) / 8];
/* 0x1000 */
};
#define QPX_SQADDER EHCA_BMASK_IBM(48, 63)
#define QPX_RQADDER EHCA_BMASK_IBM(48, 63)
#define QPX_AAELOG_RESET_SRQ_LIMIT EHCA_BMASK_IBM(3, 3)
#define QPTEMM_OFFSET(x) offsetof(struct hipz_qptemm, x)
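/*
 * Editorial note, stated as an assumption: EHCA_BMASK_IBM(from, to) uses
 * IBM bit numbering, where bit 0 is the most significant bit of the
 * 64-bit word.  A hypothetical helper extracting such a field:
 *
 *	static inline u64 ibm_field(u64 reg, int from, int to)
 *	{
 *		return (reg >> (63 - to)) & ((1ULL << (to - from + 1)) - 1);
 *	}
 *
 * With QPX_SQADDER = EHCA_BMASK_IBM(48, 63) this selects the low 16 bits.
 */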
/* MRMWPT Entry Memory Map */
struct hipz_mrmwmm {
/* 0x00 */
u64 mrx_hcr;
u64 mrx_c;
u64 mrx_herr;
u64 mrx_aer;
/* 0x20 */
u64 mrx_pp;
u64 reserved1;
u64 reserved2;
u64 reserved3;
/* 0x40 */
u64 reserved4[(0x200 - 0x40) / 8];
/* 0x200 */
u64 mrx_ctl[64];
};
#define MRMWMM_OFFSET(x) offsetof(struct hipz_mrmwmm, x)
struct hipz_qpedmm {
/* 0x00 */
u64 reserved0[(0x400) / 8];
/* 0x400 */
u64 qpedx_phh;
u64 qpedx_ppsgp;
/* 0x410 */
u64 qpedx_ppsgu;
u64 qpedx_ppdgp;
/* 0x420 */
u64 qpedx_ppdgu;
u64 qpedx_aph;
/* 0x430 */
u64 qpedx_apsgp;
u64 qpedx_apsgu;
/* 0x440 */
u64 qpedx_apdgp;
u64 qpedx_apdgu;
/* 0x450 */
u64 qpedx_apav;
u64 qpedx_apsav;
/* 0x460 */
u64 qpedx_hcr;
u64 reserved1[4];
/* 0x488 */
u64 qpedx_rrl0;
/* 0x490 */
u64 qpedx_rrrkey0;
u64 qpedx_rrva0;
/* 0x4a0 */
u64 reserved2;
u64 qpedx_rrl1;
/* 0x4b0 */
u64 qpedx_rrrkey1;
u64 qpedx_rrva1;
/* 0x4c0 */
u64 reserved3;
u64 qpedx_rrl2;
/* 0x4d0 */
u64 qpedx_rrrkey2;
u64 qpedx_rrva2;
/* 0x4e0 */
u64 reserved4;
u64 qpedx_rrl3;
/* 0x4f0 */
u64 qpedx_rrrkey3;
u64 qpedx_rrva3;
};
#define QPEDMM_OFFSET(x) offsetof(struct hipz_qpedmm, x)
/* CQ Table Entry Memory Map */
struct hipz_cqtemm {
u64 cqx_hcr;
u64 cqx_c;
u64 cqx_herr;
u64 cqx_aer;
/* 0x20 */
u64 cqx_ptp;
u64 cqx_tp;
u64 cqx_fec;
u64 cqx_feca;
/* 0x40 */
u64 cqx_ep;
u64 cqx_eq;
/* 0x50 */
u64 reserved1;
u64 cqx_n0;
/* 0x60 */
u64 cqx_n1;
u64 reserved2[(0x1000 - 0x60) / 8];
/* 0x1000 */
};
#define CQX_FEC_CQE_CNT EHCA_BMASK_IBM(32, 63)
#define CQX_FECADDER EHCA_BMASK_IBM(32, 63)
#define CQX_N0_GENERATE_SOLICITED_COMP_EVENT EHCA_BMASK_IBM(0, 0)
#define CQX_N1_GENERATE_COMP_EVENT EHCA_BMASK_IBM(0, 0)
#define CQTEMM_OFFSET(x) offsetof(struct hipz_cqtemm, x)
/* EQ Table Entry Memory Map */
struct hipz_eqtemm {
u64 eqx_hcr;
u64 eqx_c;
u64 eqx_herr;
u64 eqx_aer;
/* 0x20 */
u64 eqx_ptp;
u64 eqx_tp;
u64 eqx_ssba;
u64 eqx_psba;
/* 0x40 */
u64 eqx_cec;
u64 eqx_meql;
u64 eqx_xisbi;
u64 eqx_xisc;
/* 0x60 */
u64 eqx_it;
};
#define EQTEMM_OFFSET(x) offsetof(struct hipz_eqtemm, x)
/* access control defines for MR/MW */
#define HIPZ_ACCESSCTRL_L_WRITE 0x00800000
#define HIPZ_ACCESSCTRL_R_WRITE 0x00400000
#define HIPZ_ACCESSCTRL_R_READ 0x00200000
#define HIPZ_ACCESSCTRL_R_ATOMIC 0x00100000
#define HIPZ_ACCESSCTRL_MW_BIND 0x00080000
/* query hca response block */
struct hipz_query_hca {
u32 cur_reliable_dg;
u32 cur_qp;
u32 cur_cq;
u32 cur_eq;
u32 cur_mr;
u32 cur_mw;
u32 cur_ee_context;
u32 cur_mcast_grp;
u32 cur_qp_attached_mcast_grp;
u32 reserved1;
u32 cur_ipv6_qp;
u32 cur_eth_qp;
u32 cur_hp_mr;
u32 reserved2[3];
u32 max_rd_domain;
u32 max_qp;
u32 max_cq;
u32 max_eq;
u32 max_mr;
u32 max_hp_mr;
u32 max_mw;
u32 max_mrwpte;
u32 max_special_mrwpte;
u32 max_rd_ee_context;
u32 max_mcast_grp;
u32 max_total_mcast_qp_attach;
u32 max_mcast_qp_attach;
u32 max_raw_ipv6_qp;
u32 max_raw_ethy_qp;
u32 internal_clock_frequency;
u32 max_pd;
u32 max_ah;
u32 max_cqe;
u32 max_wqes_wq;
u32 max_partitions;
u32 max_rr_ee_context;
u32 max_rr_qp;
u32 max_rr_hca;
u32 max_act_wqs_ee_context;
u32 max_act_wqs_qp;
u32 max_sge;
u32 max_sge_rd;
u32 memory_page_size_supported;
u64 max_mr_size;
u32 local_ca_ack_delay;
u32 num_ports;
u32 vendor_id;
u32 vendor_part_id;
u32 hw_ver;
u64 node_guid;
u64 hca_cap_indicators;
u32 data_counter_register_size;
u32 max_shared_rq;
u32 max_isns_eq;
u32 max_neq;
} __attribute__ ((packed));
#define HCA_CAP_AH_PORT_NR_CHECK EHCA_BMASK_IBM( 0, 0)
#define HCA_CAP_ATOMIC EHCA_BMASK_IBM( 1, 1)
#define HCA_CAP_AUTO_PATH_MIG EHCA_BMASK_IBM( 2, 2)
#define HCA_CAP_BAD_P_KEY_CTR EHCA_BMASK_IBM( 3, 3)
#define HCA_CAP_SQD_RTS_PORT_CHANGE EHCA_BMASK_IBM( 4, 4)
#define HCA_CAP_CUR_QP_STATE_MOD EHCA_BMASK_IBM( 5, 5)
#define HCA_CAP_INIT_TYPE EHCA_BMASK_IBM( 6, 6)
#define HCA_CAP_PORT_ACTIVE_EVENT EHCA_BMASK_IBM( 7, 7)
#define HCA_CAP_Q_KEY_VIOL_CTR EHCA_BMASK_IBM( 8, 8)
#define HCA_CAP_WQE_RESIZE EHCA_BMASK_IBM( 9, 9)
#define HCA_CAP_RAW_PACKET_MCAST EHCA_BMASK_IBM(10, 10)
#define HCA_CAP_SHUTDOWN_PORT EHCA_BMASK_IBM(11, 11)
#define HCA_CAP_RC_LL_QP EHCA_BMASK_IBM(12, 12)
#define HCA_CAP_SRQ EHCA_BMASK_IBM(13, 13)
#define HCA_CAP_UD_LL_QP EHCA_BMASK_IBM(16, 16)
#define HCA_CAP_RESIZE_MR EHCA_BMASK_IBM(17, 17)
#define HCA_CAP_MINI_QP EHCA_BMASK_IBM(18, 18)
#define HCA_CAP_H_ALLOC_RES_SYNC EHCA_BMASK_IBM(19, 19)
/* query port response block */
struct hipz_query_port {
u32 state;
u32 bad_pkey_cntr;
u32 lmc;
u32 lid;
u32 subnet_timeout;
u32 qkey_viol_cntr;
u32 sm_sl;
u32 sm_lid;
u32 capability_mask;
u32 init_type_reply;
u32 pkey_tbl_len;
u32 gid_tbl_len;
u64 gid_prefix;
u32 port_nr;
u16 pkey_entries[16];
u8 reserved1[32];
u32 trent_size;
u32 trbuf_size;
u64 max_msg_sz;
u32 max_mtu;
u32 vl_cap;
u32 phys_pstate;
u32 phys_state;
u32 phys_speed;
u32 phys_width;
u8 reserved2[1884];
u64 guid_entries[255];
} __attribute__ ((packed));
#endif

View file

@@ -1,289 +0,0 @@
/*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* internal queue handling
*
* Authors: Waleri Fomin <fomin@de.ibm.com>
* Reinhard Ernst <rernst@de.ibm.com>
* Christoph Raisch <raisch@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/slab.h>
#include "ehca_tools.h"
#include "ipz_pt_fn.h"
#include "ehca_classes.h"
#define PAGES_PER_KPAGE (PAGE_SIZE >> EHCA_PAGESHIFT)
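/* Editorial note: e.g. 1 on 4K-page kernels, 16 on 64K-page POWER kernels */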
struct kmem_cache *small_qp_cache;
void *ipz_qpageit_get_inc(struct ipz_queue *queue)
{
void *ret = ipz_qeit_get(queue);
queue->current_q_offset += queue->pagesize;
if (queue->current_q_offset > queue->queue_length) {
queue->current_q_offset -= queue->pagesize;
ret = NULL;
}
if (((u64)ret) % queue->pagesize) {
ehca_gen_err("ERROR!! not at PAGE-Boundary");
return NULL;
}
return ret;
}
void *ipz_qeit_eq_get_inc(struct ipz_queue *queue)
{
void *ret = ipz_qeit_get(queue);
u64 last_entry_in_q = queue->queue_length - queue->qe_size;
queue->current_q_offset += queue->qe_size;
if (queue->current_q_offset > last_entry_in_q) {
queue->current_q_offset = 0;
queue->toggle_state = (~queue->toggle_state) & 1;
}
return ret;
}
int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset)
{
int i;
for (i = 0; i < queue->queue_length / queue->pagesize; i++) {
u64 page = __pa(queue->queue_pages[i]);
if (addr >= page && addr < page + queue->pagesize) {
*q_offset = addr - page + i * queue->pagesize;
return 0;
}
}
return -EINVAL;
}
#if PAGE_SHIFT < EHCA_PAGESHIFT
#error Kernel pages must be at least as large as eHCA pages (4K)!
#endif
/*
* allocate pages for queue:
* outer loop allocates whole kernel pages (page aligned) and
* inner loop divides a kernel page into smaller hca queue pages
*/
static int alloc_queue_pages(struct ipz_queue *queue, const u32 nr_of_pages)
{
int k, f = 0;
u8 *kpage;
while (f < nr_of_pages) {
kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
if (!kpage)
goto out;
for (k = 0; k < PAGES_PER_KPAGE && f < nr_of_pages; k++) {
queue->queue_pages[f] = (struct ipz_page *)kpage;
kpage += EHCA_PAGESIZE;
f++;
}
}
return 1;
out:
for (f = 0; f < nr_of_pages && queue->queue_pages[f];
f += PAGES_PER_KPAGE)
free_page((unsigned long)(queue->queue_pages)[f]);
return 0;
}
static int alloc_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
{
int order = ilog2(queue->pagesize) - 9;
struct ipz_small_queue_page *page;
unsigned long bit;
mutex_lock(&pd->lock);
if (!list_empty(&pd->free[order]))
page = list_entry(pd->free[order].next,
struct ipz_small_queue_page, list);
else {
page = kmem_cache_zalloc(small_qp_cache, GFP_KERNEL);
if (!page)
goto out;
page->page = get_zeroed_page(GFP_KERNEL);
if (!page->page) {
kmem_cache_free(small_qp_cache, page);
goto out;
}
list_add(&page->list, &pd->free[order]);
}
bit = find_first_zero_bit(page->bitmap, IPZ_SPAGE_PER_KPAGE >> order);
__set_bit(bit, page->bitmap);
page->fill++;
if (page->fill == IPZ_SPAGE_PER_KPAGE >> order)
list_move(&page->list, &pd->full[order]);
mutex_unlock(&pd->lock);
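	/*
	 * Editorial note: a small queue page of 512 << order bytes is chunk
	 * number 'bit' of the backing kernel page, so its address is
	 * page->page + (bit << (order + 9)).
	 */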
queue->queue_pages[0] = (void *)(page->page | (bit << (order + 9)));
queue->small_page = page;
queue->offset = bit << (order + 9);
return 1;
out:
ehca_err(pd->ib_pd.device, "failed to allocate small queue page");
mutex_unlock(&pd->lock);
return 0;
}
static void free_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
{
int order = ilog2(queue->pagesize) - 9;
struct ipz_small_queue_page *page = queue->small_page;
unsigned long bit;
int free_page = 0;
bit = ((unsigned long)queue->queue_pages[0] & ~PAGE_MASK)
>> (order + 9);
mutex_lock(&pd->lock);
__clear_bit(bit, page->bitmap);
page->fill--;
if (page->fill == 0) {
list_del(&page->list);
free_page = 1;
}
if (page->fill == (IPZ_SPAGE_PER_KPAGE >> order) - 1)
/* the page was full until we freed the chunk */
list_move_tail(&page->list, &pd->free[order]);
mutex_unlock(&pd->lock);
if (free_page) {
free_page(page->page);
kmem_cache_free(small_qp_cache, page);
}
}
int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
const u32 nr_of_pages, const u32 pagesize,
const u32 qe_size, const u32 nr_of_sg,
int is_small)
{
if (pagesize > PAGE_SIZE) {
ehca_gen_err("FATAL ERROR: pagesize=%x "
"is greater than kernel page size", pagesize);
return 0;
}
/* init queue fields */
queue->queue_length = nr_of_pages * pagesize;
queue->pagesize = pagesize;
queue->qe_size = qe_size;
queue->act_nr_of_sg = nr_of_sg;
queue->current_q_offset = 0;
queue->toggle_state = 1;
queue->small_page = NULL;
/* allocate queue page pointers */
queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *),
GFP_KERNEL | __GFP_NOWARN);
if (!queue->queue_pages) {
queue->queue_pages = vzalloc(nr_of_pages * sizeof(void *));
if (!queue->queue_pages) {
ehca_gen_err("Couldn't allocate queue page list");
return 0;
}
}
/* allocate actual queue pages */
if (is_small) {
if (!alloc_small_queue_page(queue, pd))
goto ipz_queue_ctor_exit0;
} else
if (!alloc_queue_pages(queue, nr_of_pages))
goto ipz_queue_ctor_exit0;
return 1;
ipz_queue_ctor_exit0:
ehca_gen_err("Couldn't alloc pages queue=%p "
"nr_of_pages=%x", queue, nr_of_pages);
kvfree(queue->queue_pages);
return 0;
}
int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue)
{
int i, nr_pages;
if (!queue || !queue->queue_pages) {
ehca_gen_dbg("queue or queue_pages is NULL");
return 0;
}
if (queue->small_page)
free_small_queue_page(queue, pd);
else {
nr_pages = queue->queue_length / queue->pagesize;
for (i = 0; i < nr_pages; i += PAGES_PER_KPAGE)
free_page((unsigned long)queue->queue_pages[i]);
}
kvfree(queue->queue_pages);
return 1;
}
int ehca_init_small_qp_cache(void)
{
small_qp_cache = kmem_cache_create("ehca_cache_small_qp",
sizeof(struct ipz_small_queue_page),
0, SLAB_HWCACHE_ALIGN, NULL);
if (!small_qp_cache)
return -ENOMEM;
return 0;
}
void ehca_cleanup_small_qp_cache(void)
{
kmem_cache_destroy(small_qp_cache);
}

View file

@@ -1,289 +0,0 @@
/*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* internal queue handling
*
* Authors: Waleri Fomin <fomin@de.ibm.com>
* Reinhard Ernst <rernst@de.ibm.com>
* Christoph Raisch <raisch@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __IPZ_PT_FN_H__
#define __IPZ_PT_FN_H__
#define EHCA_PAGESHIFT 12
#define EHCA_PAGESIZE 4096UL
#define EHCA_PAGEMASK (~(EHCA_PAGESIZE-1))
#define EHCA_PT_ENTRIES 512UL
#include "ehca_tools.h"
#include "ehca_qes.h"
struct ehca_pd;
struct ipz_small_queue_page;
extern struct kmem_cache *small_qp_cache;
/* struct generic ehca page */
struct ipz_page {
u8 entries[EHCA_PAGESIZE];
};
#define IPZ_SPAGE_PER_KPAGE (PAGE_SIZE / 512)
struct ipz_small_queue_page {
unsigned long page;
unsigned long bitmap[IPZ_SPAGE_PER_KPAGE / BITS_PER_LONG];
int fill;
void *mapped_addr;
u32 mmap_count;
struct list_head list;
};
/* struct generic queue in linux kernel virtual memory (kv) */
struct ipz_queue {
u64 current_q_offset; /* current queue entry */
struct ipz_page **queue_pages; /* array of pages belonging to queue */
u32 qe_size; /* queue entry size */
u32 act_nr_of_sg;
u32 queue_length; /* queue length allocated in bytes */
u32 pagesize;
u32 toggle_state; /* toggle flag - per page */
u32 offset; /* save offset within page for small_qp */
struct ipz_small_queue_page *small_page;
};
/*
* return current Queue Entry for a certain q_offset
* returns address (kv) of Queue Entry
*/
static inline void *ipz_qeit_calc(struct ipz_queue *queue, u64 q_offset)
{
struct ipz_page *current_page;
if (q_offset >= queue->queue_length)
return NULL;
current_page = (queue->queue_pages)[q_offset >> EHCA_PAGESHIFT];
return &current_page->entries[q_offset & (EHCA_PAGESIZE - 1)];
}
/*
* return current Queue Entry
* returns address (kv) of Queue Entry
*/
static inline void *ipz_qeit_get(struct ipz_queue *queue)
{
return ipz_qeit_calc(queue, queue->current_q_offset);
}
/*
 * return current Queue Page, increment Queue Page iterator from
 * page to page in struct ipz_queue; the last increment returns NULL
 * and does NOT wrap
 * returns address (kv) of Queue Page
 * warning don't use in parallel with ipz_qeit_get_inc()
*/
void *ipz_qpageit_get_inc(struct ipz_queue *queue);
/*
* return current Queue Entry, increment Queue Entry iterator by one
* step in struct ipz_queue, will wrap in ringbuffer
* returns address (kv) of Queue Entry BEFORE increment
* warning don't use in parallel with ipz_qpageit_get_inc()
*/
static inline void *ipz_qeit_get_inc(struct ipz_queue *queue)
{
void *ret = ipz_qeit_get(queue);
queue->current_q_offset += queue->qe_size;
if (queue->current_q_offset >= queue->queue_length) {
queue->current_q_offset = 0;
/* toggle the valid flag */
queue->toggle_state = (~queue->toggle_state) & 1;
}
return ret;
}
/*
* return a bool indicating whether current Queue Entry is valid
*/
static inline int ipz_qeit_is_valid(struct ipz_queue *queue)
{
struct ehca_cqe *cqe = ipz_qeit_get(queue);
return ((cqe->cqe_flags >> 7) == (queue->toggle_state & 1));
}
/*
* return current Queue Entry, increment Queue Entry iterator by one
* step in struct ipz_queue, will wrap in ringbuffer
* returns address (kv) of Queue Entry BEFORE increment
* returns 0 and does not increment, if wrong valid state
* warning don't use in parallel with ipz_qpageit_get_inc()
*/
static inline void *ipz_qeit_get_inc_valid(struct ipz_queue *queue)
{
return ipz_qeit_is_valid(queue) ? ipz_qeit_get_inc(queue) : NULL;
}
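/*
 * Editorial usage sketch, not part of the original driver: drain every
 * currently valid entry from a completion queue ring; handle_cqe() is a
 * hypothetical consumer callback.
 */
static inline int example_drain_cq(struct ipz_queue *queue,
				   void (*handle_cqe)(struct ehca_cqe *))
{
	struct ehca_cqe *cqe;
	int n = 0;

	while ((cqe = ipz_qeit_get_inc_valid(queue)) != NULL) {
		handle_cqe(cqe);
		n++;
	}
	return n;
}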
/*
* returns and resets Queue Entry iterator
* returns address (kv) of first Queue Entry
*/
static inline void *ipz_qeit_reset(struct ipz_queue *queue)
{
queue->current_q_offset = 0;
return ipz_qeit_get(queue);
}
/*
* return the q_offset corresponding to an absolute address
*/
int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset);
/*
* return the next queue offset. don't modify the queue.
*/
static inline u64 ipz_queue_advance_offset(struct ipz_queue *queue, u64 offset)
{
offset += queue->qe_size;
	if (offset >= queue->queue_length)
		offset = 0;
return offset;
}
/* struct generic page table */
struct ipz_pt {
u64 entries[EHCA_PT_ENTRIES];
};
/* struct page table for a queue, only to be used in pf */
struct ipz_qpt {
/* queue page tables (kv), use u64 because we know the element length */
u64 *qpts;
u32 n_qpts;
u32 n_ptes; /* number of page table entries */
u64 *current_pte_addr;
};
/*
 * constructor for an ipz_queue_t: placement new for ipz_queue_t,
 * new for all dependent data structures
* all QP Tables are the same
* flow:
* allocate+pin queue
* see ipz_qpt_ctor()
* returns true if ok, false if out of memory
*/
int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
const u32 nr_of_pages, const u32 pagesize,
const u32 qe_size, const u32 nr_of_sg,
int is_small);
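/*
 * Editorial usage sketch, not part of the original driver, error paths
 * condensed: build a normal-sized queue of 16 eHCA pages with 64-byte
 * entries, use it, then destroy it.
 *
 *	struct ipz_queue queue;
 *
 *	if (!ipz_queue_ctor(pd, &queue, 16, EHCA_PAGESIZE, 64, 0, 0))
 *		return -ENOMEM;
 *	...
 *	ipz_queue_dtor(pd, &queue);
 */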
/*
 * destructor for an ipz_queue_t
 * -# free queue
 * see ipz_queue_ctor()
 * returns true if ok, false if queue was a NULL ptr or free failed
*/
int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue);
/*
 * constructor for an ipz_qpt_t,
 * placement new for struct ipz_queue, new for all dependent data structures
* all QP Tables are the same,
* flow:
* -# allocate+pin queue
* -# initialise ptcb
* -# allocate+pin PTs
 * -# link PTs to a ring, according to HCA Arch, set bit 62 if needed
* -# the ring must have room for exactly nr_of_PTEs
* see ipz_qpt_ctor()
*/
void ipz_qpt_ctor(struct ipz_qpt *qpt,
const u32 nr_of_qes,
const u32 pagesize,
const u32 qe_size,
const u8 lowbyte, const u8 toggle,
u32 * act_nr_of_QEs, u32 * act_nr_of_pages);
/*
* return current Queue Entry, increment Queue Entry iterator by one
* step in struct ipz_queue, will wrap in ringbuffer
* returns address (kv) of Queue Entry BEFORE increment
* warning don't use in parallel with ipz_qpageit_get_inc()
* warning unpredictable results may occur if steps>act_nr_of_queue_entries
* fix EQ page problems
*/
void *ipz_qeit_eq_get_inc(struct ipz_queue *queue);
/*
* return current Event Queue Entry, increment Queue Entry iterator
* by one step in struct ipz_queue if valid, will wrap in ringbuffer
* returns address (kv) of Queue Entry BEFORE increment
* returns 0 and does not increment, if wrong valid state
 * warning don't use in parallel with ipz_qpageit_get_inc()
* warning unpredictable results may occur if steps>act_nr_of_queue_entries
*/
static inline void *ipz_eqit_eq_get_inc_valid(struct ipz_queue *queue)
{
void *ret = ipz_qeit_get(queue);
u32 qe = *(u8 *)ret;
if ((qe >> 7) != (queue->toggle_state & 1))
return NULL;
ipz_qeit_eq_get_inc(queue); /* this is a good one */
return ret;
}
static inline void *ipz_eqit_eq_peek_valid(struct ipz_queue *queue)
{
void *ret = ipz_qeit_get(queue);
u32 qe = *(u8 *)ret;
if ((qe >> 7) != (queue->toggle_state & 1))
return NULL;
return ret;
}
/* returns address (GX) of first queue entry */
static inline u64 ipz_qpt_get_firstpage(struct ipz_qpt *qpt)
{
return be64_to_cpu(qpt->qpts[0]);
}
/* returns address (kv) of first page of queue page table */
static inline void *ipz_qpt_get_qpt(struct ipz_qpt *qpt)
{
return qpt->qpts;
}
#endif /* __IPZ_PT_FN_H__ */