RDMA/amso1100: Start of endianness annotation
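
The pattern throughout the patch is to give adapter-visible fields the sparse byte-order types (__be16/__be32/__be64) and to add (__force) casts where such values pass through the raw MMIO helpers (__raw_writew(), readl(), ...) that take or return plain integers; the casts are annotation only and change no generated code. As a rough illustration (an assumed, standalone userspace sketch, not driver code: be16, cpu_to_be16() and raw_writew() are simplified stand-ins for the kernel's __be16, cpu_to_be16() and __raw_writew(), and the byte swap assumes a little-endian host):

    /* sparse_force_sketch.c -- build with gcc, or run sparse over it to see
     * the warning that the __force cast below silences. */
    #include <stdint.h>
    #include <stdio.h>

    #ifdef __CHECKER__                      /* defined while sparse runs */
    # define __bitwise __attribute__((bitwise))
    # define __force   __attribute__((force))
    #else
    # define __bitwise
    # define __force
    #endif

    typedef uint16_t __bitwise be16;        /* stand-in for __be16 */

    /* Stand-in for cpu_to_be16(); swaps unconditionally, so it assumes a
     * little-endian host. */
    static inline be16 cpu_to_be16(uint16_t x)
    {
            return (__force be16)(uint16_t)((x << 8) | (x >> 8));
    }

    /* Stand-in for __raw_writew(): raw register writes take a plain u16. */
    static inline void raw_writew(uint16_t val, volatile uint16_t *addr)
    {
            *addr = val;
    }

    int main(void)
    {
            volatile uint16_t fake_reg = 0;

            /* Without the (__force uint16_t) cast, sparse flags the mix of a
             * byte-order-annotated value and a plain uint16_t -- exactly the
             * class of bug the annotation is meant to catch. */
            raw_writew((__force uint16_t) cpu_to_be16(0x1234), &fake_reg);

            printf("register holds 0x%04x\n", (unsigned) fake_reg);
            return 0;
    }

Where a value stays byte-order-typed end to end (for example the c2_wr.h fields switched to __be32 below), no cast is needed and sparse checks the conversions directly.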

Signed-off-by: Roland Dreier <rolandd@cisco.com>
Acked-by: Steve Wise <swise@opengridcomputing.com>
Roland Dreier 2008-04-16 21:01:08 -07:00
parent d23b9d8ff2
commit dc544bc9cb
8 changed files with 166 additions and 160 deletions

drivers/infiniband/hw/amso1100/c2.c

@@ -130,10 +130,10 @@ static int c2_tx_ring_alloc(struct c2_ring *tx_ring, void *vaddr,
tx_desc->status = 0;
/* Set TXP_HTXD_UNINIT */
__raw_writeq(cpu_to_be64(0x1122334455667788ULL),
__raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
(void __iomem *) txp_desc + C2_TXP_ADDR);
__raw_writew(0, (void __iomem *) txp_desc + C2_TXP_LEN);
__raw_writew(cpu_to_be16(TXP_HTXD_UNINIT),
__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
(void __iomem *) txp_desc + C2_TXP_FLAGS);
elem->skb = NULL;
@@ -179,13 +179,13 @@ static int c2_rx_ring_alloc(struct c2_ring *rx_ring, void *vaddr,
rx_desc->status = 0;
/* Set RXP_HRXD_UNINIT */
__raw_writew(cpu_to_be16(RXP_HRXD_OK),
__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_OK),
(void __iomem *) rxp_desc + C2_RXP_STATUS);
__raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_COUNT);
__raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_LEN);
__raw_writeq(cpu_to_be64(0x99aabbccddeeffULL),
__raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
(void __iomem *) rxp_desc + C2_RXP_ADDR);
__raw_writew(cpu_to_be16(RXP_HRXD_UNINIT),
__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
(void __iomem *) rxp_desc + C2_RXP_FLAGS);
elem->skb = NULL;
@@ -239,10 +239,11 @@ static inline int c2_rx_alloc(struct c2_port *c2_port, struct c2_element *elem)
rxp_hdr->flags = RXP_HRXD_READY;
__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
__raw_writew(cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)),
__raw_writew((__force u16) cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)),
elem->hw_desc + C2_RXP_LEN);
__raw_writeq(cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR);
__raw_writew(cpu_to_be16(RXP_HRXD_READY), elem->hw_desc + C2_RXP_FLAGS);
__raw_writeq((__force u64) cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR);
__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
elem->hw_desc + C2_RXP_FLAGS);
elem->skb = skb;
elem->mapaddr = mapaddr;
@@ -290,9 +291,9 @@ static void c2_rx_clean(struct c2_port *c2_port)
__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
__raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
__raw_writew(0, elem->hw_desc + C2_RXP_LEN);
__raw_writeq(cpu_to_be64(0x99aabbccddeeffULL),
__raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
elem->hw_desc + C2_RXP_ADDR);
__raw_writew(cpu_to_be16(RXP_HRXD_UNINIT),
__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
elem->hw_desc + C2_RXP_FLAGS);
if (elem->skb) {
@@ -346,16 +347,16 @@ static void c2_tx_clean(struct c2_port *c2_port)
elem->hw_desc + C2_TXP_LEN);
__raw_writeq(0,
elem->hw_desc + C2_TXP_ADDR);
__raw_writew(cpu_to_be16(TXP_HTXD_DONE),
__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_DONE),
elem->hw_desc + C2_TXP_FLAGS);
c2_port->netstats.tx_dropped++;
break;
} else {
__raw_writew(0,
elem->hw_desc + C2_TXP_LEN);
__raw_writeq(cpu_to_be64(0x1122334455667788ULL),
__raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
elem->hw_desc + C2_TXP_ADDR);
__raw_writew(cpu_to_be16(TXP_HTXD_UNINIT),
__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
elem->hw_desc + C2_TXP_FLAGS);
}
@@ -390,7 +391,7 @@ static void c2_tx_interrupt(struct net_device *netdev)
for (elem = tx_ring->to_clean; elem != tx_ring->to_use;
elem = elem->next) {
txp_htxd.flags =
be16_to_cpu(readw(elem->hw_desc + C2_TXP_FLAGS));
be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_FLAGS));
if (txp_htxd.flags != TXP_HTXD_DONE)
break;
@@ -398,7 +399,7 @@ static void c2_tx_interrupt(struct net_device *netdev)
if (netif_msg_tx_done(c2_port)) {
/* PCI reads are expensive in fast path */
txp_htxd.len =
be16_to_cpu(readw(elem->hw_desc + C2_TXP_LEN));
be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_LEN));
pr_debug("%s: tx done slot %3Zu status 0x%x len "
"%5u bytes\n",
netdev->name, elem - tx_ring->start,
@@ -448,10 +449,12 @@ static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)
/* Write the descriptor to the adapter's rx ring */
__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
__raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
__raw_writew(cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
__raw_writew((__force u16) cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
elem->hw_desc + C2_RXP_LEN);
__raw_writeq(cpu_to_be64(elem->mapaddr), elem->hw_desc + C2_RXP_ADDR);
__raw_writew(cpu_to_be16(RXP_HRXD_READY), elem->hw_desc + C2_RXP_FLAGS);
__raw_writeq((__force u64) cpu_to_be64(elem->mapaddr),
elem->hw_desc + C2_RXP_ADDR);
__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
elem->hw_desc + C2_RXP_FLAGS);
pr_debug("packet dropped\n");
c2_port->netstats.rx_dropped++;
@@ -653,7 +656,7 @@ static int c2_up(struct net_device *netdev)
i++, elem++) {
rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
rxp_hdr->flags = 0;
__raw_writew(cpu_to_be16(RXP_HRXD_READY),
__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
elem->hw_desc + C2_RXP_FLAGS);
}
@@ -787,9 +790,12 @@ static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
elem->maplen = maplen;
/* Tell HW to xmit */
__raw_writeq(cpu_to_be64(mapaddr), elem->hw_desc + C2_TXP_ADDR);
__raw_writew(cpu_to_be16(maplen), elem->hw_desc + C2_TXP_LEN);
__raw_writew(cpu_to_be16(TXP_HTXD_READY), elem->hw_desc + C2_TXP_FLAGS);
__raw_writeq((__force u64) cpu_to_be64(mapaddr),
elem->hw_desc + C2_TXP_ADDR);
__raw_writew((__force u16) cpu_to_be16(maplen),
elem->hw_desc + C2_TXP_LEN);
__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
elem->hw_desc + C2_TXP_FLAGS);
c2_port->netstats.tx_packets++;
c2_port->netstats.tx_bytes += maplen;
@@ -810,11 +816,11 @@ static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
elem->maplen = maplen;
/* Tell HW to xmit */
__raw_writeq(cpu_to_be64(mapaddr),
__raw_writeq((__force u64) cpu_to_be64(mapaddr),
elem->hw_desc + C2_TXP_ADDR);
__raw_writew(cpu_to_be16(maplen),
__raw_writew((__force u16) cpu_to_be16(maplen),
elem->hw_desc + C2_TXP_LEN);
__raw_writew(cpu_to_be16(TXP_HTXD_READY),
__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
elem->hw_desc + C2_TXP_FLAGS);
c2_port->netstats.tx_packets++;
@@ -1029,10 +1035,10 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
}
/* Validate the adapter version */
if (be32_to_cpu(readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
printk(KERN_ERR PFX "Version mismatch "
"[fw=%u, c2=%u], Adapter not claimed\n",
be32_to_cpu(readl(mmio_regs + C2_REGS_VERS)),
be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)),
C2_VERSION);
ret = -EINVAL;
iounmap(mmio_regs);
@@ -1040,12 +1046,12 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
}
/* Validate the adapter IVN */
if (be32_to_cpu(readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
printk(KERN_ERR PFX "Downlevel FIrmware level. You should be using "
"the OpenIB device support kit. "
"[fw=0x%x, c2=0x%x], Adapter not claimed\n",
be32_to_cpu(readl(mmio_regs + C2_REGS_IVN)),
C2_IVN);
be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)),
C2_IVN);
ret = -EINVAL;
iounmap(mmio_regs);
goto bail2;
@@ -1068,7 +1074,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
/* Get the last RX index */
c2dev->cur_rx =
(be32_to_cpu(readl(mmio_regs + C2_REGS_HRX_CUR)) -
(be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_HRX_CUR)) -
0xffffc000) / sizeof(struct c2_rxp_desc);
/* Request an interrupt line for the driver */
@@ -1090,7 +1096,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
}
/* Save off the actual size prior to unmapping mmio_regs */
kva_map_size = be32_to_cpu(readl(mmio_regs + C2_REGS_PCI_WINSIZE));
kva_map_size = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_PCI_WINSIZE));
/* Unmap the adapter PCI registers in BAR4 */
iounmap(mmio_regs);

drivers/infiniband/hw/amso1100/c2.h

@@ -346,7 +346,7 @@ struct c2_dev {
// spinlock_t aeq_lock;
// spinlock_t rnic_lock;
u16 *hint_count;
__be16 *hint_count;
dma_addr_t hint_count_dma;
u16 hints_read;
@@ -425,10 +425,10 @@ static inline void __raw_writeq(u64 val, void __iomem * addr)
#endif
#define C2_SET_CUR_RX(c2dev, cur_rx) \
__raw_writel(cpu_to_be32(cur_rx), c2dev->mmio_txp_ring + 4092)
__raw_writel((__force u32) cpu_to_be32(cur_rx), c2dev->mmio_txp_ring + 4092)
#define C2_GET_CUR_RX(c2dev) \
be32_to_cpu(readl(c2dev->mmio_txp_ring + 4092))
be32_to_cpu((__force __be32) readl(c2dev->mmio_txp_ring + 4092))
static inline struct c2_dev *to_c2dev(struct ib_device *ibdev)
{
@@ -485,8 +485,8 @@ extern void c2_unregister_device(struct c2_dev *c2dev);
extern int c2_rnic_init(struct c2_dev *c2dev);
extern void c2_rnic_term(struct c2_dev *c2dev);
extern void c2_rnic_interrupt(struct c2_dev *c2dev);
extern int c2_del_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask);
extern int c2_add_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask);
extern int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
extern int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
/* QPs */
extern int c2_alloc_qp(struct c2_dev *c2dev, struct c2_pd *pd,

drivers/infiniband/hw/amso1100/c2_ae.c

@@ -195,7 +195,7 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
"resource=%x, qp_state=%s\n",
__FUNCTION__,
to_event_str(event_id),
(unsigned long long) be64_to_cpu(wr->ae.ae_generic.user_context),
(unsigned long long) wr->ae.ae_generic.user_context,
be32_to_cpu(wr->ae.ae_generic.resource_type),
be32_to_cpu(wr->ae.ae_generic.resource),
to_qp_state_str(be32_to_cpu(wr->ae.ae_generic.qp_state)));

drivers/infiniband/hw/amso1100/c2_mm.c

@@ -45,7 +45,7 @@
* Reply buffer _is_ freed by this function.
*/
static int
send_pbl_messages(struct c2_dev *c2dev, u32 stag_index,
send_pbl_messages(struct c2_dev *c2dev, __be32 stag_index,
unsigned long va, u32 pbl_depth,
struct c2_vq_req *vq_req, int pbl_type)
{

drivers/infiniband/hw/amso1100/c2_mq.c

@@ -64,7 +64,7 @@ void c2_mq_produce(struct c2_mq *q)
q->priv = (q->priv + 1) % q->q_size;
q->hint_count++;
/* Update peer's offset. */
__raw_writew(cpu_to_be16(q->priv), &q->peer->shared);
__raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared);
}
}
@@ -105,7 +105,7 @@ void c2_mq_free(struct c2_mq *q)
#endif
q->priv = (q->priv + 1) % q->q_size;
/* Update peer's offset. */
__raw_writew(cpu_to_be16(q->priv), &q->peer->shared);
__raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared);
}
}

drivers/infiniband/hw/amso1100/c2_mq.h

@@ -75,7 +75,7 @@ struct c2_mq {
u16 hint_count;
u16 priv;
struct c2_mq_shared __iomem *peer;
u16 *shared;
__be16 *shared;
dma_addr_t shared_dma;
u32 q_size;
u32 msg_size;

drivers/infiniband/hw/amso1100/c2_rnic.c

@@ -208,7 +208,7 @@ static int c2_rnic_query(struct c2_dev *c2dev, struct ib_device_attr *props)
/*
* Add an IP address to the RNIC interface
*/
int c2_add_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask)
int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask)
{
struct c2_vq_req *vq_req;
struct c2wr_rnic_setconfig_req *wr;
@@ -270,7 +270,7 @@ int c2_add_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask)
/*
* Delete an IP address from the RNIC interface
*/
int c2_del_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask)
int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask)
{
struct c2_vq_req *vq_req;
struct c2wr_rnic_setconfig_req *wr;
@@ -506,17 +506,17 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
mmio_regs = c2dev->kva;
/* Initialize the Verbs Request Queue */
c2_mq_req_init(&c2dev->req_vq, 0,
be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_QSIZE)),
be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_MSGSIZE)),
be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_QSIZE)),
be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_MSGSIZE)),
mmio_regs +
be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_POOLSTART)),
be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_POOLSTART)),
mmio_regs +
be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_SHARED)),
be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_SHARED)),
C2_MQ_ADAPTER_TARGET);
/* Initialize the Verbs Reply Queue */
qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_QSIZE));
msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_QSIZE));
msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
&c2dev->rep_vq.host_dma, GFP_KERNEL);
if (!q1_pages) {
@@ -532,12 +532,12 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
msgsize,
q1_pages,
mmio_regs +
be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_SHARED)),
be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_SHARED)),
C2_MQ_HOST_TARGET);
/* Initialize the Asynchronus Event Queue */
qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_QSIZE));
msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_QSIZE));
msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
&c2dev->aeq.host_dma, GFP_KERNEL);
if (!q2_pages) {
@@ -553,7 +553,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
msgsize,
q2_pages,
mmio_regs +
be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_SHARED)),
be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_SHARED)),
C2_MQ_HOST_TARGET);
/* Initialize the verbs request allocator */

drivers/infiniband/hw/amso1100/c2_wr.h

@@ -180,8 +180,8 @@ enum c2_wr_type {
};
struct c2_netaddr {
u32 ip_addr;
u32 netmask;
__be32 ip_addr;
__be32 netmask;
u32 mtu;
};
@@ -199,9 +199,9 @@ struct c2_route {
* A Scatter Gather Entry.
*/
struct c2_data_addr {
u32 stag;
u32 length;
u64 to;
__be32 stag;
__be32 length;
__be64 to;
};
/*
@@ -274,7 +274,7 @@ struct c2wr_hdr {
* from the host to adapter by libccil, but we copy it anyway
* to make the memcpy to the adapter better aligned.
*/
u32 wqe_count;
__be32 wqe_count;
/* Put these fields next so that later 32- and 64-bit
* quantities are naturally aligned.
@@ -316,8 +316,8 @@ enum c2_rnic_flags {
struct c2wr_rnic_open_req {
struct c2wr_hdr hdr;
u64 user_context;
u16 flags; /* See enum c2_rnic_flags */
u16 port_num;
__be16 flags; /* See enum c2_rnic_flags */
__be16 port_num;
} __attribute__((packed));
struct c2wr_rnic_open_rep {
@@ -341,30 +341,30 @@ struct c2wr_rnic_query_req {
struct c2wr_rnic_query_rep {
struct c2wr_hdr hdr;
u64 user_context;
u32 vendor_id;
u32 part_number;
u32 hw_version;
u32 fw_ver_major;
u32 fw_ver_minor;
u32 fw_ver_patch;
__be32 vendor_id;
__be32 part_number;
__be32 hw_version;
__be32 fw_ver_major;
__be32 fw_ver_minor;
__be32 fw_ver_patch;
char fw_ver_build_str[WR_BUILD_STR_LEN];
u32 max_qps;
u32 max_qp_depth;
__be32 max_qps;
__be32 max_qp_depth;
u32 max_srq_depth;
u32 max_send_sgl_depth;
u32 max_rdma_sgl_depth;
u32 max_cqs;
u32 max_cq_depth;
__be32 max_cqs;
__be32 max_cq_depth;
u32 max_cq_event_handlers;
u32 max_mrs;
__be32 max_mrs;
u32 max_pbl_depth;
u32 max_pds;
u32 max_global_ird;
__be32 max_pds;
__be32 max_global_ird;
u32 max_global_ord;
u32 max_qp_ird;
u32 max_qp_ord;
__be32 max_qp_ird;
__be32 max_qp_ord;
u32 flags;
u32 max_mws;
__be32 max_mws;
u32 pbe_range_low;
u32 pbe_range_high;
u32 max_srqs;
@@ -405,7 +405,7 @@ union c2wr_rnic_getconfig {
struct c2wr_rnic_setconfig_req {
struct c2wr_hdr hdr;
u32 rnic_handle;
u32 option; /* See c2_setconfig_cmd_t */
__be32 option; /* See c2_setconfig_cmd_t */
/* variable data and pad. See c2_netaddr and c2_route */
u8 data[0];
} __attribute__((packed)) ;
@@ -441,18 +441,18 @@ union c2wr_rnic_close {
*/
struct c2wr_cq_create_req {
struct c2wr_hdr hdr;
u64 shared_ht;
__be64 shared_ht;
u64 user_context;
u64 msg_pool;
__be64 msg_pool;
u32 rnic_handle;
u32 msg_size;
u32 depth;
__be32 msg_size;
__be32 depth;
} __attribute__((packed)) ;
struct c2wr_cq_create_rep {
struct c2wr_hdr hdr;
u32 mq_index;
u32 adapter_shared;
__be32 mq_index;
__be32 adapter_shared;
u32 cq_handle;
} __attribute__((packed)) ;
@@ -585,40 +585,40 @@ enum c2wr_qp_flags {
struct c2wr_qp_create_req {
struct c2wr_hdr hdr;
u64 shared_sq_ht;
u64 shared_rq_ht;
__be64 shared_sq_ht;
__be64 shared_rq_ht;
u64 user_context;
u32 rnic_handle;
u32 sq_cq_handle;
u32 rq_cq_handle;
u32 sq_depth;
u32 rq_depth;
__be32 sq_depth;
__be32 rq_depth;
u32 srq_handle;
u32 srq_limit;
u32 flags; /* see enum c2wr_qp_flags */
u32 send_sgl_depth;
u32 recv_sgl_depth;
u32 rdma_write_sgl_depth;
u32 ord;
u32 ird;
__be32 flags; /* see enum c2wr_qp_flags */
__be32 send_sgl_depth;
__be32 recv_sgl_depth;
__be32 rdma_write_sgl_depth;
__be32 ord;
__be32 ird;
u32 pd_id;
} __attribute__((packed)) ;
struct c2wr_qp_create_rep {
struct c2wr_hdr hdr;
u32 sq_depth;
u32 rq_depth;
__be32 sq_depth;
__be32 rq_depth;
u32 send_sgl_depth;
u32 recv_sgl_depth;
u32 rdma_write_sgl_depth;
u32 ord;
u32 ird;
u32 sq_msg_size;
u32 sq_mq_index;
u32 sq_mq_start;
u32 rq_msg_size;
u32 rq_mq_index;
u32 rq_mq_start;
__be32 sq_msg_size;
__be32 sq_mq_index;
__be32 sq_mq_start;
__be32 rq_msg_size;
__be32 rq_mq_index;
__be32 rq_mq_start;
u32 qp_handle;
} __attribute__((packed)) ;
@@ -667,11 +667,11 @@ struct c2wr_qp_modify_req {
u32 stream_msg_length;
u32 rnic_handle;
u32 qp_handle;
u32 next_qp_state;
u32 ord;
u32 ird;
u32 sq_depth;
u32 rq_depth;
__be32 next_qp_state;
__be32 ord;
__be32 ird;
__be32 sq_depth;
__be32 rq_depth;
u32 llp_ep_handle;
} __attribute__((packed)) ;
@@ -721,10 +721,10 @@ struct c2wr_qp_connect_req {
struct c2wr_hdr hdr;
u32 rnic_handle;
u32 qp_handle;
u32 remote_addr;
u16 remote_port;
__be32 remote_addr;
__be16 remote_port;
u16 pad;
u32 private_data_length;
__be32 private_data_length;
u8 private_data[0]; /* Private data in-line. */
} __attribute__((packed)) ;
@@ -759,25 +759,25 @@ union c2wr_nsmr_stag_alloc {
struct c2wr_nsmr_register_req {
struct c2wr_hdr hdr;
u64 va;
__be64 va;
u32 rnic_handle;
u16 flags;
__be16 flags;
u8 stag_key;
u8 pad;
u32 pd_id;
u32 pbl_depth;
u32 pbe_size;
u32 fbo;
u32 length;
u32 addrs_length;
__be32 pbl_depth;
__be32 pbe_size;
__be32 fbo;
__be32 length;
__be32 addrs_length;
/* array of paddrs (must be aligned on a 64bit boundary) */
u64 paddrs[0];
__be64 paddrs[0];
} __attribute__((packed)) ;
struct c2wr_nsmr_register_rep {
struct c2wr_hdr hdr;
u32 pbl_depth;
u32 stag_index;
__be32 stag_index;
} __attribute__((packed)) ;
union c2wr_nsmr_register {
@@ -788,11 +788,11 @@ union c2wr_nsmr_register {
struct c2wr_nsmr_pbl_req {
struct c2wr_hdr hdr;
u32 rnic_handle;
u32 flags;
u32 stag_index;
u32 addrs_length;
__be32 flags;
__be32 stag_index;
__be32 addrs_length;
/* array of paddrs (must be aligned on a 64bit boundary) */
u64 paddrs[0];
__be64 paddrs[0];
} __attribute__((packed)) ;
struct c2wr_nsmr_pbl_rep {
@@ -847,7 +847,7 @@ union c2wr_mw_query {
struct c2wr_stag_dealloc_req {
struct c2wr_hdr hdr;
u32 rnic_handle;
u32 stag_index;
__be32 stag_index;
} __attribute__((packed)) ;
struct c2wr_stag_dealloc_rep {
@@ -949,7 +949,7 @@ struct c2wr_ce {
u64 qp_user_context; /* c2_user_qp_t * */
u32 qp_state; /* Current QP State */
u32 handle; /* QPID or EP Handle */
u32 bytes_rcvd; /* valid for RECV WCs */
__be32 bytes_rcvd; /* valid for RECV WCs */
u32 stag;
} __attribute__((packed)) ;
@@ -984,8 +984,8 @@ struct c2_rq_hdr {
*/
struct c2wr_send_req {
struct c2_sq_hdr sq_hdr;
u32 sge_len;
u32 remote_stag;
__be32 sge_len;
__be32 remote_stag;
u8 data[0]; /* SGE array */
} __attribute__((packed));
@@ -996,9 +996,9 @@ union c2wr_send {
struct c2wr_rdma_write_req {
struct c2_sq_hdr sq_hdr;
u64 remote_to;
u32 remote_stag;
u32 sge_len;
__be64 remote_to;
__be32 remote_stag;
__be32 sge_len;
u8 data[0]; /* SGE array */
} __attribute__((packed));
@@ -1009,11 +1009,11 @@ union c2wr_rdma_write {
struct c2wr_rdma_read_req {
struct c2_sq_hdr sq_hdr;
u64 local_to;
u64 remote_to;
u32 local_stag;
u32 remote_stag;
u32 length;
__be64 local_to;
__be64 remote_to;
__be32 local_stag;
__be32 remote_stag;
__be32 length;
} __attribute__((packed));
union c2wr_rdma_read {
@@ -1113,9 +1113,9 @@ union c2wr_recv {
struct c2wr_ae_hdr {
struct c2wr_hdr hdr;
u64 user_context; /* user context for this res. */
u32 resource_type; /* see enum c2_resource_indicator */
u32 resource; /* handle for resource */
u32 qp_state; /* current QP State */
__be32 resource_type; /* see enum c2_resource_indicator */
__be32 resource; /* handle for resource */
__be32 qp_state; /* current QP State */
} __attribute__((packed));
/*
@@ -1124,11 +1124,11 @@ struct c2wr_ae_hdr {
*/
struct c2wr_ae_active_connect_results {
struct c2wr_ae_hdr ae_hdr;
u32 laddr;
u32 raddr;
u16 lport;
u16 rport;
u32 private_data_length;
__be32 laddr;
__be32 raddr;
__be16 lport;
__be16 rport;
__be32 private_data_length;
u8 private_data[0]; /* data is in-line in the msg. */
} __attribute__((packed));
@@ -1142,11 +1142,11 @@ struct c2wr_ae_active_connect_results {
struct c2wr_ae_connection_request {
struct c2wr_ae_hdr ae_hdr;
u32 cr_handle; /* connreq handle (sock ptr) */
u32 laddr;
u32 raddr;
u16 lport;
u16 rport;
u32 private_data_length;
__be32 laddr;
__be32 raddr;
__be16 lport;
__be16 rport;
__be32 private_data_length;
u8 private_data[0]; /* data is in-line in the msg. */
} __attribute__((packed));
@@ -1158,12 +1158,12 @@ union c2wr_ae {
struct c2wr_init_req {
struct c2wr_hdr hdr;
u64 hint_count;
u64 q0_host_shared;
u64 q1_host_shared;
u64 q1_host_msg_pool;
u64 q2_host_shared;
u64 q2_host_msg_pool;
__be64 hint_count;
__be64 q0_host_shared;
__be64 q1_host_shared;
__be64 q1_host_msg_pool;
__be64 q2_host_shared;
__be64 q2_host_msg_pool;
} __attribute__((packed));
struct c2wr_init_rep {
@@ -1276,10 +1276,10 @@ struct c2wr_ep_listen_create_req {
struct c2wr_hdr hdr;
u64 user_context; /* returned in AEs. */
u32 rnic_handle;
u32 local_addr; /* local addr, or 0 */
u16 local_port; /* 0 means "pick one" */
__be32 local_addr; /* local addr, or 0 */
__be16 local_port; /* 0 means "pick one" */
u16 pad;
u32 backlog; /* tradional tcp listen bl */
__be32 backlog; /* tradional tcp listen bl */
} __attribute__((packed));
struct c2wr_ep_listen_create_rep {
@@ -1340,7 +1340,7 @@ struct c2wr_cr_accept_req {
u32 rnic_handle;
u32 qp_handle; /* QP to bind to this LLP conn */
u32 ep_handle; /* LLP handle to accept */
u32 private_data_length;
__be32 private_data_length;
u8 private_data[0]; /* data in-line in msg. */
} __attribute__((packed));
@@ -1508,7 +1508,7 @@ static __inline__ void c2_wr_set_sge_count(void *wr, u8 sge_count)
{
((struct c2wr_hdr *) wr)->sge_count = sge_count;
}
static __inline__ u32 c2_wr_get_wqe_count(void *wr)
static __inline__ __be32 c2_wr_get_wqe_count(void *wr)
{
return ((struct c2wr_hdr *) wr)->wqe_count;
}