IB/ipath: Enable reduced PIO update for HCAs that support it.

Newer HCAs have a threshold counter to reduce the number of DMAs the
chip makes to update the PIO buffer availability status bits.  This
patch enables the feature.

Signed-off-by: Dave Olson <dave.olson@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Authored by Dave Olson on 2008-04-16 21:09:30 -07:00; committed by Roland Dreier
parent 0ab6b2b9ab
commit 1d7c2e529f
3 changed files with 45 additions and 1 deletion
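
As the message above describes, newer chips have a threshold counter that reduces how often the chip DMAs the PIO buffer-availability status bits to host memory. The patch programs that threshold into the UPDTHRESH field of the send-control register. Below is a condensed, standalone sketch of the read-modify-write pattern the hunks use; the helper name is illustrative only, while the constants, lock, and register accessors are the driver's own:

static void set_pioupd_thresh(struct ipath_devdata *dd, u32 thresh)
{
        unsigned long flags;

        spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
        /* clear the old UPDTHRESH field, then insert the new value */
        dd->ipath_sendctrl &= ~(INFINIPATH_S_UPDTHRESH_MASK
                                << INFINIPATH_S_UPDTHRESH_SHIFT);
        dd->ipath_sendctrl |= (u64) thresh << INFINIPATH_S_UPDTHRESH_SHIFT;
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
        /* read scratch to flush the write to the chip, as the init-path hunks do */
        ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
        spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
}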

drivers/infiniband/hw/ipath/ipath_file_ops.c

@@ -184,6 +184,29 @@ static int ipath_get_base_info(struct file *fp,
                kinfo->spi_piobufbase = (u64) pd->port_piobufs +
                        dd->ipath_palign * kinfo->spi_piocnt * slave;
        }
        /*
         * Set the PIO avail update threshold to no larger
         * than the number of buffers per process. Note that
         * we decrease it here, but won't ever increase it.
         */
        if (dd->ipath_pioupd_thresh &&
            kinfo->spi_piocnt < dd->ipath_pioupd_thresh) {
                unsigned long flags;
                dd->ipath_pioupd_thresh = kinfo->spi_piocnt;
                ipath_dbg("Decreased pio update threshold to %u\n",
                        dd->ipath_pioupd_thresh);
                spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
                dd->ipath_sendctrl &= ~(INFINIPATH_S_UPDTHRESH_MASK
                        << INFINIPATH_S_UPDTHRESH_SHIFT);
                dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
                        << INFINIPATH_S_UPDTHRESH_SHIFT;
                ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                        dd->ipath_sendctrl);
                spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
        }
        if (shared) {
                kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase +
                        dd->ipath_ureg_align * pd->port_port;
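
A condensed way to read the clamp above (hypothetical helper, not part of the patch): the threshold only ever moves down, toward the smallest per-process buffer count handed out, so the chip is never asked to batch more availability updates than one process has buffers. A zero threshold is left alone, since the driver only applies the feature when the value is non-zero.

static u32 clamp_updthresh(u32 cur_thresh, u32 spi_piocnt)
{
        /* only decrease, never increase; zero means the feature is unused */
        if (cur_thresh && spi_piocnt < cur_thresh)
                return spi_piocnt;
        return cur_thresh;
}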

drivers/infiniband/hw/ipath/ipath_init_chip.c

@@ -341,6 +341,7 @@ static int init_chip_reset(struct ipath_devdata *dd)
{
        u32 rtmp;
        int i;
        unsigned long flags;
        /*
         * ensure chip does no sends or receives, tail updates, or
@@ -356,8 +357,13 @@ static int init_chip_reset(struct ipath_devdata *dd)
        ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
                         dd->ipath_rcvctrl);
        spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
        dd->ipath_sendctrl = 0U; /* no sdma, etc */
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_control, dd->ipath_control);
        ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
        spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);
        rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);
        if (rtmp != dd->ipath_rcvtidcnt)
@@ -478,6 +484,14 @@ static void enable_chip(struct ipath_devdata *dd, int reinit)
        /* Enable PIO send, and update of PIOavail regs to memory. */
        dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE |
                INFINIPATH_S_PIOBUFAVAILUPD;
        /*
         * Set the PIO avail update threshold to host memory
         * on chips that support it.
         */
        if (dd->ipath_pioupd_thresh)
                dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
                        << INFINIPATH_S_UPDTHRESH_SHIFT;
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
        ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
        spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
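
At enable time the sendctrl shadow is rebuilt from scratch (plain assignment, not a masked update), so the threshold is simply ORed in rather than cleared and replaced as in the per-process path above. For reference, the field decodes the same way it is encoded; the hypothetical helper below (not part of the patch) reads the value back out of the software shadow:

static u32 get_pioupd_thresh(struct ipath_devdata *dd)
{
        /* undo the encode used above: shift the field down, then mask it */
        return (u32) ((dd->ipath_sendctrl >> INFINIPATH_S_UPDTHRESH_SHIFT) &
                      INFINIPATH_S_UPDTHRESH_MASK);
}
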
@@ -757,6 +771,12 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
        ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u "
                   "each for %u user ports\n", kpiobufs,
                   piobufs, dd->ipath_pbufsport, uports);
        if (dd->ipath_pioupd_thresh) {
                if (dd->ipath_pbufsport < dd->ipath_pioupd_thresh)
                        dd->ipath_pioupd_thresh = dd->ipath_pbufsport;
                if (kpiobufs < dd->ipath_pioupd_thresh)
                        dd->ipath_pioupd_thresh = kpiobufs;
        }
        dd->ipath_f_early_init(dd);
        /*
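
The init-time clamp above amounts to taking a minimum: the threshold ends up no larger than either the per-port user buffer count or the kernel's buffer count. An illustrative rewrite (not what the patch literally does, and assuming kpiobufs as declared in the surrounding function):

        if (dd->ipath_pioupd_thresh)
                dd->ipath_pioupd_thresh = min_t(u32, dd->ipath_pioupd_thresh,
                                min_t(u32, dd->ipath_pbufsport, kpiobufs));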

drivers/infiniband/hw/ipath/ipath_kernel.h

@@ -349,6 +349,7 @@ struct ipath_devdata {
        u32 ipath_lastrpkts;
        /* pio bufs allocated per port */
        u32 ipath_pbufsport;
        u32 ipath_pioupd_thresh; /* update threshold, some chips */
        /*
         * number of ports configured as max; zero is set to number chip
         * supports, less gives more pio bufs/port, etc.