IB/ipath: Remove incompletely implemented ipath_runtime flags and code

The IPATH_RUNTIME_PBC_REWRITE and the IPATH_RUNTIME_LOOSE_DMA_ALIGN
flags were never implemented correctly and did not turn out to be
necessary.  Remove the last vestiges of these flags, but mark the spot
with a comment to remind us not to reuse these flag values, in the
interest of binary compatibility.  The INFINIPATH_XGXS_SUPPRESS_ARMLAUNCH_ERR
bit was also not found to be useful, so it was dropped as part of this
cleanup.

Signed-off-by: John Gregor <john.gregor@qlogic.com>
Signed-off-by: Arthur Jones <arthur.jones@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Author: John Gregor, 2007-05-17 08:15:50 -07:00 (committed by Roland Dreier)
parent 17b2eb9fe6
commit 8e9ab3f1c9
2 files changed, 1 insertion(+), 27 deletions(-)

--- a/drivers/infiniband/hw/ipath/ipath_common.h
+++ b/drivers/infiniband/hw/ipath/ipath_common.h

@@ -189,8 +189,7 @@ typedef enum _ipath_ureg {
 #define IPATH_RUNTIME_FORCE_WC_ORDER 0x4
 #define IPATH_RUNTIME_RCVHDR_COPY 0x8
 #define IPATH_RUNTIME_MASTER 0x10
-#define IPATH_RUNTIME_PBC_REWRITE 0x20
-#define IPATH_RUNTIME_LOOSE_DMA_ALIGN 0x40
+/* 0x20 and 0x40 are no longer used, but are reserved for ABI compatibility */
 
 /*
  * This structure is returned by ipath_userinit() immediately after
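The reason the 0x20 and 0x40 values stay reserved is that spi_runtime_flags is
part of the user/kernel ABI: userspace built against the old header still tests
those bit positions, so reusing them for a new feature could make an old binary
take a code path that no longer exists.  A minimal userspace-side sketch of that
hazard (the struct and flag values below are a simplified stand-in, not the
driver's actual definitions):

#include <stdint.h>
#include <stdio.h>

/* Flag values as an old userspace build saw them (old ipath_common.h). */
#define IPATH_RUNTIME_PBC_REWRITE      0x20
#define IPATH_RUNTIME_LOOSE_DMA_ALIGN  0x40

/* Hypothetical stand-in for the base-info structure handed to userspace. */
struct old_base_info {
	uint32_t spi_runtime_flags;
};

int main(void)
{
	struct old_base_info info = { .spi_runtime_flags = 0x10 };	/* e.g. MASTER */

	/* An old binary still checks bit 0x20.  If a future kernel reused
	 * 0x20 for an unrelated feature, this test would pass for the wrong
	 * reason, hence the "reserved" comment in the header. */
	if (info.spi_runtime_flags & IPATH_RUNTIME_PBC_REWRITE)
		printf("would enable the (now removed) PBC rewrite path\n");
	else
		printf("PBC rewrite not advertised\n");
	return 0;
}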

--- a/drivers/infiniband/hw/ipath/ipath_iba6120.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c

@@ -296,13 +296,6 @@ static const struct ipath_cregs ipath_pe_cregs = {
 #define IPATH_GPIO_SCL (1ULL << \
 	(_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
 
-/*
- * Rev2 silicon allows suppressing check for ArmLaunch errors.
- * this can speed up short packet sends on systems that do
- * not guaranteee write-order.
- */
-#define INFINIPATH_XGXS_SUPPRESS_ARMLAUNCH_ERR (1ULL<<63)
-
 /* 6120 specific hardware errors... */
 static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs[] = {
 	INFINIPATH_HWE_MSG(PCIEPOISONEDTLP, "PCIe Poisoned TLP"),
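A side note on the removed define's form: (1ULL<<63) is what builds a mask for
D63 of the 64-bit XGXS config register, since shifting a plain int by 63 is
undefined behaviour in C.  A tiny standalone illustration (not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 1ULL promotes the shift to 64 bits; "1 << 63" would shift a 32-bit
	 * int past its width, which is undefined behaviour. */
	uint64_t suppress_armlaunch = 1ULL << 63;

	printf("D63 mask = 0x%016llx\n", (unsigned long long)suppress_armlaunch);
	return 0;
}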
@@ -680,17 +673,6 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
 		val |= dd->ipath_rx_pol_inv <<
 			INFINIPATH_XGXS_RX_POL_SHIFT;
 	}
-	if (dd->ipath_minrev >= 2) {
-		/* Rev 2. can tolerate multiple writes to PBC, and
-		 * allowing them can provide lower latency on some
-		 * CPUs, but this feature is off by default, only
-		 * turned on by setting D63 of XGXSconfig reg.
-		 * May want to make this conditional more
-		 * fine-grained in future. This is not exactly
-		 * related to XGXS, but where the bit ended up.
-		 */
-		val |= INFINIPATH_XGXS_SUPPRESS_ARMLAUNCH_ERR;
-	}
 	if (val != prev_val)
 		ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
 
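The surrounding serdes bring-up code follows a read-modify-write pattern on
kr_xgxsconfig: read the current value, adjust the bits that need to change, and
write the register back only if the value actually changed, so the hardware is
not touched needlessly.  The removed branch used to OR in the D63 suppress bit
on rev-2 silicon at this point.  A rough sketch of the pattern with a fake
register (the accessor names and shift value below are made up for
illustration; the driver itself uses ipath_write_kreg() on the real register):

#include <stdint.h>

static uint64_t fake_xgxsconfig;	/* stand-in for the real chip register */

static uint64_t read_xgxsconfig(void)    { return fake_xgxsconfig; }
static void write_xgxsconfig(uint64_t v) { fake_xgxsconfig = v; }

#define FAKE_RX_POL_SHIFT 19	/* illustrative shift, not the real value */

static void update_xgxsconfig(unsigned int rx_pol_inv)
{
	uint64_t prev_val = read_xgxsconfig();
	uint64_t val = prev_val;

	if (rx_pol_inv)
		val |= (uint64_t)rx_pol_inv << FAKE_RX_POL_SHIFT;

	/* Before this patch, rev-2 silicon also had the D63 suppress bit
	 * ORed into val at this point; that step is now gone. */

	/* Touch the hardware only when something actually changed. */
	if (val != prev_val)
		write_xgxsconfig(val);
}

int main(void)
{
	update_xgxsconfig(1);
	return 0;
}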
@@ -1324,13 +1306,6 @@ static int ipath_pe_get_base_info(struct ipath_portdata *pd, void *kbase)
 
 	dd = pd->port_dd;
 
-	if (dd != NULL && dd->ipath_minrev >= 2) {
-		ipath_cdbg(PROC, "IBA6120 Rev2, allow multiple PBC write\n");
-		kinfo->spi_runtime_flags |= IPATH_RUNTIME_PBC_REWRITE;
-		ipath_cdbg(PROC, "IBA6120 Rev2, allow loose DMA alignment\n");
-		kinfo->spi_runtime_flags |= IPATH_RUNTIME_LOOSE_DMA_ALIGN;
-	}
-
 done:
 	kinfo->spi_runtime_flags |= IPATH_RUNTIME_PCIE;
 	return 0;