
sfc: separate out SFC4000 ("Falcon") support into new sfc-falcon driver

Rationale: The differences between Falcon and Siena are in many ways larger
 than those between Siena and EF10 (despite Siena being nominally "Falcon-
 architecture"); for instance, Falcon has no MCPU, so there is no MCDI.
 Removing Falcon support from the sfc driver should simplify the latter,
 and avoid the possibility of Falcon support being broken by changes to sfc
 (which are rarely if ever tested on Falcon, it being end-of-lifed hardware).

The sfc-falcon driver created in this changeset is essentially a copy of the
 sfc driver, but with Siena- and EF10-specific code, including MCDI, removed
 and with the "efx_" identifier prefix changed to "ef4_" (for "EFX 4000-
 series") to avoid collisions when both drivers are built-in.

This changeset removes Falcon from the sfc driver's PCI ID table; in sfc I've
 also removed the obvious Falcon-related code (the Falcon NIC functions, the
 Falcon PHY code and EFX_REV_FALCON_*) and fixed up everything that
 referenced them.

Also, increment the minor version of both drivers (to 4.1).

For now, CONFIG_SFC selects CONFIG_SFC_FALCON, so that updating old configs
 doesn't cause Falcon support to disappear; but that should be undone at
 some point in the future.

Signed-off-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Edward Cree 2016-11-28 18:55:34 +00:00 committed by David S. Miller
parent 6bb10c2bc6
commit 5a6681e22c
44 changed files with 18209 additions and 1216 deletions


@ -165,6 +165,7 @@ source "drivers/net/ethernet/seeq/Kconfig"
source "drivers/net/ethernet/silan/Kconfig"
source "drivers/net/ethernet/sis/Kconfig"
source "drivers/net/ethernet/sfc/Kconfig"
source "drivers/net/ethernet/sfc/falcon/Kconfig"
source "drivers/net/ethernet/sgi/Kconfig"
source "drivers/net/ethernet/smsc/Kconfig"
source "drivers/net/ethernet/stmicro/Kconfig"


@ -75,6 +75,7 @@ obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/
obj-$(CONFIG_NET_VENDOR_SILAN) += silan/
obj-$(CONFIG_NET_VENDOR_SIS) += sis/
obj-$(CONFIG_SFC) += sfc/
obj-$(CONFIG_SFC_FALCON) += sfc/falcon/
obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/


@ -1,20 +1,20 @@
config SFC
tristate "Solarflare SFC4000/SFC9000/SFC9100-family support"
tristate "Solarflare SFC9000/SFC9100-family support"
depends on PCI
select MDIO
select CRC32
select I2C
select I2C_ALGOBIT
select PTP_1588_CLOCK
select SFC_FALCON
---help---
This driver supports 10/40-gigabit Ethernet cards based on
the Solarflare SFC4000, SFC9000-family and SFC9100-family
controllers.
the Solarflare SFC9000-family and SFC9100-family controllers.
To compile this driver as a module, choose M here. The module
will be called sfc.
config SFC_MTD
bool "Solarflare SFC4000/SFC9000/SFC9100-family MTD support"
bool "Solarflare SFC9000/SFC9100-family MTD support"
depends on SFC && MTD && !(SFC=y && MTD=m)
default y
---help---


@ -1,7 +1,6 @@
sfc-y += efx.o nic.o farch.o falcon.o siena.o ef10.o tx.o \
rx.o selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
tenxpress.o txc43128_phy.o falcon_boards.o \
mcdi.o mcdi_port.o mcdi_mon.o ptp.o tx_tso.o
sfc-y += efx.o nic.o farch.o siena.o ef10.o tx.o rx.o \
selftest.o ethtool.o ptp.o tx_tso.o \
mcdi.o mcdi_port.o mcdi_mon.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o


@ -733,16 +733,7 @@ static void efx_stop_datapath(struct efx_nic *efx)
}
rc = efx->type->fini_dmaq(efx);
if (rc && EFX_WORKAROUND_7803(efx)) {
/* Schedule a reset to recover from the flush failure. The
* descriptor caches reference memory we're about to free,
* but falcon_reconfigure_mac_wrapper() won't reconnect
* the MACs because of the pending reset.
*/
netif_err(efx, drv, efx->net_dev,
"Resetting to recover from flush failure\n");
efx_schedule_reset(efx, RESET_TYPE_ALL);
} else if (rc) {
if (rc) {
netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
} else {
netif_dbg(efx, drv, efx->net_dev,
@ -1892,15 +1883,13 @@ static void efx_start_all(struct efx_nic *efx)
queue_delayed_work(efx->workqueue, &efx->monitor_work,
efx_monitor_interval);
/* If link state detection is normally event-driven, we have
/* Link state detection is normally event-driven; we have
* to poll now because we could have missed a change
*/
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
mutex_lock(&efx->mac_lock);
if (efx->phy_op->poll(efx))
efx_link_status_changed(efx);
mutex_unlock(&efx->mac_lock);
}
mutex_lock(&efx->mac_lock);
if (efx->phy_op->poll(efx))
efx_link_status_changed(efx);
mutex_unlock(&efx->mac_lock);
efx->type->start_stats(efx);
efx->type->pull_stats(efx);
@ -2842,12 +2831,6 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
/* PCI device ID table */
static const struct pci_device_id efx_pci_table[] = {
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0),
.driver_data = (unsigned long) &falcon_a1_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
.driver_data = (unsigned long) &falcon_b0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803), /* SFC9020 */
.driver_data = (unsigned long) &siena_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */


@ -169,9 +169,8 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
efx_mcdi_print_fwver(efx, info->fw_version,
sizeof(info->fw_version));
efx_mcdi_print_fwver(efx, info->fw_version,
sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
}
@ -966,8 +965,6 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
return 0;
case ETHTOOL_GRXFH: {
unsigned min_revision = 0;
info->data = 0;
switch (info->flow_type) {
case UDP_V4_FLOW:
@ -980,7 +977,6 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
case AH_ESP_V4_FLOW:
case IPV4_FLOW:
info->data |= RXH_IP_SRC | RXH_IP_DST;
min_revision = EFX_REV_FALCON_B0;
break;
case UDP_V6_FLOW:
if (efx->rx_hash_udp_4tuple)
@ -992,13 +988,10 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
case AH_ESP_V6_FLOW:
case IPV6_FLOW:
info->data |= RXH_IP_SRC | RXH_IP_DST;
min_revision = EFX_REV_SIENA_A0;
break;
default:
break;
}
if (efx_nic_rev(efx) < min_revision)
info->data = 0;
return 0;
}
@ -1271,9 +1264,7 @@ static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
return ((efx_nic_rev(efx) < EFX_REV_FALCON_B0 ||
efx->n_rx_channels == 1) ?
0 : ARRAY_SIZE(efx->rx_indir_table));
return (efx->n_rx_channels == 1) ? 0 : ARRAY_SIZE(efx->rx_indir_table);
}
static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,


@ -0,0 +1,21 @@
config SFC_FALCON
tristate "Solarflare SFC4000 support"
depends on PCI
select MDIO
select CRC32
select I2C
select I2C_ALGOBIT
---help---
This driver supports 10-gigabit Ethernet cards based on
the Solarflare SFC4000 controller.
To compile this driver as a module, choose M here. The module
will be called sfc-falcon.
config SFC_FALCON_MTD
bool "Solarflare SFC4000 MTD support"
depends on SFC_FALCON && MTD && !(SFC_FALCON=y && MTD=m)
default y
---help---
This exposes the on-board flash and/or EEPROM as MTD devices
(e.g. /dev/mtd1). This is required to update the boot
configuration under Linux.


@ -0,0 +1,6 @@
sfc-falcon-y += efx.o nic.o farch.o falcon.o tx.o rx.o selftest.o \
ethtool.o qt202x_phy.o mdio_10g.o tenxpress.o \
txc43128_phy.o falcon_boards.o
sfc-falcon-$(CONFIG_SFC_FALCON_MTD) += mtd.o
obj-$(CONFIG_SFC_FALCON) += sfc-falcon.o


@ -0,0 +1,542 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EF4_BITFIELD_H
#define EF4_BITFIELD_H
/*
* Efx bitfield access
*
* Efx NICs make extensive use of bitfields up to 128 bits
* wide. Since there is no native 128-bit datatype on most systems,
* and since 64-bit datatypes are inefficient on 32-bit systems and
* vice versa, we wrap accesses in a way that uses the most efficient
* datatype.
*
* The NICs are PCI devices and therefore little-endian. Since most
* of the quantities that we deal with are DMAed to/from host memory,
* we define our datatypes (ef4_oword_t, ef4_qword_t and
* ef4_dword_t) to be little-endian.
*/
/* Lowest bit numbers and widths */
#define EF4_DUMMY_FIELD_LBN 0
#define EF4_DUMMY_FIELD_WIDTH 0
#define EF4_WORD_0_LBN 0
#define EF4_WORD_0_WIDTH 16
#define EF4_WORD_1_LBN 16
#define EF4_WORD_1_WIDTH 16
#define EF4_DWORD_0_LBN 0
#define EF4_DWORD_0_WIDTH 32
#define EF4_DWORD_1_LBN 32
#define EF4_DWORD_1_WIDTH 32
#define EF4_DWORD_2_LBN 64
#define EF4_DWORD_2_WIDTH 32
#define EF4_DWORD_3_LBN 96
#define EF4_DWORD_3_WIDTH 32
#define EF4_QWORD_0_LBN 0
#define EF4_QWORD_0_WIDTH 64
/* Specified attribute (e.g. LBN) of the specified field */
#define EF4_VAL(field, attribute) field ## _ ## attribute
/* Low bit number of the specified field */
#define EF4_LOW_BIT(field) EF4_VAL(field, LBN)
/* Bit width of the specified field */
#define EF4_WIDTH(field) EF4_VAL(field, WIDTH)
/* High bit number of the specified field */
#define EF4_HIGH_BIT(field) (EF4_LOW_BIT(field) + EF4_WIDTH(field) - 1)
/* Mask equal in width to the specified field.
*
* For example, a field with width 5 would have a mask of 0x1f.
*
* The maximum width mask that can be generated is 64 bits.
*/
#define EF4_MASK64(width) \
((width) == 64 ? ~((u64) 0) : \
(((((u64) 1) << (width))) - 1))
/* Mask equal in width to the specified field.
*
* For example, a field with width 5 would have a mask of 0x1f.
*
* The maximum width mask that can be generated is 32 bits. Use
* EF4_MASK64 for higher width fields.
*/
#define EF4_MASK32(width) \
((width) == 32 ? ~((u32) 0) : \
(((((u32) 1) << (width))) - 1))
/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
typedef union ef4_dword {
__le32 u32[1];
} ef4_dword_t;
/* A quadword (i.e. 8 byte) datatype - little-endian in HW */
typedef union ef4_qword {
__le64 u64[1];
__le32 u32[2];
ef4_dword_t dword[2];
} ef4_qword_t;
/* An octword (eight-word, i.e. 16 byte) datatype - little-endian in HW */
typedef union ef4_oword {
__le64 u64[2];
ef4_qword_t qword[2];
__le32 u32[4];
ef4_dword_t dword[4];
} ef4_oword_t;
/* Format string and value expanders for printk */
#define EF4_DWORD_FMT "%08x"
#define EF4_QWORD_FMT "%08x:%08x"
#define EF4_OWORD_FMT "%08x:%08x:%08x:%08x"
#define EF4_DWORD_VAL(dword) \
((unsigned int) le32_to_cpu((dword).u32[0]))
#define EF4_QWORD_VAL(qword) \
((unsigned int) le32_to_cpu((qword).u32[1])), \
((unsigned int) le32_to_cpu((qword).u32[0]))
#define EF4_OWORD_VAL(oword) \
((unsigned int) le32_to_cpu((oword).u32[3])), \
((unsigned int) le32_to_cpu((oword).u32[2])), \
((unsigned int) le32_to_cpu((oword).u32[1])), \
((unsigned int) le32_to_cpu((oword).u32[0]))
/*
* Extract bit field portion [low,high) from the native-endian element
* which contains bits [min,max).
*
* For example, suppose "element" represents the high 32 bits of a
* 64-bit value, and we wish to extract the bits belonging to the bit
* field occupying bits 28-45 of this 64-bit value.
*
* Then EF4_EXTRACT ( element, 32, 63, 28, 45 ) would give
*
* ( element ) << 4
*
* The result will contain the relevant bits filled in in the range
* [0,high-low), with garbage in bits [high-low+1,...).
*/
#define EF4_EXTRACT_NATIVE(native_element, min, max, low, high) \
((low) > (max) || (high) < (min) ? 0 : \
(low) > (min) ? \
(native_element) >> ((low) - (min)) : \
(native_element) << ((min) - (low)))
/*
* Extract bit field portion [low,high) from the 64-bit little-endian
* element which contains bits [min,max)
*/
#define EF4_EXTRACT64(element, min, max, low, high) \
EF4_EXTRACT_NATIVE(le64_to_cpu(element), min, max, low, high)
/*
* Extract bit field portion [low,high) from the 32-bit little-endian
* element which contains bits [min,max)
*/
#define EF4_EXTRACT32(element, min, max, low, high) \
EF4_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high)
#define EF4_EXTRACT_OWORD64(oword, low, high) \
((EF4_EXTRACT64((oword).u64[0], 0, 63, low, high) | \
EF4_EXTRACT64((oword).u64[1], 64, 127, low, high)) & \
EF4_MASK64((high) + 1 - (low)))
#define EF4_EXTRACT_QWORD64(qword, low, high) \
(EF4_EXTRACT64((qword).u64[0], 0, 63, low, high) & \
EF4_MASK64((high) + 1 - (low)))
#define EF4_EXTRACT_OWORD32(oword, low, high) \
((EF4_EXTRACT32((oword).u32[0], 0, 31, low, high) | \
EF4_EXTRACT32((oword).u32[1], 32, 63, low, high) | \
EF4_EXTRACT32((oword).u32[2], 64, 95, low, high) | \
EF4_EXTRACT32((oword).u32[3], 96, 127, low, high)) & \
EF4_MASK32((high) + 1 - (low)))
#define EF4_EXTRACT_QWORD32(qword, low, high) \
((EF4_EXTRACT32((qword).u32[0], 0, 31, low, high) | \
EF4_EXTRACT32((qword).u32[1], 32, 63, low, high)) & \
EF4_MASK32((high) + 1 - (low)))
#define EF4_EXTRACT_DWORD(dword, low, high) \
(EF4_EXTRACT32((dword).u32[0], 0, 31, low, high) & \
EF4_MASK32((high) + 1 - (low)))
#define EF4_OWORD_FIELD64(oword, field) \
EF4_EXTRACT_OWORD64(oword, EF4_LOW_BIT(field), \
EF4_HIGH_BIT(field))
#define EF4_QWORD_FIELD64(qword, field) \
EF4_EXTRACT_QWORD64(qword, EF4_LOW_BIT(field), \
EF4_HIGH_BIT(field))
#define EF4_OWORD_FIELD32(oword, field) \
EF4_EXTRACT_OWORD32(oword, EF4_LOW_BIT(field), \
EF4_HIGH_BIT(field))
#define EF4_QWORD_FIELD32(qword, field) \
EF4_EXTRACT_QWORD32(qword, EF4_LOW_BIT(field), \
EF4_HIGH_BIT(field))
#define EF4_DWORD_FIELD(dword, field) \
EF4_EXTRACT_DWORD(dword, EF4_LOW_BIT(field), \
EF4_HIGH_BIT(field))
#define EF4_OWORD_IS_ZERO64(oword) \
(((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0)
#define EF4_QWORD_IS_ZERO64(qword) \
(((qword).u64[0]) == (__force __le64) 0)
#define EF4_OWORD_IS_ZERO32(oword) \
(((oword).u32[0] | (oword).u32[1] | (oword).u32[2] | (oword).u32[3]) \
== (__force __le32) 0)
#define EF4_QWORD_IS_ZERO32(qword) \
(((qword).u32[0] | (qword).u32[1]) == (__force __le32) 0)
#define EF4_DWORD_IS_ZERO(dword) \
(((dword).u32[0]) == (__force __le32) 0)
#define EF4_OWORD_IS_ALL_ONES64(oword) \
(((oword).u64[0] & (oword).u64[1]) == ~((__force __le64) 0))
#define EF4_QWORD_IS_ALL_ONES64(qword) \
((qword).u64[0] == ~((__force __le64) 0))
#define EF4_OWORD_IS_ALL_ONES32(oword) \
(((oword).u32[0] & (oword).u32[1] & (oword).u32[2] & (oword).u32[3]) \
== ~((__force __le32) 0))
#define EF4_QWORD_IS_ALL_ONES32(qword) \
(((qword).u32[0] & (qword).u32[1]) == ~((__force __le32) 0))
#define EF4_DWORD_IS_ALL_ONES(dword) \
((dword).u32[0] == ~((__force __le32) 0))
#if BITS_PER_LONG == 64
#define EF4_OWORD_FIELD EF4_OWORD_FIELD64
#define EF4_QWORD_FIELD EF4_QWORD_FIELD64
#define EF4_OWORD_IS_ZERO EF4_OWORD_IS_ZERO64
#define EF4_QWORD_IS_ZERO EF4_QWORD_IS_ZERO64
#define EF4_OWORD_IS_ALL_ONES EF4_OWORD_IS_ALL_ONES64
#define EF4_QWORD_IS_ALL_ONES EF4_QWORD_IS_ALL_ONES64
#else
#define EF4_OWORD_FIELD EF4_OWORD_FIELD32
#define EF4_QWORD_FIELD EF4_QWORD_FIELD32
#define EF4_OWORD_IS_ZERO EF4_OWORD_IS_ZERO32
#define EF4_QWORD_IS_ZERO EF4_QWORD_IS_ZERO32
#define EF4_OWORD_IS_ALL_ONES EF4_OWORD_IS_ALL_ONES32
#define EF4_QWORD_IS_ALL_ONES EF4_QWORD_IS_ALL_ONES32
#endif
/*
* Construct bit field portion
*
* Creates the portion of the bit field [low,high) that lies within
* the range [min,max).
*/
#define EF4_INSERT_NATIVE64(min, max, low, high, value) \
(((low > max) || (high < min)) ? 0 : \
((low > min) ? \
(((u64) (value)) << (low - min)) : \
(((u64) (value)) >> (min - low))))
#define EF4_INSERT_NATIVE32(min, max, low, high, value) \
(((low > max) || (high < min)) ? 0 : \
((low > min) ? \
(((u32) (value)) << (low - min)) : \
(((u32) (value)) >> (min - low))))
#define EF4_INSERT_NATIVE(min, max, low, high, value) \
((((max - min) >= 32) || ((high - low) >= 32)) ? \
EF4_INSERT_NATIVE64(min, max, low, high, value) : \
EF4_INSERT_NATIVE32(min, max, low, high, value))
/*
* Construct bit field portion
*
* Creates the portion of the named bit field that lies within the
* range [min,max).
*/
#define EF4_INSERT_FIELD_NATIVE(min, max, field, value) \
EF4_INSERT_NATIVE(min, max, EF4_LOW_BIT(field), \
EF4_HIGH_BIT(field), value)
/*
* Construct bit field
*
* Creates the portion of the named bit fields that lie within the
* range [min,max).
*/
#define EF4_INSERT_FIELDS_NATIVE(min, max, \
field1, value1, \
field2, value2, \
field3, value3, \
field4, value4, \
field5, value5, \
field6, value6, \
field7, value7, \
field8, value8, \
field9, value9, \
field10, value10) \
(EF4_INSERT_FIELD_NATIVE((min), (max), field1, (value1)) | \
EF4_INSERT_FIELD_NATIVE((min), (max), field2, (value2)) | \
EF4_INSERT_FIELD_NATIVE((min), (max), field3, (value3)) | \
EF4_INSERT_FIELD_NATIVE((min), (max), field4, (value4)) | \
EF4_INSERT_FIELD_NATIVE((min), (max), field5, (value5)) | \
EF4_INSERT_FIELD_NATIVE((min), (max), field6, (value6)) | \
EF4_INSERT_FIELD_NATIVE((min), (max), field7, (value7)) | \
EF4_INSERT_FIELD_NATIVE((min), (max), field8, (value8)) | \
EF4_INSERT_FIELD_NATIVE((min), (max), field9, (value9)) | \
EF4_INSERT_FIELD_NATIVE((min), (max), field10, (value10)))
#define EF4_INSERT_FIELDS64(...) \
cpu_to_le64(EF4_INSERT_FIELDS_NATIVE(__VA_ARGS__))
#define EF4_INSERT_FIELDS32(...) \
cpu_to_le32(EF4_INSERT_FIELDS_NATIVE(__VA_ARGS__))
#define EF4_POPULATE_OWORD64(oword, ...) do { \
(oword).u64[0] = EF4_INSERT_FIELDS64(0, 63, __VA_ARGS__); \
(oword).u64[1] = EF4_INSERT_FIELDS64(64, 127, __VA_ARGS__); \
} while (0)
#define EF4_POPULATE_QWORD64(qword, ...) do { \
(qword).u64[0] = EF4_INSERT_FIELDS64(0, 63, __VA_ARGS__); \
} while (0)
#define EF4_POPULATE_OWORD32(oword, ...) do { \
(oword).u32[0] = EF4_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
(oword).u32[1] = EF4_INSERT_FIELDS32(32, 63, __VA_ARGS__); \
(oword).u32[2] = EF4_INSERT_FIELDS32(64, 95, __VA_ARGS__); \
(oword).u32[3] = EF4_INSERT_FIELDS32(96, 127, __VA_ARGS__); \
} while (0)
#define EF4_POPULATE_QWORD32(qword, ...) do { \
(qword).u32[0] = EF4_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
(qword).u32[1] = EF4_INSERT_FIELDS32(32, 63, __VA_ARGS__); \
} while (0)
#define EF4_POPULATE_DWORD(dword, ...) do { \
(dword).u32[0] = EF4_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
} while (0)
#if BITS_PER_LONG == 64
#define EF4_POPULATE_OWORD EF4_POPULATE_OWORD64
#define EF4_POPULATE_QWORD EF4_POPULATE_QWORD64
#else
#define EF4_POPULATE_OWORD EF4_POPULATE_OWORD32
#define EF4_POPULATE_QWORD EF4_POPULATE_QWORD32
#endif
/* Populate an octword field with various numbers of arguments */
#define EF4_POPULATE_OWORD_10 EF4_POPULATE_OWORD
#define EF4_POPULATE_OWORD_9(oword, ...) \
EF4_POPULATE_OWORD_10(oword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_OWORD_8(oword, ...) \
EF4_POPULATE_OWORD_9(oword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_OWORD_7(oword, ...) \
EF4_POPULATE_OWORD_8(oword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_OWORD_6(oword, ...) \
EF4_POPULATE_OWORD_7(oword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_OWORD_5(oword, ...) \
EF4_POPULATE_OWORD_6(oword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_OWORD_4(oword, ...) \
EF4_POPULATE_OWORD_5(oword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_OWORD_3(oword, ...) \
EF4_POPULATE_OWORD_4(oword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_OWORD_2(oword, ...) \
EF4_POPULATE_OWORD_3(oword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_OWORD_1(oword, ...) \
EF4_POPULATE_OWORD_2(oword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_ZERO_OWORD(oword) \
EF4_POPULATE_OWORD_1(oword, EF4_DUMMY_FIELD, 0)
#define EF4_SET_OWORD(oword) \
EF4_POPULATE_OWORD_4(oword, \
EF4_DWORD_0, 0xffffffff, \
EF4_DWORD_1, 0xffffffff, \
EF4_DWORD_2, 0xffffffff, \
EF4_DWORD_3, 0xffffffff)
/* Populate a quadword field with various numbers of arguments */
#define EF4_POPULATE_QWORD_10 EF4_POPULATE_QWORD
#define EF4_POPULATE_QWORD_9(qword, ...) \
EF4_POPULATE_QWORD_10(qword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_QWORD_8(qword, ...) \
EF4_POPULATE_QWORD_9(qword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_QWORD_7(qword, ...) \
EF4_POPULATE_QWORD_8(qword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_QWORD_6(qword, ...) \
EF4_POPULATE_QWORD_7(qword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_QWORD_5(qword, ...) \
EF4_POPULATE_QWORD_6(qword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_QWORD_4(qword, ...) \
EF4_POPULATE_QWORD_5(qword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_QWORD_3(qword, ...) \
EF4_POPULATE_QWORD_4(qword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_QWORD_2(qword, ...) \
EF4_POPULATE_QWORD_3(qword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_QWORD_1(qword, ...) \
EF4_POPULATE_QWORD_2(qword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_ZERO_QWORD(qword) \
EF4_POPULATE_QWORD_1(qword, EF4_DUMMY_FIELD, 0)
#define EF4_SET_QWORD(qword) \
EF4_POPULATE_QWORD_2(qword, \
EF4_DWORD_0, 0xffffffff, \
EF4_DWORD_1, 0xffffffff)
/* Populate a dword field with various numbers of arguments */
#define EF4_POPULATE_DWORD_10 EF4_POPULATE_DWORD
#define EF4_POPULATE_DWORD_9(dword, ...) \
EF4_POPULATE_DWORD_10(dword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_DWORD_8(dword, ...) \
EF4_POPULATE_DWORD_9(dword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_DWORD_7(dword, ...) \
EF4_POPULATE_DWORD_8(dword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_DWORD_6(dword, ...) \
EF4_POPULATE_DWORD_7(dword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_DWORD_5(dword, ...) \
EF4_POPULATE_DWORD_6(dword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_DWORD_4(dword, ...) \
EF4_POPULATE_DWORD_5(dword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_DWORD_3(dword, ...) \
EF4_POPULATE_DWORD_4(dword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_DWORD_2(dword, ...) \
EF4_POPULATE_DWORD_3(dword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_POPULATE_DWORD_1(dword, ...) \
EF4_POPULATE_DWORD_2(dword, EF4_DUMMY_FIELD, 0, __VA_ARGS__)
#define EF4_ZERO_DWORD(dword) \
EF4_POPULATE_DWORD_1(dword, EF4_DUMMY_FIELD, 0)
#define EF4_SET_DWORD(dword) \
EF4_POPULATE_DWORD_1(dword, EF4_DWORD_0, 0xffffffff)
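/* Usage sketch (illustrative only, not part of the original header): build a
 * dword from the generic EF4_WORD_* fields defined above, then read one field
 * back with EF4_DWORD_FIELD().  The function name is hypothetical.
 */
static inline void ef4_bitfield_example(void)
{
	ef4_dword_t reg;
	unsigned int word0;
	EF4_POPULATE_DWORD_2(reg, EF4_WORD_0, 0x1234, EF4_WORD_1, 0xabcd);
	word0 = EF4_DWORD_FIELD(reg, EF4_WORD_0);	/* word0 == 0x1234 */
	(void)word0;
}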
/*
* Modify a named field within an already-populated structure. Used
* for read-modify-write operations.
*
*/
#define EF4_INVERT_OWORD(oword) do { \
(oword).u64[0] = ~((oword).u64[0]); \
(oword).u64[1] = ~((oword).u64[1]); \
} while (0)
#define EF4_AND_OWORD(oword, from, mask) \
do { \
(oword).u64[0] = (from).u64[0] & (mask).u64[0]; \
(oword).u64[1] = (from).u64[1] & (mask).u64[1]; \
} while (0)
#define EF4_OR_OWORD(oword, from, mask) \
do { \
(oword).u64[0] = (from).u64[0] | (mask).u64[0]; \
(oword).u64[1] = (from).u64[1] | (mask).u64[1]; \
} while (0)
#define EF4_INSERT64(min, max, low, high, value) \
cpu_to_le64(EF4_INSERT_NATIVE(min, max, low, high, value))
#define EF4_INSERT32(min, max, low, high, value) \
cpu_to_le32(EF4_INSERT_NATIVE(min, max, low, high, value))
#define EF4_INPLACE_MASK64(min, max, low, high) \
EF4_INSERT64(min, max, low, high, EF4_MASK64((high) + 1 - (low)))
#define EF4_INPLACE_MASK32(min, max, low, high) \
EF4_INSERT32(min, max, low, high, EF4_MASK32((high) + 1 - (low)))
#define EF4_SET_OWORD64(oword, low, high, value) do { \
(oword).u64[0] = (((oword).u64[0] \
& ~EF4_INPLACE_MASK64(0, 63, low, high)) \
| EF4_INSERT64(0, 63, low, high, value)); \
(oword).u64[1] = (((oword).u64[1] \
& ~EF4_INPLACE_MASK64(64, 127, low, high)) \
| EF4_INSERT64(64, 127, low, high, value)); \
} while (0)
#define EF4_SET_QWORD64(qword, low, high, value) do { \
(qword).u64[0] = (((qword).u64[0] \
& ~EF4_INPLACE_MASK64(0, 63, low, high)) \
| EF4_INSERT64(0, 63, low, high, value)); \
} while (0)
#define EF4_SET_OWORD32(oword, low, high, value) do { \
(oword).u32[0] = (((oword).u32[0] \
& ~EF4_INPLACE_MASK32(0, 31, low, high)) \
| EF4_INSERT32(0, 31, low, high, value)); \
(oword).u32[1] = (((oword).u32[1] \
& ~EF4_INPLACE_MASK32(32, 63, low, high)) \
| EF4_INSERT32(32, 63, low, high, value)); \
(oword).u32[2] = (((oword).u32[2] \
& ~EF4_INPLACE_MASK32(64, 95, low, high)) \
| EF4_INSERT32(64, 95, low, high, value)); \
(oword).u32[3] = (((oword).u32[3] \
& ~EF4_INPLACE_MASK32(96, 127, low, high)) \
| EF4_INSERT32(96, 127, low, high, value)); \
} while (0)
#define EF4_SET_QWORD32(qword, low, high, value) do { \
(qword).u32[0] = (((qword).u32[0] \
& ~EF4_INPLACE_MASK32(0, 31, low, high)) \
| EF4_INSERT32(0, 31, low, high, value)); \
(qword).u32[1] = (((qword).u32[1] \
& ~EF4_INPLACE_MASK32(32, 63, low, high)) \
| EF4_INSERT32(32, 63, low, high, value)); \
} while (0)
#define EF4_SET_DWORD32(dword, low, high, value) do { \
(dword).u32[0] = (((dword).u32[0] \
& ~EF4_INPLACE_MASK32(0, 31, low, high)) \
| EF4_INSERT32(0, 31, low, high, value)); \
} while (0)
#define EF4_SET_OWORD_FIELD64(oword, field, value) \
EF4_SET_OWORD64(oword, EF4_LOW_BIT(field), \
EF4_HIGH_BIT(field), value)
#define EF4_SET_QWORD_FIELD64(qword, field, value) \
EF4_SET_QWORD64(qword, EF4_LOW_BIT(field), \
EF4_HIGH_BIT(field), value)
#define EF4_SET_OWORD_FIELD32(oword, field, value) \
EF4_SET_OWORD32(oword, EF4_LOW_BIT(field), \
EF4_HIGH_BIT(field), value)
#define EF4_SET_QWORD_FIELD32(qword, field, value) \
EF4_SET_QWORD32(qword, EF4_LOW_BIT(field), \
EF4_HIGH_BIT(field), value)
#define EF4_SET_DWORD_FIELD(dword, field, value) \
EF4_SET_DWORD32(dword, EF4_LOW_BIT(field), \
EF4_HIGH_BIT(field), value)
#if BITS_PER_LONG == 64
#define EF4_SET_OWORD_FIELD EF4_SET_OWORD_FIELD64
#define EF4_SET_QWORD_FIELD EF4_SET_QWORD_FIELD64
#else
#define EF4_SET_OWORD_FIELD EF4_SET_OWORD_FIELD32
#define EF4_SET_QWORD_FIELD EF4_SET_QWORD_FIELD32
#endif
/* Used to avoid compiler warnings about shift range exceeding width
* of the data types when dma_addr_t is only 32 bits wide.
*/
#define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t))
#define EF4_DMA_TYPE_WIDTH(width) \
(((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH)
/* Static initialiser */
#define EF4_OWORD32(a, b, c, d) \
{ .u32 = { cpu_to_le32(a), cpu_to_le32(b), \
cpu_to_le32(c), cpu_to_le32(d) } }
#endif /* EF4_BITFIELD_H */

File diff suppressed because it is too large


@ -0,0 +1,277 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EF4_EFX_H
#define EF4_EFX_H
#include "net_driver.h"
#include "filter.h"
/* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
/* All VFs use BAR 0/1 for memory */
#define EF4_MEM_BAR 2
#define EF4_MEM_VF_BAR 0
int ef4_net_open(struct net_device *net_dev);
int ef4_net_stop(struct net_device *net_dev);
/* TX */
int ef4_probe_tx_queue(struct ef4_tx_queue *tx_queue);
void ef4_remove_tx_queue(struct ef4_tx_queue *tx_queue);
void ef4_init_tx_queue(struct ef4_tx_queue *tx_queue);
void ef4_init_tx_queue_core_txq(struct ef4_tx_queue *tx_queue);
void ef4_fini_tx_queue(struct ef4_tx_queue *tx_queue);
netdev_tx_t ef4_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev);
netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb);
void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index);
int ef4_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
struct tc_to_netdev *tc);
unsigned int ef4_tx_max_skb_descs(struct ef4_nic *efx);
extern bool ef4_separate_tx_channels;
/* RX */
void ef4_set_default_rx_indir_table(struct ef4_nic *efx);
void ef4_rx_config_page_split(struct ef4_nic *efx);
int ef4_probe_rx_queue(struct ef4_rx_queue *rx_queue);
void ef4_remove_rx_queue(struct ef4_rx_queue *rx_queue);
void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue);
void ef4_fini_rx_queue(struct ef4_rx_queue *rx_queue);
void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic);
void ef4_rx_slow_fill(unsigned long context);
void __ef4_rx_packet(struct ef4_channel *channel);
void ef4_rx_packet(struct ef4_rx_queue *rx_queue, unsigned int index,
unsigned int n_frags, unsigned int len, u16 flags);
static inline void ef4_rx_flush_packet(struct ef4_channel *channel)
{
if (channel->rx_pkt_n_frags)
__ef4_rx_packet(channel);
}
void ef4_schedule_slow_fill(struct ef4_rx_queue *rx_queue);
#define EF4_MAX_DMAQ_SIZE 4096UL
#define EF4_DEFAULT_DMAQ_SIZE 1024UL
#define EF4_MIN_DMAQ_SIZE 512UL
#define EF4_MAX_EVQ_SIZE 16384UL
#define EF4_MIN_EVQ_SIZE 512UL
/* Maximum number of TCP segments we support for soft-TSO */
#define EF4_TSO_MAX_SEGS 100
/* The smallest [rt]xq_entries that the driver supports. RX minimum
* is a bit arbitrary. For TX, we must have space for at least 2
* TSO skbs.
*/
#define EF4_RXQ_MIN_ENT 128U
#define EF4_TXQ_MIN_ENT(efx) (2 * ef4_tx_max_skb_descs(efx))
static inline bool ef4_rss_enabled(struct ef4_nic *efx)
{
return efx->rss_spread > 1;
}
/* Filters */
void ef4_mac_reconfigure(struct ef4_nic *efx);
/**
* ef4_filter_insert_filter - add or replace a filter
* @efx: NIC in which to insert the filter
* @spec: Specification for the filter
* @replace_equal: Flag for whether the specified filter may replace an
* existing filter with equal priority
*
* On success, return the filter ID.
* On failure, return a negative error code.
*
* If existing filters have equal match values to the new filter spec,
* then the new filter might replace them or the function might fail,
* as follows.
*
* 1. If the existing filters have lower priority, or @replace_equal
* is set and they have equal priority, replace them.
*
* 2. If the existing filters have higher priority, return -%EPERM.
*
* 3. If !ef4_filter_is_mc_recipient(@spec), or the NIC does not
* support delivery to multiple recipients, return -%EEXIST.
*
* This implies that filters for multiple multicast recipients must
* all be inserted with the same priority and @replace_equal = %false.
*/
static inline s32 ef4_filter_insert_filter(struct ef4_nic *efx,
struct ef4_filter_spec *spec,
bool replace_equal)
{
return efx->type->filter_insert(efx, spec, replace_equal);
}
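/* Usage sketch (illustrative only, not part of the original header): build a
 * local 2-tuple RX steering filter and insert it at hint priority.  The spec
 * helpers come from filter.h, included above; the function name and
 * parameters are hypothetical.
 */
static inline s32 ef4_filter_example_steer(struct ef4_nic *efx, u8 ip_proto,
					   __be32 host, __be16 port,
					   unsigned int rxq)
{
	struct ef4_filter_spec spec;
	ef4_filter_init_rx(&spec, EF4_FILTER_PRI_HINT, 0, rxq);
	ef4_filter_set_ipv4_local(&spec, ip_proto, host, port);
	return ef4_filter_insert_filter(efx, &spec, false);
}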
/**
* ef4_filter_remove_id_safe - remove a filter by ID, carefully
* @efx: NIC from which to remove the filter
* @priority: Priority of filter, as passed to @ef4_filter_insert_filter
* @filter_id: ID of filter, as returned by @ef4_filter_insert_filter
*
* This function will range-check @filter_id, so it is safe to call
* with a value passed from userland.
*/
static inline int ef4_filter_remove_id_safe(struct ef4_nic *efx,
enum ef4_filter_priority priority,
u32 filter_id)
{
return efx->type->filter_remove_safe(efx, priority, filter_id);
}
/**
* ef4_filter_get_filter_safe - retrieve a filter by ID, carefully
* @efx: NIC from which to remove the filter
* @priority: Priority of filter, as passed to @ef4_filter_insert_filter
* @filter_id: ID of filter, as returned by @ef4_filter_insert_filter
* @spec: Buffer in which to store filter specification
*
* This function will range-check @filter_id, so it is safe to call
* with a value passed from userland.
*/
static inline int
ef4_filter_get_filter_safe(struct ef4_nic *efx,
enum ef4_filter_priority priority,
u32 filter_id, struct ef4_filter_spec *spec)
{
return efx->type->filter_get_safe(efx, priority, filter_id, spec);
}
static inline u32 ef4_filter_count_rx_used(struct ef4_nic *efx,
enum ef4_filter_priority priority)
{
return efx->type->filter_count_rx_used(efx, priority);
}
static inline u32 ef4_filter_get_rx_id_limit(struct ef4_nic *efx)
{
return efx->type->filter_get_rx_id_limit(efx);
}
static inline s32 ef4_filter_get_rx_ids(struct ef4_nic *efx,
enum ef4_filter_priority priority,
u32 *buf, u32 size)
{
return efx->type->filter_get_rx_ids(efx, priority, buf, size);
}
#ifdef CONFIG_RFS_ACCEL
int ef4_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
bool __ef4_filter_rfs_expire(struct ef4_nic *efx, unsigned quota);
static inline void ef4_filter_rfs_expire(struct ef4_channel *channel)
{
if (channel->rfs_filters_added >= 60 &&
__ef4_filter_rfs_expire(channel->efx, 100))
channel->rfs_filters_added -= 60;
}
#define ef4_filter_rfs_enabled() 1
#else
static inline void ef4_filter_rfs_expire(struct ef4_channel *channel) {}
#define ef4_filter_rfs_enabled() 0
#endif
bool ef4_filter_is_mc_recipient(const struct ef4_filter_spec *spec);
/* Channels */
int ef4_channel_dummy_op_int(struct ef4_channel *channel);
void ef4_channel_dummy_op_void(struct ef4_channel *channel);
int ef4_realloc_channels(struct ef4_nic *efx, u32 rxq_entries, u32 txq_entries);
/* Ports */
int ef4_reconfigure_port(struct ef4_nic *efx);
int __ef4_reconfigure_port(struct ef4_nic *efx);
/* Ethtool support */
extern const struct ethtool_ops ef4_ethtool_ops;
/* Reset handling */
int ef4_reset(struct ef4_nic *efx, enum reset_type method);
void ef4_reset_down(struct ef4_nic *efx, enum reset_type method);
int ef4_reset_up(struct ef4_nic *efx, enum reset_type method, bool ok);
int ef4_try_recovery(struct ef4_nic *efx);
/* Global */
void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type);
unsigned int ef4_usecs_to_ticks(struct ef4_nic *efx, unsigned int usecs);
unsigned int ef4_ticks_to_usecs(struct ef4_nic *efx, unsigned int ticks);
int ef4_init_irq_moderation(struct ef4_nic *efx, unsigned int tx_usecs,
unsigned int rx_usecs, bool rx_adaptive,
bool rx_may_override_tx);
void ef4_get_irq_moderation(struct ef4_nic *efx, unsigned int *tx_usecs,
unsigned int *rx_usecs, bool *rx_adaptive);
void ef4_stop_eventq(struct ef4_channel *channel);
void ef4_start_eventq(struct ef4_channel *channel);
/* Dummy PHY ops for PHY drivers */
int ef4_port_dummy_op_int(struct ef4_nic *efx);
void ef4_port_dummy_op_void(struct ef4_nic *efx);
/* Update the generic software stats in the passed stats array */
void ef4_update_sw_stats(struct ef4_nic *efx, u64 *stats);
/* MTD */
#ifdef CONFIG_SFC_FALCON_MTD
int ef4_mtd_add(struct ef4_nic *efx, struct ef4_mtd_partition *parts,
size_t n_parts, size_t sizeof_part);
static inline int ef4_mtd_probe(struct ef4_nic *efx)
{
return efx->type->mtd_probe(efx);
}
void ef4_mtd_rename(struct ef4_nic *efx);
void ef4_mtd_remove(struct ef4_nic *efx);
#else
static inline int ef4_mtd_probe(struct ef4_nic *efx) { return 0; }
static inline void ef4_mtd_rename(struct ef4_nic *efx) {}
static inline void ef4_mtd_remove(struct ef4_nic *efx) {}
#endif
static inline void ef4_schedule_channel(struct ef4_channel *channel)
{
netif_vdbg(channel->efx, intr, channel->efx->net_dev,
"channel %d scheduling NAPI poll on CPU%d\n",
channel->channel, raw_smp_processor_id());
napi_schedule(&channel->napi_str);
}
static inline void ef4_schedule_channel_irq(struct ef4_channel *channel)
{
channel->event_test_cpu = raw_smp_processor_id();
ef4_schedule_channel(channel);
}
void ef4_link_status_changed(struct ef4_nic *efx);
void ef4_link_set_advertising(struct ef4_nic *efx, u32);
void ef4_link_set_wanted_fc(struct ef4_nic *efx, u8);
static inline void ef4_device_detach_sync(struct ef4_nic *efx)
{
struct net_device *dev = efx->net_dev;
/* Lock/freeze all TX queues so that we can be sure the
* TX scheduler is stopped when we're done and before
* netif_device_present() becomes false.
*/
netif_tx_lock_bh(dev);
netif_device_detach(dev);
netif_tx_unlock_bh(dev);
}
static inline bool ef4_rwsem_assert_write_locked(struct rw_semaphore *sem)
{
if (WARN_ON(down_read_trylock(sem))) {
up_read(sem);
return false;
}
return true;
}
#endif /* EF4_EFX_H */


@ -0,0 +1,171 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2007-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EF4_ENUM_H
#define EF4_ENUM_H
/**
* enum ef4_loopback_mode - loopback modes
* @LOOPBACK_NONE: no loopback
* @LOOPBACK_DATA: data path loopback
* @LOOPBACK_GMAC: loopback within GMAC
* @LOOPBACK_XGMII: loopback after XMAC
* @LOOPBACK_XGXS: loopback within BPX after XGXS
* @LOOPBACK_XAUI: loopback within BPX before XAUI serdes
* @LOOPBACK_GMII: loopback within BPX after GMAC
* @LOOPBACK_SGMII: loopback within BPX within SGMII
* @LOOPBACK_XGBR: loopback within BPX within XGBR
* @LOOPBACK_XFI: loopback within BPX before XFI serdes
* @LOOPBACK_XAUI_FAR: loopback within BPX after XAUI serdes
* @LOOPBACK_GMII_FAR: loopback within BPX before SGMII
* @LOOPBACK_SGMII_FAR: loopback within BPX after SGMII
* @LOOPBACK_XFI_FAR: loopback after XFI serdes
* @LOOPBACK_GPHY: loopback within 1G PHY at unspecified level
* @LOOPBACK_PHYXS: loopback within 10G PHY at PHYXS level
* @LOOPBACK_PCS: loopback within 10G PHY at PCS level
* @LOOPBACK_PMAPMD: loopback within 10G PHY at PMAPMD level
* @LOOPBACK_XPORT: cross port loopback
* @LOOPBACK_XGMII_WS: wireside loopback excluding XMAC
* @LOOPBACK_XAUI_WS: wireside loopback within BPX within XAUI serdes
* @LOOPBACK_XAUI_WS_FAR: wireside loopback within BPX including XAUI serdes
* @LOOPBACK_XAUI_WS_NEAR: wireside loopback within BPX excluding XAUI serdes
* @LOOPBACK_GMII_WS: wireside loopback excluding GMAC
* @LOOPBACK_XFI_WS: wireside loopback excluding XFI serdes
* @LOOPBACK_XFI_WS_FAR: wireside loopback including XFI serdes
* @LOOPBACK_PHYXS_WS: wireside loopback within 10G PHY at PHYXS level
*/
/* Please keep up-to-date w.r.t the following two #defines */
enum ef4_loopback_mode {
LOOPBACK_NONE = 0,
LOOPBACK_DATA = 1,
LOOPBACK_GMAC = 2,
LOOPBACK_XGMII = 3,
LOOPBACK_XGXS = 4,
LOOPBACK_XAUI = 5,
LOOPBACK_GMII = 6,
LOOPBACK_SGMII = 7,
LOOPBACK_XGBR = 8,
LOOPBACK_XFI = 9,
LOOPBACK_XAUI_FAR = 10,
LOOPBACK_GMII_FAR = 11,
LOOPBACK_SGMII_FAR = 12,
LOOPBACK_XFI_FAR = 13,
LOOPBACK_GPHY = 14,
LOOPBACK_PHYXS = 15,
LOOPBACK_PCS = 16,
LOOPBACK_PMAPMD = 17,
LOOPBACK_XPORT = 18,
LOOPBACK_XGMII_WS = 19,
LOOPBACK_XAUI_WS = 20,
LOOPBACK_XAUI_WS_FAR = 21,
LOOPBACK_XAUI_WS_NEAR = 22,
LOOPBACK_GMII_WS = 23,
LOOPBACK_XFI_WS = 24,
LOOPBACK_XFI_WS_FAR = 25,
LOOPBACK_PHYXS_WS = 26,
LOOPBACK_MAX
};
#define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD
/* These loopbacks occur within the controller */
#define LOOPBACKS_INTERNAL ((1 << LOOPBACK_DATA) | \
(1 << LOOPBACK_GMAC) | \
(1 << LOOPBACK_XGMII)| \
(1 << LOOPBACK_XGXS) | \
(1 << LOOPBACK_XAUI) | \
(1 << LOOPBACK_GMII) | \
(1 << LOOPBACK_SGMII) | \
(1 << LOOPBACK_SGMII) | \
(1 << LOOPBACK_XGBR) | \
(1 << LOOPBACK_XFI) | \
(1 << LOOPBACK_XAUI_FAR) | \
(1 << LOOPBACK_GMII_FAR) | \
(1 << LOOPBACK_SGMII_FAR) | \
(1 << LOOPBACK_XFI_FAR) | \
(1 << LOOPBACK_XGMII_WS) | \
(1 << LOOPBACK_XAUI_WS) | \
(1 << LOOPBACK_XAUI_WS_FAR) | \
(1 << LOOPBACK_XAUI_WS_NEAR) | \
(1 << LOOPBACK_GMII_WS) | \
(1 << LOOPBACK_XFI_WS) | \
(1 << LOOPBACK_XFI_WS_FAR))
#define LOOPBACKS_WS ((1 << LOOPBACK_XGMII_WS) | \
(1 << LOOPBACK_XAUI_WS) | \
(1 << LOOPBACK_XAUI_WS_FAR) | \
(1 << LOOPBACK_XAUI_WS_NEAR) | \
(1 << LOOPBACK_GMII_WS) | \
(1 << LOOPBACK_XFI_WS) | \
(1 << LOOPBACK_XFI_WS_FAR) | \
(1 << LOOPBACK_PHYXS_WS))
#define LOOPBACKS_EXTERNAL(_efx) \
((_efx)->loopback_modes & ~LOOPBACKS_INTERNAL & \
~(1 << LOOPBACK_NONE))
#define LOOPBACK_MASK(_efx) \
(1 << (_efx)->loopback_mode)
#define LOOPBACK_INTERNAL(_efx) \
(!!(LOOPBACKS_INTERNAL & LOOPBACK_MASK(_efx)))
#define LOOPBACK_EXTERNAL(_efx) \
(!!(LOOPBACK_MASK(_efx) & LOOPBACKS_EXTERNAL(_efx)))
#define LOOPBACK_CHANGED(_from, _to, _mask) \
(!!((LOOPBACK_MASK(_from) ^ LOOPBACK_MASK(_to)) & (_mask)))
#define LOOPBACK_OUT_OF(_from, _to, _mask) \
((LOOPBACK_MASK(_from) & (_mask)) && !(LOOPBACK_MASK(_to) & (_mask)))
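/* Usage sketch (illustrative only, not part of the original header):
 * LOOPBACK_MASK() turns the current loopback mode into a single-bit mask, so
 * membership in any of the mode sets above is a plain bitwise test.  This
 * hypothetical helper mirrors LOOPBACK_INTERNAL() above for the wireside set:
 */
#define LOOPBACK_WS_EXAMPLE(_efx) \
	(!!(LOOPBACK_MASK(_efx) & LOOPBACKS_WS))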
/*****************************************************************************/
/**
* enum reset_type - reset types
*
* %RESET_TYPE_INVISIBLE, %RESET_TYPE_ALL, %RESET_TYPE_WORLD and
* %RESET_TYPE_DISABLE specify the method/scope of the reset. The
* other values specify reasons, which ef4_schedule_reset() will choose
* a method for.
*
* Reset methods are numbered in order of increasing scope.
*
* @RESET_TYPE_INVISIBLE: Reset datapath and MAC
* @RESET_TYPE_RECOVER_OR_ALL: Try to recover. Apply RESET_TYPE_ALL
* if unsuccessful.
* @RESET_TYPE_ALL: Reset datapath, MAC and PHY
* @RESET_TYPE_WORLD: Reset as much as possible
* @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if
* unsuccessful.
* @RESET_TYPE_DATAPATH: Reset datapath only.
* @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
* @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
* @RESET_TYPE_INT_ERROR: reset due to internal error
* @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors
* @RESET_TYPE_DMA_ERROR: DMA error
* @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
*/
enum reset_type {
RESET_TYPE_INVISIBLE,
RESET_TYPE_RECOVER_OR_ALL,
RESET_TYPE_ALL,
RESET_TYPE_WORLD,
RESET_TYPE_RECOVER_OR_DISABLE,
RESET_TYPE_DATAPATH,
RESET_TYPE_DISABLE,
RESET_TYPE_MAX_METHOD,
RESET_TYPE_TX_WATCHDOG,
RESET_TYPE_INT_ERROR,
RESET_TYPE_RX_RECOVERY,
RESET_TYPE_DMA_ERROR,
RESET_TYPE_TX_SKIP,
RESET_TYPE_MAX,
};
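/* Usage sketch (illustrative only, not part of the original header): error
 * paths report a *reason* and let ef4_schedule_reset() pick the method.  The
 * declarations below just restate prototypes from net_driver.h/efx.h so the
 * sketch stands alone; the example function name is hypothetical.
 */
struct ef4_nic;
void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type);
static inline void ef4_example_report_tx_watchdog(struct ef4_nic *efx)
{
	ef4_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}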
#endif /* EF4_ENUM_H */

File diff suppressed because it is too large


@ -66,7 +66,7 @@
#if IS_ENABLED(CONFIG_SENSORS_LM87)
static int efx_poke_lm87(struct i2c_client *client, const u8 *reg_values)
static int ef4_poke_lm87(struct i2c_client *client, const u8 *reg_values)
{
while (*reg_values) {
u8 reg = *reg_values++;
@ -87,7 +87,7 @@ static const u8 falcon_lm87_common_regs[] = {
0
};
static int efx_init_lm87(struct efx_nic *efx, const struct i2c_board_info *info,
static int ef4_init_lm87(struct ef4_nic *efx, const struct i2c_board_info *info,
const u8 *reg_values)
{
struct falcon_board *board = falcon_board(efx);
@ -101,10 +101,10 @@ static int efx_init_lm87(struct efx_nic *efx, const struct i2c_board_info *info,
i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
rc = efx_poke_lm87(client, reg_values);
rc = ef4_poke_lm87(client, reg_values);
if (rc)
goto err;
rc = efx_poke_lm87(client, falcon_lm87_common_regs);
rc = ef4_poke_lm87(client, falcon_lm87_common_regs);
if (rc)
goto err;
@ -116,12 +116,12 @@ err:
return rc;
}
static void efx_fini_lm87(struct efx_nic *efx)
static void ef4_fini_lm87(struct ef4_nic *efx)
{
i2c_unregister_device(falcon_board(efx)->hwmon_client);
}
static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
static int ef4_check_lm87(struct ef4_nic *efx, unsigned mask)
{
struct i2c_client *client = falcon_board(efx)->hwmon_client;
bool temp_crit, elec_fault, is_failure;
@ -129,7 +129,7 @@ static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
s32 reg;
/* If link is up then do not monitor temperature */
if (EFX_WORKAROUND_7884(efx) && efx->link_state.up)
if (EF4_WORKAROUND_7884(efx) && efx->link_state.up)
return 0;
reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
@ -179,15 +179,15 @@ static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
#else /* !CONFIG_SENSORS_LM87 */
static inline int
efx_init_lm87(struct efx_nic *efx, const struct i2c_board_info *info,
ef4_init_lm87(struct ef4_nic *efx, const struct i2c_board_info *info,
const u8 *reg_values)
{
return 0;
}
static inline void efx_fini_lm87(struct efx_nic *efx)
static inline void ef4_fini_lm87(struct ef4_nic *efx)
{
}
static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask)
static inline int ef4_check_lm87(struct ef4_nic *efx, unsigned mask)
{
return 0;
}
@ -255,7 +255,7 @@ static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask)
#define MAX664X_REG_RSL 0x02
#define MAX664X_REG_WLHO 0x0B
static void sfe4001_poweroff(struct efx_nic *efx)
static void sfe4001_poweroff(struct ef4_nic *efx)
{
struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client;
struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client;
@ -269,7 +269,7 @@ static void sfe4001_poweroff(struct efx_nic *efx)
i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
}
static int sfe4001_poweron(struct efx_nic *efx)
static int sfe4001_poweron(struct ef4_nic *efx)
{
struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client;
struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client;
@ -360,7 +360,7 @@ fail_on:
static ssize_t show_phy_flash_cfg(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL));
}
@ -368,8 +368,8 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
enum efx_phy_mode old_mode, new_mode;
struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
enum ef4_phy_mode old_mode, new_mode;
int err;
rtnl_lock();
@ -390,7 +390,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
falcon_stop_nic_stats(efx);
err = sfe4001_poweron(efx);
if (!err)
err = efx_reconfigure_port(efx);
err = ef4_reconfigure_port(efx);
if (!(new_mode & PHY_MODE_SPECIAL))
falcon_start_nic_stats(efx);
}
@ -401,7 +401,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
static DEVICE_ATTR(phy_flash_cfg, 0644, show_phy_flash_cfg, set_phy_flash_cfg);
static void sfe4001_fini(struct efx_nic *efx)
static void sfe4001_fini(struct ef4_nic *efx)
{
struct falcon_board *board = falcon_board(efx);
@ -413,13 +413,13 @@ static void sfe4001_fini(struct efx_nic *efx)
i2c_unregister_device(board->hwmon_client);
}
static int sfe4001_check_hw(struct efx_nic *efx)
static int sfe4001_check_hw(struct ef4_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
s32 status;
/* If XAUI link is up then do not monitor */
if (EFX_WORKAROUND_7884(efx) && !nic_data->xmac_poll_required)
if (EF4_WORKAROUND_7884(efx) && !nic_data->xmac_poll_required)
return 0;
/* Check the powered status of the PHY. Lack of power implies that
@ -450,7 +450,7 @@ static const struct i2c_board_info sfe4001_hwmon_info = {
* be turned on before the PHY can be used.
* Context: Process context, rtnl lock held
*/
static int sfe4001_init(struct efx_nic *efx)
static int sfe4001_init(struct ef4_nic *efx)
{
struct falcon_board *board = falcon_board(efx);
int rc;
@ -537,7 +537,7 @@ static const struct i2c_board_info sfe4002_hwmon_info = {
#define SFE4002_RX_LED (0) /* Green */
#define SFE4002_TX_LED (1) /* Amber */
static void sfe4002_init_phy(struct efx_nic *efx)
static void sfe4002_init_phy(struct ef4_nic *efx)
{
/* Set the TX and RX LEDs to reflect status and activity, and the
* fault LED off */
@ -548,14 +548,14 @@ static void sfe4002_init_phy(struct efx_nic *efx)
falcon_qt202x_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF);
}
static void sfe4002_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
static void sfe4002_set_id_led(struct ef4_nic *efx, enum ef4_led_mode mode)
{
falcon_qt202x_set_led(
efx, SFE4002_FAULT_LED,
(mode == EFX_LED_ON) ? QUAKE_LED_ON : QUAKE_LED_OFF);
(mode == EF4_LED_ON) ? QUAKE_LED_ON : QUAKE_LED_OFF);
}
static int sfe4002_check_hw(struct efx_nic *efx)
static int sfe4002_check_hw(struct ef4_nic *efx)
{
struct falcon_board *board = falcon_board(efx);
@ -565,12 +565,12 @@ static int sfe4002_check_hw(struct efx_nic *efx)
(board->major == 0 && board->minor == 0) ?
~LM87_ALARM_TEMP_EXT1 : ~0;
return efx_check_lm87(efx, alarm_mask);
return ef4_check_lm87(efx, alarm_mask);
}
static int sfe4002_init(struct efx_nic *efx)
static int sfe4002_init(struct ef4_nic *efx)
{
return efx_init_lm87(efx, &sfe4002_hwmon_info, sfe4002_lm87_regs);
return ef4_init_lm87(efx, &sfe4002_hwmon_info, sfe4002_lm87_regs);
}
/*****************************************************************************
@ -599,7 +599,7 @@ static const struct i2c_board_info sfn4112f_hwmon_info = {
#define SFN4112F_ACT_LED 0
#define SFN4112F_LINK_LED 1
static void sfn4112f_init_phy(struct efx_nic *efx)
static void sfn4112f_init_phy(struct ef4_nic *efx)
{
falcon_qt202x_set_led(efx, SFN4112F_ACT_LED,
QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACT);
@ -607,15 +607,15 @@ static void sfn4112f_init_phy(struct efx_nic *efx)
QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT);
}
static void sfn4112f_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
static void sfn4112f_set_id_led(struct ef4_nic *efx, enum ef4_led_mode mode)
{
int reg;
switch (mode) {
case EFX_LED_OFF:
case EF4_LED_OFF:
reg = QUAKE_LED_OFF;
break;
case EFX_LED_ON:
case EF4_LED_ON:
reg = QUAKE_LED_ON;
break;
default:
@ -626,15 +626,15 @@ static void sfn4112f_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
falcon_qt202x_set_led(efx, SFN4112F_LINK_LED, reg);
}
static int sfn4112f_check_hw(struct efx_nic *efx)
static int sfn4112f_check_hw(struct ef4_nic *efx)
{
/* Mask out unused sensors */
return efx_check_lm87(efx, ~0x48);
return ef4_check_lm87(efx, ~0x48);
}
static int sfn4112f_init(struct efx_nic *efx)
static int sfn4112f_init(struct ef4_nic *efx)
{
return efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs);
return ef4_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs);
}
/*****************************************************************************
@ -663,7 +663,7 @@ static const struct i2c_board_info sfe4003_hwmon_info = {
#define SFE4003_LED_ON 1
#define SFE4003_LED_OFF 0
static void sfe4003_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
static void sfe4003_set_id_led(struct ef4_nic *efx, enum ef4_led_mode mode)
{
struct falcon_board *board = falcon_board(efx);
@ -673,10 +673,10 @@ static void sfe4003_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
falcon_txc_set_gpio_val(
efx, SFE4003_RED_LED_GPIO,
(mode == EFX_LED_ON) ? SFE4003_LED_ON : SFE4003_LED_OFF);
(mode == EF4_LED_ON) ? SFE4003_LED_ON : SFE4003_LED_OFF);
}
static void sfe4003_init_phy(struct efx_nic *efx)
static void sfe4003_init_phy(struct ef4_nic *efx)
{
struct falcon_board *board = falcon_board(efx);
@ -688,7 +688,7 @@ static void sfe4003_init_phy(struct efx_nic *efx)
falcon_txc_set_gpio_val(efx, SFE4003_RED_LED_GPIO, SFE4003_LED_OFF);
}
static int sfe4003_check_hw(struct efx_nic *efx)
static int sfe4003_check_hw(struct ef4_nic *efx)
{
struct falcon_board *board = falcon_board(efx);
@ -698,19 +698,19 @@ static int sfe4003_check_hw(struct efx_nic *efx)
(board->major == 0 && board->minor <= 2) ?
~LM87_ALARM_TEMP_EXT1 : ~0;
return efx_check_lm87(efx, alarm_mask);
return ef4_check_lm87(efx, alarm_mask);
}
static int sfe4003_init(struct efx_nic *efx)
static int sfe4003_init(struct ef4_nic *efx)
{
return efx_init_lm87(efx, &sfe4003_hwmon_info, sfe4003_lm87_regs);
return ef4_init_lm87(efx, &sfe4003_hwmon_info, sfe4003_lm87_regs);
}
static const struct falcon_board_type board_types[] = {
{
.id = FALCON_BOARD_SFE4001,
.init = sfe4001_init,
.init_phy = efx_port_dummy_op_void,
.init_phy = ef4_port_dummy_op_void,
.fini = sfe4001_fini,
.set_id_led = tenxpress_set_id_led,
.monitor = sfe4001_check_hw,
@ -719,7 +719,7 @@ static const struct falcon_board_type board_types[] = {
.id = FALCON_BOARD_SFE4002,
.init = sfe4002_init,
.init_phy = sfe4002_init_phy,
.fini = efx_fini_lm87,
.fini = ef4_fini_lm87,
.set_id_led = sfe4002_set_id_led,
.monitor = sfe4002_check_hw,
},
@ -727,7 +727,7 @@ static const struct falcon_board_type board_types[] = {
.id = FALCON_BOARD_SFE4003,
.init = sfe4003_init,
.init_phy = sfe4003_init_phy,
.fini = efx_fini_lm87,
.fini = ef4_fini_lm87,
.set_id_led = sfe4003_set_id_led,
.monitor = sfe4003_check_hw,
},
@ -735,13 +735,13 @@ static const struct falcon_board_type board_types[] = {
.id = FALCON_BOARD_SFN4112F,
.init = sfn4112f_init,
.init_phy = sfn4112f_init_phy,
.fini = efx_fini_lm87,
.fini = ef4_fini_lm87,
.set_id_led = sfn4112f_set_id_led,
.monitor = sfn4112f_check_hw,
},
};
int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
int falcon_probe_board(struct ef4_nic *efx, u16 revision_info)
{
struct falcon_board *board = falcon_board(efx);
u8 type_id = FALCON_BOARD_TYPE(revision_info);

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,272 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EF4_FILTER_H
#define EF4_FILTER_H
#include <linux/types.h>
#include <linux/if_ether.h>
#include <asm/byteorder.h>
/**
* enum ef4_filter_match_flags - Flags for hardware filter match type
* @EF4_FILTER_MATCH_REM_HOST: Match by remote IP host address
* @EF4_FILTER_MATCH_LOC_HOST: Match by local IP host address
* @EF4_FILTER_MATCH_REM_MAC: Match by remote MAC address
* @EF4_FILTER_MATCH_REM_PORT: Match by remote TCP/UDP port
* @EF4_FILTER_MATCH_LOC_MAC: Match by local MAC address
* @EF4_FILTER_MATCH_LOC_PORT: Match by local TCP/UDP port
* @EF4_FILTER_MATCH_ETHER_TYPE: Match by Ether-type
* @EF4_FILTER_MATCH_INNER_VID: Match by inner VLAN ID
* @EF4_FILTER_MATCH_OUTER_VID: Match by outer VLAN ID
* @EF4_FILTER_MATCH_IP_PROTO: Match by IP transport protocol
* @EF4_FILTER_MATCH_LOC_MAC_IG: Match by local MAC address I/G bit.
* Used for RX default unicast and multicast/broadcast filters.
*
* Only some combinations are supported, depending on NIC type:
*
* - Falcon supports RX filters matching by {TCP,UDP}/IPv4 4-tuple or
* local 2-tuple (only implemented for Falcon B0)
*
* - Siena supports RX and TX filters matching by {TCP,UDP}/IPv4 4-tuple
* or local 2-tuple, or local MAC with or without outer VID, and RX
* default filters
*
* - Huntington supports filter matching controlled by firmware, potentially
* using {TCP,UDP}/IPv{4,6} 4-tuple or local 2-tuple, local MAC or I/G bit,
* with or without outer and inner VID
*/
enum ef4_filter_match_flags {
EF4_FILTER_MATCH_REM_HOST = 0x0001,
EF4_FILTER_MATCH_LOC_HOST = 0x0002,
EF4_FILTER_MATCH_REM_MAC = 0x0004,
EF4_FILTER_MATCH_REM_PORT = 0x0008,
EF4_FILTER_MATCH_LOC_MAC = 0x0010,
EF4_FILTER_MATCH_LOC_PORT = 0x0020,
EF4_FILTER_MATCH_ETHER_TYPE = 0x0040,
EF4_FILTER_MATCH_INNER_VID = 0x0080,
EF4_FILTER_MATCH_OUTER_VID = 0x0100,
EF4_FILTER_MATCH_IP_PROTO = 0x0200,
EF4_FILTER_MATCH_LOC_MAC_IG = 0x0400,
};
/**
* enum ef4_filter_priority - priority of a hardware filter specification
* @EF4_FILTER_PRI_HINT: Performance hint
* @EF4_FILTER_PRI_AUTO: Automatic filter based on device address list
* or hardware requirements. This may only be used by the filter
* implementation for each NIC type.
* @EF4_FILTER_PRI_MANUAL: Manually configured filter
* @EF4_FILTER_PRI_REQUIRED: Required for correct behaviour (user-level
* networking and SR-IOV)
*/
enum ef4_filter_priority {
EF4_FILTER_PRI_HINT = 0,
EF4_FILTER_PRI_AUTO,
EF4_FILTER_PRI_MANUAL,
EF4_FILTER_PRI_REQUIRED,
};
/**
* enum ef4_filter_flags - flags for hardware filter specifications
* @EF4_FILTER_FLAG_RX_RSS: Use RSS to spread across multiple queues.
* By default, matching packets will be delivered only to the
* specified queue. If this flag is set, they will be delivered
* to a range of queues offset from the specified queue number
* according to the indirection table.
* @EF4_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
* queue.
* @EF4_FILTER_FLAG_RX_OVER_AUTO: Indicates a filter that is
* overriding an automatic filter (priority
* %EF4_FILTER_PRI_AUTO). This may only be set by the filter
* implementation for each type. A removal request will restore
* the automatic filter in its place.
* @EF4_FILTER_FLAG_RX: Filter is for RX
* @EF4_FILTER_FLAG_TX: Filter is for TX
*/
enum ef4_filter_flags {
EF4_FILTER_FLAG_RX_RSS = 0x01,
EF4_FILTER_FLAG_RX_SCATTER = 0x02,
EF4_FILTER_FLAG_RX_OVER_AUTO = 0x04,
EF4_FILTER_FLAG_RX = 0x08,
EF4_FILTER_FLAG_TX = 0x10,
};
/**
* struct ef4_filter_spec - specification for a hardware filter
* @match_flags: Match type flags, from &enum ef4_filter_match_flags
* @priority: Priority of the filter, from &enum ef4_filter_priority
* @flags: Miscellaneous flags, from &enum ef4_filter_flags
* @rss_context: RSS context to use, if %EF4_FILTER_FLAG_RX_RSS is set
* @dmaq_id: Source/target queue index, or %EF4_FILTER_RX_DMAQ_ID_DROP for
* an RX drop filter
* @outer_vid: Outer VLAN ID to match, if %EF4_FILTER_MATCH_OUTER_VID is set
* @inner_vid: Inner VLAN ID to match, if %EF4_FILTER_MATCH_INNER_VID is set
* @loc_mac: Local MAC address to match, if %EF4_FILTER_MATCH_LOC_MAC or
* %EF4_FILTER_MATCH_LOC_MAC_IG is set
* @rem_mac: Remote MAC address to match, if %EF4_FILTER_MATCH_REM_MAC is set
* @ether_type: Ether-type to match, if %EF4_FILTER_MATCH_ETHER_TYPE is set
* @ip_proto: IP transport protocol to match, if %EF4_FILTER_MATCH_IP_PROTO
* is set
* @loc_host: Local IP host to match, if %EF4_FILTER_MATCH_LOC_HOST is set
* @rem_host: Remote IP host to match, if %EF4_FILTER_MATCH_REM_HOST is set
* @loc_port: Local TCP/UDP port to match, if %EF4_FILTER_MATCH_LOC_PORT is set
* @rem_port: Remote TCP/UDP port to match, if %EF4_FILTER_MATCH_REM_PORT is set
*
* The ef4_filter_init_rx() or ef4_filter_init_tx() function *must* be
* used to initialise the structure. The ef4_filter_set_*() functions
* may then be used to set @rss_context, @match_flags and related
* fields.
*
* The @priority field is used by software to determine whether a new
* filter may replace an old one. The hardware priority of a filter
* depends on which fields are matched.
*/
struct ef4_filter_spec {
u32 match_flags:12;
u32 priority:2;
u32 flags:6;
u32 dmaq_id:12;
u32 rss_context;
__be16 outer_vid __aligned(4); /* allow jhash2() of match values */
__be16 inner_vid;
u8 loc_mac[ETH_ALEN];
u8 rem_mac[ETH_ALEN];
__be16 ether_type;
u8 ip_proto;
__be32 loc_host[4];
__be32 rem_host[4];
__be16 loc_port;
__be16 rem_port;
/* total 64 bytes */
};
enum {
EF4_FILTER_RSS_CONTEXT_DEFAULT = 0xffffffff,
EF4_FILTER_RX_DMAQ_ID_DROP = 0xfff
};
static inline void ef4_filter_init_rx(struct ef4_filter_spec *spec,
enum ef4_filter_priority priority,
enum ef4_filter_flags flags,
unsigned rxq_id)
{
memset(spec, 0, sizeof(*spec));
spec->priority = priority;
spec->flags = EF4_FILTER_FLAG_RX | flags;
spec->rss_context = EF4_FILTER_RSS_CONTEXT_DEFAULT;
spec->dmaq_id = rxq_id;
}
static inline void ef4_filter_init_tx(struct ef4_filter_spec *spec,
unsigned txq_id)
{
memset(spec, 0, sizeof(*spec));
spec->priority = EF4_FILTER_PRI_REQUIRED;
spec->flags = EF4_FILTER_FLAG_TX;
spec->dmaq_id = txq_id;
}
/**
* ef4_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
* @spec: Specification to initialise
* @proto: Transport layer protocol number
* @host: Local host address (network byte order)
* @port: Local port (network byte order)
*/
static inline int
ef4_filter_set_ipv4_local(struct ef4_filter_spec *spec, u8 proto,
__be32 host, __be16 port)
{
spec->match_flags |=
EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT;
spec->ether_type = htons(ETH_P_IP);
spec->ip_proto = proto;
spec->loc_host[0] = host;
spec->loc_port = port;
return 0;
}
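As a minimal illustration of the usage contract described in the struct ef4_filter_spec comment above, here is a hedged sketch of steering TCP/IPv4 traffic for a local address and port to an RX queue. ef4_filter_insert_filter() is assumed to be the insertion entry point declared in efx.h; the address, port and queue values are hypothetical.

	/* Sketch only: address, port and RX queue index are hypothetical. */
	static int example_steer_local_tcp(struct ef4_nic *efx)
	{
		struct ef4_filter_spec spec;

		ef4_filter_init_rx(&spec, EF4_FILTER_PRI_HINT, 0, 0 /* rxq 0 */);
		ef4_filter_set_ipv4_local(&spec, IPPROTO_TCP,
					  htonl(INADDR_LOOPBACK), htons(5201));
		return ef4_filter_insert_filter(efx, &spec, false); /* assumed API */
	}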
/**
* ef4_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
* @spec: Specification to initialise
* @proto: Transport layer protocol number
* @lhost: Local host address (network byte order)
* @lport: Local port (network byte order)
* @rhost: Remote host address (network byte order)
* @rport: Remote port (network byte order)
*/
static inline int
ef4_filter_set_ipv4_full(struct ef4_filter_spec *spec, u8 proto,
__be32 lhost, __be16 lport,
__be32 rhost, __be16 rport)
{
spec->match_flags |=
EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT |
EF4_FILTER_MATCH_REM_HOST | EF4_FILTER_MATCH_REM_PORT;
spec->ether_type = htons(ETH_P_IP);
spec->ip_proto = proto;
spec->loc_host[0] = lhost;
spec->loc_port = lport;
spec->rem_host[0] = rhost;
spec->rem_port = rport;
return 0;
}
enum {
EF4_FILTER_VID_UNSPEC = 0xffff,
};
/**
* ef4_filter_set_eth_local - specify local Ethernet address and/or VID
* @spec: Specification to initialise
* @vid: Outer VLAN ID to match, or %EF4_FILTER_VID_UNSPEC
* @addr: Local Ethernet MAC address, or %NULL
*/
static inline int ef4_filter_set_eth_local(struct ef4_filter_spec *spec,
u16 vid, const u8 *addr)
{
if (vid == EF4_FILTER_VID_UNSPEC && addr == NULL)
return -EINVAL;
if (vid != EF4_FILTER_VID_UNSPEC) {
spec->match_flags |= EF4_FILTER_MATCH_OUTER_VID;
spec->outer_vid = htons(vid);
}
if (addr != NULL) {
spec->match_flags |= EF4_FILTER_MATCH_LOC_MAC;
ether_addr_copy(spec->loc_mac, addr);
}
return 0;
}
/**
* ef4_filter_set_uc_def - specify matching otherwise-unmatched unicast
* @spec: Specification to initialise
*/
static inline int ef4_filter_set_uc_def(struct ef4_filter_spec *spec)
{
spec->match_flags |= EF4_FILTER_MATCH_LOC_MAC_IG;
return 0;
}
/**
* ef4_filter_set_mc_def - specify matching otherwise-unmatched multicast
* @spec: Specification to initialise
*/
static inline int ef4_filter_set_mc_def(struct ef4_filter_spec *spec)
{
spec->match_flags |= EF4_FILTER_MATCH_LOC_MAC_IG;
spec->loc_mac[0] = 1;
return 0;
}
#endif /* EF4_FILTER_H */

View File

@ -0,0 +1,290 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EF4_IO_H
#define EF4_IO_H
#include <linux/io.h>
#include <linux/spinlock.h>
/**************************************************************************
*
* NIC register I/O
*
**************************************************************************
*
* Notes on locking strategy for the Falcon architecture:
*
* Many CSRs are very wide and cannot be read or written atomically.
* Writes from the host are buffered by the Bus Interface Unit (BIU)
* up to 128 bits. Whenever the host writes part of such a register,
* the BIU collects the written value and does not write to the
* underlying register until all 4 dwords have been written. A
* similar buffering scheme applies to host access to the NIC's 64-bit
* SRAM.
*
* Writes to different CSRs and 64-bit SRAM words must be serialised,
* since interleaved access can result in lost writes. We use
* ef4_nic::biu_lock for this.
*
* We also serialise reads from 128-bit CSRs and SRAM with the same
* spinlock. This may not be necessary, but it doesn't really matter
* as there are no such reads on the fast path.
*
* The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
* 128-bit but are special-cased in the BIU to avoid the need for
* locking in the host:
*
* - They are write-only.
* - The semantics of writing to these registers are such that
* replacing the low 96 bits with zero does not affect functionality.
* - If the host writes to the last dword address of such a register
* (i.e. the high 32 bits) the underlying register will always be
* written. If the collector and the current write together do not
* provide values for all 128 bits of the register, the low 96 bits
* will be written as zero.
* - If the host writes to the address of any other part of such a
* register while the collector already holds values for some other
* register, the write is discarded and the collector maintains its
* current state.
*
* The EF10 architecture exposes very few registers to the host and
* most of them are only 32 bits wide. The only exceptions are the MC
* doorbell register pair, which has its own latching, and
* TX_DESC_UPD, which works in a similar way to the Falcon
* architecture.
*/
#if BITS_PER_LONG == 64
#define EF4_USE_QWORD_IO 1
#endif
#ifdef EF4_USE_QWORD_IO
static inline void _ef4_writeq(struct ef4_nic *efx, __le64 value,
unsigned int reg)
{
__raw_writeq((__force u64)value, efx->membase + reg);
}
static inline __le64 _ef4_readq(struct ef4_nic *efx, unsigned int reg)
{
return (__force __le64)__raw_readq(efx->membase + reg);
}
#endif
static inline void _ef4_writed(struct ef4_nic *efx, __le32 value,
unsigned int reg)
{
__raw_writel((__force u32)value, efx->membase + reg);
}
static inline __le32 _ef4_readd(struct ef4_nic *efx, unsigned int reg)
{
return (__force __le32)__raw_readl(efx->membase + reg);
}
/* Write a normal 128-bit CSR, locking as appropriate. */
static inline void ef4_writeo(struct ef4_nic *efx, const ef4_oword_t *value,
unsigned int reg)
{
unsigned long flags __attribute__ ((unused));
netif_vdbg(efx, hw, efx->net_dev,
"writing register %x with " EF4_OWORD_FMT "\n", reg,
EF4_OWORD_VAL(*value));
spin_lock_irqsave(&efx->biu_lock, flags);
#ifdef EF4_USE_QWORD_IO
_ef4_writeq(efx, value->u64[0], reg + 0);
_ef4_writeq(efx, value->u64[1], reg + 8);
#else
_ef4_writed(efx, value->u32[0], reg + 0);
_ef4_writed(efx, value->u32[1], reg + 4);
_ef4_writed(efx, value->u32[2], reg + 8);
_ef4_writed(efx, value->u32[3], reg + 12);
#endif
mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */
static inline void ef4_sram_writeq(struct ef4_nic *efx, void __iomem *membase,
const ef4_qword_t *value, unsigned int index)
{
unsigned int addr = index * sizeof(*value);
unsigned long flags __attribute__ ((unused));
netif_vdbg(efx, hw, efx->net_dev,
"writing SRAM address %x with " EF4_QWORD_FMT "\n",
addr, EF4_QWORD_VAL(*value));
spin_lock_irqsave(&efx->biu_lock, flags);
#ifdef EF4_USE_QWORD_IO
__raw_writeq((__force u64)value->u64[0], membase + addr);
#else
__raw_writel((__force u32)value->u32[0], membase + addr);
__raw_writel((__force u32)value->u32[1], membase + addr + 4);
#endif
mmiowb();
spin_unlock_irqrestore(&efx->biu_lock, flags);
}
/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */
static inline void ef4_writed(struct ef4_nic *efx, const ef4_dword_t *value,
unsigned int reg)
{
netif_vdbg(efx, hw, efx->net_dev,
"writing register %x with "EF4_DWORD_FMT"\n",
reg, EF4_DWORD_VAL(*value));
/* No lock required */
_ef4_writed(efx, value->u32[0], reg);
}
/* Read a 128-bit CSR, locking as appropriate. */
static inline void ef4_reado(struct ef4_nic *efx, ef4_oword_t *value,
unsigned int reg)
{
unsigned long flags __attribute__ ((unused));
spin_lock_irqsave(&efx->biu_lock, flags);
value->u32[0] = _ef4_readd(efx, reg + 0);
value->u32[1] = _ef4_readd(efx, reg + 4);
value->u32[2] = _ef4_readd(efx, reg + 8);
value->u32[3] = _ef4_readd(efx, reg + 12);
spin_unlock_irqrestore(&efx->biu_lock, flags);
netif_vdbg(efx, hw, efx->net_dev,
"read from register %x, got " EF4_OWORD_FMT "\n", reg,
EF4_OWORD_VAL(*value));
}
/* Read 64-bit SRAM through the supplied mapping, locking as appropriate. */
static inline void ef4_sram_readq(struct ef4_nic *efx, void __iomem *membase,
ef4_qword_t *value, unsigned int index)
{
unsigned int addr = index * sizeof(*value);
unsigned long flags __attribute__ ((unused));
spin_lock_irqsave(&efx->biu_lock, flags);
#ifdef EF4_USE_QWORD_IO
value->u64[0] = (__force __le64)__raw_readq(membase + addr);
#else
value->u32[0] = (__force __le32)__raw_readl(membase + addr);
value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
#endif
spin_unlock_irqrestore(&efx->biu_lock, flags);
netif_vdbg(efx, hw, efx->net_dev,
"read from SRAM address %x, got "EF4_QWORD_FMT"\n",
addr, EF4_QWORD_VAL(*value));
}
/* Read a 32-bit CSR or SRAM */
static inline void ef4_readd(struct ef4_nic *efx, ef4_dword_t *value,
unsigned int reg)
{
value->u32[0] = _ef4_readd(efx, reg);
netif_vdbg(efx, hw, efx->net_dev,
"read from register %x, got "EF4_DWORD_FMT"\n",
reg, EF4_DWORD_VAL(*value));
}
/* Write a 128-bit CSR forming part of a table */
static inline void
ef4_writeo_table(struct ef4_nic *efx, const ef4_oword_t *value,
unsigned int reg, unsigned int index)
{
ef4_writeo(efx, value, reg + index * sizeof(ef4_oword_t));
}
/* Read a 128-bit CSR forming part of a table */
static inline void ef4_reado_table(struct ef4_nic *efx, ef4_oword_t *value,
unsigned int reg, unsigned int index)
{
ef4_reado(efx, value, reg + index * sizeof(ef4_oword_t));
}
/* Page size used as step between per-VI registers */
#define EF4_VI_PAGE_SIZE 0x2000
/* Calculate offset to page-mapped register */
#define EF4_PAGED_REG(page, reg) \
((page) * EF4_VI_PAGE_SIZE + (reg))
/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
static inline void _ef4_writeo_page(struct ef4_nic *efx, ef4_oword_t *value,
unsigned int reg, unsigned int page)
{
reg = EF4_PAGED_REG(page, reg);
netif_vdbg(efx, hw, efx->net_dev,
"writing register %x with " EF4_OWORD_FMT "\n", reg,
EF4_OWORD_VAL(*value));
#ifdef EF4_USE_QWORD_IO
_ef4_writeq(efx, value->u64[0], reg + 0);
_ef4_writeq(efx, value->u64[1], reg + 8);
#else
_ef4_writed(efx, value->u32[0], reg + 0);
_ef4_writed(efx, value->u32[1], reg + 4);
_ef4_writed(efx, value->u32[2], reg + 8);
_ef4_writed(efx, value->u32[3], reg + 12);
#endif
}
#define ef4_writeo_page(efx, value, reg, page) \
_ef4_writeo_page(efx, value, \
reg + \
BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \
page)
/* Write a page-mapped 32-bit CSR (EVQ_RPTR, EVQ_TMR (EF10), or the
* high bits of RX_DESC_UPD or TX_DESC_UPD)
*/
static inline void
_ef4_writed_page(struct ef4_nic *efx, const ef4_dword_t *value,
unsigned int reg, unsigned int page)
{
ef4_writed(efx, value, EF4_PAGED_REG(page, reg));
}
#define ef4_writed_page(efx, value, reg, page) \
_ef4_writed_page(efx, value, \
reg + \
BUILD_BUG_ON_ZERO((reg) != 0x400 && \
(reg) != 0x420 && \
(reg) != 0x830 && \
(reg) != 0x83c && \
(reg) != 0xa18 && \
(reg) != 0xa1c), \
page)
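To illustrate the descriptor-pointer semantics described in the locking notes above, here is a sketch of a doorbell write that touches only the high dword of TX_DESC_UPD, leaving the low 96 bits to the collector. The register and field names (FR_AZ_TX_DESC_UPD_DWORD_P0, FRF_AZ_TX_DESC_WPTR_DWORD) and EF4_POPULATE_DWORD_1() are assumed to come from farch_regs.h and bitfield.h; code of this kind lives in farch.c rather than here.

	/* Sketch only: ring the TX doorbell without pushing a full descriptor. */
	static void example_notify_tx_desc(struct ef4_tx_queue *tx_queue)
	{
		unsigned int write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		ef4_dword_t reg;

		EF4_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
		ef4_writed_page(tx_queue->efx, &reg,
				FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
	}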
/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
* in the BIU means that writes to TIMER_COMMAND[0] invalidate the
* collector register.
*/
static inline void _ef4_writed_page_locked(struct ef4_nic *efx,
const ef4_dword_t *value,
unsigned int reg,
unsigned int page)
{
unsigned long flags __attribute__ ((unused));
if (page == 0) {
spin_lock_irqsave(&efx->biu_lock, flags);
ef4_writed(efx, value, EF4_PAGED_REG(page, reg));
spin_unlock_irqrestore(&efx->biu_lock, flags);
} else {
ef4_writed(efx, value, EF4_PAGED_REG(page, reg));
}
}
#define ef4_writed_page_locked(efx, value, reg, page) \
_ef4_writed_page_locked(efx, value, \
reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \
page)
#endif /* EF4_IO_H */

View File

@ -16,7 +16,7 @@
#include "mdio_10g.h"
#include "workarounds.h"
unsigned efx_mdio_id_oui(u32 id)
unsigned ef4_mdio_id_oui(u32 id)
{
unsigned oui = 0;
int i;
@ -31,19 +31,19 @@ unsigned efx_mdio_id_oui(u32 id)
return oui;
}
int efx_mdio_reset_mmd(struct efx_nic *port, int mmd,
int ef4_mdio_reset_mmd(struct ef4_nic *port, int mmd,
int spins, int spintime)
{
u32 ctrl;
/* Catch callers passing values in the wrong units (or just silly) */
EFX_BUG_ON_PARANOID(spins * spintime >= 5000);
EF4_BUG_ON_PARANOID(spins * spintime >= 5000);
efx_mdio_write(port, mmd, MDIO_CTRL1, MDIO_CTRL1_RESET);
ef4_mdio_write(port, mmd, MDIO_CTRL1, MDIO_CTRL1_RESET);
/* Wait for the reset bit to clear. */
do {
msleep(spintime);
ctrl = efx_mdio_read(port, mmd, MDIO_CTRL1);
ctrl = ef4_mdio_read(port, mmd, MDIO_CTRL1);
spins--;
} while (spins && (ctrl & MDIO_CTRL1_RESET));
@ -51,13 +51,13 @@ int efx_mdio_reset_mmd(struct efx_nic *port, int mmd,
return spins ? spins : -ETIMEDOUT;
}
static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd)
static int ef4_mdio_check_mmd(struct ef4_nic *efx, int mmd)
{
int status;
if (mmd != MDIO_MMD_AN) {
/* Read MMD STATUS2 to check it is responding. */
status = efx_mdio_read(efx, mmd, MDIO_STAT2);
status = ef4_mdio_read(efx, mmd, MDIO_STAT2);
if ((status & MDIO_STAT2_DEVPRST) != MDIO_STAT2_DEVPRST_VAL) {
netif_err(efx, hw, efx->net_dev,
"PHY MMD %d not responding.\n", mmd);
@ -72,7 +72,7 @@ static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd)
#define MDIO45_RESET_TIME 1000 /* ms */
#define MDIO45_RESET_ITERS 100
int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask)
int ef4_mdio_wait_reset_mmds(struct ef4_nic *efx, unsigned int mmd_mask)
{
const int spintime = MDIO45_RESET_TIME / MDIO45_RESET_ITERS;
int tries = MDIO45_RESET_ITERS;
@ -86,7 +86,7 @@ int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask)
in_reset = 0;
while (mask) {
if (mask & 1) {
stat = efx_mdio_read(efx, mmd, MDIO_CTRL1);
stat = ef4_mdio_read(efx, mmd, MDIO_CTRL1);
if (stat < 0) {
netif_err(efx, hw, efx->net_dev,
"failed to read status of"
@ -113,7 +113,7 @@ int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask)
return rc;
}
int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask)
int ef4_mdio_check_mmds(struct ef4_nic *efx, unsigned int mmd_mask)
{
int mmd = 0, probe_mmd, devs1, devs2;
u32 devices;
@ -125,8 +125,8 @@ int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask)
__ffs(mmd_mask);
/* Check all the expected MMDs are present */
devs1 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS1);
devs2 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS2);
devs1 = ef4_mdio_read(efx, probe_mmd, MDIO_DEVS1);
devs2 = ef4_mdio_read(efx, probe_mmd, MDIO_DEVS2);
if (devs1 < 0 || devs2 < 0) {
netif_err(efx, hw, efx->net_dev,
"failed to read devices present\n");
@ -143,7 +143,7 @@ int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask)
/* Check all required MMDs are responding and happy. */
while (mmd_mask) {
if ((mmd_mask & 1) && efx_mdio_check_mmd(efx, mmd))
if ((mmd_mask & 1) && ef4_mdio_check_mmd(efx, mmd))
return -EIO;
mmd_mask = mmd_mask >> 1;
mmd++;
@ -152,7 +152,7 @@ int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask)
return 0;
}
bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
bool ef4_mdio_links_ok(struct ef4_nic *efx, unsigned int mmd_mask)
{
/* If the port is in loopback, then we should only consider a subset
* of MMDs */
@ -160,7 +160,7 @@ bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
return true;
else if (LOOPBACK_MASK(efx) & LOOPBACKS_WS)
return false;
else if (efx_phy_mode_disabled(efx->phy_mode))
else if (ef4_phy_mode_disabled(efx->phy_mode))
return false;
else if (efx->loopback_mode == LOOPBACK_PHYXS)
mmd_mask &= ~(MDIO_DEVS_PHYXS |
@ -178,59 +178,59 @@ bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
return mdio45_links_ok(&efx->mdio, mmd_mask);
}
void efx_mdio_transmit_disable(struct efx_nic *efx)
void ef4_mdio_transmit_disable(struct ef4_nic *efx)
{
efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD,
ef4_mdio_set_flag(efx, MDIO_MMD_PMAPMD,
MDIO_PMA_TXDIS, MDIO_PMD_TXDIS_GLOBAL,
efx->phy_mode & PHY_MODE_TX_DISABLED);
}
void efx_mdio_phy_reconfigure(struct efx_nic *efx)
void ef4_mdio_phy_reconfigure(struct ef4_nic *efx)
{
efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD,
ef4_mdio_set_flag(efx, MDIO_MMD_PMAPMD,
MDIO_CTRL1, MDIO_PMA_CTRL1_LOOPBACK,
efx->loopback_mode == LOOPBACK_PMAPMD);
efx_mdio_set_flag(efx, MDIO_MMD_PCS,
ef4_mdio_set_flag(efx, MDIO_MMD_PCS,
MDIO_CTRL1, MDIO_PCS_CTRL1_LOOPBACK,
efx->loopback_mode == LOOPBACK_PCS);
efx_mdio_set_flag(efx, MDIO_MMD_PHYXS,
ef4_mdio_set_flag(efx, MDIO_MMD_PHYXS,
MDIO_CTRL1, MDIO_PHYXS_CTRL1_LOOPBACK,
efx->loopback_mode == LOOPBACK_PHYXS_WS);
}
static void efx_mdio_set_mmd_lpower(struct efx_nic *efx,
static void ef4_mdio_set_mmd_lpower(struct ef4_nic *efx,
int lpower, int mmd)
{
int stat = efx_mdio_read(efx, mmd, MDIO_STAT1);
int stat = ef4_mdio_read(efx, mmd, MDIO_STAT1);
netif_vdbg(efx, drv, efx->net_dev, "Setting low power mode for MMD %d to %d\n",
mmd, lpower);
if (stat & MDIO_STAT1_LPOWERABLE) {
efx_mdio_set_flag(efx, mmd, MDIO_CTRL1,
ef4_mdio_set_flag(efx, mmd, MDIO_CTRL1,
MDIO_CTRL1_LPOWER, lpower);
}
}
void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
void ef4_mdio_set_mmds_lpower(struct ef4_nic *efx,
int low_power, unsigned int mmd_mask)
{
int mmd = 0;
mmd_mask &= ~MDIO_DEVS_AN;
while (mmd_mask) {
if (mmd_mask & 1)
efx_mdio_set_mmd_lpower(efx, low_power, mmd);
ef4_mdio_set_mmd_lpower(efx, low_power, mmd);
mmd_mask = (mmd_mask >> 1);
mmd++;
}
}
/**
* efx_mdio_set_settings - Set (some of) the PHY settings over MDIO.
* ef4_mdio_set_settings - Set (some of) the PHY settings over MDIO.
* @efx: Efx NIC
* @ecmd: New settings
*/
int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
int ef4_mdio_set_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd)
{
struct ethtool_cmd prev = { .cmd = ETHTOOL_GSET };
@ -252,16 +252,16 @@ int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
(ecmd->advertising | SUPPORTED_Autoneg) & ~prev.supported)
return -EINVAL;
efx_link_set_advertising(efx, ecmd->advertising | ADVERTISED_Autoneg);
efx_mdio_an_reconfigure(efx);
ef4_link_set_advertising(efx, ecmd->advertising | ADVERTISED_Autoneg);
ef4_mdio_an_reconfigure(efx);
return 0;
}
/**
* efx_mdio_an_reconfigure - Push advertising flags and restart autonegotiation
* ef4_mdio_an_reconfigure - Push advertising flags and restart autonegotiation
* @efx: Efx NIC
*/
void efx_mdio_an_reconfigure(struct efx_nic *efx)
void ef4_mdio_an_reconfigure(struct ef4_nic *efx)
{
int reg;
@ -273,32 +273,32 @@ void efx_mdio_an_reconfigure(struct efx_nic *efx)
reg |= ADVERTISE_PAUSE_CAP;
if (efx->link_advertising & ADVERTISED_Asym_Pause)
reg |= ADVERTISE_PAUSE_ASYM;
efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
ef4_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
/* Set up the (extended) next page */
efx->phy_op->set_npage_adv(efx, efx->link_advertising);
/* Enable and restart AN */
reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1);
reg = ef4_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1);
reg |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART | MDIO_AN_CTRL1_XNP;
efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg);
ef4_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg);
}
u8 efx_mdio_get_pause(struct efx_nic *efx)
u8 ef4_mdio_get_pause(struct ef4_nic *efx)
{
BUILD_BUG_ON(EFX_FC_AUTO & (EFX_FC_RX | EFX_FC_TX));
BUILD_BUG_ON(EF4_FC_AUTO & (EF4_FC_RX | EF4_FC_TX));
if (!(efx->wanted_fc & EFX_FC_AUTO))
if (!(efx->wanted_fc & EF4_FC_AUTO))
return efx->wanted_fc;
WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN));
return mii_resolve_flowctrl_fdx(
mii_advertise_flowctrl(efx->wanted_fc),
efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA));
ef4_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA));
}
int efx_mdio_test_alive(struct efx_nic *efx)
int ef4_mdio_test_alive(struct ef4_nic *efx)
{
int rc;
int devad = __ffs(efx->mdio.mmds);
@ -306,8 +306,8 @@ int efx_mdio_test_alive(struct efx_nic *efx)
mutex_lock(&efx->mac_lock);
physid1 = efx_mdio_read(efx, devad, MDIO_DEVID1);
physid2 = efx_mdio_read(efx, devad, MDIO_DEVID2);
physid1 = ef4_mdio_read(efx, devad, MDIO_DEVID1);
physid2 = ef4_mdio_read(efx, devad, MDIO_DEVID2);
if ((physid1 == 0x0000) || (physid1 == 0xffff) ||
(physid2 == 0x0000) || (physid2 == 0xffff)) {
@ -315,7 +315,7 @@ int efx_mdio_test_alive(struct efx_nic *efx)
"no MDIO PHY present with ID %d\n", efx->mdio.prtad);
rc = -EINVAL;
} else {
rc = efx_mdio_check_mmds(efx, efx->mdio.mmds);
rc = ef4_mdio_check_mmds(efx, efx->mdio.mmds);
}
mutex_unlock(&efx->mac_lock);

View File

@ -7,8 +7,8 @@
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_MDIO_10G_H
#define EFX_MDIO_10G_H
#ifndef EF4_MDIO_10G_H
#define EF4_MDIO_10G_H
#include <linux/mdio.h>
@ -18,35 +18,35 @@
#include "efx.h"
static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; }
static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; }
unsigned efx_mdio_id_oui(u32 id);
static inline unsigned ef4_mdio_id_rev(u32 id) { return id & 0xf; }
static inline unsigned ef4_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; }
unsigned ef4_mdio_id_oui(u32 id);
static inline int efx_mdio_read(struct efx_nic *efx, int devad, int addr)
static inline int ef4_mdio_read(struct ef4_nic *efx, int devad, int addr)
{
return efx->mdio.mdio_read(efx->net_dev, efx->mdio.prtad, devad, addr);
}
static inline void
efx_mdio_write(struct efx_nic *efx, int devad, int addr, int value)
ef4_mdio_write(struct ef4_nic *efx, int devad, int addr, int value)
{
efx->mdio.mdio_write(efx->net_dev, efx->mdio.prtad, devad, addr, value);
}
static inline u32 efx_mdio_read_id(struct efx_nic *efx, int mmd)
static inline u32 ef4_mdio_read_id(struct ef4_nic *efx, int mmd)
{
u16 id_low = efx_mdio_read(efx, mmd, MDIO_DEVID2);
u16 id_hi = efx_mdio_read(efx, mmd, MDIO_DEVID1);
u16 id_low = ef4_mdio_read(efx, mmd, MDIO_DEVID2);
u16 id_hi = ef4_mdio_read(efx, mmd, MDIO_DEVID1);
return (id_hi << 16) | (id_low);
}
static inline bool efx_mdio_phyxgxs_lane_sync(struct efx_nic *efx)
static inline bool ef4_mdio_phyxgxs_lane_sync(struct ef4_nic *efx)
{
int i, lane_status;
bool sync;
for (i = 0; i < 2; ++i)
lane_status = efx_mdio_read(efx, MDIO_MMD_PHYXS,
lane_status = ef4_mdio_read(efx, MDIO_MMD_PHYXS,
MDIO_PHYXS_LNSTAT);
sync = !!(lane_status & MDIO_PHYXS_LNSTAT_ALIGN);
@ -56,7 +56,7 @@ static inline bool efx_mdio_phyxgxs_lane_sync(struct efx_nic *efx)
return sync;
}
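For illustration, a sketch of how a PHY driver might combine the ID helpers above during probe; the function and the use of MDIO_MMD_PMAPMD here are hypothetical rather than taken from an existing PHY driver.

	/* Sketch only: log the PHY identity read over clause-45 MDIO. */
	static inline void example_log_phy_id(struct ef4_nic *efx)
	{
		u32 devid = ef4_mdio_read_id(efx, MDIO_MMD_PMAPMD);

		netif_info(efx, probe, efx->net_dev,
			   "PHY ID %#x (OUI %#06x, model %#x, rev %#x)\n",
			   devid, ef4_mdio_id_oui(devid),
			   ef4_mdio_id_model(devid), ef4_mdio_id_rev(devid));
	}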
const char *efx_mdio_mmd_name(int mmd);
const char *ef4_mdio_mmd_name(int mmd);
/*
* Reset a specific MMD and wait for reset to clear.
@ -64,47 +64,47 @@ const char *efx_mdio_mmd_name(int mmd);
*
* This function will sleep
*/
int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd, int spins, int spintime);
int ef4_mdio_reset_mmd(struct ef4_nic *efx, int mmd, int spins, int spintime);
/* As efx_mdio_check_mmd but for multiple MMDs */
int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask);
/* As ef4_mdio_check_mmd but for multiple MMDs */
int ef4_mdio_check_mmds(struct ef4_nic *efx, unsigned int mmd_mask);
/* Check the link status of specified mmds in bit mask */
bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
bool ef4_mdio_links_ok(struct ef4_nic *efx, unsigned int mmd_mask);
/* Generic transmit disable support through PMAPMD */
void efx_mdio_transmit_disable(struct efx_nic *efx);
void ef4_mdio_transmit_disable(struct ef4_nic *efx);
/* Generic part of reconfigure: set/clear loopback bits */
void efx_mdio_phy_reconfigure(struct efx_nic *efx);
void ef4_mdio_phy_reconfigure(struct ef4_nic *efx);
/* Set the power state of the specified MMDs */
void efx_mdio_set_mmds_lpower(struct efx_nic *efx, int low_power,
void ef4_mdio_set_mmds_lpower(struct ef4_nic *efx, int low_power,
unsigned int mmd_mask);
/* Set (some of) the PHY settings over MDIO */
int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
int ef4_mdio_set_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd);
/* Push advertising flags and restart autonegotiation */
void efx_mdio_an_reconfigure(struct efx_nic *efx);
void ef4_mdio_an_reconfigure(struct ef4_nic *efx);
/* Get pause parameters from AN if available (otherwise return
* requested pause parameters)
*/
u8 efx_mdio_get_pause(struct efx_nic *efx);
u8 ef4_mdio_get_pause(struct ef4_nic *efx);
/* Wait for specified MMDs to exit reset within a timeout */
int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask);
int ef4_mdio_wait_reset_mmds(struct ef4_nic *efx, unsigned int mmd_mask);
/* Set or clear flag, debouncing */
static inline void
efx_mdio_set_flag(struct efx_nic *efx, int devad, int addr,
ef4_mdio_set_flag(struct ef4_nic *efx, int devad, int addr,
int mask, bool state)
{
mdio_set_flag(&efx->mdio, efx->mdio.prtad, devad, addr, mask, state);
}
/* Liveness self-test for MDIO PHYs */
int efx_mdio_test_alive(struct efx_nic *efx);
int ef4_mdio_test_alive(struct ef4_nic *efx);
#endif /* EFX_MDIO_10G_H */
#endif /* EF4_MDIO_10G_H */

View File

@ -0,0 +1,133 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include "net_driver.h"
#include "efx.h"
#define to_ef4_mtd_partition(mtd) \
container_of(mtd, struct ef4_mtd_partition, mtd)
/* MTD interface */
static int ef4_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
{
struct ef4_nic *efx = mtd->priv;
int rc;
rc = efx->type->mtd_erase(mtd, erase->addr, erase->len);
if (rc == 0) {
erase->state = MTD_ERASE_DONE;
} else {
erase->state = MTD_ERASE_FAILED;
erase->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
}
mtd_erase_callback(erase);
return rc;
}
static void ef4_mtd_sync(struct mtd_info *mtd)
{
struct ef4_mtd_partition *part = to_ef4_mtd_partition(mtd);
struct ef4_nic *efx = mtd->priv;
int rc;
rc = efx->type->mtd_sync(mtd);
if (rc)
pr_err("%s: %s sync failed (%d)\n",
part->name, part->dev_type_name, rc);
}
static void ef4_mtd_remove_partition(struct ef4_mtd_partition *part)
{
int rc;
for (;;) {
rc = mtd_device_unregister(&part->mtd);
if (rc != -EBUSY)
break;
ssleep(1);
}
WARN_ON(rc);
list_del(&part->node);
}
int ef4_mtd_add(struct ef4_nic *efx, struct ef4_mtd_partition *parts,
size_t n_parts, size_t sizeof_part)
{
struct ef4_mtd_partition *part;
size_t i;
for (i = 0; i < n_parts; i++) {
part = (struct ef4_mtd_partition *)((char *)parts +
i * sizeof_part);
part->mtd.writesize = 1;
part->mtd.owner = THIS_MODULE;
part->mtd.priv = efx;
part->mtd.name = part->name;
part->mtd._erase = ef4_mtd_erase;
part->mtd._read = efx->type->mtd_read;
part->mtd._write = efx->type->mtd_write;
part->mtd._sync = ef4_mtd_sync;
efx->type->mtd_rename(part);
if (mtd_device_register(&part->mtd, NULL, 0))
goto fail;
/* Add to list in order - ef4_mtd_remove() depends on this */
list_add_tail(&part->node, &efx->mtd_list);
}
return 0;
fail:
while (i--) {
part = (struct ef4_mtd_partition *)((char *)parts +
i * sizeof_part);
ef4_mtd_remove_partition(part);
}
/* Failure is unlikely here, but probably means we're out of memory */
return -ENOMEM;
}
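A hedged sketch of a caller of ef4_mtd_add(): a hypothetical NIC type wrapping struct ef4_mtd_partition in its own partition type, as the Falcon MTD probe code is expected to do. The wrapper type, sizes, and the assumption that the NIC type supplies mtd_read/mtd_write/mtd_sync/mtd_rename callbacks are all illustrative.

	/* Sketch only: register a single hypothetical flash partition. */
	struct example_mtd_partition {
		struct ef4_mtd_partition common;
		size_t offset;		/* hypothetical device-specific state */
	};

	static int example_mtd_probe(struct ef4_nic *efx)
	{
		struct example_mtd_partition *parts;
		int rc;

		parts = kcalloc(1, sizeof(*parts), GFP_KERNEL);
		if (!parts)
			return -ENOMEM;

		parts[0].common.mtd.type = MTD_NORFLASH;
		parts[0].common.mtd.flags = MTD_CAP_NORFLASH;
		parts[0].common.mtd.size = 0x20000;	/* hypothetical 128KB */
		parts[0].common.mtd.erasesize = 0x1000;	/* hypothetical 4KB */

		/* On success the partition array is owned by efx->mtd_list */
		rc = ef4_mtd_add(efx, &parts[0].common, 1, sizeof(parts[0]));
		if (rc)
			kfree(parts);
		return rc;
	}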
void ef4_mtd_remove(struct ef4_nic *efx)
{
struct ef4_mtd_partition *parts, *part, *next;
WARN_ON(ef4_dev_registered(efx));
if (list_empty(&efx->mtd_list))
return;
parts = list_first_entry(&efx->mtd_list, struct ef4_mtd_partition,
node);
list_for_each_entry_safe(part, next, &efx->mtd_list, node)
ef4_mtd_remove_partition(part);
kfree(parts);
}
void ef4_mtd_rename(struct ef4_nic *efx)
{
struct ef4_mtd_partition *part;
ASSERT_RTNL();
list_for_each_entry(part, &efx->mtd_list, node)
efx->type->mtd_rename(part);
}

File diff suppressed because it is too large

View File

@ -0,0 +1,527 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/cpu_rmap.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"
/**************************************************************************
*
* Generic buffer handling
* These buffers are used for interrupt status, MAC stats, etc.
*
**************************************************************************/
int ef4_nic_alloc_buffer(struct ef4_nic *efx, struct ef4_buffer *buffer,
unsigned int len, gfp_t gfp_flags)
{
buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len,
&buffer->dma_addr, gfp_flags);
if (!buffer->addr)
return -ENOMEM;
buffer->len = len;
return 0;
}
void ef4_nic_free_buffer(struct ef4_nic *efx, struct ef4_buffer *buffer)
{
if (buffer->addr) {
dma_free_coherent(&efx->pci_dev->dev, buffer->len,
buffer->addr, buffer->dma_addr);
buffer->addr = NULL;
}
}
/* Check whether an event is present in the eventq at the current
* read pointer. Only useful for self-test.
*/
bool ef4_nic_event_present(struct ef4_channel *channel)
{
return ef4_event_present(ef4_event(channel, channel->eventq_read_ptr));
}
void ef4_nic_event_test_start(struct ef4_channel *channel)
{
channel->event_test_cpu = -1;
smp_wmb();
channel->efx->type->ev_test_generate(channel);
}
int ef4_nic_irq_test_start(struct ef4_nic *efx)
{
efx->last_irq_cpu = -1;
smp_wmb();
return efx->type->irq_test_generate(efx);
}
/* Hook interrupt handler(s)
* Try MSI and then legacy interrupts.
*/
int ef4_nic_init_interrupt(struct ef4_nic *efx)
{
struct ef4_channel *channel;
unsigned int n_irqs;
int rc;
if (!EF4_INT_MODE_USE_MSI(efx)) {
rc = request_irq(efx->legacy_irq,
efx->type->irq_handle_legacy, IRQF_SHARED,
efx->name, efx);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed to hook legacy IRQ %d\n",
efx->pci_dev->irq);
goto fail1;
}
return 0;
}
#ifdef CONFIG_RFS_ACCEL
if (efx->interrupt_mode == EF4_INT_MODE_MSIX) {
efx->net_dev->rx_cpu_rmap =
alloc_irq_cpu_rmap(efx->n_rx_channels);
if (!efx->net_dev->rx_cpu_rmap) {
rc = -ENOMEM;
goto fail1;
}
}
#endif
/* Hook MSI or MSI-X interrupt */
n_irqs = 0;
ef4_for_each_channel(channel, efx) {
rc = request_irq(channel->irq, efx->type->irq_handle_msi,
IRQF_PROBE_SHARED, /* Not shared */
efx->msi_context[channel->channel].name,
&efx->msi_context[channel->channel]);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed to hook IRQ %d\n", channel->irq);
goto fail2;
}
++n_irqs;
#ifdef CONFIG_RFS_ACCEL
if (efx->interrupt_mode == EF4_INT_MODE_MSIX &&
channel->channel < efx->n_rx_channels) {
rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
channel->irq);
if (rc)
goto fail2;
}
#endif
}
return 0;
fail2:
#ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
efx->net_dev->rx_cpu_rmap = NULL;
#endif
ef4_for_each_channel(channel, efx) {
if (n_irqs-- == 0)
break;
free_irq(channel->irq, &efx->msi_context[channel->channel]);
}
fail1:
return rc;
}
void ef4_nic_fini_interrupt(struct ef4_nic *efx)
{
struct ef4_channel *channel;
#ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
efx->net_dev->rx_cpu_rmap = NULL;
#endif
if (EF4_INT_MODE_USE_MSI(efx)) {
/* Disable MSI/MSI-X interrupts */
ef4_for_each_channel(channel, efx)
free_irq(channel->irq,
&efx->msi_context[channel->channel]);
} else {
/* Disable legacy interrupt */
free_irq(efx->legacy_irq, efx);
}
}
/* Register dump */
#define REGISTER_REVISION_FA 1
#define REGISTER_REVISION_FB 2
#define REGISTER_REVISION_FC 3
#define REGISTER_REVISION_FZ 3 /* last Falcon arch revision */
#define REGISTER_REVISION_ED 4
#define REGISTER_REVISION_EZ 4 /* latest EF10 revision */
struct ef4_nic_reg {
u32 offset:24;
u32 min_revision:3, max_revision:3;
};
#define REGISTER(name, arch, min_rev, max_rev) { \
arch ## R_ ## min_rev ## max_rev ## _ ## name, \
REGISTER_REVISION_ ## arch ## min_rev, \
REGISTER_REVISION_ ## arch ## max_rev \
}
#define REGISTER_AA(name) REGISTER(name, F, A, A)
#define REGISTER_AB(name) REGISTER(name, F, A, B)
#define REGISTER_AZ(name) REGISTER(name, F, A, Z)
#define REGISTER_BB(name) REGISTER(name, F, B, B)
#define REGISTER_BZ(name) REGISTER(name, F, B, Z)
#define REGISTER_CZ(name) REGISTER(name, F, C, Z)
static const struct ef4_nic_reg ef4_nic_regs[] = {
REGISTER_AZ(ADR_REGION),
REGISTER_AZ(INT_EN_KER),
REGISTER_BZ(INT_EN_CHAR),
REGISTER_AZ(INT_ADR_KER),
REGISTER_BZ(INT_ADR_CHAR),
/* INT_ACK_KER is WO */
/* INT_ISR0 is RC */
REGISTER_AZ(HW_INIT),
REGISTER_CZ(USR_EV_CFG),
REGISTER_AB(EE_SPI_HCMD),
REGISTER_AB(EE_SPI_HADR),
REGISTER_AB(EE_SPI_HDATA),
REGISTER_AB(EE_BASE_PAGE),
REGISTER_AB(EE_VPD_CFG0),
/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
/* PCIE_CORE_INDIRECT is indirect */
REGISTER_AB(NIC_STAT),
REGISTER_AB(GPIO_CTL),
REGISTER_AB(GLB_CTL),
/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
REGISTER_BZ(DP_CTRL),
REGISTER_AZ(MEM_STAT),
REGISTER_AZ(CS_DEBUG),
REGISTER_AZ(ALTERA_BUILD),
REGISTER_AZ(CSR_SPARE),
REGISTER_AB(PCIE_SD_CTL0123),
REGISTER_AB(PCIE_SD_CTL45),
REGISTER_AB(PCIE_PCS_CTL_STAT),
/* DEBUG_DATA_OUT is not used */
/* DRV_EV is WO */
REGISTER_AZ(EVQ_CTL),
REGISTER_AZ(EVQ_CNT1),
REGISTER_AZ(EVQ_CNT2),
REGISTER_AZ(BUF_TBL_CFG),
REGISTER_AZ(SRM_RX_DC_CFG),
REGISTER_AZ(SRM_TX_DC_CFG),
REGISTER_AZ(SRM_CFG),
/* BUF_TBL_UPD is WO */
REGISTER_AZ(SRM_UPD_EVQ),
REGISTER_AZ(SRAM_PARITY),
REGISTER_AZ(RX_CFG),
REGISTER_BZ(RX_FILTER_CTL),
/* RX_FLUSH_DESCQ is WO */
REGISTER_AZ(RX_DC_CFG),
REGISTER_AZ(RX_DC_PF_WM),
REGISTER_BZ(RX_RSS_TKEY),
/* RX_NODESC_DROP is RC */
REGISTER_AA(RX_SELF_RST),
/* RX_DEBUG, RX_PUSH_DROP are not used */
REGISTER_CZ(RX_RSS_IPV6_REG1),
REGISTER_CZ(RX_RSS_IPV6_REG2),
REGISTER_CZ(RX_RSS_IPV6_REG3),
/* TX_FLUSH_DESCQ is WO */
REGISTER_AZ(TX_DC_CFG),
REGISTER_AA(TX_CHKSM_CFG),
REGISTER_AZ(TX_CFG),
/* TX_PUSH_DROP is not used */
REGISTER_AZ(TX_RESERVED),
REGISTER_BZ(TX_PACE),
/* TX_PACE_DROP_QID is RC */
REGISTER_BB(TX_VLAN),
REGISTER_BZ(TX_IPFIL_PORTEN),
REGISTER_AB(MD_TXD),
REGISTER_AB(MD_RXD),
REGISTER_AB(MD_CS),
REGISTER_AB(MD_PHY_ADR),
REGISTER_AB(MD_ID),
/* MD_STAT is RC */
REGISTER_AB(MAC_STAT_DMA),
REGISTER_AB(MAC_CTRL),
REGISTER_BB(GEN_MODE),
REGISTER_AB(MAC_MC_HASH_REG0),
REGISTER_AB(MAC_MC_HASH_REG1),
REGISTER_AB(GM_CFG1),
REGISTER_AB(GM_CFG2),
/* GM_IPG and GM_HD are not used */
REGISTER_AB(GM_MAX_FLEN),
/* GM_TEST is not used */
REGISTER_AB(GM_ADR1),
REGISTER_AB(GM_ADR2),
REGISTER_AB(GMF_CFG0),
REGISTER_AB(GMF_CFG1),
REGISTER_AB(GMF_CFG2),
REGISTER_AB(GMF_CFG3),
REGISTER_AB(GMF_CFG4),
REGISTER_AB(GMF_CFG5),
REGISTER_BB(TX_SRC_MAC_CTL),
REGISTER_AB(XM_ADR_LO),
REGISTER_AB(XM_ADR_HI),
REGISTER_AB(XM_GLB_CFG),
REGISTER_AB(XM_TX_CFG),
REGISTER_AB(XM_RX_CFG),
REGISTER_AB(XM_MGT_INT_MASK),
REGISTER_AB(XM_FC),
REGISTER_AB(XM_PAUSE_TIME),
REGISTER_AB(XM_TX_PARAM),
REGISTER_AB(XM_RX_PARAM),
/* XM_MGT_INT_MSK (note no 'A') is RC */
REGISTER_AB(XX_PWR_RST),
REGISTER_AB(XX_SD_CTL),
REGISTER_AB(XX_TXDRV_CTL),
/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
/* XX_CORE_STAT is partly RC */
};
struct ef4_nic_reg_table {
u32 offset:24;
u32 min_revision:3, max_revision:3;
u32 step:6, rows:21;
};
#define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, step, rows) { \
offset, \
REGISTER_REVISION_ ## arch ## min_rev, \
REGISTER_REVISION_ ## arch ## max_rev, \
step, rows \
}
#define REGISTER_TABLE(name, arch, min_rev, max_rev) \
REGISTER_TABLE_DIMENSIONS( \
name, arch ## R_ ## min_rev ## max_rev ## _ ## name, \
arch, min_rev, max_rev, \
arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, F, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, F, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, F, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, F, B, Z)
#define REGISTER_TABLE_BB_CZ(name) \
REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, B, B, \
FR_BZ_ ## name ## _STEP, \
FR_BB_ ## name ## _ROWS), \
REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, C, Z, \
FR_BZ_ ## name ## _STEP, \
FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z)
static const struct ef4_nic_reg_table ef4_nic_reg_tables[] = {
/* DRIVER is not used */
/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
REGISTER_TABLE_BB(TX_IPFIL_TBL),
REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
/* We can't reasonably read all of the buffer table (up to 8MB!).
* However this driver will only use a few entries. Reading
* 1K entries allows for some expansion of queue count and
* size before we need to change the version. */
REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
F, A, A, 8, 1024),
REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
F, B, Z, 8, 1024),
REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
REGISTER_TABLE_BB_CZ(TIMER_TBL),
REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
/* TX_FILTER_TBL0 is huge and not used by this driver */
REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
REGISTER_TABLE_CZ(MC_TREG_SMEM),
/* MSIX_PBA_TABLE is not mapped */
/* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
REGISTER_TABLE_BZ(RX_FILTER_TBL0),
};
size_t ef4_nic_get_regs_len(struct ef4_nic *efx)
{
const struct ef4_nic_reg *reg;
const struct ef4_nic_reg_table *table;
size_t len = 0;
for (reg = ef4_nic_regs;
reg < ef4_nic_regs + ARRAY_SIZE(ef4_nic_regs);
reg++)
if (efx->type->revision >= reg->min_revision &&
efx->type->revision <= reg->max_revision)
len += sizeof(ef4_oword_t);
for (table = ef4_nic_reg_tables;
table < ef4_nic_reg_tables + ARRAY_SIZE(ef4_nic_reg_tables);
table++)
if (efx->type->revision >= table->min_revision &&
efx->type->revision <= table->max_revision)
len += table->rows * min_t(size_t, table->step, 16);
return len;
}
void ef4_nic_get_regs(struct ef4_nic *efx, void *buf)
{
const struct ef4_nic_reg *reg;
const struct ef4_nic_reg_table *table;
for (reg = ef4_nic_regs;
reg < ef4_nic_regs + ARRAY_SIZE(ef4_nic_regs);
reg++) {
if (efx->type->revision >= reg->min_revision &&
efx->type->revision <= reg->max_revision) {
ef4_reado(efx, (ef4_oword_t *)buf, reg->offset);
buf += sizeof(ef4_oword_t);
}
}
for (table = ef4_nic_reg_tables;
table < ef4_nic_reg_tables + ARRAY_SIZE(ef4_nic_reg_tables);
table++) {
size_t size, i;
if (!(efx->type->revision >= table->min_revision &&
efx->type->revision <= table->max_revision))
continue;
size = min_t(size_t, table->step, 16);
for (i = 0; i < table->rows; i++) {
switch (table->step) {
case 4: /* 32-bit SRAM */
ef4_readd(efx, buf, table->offset + 4 * i);
break;
case 8: /* 64-bit SRAM */
ef4_sram_readq(efx,
efx->membase + table->offset,
buf, i);
break;
case 16: /* 128-bit-readable register */
ef4_reado_table(efx, buf, table->offset, i);
break;
case 32: /* 128-bit register, interleaved */
ef4_reado_table(efx, buf, table->offset, 2 * i);
break;
default:
WARN_ON(1);
return;
}
buf += size;
}
}
}
/**
* ef4_nic_describe_stats - Describe supported statistics for ethtool
* @desc: Array of &struct ef4_hw_stat_desc describing the statistics
* @count: Length of the @desc array
* @mask: Bitmask of which elements of @desc are enabled
* @names: Buffer to copy names to, or %NULL. The names are copied
* starting at intervals of %ETH_GSTRING_LEN bytes.
*
* Returns the number of visible statistics, i.e. the number of set
* bits in the first @count bits of @mask for which a name is defined.
*/
size_t ef4_nic_describe_stats(const struct ef4_hw_stat_desc *desc, size_t count,
const unsigned long *mask, u8 *names)
{
size_t visible = 0;
size_t index;
for_each_set_bit(index, mask, count) {
if (desc[index].name) {
if (names) {
strlcpy(names, desc[index].name,
ETH_GSTRING_LEN);
names += ETH_GSTRING_LEN;
}
++visible;
}
}
return visible;
}
/**
* ef4_nic_update_stats - Convert statistics DMA buffer to array of u64
* @desc: Array of &struct ef4_hw_stat_desc describing the DMA buffer
* layout. DMA widths of 0, 16, 32 and 64 are supported; where
* the width is specified as 0 the corresponding element of
* @stats is not updated.
* @count: Length of the @desc array
* @mask: Bitmask of which elements of @desc are enabled
* @stats: Buffer to update with the converted statistics. The length
* of this array must be at least @count.
* @dma_buf: DMA buffer containing hardware statistics
* @accumulate: If set, the converted values will be added rather than
* directly stored to the corresponding elements of @stats
*/
void ef4_nic_update_stats(const struct ef4_hw_stat_desc *desc, size_t count,
const unsigned long *mask,
u64 *stats, const void *dma_buf, bool accumulate)
{
size_t index;
for_each_set_bit(index, mask, count) {
if (desc[index].dma_width) {
const void *addr = dma_buf + desc[index].offset;
u64 val;
switch (desc[index].dma_width) {
case 16:
val = le16_to_cpup((__le16 *)addr);
break;
case 32:
val = le32_to_cpup((__le32 *)addr);
break;
case 64:
val = le64_to_cpup((__le64 *)addr);
break;
default:
WARN_ON(1);
val = 0;
break;
}
if (accumulate)
stats[index] += val;
else
stats[index] = val;
}
}
}
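A compact sketch showing how a NIC type is expected to pair these two helpers: one descriptor table and mask drive both the ethtool name strings and the DMA-buffer conversion. The table contents, mask and function names here are hypothetical; the real tables live in falcon.c.

	/* Sketch only: two hypothetical 64-bit statistics laid out in a DMA buffer. */
	static const struct ef4_hw_stat_desc example_stat_desc[] = {
		{ "example_tx_packets", 64, 0x00 },
		{ "example_rx_packets", 64, 0x08 },
	};
	static const unsigned long example_stat_mask[] = { 0x3 };	/* both enabled */

	static size_t example_describe_stats(u8 *names)
	{
		return ef4_nic_describe_stats(example_stat_desc,
					      ARRAY_SIZE(example_stat_desc),
					      example_stat_mask, names);
	}

	static void example_update_stats(u64 *stats, const void *dma_buf)
	{
		ef4_nic_update_stats(example_stat_desc,
				     ARRAY_SIZE(example_stat_desc),
				     example_stat_mask, stats, dma_buf, false);
	}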
void ef4_nic_fix_nodesc_drop_stat(struct ef4_nic *efx, u64 *rx_nodesc_drops)
{
/* if down, or this is the first update after coming up */
if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state)
efx->rx_nodesc_drops_while_down +=
*rx_nodesc_drops - efx->rx_nodesc_drops_total;
efx->rx_nodesc_drops_total = *rx_nodesc_drops;
efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
*rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
}

View File

@ -0,0 +1,513 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EF4_NIC_H
#define EF4_NIC_H
#include <linux/net_tstamp.h>
#include <linux/i2c-algo-bit.h>
#include "net_driver.h"
#include "efx.h"
enum {
EF4_REV_FALCON_A0 = 0,
EF4_REV_FALCON_A1 = 1,
EF4_REV_FALCON_B0 = 2,
};
static inline int ef4_nic_rev(struct ef4_nic *efx)
{
return efx->type->revision;
}
u32 ef4_farch_fpga_ver(struct ef4_nic *efx);
/* NIC has two interlinked PCI functions for the same port. */
static inline bool ef4_nic_is_dual_func(struct ef4_nic *efx)
{
return ef4_nic_rev(efx) < EF4_REV_FALCON_B0;
}
/* Read the current event from the event queue */
static inline ef4_qword_t *ef4_event(struct ef4_channel *channel,
unsigned int index)
{
return ((ef4_qword_t *) (channel->eventq.buf.addr)) +
(index & channel->eventq_mask);
}
/* See if an event is present
*
* We check both the high and low dword of the event for all ones. We
* wrote all ones when we cleared the event, and no valid event can
* have all ones in either its high or low dwords. This approach is
* robust against reordering.
*
* Note that using a single 64-bit comparison is incorrect; even
* though the CPU read will be atomic, the DMA write may not be.
*/
static inline int ef4_event_present(ef4_qword_t *event)
{
return !(EF4_DWORD_IS_ALL_ONES(event->dword[0]) |
EF4_DWORD_IS_ALL_ONES(event->dword[1]));
}
/* Returns a pointer to the specified transmit descriptor in the TX
* descriptor queue belonging to the specified channel.
*/
static inline ef4_qword_t *
ef4_tx_desc(struct ef4_tx_queue *tx_queue, unsigned int index)
{
return ((ef4_qword_t *) (tx_queue->txd.buf.addr)) + index;
}
/* Get partner of a TX queue, seen as part of the same net core queue */
static inline struct ef4_tx_queue *ef4_tx_queue_partner(struct ef4_tx_queue *tx_queue)
{
if (tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD)
return tx_queue - EF4_TXQ_TYPE_OFFLOAD;
else
return tx_queue + EF4_TXQ_TYPE_OFFLOAD;
}
/* Report whether this TX queue would be empty for the given write_count.
* May return false negative.
*/
static inline bool __ef4_nic_tx_is_empty(struct ef4_tx_queue *tx_queue,
unsigned int write_count)
{
unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
if (empty_read_count == 0)
return false;
return ((empty_read_count ^ write_count) & ~EF4_EMPTY_COUNT_VALID) == 0;
}
/* Decide whether to push a TX descriptor to the NIC vs merely writing
* the doorbell. This can reduce latency when we are adding a single
* descriptor to an empty queue, but is otherwise pointless. Further,
* Falcon and Siena have hardware bugs (SF bug 33851) that may be
* triggered if we don't check this.
* We use the write_count used for the last doorbell push, to get the
* NIC's view of the tx queue.
*/
static inline bool ef4_nic_may_push_tx_desc(struct ef4_tx_queue *tx_queue,
unsigned int write_count)
{
bool was_empty = __ef4_nic_tx_is_empty(tx_queue, write_count);
tx_queue->empty_read_count = 0;
return was_empty && tx_queue->write_count - write_count == 1;
}
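A hedged sketch of the decision this helper feeds, modelled on what the farch TX write path is expected to do; ef4_push_tx_desc() and ef4_farch_notify_tx_desc() are assumed to be helpers provided by farch.c and are not defined here.

	/* Sketch only: either push the single new descriptor or just ring the
	 * doorbell, depending on whether the queue looked empty at the last push.
	 */
	static void example_tx_write_tail(struct ef4_tx_queue *tx_queue,
					  unsigned int old_write_count)
	{
		wmb();	/* descriptors must be visible before the doorbell */

		if (ef4_nic_may_push_tx_desc(tx_queue, old_write_count)) {
			ef4_qword_t *txd = ef4_tx_desc(tx_queue,
					old_write_count & tx_queue->ptr_mask);
			ef4_push_tx_desc(tx_queue, txd);	/* assumed helper */
		} else {
			ef4_farch_notify_tx_desc(tx_queue);	/* assumed helper */
		}
	}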
/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline ef4_qword_t *
ef4_rx_desc(struct ef4_rx_queue *rx_queue, unsigned int index)
{
return ((ef4_qword_t *) (rx_queue->rxd.buf.addr)) + index;
}
enum {
PHY_TYPE_NONE = 0,
PHY_TYPE_TXC43128 = 1,
PHY_TYPE_88E1111 = 2,
PHY_TYPE_SFX7101 = 3,
PHY_TYPE_QT2022C2 = 4,
PHY_TYPE_PM8358 = 6,
PHY_TYPE_SFT9001A = 8,
PHY_TYPE_QT2025C = 9,
PHY_TYPE_SFT9001B = 10,
};
#define FALCON_XMAC_LOOPBACKS \
((1 << LOOPBACK_XGMII) | \
(1 << LOOPBACK_XGXS) | \
(1 << LOOPBACK_XAUI))
/* Alignment of PCIe DMA boundaries (4KB) */
#define EF4_PAGE_SIZE 4096
/* Size and alignment of buffer table entries (same) */
#define EF4_BUF_SIZE EF4_PAGE_SIZE
/* NIC-generic software stats */
enum {
GENERIC_STAT_rx_noskb_drops,
GENERIC_STAT_rx_nodesc_trunc,
GENERIC_STAT_COUNT
};
/**
* struct falcon_board_type - board operations and type information
* @id: Board type id, as found in NVRAM
* @init: Allocate resources and initialise peripheral hardware
* @init_phy: Do board-specific PHY initialisation
* @fini: Shut down hardware and free resources
* @set_id_led: Set state of identifying LED or revert to automatic function
* @monitor: Board-specific health check function
*/
struct falcon_board_type {
u8 id;
int (*init) (struct ef4_nic *nic);
void (*init_phy) (struct ef4_nic *efx);
void (*fini) (struct ef4_nic *nic);
void (*set_id_led) (struct ef4_nic *efx, enum ef4_led_mode mode);
int (*monitor) (struct ef4_nic *nic);
};
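For illustration, a hedged sketch of a board description of the kind falcon_probe_board() selects by NVRAM board id; real boards live in falcon_boards.c, and the id and trivial callback stubs below are hypothetical.

	/* Sketch only: every callback is a no-op stub. */
	static int example_board_init(struct ef4_nic *efx) { return 0; }
	static void example_board_init_phy(struct ef4_nic *efx) { }
	static void example_board_fini(struct ef4_nic *efx) { }
	static void example_board_set_id_led(struct ef4_nic *efx,
					     enum ef4_led_mode mode) { }
	static int example_board_monitor(struct ef4_nic *efx) { return 0; }

	static const struct falcon_board_type example_board_type = {
		.id		= 0x42,		/* hypothetical NVRAM board id */
		.init		= example_board_init,
		.init_phy	= example_board_init_phy,
		.fini		= example_board_fini,
		.set_id_led	= example_board_set_id_led,
		.monitor	= example_board_monitor,
	};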
/**
* struct falcon_board - board information
* @type: Type of board
* @major: Major rev. ('A', 'B' ...)
* @minor: Minor rev. (0, 1, ...)
* @i2c_adap: I2C adapter for on-board peripherals
* @i2c_data: Data for bit-banging algorithm
* @hwmon_client: I2C client for hardware monitor
* @ioexp_client: I2C client for power/port control
*/
struct falcon_board {
const struct falcon_board_type *type;
int major;
int minor;
struct i2c_adapter i2c_adap;
struct i2c_algo_bit_data i2c_data;
struct i2c_client *hwmon_client, *ioexp_client;
};
/**
* struct falcon_spi_device - a Falcon SPI (Serial Peripheral Interface) device
* @device_id: Controller's id for the device
* @size: Size (in bytes)
* @addr_len: Number of address bytes in read/write commands
* @munge_address: Flag whether addresses should be munged.
* Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
* use bit 3 of the command byte as address bit A8, rather
* than having a two-byte address. If this flag is set, then
* commands should be munged in this way.
* @erase_command: Erase command (or 0 if sector erase not needed).
* @erase_size: Erase sector size (in bytes)
* Erase commands affect sectors with this size and alignment.
* This must be a power of two.
* @block_size: Write block size (in bytes).
* Write commands are limited to blocks with this size and alignment.
*/
struct falcon_spi_device {
int device_id;
unsigned int size;
unsigned int addr_len;
unsigned int munge_address:1;
u8 erase_command;
unsigned int erase_size;
unsigned int block_size;
};
static inline bool falcon_spi_present(const struct falcon_spi_device *spi)
{
return spi->size != 0;
}
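A hedged example of a struct falcon_spi_device initialiser; the values describe a hypothetical small EEPROM with 9-bit addressing (in the real driver these fields are filled in from NVRAM configuration rather than hard-coded).

	/* Sketch only: hypothetical 512-byte EEPROM, no sector erase, 8-byte pages. */
	static const struct falcon_spi_device example_spi_eeprom = {
		.device_id	= 1,	/* hypothetical controller device id */
		.size		= 512,
		.addr_len	= 1,
		.munge_address	= 1,	/* A8 carried in bit 3 of the command byte */
		.erase_command	= 0,	/* no sector erase needed */
		.erase_size	= 0,
		.block_size	= 8,
	};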
enum {
FALCON_STAT_tx_bytes = GENERIC_STAT_COUNT,
FALCON_STAT_tx_packets,
FALCON_STAT_tx_pause,
FALCON_STAT_tx_control,
FALCON_STAT_tx_unicast,
FALCON_STAT_tx_multicast,
FALCON_STAT_tx_broadcast,
FALCON_STAT_tx_lt64,
FALCON_STAT_tx_64,
FALCON_STAT_tx_65_to_127,
FALCON_STAT_tx_128_to_255,
FALCON_STAT_tx_256_to_511,
FALCON_STAT_tx_512_to_1023,
FALCON_STAT_tx_1024_to_15xx,
FALCON_STAT_tx_15xx_to_jumbo,
FALCON_STAT_tx_gtjumbo,
FALCON_STAT_tx_non_tcpudp,
FALCON_STAT_tx_mac_src_error,
FALCON_STAT_tx_ip_src_error,
FALCON_STAT_rx_bytes,
FALCON_STAT_rx_good_bytes,
FALCON_STAT_rx_bad_bytes,
FALCON_STAT_rx_packets,
FALCON_STAT_rx_good,
FALCON_STAT_rx_bad,
FALCON_STAT_rx_pause,
FALCON_STAT_rx_control,
FALCON_STAT_rx_unicast,
FALCON_STAT_rx_multicast,
FALCON_STAT_rx_broadcast,
FALCON_STAT_rx_lt64,
FALCON_STAT_rx_64,
FALCON_STAT_rx_65_to_127,
FALCON_STAT_rx_128_to_255,
FALCON_STAT_rx_256_to_511,
FALCON_STAT_rx_512_to_1023,
FALCON_STAT_rx_1024_to_15xx,
FALCON_STAT_rx_15xx_to_jumbo,
FALCON_STAT_rx_gtjumbo,
FALCON_STAT_rx_bad_lt64,
FALCON_STAT_rx_bad_gtjumbo,
FALCON_STAT_rx_overflow,
FALCON_STAT_rx_symbol_error,
FALCON_STAT_rx_align_error,
FALCON_STAT_rx_length_error,
FALCON_STAT_rx_internal_error,
FALCON_STAT_rx_nodesc_drop_cnt,
FALCON_STAT_COUNT
};
/**
* struct falcon_nic_data - Falcon NIC state
* @pci_dev2: Secondary function of Falcon A
* @board: Board state and functions
* @stats: Hardware statistics
* @stats_disable_count: Nest count for disabling statistics fetches
* @stats_pending: Is there a pending DMA of MAC statistics.
* @stats_timer: A timer for regularly fetching MAC statistics.
* @spi_flash: SPI flash device
* @spi_eeprom: SPI EEPROM device
* @spi_lock: SPI bus lock
* @mdio_lock: MDIO bus lock
* @xmac_poll_required: XMAC link state needs polling
*/
struct falcon_nic_data {
struct pci_dev *pci_dev2;
struct falcon_board board;
u64 stats[FALCON_STAT_COUNT];
unsigned int stats_disable_count;
bool stats_pending;
struct timer_list stats_timer;
struct falcon_spi_device spi_flash;
struct falcon_spi_device spi_eeprom;
struct mutex spi_lock;
struct mutex mdio_lock;
bool xmac_poll_required;
};
static inline struct falcon_board *falcon_board(struct ef4_nic *efx)
{
struct falcon_nic_data *data = efx->nic_data;
return &data->board;
}
struct ethtool_ts_info;
extern const struct ef4_nic_type falcon_a1_nic_type;
extern const struct ef4_nic_type falcon_b0_nic_type;
/**************************************************************************
*
* Externs
*
**************************************************************************
*/
int falcon_probe_board(struct ef4_nic *efx, u16 revision_info);
/* TX data path */
static inline int ef4_nic_probe_tx(struct ef4_tx_queue *tx_queue)
{
return tx_queue->efx->type->tx_probe(tx_queue);
}
static inline void ef4_nic_init_tx(struct ef4_tx_queue *tx_queue)
{
tx_queue->efx->type->tx_init(tx_queue);
}
static inline void ef4_nic_remove_tx(struct ef4_tx_queue *tx_queue)
{
tx_queue->efx->type->tx_remove(tx_queue);
}
static inline void ef4_nic_push_buffers(struct ef4_tx_queue *tx_queue)
{
tx_queue->efx->type->tx_write(tx_queue);
}
/* RX data path */
static inline int ef4_nic_probe_rx(struct ef4_rx_queue *rx_queue)
{
return rx_queue->efx->type->rx_probe(rx_queue);
}
static inline void ef4_nic_init_rx(struct ef4_rx_queue *rx_queue)
{
rx_queue->efx->type->rx_init(rx_queue);
}
static inline void ef4_nic_remove_rx(struct ef4_rx_queue *rx_queue)
{
rx_queue->efx->type->rx_remove(rx_queue);
}
static inline void ef4_nic_notify_rx_desc(struct ef4_rx_queue *rx_queue)
{
rx_queue->efx->type->rx_write(rx_queue);
}
static inline void ef4_nic_generate_fill_event(struct ef4_rx_queue *rx_queue)
{
rx_queue->efx->type->rx_defer_refill(rx_queue);
}
/* Event data path */
static inline int ef4_nic_probe_eventq(struct ef4_channel *channel)
{
return channel->efx->type->ev_probe(channel);
}
static inline int ef4_nic_init_eventq(struct ef4_channel *channel)
{
return channel->efx->type->ev_init(channel);
}
static inline void ef4_nic_fini_eventq(struct ef4_channel *channel)
{
channel->efx->type->ev_fini(channel);
}
static inline void ef4_nic_remove_eventq(struct ef4_channel *channel)
{
channel->efx->type->ev_remove(channel);
}
static inline int
ef4_nic_process_eventq(struct ef4_channel *channel, int quota)
{
return channel->efx->type->ev_process(channel, quota);
}
static inline void ef4_nic_eventq_read_ack(struct ef4_channel *channel)
{
channel->efx->type->ev_read_ack(channel);
}
void ef4_nic_event_test_start(struct ef4_channel *channel);
/* queue operations */
int ef4_farch_tx_probe(struct ef4_tx_queue *tx_queue);
void ef4_farch_tx_init(struct ef4_tx_queue *tx_queue);
void ef4_farch_tx_fini(struct ef4_tx_queue *tx_queue);
void ef4_farch_tx_remove(struct ef4_tx_queue *tx_queue);
void ef4_farch_tx_write(struct ef4_tx_queue *tx_queue);
unsigned int ef4_farch_tx_limit_len(struct ef4_tx_queue *tx_queue,
dma_addr_t dma_addr, unsigned int len);
int ef4_farch_rx_probe(struct ef4_rx_queue *rx_queue);
void ef4_farch_rx_init(struct ef4_rx_queue *rx_queue);
void ef4_farch_rx_fini(struct ef4_rx_queue *rx_queue);
void ef4_farch_rx_remove(struct ef4_rx_queue *rx_queue);
void ef4_farch_rx_write(struct ef4_rx_queue *rx_queue);
void ef4_farch_rx_defer_refill(struct ef4_rx_queue *rx_queue);
int ef4_farch_ev_probe(struct ef4_channel *channel);
int ef4_farch_ev_init(struct ef4_channel *channel);
void ef4_farch_ev_fini(struct ef4_channel *channel);
void ef4_farch_ev_remove(struct ef4_channel *channel);
int ef4_farch_ev_process(struct ef4_channel *channel, int quota);
void ef4_farch_ev_read_ack(struct ef4_channel *channel);
void ef4_farch_ev_test_generate(struct ef4_channel *channel);
/* filter operations */
int ef4_farch_filter_table_probe(struct ef4_nic *efx);
void ef4_farch_filter_table_restore(struct ef4_nic *efx);
void ef4_farch_filter_table_remove(struct ef4_nic *efx);
void ef4_farch_filter_update_rx_scatter(struct ef4_nic *efx);
s32 ef4_farch_filter_insert(struct ef4_nic *efx, struct ef4_filter_spec *spec,
bool replace);
int ef4_farch_filter_remove_safe(struct ef4_nic *efx,
enum ef4_filter_priority priority,
u32 filter_id);
int ef4_farch_filter_get_safe(struct ef4_nic *efx,
enum ef4_filter_priority priority, u32 filter_id,
struct ef4_filter_spec *);
int ef4_farch_filter_clear_rx(struct ef4_nic *efx,
enum ef4_filter_priority priority);
u32 ef4_farch_filter_count_rx_used(struct ef4_nic *efx,
enum ef4_filter_priority priority);
u32 ef4_farch_filter_get_rx_id_limit(struct ef4_nic *efx);
s32 ef4_farch_filter_get_rx_ids(struct ef4_nic *efx,
enum ef4_filter_priority priority, u32 *buf,
u32 size);
#ifdef CONFIG_RFS_ACCEL
s32 ef4_farch_filter_rfs_insert(struct ef4_nic *efx,
struct ef4_filter_spec *spec);
bool ef4_farch_filter_rfs_expire_one(struct ef4_nic *efx, u32 flow_id,
unsigned int index);
#endif
void ef4_farch_filter_sync_rx_mode(struct ef4_nic *efx);
bool ef4_nic_event_present(struct ef4_channel *channel);
/* Some statistics are computed as A - B where A and B each increase
* linearly with some hardware counter(s) and the counters are read
* asynchronously. If the counters contributing to B are always read
* after those contributing to A, the computed value may be lower than
* the true value by some variable amount, and may decrease between
* subsequent computations.
*
* We should never allow statistics to decrease or to exceed the true
* value. Since the computed value will never be greater than the
* true value, we can achieve this by only storing the computed value
* when it increases.
*/
static inline void ef4_update_diff_stat(u64 *stat, u64 diff)
{
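/* The signed comparison of (diff - *stat) is safe across counter
 * wrap-around: the stored value only ever moves forward, so the
 * reported statistic can never decrease.
 */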
if ((s64)(diff - *stat) > 0)
*stat = diff;
}
/* Interrupts */
int ef4_nic_init_interrupt(struct ef4_nic *efx);
int ef4_nic_irq_test_start(struct ef4_nic *efx);
void ef4_nic_fini_interrupt(struct ef4_nic *efx);
void ef4_farch_irq_enable_master(struct ef4_nic *efx);
int ef4_farch_irq_test_generate(struct ef4_nic *efx);
void ef4_farch_irq_disable_master(struct ef4_nic *efx);
irqreturn_t ef4_farch_msi_interrupt(int irq, void *dev_id);
irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id);
irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx);
static inline int ef4_nic_event_test_irq_cpu(struct ef4_channel *channel)
{
return ACCESS_ONCE(channel->event_test_cpu);
}
static inline int ef4_nic_irq_test_irq_cpu(struct ef4_nic *efx)
{
return ACCESS_ONCE(efx->last_irq_cpu);
}
/* Global Resources */
int ef4_nic_flush_queues(struct ef4_nic *efx);
int ef4_farch_fini_dmaq(struct ef4_nic *efx);
void ef4_farch_finish_flr(struct ef4_nic *efx);
void falcon_start_nic_stats(struct ef4_nic *efx);
void falcon_stop_nic_stats(struct ef4_nic *efx);
int falcon_reset_xaui(struct ef4_nic *efx);
void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw);
void ef4_farch_init_common(struct ef4_nic *efx);
void ef4_farch_rx_push_indir_table(struct ef4_nic *efx);
int ef4_nic_alloc_buffer(struct ef4_nic *efx, struct ef4_buffer *buffer,
unsigned int len, gfp_t gfp_flags);
void ef4_nic_free_buffer(struct ef4_nic *efx, struct ef4_buffer *buffer);
/* Tests */
struct ef4_farch_register_test {
unsigned address;
ef4_oword_t mask;
};
int ef4_farch_test_registers(struct ef4_nic *efx,
const struct ef4_farch_register_test *regs,
size_t n_regs);
size_t ef4_nic_get_regs_len(struct ef4_nic *efx);
void ef4_nic_get_regs(struct ef4_nic *efx, void *buf);
size_t ef4_nic_describe_stats(const struct ef4_hw_stat_desc *desc, size_t count,
const unsigned long *mask, u8 *names);
void ef4_nic_update_stats(const struct ef4_hw_stat_desc *desc, size_t count,
const unsigned long *mask, u64 *stats,
const void *dma_buf, bool accumulate);
void ef4_nic_fix_nodesc_drop_stat(struct ef4_nic *efx, u64 *stat);
#define EF4_MAX_FLUSH_TIME 5000
void ef4_farch_generate_event(struct ef4_nic *efx, unsigned int evq,
ef4_qword_t *event);
#endif /* EF4_NIC_H */

View File

@ -7,20 +7,20 @@
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_PHY_H
#define EFX_PHY_H
#ifndef EF4_PHY_H
#define EF4_PHY_H
/****************************************************************************
* 10Xpress (SFX7101) PHY
*/
extern const struct efx_phy_operations falcon_sfx7101_phy_ops;
extern const struct ef4_phy_operations falcon_sfx7101_phy_ops;
void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
void tenxpress_set_id_led(struct ef4_nic *efx, enum ef4_led_mode mode);
/****************************************************************************
* AMCC/Quake QT202x PHYs
*/
extern const struct efx_phy_operations falcon_qt202x_phy_ops;
extern const struct ef4_phy_operations falcon_qt202x_phy_ops;
/* These PHYs provide various H/W control states for LEDs */
#define QUAKE_LED_LINK_INVAL (0)
@ -34,17 +34,17 @@ extern const struct efx_phy_operations falcon_qt202x_phy_ops;
#define QUAKE_LED_TXLINK (0)
#define QUAKE_LED_RXLINK (8)
void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
void falcon_qt202x_set_led(struct ef4_nic *p, int led, int state);
/****************************************************************************
* Transwitch CX4 retimer
*/
extern const struct efx_phy_operations falcon_txc_phy_ops;
extern const struct ef4_phy_operations falcon_txc_phy_ops;
#define TXC_GPIO_DIR_INPUT 0
#define TXC_GPIO_DIR_OUTPUT 1
void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
void falcon_txc_set_gpio_dir(struct ef4_nic *efx, int pin, int dir);
void falcon_txc_set_gpio_val(struct ef4_nic *efx, int pin, int val);
#endif

View File

@ -50,14 +50,14 @@
#define PCS_VEND1_REG 0xc000
#define PCS_VEND1_LBTXD_LBN 5
void falcon_qt202x_set_led(struct efx_nic *p, int led, int mode)
void falcon_qt202x_set_led(struct ef4_nic *p, int led, int mode)
{
int addr = MDIO_QUAKE_LED0_REG + led;
efx_mdio_write(p, MDIO_MMD_PMAPMD, addr, mode);
ef4_mdio_write(p, MDIO_MMD_PMAPMD, addr, mode);
}
struct qt202x_phy_data {
enum efx_phy_mode phy_mode;
enum ef4_phy_mode phy_mode;
bool bug17190_in_bad_state;
unsigned long bug17190_timer;
u32 firmware_ver;
@ -73,7 +73,7 @@ struct qt202x_phy_data {
#define BUG17190_INTERVAL (2 * HZ)
static int qt2025c_wait_heartbeat(struct efx_nic *efx)
static int qt2025c_wait_heartbeat(struct ef4_nic *efx)
{
unsigned long timeout = jiffies + QT2025C_MAX_HEARTB_TIME;
int reg, old_counter = 0;
@ -81,7 +81,7 @@ static int qt2025c_wait_heartbeat(struct efx_nic *efx)
/* Wait for firmware heartbeat to start */
for (;;) {
int counter;
reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_FW_HEARTBEAT_REG);
reg = ef4_mdio_read(efx, MDIO_MMD_PCS, PCS_FW_HEARTBEAT_REG);
if (reg < 0)
return reg;
counter = ((reg >> PCS_FW_HEARTB_LBN) &
@ -105,14 +105,14 @@ static int qt2025c_wait_heartbeat(struct efx_nic *efx)
return 0;
}
static int qt2025c_wait_fw_status_good(struct efx_nic *efx)
static int qt2025c_wait_fw_status_good(struct ef4_nic *efx)
{
unsigned long timeout = jiffies + QT2025C_MAX_FWSTART_TIME;
int reg;
/* Wait for firmware status to look good */
for (;;) {
reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_UC8051_STATUS_REG);
reg = ef4_mdio_read(efx, MDIO_MMD_PCS, PCS_UC8051_STATUS_REG);
if (reg < 0)
return reg;
if ((reg &
@ -127,15 +127,15 @@ static int qt2025c_wait_fw_status_good(struct efx_nic *efx)
return 0;
}
static void qt2025c_restart_firmware(struct efx_nic *efx)
static void qt2025c_restart_firmware(struct ef4_nic *efx)
{
/* Restart microcontroller execution of firmware from RAM */
efx_mdio_write(efx, 3, 0xe854, 0x00c0);
efx_mdio_write(efx, 3, 0xe854, 0x0040);
ef4_mdio_write(efx, 3, 0xe854, 0x00c0);
ef4_mdio_write(efx, 3, 0xe854, 0x0040);
msleep(50);
}
static int qt2025c_wait_reset(struct efx_nic *efx)
static int qt2025c_wait_reset(struct ef4_nic *efx)
{
int rc;
@ -160,14 +160,14 @@ static int qt2025c_wait_reset(struct efx_nic *efx)
return rc;
}
static void qt2025c_firmware_id(struct efx_nic *efx)
static void qt2025c_firmware_id(struct ef4_nic *efx)
{
struct qt202x_phy_data *phy_data = efx->phy_data;
u8 firmware_id[9];
size_t i;
for (i = 0; i < sizeof(firmware_id); i++)
firmware_id[i] = efx_mdio_read(efx, MDIO_MMD_PCS,
firmware_id[i] = ef4_mdio_read(efx, MDIO_MMD_PCS,
PCS_FW_PRODUCT_CODE_1 + i);
netif_info(efx, probe, efx->net_dev,
"QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n",
@ -180,7 +180,7 @@ static void qt2025c_firmware_id(struct efx_nic *efx)
(firmware_id[4] << 8) | firmware_id[5];
}
static void qt2025c_bug17190_workaround(struct efx_nic *efx)
static void qt2025c_bug17190_workaround(struct ef4_nic *efx)
{
struct qt202x_phy_data *phy_data = efx->phy_data;
@ -191,7 +191,7 @@ static void qt2025c_bug17190_workaround(struct efx_nic *efx)
* recover it.
*/
if (efx->link_state.up ||
!efx_mdio_links_ok(efx, MDIO_DEVS_PMAPMD | MDIO_DEVS_PHYXS)) {
!ef4_mdio_links_ok(efx, MDIO_DEVS_PMAPMD | MDIO_DEVS_PHYXS)) {
phy_data->bug17190_in_bad_state = false;
return;
}
@ -204,16 +204,16 @@ static void qt2025c_bug17190_workaround(struct efx_nic *efx)
if (time_after_eq(jiffies, phy_data->bug17190_timer)) {
netif_dbg(efx, hw, efx->net_dev, "bashing QT2025C PMA/PMD\n");
efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
ef4_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
MDIO_PMA_CTRL1_LOOPBACK, true);
msleep(100);
efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
ef4_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
MDIO_PMA_CTRL1_LOOPBACK, false);
phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
}
}
static int qt2025c_select_phy_mode(struct efx_nic *efx)
static int qt2025c_select_phy_mode(struct ef4_nic *efx)
{
struct qt202x_phy_data *phy_data = efx->phy_data;
struct falcon_board *board = falcon_board(efx);
@ -233,7 +233,7 @@ static int qt2025c_select_phy_mode(struct efx_nic *efx)
phy_op_mode = (efx->loopback_mode == LOOPBACK_NONE) ? 0x0038 : 0x0020;
/* Only change mode if really necessary */
reg = efx_mdio_read(efx, 1, 0xc319);
reg = ef4_mdio_read(efx, 1, 0xc319);
if ((reg & 0x0038) == phy_op_mode)
return 0;
netif_dbg(efx, hw, efx->net_dev, "Switching PHY to mode 0x%04x\n",
@ -243,52 +243,52 @@ static int qt2025c_select_phy_mode(struct efx_nic *efx)
* EEPROM (including the differences between board revisions), except
* that the operating mode is changed, and the PHY is prevented from
* unnecessarily reloading the main firmware image again. */
efx_mdio_write(efx, 1, 0xc300, 0x0000);
ef4_mdio_write(efx, 1, 0xc300, 0x0000);
/* (Note: this portion of the boot EEPROM sequence, which bit-bashes 9
* STOPs onto the firmware/module I2C bus to reset it, varies across
* board revisions, as the bus is connected to different GPIO/LED
* outputs on the PHY.) */
if (board->major == 0 && board->minor < 2) {
efx_mdio_write(efx, 1, 0xc303, 0x4498);
ef4_mdio_write(efx, 1, 0xc303, 0x4498);
for (i = 0; i < 9; i++) {
efx_mdio_write(efx, 1, 0xc303, 0x4488);
efx_mdio_write(efx, 1, 0xc303, 0x4480);
efx_mdio_write(efx, 1, 0xc303, 0x4490);
efx_mdio_write(efx, 1, 0xc303, 0x4498);
ef4_mdio_write(efx, 1, 0xc303, 0x4488);
ef4_mdio_write(efx, 1, 0xc303, 0x4480);
ef4_mdio_write(efx, 1, 0xc303, 0x4490);
ef4_mdio_write(efx, 1, 0xc303, 0x4498);
}
} else {
efx_mdio_write(efx, 1, 0xc303, 0x0920);
efx_mdio_write(efx, 1, 0xd008, 0x0004);
ef4_mdio_write(efx, 1, 0xc303, 0x0920);
ef4_mdio_write(efx, 1, 0xd008, 0x0004);
for (i = 0; i < 9; i++) {
efx_mdio_write(efx, 1, 0xc303, 0x0900);
efx_mdio_write(efx, 1, 0xd008, 0x0005);
efx_mdio_write(efx, 1, 0xc303, 0x0920);
efx_mdio_write(efx, 1, 0xd008, 0x0004);
ef4_mdio_write(efx, 1, 0xc303, 0x0900);
ef4_mdio_write(efx, 1, 0xd008, 0x0005);
ef4_mdio_write(efx, 1, 0xc303, 0x0920);
ef4_mdio_write(efx, 1, 0xd008, 0x0004);
}
efx_mdio_write(efx, 1, 0xc303, 0x4900);
ef4_mdio_write(efx, 1, 0xc303, 0x4900);
}
efx_mdio_write(efx, 1, 0xc303, 0x4900);
efx_mdio_write(efx, 1, 0xc302, 0x0004);
efx_mdio_write(efx, 1, 0xc316, 0x0013);
efx_mdio_write(efx, 1, 0xc318, 0x0054);
efx_mdio_write(efx, 1, 0xc319, phy_op_mode);
efx_mdio_write(efx, 1, 0xc31a, 0x0098);
efx_mdio_write(efx, 3, 0x0026, 0x0e00);
efx_mdio_write(efx, 3, 0x0027, 0x0013);
efx_mdio_write(efx, 3, 0x0028, 0xa528);
efx_mdio_write(efx, 1, 0xd006, 0x000a);
efx_mdio_write(efx, 1, 0xd007, 0x0009);
efx_mdio_write(efx, 1, 0xd008, 0x0004);
ef4_mdio_write(efx, 1, 0xc303, 0x4900);
ef4_mdio_write(efx, 1, 0xc302, 0x0004);
ef4_mdio_write(efx, 1, 0xc316, 0x0013);
ef4_mdio_write(efx, 1, 0xc318, 0x0054);
ef4_mdio_write(efx, 1, 0xc319, phy_op_mode);
ef4_mdio_write(efx, 1, 0xc31a, 0x0098);
ef4_mdio_write(efx, 3, 0x0026, 0x0e00);
ef4_mdio_write(efx, 3, 0x0027, 0x0013);
ef4_mdio_write(efx, 3, 0x0028, 0xa528);
ef4_mdio_write(efx, 1, 0xd006, 0x000a);
ef4_mdio_write(efx, 1, 0xd007, 0x0009);
ef4_mdio_write(efx, 1, 0xd008, 0x0004);
/* This additional write is not present in the boot EEPROM. It
* prevents the PHY's internal boot ROM doing another pointless (and
* slow) reload of the firmware image (the microcontroller's code
* memory is not affected by the microcontroller reset). */
efx_mdio_write(efx, 1, 0xc317, 0x00ff);
ef4_mdio_write(efx, 1, 0xc317, 0x00ff);
/* PMA/PMD loopback sets RXIN to inverse polarity and the firmware
* restart doesn't reset it. We need to do that ourselves. */
efx_mdio_set_flag(efx, 1, PMA_PMD_MODE_REG,
ef4_mdio_set_flag(efx, 1, PMA_PMD_MODE_REG,
1 << PMA_PMD_RXIN_SEL_LBN, false);
efx_mdio_write(efx, 1, 0xc300, 0x0002);
ef4_mdio_write(efx, 1, 0xc300, 0x0002);
msleep(20);
/* Restart microcontroller execution of firmware from RAM */
@ -306,7 +306,7 @@ static int qt2025c_select_phy_mode(struct efx_nic *efx)
return 0;
}
static int qt202x_reset_phy(struct efx_nic *efx)
static int qt202x_reset_phy(struct ef4_nic *efx)
{
int rc;
@ -319,7 +319,7 @@ static int qt202x_reset_phy(struct efx_nic *efx)
} else {
/* Reset the PHYXS MMD. This is documented as doing
* a complete soft reset. */
rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PHYXS,
rc = ef4_mdio_reset_mmd(efx, MDIO_MMD_PHYXS,
QT2022C2_MAX_RESET_TIME /
QT2022C2_RESET_WAIT,
QT2022C2_RESET_WAIT);
@ -339,7 +339,7 @@ static int qt202x_reset_phy(struct efx_nic *efx)
return rc;
}
static int qt202x_phy_probe(struct efx_nic *efx)
static int qt202x_phy_probe(struct ef4_nic *efx)
{
struct qt202x_phy_data *phy_data;
@ -357,7 +357,7 @@ static int qt202x_phy_probe(struct efx_nic *efx)
return 0;
}
static int qt202x_phy_init(struct efx_nic *efx)
static int qt202x_phy_init(struct ef4_nic *efx)
{
u32 devid;
int rc;
@ -368,11 +368,11 @@ static int qt202x_phy_init(struct efx_nic *efx)
return rc;
}
devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS);
devid = ef4_mdio_read_id(efx, MDIO_MMD_PHYXS);
netif_info(efx, probe, efx->net_dev,
"PHY ID reg %x (OUI %06x model %02x revision %x)\n",
devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid),
efx_mdio_id_rev(devid));
devid, ef4_mdio_id_oui(devid), ef4_mdio_id_model(devid),
ef4_mdio_id_rev(devid));
if (efx->phy_type == PHY_TYPE_QT2025C)
qt2025c_firmware_id(efx);
@ -380,12 +380,12 @@ static int qt202x_phy_init(struct efx_nic *efx)
return 0;
}
static int qt202x_link_ok(struct efx_nic *efx)
static int qt202x_link_ok(struct ef4_nic *efx)
{
return efx_mdio_links_ok(efx, QT202X_REQUIRED_DEVS);
return ef4_mdio_links_ok(efx, QT202X_REQUIRED_DEVS);
}
static bool qt202x_phy_poll(struct efx_nic *efx)
static bool qt202x_phy_poll(struct ef4_nic *efx)
{
bool was_up = efx->link_state.up;
@ -400,7 +400,7 @@ static bool qt202x_phy_poll(struct efx_nic *efx)
return efx->link_state.up != was_up;
}
static int qt202x_phy_reconfigure(struct efx_nic *efx)
static int qt202x_phy_reconfigure(struct ef4_nic *efx)
{
struct qt202x_phy_data *phy_data = efx->phy_data;
@ -427,29 +427,29 @@ static int qt202x_phy_reconfigure(struct efx_nic *efx)
(phy_data->phy_mode & PHY_MODE_TX_DISABLED))
qt202x_reset_phy(efx);
efx_mdio_transmit_disable(efx);
ef4_mdio_transmit_disable(efx);
}
efx_mdio_phy_reconfigure(efx);
ef4_mdio_phy_reconfigure(efx);
phy_data->phy_mode = efx->phy_mode;
return 0;
}
static void qt202x_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
static void qt202x_phy_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd)
{
mdio45_ethtool_gset(&efx->mdio, ecmd);
}
static void qt202x_phy_remove(struct efx_nic *efx)
static void qt202x_phy_remove(struct ef4_nic *efx)
{
/* Free the context block */
kfree(efx->phy_data);
efx->phy_data = NULL;
}
static int qt202x_phy_get_module_info(struct efx_nic *efx,
static int qt202x_phy_get_module_info(struct ef4_nic *efx,
struct ethtool_modinfo *modinfo)
{
modinfo->type = ETH_MODULE_SFF_8079;
@ -457,10 +457,10 @@ static int qt202x_phy_get_module_info(struct efx_nic *efx,
return 0;
}
static int qt202x_phy_get_module_eeprom(struct efx_nic *efx,
static int qt202x_phy_get_module_eeprom(struct ef4_nic *efx,
struct ethtool_eeprom *ee, u8 *data)
{
int mmd, reg_base, rc, i;
if (efx->phy_type == PHY_TYPE_QT2025C) {
mmd = MDIO_MMD_PCS;
@ -471,7 +471,7 @@ static int qt202x_phy_get_module_eeprom(struct efx_nic *efx,
}
for (i = 0; i < ee->len; i++) {
rc = efx_mdio_read(efx, mmd, reg_base + ee->offset + i);
rc = ef4_mdio_read(efx, mmd, reg_base + ee->offset + i);
if (rc < 0)
return rc;
data[i] = rc;
@ -480,16 +480,16 @@ static int qt202x_phy_get_module_eeprom(struct efx_nic *efx,
return 0;
}
const struct efx_phy_operations falcon_qt202x_phy_ops = {
const struct ef4_phy_operations falcon_qt202x_phy_ops = {
.probe = qt202x_phy_probe,
.init = qt202x_phy_init,
.reconfigure = qt202x_phy_reconfigure,
.poll = qt202x_phy_poll,
.fini = efx_port_dummy_op_void,
.fini = ef4_port_dummy_op_void,
.remove = qt202x_phy_remove,
.get_settings = qt202x_phy_get_settings,
.set_settings = efx_mdio_set_settings,
.test_alive = efx_mdio_test_alive,
.set_settings = ef4_mdio_set_settings,
.test_alive = ef4_mdio_test_alive,
.get_module_eeprom = qt202x_phy_get_module_eeprom,
.get_module_info = qt202x_phy_get_module_info,
};

View File

@ -0,0 +1,974 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2005-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"
/* Preferred number of descriptors to fill at once */
#define EF4_RX_PREFERRED_BATCH 8U
/* Number of RX buffers to recycle pages for. When creating the RX page recycle
* ring, this number is divided by the number of buffers per page to calculate
* the number of pages to store in the RX page recycle ring.
*/
#define EF4_RECYCLE_RING_SIZE_IOMMU 4096
#define EF4_RECYCLE_RING_SIZE_NOIOMMU (2 * EF4_RX_PREFERRED_BATCH)
/* Size of buffer allocated for skb header area. */
#define EF4_SKB_HEADERS 128u
/* This is the percentage fill level below which new RX descriptors
* will be added to the RX descriptor ring.
*/
static unsigned int rx_refill_threshold;
/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EF4_RX_MAX_FRAGS DIV_ROUND_UP(EF4_MAX_FRAME_LEN(EF4_MAX_MTU), \
EF4_RX_USR_BUF_SIZE)
/*
* RX maximum head room required.
*
* This must be at least 1 to prevent overflow, plus one packet-worth
* to allow pipelined receives.
*/
#define EF4_RXD_HEAD_ROOM (1 + EF4_RX_MAX_FRAGS)
static inline u8 *ef4_rx_buf_va(struct ef4_rx_buffer *buf)
{
return page_address(buf->page) + buf->page_offset;
}
static inline u32 ef4_rx_buf_hash(struct ef4_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
const u8 *data = eh + efx->rx_packet_hash_offset;
return (u32)data[0] |
(u32)data[1] << 8 |
(u32)data[2] << 16 |
(u32)data[3] << 24;
#endif
}
static inline struct ef4_rx_buffer *
ef4_rx_buf_next(struct ef4_rx_queue *rx_queue, struct ef4_rx_buffer *rx_buf)
{
if (unlikely(rx_buf == ef4_rx_buffer(rx_queue, rx_queue->ptr_mask)))
return ef4_rx_buffer(rx_queue, 0);
else
return rx_buf + 1;
}
static inline void ef4_sync_rx_buffer(struct ef4_nic *efx,
struct ef4_rx_buffer *rx_buf,
unsigned int len)
{
dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
DMA_FROM_DEVICE);
}
void ef4_rx_config_page_split(struct ef4_nic *efx)
{
efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
EF4_RX_BUF_ALIGNMENT);
efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
((PAGE_SIZE - sizeof(struct ef4_rx_page_state)) /
efx->rx_page_buf_step);
efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
efx->rx_bufs_per_page;
efx->rx_pages_per_batch = DIV_ROUND_UP(EF4_RX_PREFERRED_BATCH,
efx->rx_bufs_per_page);
}
/* Check the RX page recycle ring for a page that can be reused. */
static struct page *ef4_reuse_page(struct ef4_rx_queue *rx_queue)
{
struct ef4_nic *efx = rx_queue->efx;
struct page *page;
struct ef4_rx_page_state *state;
unsigned index;
index = rx_queue->page_remove & rx_queue->page_ptr_mask;
page = rx_queue->page_ring[index];
if (page == NULL)
return NULL;
rx_queue->page_ring[index] = NULL;
/* page_remove cannot exceed page_add. */
if (rx_queue->page_remove != rx_queue->page_add)
++rx_queue->page_remove;
/* If page_count is 1 then we hold the only reference to this page. */
if (page_count(page) == 1) {
++rx_queue->page_recycle_count;
return page;
} else {
state = page_address(page);
dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
PAGE_SIZE << efx->rx_buffer_order,
DMA_FROM_DEVICE);
put_page(page);
++rx_queue->page_recycle_failed;
}
return NULL;
}
/**
* ef4_init_rx_buffers - create EF4_RX_PREFERRED_BATCH page-based RX buffers
*
* @rx_queue: Efx RX queue
*
* This allocates a batch of pages, maps them for DMA, and populates
* a struct ef4_rx_buffer for each one. Returns a negative error code or
* 0 on success. If a single page can be used for multiple buffers,
* then the page will either be inserted fully, or not at all.
*/
static int ef4_init_rx_buffers(struct ef4_rx_queue *rx_queue, bool atomic)
{
struct ef4_nic *efx = rx_queue->efx;
struct ef4_rx_buffer *rx_buf;
struct page *page;
unsigned int page_offset;
struct ef4_rx_page_state *state;
dma_addr_t dma_addr;
unsigned index, count;
count = 0;
do {
page = ef4_reuse_page(rx_queue);
if (page == NULL) {
page = alloc_pages(__GFP_COLD | __GFP_COMP |
(atomic ? GFP_ATOMIC : GFP_KERNEL),
efx->rx_buffer_order);
if (unlikely(page == NULL))
return -ENOMEM;
dma_addr =
dma_map_page(&efx->pci_dev->dev, page, 0,
PAGE_SIZE << efx->rx_buffer_order,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
dma_addr))) {
__free_pages(page, efx->rx_buffer_order);
return -EIO;
}
state = page_address(page);
state->dma_addr = dma_addr;
} else {
state = page_address(page);
dma_addr = state->dma_addr;
}
dma_addr += sizeof(struct ef4_rx_page_state);
page_offset = sizeof(struct ef4_rx_page_state);
do {
index = rx_queue->added_count & rx_queue->ptr_mask;
rx_buf = ef4_rx_buffer(rx_queue, index);
rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
rx_buf->page = page;
rx_buf->page_offset = page_offset + efx->rx_ip_align;
rx_buf->len = efx->rx_dma_len;
rx_buf->flags = 0;
++rx_queue->added_count;
get_page(page);
dma_addr += efx->rx_page_buf_step;
page_offset += efx->rx_page_buf_step;
} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
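/* Mark the final buffer carved from this page, so that the DMA
 * mapping can be released once every buffer sharing the page has
 * been consumed.
 */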
rx_buf->flags = EF4_RX_BUF_LAST_IN_PAGE;
} while (++count < efx->rx_pages_per_batch);
return 0;
}
/* Unmap a DMA-mapped page. This function is only called for the final RX
* buffer in a page.
*/
static void ef4_unmap_rx_buffer(struct ef4_nic *efx,
struct ef4_rx_buffer *rx_buf)
{
struct page *page = rx_buf->page;
if (page) {
struct ef4_rx_page_state *state = page_address(page);
dma_unmap_page(&efx->pci_dev->dev,
state->dma_addr,
PAGE_SIZE << efx->rx_buffer_order,
DMA_FROM_DEVICE);
}
}
static void ef4_free_rx_buffers(struct ef4_rx_queue *rx_queue,
struct ef4_rx_buffer *rx_buf,
unsigned int num_bufs)
{
do {
if (rx_buf->page) {
put_page(rx_buf->page);
rx_buf->page = NULL;
}
rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
} while (--num_bufs);
}
/* Attempt to recycle the page if there is an RX recycle ring; the page can
* only be added if this is the final RX buffer, to prevent pages being used in
* the descriptor ring and appearing in the recycle ring simultaneously.
*/
static void ef4_recycle_rx_page(struct ef4_channel *channel,
struct ef4_rx_buffer *rx_buf)
{
struct page *page = rx_buf->page;
struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
struct ef4_nic *efx = rx_queue->efx;
unsigned index;
/* Only recycle the page after processing the final buffer. */
if (!(rx_buf->flags & EF4_RX_BUF_LAST_IN_PAGE))
return;
index = rx_queue->page_add & rx_queue->page_ptr_mask;
if (rx_queue->page_ring[index] == NULL) {
unsigned read_index = rx_queue->page_remove &
rx_queue->page_ptr_mask;
/* The next slot in the recycle ring is available, but
* increment page_remove if the read pointer currently
* points here.
*/
if (read_index == index)
++rx_queue->page_remove;
rx_queue->page_ring[index] = page;
++rx_queue->page_add;
return;
}
++rx_queue->page_recycle_full;
ef4_unmap_rx_buffer(efx, rx_buf);
put_page(rx_buf->page);
}
static void ef4_fini_rx_buffer(struct ef4_rx_queue *rx_queue,
struct ef4_rx_buffer *rx_buf)
{
/* Release the page reference we hold for the buffer. */
if (rx_buf->page)
put_page(rx_buf->page);
/* If this is the last buffer in a page, unmap and free it. */
if (rx_buf->flags & EF4_RX_BUF_LAST_IN_PAGE) {
ef4_unmap_rx_buffer(rx_queue->efx, rx_buf);
ef4_free_rx_buffers(rx_queue, rx_buf, 1);
}
rx_buf->page = NULL;
}
/* Recycle the pages that are used by buffers that have just been received. */
static void ef4_recycle_rx_pages(struct ef4_channel *channel,
struct ef4_rx_buffer *rx_buf,
unsigned int n_frags)
{
struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
do {
ef4_recycle_rx_page(channel, rx_buf);
rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
} while (--n_frags);
}
static void ef4_discard_rx_packet(struct ef4_channel *channel,
struct ef4_rx_buffer *rx_buf,
unsigned int n_frags)
{
struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
ef4_recycle_rx_pages(channel, rx_buf, n_frags);
ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
}
/**
* ef4_fast_push_rx_descriptors - push new RX descriptors quickly
* @rx_queue: RX descriptor queue
*
* This will aim to fill the RX descriptor queue up to
* @rx_queue->max_fill. If there is insufficient atomic
* memory to do so, a slow fill will be scheduled.
*
* The caller must provide serialisation (none is used here). In practice,
* this means this function must run from the NAPI handler, or be called
* when NAPI is disabled.
*/
void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic)
{
struct ef4_nic *efx = rx_queue->efx;
unsigned int fill_level, batch_size;
int space, rc = 0;
if (!rx_queue->refill_enabled)
return;
/* Calculate current fill level, and exit if we don't need to fill */
fill_level = (rx_queue->added_count - rx_queue->removed_count);
EF4_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
if (fill_level >= rx_queue->fast_fill_trigger)
goto out;
/* Record minimum fill level */
if (unlikely(fill_level < rx_queue->min_fill)) {
if (fill_level)
rx_queue->min_fill = fill_level;
}
batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
space = rx_queue->max_fill - fill_level;
EF4_BUG_ON_PARANOID(space < batch_size);
netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
"RX queue %d fast-filling descriptor ring from"
" level %d to level %d\n",
ef4_rx_queue_index(rx_queue), fill_level,
rx_queue->max_fill);
do {
rc = ef4_init_rx_buffers(rx_queue, atomic);
if (unlikely(rc)) {
/* Ensure that we don't leave the rx queue empty */
if (rx_queue->added_count == rx_queue->removed_count)
ef4_schedule_slow_fill(rx_queue);
goto out;
}
} while ((space -= batch_size) >= batch_size);
netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
"RX queue %d fast-filled descriptor ring "
"to level %d\n", ef4_rx_queue_index(rx_queue),
rx_queue->added_count - rx_queue->removed_count);
out:
if (rx_queue->notified_count != rx_queue->added_count)
ef4_nic_notify_rx_desc(rx_queue);
}
void ef4_rx_slow_fill(unsigned long context)
{
struct ef4_rx_queue *rx_queue = (struct ef4_rx_queue *)context;
/* Post an event to cause NAPI to run and refill the queue */
ef4_nic_generate_fill_event(rx_queue);
++rx_queue->slow_fill_count;
}
static void ef4_rx_packet__check_len(struct ef4_rx_queue *rx_queue,
struct ef4_rx_buffer *rx_buf,
int len)
{
struct ef4_nic *efx = rx_queue->efx;
unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
if (likely(len <= max_len))
return;
/* The packet must be discarded, but this is only a fatal error
* if the caller indicated it was
*/
rx_buf->flags |= EF4_RX_PKT_DISCARD;
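/* If the hardware wrote beyond the end of the buffer (and workaround
 * 8071 applies), the DMA state is suspect, so schedule an RX recovery
 * reset; otherwise just log and count the overlength event.
 */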
if ((len > rx_buf->len) && EF4_WORKAROUND_8071(efx)) {
if (net_ratelimit())
netif_err(efx, rx_err, efx->net_dev,
" RX queue %d seriously overlength "
"RX event (0x%x > 0x%x+0x%x). Leaking\n",
ef4_rx_queue_index(rx_queue), len, max_len,
efx->type->rx_buffer_padding);
ef4_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
} else {
if (net_ratelimit())
netif_err(efx, rx_err, efx->net_dev,
" RX queue %d overlength RX event "
"(0x%x > 0x%x)\n",
ef4_rx_queue_index(rx_queue), len, max_len);
}
ef4_rx_queue_channel(rx_queue)->n_rx_overlength++;
}
/* Pass a received packet up through GRO. GRO can handle pages
* regardless of checksum state and skbs with a good checksum.
*/
static void
ef4_rx_packet_gro(struct ef4_channel *channel, struct ef4_rx_buffer *rx_buf,
unsigned int n_frags, u8 *eh)
{
struct napi_struct *napi = &channel->napi_str;
gro_result_t gro_result;
struct ef4_nic *efx = channel->efx;
struct sk_buff *skb;
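/* GRO path: attach the page fragments straight to a napi skb so the
 * payload is never copied; if no skb is available, just release the
 * buffers and drop the packet.
 */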
skb = napi_get_frags(napi);
if (unlikely(!skb)) {
struct ef4_rx_queue *rx_queue;
rx_queue = ef4_channel_get_rx_queue(channel);
ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
return;
}
if (efx->net_dev->features & NETIF_F_RXHASH)
skb_set_hash(skb, ef4_rx_buf_hash(efx, eh),
PKT_HASH_TYPE_L3);
skb->ip_summed = ((rx_buf->flags & EF4_RX_PKT_CSUMMED) ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
for (;;) {
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
rx_buf->page, rx_buf->page_offset,
rx_buf->len);
rx_buf->page = NULL;
skb->len += rx_buf->len;
if (skb_shinfo(skb)->nr_frags == n_frags)
break;
rx_buf = ef4_rx_buf_next(&channel->rx_queue, rx_buf);
}
skb->data_len = skb->len;
skb->truesize += n_frags * efx->rx_buffer_truesize;
skb_record_rx_queue(skb, channel->rx_queue.core_index);
gro_result = napi_gro_frags(napi);
if (gro_result != GRO_DROP)
channel->irq_mod_score += 2;
}
/* Allocate and construct an SKB around page fragments */
static struct sk_buff *ef4_rx_mk_skb(struct ef4_channel *channel,
struct ef4_rx_buffer *rx_buf,
unsigned int n_frags,
u8 *eh, int hdr_len)
{
struct ef4_nic *efx = channel->efx;
struct sk_buff *skb;
/* Allocate an SKB to store the headers */
skb = netdev_alloc_skb(efx->net_dev,
efx->rx_ip_align + efx->rx_prefix_size +
hdr_len);
if (unlikely(skb == NULL)) {
atomic_inc(&efx->n_rx_noskb_drops);
return NULL;
}
EF4_BUG_ON_PARANOID(rx_buf->len < hdr_len);
memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
efx->rx_prefix_size + hdr_len);
skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
__skb_put(skb, hdr_len);
/* Append the remaining page(s) onto the frag list */
if (rx_buf->len > hdr_len) {
rx_buf->page_offset += hdr_len;
rx_buf->len -= hdr_len;
for (;;) {
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
rx_buf->page, rx_buf->page_offset,
rx_buf->len);
rx_buf->page = NULL;
skb->len += rx_buf->len;
skb->data_len += rx_buf->len;
if (skb_shinfo(skb)->nr_frags == n_frags)
break;
rx_buf = ef4_rx_buf_next(&channel->rx_queue, rx_buf);
}
} else {
__free_pages(rx_buf->page, efx->rx_buffer_order);
rx_buf->page = NULL;
n_frags = 0;
}
skb->truesize += n_frags * efx->rx_buffer_truesize;
/* Move past the ethernet header */
skb->protocol = eth_type_trans(skb, efx->net_dev);
skb_mark_napi_id(skb, &channel->napi_str);
return skb;
}
void ef4_rx_packet(struct ef4_rx_queue *rx_queue, unsigned int index,
unsigned int n_frags, unsigned int len, u16 flags)
{
struct ef4_nic *efx = rx_queue->efx;
struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
struct ef4_rx_buffer *rx_buf;
rx_queue->rx_packets++;
rx_buf = ef4_rx_buffer(rx_queue, index);
rx_buf->flags |= flags;
/* Validate the number of fragments and completed length */
if (n_frags == 1) {
if (!(flags & EF4_RX_PKT_PREFIX_LEN))
ef4_rx_packet__check_len(rx_queue, rx_buf, len);
} else if (unlikely(n_frags > EF4_RX_MAX_FRAGS) ||
unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
unlikely(len > n_frags * efx->rx_dma_len) ||
unlikely(!efx->rx_scatter)) {
/* If this isn't an explicit discard request, either
* the hardware or the driver is broken.
*/
WARN_ON(!(len == 0 && rx_buf->flags & EF4_RX_PKT_DISCARD));
rx_buf->flags |= EF4_RX_PKT_DISCARD;
}
netif_vdbg(efx, rx_status, efx->net_dev,
"RX queue %d received ids %x-%x len %d %s%s\n",
ef4_rx_queue_index(rx_queue), index,
(index + n_frags - 1) & rx_queue->ptr_mask, len,
(rx_buf->flags & EF4_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
(rx_buf->flags & EF4_RX_PKT_DISCARD) ? " [DISCARD]" : "");
/* Discard packet, if instructed to do so. Process the
* previous receive first.
*/
if (unlikely(rx_buf->flags & EF4_RX_PKT_DISCARD)) {
ef4_rx_flush_packet(channel);
ef4_discard_rx_packet(channel, rx_buf, n_frags);
return;
}
if (n_frags == 1 && !(flags & EF4_RX_PKT_PREFIX_LEN))
rx_buf->len = len;
/* Release and/or sync the DMA mapping - assumes all RX buffers
* consumed in-order per RX queue.
*/
ef4_sync_rx_buffer(efx, rx_buf, rx_buf->len);
/* Prefetch nice and early so data will (hopefully) be in cache by
* the time we look at it.
*/
prefetch(ef4_rx_buf_va(rx_buf));
rx_buf->page_offset += efx->rx_prefix_size;
rx_buf->len -= efx->rx_prefix_size;
if (n_frags > 1) {
/* Release/sync DMA mapping for additional fragments.
* Fix length for last fragment.
*/
unsigned int tail_frags = n_frags - 1;
for (;;) {
rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
if (--tail_frags == 0)
break;
ef4_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
}
rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
ef4_sync_rx_buffer(efx, rx_buf, rx_buf->len);
}
/* All fragments have been DMA-synced, so recycle pages. */
rx_buf = ef4_rx_buffer(rx_queue, index);
ef4_recycle_rx_pages(channel, rx_buf, n_frags);
/* Pipeline receives so that we give time for packet headers to be
* prefetched into cache.
*/
ef4_rx_flush_packet(channel);
channel->rx_pkt_n_frags = n_frags;
channel->rx_pkt_index = index;
}
static void ef4_rx_deliver(struct ef4_channel *channel, u8 *eh,
struct ef4_rx_buffer *rx_buf,
unsigned int n_frags)
{
struct sk_buff *skb;
u16 hdr_len = min_t(u16, rx_buf->len, EF4_SKB_HEADERS);
skb = ef4_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
if (unlikely(skb == NULL)) {
struct ef4_rx_queue *rx_queue;
rx_queue = ef4_channel_get_rx_queue(channel);
ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
return;
}
skb_record_rx_queue(skb, channel->rx_queue.core_index);
/* Set the SKB flags */
skb_checksum_none_assert(skb);
if (likely(rx_buf->flags & EF4_RX_PKT_CSUMMED))
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (channel->type->receive_skb)
if (channel->type->receive_skb(channel, skb))
return;
/* Pass the packet up */
netif_receive_skb(skb);
}
/* Handle a received packet. Second half: Touches packet payload. */
void __ef4_rx_packet(struct ef4_channel *channel)
{
struct ef4_nic *efx = channel->efx;
struct ef4_rx_buffer *rx_buf =
ef4_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
u8 *eh = ef4_rx_buf_va(rx_buf);
/* Read length from the prefix if necessary. This already
* excludes the length of the prefix itself.
*/
if (rx_buf->flags & EF4_RX_PKT_PREFIX_LEN)
rx_buf->len = le16_to_cpup((__le16 *)
(eh + efx->rx_packet_len_offset));
/* If we're in loopback test, then pass the packet directly to the
* loopback layer, and free the rx_buf here
*/
if (unlikely(efx->loopback_selftest)) {
struct ef4_rx_queue *rx_queue;
ef4_loopback_rx_packet(efx, eh, rx_buf->len);
rx_queue = ef4_channel_get_rx_queue(channel);
ef4_free_rx_buffers(rx_queue, rx_buf,
channel->rx_pkt_n_frags);
goto out;
}
if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
rx_buf->flags &= ~EF4_RX_PKT_CSUMMED;
if ((rx_buf->flags & EF4_RX_PKT_TCP) && !channel->type->receive_skb &&
!ef4_channel_busy_polling(channel))
ef4_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
else
ef4_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
channel->rx_pkt_n_frags = 0;
}
int ef4_probe_rx_queue(struct ef4_rx_queue *rx_queue)
{
struct ef4_nic *efx = rx_queue->efx;
unsigned int entries;
int rc;
/* Create the smallest power-of-two aligned ring */
entries = max(roundup_pow_of_two(efx->rxq_entries), EF4_MIN_DMAQ_SIZE);
EF4_BUG_ON_PARANOID(entries > EF4_MAX_DMAQ_SIZE);
rx_queue->ptr_mask = entries - 1;
netif_dbg(efx, probe, efx->net_dev,
"creating RX queue %d size %#x mask %#x\n",
ef4_rx_queue_index(rx_queue), efx->rxq_entries,
rx_queue->ptr_mask);
/* Allocate RX buffers */
rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
GFP_KERNEL);
if (!rx_queue->buffer)
return -ENOMEM;
rc = ef4_nic_probe_rx(rx_queue);
if (rc) {
kfree(rx_queue->buffer);
rx_queue->buffer = NULL;
}
return rc;
}
static void ef4_init_rx_recycle_ring(struct ef4_nic *efx,
struct ef4_rx_queue *rx_queue)
{
unsigned int bufs_in_recycle_ring, page_ring_size;
/* Set the RX recycle ring size */
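/* CONFIG_PPC64 is assumed to imply an active IOMMU, so the larger
 * ring size is used unconditionally there; elsewhere the IOMMU is
 * probed at runtime.
 */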
#ifdef CONFIG_PPC64
bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU;
#else
if (iommu_present(&pci_bus_type))
bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU;
else
bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */
page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
efx->rx_bufs_per_page);
rx_queue->page_ring = kcalloc(page_ring_size,
sizeof(*rx_queue->page_ring), GFP_KERNEL);
rx_queue->page_ptr_mask = page_ring_size - 1;
}
void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue)
{
struct ef4_nic *efx = rx_queue->efx;
unsigned int max_fill, trigger, max_trigger;
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
"initialising RX queue %d\n", ef4_rx_queue_index(rx_queue));
/* Initialise ptr fields */
rx_queue->added_count = 0;
rx_queue->notified_count = 0;
rx_queue->removed_count = 0;
rx_queue->min_fill = -1U;
ef4_init_rx_recycle_ring(efx, rx_queue);
rx_queue->page_remove = 0;
rx_queue->page_add = rx_queue->page_ptr_mask + 1;
rx_queue->page_recycle_count = 0;
rx_queue->page_recycle_failed = 0;
rx_queue->page_recycle_full = 0;
/* Initialise limit fields */
max_fill = efx->rxq_entries - EF4_RXD_HEAD_ROOM;
max_trigger =
max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
if (rx_refill_threshold != 0) {
trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
if (trigger > max_trigger)
trigger = max_trigger;
} else {
trigger = max_trigger;
}
rx_queue->max_fill = max_fill;
rx_queue->fast_fill_trigger = trigger;
rx_queue->refill_enabled = true;
/* Set up RX descriptor ring */
ef4_nic_init_rx(rx_queue);
}
void ef4_fini_rx_queue(struct ef4_rx_queue *rx_queue)
{
int i;
struct ef4_nic *efx = rx_queue->efx;
struct ef4_rx_buffer *rx_buf;
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
"shutting down RX queue %d\n", ef4_rx_queue_index(rx_queue));
del_timer_sync(&rx_queue->slow_fill);
/* Release RX buffers from the current read ptr to the write ptr */
if (rx_queue->buffer) {
for (i = rx_queue->removed_count; i < rx_queue->added_count;
i++) {
unsigned index = i & rx_queue->ptr_mask;
rx_buf = ef4_rx_buffer(rx_queue, index);
ef4_fini_rx_buffer(rx_queue, rx_buf);
}
}
/* Unmap and release the pages in the recycle ring. Remove the ring. */
for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
struct page *page = rx_queue->page_ring[i];
struct ef4_rx_page_state *state;
if (page == NULL)
continue;
state = page_address(page);
dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
PAGE_SIZE << efx->rx_buffer_order,
DMA_FROM_DEVICE);
put_page(page);
}
kfree(rx_queue->page_ring);
rx_queue->page_ring = NULL;
}
void ef4_remove_rx_queue(struct ef4_rx_queue *rx_queue)
{
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
"destroying RX queue %d\n", ef4_rx_queue_index(rx_queue));
ef4_nic_remove_rx(rx_queue);
kfree(rx_queue->buffer);
rx_queue->buffer = NULL;
}
module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
"RX descriptor ring refill threshold (%)");
#ifdef CONFIG_RFS_ACCEL
int ef4_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
struct ef4_nic *efx = netdev_priv(net_dev);
struct ef4_channel *channel;
struct ef4_filter_spec spec;
struct flow_keys fk;
int rc;
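/* Accelerated RFS: dissect the packet into a 5-tuple, build a
 * matching RX filter that steers to rxq_index, insert it in hardware
 * and record the flow ID so the expiry scan can remove the filter
 * once the flow goes idle.
 */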
if (flow_id == RPS_FLOW_ID_INVALID)
return -EINVAL;
if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
return -EPROTONOSUPPORT;
if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6))
return -EPROTONOSUPPORT;
if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
return -EPROTONOSUPPORT;
ef4_filter_init_rx(&spec, EF4_FILTER_PRI_HINT,
efx->rx_scatter ? EF4_FILTER_FLAG_RX_SCATTER : 0,
rxq_index);
spec.match_flags =
EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT |
EF4_FILTER_MATCH_REM_HOST | EF4_FILTER_MATCH_REM_PORT;
spec.ether_type = fk.basic.n_proto;
spec.ip_proto = fk.basic.ip_proto;
if (fk.basic.n_proto == htons(ETH_P_IP)) {
spec.rem_host[0] = fk.addrs.v4addrs.src;
spec.loc_host[0] = fk.addrs.v4addrs.dst;
} else {
memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr));
memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr));
}
spec.rem_port = fk.ports.src;
spec.loc_port = fk.ports.dst;
rc = efx->type->filter_rfs_insert(efx, &spec);
if (rc < 0)
return rc;
/* Remember this so we can check whether to expire the filter later */
channel = ef4_get_channel(efx, rxq_index);
channel->rps_flow_id[rc] = flow_id;
++channel->rfs_filters_added;
if (spec.ether_type == htons(ETH_P_IP))
netif_info(efx, rx_status, efx->net_dev,
"steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
(spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
ntohs(spec.loc_port), rxq_index, flow_id, rc);
else
netif_info(efx, rx_status, efx->net_dev,
"steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
(spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
ntohs(spec.loc_port), rxq_index, flow_id, rc);
return rc;
}
bool __ef4_filter_rfs_expire(struct ef4_nic *efx, unsigned int quota)
{
bool (*expire_one)(struct ef4_nic *efx, u32 flow_id, unsigned int index);
unsigned int channel_idx, index, size;
u32 flow_id;
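/* Scan at most 'quota' filter slots per call, resuming from the
 * (channel, index) cursor saved by the previous call, and expire any
 * whose flow is no longer considered active.
 */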
if (!spin_trylock_bh(&efx->filter_lock))
return false;
expire_one = efx->type->filter_rfs_expire_one;
channel_idx = efx->rps_expire_channel;
index = efx->rps_expire_index;
size = efx->type->max_rx_ip_filters;
while (quota--) {
struct ef4_channel *channel = ef4_get_channel(efx, channel_idx);
flow_id = channel->rps_flow_id[index];
if (flow_id != RPS_FLOW_ID_INVALID &&
expire_one(efx, flow_id, index)) {
netif_info(efx, rx_status, efx->net_dev,
"expired filter %d [queue %u flow %u]\n",
index, channel_idx, flow_id);
channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
}
if (++index == size) {
if (++channel_idx == efx->n_channels)
channel_idx = 0;
index = 0;
}
}
efx->rps_expire_channel = channel_idx;
efx->rps_expire_index = index;
spin_unlock_bh(&efx->filter_lock);
return true;
}
#endif /* CONFIG_RFS_ACCEL */
/**
* ef4_filter_is_mc_recipient - test whether spec is a multicast recipient
* @spec: Specification to test
*
* Return: %true if the specification is a non-drop RX filter that
* matches a local MAC address I/G bit value of 1 or matches a local
* IPv4 or IPv6 address value in the respective multicast address
* range. Otherwise %false.
*/
bool ef4_filter_is_mc_recipient(const struct ef4_filter_spec *spec)
{
if (!(spec->flags & EF4_FILTER_FLAG_RX) ||
spec->dmaq_id == EF4_FILTER_RX_DMAQ_ID_DROP)
return false;
if (spec->match_flags &
(EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_LOC_MAC_IG) &&
is_multicast_ether_addr(spec->loc_mac))
return true;
if ((spec->match_flags &
(EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_LOC_HOST)) ==
(EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_LOC_HOST)) {
if (spec->ether_type == htons(ETH_P_IP) &&
ipv4_is_multicast(spec->loc_host[0]))
return true;
if (spec->ether_type == htons(ETH_P_IPV6) &&
((const u8 *)spec->loc_host)[0] == 0xff)
return true;
}
return false;
}

View File

@ -0,0 +1,808 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kernel_stat.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"
/* IRQ latency can be enormous because:
* - All IRQs may be disabled on a CPU for a *long* time by e.g. a
* slow serial console or an old IDE driver doing error recovery
* - The PREEMPT_RT patches mostly deal with this, but also allow a
* tasklet or normal task to be given higher priority than our IRQ
* threads
* Try to avoid blaming the hardware for this.
*/
#define IRQ_TIMEOUT HZ
/*
* Loopback test packet structure
*
* The self-test should stress every RSS vector, and unfortunately
* Falcon only performs RSS on TCP/UDP packets.
*/
struct ef4_loopback_payload {
struct ethhdr header;
struct iphdr ip;
struct udphdr udp;
__be16 iteration;
char msg[64];
} __packed;
/* Loopback test source MAC address */
static const u8 payload_source[ETH_ALEN] __aligned(2) = {
0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
};
static const char payload_msg[] =
"Hello world! This is an Efx loopback test in progress!";
/* Interrupt mode names */
static const unsigned int ef4_interrupt_mode_max = EF4_INT_MODE_MAX;
static const char *const ef4_interrupt_mode_names[] = {
[EF4_INT_MODE_MSIX] = "MSI-X",
[EF4_INT_MODE_MSI] = "MSI",
[EF4_INT_MODE_LEGACY] = "legacy",
};
#define INT_MODE(efx) \
STRING_TABLE_LOOKUP(efx->interrupt_mode, ef4_interrupt_mode)
/**
* ef4_loopback_state - persistent state during a loopback selftest
* @flush: Drop all packets in ef4_loopback_rx_packet
* @packet_count: Number of packets being used in this test
* @skbs: An array of skbs transmitted
* @offload_csum: Checksums are being offloaded
* @rx_good: RX good packet count
* @rx_bad: RX bad packet count
* @payload: Payload used in tests
*/
struct ef4_loopback_state {
bool flush;
int packet_count;
struct sk_buff **skbs;
bool offload_csum;
atomic_t rx_good;
atomic_t rx_bad;
struct ef4_loopback_payload payload;
};
/* How long to wait for all the packets to arrive (in ms) */
#define LOOPBACK_TIMEOUT_MS 1000
/**************************************************************************
*
* MII, NVRAM and register tests
*
**************************************************************************/
static int ef4_test_phy_alive(struct ef4_nic *efx, struct ef4_self_tests *tests)
{
int rc = 0;
if (efx->phy_op->test_alive) {
rc = efx->phy_op->test_alive(efx);
tests->phy_alive = rc ? -1 : 1;
}
return rc;
}
static int ef4_test_nvram(struct ef4_nic *efx, struct ef4_self_tests *tests)
{
int rc = 0;
if (efx->type->test_nvram) {
rc = efx->type->test_nvram(efx);
if (rc == -EPERM)
rc = 0;
else
tests->nvram = rc ? -1 : 1;
}
return rc;
}
/**************************************************************************
*
* Interrupt and event queue testing
*
**************************************************************************/
/* Test generation and receipt of interrupts */
static int ef4_test_interrupts(struct ef4_nic *efx,
struct ef4_self_tests *tests)
{
unsigned long timeout, wait;
int cpu;
int rc;
netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
tests->interrupt = -1;
rc = ef4_nic_irq_test_start(efx);
if (rc == -ENOTSUPP) {
netif_dbg(efx, drv, efx->net_dev,
"direct interrupt testing not supported\n");
tests->interrupt = 0;
return 0;
}
timeout = jiffies + IRQ_TIMEOUT;
wait = 1;
/* Wait for arrival of test interrupt. */
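/* Poll with exponentially increasing sleeps (1, 2, 4, ... jiffies)
 * until the interrupt is seen or IRQ_TIMEOUT expires.
 */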
netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
do {
schedule_timeout_uninterruptible(wait);
cpu = ef4_nic_irq_test_irq_cpu(efx);
if (cpu >= 0)
goto success;
wait *= 2;
} while (time_before(jiffies, timeout));
netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
return -ETIMEDOUT;
success:
netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
INT_MODE(efx), cpu);
tests->interrupt = 1;
return 0;
}
/* Test generation and receipt of interrupting events */
static int ef4_test_eventq_irq(struct ef4_nic *efx,
struct ef4_self_tests *tests)
{
struct ef4_channel *channel;
unsigned int read_ptr[EF4_MAX_CHANNELS];
unsigned long napi_ran = 0, dma_pend = 0, int_pend = 0;
unsigned long timeout, wait;
BUILD_BUG_ON(EF4_MAX_CHANNELS > BITS_PER_LONG);
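/* Each channel owns one bit in dma_pend and int_pend; a bit is
 * cleared once the test event (DMA) or its interrupt has been seen
 * on that channel.
 */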
ef4_for_each_channel(channel, efx) {
read_ptr[channel->channel] = channel->eventq_read_ptr;
set_bit(channel->channel, &dma_pend);
set_bit(channel->channel, &int_pend);
ef4_nic_event_test_start(channel);
}
timeout = jiffies + IRQ_TIMEOUT;
wait = 1;
/* Wait for arrival of interrupts. NAPI processing may or may
* not complete in time, but we can cope in any case.
*/
do {
schedule_timeout_uninterruptible(wait);
ef4_for_each_channel(channel, efx) {
ef4_stop_eventq(channel);
if (channel->eventq_read_ptr !=
read_ptr[channel->channel]) {
set_bit(channel->channel, &napi_ran);
clear_bit(channel->channel, &dma_pend);
clear_bit(channel->channel, &int_pend);
} else {
if (ef4_nic_event_present(channel))
clear_bit(channel->channel, &dma_pend);
if (ef4_nic_event_test_irq_cpu(channel) >= 0)
clear_bit(channel->channel, &int_pend);
}
ef4_start_eventq(channel);
}
wait *= 2;
} while ((dma_pend || int_pend) && time_before(jiffies, timeout));
ef4_for_each_channel(channel, efx) {
bool dma_seen = !test_bit(channel->channel, &dma_pend);
bool int_seen = !test_bit(channel->channel, &int_pend);
tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
tests->eventq_int[channel->channel] = int_seen ? 1 : -1;
if (dma_seen && int_seen) {
netif_dbg(efx, drv, efx->net_dev,
"channel %d event queue passed (with%s NAPI)\n",
channel->channel,
test_bit(channel->channel, &napi_ran) ?
"" : "out");
} else {
/* Report failure and whether either interrupt or DMA
* worked
*/
netif_err(efx, drv, efx->net_dev,
"channel %d timed out waiting for event queue\n",
channel->channel);
if (int_seen)
netif_err(efx, drv, efx->net_dev,
"channel %d saw interrupt "
"during event queue test\n",
channel->channel);
if (dma_seen)
netif_err(efx, drv, efx->net_dev,
"channel %d event was generated, but "
"failed to trigger an interrupt\n",
channel->channel);
}
}
return (dma_pend || int_pend) ? -ETIMEDOUT : 0;
}
static int ef4_test_phy(struct ef4_nic *efx, struct ef4_self_tests *tests,
unsigned flags)
{
int rc;
if (!efx->phy_op->run_tests)
return 0;
mutex_lock(&efx->mac_lock);
rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags);
mutex_unlock(&efx->mac_lock);
if (rc == -EPERM)
rc = 0;
else
netif_info(efx, drv, efx->net_dev,
"%s phy selftest\n", rc ? "Failed" : "Passed");
return rc;
}
/**************************************************************************
*
* Loopback testing
* NB Only one loopback test can execute at a time.
*
**************************************************************************/
/* Loopback test RX callback
* This is called for each received packet during loopback testing.
*/
void ef4_loopback_rx_packet(struct ef4_nic *efx,
const char *buf_ptr, int pkt_len)
{
struct ef4_loopback_state *state = efx->loopback_selftest;
struct ef4_loopback_payload *received;
struct ef4_loopback_payload *payload;
BUG_ON(!buf_ptr);
/* If we are just flushing, then drop the packet */
if ((state == NULL) || state->flush)
return;
payload = &state->payload;
received = (struct ef4_loopback_payload *) buf_ptr;
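/* The source address varies per packet, and with checksum offload the
 * IP checksum on the wire differs from the template, so copy the
 * expected values in before comparing; any remaining mismatch then
 * indicates real corruption.
 */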
received->ip.saddr = payload->ip.saddr;
if (state->offload_csum)
received->ip.check = payload->ip.check;
/* Check that header exists */
if (pkt_len < sizeof(received->header)) {
netif_err(efx, drv, efx->net_dev,
"saw runt RX packet (length %d) in %s loopback "
"test\n", pkt_len, LOOPBACK_MODE(efx));
goto err;
}
/* Check that the ethernet header exists */
if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
netif_err(efx, drv, efx->net_dev,
"saw non-loopback RX packet in %s loopback test\n",
LOOPBACK_MODE(efx));
goto err;
}
/* Check packet length */
if (pkt_len != sizeof(*payload)) {
netif_err(efx, drv, efx->net_dev,
"saw incorrect RX packet length %d (wanted %d) in "
"%s loopback test\n", pkt_len, (int)sizeof(*payload),
LOOPBACK_MODE(efx));
goto err;
}
/* Check that IP header matches */
if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
netif_err(efx, drv, efx->net_dev,
"saw corrupted IP header in %s loopback test\n",
LOOPBACK_MODE(efx));
goto err;
}
/* Check that msg and padding matches */
if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
netif_err(efx, drv, efx->net_dev,
"saw corrupted RX packet in %s loopback test\n",
LOOPBACK_MODE(efx));
goto err;
}
/* Check that iteration matches */
if (received->iteration != payload->iteration) {
netif_err(efx, drv, efx->net_dev,
"saw RX packet from iteration %d (wanted %d) in "
"%s loopback test\n", ntohs(received->iteration),
ntohs(payload->iteration), LOOPBACK_MODE(efx));
goto err;
}
/* Increase correct RX count */
netif_vdbg(efx, drv, efx->net_dev,
"got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));
atomic_inc(&state->rx_good);
return;
err:
#ifdef DEBUG
if (atomic_read(&state->rx_bad) == 0) {
netif_err(efx, drv, efx->net_dev, "received packet:\n");
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
buf_ptr, pkt_len, 0);
netif_err(efx, drv, efx->net_dev, "expected packet:\n");
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
&state->payload, sizeof(state->payload), 0);
}
#endif
atomic_inc(&state->rx_bad);
}
/* Initialise an ef4_selftest_state for a new iteration */
static void ef4_iterate_state(struct ef4_nic *efx)
{
struct ef4_loopback_state *state = efx->loopback_selftest;
struct net_device *net_dev = efx->net_dev;
struct ef4_loopback_payload *payload = &state->payload;
/* Initialise the layerII header */
ether_addr_copy((u8 *)&payload->header.h_dest, net_dev->dev_addr);
ether_addr_copy((u8 *)&payload->header.h_source, payload_source);
payload->header.h_proto = htons(ETH_P_IP);
/* saddr set later and used as incrementing count */
payload->ip.daddr = htonl(INADDR_LOOPBACK);
payload->ip.ihl = 5;
payload->ip.check = (__force __sum16) htons(0xdead);
payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
payload->ip.version = IPVERSION;
payload->ip.protocol = IPPROTO_UDP;
/* Initialise udp header */
payload->udp.source = 0;
payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) -
sizeof(struct iphdr));
payload->udp.check = 0; /* checksum ignored */
/* Fill out payload */
payload->iteration = htons(ntohs(payload->iteration) + 1);
memcpy(&payload->msg, payload_msg, sizeof(payload_msg));
/* Fill out remaining state members */
atomic_set(&state->rx_good, 0);
atomic_set(&state->rx_bad, 0);
smp_wmb();
}
static int ef4_begin_loopback(struct ef4_tx_queue *tx_queue)
{
struct ef4_nic *efx = tx_queue->efx;
struct ef4_loopback_state *state = efx->loopback_selftest;
struct ef4_loopback_payload *payload;
struct sk_buff *skb;
int i;
netdev_tx_t rc;
/* Transmit N copies of buffer */
for (i = 0; i < state->packet_count; i++) {
/* Allocate an skb, holding an extra reference for
* transmit completion counting */
skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
if (!skb)
return -ENOMEM;
state->skbs[i] = skb;
skb_get(skb);
/* Copy the payload in, incrementing the source address to
* exercise the RSS vectors */
payload = ((struct ef4_loopback_payload *)
skb_put(skb, sizeof(state->payload)));
memcpy(payload, &state->payload, sizeof(state->payload));
payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));
/* Ensure everything we've written is visible to the
* interrupt handler. */
smp_wmb();
netif_tx_lock_bh(efx->net_dev);
rc = ef4_enqueue_skb(tx_queue, skb);
netif_tx_unlock_bh(efx->net_dev);
if (rc != NETDEV_TX_OK) {
netif_err(efx, drv, efx->net_dev,
"TX queue %d could not transmit packet %d of "
"%d in %s loopback test\n", tx_queue->queue,
i + 1, state->packet_count,
LOOPBACK_MODE(efx));
/* Defer cleaning up the other skbs for the caller */
kfree_skb(skb);
return -EPIPE;
}
}
return 0;
}
static int ef4_poll_loopback(struct ef4_nic *efx)
{
struct ef4_loopback_state *state = efx->loopback_selftest;
return atomic_read(&state->rx_good) == state->packet_count;
}
static int ef4_end_loopback(struct ef4_tx_queue *tx_queue,
struct ef4_loopback_self_tests *lb_tests)
{
struct ef4_nic *efx = tx_queue->efx;
struct ef4_loopback_state *state = efx->loopback_selftest;
struct sk_buff *skb;
int tx_done = 0, rx_good, rx_bad;
int i, rc = 0;
netif_tx_lock_bh(efx->net_dev);
/* Count the number of TX completions and decrement the refcnt. Any
* skbs not already completed will be freed when the queue is flushed */
for (i = 0; i < state->packet_count; i++) {
skb = state->skbs[i];
if (skb && !skb_shared(skb))
++tx_done;
dev_kfree_skb(skb);
}
netif_tx_unlock_bh(efx->net_dev);
/* Check TX completion and received packet counts */
rx_good = atomic_read(&state->rx_good);
rx_bad = atomic_read(&state->rx_bad);
if (tx_done != state->packet_count) {
/* Don't free the skbs; they will be picked up on TX
* overflow or channel teardown.
*/
netif_err(efx, drv, efx->net_dev,
"TX queue %d saw only %d out of an expected %d "
"TX completion events in %s loopback test\n",
tx_queue->queue, tx_done, state->packet_count,
LOOPBACK_MODE(efx));
rc = -ETIMEDOUT;
/* Allow falling through so we see the RX errors as well */
}
/* We may always be up to a flush away from our desired packet total */
if (rx_good != state->packet_count) {
netif_dbg(efx, drv, efx->net_dev,
"TX queue %d saw only %d out of an expected %d "
"received packets in %s loopback test\n",
tx_queue->queue, rx_good, state->packet_count,
LOOPBACK_MODE(efx));
rc = -ETIMEDOUT;
/* Fall through */
}
/* Update loopback test structure */
lb_tests->tx_sent[tx_queue->queue] += state->packet_count;
lb_tests->tx_done[tx_queue->queue] += tx_done;
lb_tests->rx_good += rx_good;
lb_tests->rx_bad += rx_bad;
return rc;
}
static int
ef4_test_loopback(struct ef4_tx_queue *tx_queue,
struct ef4_loopback_self_tests *lb_tests)
{
struct ef4_nic *efx = tx_queue->efx;
struct ef4_loopback_state *state = efx->loopback_selftest;
int i, begin_rc, end_rc;
for (i = 0; i < 3; i++) {
/* Determine how many packets to send */
state->packet_count = efx->txq_entries / 3;
state->packet_count = min(1 << (i << 2), state->packet_count);
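/* For i = 0, 1, 2 this selects bursts of 1, 16 and 256 packets
 * respectively, each capped at a third of the TX ring
 * (efx->txq_entries / 3). */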
state->skbs = kcalloc(state->packet_count,
sizeof(state->skbs[0]), GFP_KERNEL);
if (!state->skbs)
return -ENOMEM;
state->flush = false;
netif_dbg(efx, drv, efx->net_dev,
"TX queue %d testing %s loopback with %d packets\n",
tx_queue->queue, LOOPBACK_MODE(efx),
state->packet_count);
ef4_iterate_state(efx);
begin_rc = ef4_begin_loopback(tx_queue);
/* This will normally complete very quickly, but be
* prepared to wait much longer. */
msleep(1);
if (!ef4_poll_loopback(efx)) {
msleep(LOOPBACK_TIMEOUT_MS);
ef4_poll_loopback(efx);
}
end_rc = ef4_end_loopback(tx_queue, lb_tests);
kfree(state->skbs);
if (begin_rc || end_rc) {
/* Wait a while to ensure there are no packets
* floating around after a failure. */
schedule_timeout_uninterruptible(HZ / 10);
return begin_rc ? begin_rc : end_rc;
}
}
netif_dbg(efx, drv, efx->net_dev,
"TX queue %d passed %s loopback test with a burst length "
"of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
state->packet_count);
return 0;
}
/* Wait for link up. On Falcon, we would prefer to rely on ef4_monitor, but
* any contention on the mac lock (via e.g. ef4_mac_mcast_work) causes it
* to delay and retry. Therefore, it's safer to just poll directly. Wait
* for link up and any faults to dissipate. */
static int ef4_wait_for_link(struct ef4_nic *efx)
{
struct ef4_link_state *link_state = &efx->link_state;
int count, link_up_count = 0;
bool link_up;
for (count = 0; count < 40; count++) {
schedule_timeout_uninterruptible(HZ / 10);
if (efx->type->monitor != NULL) {
mutex_lock(&efx->mac_lock);
efx->type->monitor(efx);
mutex_unlock(&efx->mac_lock);
}
mutex_lock(&efx->mac_lock);
link_up = link_state->up;
if (link_up)
link_up = !efx->type->check_mac_fault(efx);
mutex_unlock(&efx->mac_lock);
if (link_up) {
if (++link_up_count == 2)
return 0;
} else {
link_up_count = 0;
}
}
return -ETIMEDOUT;
}
static int ef4_test_loopbacks(struct ef4_nic *efx, struct ef4_self_tests *tests,
unsigned int loopback_modes)
{
enum ef4_loopback_mode mode;
struct ef4_loopback_state *state;
struct ef4_channel *channel =
ef4_get_channel(efx, efx->tx_channel_offset);
struct ef4_tx_queue *tx_queue;
int rc = 0;
/* Set the port loopback_selftest member. From this point on
* all received packets will be dropped. Mark the state as
* "flushing" so all inflight packets are dropped */
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
BUG_ON(efx->loopback_selftest);
state->flush = true;
efx->loopback_selftest = state;
/* Test all supported loopback modes */
for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
if (!(loopback_modes & (1 << mode)))
continue;
/* Move the port into the specified loopback mode. */
state->flush = true;
mutex_lock(&efx->mac_lock);
efx->loopback_mode = mode;
rc = __ef4_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"unable to move into %s loopback\n",
LOOPBACK_MODE(efx));
goto out;
}
rc = ef4_wait_for_link(efx);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"loopback %s never came up\n",
LOOPBACK_MODE(efx));
goto out;
}
/* Test all enabled types of TX queue */
ef4_for_each_channel_tx_queue(tx_queue, channel) {
state->offload_csum = (tx_queue->queue &
EF4_TXQ_TYPE_OFFLOAD);
rc = ef4_test_loopback(tx_queue,
&tests->loopback[mode]);
if (rc)
goto out;
}
}
out:
/* Remove the flush. The caller will remove the loopback setting */
state->flush = true;
efx->loopback_selftest = NULL;
wmb();
kfree(state);
if (rc == -EPERM)
rc = 0;
return rc;
}
/**************************************************************************
*
* Entry point
*
*************************************************************************/
int ef4_selftest(struct ef4_nic *efx, struct ef4_self_tests *tests,
unsigned flags)
{
enum ef4_loopback_mode loopback_mode = efx->loopback_mode;
int phy_mode = efx->phy_mode;
int rc_test = 0, rc_reset, rc;
ef4_selftest_async_cancel(efx);
/* Online (i.e. non-disruptive) testing
* This checks interrupt generation, event delivery and PHY presence. */
rc = ef4_test_phy_alive(efx, tests);
if (rc && !rc_test)
rc_test = rc;
rc = ef4_test_nvram(efx, tests);
if (rc && !rc_test)
rc_test = rc;
rc = ef4_test_interrupts(efx, tests);
if (rc && !rc_test)
rc_test = rc;
rc = ef4_test_eventq_irq(efx, tests);
if (rc && !rc_test)
rc_test = rc;
if (rc_test)
return rc_test;
if (!(flags & ETH_TEST_FL_OFFLINE))
return ef4_test_phy(efx, tests, flags);
/* Offline (i.e. disruptive) testing
* This checks MAC and PHY loopback on the specified port. */
/* Detach the device so the kernel doesn't transmit during the
* loopback test and the watchdog timeout doesn't fire.
*/
ef4_device_detach_sync(efx);
if (efx->type->test_chip) {
rc_reset = efx->type->test_chip(efx, tests);
if (rc_reset) {
netif_err(efx, hw, efx->net_dev,
"Unable to recover from chip test\n");
ef4_schedule_reset(efx, RESET_TYPE_DISABLE);
return rc_reset;
}
if ((tests->memory < 0 || tests->registers < 0) && !rc_test)
rc_test = -EIO;
}
/* Ensure that the phy is powered and out of loopback
* for the bist and loopback tests */
mutex_lock(&efx->mac_lock);
efx->phy_mode &= ~PHY_MODE_LOW_POWER;
efx->loopback_mode = LOOPBACK_NONE;
__ef4_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
rc = ef4_test_phy(efx, tests, flags);
if (rc && !rc_test)
rc_test = rc;
rc = ef4_test_loopbacks(efx, tests, efx->loopback_modes);
if (rc && !rc_test)
rc_test = rc;
/* restore the PHY to the previous state */
mutex_lock(&efx->mac_lock);
efx->phy_mode = phy_mode;
efx->loopback_mode = loopback_mode;
__ef4_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
netif_device_attach(efx->net_dev);
return rc_test;
}
void ef4_selftest_async_start(struct ef4_nic *efx)
{
struct ef4_channel *channel;
ef4_for_each_channel(channel, efx)
ef4_nic_event_test_start(channel);
schedule_delayed_work(&efx->selftest_work, IRQ_TIMEOUT);
}
void ef4_selftest_async_cancel(struct ef4_nic *efx)
{
cancel_delayed_work_sync(&efx->selftest_work);
}
void ef4_selftest_async_work(struct work_struct *data)
{
struct ef4_nic *efx = container_of(data, struct ef4_nic,
selftest_work.work);
struct ef4_channel *channel;
int cpu;
ef4_for_each_channel(channel, efx) {
cpu = ef4_nic_event_test_irq_cpu(channel);
if (cpu < 0)
netif_err(efx, ifup, efx->net_dev,
"channel %d failed to trigger an interrupt\n",
channel->channel);
else
netif_dbg(efx, ifup, efx->net_dev,
"channel %d triggered interrupt on CPU %d\n",
channel->channel, cpu);
}
}
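/* Illustrative sketch only (not part of this file): a driver's ethtool
 * .self_test hook would typically drive ef4_selftest() roughly as below.
 * The wrapper name and the result-reporting step are assumptions made
 * for the example.
 */
static void example_ethtool_self_test(struct net_device *net_dev,
				      struct ethtool_test *test, u64 *data)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct ef4_self_tests *tests;

	tests = kzalloc(sizeof(*tests), GFP_KERNEL);
	if (!tests) {
		test->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	if (ef4_selftest(efx, tests, test->flags))
		test->flags |= ETH_TEST_FL_FAILED;

	/* Individual results from *tests would then be copied into data[]
	 * in the same order as the corresponding test-name strings. */
	kfree(tests);
}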

View File

@ -0,0 +1,55 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EF4_SELFTEST_H
#define EF4_SELFTEST_H
#include "net_driver.h"
/*
* Self tests
*/
struct ef4_loopback_self_tests {
int tx_sent[EF4_TXQ_TYPES];
int tx_done[EF4_TXQ_TYPES];
int rx_good;
int rx_bad;
};
#define EF4_MAX_PHY_TESTS 20
/* Efx self test results
* For fields which are not counters, 1 indicates success and -1
* indicates failure; 0 indicates test could not be run.
*/
struct ef4_self_tests {
/* online tests */
int phy_alive;
int nvram;
int interrupt;
int eventq_dma[EF4_MAX_CHANNELS];
int eventq_int[EF4_MAX_CHANNELS];
/* offline tests */
int memory;
int registers;
int phy_ext[EF4_MAX_PHY_TESTS];
struct ef4_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
};
void ef4_loopback_rx_packet(struct ef4_nic *efx, const char *buf_ptr,
int pkt_len);
int ef4_selftest(struct ef4_nic *efx, struct ef4_self_tests *tests,
unsigned flags);
void ef4_selftest_async_start(struct ef4_nic *efx);
void ef4_selftest_async_cancel(struct ef4_nic *efx);
void ef4_selftest_async_work(struct work_struct *data);
#endif /* EF4_SELFTEST_H */

View File

@ -143,27 +143,27 @@
#define LNPGA_PDOWN_WAIT (HZ / 5)
struct tenxpress_phy_data {
enum efx_loopback_mode loopback_mode;
enum efx_phy_mode phy_mode;
enum ef4_loopback_mode loopback_mode;
enum ef4_phy_mode phy_mode;
int bad_lp_tries;
};
static int tenxpress_init(struct efx_nic *efx)
static int tenxpress_init(struct ef4_nic *efx)
{
/* Enable 312.5 MHz clock */
efx_mdio_write(efx, MDIO_MMD_PCS, PCS_TEST_SELECT_REG,
ef4_mdio_write(efx, MDIO_MMD_PCS, PCS_TEST_SELECT_REG,
1 << CLK312_EN_LBN);
/* Set the LEDs up as: Green = Link, Amber = Link/Act, Red = Off */
efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG,
ef4_mdio_set_flag(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG,
1 << PMA_PMA_LED_ACTIVITY_LBN, true);
efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG,
ef4_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG,
SFX7101_PMA_PMD_LED_DEFAULT);
return 0;
}
static int tenxpress_phy_probe(struct efx_nic *efx)
static int tenxpress_phy_probe(struct ef4_nic *efx)
{
struct tenxpress_phy_data *phy_data;
@ -185,18 +185,18 @@ static int tenxpress_phy_probe(struct efx_nic *efx)
return 0;
}
static int tenxpress_phy_init(struct efx_nic *efx)
static int tenxpress_phy_init(struct ef4_nic *efx)
{
int rc;
falcon_board(efx)->type->init_phy(efx);
if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
rc = ef4_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
if (rc < 0)
return rc;
rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS);
rc = ef4_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS);
if (rc < 0)
return rc;
}
@ -206,8 +206,8 @@ static int tenxpress_phy_init(struct efx_nic *efx)
return rc;
/* Reinitialise flow control settings */
efx_link_set_wanted_fc(efx, efx->wanted_fc);
efx_mdio_an_reconfigure(efx);
ef4_link_set_wanted_fc(efx, efx->wanted_fc);
ef4_mdio_an_reconfigure(efx);
schedule_timeout_uninterruptible(HZ / 5); /* 200ms */
@ -220,7 +220,7 @@ static int tenxpress_phy_init(struct efx_nic *efx)
/* Perform a "special software reset" on the PHY. The caller is
* responsible for saving and restoring the PHY hardware registers
* properly, and masking/unmasking LASI */
static int tenxpress_special_reset(struct efx_nic *efx)
static int tenxpress_special_reset(struct ef4_nic *efx)
{
int rc, reg;
@ -230,14 +230,14 @@ static int tenxpress_special_reset(struct efx_nic *efx)
falcon_stop_nic_stats(efx);
/* Initiate reset */
reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG);
reg = ef4_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG);
reg |= (1 << PMA_PMD_EXT_SSR_LBN);
efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
ef4_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
mdelay(200);
/* Wait for the blocks to come out of reset */
rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
rc = ef4_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
if (rc < 0)
goto out;
@ -253,7 +253,7 @@ out:
return rc;
}
static void sfx7101_check_bad_lp(struct efx_nic *efx, bool link_ok)
static void sfx7101_check_bad_lp(struct ef4_nic *efx, bool link_ok)
{
struct tenxpress_phy_data *pd = efx->phy_data;
bool bad_lp;
@ -263,7 +263,7 @@ static void sfx7101_check_bad_lp(struct efx_nic *efx, bool link_ok)
bad_lp = false;
} else {
/* Check that AN has started but not completed. */
reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_STAT1);
reg = ef4_mdio_read(efx, MDIO_MMD_AN, MDIO_STAT1);
if (!(reg & MDIO_AN_STAT1_LPABLE))
return; /* LP status is unknown */
bad_lp = !(reg & MDIO_AN_STAT1_COMPLETE);
@ -278,7 +278,7 @@ static void sfx7101_check_bad_lp(struct efx_nic *efx, bool link_ok)
/* Use the RX (red) LED as an error indicator once we've seen AN
* failure several times in a row, and also log a message. */
if (!bad_lp || pd->bad_lp_tries == MAX_BAD_LP_TRIES) {
reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD,
reg = ef4_mdio_read(efx, MDIO_MMD_PMAPMD,
PMA_PMD_LED_OVERR_REG);
reg &= ~(PMA_PMD_LED_MASK << PMA_PMD_LED_RX_LBN);
if (!bad_lp) {
@ -291,35 +291,35 @@ static void sfx7101_check_bad_lp(struct efx_nic *efx, bool link_ok)
" supports 10GBASE-T ONLY, so no link can"
" be established\n");
}
efx_mdio_write(efx, MDIO_MMD_PMAPMD,
ef4_mdio_write(efx, MDIO_MMD_PMAPMD,
PMA_PMD_LED_OVERR_REG, reg);
pd->bad_lp_tries = bad_lp;
}
}
static bool sfx7101_link_ok(struct efx_nic *efx)
static bool sfx7101_link_ok(struct ef4_nic *efx)
{
return efx_mdio_links_ok(efx,
return ef4_mdio_links_ok(efx,
MDIO_DEVS_PMAPMD |
MDIO_DEVS_PCS |
MDIO_DEVS_PHYXS);
}
static void tenxpress_ext_loopback(struct efx_nic *efx)
static void tenxpress_ext_loopback(struct ef4_nic *efx)
{
efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, PHYXS_TEST1,
ef4_mdio_set_flag(efx, MDIO_MMD_PHYXS, PHYXS_TEST1,
1 << LOOPBACK_NEAR_LBN,
efx->loopback_mode == LOOPBACK_PHYXS);
}
static void tenxpress_low_power(struct efx_nic *efx)
static void tenxpress_low_power(struct ef4_nic *efx)
{
efx_mdio_set_mmds_lpower(
ef4_mdio_set_mmds_lpower(
efx, !!(efx->phy_mode & PHY_MODE_LOW_POWER),
TENXPRESS_REQUIRED_DEVS);
}
static int tenxpress_phy_reconfigure(struct efx_nic *efx)
static int tenxpress_phy_reconfigure(struct ef4_nic *efx)
{
struct tenxpress_phy_data *phy_data = efx->phy_data;
bool phy_mode_change, loop_reset;
@ -340,10 +340,10 @@ static int tenxpress_phy_reconfigure(struct efx_nic *efx)
}
tenxpress_low_power(efx);
efx_mdio_transmit_disable(efx);
efx_mdio_phy_reconfigure(efx);
ef4_mdio_transmit_disable(efx);
ef4_mdio_phy_reconfigure(efx);
tenxpress_ext_loopback(efx);
efx_mdio_an_reconfigure(efx);
ef4_mdio_an_reconfigure(efx);
phy_data->loopback_mode = efx->loopback_mode;
phy_data->phy_mode = efx->phy_mode;
@ -352,30 +352,30 @@ static int tenxpress_phy_reconfigure(struct efx_nic *efx)
}
static void
tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
tenxpress_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd);
/* Poll for link state changes */
static bool tenxpress_phy_poll(struct efx_nic *efx)
static bool tenxpress_phy_poll(struct ef4_nic *efx)
{
struct efx_link_state old_state = efx->link_state;
struct ef4_link_state old_state = efx->link_state;
efx->link_state.up = sfx7101_link_ok(efx);
efx->link_state.speed = 10000;
efx->link_state.fd = true;
efx->link_state.fc = efx_mdio_get_pause(efx);
efx->link_state.fc = ef4_mdio_get_pause(efx);
sfx7101_check_bad_lp(efx, efx->link_state.up);
return !efx_link_state_equal(&efx->link_state, &old_state);
return !ef4_link_state_equal(&efx->link_state, &old_state);
}
static void sfx7101_phy_fini(struct efx_nic *efx)
static void sfx7101_phy_fini(struct ef4_nic *efx)
{
int reg;
/* Power down the LNPGA */
reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
ef4_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
/* Waiting here ensures that the board fini, which can turn
* off the power to the PHY, won't get run until the LNPGA
@ -383,7 +383,7 @@ static void sfx7101_phy_fini(struct efx_nic *efx)
schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
}
static void tenxpress_phy_remove(struct efx_nic *efx)
static void tenxpress_phy_remove(struct ef4_nic *efx)
{
kfree(efx->phy_data);
efx->phy_data = NULL;
@ -391,17 +391,17 @@ static void tenxpress_phy_remove(struct efx_nic *efx)
/* Override the RX, TX and link LEDs */
void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
void tenxpress_set_id_led(struct ef4_nic *efx, enum ef4_led_mode mode)
{
int reg;
switch (mode) {
case EFX_LED_OFF:
case EF4_LED_OFF:
reg = (PMA_PMD_LED_OFF << PMA_PMD_LED_TX_LBN) |
(PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN) |
(PMA_PMD_LED_OFF << PMA_PMD_LED_LINK_LBN);
break;
case EFX_LED_ON:
case EF4_LED_ON:
reg = (PMA_PMD_LED_ON << PMA_PMD_LED_TX_LBN) |
(PMA_PMD_LED_ON << PMA_PMD_LED_RX_LBN) |
(PMA_PMD_LED_ON << PMA_PMD_LED_LINK_LBN);
@ -411,14 +411,14 @@ void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
break;
}
efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG, reg);
ef4_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG, reg);
}
static const char *const sfx7101_test_names[] = {
"bist"
};
static const char *sfx7101_test_name(struct efx_nic *efx, unsigned int index)
static const char *sfx7101_test_name(struct ef4_nic *efx, unsigned int index)
{
if (index < ARRAY_SIZE(sfx7101_test_names))
return sfx7101_test_names[index];
@ -426,7 +426,7 @@ static const char *sfx7101_test_name(struct efx_nic *efx, unsigned int index)
}
static int
sfx7101_run_tests(struct efx_nic *efx, int *results, unsigned flags)
sfx7101_run_tests(struct ef4_nic *efx, int *results, unsigned flags)
{
int rc;
@ -437,21 +437,21 @@ sfx7101_run_tests(struct efx_nic *efx, int *results, unsigned flags)
rc = tenxpress_special_reset(efx);
results[0] = rc ? -1 : 1;
efx_mdio_an_reconfigure(efx);
ef4_mdio_an_reconfigure(efx);
return rc;
}
static void
tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
tenxpress_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd)
{
u32 adv = 0, lpa = 0;
int reg;
reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL);
reg = ef4_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL);
if (reg & MDIO_AN_10GBT_CTRL_ADV10G)
adv |= ADVERTISED_10000baseT_Full;
reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
reg = ef4_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
if (reg & MDIO_AN_10GBT_STAT_LP10G)
lpa |= ADVERTISED_10000baseT_Full;
@ -463,22 +463,22 @@ tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
ethtool_cmd_speed_set(ecmd, SPEED_10000);
}
static int tenxpress_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
static int tenxpress_set_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd)
{
if (!ecmd->autoneg)
return -EINVAL;
return efx_mdio_set_settings(efx, ecmd);
return ef4_mdio_set_settings(efx, ecmd);
}
static void sfx7101_set_npage_adv(struct efx_nic *efx, u32 advertising)
static void sfx7101_set_npage_adv(struct ef4_nic *efx, u32 advertising)
{
efx_mdio_set_flag(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
ef4_mdio_set_flag(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
MDIO_AN_10GBT_CTRL_ADV10G,
advertising & ADVERTISED_10000baseT_Full);
}
const struct efx_phy_operations falcon_sfx7101_phy_ops = {
const struct ef4_phy_operations falcon_sfx7101_phy_ops = {
.probe = tenxpress_phy_probe,
.init = tenxpress_phy_init,
.reconfigure = tenxpress_phy_reconfigure,
@ -488,7 +488,7 @@ const struct efx_phy_operations falcon_sfx7101_phy_ops = {
.get_settings = tenxpress_get_settings,
.set_settings = tenxpress_set_settings,
.set_npage_adv = sfx7101_set_npage_adv,
.test_alive = efx_mdio_test_alive,
.test_alive = ef4_mdio_test_alive,
.test_name = sfx7101_test_name,
.run_tests = sfx7101_run_tests,
};

View File

@ -0,0 +1,649 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2005-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "tx.h"
#include "workarounds.h"
static inline u8 *ef4_tx_get_copy_buffer(struct ef4_tx_queue *tx_queue,
struct ef4_tx_buffer *buffer)
{
unsigned int index = ef4_tx_queue_get_insert_index(tx_queue);
struct ef4_buffer *page_buf =
&tx_queue->cb_page[index >> (PAGE_SHIFT - EF4_TX_CB_ORDER)];
unsigned int offset =
((index << EF4_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);
if (unlikely(!page_buf->addr) &&
ef4_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
GFP_ATOMIC))
return NULL;
buffer->dma_addr = page_buf->dma_addr + offset;
buffer->unmap_len = 0;
return (u8 *)page_buf->addr + offset;
}
u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue,
struct ef4_tx_buffer *buffer, size_t len)
{
if (len > EF4_TX_CB_SIZE)
return NULL;
return ef4_tx_get_copy_buffer(tx_queue, buffer);
}
static void ef4_dequeue_buffer(struct ef4_tx_queue *tx_queue,
struct ef4_tx_buffer *buffer,
unsigned int *pkts_compl,
unsigned int *bytes_compl)
{
if (buffer->unmap_len) {
struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
if (buffer->flags & EF4_TX_BUF_MAP_SINGLE)
dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
DMA_TO_DEVICE);
else
dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
DMA_TO_DEVICE);
buffer->unmap_len = 0;
}
if (buffer->flags & EF4_TX_BUF_SKB) {
(*pkts_compl)++;
(*bytes_compl) += buffer->skb->len;
dev_consume_skb_any((struct sk_buff *)buffer->skb);
netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
"TX queue %d transmission id %x complete\n",
tx_queue->queue, tx_queue->read_count);
}
buffer->len = 0;
buffer->flags = 0;
}
unsigned int ef4_tx_max_skb_descs(struct ef4_nic *efx)
{
/* This is probably too much since we don't have any TSO support;
* it's a left-over from when we had Software TSO. But it's safer
* to leave it as-is than try to determine a new bound.
*/
/* Header and payload descriptor for each output segment, plus
* one for every input fragment boundary within a segment
*/
unsigned int max_descs = EF4_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
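/* For illustration: with, say, EF4_TSO_MAX_SEGS == 100 and
 * MAX_SKB_FRAGS == 17 (assumed typical values), this starts at 217
 * descriptors before the adjustments below. */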
/* Possibly one more per segment for the alignment workaround,
* or for option descriptors
*/
if (EF4_WORKAROUND_5391(efx))
max_descs += EF4_TSO_MAX_SEGS;
/* Possibly more for PCIe page boundaries within input fragments */
if (PAGE_SIZE > EF4_PAGE_SIZE)
max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
DIV_ROUND_UP(GSO_MAX_SIZE, EF4_PAGE_SIZE));
return max_descs;
}
static void ef4_tx_maybe_stop_queue(struct ef4_tx_queue *txq1)
{
/* We need to consider both queues that the net core sees as one */
struct ef4_tx_queue *txq2 = ef4_tx_queue_partner(txq1);
struct ef4_nic *efx = txq1->efx;
unsigned int fill_level;
fill_level = max(txq1->insert_count - txq1->old_read_count,
txq2->insert_count - txq2->old_read_count);
if (likely(fill_level < efx->txq_stop_thresh))
return;
/* We used the stale old_read_count above, which gives us a
* pessimistic estimate of the fill level (which may even
* validly be >= efx->txq_entries). Now try again using
* read_count (more likely to be a cache miss).
*
* If we read read_count and then conditionally stop the
* queue, it is possible for the completion path to race with
* us and complete all outstanding descriptors in the middle,
* after which there will be no more completions to wake it.
* Therefore we stop the queue first, then read read_count
* (with a memory barrier to ensure the ordering), then
* restart the queue if the fill level turns out to be low
* enough.
*/
netif_tx_stop_queue(txq1->core_txq);
smp_mb();
txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
fill_level = max(txq1->insert_count - txq1->old_read_count,
txq2->insert_count - txq2->old_read_count);
EF4_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
if (likely(fill_level < efx->txq_stop_thresh)) {
smp_mb();
if (likely(!efx->loopback_selftest))
netif_tx_start_queue(txq1->core_txq);
}
}
static int ef4_enqueue_skb_copy(struct ef4_tx_queue *tx_queue,
struct sk_buff *skb)
{
unsigned int min_len = tx_queue->tx_min_size;
unsigned int copy_len = skb->len;
struct ef4_tx_buffer *buffer;
u8 *copy_buffer;
int rc;
EF4_BUG_ON_PARANOID(copy_len > EF4_TX_CB_SIZE);
buffer = ef4_tx_queue_get_insert_buffer(tx_queue);
copy_buffer = ef4_tx_get_copy_buffer(tx_queue, buffer);
if (unlikely(!copy_buffer))
return -ENOMEM;
rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
EF4_WARN_ON_PARANOID(rc);
if (unlikely(copy_len < min_len)) {
memset(copy_buffer + copy_len, 0, min_len - copy_len);
buffer->len = min_len;
} else {
buffer->len = copy_len;
}
buffer->skb = skb;
buffer->flags = EF4_TX_BUF_SKB;
++tx_queue->insert_count;
return rc;
}
static struct ef4_tx_buffer *ef4_tx_map_chunk(struct ef4_tx_queue *tx_queue,
dma_addr_t dma_addr,
size_t len)
{
const struct ef4_nic_type *nic_type = tx_queue->efx->type;
struct ef4_tx_buffer *buffer;
unsigned int dma_len;
/* Map the fragment taking account of NIC-dependent DMA limits. */
do {
buffer = ef4_tx_queue_get_insert_buffer(tx_queue);
dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
buffer->len = dma_len;
buffer->dma_addr = dma_addr;
buffer->flags = EF4_TX_BUF_CONT;
len -= dma_len;
dma_addr += dma_len;
++tx_queue->insert_count;
} while (len);
return buffer;
}
/* Map all data from an SKB for DMA and create descriptors on the queue.
*/
static int ef4_tx_map_data(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
{
struct ef4_nic *efx = tx_queue->efx;
struct device *dma_dev = &efx->pci_dev->dev;
unsigned int frag_index, nr_frags;
dma_addr_t dma_addr, unmap_addr;
unsigned short dma_flags;
size_t len, unmap_len;
nr_frags = skb_shinfo(skb)->nr_frags;
frag_index = 0;
/* Map header data. */
len = skb_headlen(skb);
dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
dma_flags = EF4_TX_BUF_MAP_SINGLE;
unmap_len = len;
unmap_addr = dma_addr;
if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
return -EIO;
/* Add descriptors for each fragment. */
do {
struct ef4_tx_buffer *buffer;
skb_frag_t *fragment;
buffer = ef4_tx_map_chunk(tx_queue, dma_addr, len);
/* The final descriptor for a fragment is responsible for
* unmapping the whole fragment.
*/
buffer->flags = EF4_TX_BUF_CONT | dma_flags;
buffer->unmap_len = unmap_len;
buffer->dma_offset = buffer->dma_addr - unmap_addr;
if (frag_index >= nr_frags) {
/* Store SKB details with the final buffer for
* the completion.
*/
buffer->skb = skb;
buffer->flags = EF4_TX_BUF_SKB | dma_flags;
return 0;
}
/* Move on to the next fragment. */
fragment = &skb_shinfo(skb)->frags[frag_index++];
len = skb_frag_size(fragment);
dma_addr = skb_frag_dma_map(dma_dev, fragment,
0, len, DMA_TO_DEVICE);
dma_flags = 0;
unmap_len = len;
unmap_addr = dma_addr;
if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
return -EIO;
} while (1);
}
/* Remove buffers put into a tx_queue. The buffers must not have an
* skb attached.
*/
static void ef4_enqueue_unwind(struct ef4_tx_queue *tx_queue)
{
struct ef4_tx_buffer *buffer;
/* Work backwards until we hit the original insert pointer value */
while (tx_queue->insert_count != tx_queue->write_count) {
--tx_queue->insert_count;
buffer = __ef4_tx_queue_get_insert_buffer(tx_queue);
ef4_dequeue_buffer(tx_queue, buffer, NULL, NULL);
}
}
/*
* Add a socket buffer to a TX queue
*
* This maps all fragments of a socket buffer for DMA and adds them to
* the TX queue. The queue's insert pointer will be incremented by
* the number of fragments in the socket buffer.
*
* If any DMA mapping fails, any mapped fragments will be unmapped and
* the queue's insert pointer will be restored to its original value.
*
* This function is split out from ef4_hard_start_xmit to allow the
* loopback test to direct packets via specific TX queues.
*
* Returns NETDEV_TX_OK.
* You must hold netif_tx_lock() to call this function.
*/
netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
{
bool data_mapped = false;
unsigned int skb_len;
skb_len = skb->len;
EF4_WARN_ON_PARANOID(skb_is_gso(skb));
if (skb_len < tx_queue->tx_min_size ||
(skb->data_len && skb_len <= EF4_TX_CB_SIZE)) {
/* Pad short packets or coalesce short fragmented packets. */
if (ef4_enqueue_skb_copy(tx_queue, skb))
goto err;
tx_queue->cb_packets++;
data_mapped = true;
}
/* Map for DMA and create descriptors if we haven't done so already. */
if (!data_mapped && (ef4_tx_map_data(tx_queue, skb)))
goto err;
/* Update BQL */
netdev_tx_sent_queue(tx_queue->core_txq, skb_len);
/* Pass off to hardware */
if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
struct ef4_tx_queue *txq2 = ef4_tx_queue_partner(tx_queue);
/* There could be packets left on the partner queue if those
* SKBs had skb->xmit_more set. If we do not push those they
* could be left for a long time and cause a netdev watchdog timeout.
*/
if (txq2->xmit_more_available)
ef4_nic_push_buffers(txq2);
ef4_nic_push_buffers(tx_queue);
} else {
tx_queue->xmit_more_available = skb->xmit_more;
}
tx_queue->tx_packets++;
ef4_tx_maybe_stop_queue(tx_queue);
return NETDEV_TX_OK;
err:
ef4_enqueue_unwind(tx_queue);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
/* Remove packets from the TX queue
*
* This removes packets from the TX queue, up to and including the
* specified index.
*/
static void ef4_dequeue_buffers(struct ef4_tx_queue *tx_queue,
unsigned int index,
unsigned int *pkts_compl,
unsigned int *bytes_compl)
{
struct ef4_nic *efx = tx_queue->efx;
unsigned int stop_index, read_ptr;
stop_index = (index + 1) & tx_queue->ptr_mask;
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
while (read_ptr != stop_index) {
struct ef4_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
if (!(buffer->flags & EF4_TX_BUF_OPTION) &&
unlikely(buffer->len == 0)) {
netif_err(efx, tx_err, efx->net_dev,
"TX queue %d spurious TX completion id %x\n",
tx_queue->queue, read_ptr);
ef4_schedule_reset(efx, RESET_TYPE_TX_SKIP);
return;
}
ef4_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
++tx_queue->read_count;
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
}
}
/* Initiate a packet transmission. We use one channel per CPU
* (sharing when we have more CPUs than channels). On Falcon, the TX
* completion events will be directed back to the CPU that transmitted
* the packet, which should be cache-efficient.
*
* Context: non-blocking.
* Note that returning anything other than NETDEV_TX_OK will cause the
* OS to free the skb.
*/
netdev_tx_t ef4_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev)
{
struct ef4_nic *efx = netdev_priv(net_dev);
struct ef4_tx_queue *tx_queue;
unsigned index, type;
EF4_WARN_ON_PARANOID(!netif_device_present(net_dev));
index = skb_get_queue_mapping(skb);
type = skb->ip_summed == CHECKSUM_PARTIAL ? EF4_TXQ_TYPE_OFFLOAD : 0;
if (index >= efx->n_tx_channels) {
index -= efx->n_tx_channels;
type |= EF4_TXQ_TYPE_HIGHPRI;
}
tx_queue = ef4_get_tx_queue(efx, index, type);
return ef4_enqueue_skb(tx_queue, skb);
}
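/* Illustrative mapping (assuming, say, 4 TX channels): core queues 0-3
 * select the normal-priority queue pair of channels 0-3 and core queues
 * 4-7 their high-priority pair; within a pair, the checksum-offload flag
 * picks between the EF4_TXQ_TYPE_OFFLOAD queue and its partner. */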
void ef4_init_tx_queue_core_txq(struct ef4_tx_queue *tx_queue)
{
struct ef4_nic *efx = tx_queue->efx;
/* Must be inverse of queue lookup in ef4_hard_start_xmit() */
tx_queue->core_txq =
netdev_get_tx_queue(efx->net_dev,
tx_queue->queue / EF4_TXQ_TYPES +
((tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI) ?
efx->n_tx_channels : 0));
}
int ef4_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
struct tc_to_netdev *ntc)
{
struct ef4_nic *efx = netdev_priv(net_dev);
struct ef4_channel *channel;
struct ef4_tx_queue *tx_queue;
unsigned tc, num_tc;
int rc;
if (ntc->type != TC_SETUP_MQPRIO)
return -EINVAL;
num_tc = ntc->tc;
if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0 || num_tc > EF4_MAX_TX_TC)
return -EINVAL;
if (num_tc == net_dev->num_tc)
return 0;
for (tc = 0; tc < num_tc; tc++) {
net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
}
if (num_tc > net_dev->num_tc) {
/* Initialise high-priority queues as necessary */
ef4_for_each_channel(channel, efx) {
ef4_for_each_possible_channel_tx_queue(tx_queue,
channel) {
if (!(tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI))
continue;
if (!tx_queue->buffer) {
rc = ef4_probe_tx_queue(tx_queue);
if (rc)
return rc;
}
if (!tx_queue->initialised)
ef4_init_tx_queue(tx_queue);
ef4_init_tx_queue_core_txq(tx_queue);
}
}
} else {
/* Reduce number of classes before number of queues */
net_dev->num_tc = num_tc;
}
rc = netif_set_real_num_tx_queues(net_dev,
max_t(int, num_tc, 1) *
efx->n_tx_channels);
if (rc)
return rc;
/* Do not destroy high-priority queues when they become
* unused. We would have to flush them first, and it is
* fairly difficult to flush a subset of TX queues. Leave
* it to ef4_fini_channels().
*/
net_dev->num_tc = num_tc;
return 0;
}
void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index)
{
unsigned fill_level;
struct ef4_nic *efx = tx_queue->efx;
struct ef4_tx_queue *txq2;
unsigned int pkts_compl = 0, bytes_compl = 0;
EF4_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
ef4_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
tx_queue->pkts_compl += pkts_compl;
tx_queue->bytes_compl += bytes_compl;
if (pkts_compl > 1)
++tx_queue->merge_events;
/* See if we need to restart the netif queue. This memory
* barrier ensures that we write read_count (inside
* ef4_dequeue_buffers()) before reading the queue status.
*/
smp_mb();
if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
likely(efx->port_enabled) &&
likely(netif_device_present(efx->net_dev))) {
txq2 = ef4_tx_queue_partner(tx_queue);
fill_level = max(tx_queue->insert_count - tx_queue->read_count,
txq2->insert_count - txq2->read_count);
if (fill_level <= efx->txq_wake_thresh)
netif_tx_wake_queue(tx_queue->core_txq);
}
/* Check whether the hardware queue is now empty */
if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
if (tx_queue->read_count == tx_queue->old_write_count) {
smp_mb();
tx_queue->empty_read_count =
tx_queue->read_count | EF4_EMPTY_COUNT_VALID;
}
}
}
static unsigned int ef4_tx_cb_page_count(struct ef4_tx_queue *tx_queue)
{
return DIV_ROUND_UP(tx_queue->ptr_mask + 1, PAGE_SIZE >> EF4_TX_CB_ORDER);
}
int ef4_probe_tx_queue(struct ef4_tx_queue *tx_queue)
{
struct ef4_nic *efx = tx_queue->efx;
unsigned int entries;
int rc;
/* Create the smallest power-of-two aligned ring */
entries = max(roundup_pow_of_two(efx->txq_entries), EF4_MIN_DMAQ_SIZE);
EF4_BUG_ON_PARANOID(entries > EF4_MAX_DMAQ_SIZE);
tx_queue->ptr_mask = entries - 1;
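/* e.g. a request for 512 entries (assuming EF4_MIN_DMAQ_SIZE == 512)
 * keeps 512, already a power of two, and ptr_mask == 511; free-running
 * counters such as insert_count and read_count are then mapped to ring
 * slots by ANDing with ptr_mask rather than by explicit wrapping. */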
netif_dbg(efx, probe, efx->net_dev,
"creating TX queue %d size %#x mask %#x\n",
tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
/* Allocate software ring */
tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
GFP_KERNEL);
if (!tx_queue->buffer)
return -ENOMEM;
tx_queue->cb_page = kcalloc(ef4_tx_cb_page_count(tx_queue),
sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
if (!tx_queue->cb_page) {
rc = -ENOMEM;
goto fail1;
}
/* Allocate hardware ring */
rc = ef4_nic_probe_tx(tx_queue);
if (rc)
goto fail2;
return 0;
fail2:
kfree(tx_queue->cb_page);
tx_queue->cb_page = NULL;
fail1:
kfree(tx_queue->buffer);
tx_queue->buffer = NULL;
return rc;
}
void ef4_init_tx_queue(struct ef4_tx_queue *tx_queue)
{
struct ef4_nic *efx = tx_queue->efx;
netif_dbg(efx, drv, efx->net_dev,
"initialising TX queue %d\n", tx_queue->queue);
tx_queue->insert_count = 0;
tx_queue->write_count = 0;
tx_queue->old_write_count = 0;
tx_queue->read_count = 0;
tx_queue->old_read_count = 0;
tx_queue->empty_read_count = 0 | EF4_EMPTY_COUNT_VALID;
tx_queue->xmit_more_available = false;
/* Some older hardware requires Tx writes larger than 32. */
tx_queue->tx_min_size = EF4_WORKAROUND_15592(efx) ? 33 : 0;
/* Set up TX descriptor ring */
ef4_nic_init_tx(tx_queue);
tx_queue->initialised = true;
}
void ef4_fini_tx_queue(struct ef4_tx_queue *tx_queue)
{
struct ef4_tx_buffer *buffer;
netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
"shutting down TX queue %d\n", tx_queue->queue);
if (!tx_queue->buffer)
return;
/* Free any buffers left in the ring */
while (tx_queue->read_count != tx_queue->write_count) {
unsigned int pkts_compl = 0, bytes_compl = 0;
buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
ef4_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
++tx_queue->read_count;
}
tx_queue->xmit_more_available = false;
netdev_tx_reset_queue(tx_queue->core_txq);
}
void ef4_remove_tx_queue(struct ef4_tx_queue *tx_queue)
{
int i;
if (!tx_queue->buffer)
return;
netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
"destroying TX queue %d\n", tx_queue->queue);
ef4_nic_remove_tx(tx_queue);
if (tx_queue->cb_page) {
for (i = 0; i < ef4_tx_cb_page_count(tx_queue); i++)
ef4_nic_free_buffer(tx_queue->efx,
&tx_queue->cb_page[i]);
kfree(tx_queue->cb_page);
tx_queue->cb_page = NULL;
}
kfree(tx_queue->buffer);
tx_queue->buffer = NULL;
}

View File

@ -0,0 +1,27 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2015 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EF4_TX_H
#define EF4_TX_H
#include <linux/types.h>
/* Driver internal tx-path related declarations. */
unsigned int ef4_tx_limit_len(struct ef4_tx_queue *tx_queue,
dma_addr_t dma_addr, unsigned int len);
u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue,
struct ef4_tx_buffer *buffer, size_t len);
int ef4_enqueue_skb_tso(struct ef4_tx_queue *tx_queue, struct sk_buff *skb,
bool *data_mapped);
#endif /* EF4_TX_H */

View File

@ -158,8 +158,8 @@
struct txc43128_data {
unsigned long bug10934_timer;
enum efx_phy_mode phy_mode;
enum efx_loopback_mode loopback_mode;
enum ef4_phy_mode phy_mode;
enum ef4_loopback_mode loopback_mode;
};
/* The PHY sometimes needs a reset to bring the link back up. So long as
@ -168,32 +168,32 @@ struct txc43128_data {
#define BUG10934_RESET_INTERVAL (5 * HZ)
/* Perform a reset that doesn't clear configuration changes */
static void txc_reset_logic(struct efx_nic *efx);
static void txc_reset_logic(struct ef4_nic *efx);
/* Set the output value of a gpio */
void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int on)
void falcon_txc_set_gpio_val(struct ef4_nic *efx, int pin, int on)
{
efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_OUTPUT, 1 << pin, on);
ef4_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_OUTPUT, 1 << pin, on);
}
/* Set up the GPIO direction register */
void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir)
void falcon_txc_set_gpio_dir(struct ef4_nic *efx, int pin, int dir)
{
efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_DIR, 1 << pin, dir);
ef4_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_DIR, 1 << pin, dir);
}
/* Reset the PMA/PMD MMD. The documentation is explicit that this does a
* global reset (it's less clear what reset of other MMDs does).*/
static int txc_reset_phy(struct efx_nic *efx)
static int txc_reset_phy(struct ef4_nic *efx)
{
int rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PMAPMD,
int rc = ef4_mdio_reset_mmd(efx, MDIO_MMD_PMAPMD,
TXC_MAX_RESET_TIME / TXC_RESET_WAIT,
TXC_RESET_WAIT);
if (rc < 0)
goto fail;
/* Check that all the MMDs we expect are present and responding. */
rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS);
rc = ef4_mdio_check_mmds(efx, TXC_REQUIRED_DEVS);
if (rc < 0)
goto fail;
@ -205,28 +205,28 @@ fail:
}
/* Run a single BIST on one MMD */
static int txc_bist_one(struct efx_nic *efx, int mmd, int test)
static int txc_bist_one(struct ef4_nic *efx, int mmd, int test)
{
int ctrl, bctl;
int lane;
int rc = 0;
/* Set PMA to test into loopback using Mt Diablo reg as per app note */
ctrl = efx_mdio_read(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL);
ctrl = ef4_mdio_read(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL);
ctrl |= (1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
efx_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);
ef4_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);
/* The BIST app. note lists these as 3 distinct steps. */
/* Set the BIST type */
bctl = (test << TXC_BIST_CTRL_TYPE_LBN);
efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
ef4_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
/* Set the BSTEN bit in the BIST Control register to enable */
bctl |= (1 << TXC_BIST_CTRL_ENAB_LBN);
efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
ef4_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
/* Set the BSTRT bit in the BIST Control register */
efx_mdio_write(efx, mmd, TXC_BIST_CTL,
ef4_mdio_write(efx, mmd, TXC_BIST_CTL,
bctl | (1 << TXC_BIST_CTRL_STRT_LBN));
/* Wait. */
@ -234,22 +234,22 @@ static int txc_bist_one(struct efx_nic *efx, int mmd, int test)
/* Set the BSTOP bit in the BIST Control register */
bctl |= (1 << TXC_BIST_CTRL_STOP_LBN);
efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
ef4_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
/* The STOP bit should go off when things have stopped */
while (bctl & (1 << TXC_BIST_CTRL_STOP_LBN))
bctl = efx_mdio_read(efx, mmd, TXC_BIST_CTL);
bctl = ef4_mdio_read(efx, mmd, TXC_BIST_CTL);
/* Check all the error counts are 0 and all the frame counts are
non-zero */
for (lane = 0; lane < 4; lane++) {
int count = efx_mdio_read(efx, mmd, TXC_BIST_RX0ERRCNT + lane);
int count = ef4_mdio_read(efx, mmd, TXC_BIST_RX0ERRCNT + lane);
if (count != 0) {
netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. "
"Lane %d had %d errs\n", lane, count);
rc = -EIO;
}
count = efx_mdio_read(efx, mmd, TXC_BIST_RX0FRMCNT + lane);
count = ef4_mdio_read(efx, mmd, TXC_BIST_RX0FRMCNT + lane);
if (count == 0) {
netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. "
"Lane %d got 0 frames\n", lane);
@ -261,23 +261,23 @@ static int txc_bist_one(struct efx_nic *efx, int mmd, int test)
netif_info(efx, hw, efx->net_dev, TXCNAME": BIST pass\n");
/* Disable BIST */
efx_mdio_write(efx, mmd, TXC_BIST_CTL, 0);
ef4_mdio_write(efx, mmd, TXC_BIST_CTL, 0);
/* Turn off loopback */
ctrl &= ~(1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
efx_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);
ef4_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);
return rc;
}
static int txc_bist(struct efx_nic *efx)
static int txc_bist(struct ef4_nic *efx)
{
return txc_bist_one(efx, MDIO_MMD_PCS, TXC_BIST_CTRL_TYPE_TSD);
}
/* Push the non-configurable defaults into the PHY. This must be
* done after every full reset */
static void txc_apply_defaults(struct efx_nic *efx)
static void txc_apply_defaults(struct ef4_nic *efx)
{
int mctrl;
@ -287,33 +287,33 @@ static void txc_apply_defaults(struct efx_nic *efx)
* saves a picowatt or two */
/* Turn off preemphasis */
efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE0, TXC_ATXPRE_NONE);
efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE1, TXC_ATXPRE_NONE);
ef4_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE0, TXC_ATXPRE_NONE);
ef4_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE1, TXC_ATXPRE_NONE);
/* Turn down the amplitude */
efx_mdio_write(efx, MDIO_MMD_PHYXS,
ef4_mdio_write(efx, MDIO_MMD_PHYXS,
TXC_ALRGS_ATXAMP0, TXC_ATXAMP_0820_BOTH);
efx_mdio_write(efx, MDIO_MMD_PHYXS,
ef4_mdio_write(efx, MDIO_MMD_PHYXS,
TXC_ALRGS_ATXAMP1, TXC_ATXAMP_0820_BOTH);
/* Set the line side amplitude and preemphasis to the databook
* defaults as an erratum causes them to be 0 on at least some
* PHY rev.s */
efx_mdio_write(efx, MDIO_MMD_PMAPMD,
ef4_mdio_write(efx, MDIO_MMD_PMAPMD,
TXC_ALRGS_ATXPRE0, TXC_ATXPRE_DEFAULT);
efx_mdio_write(efx, MDIO_MMD_PMAPMD,
ef4_mdio_write(efx, MDIO_MMD_PMAPMD,
TXC_ALRGS_ATXPRE1, TXC_ATXPRE_DEFAULT);
efx_mdio_write(efx, MDIO_MMD_PMAPMD,
ef4_mdio_write(efx, MDIO_MMD_PMAPMD,
TXC_ALRGS_ATXAMP0, TXC_ATXAMP_DEFAULT);
efx_mdio_write(efx, MDIO_MMD_PMAPMD,
ef4_mdio_write(efx, MDIO_MMD_PMAPMD,
TXC_ALRGS_ATXAMP1, TXC_ATXAMP_DEFAULT);
/* Set up the LEDs */
mctrl = efx_mdio_read(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL);
mctrl = ef4_mdio_read(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL);
/* Set the Green and Red LEDs to their default modes */
mctrl &= ~((1 << TXC_MCTL_TXLED_LBN) | (1 << TXC_MCTL_RXLED_LBN));
efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL, mctrl);
ef4_mdio_write(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL, mctrl);
/* Databook recommends doing this after configuration changes */
txc_reset_logic(efx);
@ -321,7 +321,7 @@ static void txc_apply_defaults(struct efx_nic *efx)
falcon_board(efx)->type->init_phy(efx);
}
static int txc43128_phy_probe(struct efx_nic *efx)
static int txc43128_phy_probe(struct ef4_nic *efx)
{
struct txc43128_data *phy_data;
@ -341,7 +341,7 @@ static int txc43128_phy_probe(struct efx_nic *efx)
}
/* Initialisation entry point for this PHY driver */
static int txc43128_phy_init(struct efx_nic *efx)
static int txc43128_phy_init(struct ef4_nic *efx)
{
int rc;
@ -359,28 +359,28 @@ static int txc43128_phy_init(struct efx_nic *efx)
}
/* Set the lane power down state in the global registers */
static void txc_glrgs_lane_power(struct efx_nic *efx, int mmd)
static void txc_glrgs_lane_power(struct ef4_nic *efx, int mmd)
{
int pd = (1 << TXC_GLCMD_L01PD_LBN) | (1 << TXC_GLCMD_L23PD_LBN);
int ctl = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
int ctl = ef4_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
if (!(efx->phy_mode & PHY_MODE_LOW_POWER))
ctl &= ~pd;
else
ctl |= pd;
efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, ctl);
ef4_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, ctl);
}
/* Set the lane power down state in the analog control registers */
static void txc_analog_lane_power(struct efx_nic *efx, int mmd)
static void txc_analog_lane_power(struct ef4_nic *efx, int mmd)
{
int txpd = (1 << TXC_ATXCTL_TXPD3_LBN) | (1 << TXC_ATXCTL_TXPD2_LBN)
| (1 << TXC_ATXCTL_TXPD1_LBN) | (1 << TXC_ATXCTL_TXPD0_LBN);
int rxpd = (1 << TXC_ARXCTL_RXPD3_LBN) | (1 << TXC_ARXCTL_RXPD2_LBN)
| (1 << TXC_ARXCTL_RXPD1_LBN) | (1 << TXC_ARXCTL_RXPD0_LBN);
int txctl = efx_mdio_read(efx, mmd, TXC_ALRGS_ATXCTL);
int rxctl = efx_mdio_read(efx, mmd, TXC_ALRGS_ARXCTL);
int txctl = ef4_mdio_read(efx, mmd, TXC_ALRGS_ATXCTL);
int rxctl = ef4_mdio_read(efx, mmd, TXC_ALRGS_ARXCTL);
if (!(efx->phy_mode & PHY_MODE_LOW_POWER)) {
txctl &= ~txpd;
@ -390,14 +390,14 @@ static void txc_analog_lane_power(struct efx_nic *efx, int mmd)
rxctl |= rxpd;
}
efx_mdio_write(efx, mmd, TXC_ALRGS_ATXCTL, txctl);
efx_mdio_write(efx, mmd, TXC_ALRGS_ARXCTL, rxctl);
ef4_mdio_write(efx, mmd, TXC_ALRGS_ATXCTL, txctl);
ef4_mdio_write(efx, mmd, TXC_ALRGS_ARXCTL, rxctl);
}
static void txc_set_power(struct efx_nic *efx)
static void txc_set_power(struct ef4_nic *efx)
{
/* According to the data book, all the MMDs can do low power */
efx_mdio_set_mmds_lpower(efx,
ef4_mdio_set_mmds_lpower(efx,
!!(efx->phy_mode & PHY_MODE_LOW_POWER),
TXC_REQUIRED_DEVS);
@ -411,15 +411,15 @@ static void txc_set_power(struct efx_nic *efx)
txc_analog_lane_power(efx, MDIO_MMD_PHYXS);
}
static void txc_reset_logic_mmd(struct efx_nic *efx, int mmd)
static void txc_reset_logic_mmd(struct ef4_nic *efx, int mmd)
{
int val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
int val = ef4_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
int tries = 50;
val |= (1 << TXC_GLCMD_LMTSWRST_LBN);
efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val);
ef4_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val);
while (--tries) {
val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
val = ef4_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN)))
break;
udelay(1);
@ -431,7 +431,7 @@ static void txc_reset_logic_mmd(struct efx_nic *efx, int mmd)
/* Perform a logic reset. This preserves the configuration registers
* and is needed for some configuration changes to take effect */
static void txc_reset_logic(struct efx_nic *efx)
static void txc_reset_logic(struct ef4_nic *efx)
{
/* The data sheet claims we can do the logic reset on either the
* PCS or the PHYXS and the result is a reset of both host- and
@ -439,15 +439,15 @@ static void txc_reset_logic(struct efx_nic *efx)
txc_reset_logic_mmd(efx, MDIO_MMD_PCS);
}
static bool txc43128_phy_read_link(struct efx_nic *efx)
static bool txc43128_phy_read_link(struct ef4_nic *efx)
{
return efx_mdio_links_ok(efx, TXC_REQUIRED_DEVS);
return ef4_mdio_links_ok(efx, TXC_REQUIRED_DEVS);
}
static int txc43128_phy_reconfigure(struct efx_nic *efx)
static int txc43128_phy_reconfigure(struct ef4_nic *efx)
{
struct txc43128_data *phy_data = efx->phy_data;
enum efx_phy_mode mode_change = efx->phy_mode ^ phy_data->phy_mode;
enum ef4_phy_mode mode_change = efx->phy_mode ^ phy_data->phy_mode;
bool loop_change = LOOPBACK_CHANGED(phy_data, efx, TXC_LOOPBACKS);
if (efx->phy_mode & mode_change & PHY_MODE_TX_DISABLED) {
@ -457,8 +457,8 @@ static int txc43128_phy_reconfigure(struct efx_nic *efx)
mode_change &= ~PHY_MODE_TX_DISABLED;
}
efx_mdio_transmit_disable(efx);
efx_mdio_phy_reconfigure(efx);
ef4_mdio_transmit_disable(efx);
ef4_mdio_phy_reconfigure(efx);
if (mode_change & PHY_MODE_LOW_POWER)
txc_set_power(efx);
@ -475,13 +475,13 @@ static int txc43128_phy_reconfigure(struct efx_nic *efx)
return 0;
}
static void txc43128_phy_fini(struct efx_nic *efx)
static void txc43128_phy_fini(struct ef4_nic *efx)
{
/* Disable link events */
efx_mdio_write(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
ef4_mdio_write(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
}
static void txc43128_phy_remove(struct efx_nic *efx)
static void txc43128_phy_remove(struct ef4_nic *efx)
{
kfree(efx->phy_data);
efx->phy_data = NULL;
@ -489,7 +489,7 @@ static void txc43128_phy_remove(struct efx_nic *efx)
/* Periodic callback: this exists mainly to poll link status as we
* don't use LASI interrupts */
static bool txc43128_phy_poll(struct efx_nic *efx)
static bool txc43128_phy_poll(struct ef4_nic *efx)
{
struct txc43128_data *data = efx->phy_data;
bool was_up = efx->link_state.up;
@ -516,14 +516,14 @@ static const char *const txc43128_test_names[] = {
"bist"
};
static const char *txc43128_test_name(struct efx_nic *efx, unsigned int index)
static const char *txc43128_test_name(struct ef4_nic *efx, unsigned int index)
{
if (index < ARRAY_SIZE(txc43128_test_names))
return txc43128_test_names[index];
return NULL;
}
static int txc43128_run_tests(struct efx_nic *efx, int *results, unsigned flags)
static int txc43128_run_tests(struct ef4_nic *efx, int *results, unsigned flags)
{
int rc;
@ -540,12 +540,12 @@ static int txc43128_run_tests(struct efx_nic *efx, int *results, unsigned flags)
return rc;
}
static void txc43128_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
static void txc43128_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd)
{
mdio45_ethtool_gset(&efx->mdio, ecmd);
}
const struct efx_phy_operations falcon_txc_phy_ops = {
const struct ef4_phy_operations falcon_txc_phy_ops = {
.probe = txc43128_phy_probe,
.init = txc43128_phy_init,
.reconfigure = txc43128_phy_reconfigure,
@ -553,8 +553,8 @@ const struct efx_phy_operations falcon_txc_phy_ops = {
.fini = txc43128_phy_fini,
.remove = txc43128_phy_remove,
.get_settings = txc43128_get_settings,
.set_settings = efx_mdio_set_settings,
.test_alive = efx_mdio_test_alive,
.set_settings = ef4_mdio_set_settings,
.test_alive = ef4_mdio_test_alive,
.run_tests = txc43128_run_tests,
.test_name = txc43128_test_name,
};

View File

@ -0,0 +1,44 @@
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EF4_WORKAROUNDS_H
#define EF4_WORKAROUNDS_H
/*
* Hardware workarounds.
* Bug numbers are from Solarflare's Bugzilla.
*/
#define EF4_WORKAROUND_FALCON_A(efx) (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1)
#define EF4_WORKAROUND_FALCON_AB(efx) (ef4_nic_rev(efx) <= EF4_REV_FALCON_B0)
#define EF4_WORKAROUND_10G(efx) 1
/* Bit-bashed I2C reads cause performance drop */
#define EF4_WORKAROUND_7884 EF4_WORKAROUND_10G
/* Truncated IPv4 packets can confuse the TX packet parser */
#define EF4_WORKAROUND_15592 EF4_WORKAROUND_FALCON_AB
/* Spurious parity errors in TSORT buffers */
#define EF4_WORKAROUND_5129 EF4_WORKAROUND_FALCON_A
/* Unaligned read request >512 bytes after aligning may break TSORT */
#define EF4_WORKAROUND_5391 EF4_WORKAROUND_FALCON_A
/* iSCSI parsing errors */
#define EF4_WORKAROUND_5583 EF4_WORKAROUND_FALCON_A
/* RX events go missing */
#define EF4_WORKAROUND_5676 EF4_WORKAROUND_FALCON_A
/* RX_RESET on A1 */
#define EF4_WORKAROUND_6555 EF4_WORKAROUND_FALCON_A
/* Increase filter depth to avoid RX_RESET */
#define EF4_WORKAROUND_7244 EF4_WORKAROUND_FALCON_A
/* Flushes may never complete */
#define EF4_WORKAROUND_7803 EF4_WORKAROUND_FALCON_AB
/* Leak overlength packets rather than free */
#define EF4_WORKAROUND_8071 EF4_WORKAROUND_FALCON_A
#endif /* EF4_WORKAROUNDS_H */
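/* Usage sketch (illustrative, mirroring existing call sites): each macro
 * evaluates per-NIC, so callers gate workaround code directly, e.g.
 *
 *	if (EF4_WORKAROUND_5391(efx) && (dma_addr & 0xf))
 *		len = min_t(unsigned int, len, 512 - (dma_addr & 0xf));
 */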

View File

@ -25,7 +25,7 @@
#include "io.h"
#include "workarounds.h"
/* Falcon-architecture (SFC4000 and SFC9000-family) support */
/* Falcon-architecture (SFC9000-family) support */
/**************************************************************************
*
@ -364,9 +364,6 @@ unsigned int efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue,
len = min(limit, len);
if (EFX_WORKAROUND_5391(tx_queue->efx) && (dma_addr & 0xf))
len = min_t(unsigned int, len, 512 - (dma_addr & 0xf));
return len;
}
@ -384,6 +381,7 @@ int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
{
int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
struct efx_nic *efx = tx_queue->efx;
efx_oword_t reg;
@ -405,37 +403,18 @@ void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
FRF_AZ_TX_DESCQ_TYPE, 0,
FRF_BZ_TX_NON_IP_DROP_DIS, 1);
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
!csum);
}
EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, !csum);
efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
tx_queue->queue);
if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
/* Only 128 bits in this register */
BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
__clear_bit_le(tx_queue->queue, &reg);
else
__set_bit_le(tx_queue->queue, &reg);
efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
}
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
EFX_POPULATE_OWORD_1(reg,
FRF_BZ_TX_PACE,
(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
FFE_BZ_TX_PACE_OFF :
FFE_BZ_TX_PACE_RESERVED);
efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
tx_queue->queue);
}
EFX_POPULATE_OWORD_1(reg,
FRF_BZ_TX_PACE,
(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
FFE_BZ_TX_PACE_OFF :
FFE_BZ_TX_PACE_RESERVED);
efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL, tx_queue->queue);
}
static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue)
@ -532,16 +511,10 @@ void efx_farch_rx_init(struct efx_rx_queue *rx_queue)
{
efx_oword_t rx_desc_ptr;
struct efx_nic *efx = rx_queue->efx;
bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
bool iscsi_digest_en = is_b0;
bool jumbo_en;
/* For kernel-mode queues in Falcon A1, the JUMBO flag enables
* DMA to continue after a PCIe page boundary (and scattering
* is not possible). In Falcon B0 and Siena, it enables
* scatter.
*/
jumbo_en = !is_b0 || efx->rx_scatter;
/* For kernel-mode queues in Siena, the JUMBO flag enables scatter. */
jumbo_en = efx->rx_scatter;
netif_dbg(efx, hw, efx->net_dev,
"RX queue %d ring in special buffers %d-%d\n",
@ -555,8 +528,8 @@ void efx_farch_rx_init(struct efx_rx_queue *rx_queue)
/* Push RX descriptor ring to card */
EFX_POPULATE_OWORD_10(rx_desc_ptr,
FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
FRF_AZ_RX_ISCSI_DDIG_EN, true,
FRF_AZ_RX_ISCSI_HDIG_EN, true,
FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
FRF_AZ_RX_DESCQ_EVQ_ID,
efx_rx_queue_channel(rx_queue)->channel,
@ -895,7 +868,7 @@ static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
struct efx_nic *efx = rx_queue->efx;
bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
bool rx_ev_frm_trunc, rx_ev_tobe_disc;
bool rx_ev_other_err, rx_ev_pause_frm;
bool rx_ev_hdr_type, rx_ev_mcast_pkt;
unsigned rx_ev_pkt_type;
@ -912,12 +885,10 @@ static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
/* Every error apart from tobe_disc and pause_frm */
rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
rx_ev_other_err = (rx_ev_tcp_udp_chksum_err |
rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
@ -951,14 +922,13 @@ static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
" [TCP_UDP_CHKSUM_ERR]" : "",
rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
rx_ev_drib_nib ? " [DRIB_NIB]" : "",
rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
rx_ev_pause_frm ? " [PAUSE]" : "");
}
#endif
/* The frame must be discarded if any of these are true. */
return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
return (rx_ev_eth_crc_err | rx_ev_frm_trunc |
rx_ev_tobe_disc | rx_ev_pause_frm) ?
EFX_RX_PKT_DISCARD : 0;
}
@ -987,8 +957,7 @@ efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
"dropped %d events (index=%d expected=%d)\n",
dropped, index, expected);
efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
efx_schedule_reset(efx, RESET_TYPE_DISABLE);
return false;
}
@ -1254,10 +1223,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
"channel %d seen DRIVER RX_RESET event. "
"Resetting.\n", channel->channel);
atomic_inc(&efx->rx_reset);
efx_schedule_reset(efx,
EFX_WORKAROUND_6555(efx) ?
RESET_TYPE_RX_RECOVERY :
RESET_TYPE_DISABLE);
efx_schedule_reset(efx, RESET_TYPE_DISABLE);
break;
case FSE_BZ_RX_DSC_ERROR_EV:
if (ev_sub_data < EFX_VI_BASE) {
@ -1394,13 +1360,11 @@ int efx_farch_ev_init(struct efx_channel *channel)
channel->channel, channel->eventq.index,
channel->eventq.index + channel->eventq.entries - 1);
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
EFX_POPULATE_OWORD_3(reg,
FRF_CZ_TIMER_Q_EN, 1,
FRF_CZ_HOST_NOTIFY_MODE, 0,
FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
}
EFX_POPULATE_OWORD_3(reg,
FRF_CZ_TIMER_Q_EN, 1,
FRF_CZ_HOST_NOTIFY_MODE, 0,
FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
/* Pin event queue buffer */
efx_init_special_buffer(efx, &channel->eventq);
@ -1428,8 +1392,7 @@ void efx_farch_ev_fini(struct efx_channel *channel)
EFX_ZERO_OWORD(reg);
efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
channel->channel);
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
/* Unpin event queue */
efx_fini_special_buffer(efx, &channel->eventq);
@ -1503,7 +1466,6 @@ int efx_farch_irq_test_generate(struct efx_nic *efx)
*/
irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
efx_oword_t *int_ker = efx->irq_status.addr;
efx_oword_t fatal_intr;
int error, mem_perr;
@ -1529,8 +1491,6 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
/* Disable both devices */
pci_clear_master(efx->pci_dev);
if (efx_nic_is_dual_func(efx))
pci_clear_master(nic_data->pci_dev2);
efx_farch_irq_disable_master(efx);
/* Count errors and reset or disable the NIC accordingly */
@ -1677,8 +1637,6 @@ void efx_farch_rx_push_indir_table(struct efx_nic *efx)
size_t i = 0;
efx_dword_t dword;
BUG_ON(efx_nic_rev(efx) < EFX_REV_FALCON_B0);
BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
FR_BZ_RX_INDIRECTION_TBL_ROWS);
@ -1806,8 +1764,7 @@ void efx_farch_init_common(struct efx_nic *efx)
FRF_AZ_ILL_ADR_INT_KER_EN, 1,
FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
EFX_INVERT_OWORD(temp);
efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
@ -1827,22 +1784,18 @@ void efx_farch_init_common(struct efx_nic *efx)
/* Disable hardware watchdog which can misfire */
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
/* Squash TX of packets of 16 bytes or less */
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
EFX_POPULATE_OWORD_4(temp,
/* Default values */
FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
FRF_BZ_TX_PACE_SB_AF, 0xb,
FRF_BZ_TX_PACE_FB_BASE, 0,
/* Allow large pace values in the
* fast bin. */
FRF_BZ_TX_PACE_BIN_TH,
FFE_BZ_TX_PACE_RESERVED);
efx_writeo(efx, &temp, FR_BZ_TX_PACE);
}
EFX_POPULATE_OWORD_4(temp,
/* Default values */
FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
FRF_BZ_TX_PACE_SB_AF, 0xb,
FRF_BZ_TX_PACE_FB_BASE, 0,
/* Allow large pace values in the fast bin. */
FRF_BZ_TX_PACE_BIN_TH,
FFE_BZ_TX_PACE_RESERVED);
efx_writeo(efx, &temp, FR_BZ_TX_PACE);
}
/**************************************************************************
@ -2026,7 +1979,7 @@ static void efx_farch_filter_push_rx_config(struct efx_nic *efx)
!!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
EFX_FILTER_FLAG_RX_SCATTER));
} else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
} else {
/* We don't expose 'default' filters because unmatched
* packets always go to the queue number found in the
* RSS table. But we still need to set the RX scatter
@ -2834,31 +2787,27 @@ int efx_farch_filter_table_probe(struct efx_nic *efx)
return -ENOMEM;
efx->filter_state = state;
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
table->offset = FR_BZ_RX_FILTER_TBL0;
table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
table->step = FR_BZ_RX_FILTER_TBL0_STEP;
}
table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
table->offset = FR_BZ_RX_FILTER_TBL0;
table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
table->step = FR_BZ_RX_FILTER_TBL0_STEP;
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;
table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;
table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
}
table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
table = &state->table[table_id];

View File

@ -15,7 +15,6 @@
#include "io.h"
#include "farch_regs.h"
#include "mcdi_pcol.h"
#include "phy.h"
/**************************************************************************
*

View File

@ -13,7 +13,6 @@
#include <linux/slab.h>
#include "efx.h"
#include "phy.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "nic.h"

View File

@ -41,7 +41,7 @@
*
**************************************************************************/
#define EFX_DRIVER_VERSION "4.0"
#define EFX_DRIVER_VERSION "4.1"
#ifdef DEBUG
#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
@ -191,7 +191,6 @@ struct efx_tx_buffer {
* Size of the region is efx_piobuf_size.
* @piobuf_offset: Buffer offset to be specified in PIO descriptors
* @initialised: Has hardware queue been initialised?
* @tx_min_size: Minimum transmit size for this queue. Depends on HW.
* @handle_tso: TSO xmit preparation handler. Sets up the TSO metadata and
* may also map tx data, depending on the nature of the TSO implementation.
* @read_count: Current read pointer.
@ -242,7 +241,6 @@ struct efx_tx_queue {
void __iomem *piobuf;
unsigned int piobuf_offset;
bool initialised;
unsigned int tx_min_size;
/* Function pointers used in the fast path. */
int (*handle_tso)(struct efx_tx_queue*, struct sk_buff*, bool *);

View File

@ -18,11 +18,8 @@
#include "mcdi.h"
enum {
EFX_REV_FALCON_A0 = 0,
EFX_REV_FALCON_A1 = 1,
EFX_REV_FALCON_B0 = 2,
EFX_REV_SIENA_A0 = 3,
EFX_REV_HUNT_A0 = 4,
EFX_REV_SIENA_A0 = 0,
EFX_REV_HUNT_A0 = 1,
};
static inline int efx_nic_rev(struct efx_nic *efx)
@ -32,12 +29,6 @@ static inline int efx_nic_rev(struct efx_nic *efx)
u32 efx_farch_fpga_ver(struct efx_nic *efx);
/* NIC has two interlinked PCI functions for the same port. */
static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
{
return efx_nic_rev(efx) < EFX_REV_FALCON_B0;
}
/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
unsigned int index)
@ -144,11 +135,6 @@ enum {
PHY_TYPE_SFT9001B = 10,
};
#define FALCON_XMAC_LOOPBACKS \
((1 << LOOPBACK_XGMII) | \
(1 << LOOPBACK_XGXS) | \
(1 << LOOPBACK_XAUI))
/* Alignment of PCIe DMA boundaries (4KB) */
#define EFX_PAGE_SIZE 4096
/* Size and alignment of buffer table entries (same) */
@ -161,160 +147,6 @@ enum {
GENERIC_STAT_COUNT
};
/**
* struct falcon_board_type - board operations and type information
* @id: Board type id, as found in NVRAM
* @init: Allocate resources and initialise peripheral hardware
* @init_phy: Do board-specific PHY initialisation
* @fini: Shut down hardware and free resources
* @set_id_led: Set state of identifying LED or revert to automatic function
* @monitor: Board-specific health check function
*/
struct falcon_board_type {
u8 id;
int (*init) (struct efx_nic *nic);
void (*init_phy) (struct efx_nic *efx);
void (*fini) (struct efx_nic *nic);
void (*set_id_led) (struct efx_nic *efx, enum efx_led_mode mode);
int (*monitor) (struct efx_nic *nic);
};
/**
* struct falcon_board - board information
* @type: Type of board
* @major: Major rev. ('A', 'B' ...)
* @minor: Minor rev. (0, 1, ...)
* @i2c_adap: I2C adapter for on-board peripherals
* @i2c_data: Data for bit-banging algorithm
* @hwmon_client: I2C client for hardware monitor
* @ioexp_client: I2C client for power/port control
*/
struct falcon_board {
const struct falcon_board_type *type;
int major;
int minor;
struct i2c_adapter i2c_adap;
struct i2c_algo_bit_data i2c_data;
struct i2c_client *hwmon_client, *ioexp_client;
};
/**
* struct falcon_spi_device - a Falcon SPI (Serial Peripheral Interface) device
* @device_id: Controller's id for the device
* @size: Size (in bytes)
* @addr_len: Number of address bytes in read/write commands
* @munge_address: Flag whether addresses should be munged.
* Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
* use bit 3 of the command byte as address bit A8, rather
* than having a two-byte address. If this flag is set, then
* commands should be munged in this way.
* @erase_command: Erase command (or 0 if sector erase not needed).
* @erase_size: Erase sector size (in bytes)
* Erase commands affect sectors with this size and alignment.
* This must be a power of two.
* @block_size: Write block size (in bytes).
* Write commands are limited to blocks with this size and alignment.
*/
struct falcon_spi_device {
int device_id;
unsigned int size;
unsigned int addr_len;
unsigned int munge_address:1;
u8 erase_command;
unsigned int erase_size;
unsigned int block_size;
};
static inline bool falcon_spi_present(const struct falcon_spi_device *spi)
{
return spi->size != 0;
}
enum {
FALCON_STAT_tx_bytes = GENERIC_STAT_COUNT,
FALCON_STAT_tx_packets,
FALCON_STAT_tx_pause,
FALCON_STAT_tx_control,
FALCON_STAT_tx_unicast,
FALCON_STAT_tx_multicast,
FALCON_STAT_tx_broadcast,
FALCON_STAT_tx_lt64,
FALCON_STAT_tx_64,
FALCON_STAT_tx_65_to_127,
FALCON_STAT_tx_128_to_255,
FALCON_STAT_tx_256_to_511,
FALCON_STAT_tx_512_to_1023,
FALCON_STAT_tx_1024_to_15xx,
FALCON_STAT_tx_15xx_to_jumbo,
FALCON_STAT_tx_gtjumbo,
FALCON_STAT_tx_non_tcpudp,
FALCON_STAT_tx_mac_src_error,
FALCON_STAT_tx_ip_src_error,
FALCON_STAT_rx_bytes,
FALCON_STAT_rx_good_bytes,
FALCON_STAT_rx_bad_bytes,
FALCON_STAT_rx_packets,
FALCON_STAT_rx_good,
FALCON_STAT_rx_bad,
FALCON_STAT_rx_pause,
FALCON_STAT_rx_control,
FALCON_STAT_rx_unicast,
FALCON_STAT_rx_multicast,
FALCON_STAT_rx_broadcast,
FALCON_STAT_rx_lt64,
FALCON_STAT_rx_64,
FALCON_STAT_rx_65_to_127,
FALCON_STAT_rx_128_to_255,
FALCON_STAT_rx_256_to_511,
FALCON_STAT_rx_512_to_1023,
FALCON_STAT_rx_1024_to_15xx,
FALCON_STAT_rx_15xx_to_jumbo,
FALCON_STAT_rx_gtjumbo,
FALCON_STAT_rx_bad_lt64,
FALCON_STAT_rx_bad_gtjumbo,
FALCON_STAT_rx_overflow,
FALCON_STAT_rx_symbol_error,
FALCON_STAT_rx_align_error,
FALCON_STAT_rx_length_error,
FALCON_STAT_rx_internal_error,
FALCON_STAT_rx_nodesc_drop_cnt,
FALCON_STAT_COUNT
};
/**
* struct falcon_nic_data - Falcon NIC state
* @pci_dev2: Secondary function of Falcon A
* @board: Board state and functions
* @stats: Hardware statistics
* @stats_disable_count: Nest count for disabling statistics fetches
* @stats_pending: Is there a pending DMA of MAC statistics.
* @stats_timer: A timer for regularly fetching MAC statistics.
* @spi_flash: SPI flash device
* @spi_eeprom: SPI EEPROM device
* @spi_lock: SPI bus lock
* @mdio_lock: MDIO bus lock
* @xmac_poll_required: XMAC link state needs polling
*/
struct falcon_nic_data {
struct pci_dev *pci_dev2;
struct falcon_board board;
u64 stats[FALCON_STAT_COUNT];
unsigned int stats_disable_count;
bool stats_pending;
struct timer_list stats_timer;
struct falcon_spi_device spi_flash;
struct falcon_spi_device spi_eeprom;
struct mutex spi_lock;
struct mutex mdio_lock;
bool xmac_poll_required;
};
static inline struct falcon_board *falcon_board(struct efx_nic *efx)
{
struct falcon_nic_data *data = efx->nic_data;
return &data->board;
}
enum {
SIENA_STAT_tx_bytes = GENERIC_STAT_COUNT,
SIENA_STAT_tx_good_bytes,

View File

@ -400,21 +400,10 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
*/
rx_buf->flags |= EFX_RX_PKT_DISCARD;
if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
if (net_ratelimit())
netif_err(efx, rx_err, efx->net_dev,
" RX queue %d seriously overlength "
"RX event (0x%x > 0x%x+0x%x). Leaking\n",
efx_rx_queue_index(rx_queue), len, max_len,
efx->type->rx_buffer_padding);
efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
} else {
if (net_ratelimit())
netif_err(efx, rx_err, efx->net_dev,
" RX queue %d overlength RX event "
"(0x%x > 0x%x)\n",
efx_rx_queue_index(rx_queue), len, max_len);
}
if (net_ratelimit())
netif_err(efx, rx_err, efx->net_dev,
"RX queue %d overlength RX event (%#x > %#x)\n",
efx_rx_queue_index(rx_queue), len, max_len);
efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

View File

@ -20,7 +20,6 @@
#include "nic.h"
#include "farch_regs.h"
#include "io.h"
#include "phy.h"
#include "workarounds.h"
#include "mcdi.h"
#include "mcdi_pcol.h"

View File

@ -97,10 +97,8 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
*/
unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
/* Possibly one more per segment for the alignment workaround,
* or for option descriptors
*/
if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
/* Possibly one more per segment for option descriptors */
if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
max_descs += EFX_TSO_MAX_SEGS;
/* Possibly more for PCIe page boundaries within input fragments */
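
To make the bound concrete, a rough worked example, with the constants treated as assumptions (EFX_TSO_MAX_SEGS is 100 in this driver and MAX_SKB_FRAGS is typically 17 with 4 KiB pages):

/* Rough worked example (assumed values: EFX_TSO_MAX_SEGS = 100, MAX_SKB_FRAGS = 17):
 *   base estimate:      100 * 2 + 17 = 217 descriptors
 *   EF10 option descs:  217 + 100    = 317 descriptors
 * plus up to one extra descriptor for each 4 KiB DMA boundary crossed
 * inside an input fragment.
 */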
@ -155,7 +153,6 @@ static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
struct sk_buff *skb)
{
unsigned int min_len = tx_queue->tx_min_size;
unsigned int copy_len = skb->len;
struct efx_tx_buffer *buffer;
u8 *copy_buffer;
@ -171,12 +168,7 @@ static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
EFX_WARN_ON_PARANOID(rc);
if (unlikely(copy_len < min_len)) {
memset(copy_buffer + copy_len, 0, min_len - copy_len);
buffer->len = min_len;
} else {
buffer->len = copy_len;
}
buffer->len = copy_len;
buffer->skb = skb;
buffer->flags = EFX_TX_BUF_SKB;
@ -530,8 +522,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
tx_queue->pio_packets++;
data_mapped = true;
#endif
} else if (skb_len < tx_queue->tx_min_size ||
(skb->data_len && skb_len <= EFX_TX_CB_SIZE)) {
} else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) {
/* Pad short packets or coalesce short fragmented packets. */
if (efx_enqueue_skb_copy(tx_queue, skb))
goto err;
@ -677,7 +668,7 @@ int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
num_tc = ntc->tc;
if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
if (num_tc > EFX_MAX_TX_TC)
return -EINVAL;
if (num_tc == net_dev->num_tc)
@ -837,9 +828,6 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
*/
tx_queue->handle_tso = efx_enqueue_skb_tso;
/* Some older hardware requires Tx writes larger than 32. */
tx_queue->tx_min_size = EFX_WORKAROUND_15592(efx) ? 33 : 0;
/* Set up TX descriptor ring */
efx_nic_init_tx(tx_queue);

View File

@ -15,35 +15,14 @@
* Bug numbers are from Solarflare's Bugzilla.
*/
#define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1)
#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0)
#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
#define EFX_WORKAROUND_10G(efx) 1
/* Bit-bashed I2C reads cause performance drop */
#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
/* Truncated IPv4 packets can confuse the TX packet parser */
#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB
/* Legacy interrupt storm when interrupt fifo fills */
#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
/* Spurious parity errors in TSORT buffers */
#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
/* Unaligned read request >512 bytes after aligning may break TSORT */
#define EFX_WORKAROUND_5391 EFX_WORKAROUND_FALCON_A
/* iSCSI parsing errors */
#define EFX_WORKAROUND_5583 EFX_WORKAROUND_FALCON_A
/* RX events go missing */
#define EFX_WORKAROUND_5676 EFX_WORKAROUND_FALCON_A
/* RX_RESET on A1 */
#define EFX_WORKAROUND_6555 EFX_WORKAROUND_FALCON_A
/* Increase filter depth to avoid RX_RESET */
#define EFX_WORKAROUND_7244 EFX_WORKAROUND_FALCON_A
/* Flushes may never complete */
#define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_AB
/* Leak overlength packets rather than free */
#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
/* Lockup when writing event block registers at gen2/gen3 */
#define EFX_EF10_WORKAROUND_35388(efx) \
(((struct efx_ef10_nic_data *)efx->nic_data)->workaround_35388)
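
With the Falcon entries gone, the remaining sfc workarounds fall into two patterns: rev-gated macros that compare efx_nic_rev() against a known revision (e.g. EFX_WORKAROUND_17213 for Siena), and firmware-reported flags cached in nic_data (EFX_EF10_WORKAROUND_35388 above). A minimal sketch of how the two read at a call site; the branch bodies are placeholders, not driver code:

/* Sketch only: rev-gated versus firmware-reported workaround checks. */
if (EFX_WORKAROUND_17213(efx))
	; /* avoid the legacy-interrupt FIFO storm path on Siena */
if (EFX_EF10_WORKAROUND_35388(efx))
	; /* use the 35388-safe event block register write sequence */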