Merge branch 'sfc-3.13' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc

Ben Hutchings says:

====================
Several fixes for the PTP hardware support added in 3.7:
1. Fix filtering of PTP packets on the TX path to be robust against bad
header lengths.
2. Limit logging on the RX path in case of a PTP packet flood, partly
from Laurence Evans.
3. Disable PTP hardware when the interface is down so that we don't
receive RX timestamp events, from Alexandre Rames.
4. Maintain clock frequency adjustment when a time offset is applied.

Also fixes for the SFC9100 family support added in 3.12:
5. Take the RX prefix length into account when applying NET_IP_ALIGN,
from Andrew Rybchenko.
6. Work around a bug that breaks communication between the driver and
firmware, from Robert Stonehouse.

Please also queue these up for the appropriate stable branches.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 88b07b3660
Author: David S. Miller <davem@davemloft.net>
Date:   2013-12-10 21:19:42 -05:00

 6 changed files with 101 additions and 23 deletions

--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c

@@ -585,7 +585,7 @@ static void efx_start_datapath(struct efx_nic *efx)
                           EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
                           efx->type->rx_buffer_padding);
        rx_buf_len = (sizeof(struct efx_rx_page_state) +
-                     NET_IP_ALIGN + efx->rx_dma_len);
+                     efx->rx_ip_align + efx->rx_dma_len);
        if (rx_buf_len <= PAGE_SIZE) {
                efx->rx_scatter = efx->type->always_rx_scatter;
                efx->rx_buffer_order = 0;
@@ -645,6 +645,8 @@ static void efx_start_datapath(struct efx_nic *efx)
                WARN_ON(channel->rx_pkt_n_frags);
        }

+       efx_ptp_start_datapath(efx);
+
        if (netif_device_present(efx->net_dev))
                netif_tx_wake_all_queues(efx->net_dev);
 }
@@ -659,6 +661,8 @@ static void efx_stop_datapath(struct efx_nic *efx)
        EFX_ASSERT_RESET_SERIALISED(efx);
        BUG_ON(efx->port_enabled);

+       efx_ptp_stop_datapath(efx);
+
        /* Stop RX refill */
        efx_for_each_channel(channel, efx) {
                efx_for_each_channel_rx_queue(rx_queue, channel)
@@ -2540,6 +2544,8 @@ static int efx_init_struct(struct efx_nic *efx,
        efx->net_dev = net_dev;
        efx->rx_prefix_size = efx->type->rx_prefix_size;
+       efx->rx_ip_align =
+               NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
        efx->rx_packet_hash_offset =
                efx->type->rx_hash_offset - efx->type->rx_prefix_size;
        spin_lock_init(&efx->stats_lock);
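The new rx_ip_align field generalises NET_IP_ALIGN: the controller DMAs a metadata prefix ahead of each frame, so the buffer offset must take the prefix length into account if the IP header is to stay 4-byte aligned. A standalone sketch of the arithmetic (the prefix lengths below are illustrative, not taken from the driver):

#include <stdio.h>

#define NET_IP_ALIGN 2  /* typical value; some architectures use 0 */

/* Mirrors the efx_init_struct() expression above: the extra offset
 * needed ahead of the RX prefix so the IP header ends up aligned.
 */
static unsigned int rx_ip_align(unsigned int rx_prefix_size)
{
        return NET_IP_ALIGN ? (rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
}

int main(void)
{
        unsigned int prefixes[] = { 0, 14, 16 };        /* illustrative */
        unsigned int i;

        for (i = 0; i < 3; i++)
                printf("RX prefix %2u bytes -> buffer offset %u\n",
                       prefixes[i], rx_ip_align(prefixes[i]));
        return 0;
}

With no prefix this degenerates to plain NET_IP_ALIGN (offset 2), while a 14-byte prefix needs no extra offset at all, since prefix plus 14-byte Ethernet header already ends on a 4-byte boundary.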

--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c

@@ -50,6 +50,7 @@ struct efx_mcdi_async_param {
 static void efx_mcdi_timeout_async(unsigned long context);
 static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
                               bool *was_attached_out);
+static bool efx_mcdi_poll_once(struct efx_nic *efx);

 static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
 {
@@ -237,6 +238,21 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx)
        }
 }

+static bool efx_mcdi_poll_once(struct efx_nic *efx)
+{
+       struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+       rmb();
+       if (!efx->type->mcdi_poll_response(efx))
+               return false;
+
+       spin_lock_bh(&mcdi->iface_lock);
+       efx_mcdi_read_response_header(efx);
+       spin_unlock_bh(&mcdi->iface_lock);
+
+       return true;
+}
+
 static int efx_mcdi_poll(struct efx_nic *efx)
 {
        struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
@@ -272,18 +288,13 @@ static int efx_mcdi_poll(struct efx_nic *efx)
                time = jiffies;

                rmb();
-               if (efx->type->mcdi_poll_response(efx))
+               if (efx_mcdi_poll_once(efx))
                        break;

                if (time_after(time, finish))
                        return -ETIMEDOUT;
        }

-       spin_lock_bh(&mcdi->iface_lock);
-       efx_mcdi_read_response_header(efx);
-       spin_unlock_bh(&mcdi->iface_lock);
-
        /* Return rc=0 like wait_event_timeout() */
        return 0;
 }
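Factoring the poll into efx_mcdi_poll_once() is what lets efx_mcdi_rpc_finish() below recheck for completion after a timed-out event wait: the MC may have completed the request in shared memory even though the completion event was lost or delayed. A minimal, self-contained sketch of that pattern (illustrative names, not driver code):

#include <stdatomic.h>
#include <stdbool.h>

/* done: completion flag the "firmware" side sets; the waiter may time
 * out just before the flag (or its wakeup) becomes visible.
 */
static bool poll_once(atomic_bool *done)
{
        /* analogous to the rmb() + mcdi_poll_response() check above */
        return atomic_load_explicit(done, memory_order_acquire);
}

static int finish_request(atomic_bool *done, bool wait_timed_out)
{
        if (!wait_timed_out)
                return 0;
        /* One last poll closes the race between completion and the
         * timeout being declared; only then report a real timeout.
         */
        return poll_once(done) ? 0 : -1;
}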
@@ -619,6 +630,16 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
        rc = efx_mcdi_await_completion(efx);

        if (rc != 0) {
+               netif_err(efx, hw, efx->net_dev,
+                         "MC command 0x%x inlen %d mode %d timed out\n",
+                         cmd, (int)inlen, mcdi->mode);
+
+               if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
+                       netif_err(efx, hw, efx->net_dev,
+                                 "MCDI request was completed without an event\n");
+                       rc = 0;
+               }
+
                /* Close the race with efx_mcdi_ev_cpl() executing just too late
                 * and completing a request we've just cancelled, by ensuring
                 * that the seqno check therein fails.
@@ -627,11 +648,9 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
                ++mcdi->seqno;
                ++mcdi->credits;
                spin_unlock_bh(&mcdi->iface_lock);
-
-               netif_err(efx, hw, efx->net_dev,
-                         "MC command 0x%x inlen %d mode %d timed out\n",
-                         cmd, (int)inlen, mcdi->mode);
-       } else {
+       }
+
+       if (rc == 0) {
                size_t hdr_len, data_len;

                /* At the very least we need a memory barrier here to ensure

--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h

@@ -683,6 +683,8 @@ struct vfdi_status;
  * @n_channels: Number of channels in use
  * @n_rx_channels: Number of channels used for RX (= number of RX queues)
  * @n_tx_channels: Number of channels used for TX
+ * @rx_ip_align: RX DMA address offset to have IP header aligned in
+ *     accordance with NET_IP_ALIGN
  * @rx_dma_len: Current maximum RX DMA length
  * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
  * @rx_buffer_truesize: Amortised allocation size of an RX buffer,
@@ -816,6 +818,7 @@ struct efx_nic {
        unsigned rss_spread;
        unsigned tx_channel_offset;
        unsigned n_tx_channels;
+       unsigned int rx_ip_align;
        unsigned int rx_dma_len;
        unsigned int rx_buffer_order;
        unsigned int rx_buffer_truesize;

--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h

@@ -560,6 +560,8 @@ void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
 bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
 int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
 void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
+void efx_ptp_start_datapath(struct efx_nic *efx);
+void efx_ptp_stop_datapath(struct efx_nic *efx);

 extern const struct efx_nic_type falcon_a1_nic_type;
 extern const struct efx_nic_type falcon_b0_nic_type;

--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c

@@ -220,6 +220,7 @@ struct efx_ptp_timeset {
  * @evt_list: List of MC receive events awaiting packets
  * @evt_free_list: List of free events
  * @evt_lock: Lock for manipulating evt_list and evt_free_list
+ * @evt_overflow: Boolean indicating that event list has overflowed
  * @rx_evts: Instantiated events (on evt_list and evt_free_list)
  * @workwq: Work queue for processing pending PTP operations
  * @work: Work task
@@ -270,6 +271,7 @@ struct efx_ptp_data {
        struct list_head evt_list;
        struct list_head evt_free_list;
        spinlock_t evt_lock;
+       bool evt_overflow;
        struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS];
        struct workqueue_struct *workwq;
        struct work_struct work;
@@ -635,6 +637,11 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
                        }
                }
        }
+       /* If the event overflow flag is set and the event list is now empty
+        * clear the flag to re-enable the overflow warning message.
+        */
+       if (ptp->evt_overflow && list_empty(&ptp->evt_list))
+               ptp->evt_overflow = false;
        spin_unlock_bh(&ptp->evt_lock);
 }
@@ -676,6 +683,11 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
                        break;
                }
        }
+       /* If the event overflow flag is set and the event list is now empty
+        * clear the flag to re-enable the overflow warning message.
+        */
+       if (ptp->evt_overflow && list_empty(&ptp->evt_list))
+               ptp->evt_overflow = false;
        spin_unlock_bh(&ptp->evt_lock);

        return rc;
@@ -705,8 +717,9 @@ static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
                        __skb_queue_tail(q, skb);
                } else if (time_after(jiffies, match->expiry)) {
                        match->state = PTP_PACKET_STATE_TIMED_OUT;
-                       netif_warn(efx, rx_err, efx->net_dev,
-                                  "PTP packet - no timestamp seen\n");
+                       if (net_ratelimit())
+                               netif_warn(efx, rx_err, efx->net_dev,
+                                          "PTP packet - no timestamp seen\n");
                        __skb_queue_tail(q, skb);
                } else {
                        /* Replace unprocessed entry and stop */
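net_ratelimit() gates the warning so that a flood of unmatched PTP packets cannot flood the log in turn: it returns true only while the global printk rate limit still has budget in the current window. A userspace sketch of the same idea (window and burst values are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define RL_INTERVAL_SEC 5       /* window length (illustrative) */
#define RL_BURST        10      /* messages allowed per window */

/* Returns true while output is still allowed in the current window,
 * loosely mimicking the kernel's net_ratelimit() semantics.
 */
static bool ratelimit_ok(void)
{
        static time_t window_start;
        static int printed;
        time_t now = time(NULL);

        if (now - window_start >= RL_INTERVAL_SEC) {
                window_start = now;
                printed = 0;
        }
        return printed++ < RL_BURST;
}

int main(void)
{
        for (int i = 0; i < 100; i++)
                if (ratelimit_ok())
                        printf("PTP packet - no timestamp seen (%d)\n", i);
        return 0;
}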
@@ -788,9 +801,14 @@ fail:
 static int efx_ptp_stop(struct efx_nic *efx)
 {
        struct efx_ptp_data *ptp = efx->ptp_data;
-       int rc = efx_ptp_disable(efx);
        struct list_head *cursor;
        struct list_head *next;
+       int rc;
+
+       if (ptp == NULL)
+               return 0;
+
+       rc = efx_ptp_disable(efx);

        if (ptp->rxfilter_installed) {
                efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
@@ -809,11 +827,19 @@ static int efx_ptp_stop(struct efx_nic *efx)
        list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) {
                list_move(cursor, &efx->ptp_data->evt_free_list);
        }
+       ptp->evt_overflow = false;
        spin_unlock_bh(&efx->ptp_data->evt_lock);

        return rc;
 }

+static int efx_ptp_restart(struct efx_nic *efx)
+{
+       if (efx->ptp_data && efx->ptp_data->enabled)
+               return efx_ptp_start(efx);
+       return 0;
+}
+
 static void efx_ptp_pps_worker(struct work_struct *work)
 {
        struct efx_ptp_data *ptp =
@@ -901,6 +927,7 @@ static int efx_ptp_probe_channel(struct efx_channel *channel)
        spin_lock_init(&ptp->evt_lock);
        for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++)
                list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
+       ptp->evt_overflow = false;

        ptp->phc_clock_info.owner = THIS_MODULE;
        snprintf(ptp->phc_clock_info.name,
@@ -989,7 +1016,11 @@ bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
                skb->len >= PTP_MIN_LENGTH &&
                skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM &&
                likely(skb->protocol == htons(ETH_P_IP)) &&
+               skb_transport_header_was_set(skb) &&
+               skb_network_header_len(skb) >= sizeof(struct iphdr) &&
                ip_hdr(skb)->protocol == IPPROTO_UDP &&
+               skb_headlen(skb) >=
+               skb_transport_offset(skb) + sizeof(struct udphdr) &&
                udp_hdr(skb)->dest == htons(PTP_EVENT_PORT);
 }
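The added checks matter because ip_hdr() and udp_hdr() only turn header offsets recorded in the skb into pointers; they validate nothing. A packet with an unset transport header, a truncated IPv4 header, or UDP bytes outside the linear area would previously have been dereferenced blindly. A bounds-checked restatement of the same test for a frame in a flat buffer (illustrative; skb geometry omitted):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define ETH_HLEN       14
#define UDP_PROTO      17
#define PTP_EVENT_PORT 319      /* PTP event messages (UDP) */

/* Every field is length-checked before it is read, mirroring the
 * order of the conditions in efx_ptp_is_ptp_tx() above.
 */
static bool is_ptp_udp(const uint8_t *frame, size_t len)
{
        size_t ihl;
        uint16_t ethertype, dport;

        if (len < ETH_HLEN + 20)                /* Ethernet + minimal IPv4 */
                return false;
        ethertype = (frame[12] << 8) | frame[13];
        if (ethertype != 0x0800)                /* not IPv4 */
                return false;
        ihl = (frame[ETH_HLEN] & 0x0f) * 4;     /* IHL, in bytes */
        if (ihl < 20 || len < ETH_HLEN + ihl + 8)  /* + full UDP header */
                return false;
        if (frame[ETH_HLEN + 9] != UDP_PROTO)   /* IPv4 protocol field */
                return false;
        dport = (frame[ETH_HLEN + ihl + 2] << 8) | frame[ETH_HLEN + ihl + 3];
        return dport == PTP_EVENT_PORT;
}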
@@ -1106,7 +1137,7 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
 {
        if ((enable_wanted != efx->ptp_data->enabled) ||
            (enable_wanted && (efx->ptp_data->mode != new_mode))) {
-               int rc;
+               int rc = 0;

                if (enable_wanted) {
                        /* Change of mode requires disable */
@@ -1123,7 +1154,8 @@
                         * succeed.
                         */
                        efx->ptp_data->mode = new_mode;
-                       rc = efx_ptp_start(efx);
+                       if (netif_running(efx->net_dev))
+                               rc = efx_ptp_start(efx);
                        if (rc == 0) {
                                rc = efx_ptp_synchronize(efx,
                                                         PTP_SYNC_ATTEMPTS * 2);
@@ -1295,8 +1327,13 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
                list_add_tail(&evt->link, &ptp->evt_list);
                queue_work(ptp->workwq, &ptp->work);
-       } else {
-               netif_err(efx, rx_err, efx->net_dev, "No free PTP event");
+       } else if (!ptp->evt_overflow) {
+               /* Log a warning message and set the event overflow flag.
+                * The message won't be logged again until the event queue
+                * becomes empty.
+                */
+               netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n");
+               ptp->evt_overflow = true;
        }
        spin_unlock_bh(&ptp->evt_lock);
 }
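The evt_overflow flag implements a "log once until the condition clears" policy: under a sustained PTP event flood, one netif_err() per dropped event would itself be harmful, so the message is emitted once and re-armed only after the event list drains (the list_empty() checks added earlier in this file). A condensed sketch of the pattern:

#include <stdbool.h>
#include <stdio.h>

/* Warn on the first overflow, stay silent while it persists, re-arm
 * once the queue drains - mirroring the evt_overflow handling above.
 */
struct evt_queue {
        int used, capacity;
        bool overflowed;
};

static bool evt_enqueue(struct evt_queue *q)
{
        if (q->used < q->capacity) {
                q->used++;
                return true;
        }
        if (!q->overflowed) {
                fprintf(stderr, "event queue overflow\n");
                q->overflowed = true;   /* suppress further warnings */
        }
        return false;
}

static void evt_drain(struct evt_queue *q)
{
        q->used = 0;
        q->overflowed = false;          /* re-enable the warning */
}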
@@ -1389,7 +1426,7 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
        if (rc != 0)
                return rc;

-       ptp_data->current_adjfreq = delta;
+       ptp_data->current_adjfreq = adjustment_ns;
        return 0;
 }
@@ -1404,7 +1441,7 @@ static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
        MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
        MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
-       MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, 0);
+       MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, ptp_data->current_adjfreq);
        MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec);
        MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec);
        return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
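Since MC_CMD_PTP_OP_ADJUST carries both a frequency and an offset in a single MCDI request, sending PTP_IN_ADJUST_FREQ = 0 while stepping the clock silently cancelled any earlier adjfreq; caching the last value lets efx_phc_adjtime() re-assert it. Note that current_adjfreq now stores adjustment_ns, the value already converted to the units the firmware expects, rather than the raw scaled-ppm delta. The effect can be checked from userspace with clock_adjtime(); a sketch, assuming a PHC at /dev/ptp0 and a kernel that can report the dialed frequency back:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/timex.h>
#include <time.h>
#include <unistd.h>

#ifndef ADJ_SETOFFSET
#define ADJ_SETOFFSET 0x0100
#endif

int main(void)
{
        clockid_t clkid;
        struct timex tx;
        int fd = open("/dev/ptp0", O_RDWR);     /* assumed PHC device */

        if (fd < 0)
                return 1;
        clkid = ((~(clockid_t)fd << 3) | 3);    /* FD_TO_CLOCKID() */

        memset(&tx, 0, sizeof(tx));
        tx.modes = ADJ_FREQUENCY;
        tx.freq = 100 << 16;                    /* +100 ppm (scaled ppm) */
        clock_adjtime(clkid, &tx);

        memset(&tx, 0, sizeof(tx));
        tx.modes = ADJ_SETOFFSET;               /* step the clock by 1 s */
        tx.time.tv_sec = 1;
        tx.time.tv_usec = 0;
        clock_adjtime(clkid, &tx);

        memset(&tx, 0, sizeof(tx));
        clock_adjtime(clkid, &tx);              /* read state back */
        printf("freq after step: %ld ppm (expect 100 preserved)\n",
               tx.freq >> 16);
        close(fd);
        return 0;
}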
@@ -1491,3 +1528,14 @@ void efx_ptp_probe(struct efx_nic *efx)
        efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] =
                &efx_ptp_channel_type;
 }
+
+void efx_ptp_start_datapath(struct efx_nic *efx)
+{
+       if (efx_ptp_restart(efx))
+               netif_err(efx, drv, efx->net_dev, "Failed to restart PTP.\n");
+}
+
+void efx_ptp_stop_datapath(struct efx_nic *efx)
+{
+       efx_ptp_stop(efx);
+}
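efx_ptp_stop() is now reached unconditionally from efx_stop_datapath(), including on NICs where PTP was never probed, which is why it gained the ptp == NULL early return; efx_ptp_restart() in turn only re-enables PTP if it was enabled when the interface went down. The shape of the pattern, reduced to its essentials (a sketch, not driver code):

#include <stdbool.h>
#include <stddef.h>

/* Idempotent stop / conditional restart: stop must be safe to call
 * when the feature was never set up, and restart must only re-enable
 * what was enabled before the datapath went down.
 */
struct feature {
        bool enabled;   /* user asked for the feature */
        bool running;   /* feature currently active in hardware */
};

static void feature_stop(struct feature *f)
{
        if (f == NULL || !f->running)   /* nothing to tear down */
                return;
        f->running = false;
}

static void feature_restart(struct feature *f)
{
        if (f != NULL && f->enabled)    /* only if previously enabled */
                f->running = true;
}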

--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c

@@ -94,7 +94,7 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx,

 void efx_rx_config_page_split(struct efx_nic *efx)
 {
-       efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN,
+       efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
                                      EFX_RX_BUF_ALIGNMENT);
        efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
                ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
@@ -189,9 +189,9 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
        do {
                index = rx_queue->added_count & rx_queue->ptr_mask;
                rx_buf = efx_rx_buffer(rx_queue, index);
-               rx_buf->dma_addr = dma_addr + NET_IP_ALIGN;
+               rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
                rx_buf->page = page;
-               rx_buf->page_offset = page_offset + NET_IP_ALIGN;
+               rx_buf->page_offset = page_offset + efx->rx_ip_align;
                rx_buf->len = efx->rx_dma_len;
                rx_buf->flags = 0;
                ++rx_queue->added_count;
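In rx.c the offset shows up twice: each buffer's DMA address and page offset start rx_ip_align bytes in, and rx_page_buf_step grows by the same amount so consecutive buffers within a page cannot overlap. A worked example of the step calculation (values illustrative; EFX_RX_BUF_ALIGNMENT is architecture-dependent in the driver):

#include <stdio.h>

#define EFX_RX_BUF_ALIGNMENT 64 /* illustrative; arch-dependent in the driver */
#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

/* Reproduces the rx_page_buf_step calculation above for sample values;
 * rx_dma_len and rx_ip_align here are chosen for demonstration only.
 */
int main(void)
{
        unsigned int rx_dma_len = 1536 + 14;    /* prefix + frame, say */
        unsigned int rx_ip_align = 2;
        unsigned int step = ALIGN_UP(rx_dma_len + rx_ip_align,
                                     EFX_RX_BUF_ALIGNMENT);

        printf("buffer step: %u bytes per RX buffer\n", step);
        printf("frame data begins %u bytes into each buffer\n", rx_ip_align);
        return 0;
}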