Merge branch 's390-qeth-next'

Julian Wiedmann says:

====================
s390/net: more updates for 4.14

please apply another batch of qeth patches for net-next.
This reworks the xmit path for L2 OSAs to use skb_cow_head() instead of
skb_realloc_headroom().
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2017-08-18 10:21:31 -07:00
commit 6ed272b2e9
4 changed files with 207 additions and 142 deletions
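
The gist of the L2 OSA rework in the diffs that follow: the hot path no longer clones the skb with skb_realloc_headroom() just to gain writable headroom for the hardware header. It calls skb_cow_head() on the original skb and pushes the header in place, and only falls back to a header from qeth_core_header_cache when the pushed header would cross a page boundary (see qeth_push_hdr() and qeth_l2_xmit_osa() below). A minimal sketch of that pattern, folding the caller's skb_cow_head() and the helper's page check into one hypothetical function; the struct and all names here are illustrative, not the driver's own code:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical device header, standing in for struct qeth_hdr. */
struct example_hw_hdr {
        u8 flags[4];
        u16 pkt_length;
};

/*
 * Push an example_hw_hdr of @len bytes in front of @skb's payload.
 *
 * skb_cow_head() makes the headroom of the original skb writable, with no
 * clone of the linear data as skb_realloc_headroom() would create.  If the
 * pushed header would straddle a page boundary, which the device cannot map
 * as a single buffer element, build it in a separately allocated buffer
 * instead.
 *
 * Returns the pushed length, 0 when the out-of-line fallback was used,
 * or a negative errno.
 */
static int example_add_hw_header(struct sk_buff *skb,
                                 struct example_hw_hdr **hdr,
                                 unsigned int len,
                                 struct kmem_cache *hdr_cache)
{
        unsigned long start, end;
        int rc;

        rc = skb_cow_head(skb, len);
        if (rc)
                return rc;

        start = (unsigned long)skb->data - len;
        end = (unsigned long)skb->data - 1;
        if ((start >> PAGE_SHIFT) == (end >> PAGE_SHIFT)) {
                /* Header fits on the same page as the frame data. */
                *hdr = skb_push(skb, len);
                return len;
        }

        /* Fall back to a header that lives outside the skb. */
        *hdr = kmem_cache_alloc(hdr_cache, GFP_ATOMIC);
        if (!*hdr)
                return -ENOMEM;
        return 0;
}

The tri-state return value mirrors qeth_push_hdr(): a caller such as qeth_l2_xmit_osa() uses the 0 case to account one extra buffer element for the out-of-line header and to know that it must free the cache-allocated header again on error.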

drivers/s390/net/qeth_core.h

@@ -949,9 +949,10 @@ int qeth_get_elements_for_frags(struct sk_buff *);
int qeth_do_send_packet_fast(struct qeth_card *card,
struct qeth_qdio_out_q *queue, struct sk_buff *skb,
struct qeth_hdr *hdr, unsigned int offset,
int hd_len);
int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *,
struct sk_buff *, struct qeth_hdr *, int);
unsigned int hd_len);
int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
struct sk_buff *skb, struct qeth_hdr *hdr,
unsigned int hd_len, unsigned int offset, int elements);
int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
int qeth_core_get_sset_count(struct net_device *, int);
void qeth_core_get_ethtool_stats(struct net_device *,
@@ -984,6 +985,7 @@ int qeth_set_features(struct net_device *, netdev_features_t);
int qeth_recover_features(struct net_device *);
netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
int qeth_vm_request_mac(struct qeth_card *card);
int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len);
/* exports for OSN */
int qeth_osn_assist(struct net_device *, void *, int);

drivers/s390/net/qeth_core_main.c

@@ -3890,6 +3890,34 @@ int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len)
}
EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce);
/**
* qeth_push_hdr() - push a qeth_hdr onto an skb.
* @skb: skb that the qeth_hdr should be pushed onto.
* @hdr: double pointer to a qeth_hdr. When returning with >= 0,
* it contains a valid pointer to a qeth_hdr.
* @len: length of the hdr that needs to be pushed on.
*
* Returns the pushed length. If the header can't be pushed on
* (eg. because it would cross a page boundary), it is allocated from
* the cache instead and 0 is returned.
* Error to create the hdr is indicated by returning with < 0.
*/
int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len)
{
if (skb_headroom(skb) >= len &&
qeth_get_elements_for_range((addr_t)skb->data - len,
(addr_t)skb->data) == 1) {
*hdr = skb_push(skb, len);
return len;
}
/* fall back */
*hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
if (!*hdr)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL_GPL(qeth_push_hdr);
static void __qeth_fill_buffer(struct sk_buff *skb,
struct qeth_qdio_out_buffer *buf,
bool is_first_elem, unsigned int offset)
@@ -3953,43 +3981,38 @@ static void __qeth_fill_buffer(struct sk_buff *skb,
buf->next_element_to_fill = element;
}
/**
* qeth_fill_buffer() - map skb into an output buffer
* @queue: QDIO queue to submit the buffer on
* @buf: buffer to transport the skb
* @skb: skb to map into the buffer
* @hdr: qeth_hdr for this skb. Either at skb->data, or allocated
* from qeth_core_header_cache.
* @offset: when mapping the skb, start at skb->data + offset
* @hd_len: if > 0, build a dedicated header element of this size
*/
static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
struct qeth_qdio_out_buffer *buf,
struct sk_buff *skb, struct qeth_hdr *hdr,
unsigned int offset, int hd_len)
unsigned int offset, unsigned int hd_len)
{
struct qdio_buffer *buffer;
int flush_cnt = 0, hdr_len;
struct qdio_buffer *buffer = buf->buffer;
bool is_first_elem = true;
int flush_cnt = 0;
buffer = buf->buffer;
refcount_inc(&skb->users);
skb_queue_tail(&buf->skb_list, skb);
if (hdr->hdr.l3.id == QETH_HEADER_TYPE_TSO) {
int element = buf->next_element_to_fill;
is_first_elem = false;
hdr_len = sizeof(struct qeth_hdr_tso) +
((struct qeth_hdr_tso *)hdr)->ext.dg_hdr_len;
/*fill first buffer entry only with header information */
buffer->element[element].addr = skb->data;
buffer->element[element].length = hdr_len;
buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
buf->next_element_to_fill++;
skb_pull(skb, hdr_len);
}
/* IQD */
if (offset > 0) {
/* build dedicated header element */
if (hd_len) {
int element = buf->next_element_to_fill;
is_first_elem = false;
buffer->element[element].addr = hdr;
buffer->element[element].length = sizeof(struct qeth_hdr) +
hd_len;
buffer->element[element].length = hd_len;
buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
buf->is_header[element] = 1;
/* remember to free cache-allocated qeth_hdr: */
buf->is_header[element] = ((void *)hdr != skb->data);
buf->next_element_to_fill++;
}
@@ -4020,7 +4043,7 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
int qeth_do_send_packet_fast(struct qeth_card *card,
struct qeth_qdio_out_q *queue, struct sk_buff *skb,
struct qeth_hdr *hdr, unsigned int offset,
int hd_len)
unsigned int hd_len)
{
struct qeth_qdio_out_buffer *buffer;
int index;
@@ -4050,8 +4073,9 @@ out:
EXPORT_SYMBOL_GPL(qeth_do_send_packet_fast);
int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
struct sk_buff *skb, struct qeth_hdr *hdr,
int elements_needed)
struct sk_buff *skb, struct qeth_hdr *hdr,
unsigned int offset, unsigned int hd_len,
int elements_needed)
{
struct qeth_qdio_out_buffer *buffer;
int start_index;
@@ -4100,7 +4124,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
}
}
}
tmp = qeth_fill_buffer(queue, buffer, skb, hdr, 0, 0);
tmp = qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
QDIO_MAX_BUFFERS_PER_Q;
flush_count += tmp;

drivers/s390/net/qeth_l2_main.c

@@ -259,13 +259,14 @@ static void qeth_l2_hdr_csum(struct qeth_card *card, struct qeth_hdr *hdr,
card->perf_stats.tx_csum++;
}
static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
struct sk_buff *skb, int cast_type)
static void qeth_l2_fill_header(struct qeth_hdr *hdr, struct sk_buff *skb,
int cast_type, unsigned int data_len)
{
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
memset(hdr, 0, sizeof(struct qeth_hdr));
hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
hdr->hdr.l2.pkt_length = data_len;
/* set byte byte 3 to casting flags */
if (cast_type == RTN_MULTICAST)
@@ -275,7 +276,6 @@ static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
else
hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST;
hdr->hdr.l2.pkt_length = skb->len - sizeof(struct qeth_hdr);
/* VSWITCH relies on the VLAN
* information to be present in
* the QDIO header */
@@ -676,21 +676,127 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
qeth_promisc_to_bridge(card);
}
static int qeth_l2_xmit_iqd(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue, int cast_type)
{
unsigned int data_offset = ETH_HLEN;
struct qeth_hdr *hdr;
int rc;
hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
if (!hdr)
return -ENOMEM;
qeth_l2_fill_header(hdr, skb, cast_type, skb->len);
skb_copy_from_linear_data(skb, ((char *)hdr) + sizeof(*hdr),
data_offset);
if (!qeth_get_elements_no(card, skb, 1, data_offset)) {
rc = -E2BIG;
goto out;
}
rc = qeth_do_send_packet_fast(card, queue, skb, hdr, data_offset,
sizeof(*hdr) + data_offset);
out:
if (rc)
kmem_cache_free(qeth_core_header_cache, hdr);
return rc;
}
static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue, int cast_type)
{
int push_len = sizeof(struct qeth_hdr);
unsigned int elements, nr_frags;
unsigned int hdr_elements = 0;
struct qeth_hdr *hdr = NULL;
unsigned int hd_len = 0;
int rc;
/* fix hardware limitation: as long as we do not have sbal
* chaining we can not send long frag lists
*/
if (!qeth_get_elements_no(card, skb, 0, 0)) {
rc = skb_linearize(skb);
if (card->options.performance_stats) {
if (rc)
card->perf_stats.tx_linfail++;
else
card->perf_stats.tx_lin++;
}
if (rc)
return rc;
}
nr_frags = skb_shinfo(skb)->nr_frags;
rc = skb_cow_head(skb, push_len);
if (rc)
return rc;
push_len = qeth_push_hdr(skb, &hdr, push_len);
if (push_len < 0)
return push_len;
if (!push_len) {
/* hdr was allocated from cache */
hd_len = sizeof(*hdr);
hdr_elements = 1;
}
qeth_l2_fill_header(hdr, skb, cast_type, skb->len - push_len);
if (skb->ip_summed == CHECKSUM_PARTIAL)
qeth_l2_hdr_csum(card, hdr, skb);
elements = qeth_get_elements_no(card, skb, hdr_elements, 0);
if (!elements) {
rc = -E2BIG;
goto out;
}
elements += hdr_elements;
/* TODO: remove the skb_orphan() once TX completion is fast enough */
skb_orphan(skb);
rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len, elements);
out:
if (!rc) {
if (card->options.performance_stats && nr_frags) {
card->perf_stats.sg_skbs_sent++;
/* nr_frags + skb->data */
card->perf_stats.sg_frags_sent += nr_frags + 1;
}
} else {
if (hd_len)
kmem_cache_free(qeth_core_header_cache, hdr);
if (rc == -EBUSY)
/* roll back to ETH header */
skb_pull(skb, push_len);
}
return rc;
}
static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue)
{
unsigned int elements;
struct qeth_hdr *hdr;
if (skb->protocol == htons(ETH_P_IPV6))
return -EPROTONOSUPPORT;
hdr = (struct qeth_hdr *)skb->data;
elements = qeth_get_elements_no(card, skb, 0, 0);
if (!elements)
return -E2BIG;
if (qeth_hdr_chk_and_bounce(skb, &hdr, sizeof(*hdr)))
return -EINVAL;
return qeth_do_send_packet(card, queue, skb, hdr, 0, 0, elements);
}
static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
int rc;
struct qeth_hdr *hdr = NULL;
int elements = 0;
struct qeth_card *card = dev->ml_priv;
struct sk_buff *new_skb = skb;
int cast_type = qeth_l2_get_cast_type(card, skb);
struct qeth_qdio_out_q *queue;
int tx_bytes = skb->len;
int data_offset = -1;
int elements_needed = 0;
int hd_len = 0;
unsigned int nr_frags;
int rc;
if (card->qdio.do_prio_queueing || (cast_type &&
card->info.is_multicast_different))
@@ -704,115 +810,38 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
goto tx_drop;
}
if ((card->info.type == QETH_CARD_TYPE_OSN) &&
(skb->protocol == htons(ETH_P_IPV6)))
goto tx_drop;
if (card->options.performance_stats) {
card->perf_stats.outbound_cnt++;
card->perf_stats.outbound_start_time = qeth_get_micros();
}
netif_stop_queue(dev);
/* fix hardware limitation: as long as we do not have sbal
* chaining we can not send long frag lists
*/
if ((card->info.type != QETH_CARD_TYPE_IQD) &&
!qeth_get_elements_no(card, new_skb, 0, 0)) {
int lin_rc = skb_linearize(new_skb);
if (card->options.performance_stats) {
if (lin_rc)
card->perf_stats.tx_linfail++;
else
card->perf_stats.tx_lin++;
}
if (lin_rc)
goto tx_drop;
}
nr_frags = skb_shinfo(new_skb)->nr_frags;
if (card->info.type == QETH_CARD_TYPE_OSN)
hdr = (struct qeth_hdr *)skb->data;
else {
if (card->info.type == QETH_CARD_TYPE_IQD) {
new_skb = skb;
data_offset = ETH_HLEN;
hd_len = ETH_HLEN;
hdr = kmem_cache_alloc(qeth_core_header_cache,
GFP_ATOMIC);
if (!hdr)
goto tx_drop;
elements_needed++;
qeth_l2_fill_header(card, hdr, new_skb, cast_type);
hdr->hdr.l2.pkt_length = new_skb->len;
skb_copy_from_linear_data(new_skb,
((char *)hdr) + sizeof(*hdr),
ETH_HLEN);
} else {
/* create a clone with writeable headroom */
new_skb = skb_realloc_headroom(skb,
sizeof(struct qeth_hdr));
if (!new_skb)
goto tx_drop;
hdr = skb_push(new_skb, sizeof(struct qeth_hdr));
qeth_l2_fill_header(card, hdr, new_skb, cast_type);
if (new_skb->ip_summed == CHECKSUM_PARTIAL)
qeth_l2_hdr_csum(card, hdr, new_skb);
}
switch (card->info.type) {
case QETH_CARD_TYPE_OSN:
rc = qeth_l2_xmit_osn(card, skb, queue);
break;
case QETH_CARD_TYPE_IQD:
rc = qeth_l2_xmit_iqd(card, skb, queue, cast_type);
break;
default:
rc = qeth_l2_xmit_osa(card, skb, queue, cast_type);
}
elements = qeth_get_elements_no(card, new_skb, elements_needed,
(data_offset > 0) ? data_offset : 0);
if (!elements) {
if (data_offset >= 0)
kmem_cache_free(qeth_core_header_cache, hdr);
goto tx_drop;
}
if (card->info.type != QETH_CARD_TYPE_IQD) {
if (qeth_hdr_chk_and_bounce(new_skb, &hdr,
sizeof(struct qeth_hdr_layer2)))
goto tx_drop;
rc = qeth_do_send_packet(card, queue, new_skb, hdr,
elements);
} else
rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
data_offset, hd_len);
if (!rc) {
card->stats.tx_packets++;
card->stats.tx_bytes += tx_bytes;
if (card->options.performance_stats && nr_frags) {
card->perf_stats.sg_skbs_sent++;
/* nr_frags + skb->data */
card->perf_stats.sg_frags_sent += nr_frags + 1;
}
if (new_skb != skb)
dev_kfree_skb_any(skb);
rc = NETDEV_TX_OK;
} else {
if (data_offset >= 0)
kmem_cache_free(qeth_core_header_cache, hdr);
if (rc == -EBUSY) {
if (new_skb != skb)
dev_kfree_skb_any(new_skb);
return NETDEV_TX_BUSY;
} else
goto tx_drop;
}
netif_wake_queue(dev);
if (card->options.performance_stats)
card->perf_stats.outbound_time += qeth_get_micros() -
card->perf_stats.outbound_start_time;
return rc;
if (card->options.performance_stats)
card->perf_stats.outbound_time += qeth_get_micros() -
card->perf_stats.outbound_start_time;
netif_wake_queue(dev);
return NETDEV_TX_OK;
} else if (rc == -EBUSY) {
return NETDEV_TX_BUSY;
} /* else fall through */
tx_drop:
card->stats.tx_dropped++;
card->stats.tx_errors++;
if ((new_skb != skb) && new_skb)
dev_kfree_skb_any(new_skb);
dev_kfree_skb_any(skb);
netif_wake_queue(dev);
return NETDEV_TX_OK;
@@ -990,6 +1019,12 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
card->dev->vlan_features |= NETIF_F_RXCSUM;
}
}
if (card->info.type != QETH_CARD_TYPE_OSN &&
card->info.type != QETH_CARD_TYPE_IQD) {
card->dev->priv_flags &= ~IFF_TX_SKB_SHARING;
card->dev->needed_headroom = sizeof(struct qeth_hdr);
}
card->info.broadcast_capable = 1;
qeth_l2_request_initial_mac(card);
card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *

drivers/s390/net/qeth_l3_main.c

@@ -2637,6 +2637,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
qeth_get_priority_queue(card, skb, ipv, cast_type) :
card->qdio.default_out_queue];
int tx_bytes = skb->len;
unsigned int hd_len = 0;
bool use_tso;
int data_offset = -1;
unsigned int nr_frags;
@@ -2669,6 +2670,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
if (card->info.type == QETH_CARD_TYPE_IQD) {
new_skb = skb;
data_offset = ETH_HLEN;
hd_len = sizeof(*hdr);
hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
if (!hdr)
goto tx_drop;
@@ -2756,19 +2758,21 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
if (card->info.type != QETH_CARD_TYPE_IQD) {
int len;
if (use_tso)
len = ((unsigned long)tcp_hdr(new_skb) +
tcp_hdrlen(new_skb)) -
(unsigned long)new_skb->data;
else
if (use_tso) {
hd_len = sizeof(struct qeth_hdr_tso) +
ip_hdrlen(new_skb) + tcp_hdrlen(new_skb);
len = hd_len;
} else {
len = sizeof(struct qeth_hdr_layer3);
}
if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len))
goto tx_drop;
rc = qeth_do_send_packet(card, queue, new_skb, hdr, elements);
rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len,
hd_len, elements);
} else
rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
data_offset, 0);
data_offset, hd_len);
if (!rc) {
card->stats.tx_packets++;