Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Merge 'net' into 'net-next' to get the AF_PACKET bug fix that
Daniel's direct transmit changes depend upon.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2013-12-09 20:20:14 -05:00
commit 34f9f43710
37 changed files with 310 additions and 204 deletions


@ -4,7 +4,7 @@ This file provides information on what the device node
for the davinci_emac interface contains.
Required properties:
- compatible: "ti,davinci-dm6467-emac";
- compatible: "ti,davinci-dm6467-emac" or "ti,am3517-emac"
- reg: Offset and length of the register set for the device
- ti,davinci-ctrl-reg-offset: offset to control register
- ti,davinci-ctrl-mod-reg-offset: offset to control module register


@ -123,6 +123,16 @@ Transmission process is similar to capture as shown below.
[shutdown] close() --------> destruction of the transmission socket and
deallocation of all associated resources.
Socket creation and destruction is also straightforward, and is done
the same way as in capturing described in the previous paragraph:
int fd = socket(PF_PACKET, mode, 0);
The protocol can optionally be 0 in case we only want to transmit
via this socket, which avoids an expensive call to packet_rcv().
In this case, you also need to bind(2) the TX_RING with sll_protocol = 0
set. Otherwise, use htons(ETH_P_ALL) or any other protocol, for example.
Binding the socket to your network interface is mandatory (with zero copy) to
know the header size of frames used in the circular buffer.
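A minimal userspace sketch of the TX-only setup described above (not part of this commit; the helper and interface names are invented, and the PACKET_TX_RING/mmap(2) setup covered elsewhere in this document is omitted):

#include <net/if.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/if_packet.h>

/* Open a PF_PACKET socket intended only for transmission.  Protocol 0 in
 * socket(2) and sll_protocol = 0 in bind(2) keep the socket off the receive
 * path, so the kernel never calls packet_rcv() for it.
 */
static int open_tx_only_socket(const char *ifname)
{
	struct sockaddr_ll ll;
	int fd;

	fd = socket(PF_PACKET, SOCK_RAW, 0);	/* protocol 0: TX only */
	if (fd < 0)
		return -1;

	memset(&ll, 0, sizeof(ll));
	ll.sll_family   = AF_PACKET;
	ll.sll_protocol = 0;			/* stay off the RX hook */
	ll.sll_ifindex  = if_nametoindex(ifname);

	if (bind(fd, (struct sockaddr *)&ll, sizeof(ll)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}

As with any packet socket, this requires CAP_NET_RAW (or root).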


@ -4450,10 +4450,8 @@ M: Bruce Allan <bruce.w.allan@intel.com>
M: Carolyn Wyborny <carolyn.wyborny@intel.com>
M: Don Skidmore <donald.c.skidmore@intel.com>
M: Greg Rose <gregory.v.rose@intel.com>
M: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
M: Alex Duyck <alexander.h.duyck@intel.com>
M: John Ronciak <john.ronciak@intel.com>
M: Tushar Dave <tushar.n.dave@intel.com>
L: e1000-devel@lists.sourceforge.net
W: http://www.intel.com/support/feedback.htm
W: http://e1000.sourceforge.net/


@ -4199,9 +4199,9 @@ static int bond_check_params(struct bond_params *params)
(arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
/* not complete check, but should be good enough to
catch mistakes */
__be32 ip = in_aton(arp_ip_target[i]);
if (!isdigit(arp_ip_target[i][0]) || ip == 0 ||
ip == htonl(INADDR_BROADCAST)) {
__be32 ip;
if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
IS_IP_TARGET_UNUSABLE_ADDRESS(ip)) {
pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
arp_ip_target[i]);
arp_interval = 0;
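A rough userspace analogue of the stricter parsing introduced in this hunk (illustration only: inet_pton(3) stands in for the kernel's in4_pton(), the helper name is invented, and the rejected addresses mirror the all-zeros and broadcast values the old check refused):

#include <stdbool.h>
#include <arpa/inet.h>
#include <netinet/in.h>

/* Accept only a well-formed dotted-quad that is also a plausible ARP
 * target.  inet_pton() rejects malformed strings outright, where the old
 * first-character isdigit()/in_aton() check could let them through; on top
 * of that, refuse the all-zeros and broadcast addresses.
 */
static bool arp_target_ok(const char *s)
{
	struct in_addr a;

	if (inet_pton(AF_INET, s, &a) != 1)
		return false;

	return a.s_addr != htonl(INADDR_ANY) &&
	       a.s_addr != htonl(INADDR_BROADCAST);
}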


@ -1634,12 +1634,12 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
int packets_per_slave = bond->params.packets_per_slave;
unsigned int packets_per_slave = bond->params.packets_per_slave;
if (packets_per_slave > 1)
packets_per_slave = reciprocal_value(packets_per_slave);
return sprintf(buf, "%d\n", packets_per_slave);
return sprintf(buf, "%u\n", packets_per_slave);
}
static ssize_t bonding_store_packets_per_slave(struct device *d,


@ -3114,6 +3114,11 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
{
struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
if (!IS_SRIOV(bp)) {
BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
return -EINVAL;
}
DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
num_vfs_param, BNX2X_NR_VIRTFN(bp));


@ -64,6 +64,9 @@
#define SLIPORT_ERROR_NO_RESOURCE1 0x2
#define SLIPORT_ERROR_NO_RESOURCE2 0x9
#define SLIPORT_ERROR_FW_RESET1 0x2
#define SLIPORT_ERROR_FW_RESET2 0x0
/********* Memory BAR register ************/
#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
/* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt


@ -2464,8 +2464,16 @@ void be_detect_error(struct be_adapter *adapter)
*/
if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
adapter->hw_error = true;
dev_err(&adapter->pdev->dev,
"Error detected in the card\n");
/* Do not log error messages if it's a FW reset */
if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
dev_info(&adapter->pdev->dev,
"Firmware update in progress\n");
return;
} else {
dev_err(&adapter->pdev->dev,
"Error detected in the card\n");
}
}
if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
@ -2932,28 +2940,35 @@ static void be_cancel_worker(struct be_adapter *adapter)
}
}
static int be_clear(struct be_adapter *adapter)
static void be_mac_clear(struct be_adapter *adapter)
{
int i;
if (adapter->pmac_id) {
for (i = 0; i < (adapter->uc_macs + 1); i++)
be_cmd_pmac_del(adapter, adapter->if_handle,
adapter->pmac_id[i], 0);
adapter->uc_macs = 0;
kfree(adapter->pmac_id);
adapter->pmac_id = NULL;
}
}
static int be_clear(struct be_adapter *adapter)
{
be_cancel_worker(adapter);
if (sriov_enabled(adapter))
be_vf_clear(adapter);
/* delete the primary mac along with the uc-mac list */
for (i = 0; i < (adapter->uc_macs + 1); i++)
be_cmd_pmac_del(adapter, adapter->if_handle,
adapter->pmac_id[i], 0);
adapter->uc_macs = 0;
be_mac_clear(adapter);
be_cmd_if_destroy(adapter, adapter->if_handle, 0);
be_clear_queues(adapter);
kfree(adapter->pmac_id);
adapter->pmac_id = NULL;
be_msix_disable(adapter);
return 0;
}
@ -3812,6 +3827,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
}
if (change_status == LANCER_FW_RESET_NEEDED) {
dev_info(&adapter->pdev->dev,
"Resetting adapter to activate new FW\n");
status = lancer_physdev_ctrl(adapter,
PHYSDEV_CONTROL_FW_RESET_MASK);
if (status) {
@ -4363,13 +4380,13 @@ static int lancer_recover_func(struct be_adapter *adapter)
goto err;
}
dev_err(dev, "Error recovery successful\n");
dev_err(dev, "Adapter recovery successful\n");
return 0;
err:
if (status == -EAGAIN)
dev_err(dev, "Waiting for resource provisioning\n");
else
dev_err(dev, "Error recovery failed\n");
dev_err(dev, "Adapter recovery failed\n");
return status;
}


@ -1378,7 +1378,7 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
dev_kfree_skb_any(skb);
dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
rx_desc->data_size, DMA_FROM_DEVICE);
MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
}
if (rx_done)
@ -1424,7 +1424,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
}
dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
rx_desc->data_size, DMA_FROM_DEVICE);
MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
rx_bytes = rx_desc->data_size -
(ETH_FCS_LEN + MVNETA_MH_SIZE);


@ -5149,8 +5149,10 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
int result;
memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64));
int result, count;
count = nv_get_sset_count(dev, ETH_SS_TEST);
memset(buffer, 0, count * sizeof(u64));
if (!nv_link_test(dev)) {
test->flags |= ETH_TEST_FL_FAILED;
@ -5194,7 +5196,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
return;
}
if (!nv_loopback_test(dev)) {
if (count > NV_TEST_COUNT_BASE && !nv_loopback_test(dev)) {
test->flags |= ETH_TEST_FL_FAILED;
buffer[3] = 1;
}


@ -18,7 +18,7 @@
*/
#define DRV_NAME "qlge"
#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
#define DRV_VERSION "1.00.00.33"
#define DRV_VERSION "1.00.00.34"
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */


@ -181,6 +181,7 @@ static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
};
#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
#define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats)
#define QLGE_RCV_MAC_ERR_STATS 7
static int ql_update_ring_coalescing(struct ql_adapter *qdev)
{
@ -280,6 +281,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
iter++;
}
/* Update receive mac error statistics */
iter += QLGE_RCV_MAC_ERR_STATS;
/*
* Get Per-priority TX pause frame counter statistics.
*/


@ -2376,14 +2376,6 @@ static netdev_features_t qlge_fix_features(struct net_device *ndev,
netdev_features_t features)
{
int err;
/*
* Since there is no support for separate rx/tx vlan accel
* enable/disable make sure tx flag is always in same state as rx.
*/
if (features & NETIF_F_HW_VLAN_CTAG_RX)
features |= NETIF_F_HW_VLAN_CTAG_TX;
else
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
/* Update the behavior of vlan accel in the adapter */
err = qlge_update_hw_vlan_features(ndev, features);


@ -61,6 +61,7 @@
#include <linux/davinci_emac.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
@ -1752,10 +1753,14 @@ static const struct net_device_ops emac_netdev_ops = {
#endif
};
static const struct of_device_id davinci_emac_of_match[];
static struct emac_platform_data *
davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
{
struct device_node *np;
const struct of_device_id *match;
const struct emac_platform_data *auxdata;
struct emac_platform_data *pdata = NULL;
const u8 *mac_addr;
@ -1793,7 +1798,20 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
if (!priv->phy_node)
pdata->phy_id = "";
pdata->phy_id = NULL;
auxdata = pdev->dev.platform_data;
if (auxdata) {
pdata->interrupt_enable = auxdata->interrupt_enable;
pdata->interrupt_disable = auxdata->interrupt_disable;
}
match = of_match_device(davinci_emac_of_match, &pdev->dev);
if (match && match->data) {
auxdata = match->data;
pdata->version = auxdata->version;
pdata->hw_ram_addr = auxdata->hw_ram_addr;
}
pdev->dev.platform_data = pdata;
@ -2020,8 +2038,14 @@ static const struct dev_pm_ops davinci_emac_pm_ops = {
};
#if IS_ENABLED(CONFIG_OF)
static const struct emac_platform_data am3517_emac_data = {
.version = EMAC_VERSION_2,
.hw_ram_addr = 0x01e20000,
};
static const struct of_device_id davinci_emac_of_match[] = {
{.compatible = "ti,davinci-dm6467-emac", },
{.compatible = "ti,am3517-emac", .data = &am3517_emac_data, },
{},
};
MODULE_DEVICE_TABLE(of, davinci_emac_of_match);


@ -872,6 +872,8 @@ static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
ret = macvtap_do_read(q, iv, len, file->f_flags & O_NONBLOCK);
ret = min_t(ssize_t, ret, len); /* XXX copied from tun.c. Why? */
if (ret > 0)
iocb->ki_pos = ret;
out:
return ret;
}


@ -1355,6 +1355,8 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
ret = tun_do_read(tun, tfile, iv, len,
file->f_flags & O_NONBLOCK);
ret = min_t(ssize_t, ret, len);
if (ret > 0)
iocb->ki_pos = ret;
out:
tun_put(tun);
return ret;


@ -425,10 +425,10 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
pr_debug("%s: short packet %i\n", dev->name, len);
dev->stats.rx_length_errors++;
if (vi->big_packets)
give_pages(rq, buf);
else if (vi->mergeable_rx_bufs)
if (vi->mergeable_rx_bufs)
put_page(virt_to_head_page(buf));
else if (vi->big_packets)
give_pages(rq, buf);
else
dev_kfree_skb(buf);
return;
@ -1366,6 +1366,11 @@ static void virtnet_config_changed(struct virtio_device *vdev)
static void virtnet_free_queues(struct virtnet_info *vi)
{
int i;
for (i = 0; i < vi->max_queue_pairs; i++)
netif_napi_del(&vi->rq[i].napi);
kfree(vi->rq);
kfree(vi->sq);
}
@ -1395,10 +1400,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
struct virtqueue *vq = vi->rq[i].vq;
while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
if (vi->big_packets)
give_pages(&vi->rq[i], buf);
else if (vi->mergeable_rx_bufs)
if (vi->mergeable_rx_bufs)
put_page(virt_to_head_page(buf));
else if (vi->big_packets)
give_pages(&vi->rq[i], buf);
else
dev_kfree_skb(buf);
--vi->rq[i].num;


@ -1149,49 +1149,72 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
return 0;
}
static inline void maybe_pull_tail(struct sk_buff *skb, unsigned int len)
static inline int maybe_pull_tail(struct sk_buff *skb, unsigned int len,
unsigned int max)
{
if (skb_is_nonlinear(skb) && skb_headlen(skb) < len) {
/* If we need to pullup then pullup to the max, so we
* won't need to do it again.
*/
int target = min_t(int, skb->len, MAX_TCP_HEADER);
__pskb_pull_tail(skb, target - skb_headlen(skb));
}
if (skb_headlen(skb) >= len)
return 0;
/* If we need to pullup then pullup to the max, so we
* won't need to do it again.
*/
if (max > skb->len)
max = skb->len;
if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
return -ENOMEM;
if (skb_headlen(skb) < len)
return -EPROTO;
return 0;
}
/* This value should be large enough to cover a tagged ethernet header plus
* maximally sized IP and TCP or UDP headers.
*/
#define MAX_IP_HDR_LEN 128
static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
int recalculate_partial_csum)
{
struct iphdr *iph = (void *)skb->data;
unsigned int header_size;
unsigned int off;
int err = -EPROTO;
bool fragment;
int err;
off = sizeof(struct iphdr);
fragment = false;
header_size = skb->network_header + off + MAX_IPOPTLEN;
maybe_pull_tail(skb, header_size);
err = maybe_pull_tail(skb,
sizeof(struct iphdr),
MAX_IP_HDR_LEN);
if (err < 0)
goto out;
off = iph->ihl * 4;
if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
fragment = true;
switch (iph->protocol) {
off = ip_hdrlen(skb);
err = -EPROTO;
switch (ip_hdr(skb)->protocol) {
case IPPROTO_TCP:
if (!skb_partial_csum_set(skb, off,
offsetof(struct tcphdr, check)))
goto out;
if (recalculate_partial_csum) {
struct tcphdr *tcph = tcp_hdr(skb);
err = maybe_pull_tail(skb,
off + sizeof(struct tcphdr),
MAX_IP_HDR_LEN);
if (err < 0)
goto out;
header_size = skb->network_header +
off +
sizeof(struct tcphdr);
maybe_pull_tail(skb, header_size);
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
skb->len - off,
IPPROTO_TCP, 0);
tcp_hdr(skb)->check =
~csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr,
skb->len - off,
IPPROTO_TCP, 0);
}
break;
case IPPROTO_UDP:
@ -1200,24 +1223,20 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
goto out;
if (recalculate_partial_csum) {
struct udphdr *udph = udp_hdr(skb);
err = maybe_pull_tail(skb,
off + sizeof(struct udphdr),
MAX_IP_HDR_LEN);
if (err < 0)
goto out;
header_size = skb->network_header +
off +
sizeof(struct udphdr);
maybe_pull_tail(skb, header_size);
udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
skb->len - off,
IPPROTO_UDP, 0);
udp_hdr(skb)->check =
~csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr,
skb->len - off,
IPPROTO_UDP, 0);
}
break;
default:
if (net_ratelimit())
netdev_err(vif->dev,
"Attempting to checksum a non-TCP/UDP packet, "
"dropping a protocol %d packet\n",
iph->protocol);
goto out;
}
@ -1227,75 +1246,99 @@ out:
return err;
}
/* This value should be large enough to cover a tagged ethernet header plus
* an IPv6 header, all options, and a maximal TCP or UDP header.
*/
#define MAX_IPV6_HDR_LEN 256
#define OPT_HDR(type, skb, off) \
(type *)(skb_network_header(skb) + (off))
static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
int recalculate_partial_csum)
{
int err = -EPROTO;
struct ipv6hdr *ipv6h = (void *)skb->data;
int err;
u8 nexthdr;
unsigned int header_size;
unsigned int off;
unsigned int len;
bool fragment;
bool done;
fragment = false;
done = false;
off = sizeof(struct ipv6hdr);
header_size = skb->network_header + off;
maybe_pull_tail(skb, header_size);
err = maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
if (err < 0)
goto out;
nexthdr = ipv6h->nexthdr;
nexthdr = ipv6_hdr(skb)->nexthdr;
while ((off <= sizeof(struct ipv6hdr) + ntohs(ipv6h->payload_len)) &&
!done) {
len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
while (off <= len && !done) {
switch (nexthdr) {
case IPPROTO_DSTOPTS:
case IPPROTO_HOPOPTS:
case IPPROTO_ROUTING: {
struct ipv6_opt_hdr *hp = (void *)(skb->data + off);
struct ipv6_opt_hdr *hp;
header_size = skb->network_header +
off +
sizeof(struct ipv6_opt_hdr);
maybe_pull_tail(skb, header_size);
err = maybe_pull_tail(skb,
off +
sizeof(struct ipv6_opt_hdr),
MAX_IPV6_HDR_LEN);
if (err < 0)
goto out;
hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
nexthdr = hp->nexthdr;
off += ipv6_optlen(hp);
break;
}
case IPPROTO_AH: {
struct ip_auth_hdr *hp = (void *)(skb->data + off);
struct ip_auth_hdr *hp;
header_size = skb->network_header +
off +
sizeof(struct ip_auth_hdr);
maybe_pull_tail(skb, header_size);
err = maybe_pull_tail(skb,
off +
sizeof(struct ip_auth_hdr),
MAX_IPV6_HDR_LEN);
if (err < 0)
goto out;
hp = OPT_HDR(struct ip_auth_hdr, skb, off);
nexthdr = hp->nexthdr;
off += (hp->hdrlen+2)<<2;
off += ipv6_authlen(hp);
break;
}
case IPPROTO_FRAGMENT: {
struct frag_hdr *hp;
err = maybe_pull_tail(skb,
off +
sizeof(struct frag_hdr),
MAX_IPV6_HDR_LEN);
if (err < 0)
goto out;
hp = OPT_HDR(struct frag_hdr, skb, off);
if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
fragment = true;
nexthdr = hp->nexthdr;
off += sizeof(struct frag_hdr);
break;
}
case IPPROTO_FRAGMENT:
fragment = true;
/* fall through */
default:
done = true;
break;
}
}
if (!done) {
if (net_ratelimit())
netdev_err(vif->dev, "Failed to parse packet header\n");
goto out;
}
err = -EPROTO;
if (fragment) {
if (net_ratelimit())
netdev_err(vif->dev, "Packet is a fragment!\n");
if (!done || fragment)
goto out;
}
switch (nexthdr) {
case IPPROTO_TCP:
@ -1304,17 +1347,17 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
goto out;
if (recalculate_partial_csum) {
struct tcphdr *tcph = tcp_hdr(skb);
err = maybe_pull_tail(skb,
off + sizeof(struct tcphdr),
MAX_IPV6_HDR_LEN);
if (err < 0)
goto out;
header_size = skb->network_header +
off +
sizeof(struct tcphdr);
maybe_pull_tail(skb, header_size);
tcph->check = ~csum_ipv6_magic(&ipv6h->saddr,
&ipv6h->daddr,
skb->len - off,
IPPROTO_TCP, 0);
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
skb->len - off,
IPPROTO_TCP, 0);
}
break;
case IPPROTO_UDP:
@ -1323,25 +1366,20 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
goto out;
if (recalculate_partial_csum) {
struct udphdr *udph = udp_hdr(skb);
err = maybe_pull_tail(skb,
off + sizeof(struct udphdr),
MAX_IPV6_HDR_LEN);
if (err < 0)
goto out;
header_size = skb->network_header +
off +
sizeof(struct udphdr);
maybe_pull_tail(skb, header_size);
udph->check = ~csum_ipv6_magic(&ipv6h->saddr,
&ipv6h->daddr,
skb->len - off,
IPPROTO_UDP, 0);
udp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
skb->len - off,
IPPROTO_UDP, 0);
}
break;
default:
if (net_ratelimit())
netdev_err(vif->dev,
"Attempting to checksum a non-TCP/UDP packet, "
"dropping a protocol %d packet\n",
nexthdr);
goto out;
}


@ -4,6 +4,7 @@
#include <uapi/linux/ipv6.h>
#define ipv6_optlen(p) (((p)->hdrlen+1) << 3)
#define ipv6_authlen(p) (((p)->hdrlen+2) << 2)
/*
* This structure contains configuration options per IPv6 link.
*/


@ -110,7 +110,8 @@ struct frag_hdr {
__be32 identification;
};
#define IP6_MF 0x0001
#define IP6_MF 0x0001
#define IP6_OFFSET 0xFFF8
#include <net/sock.h>


@ -1035,7 +1035,6 @@ enum cg_proto_flags {
};
struct cg_proto {
void (*enter_memory_pressure)(struct sock *sk);
struct res_counter memory_allocated; /* Current allocated memory. */
struct percpu_counter sockets_allocated; /* Current number of sockets. */
int memory_pressure;
@ -1155,8 +1154,7 @@ static inline void sk_leave_memory_pressure(struct sock *sk)
struct proto *prot = sk->sk_prot;
for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
if (cg_proto->memory_pressure)
cg_proto->memory_pressure = 0;
cg_proto->memory_pressure = 0;
}
}
@ -1171,7 +1169,7 @@ static inline void sk_enter_memory_pressure(struct sock *sk)
struct proto *prot = sk->sk_prot;
for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
cg_proto->enter_memory_pressure(sk);
cg_proto->memory_pressure = 1;
}
sk->sk_prot->enter_memory_pressure(sk);


@ -426,6 +426,16 @@ netdev_features_t br_features_recompute(struct net_bridge *br,
int br_handle_frame_finish(struct sk_buff *skb);
rx_handler_result_t br_handle_frame(struct sk_buff **pskb);
static inline bool br_rx_handler_check_rcu(const struct net_device *dev)
{
return rcu_dereference(dev->rx_handler) == br_handle_frame;
}
static inline struct net_bridge_port *br_port_get_check_rcu(const struct net_device *dev)
{
return br_rx_handler_check_rcu(dev) ? br_port_get_rcu(dev) : NULL;
}
/* br_ioctl.c */
int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd,


@ -153,7 +153,7 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0)
goto err;
p = br_port_get_rcu(dev);
p = br_port_get_check_rcu(dev);
if (!p)
goto err;


@ -3584,6 +3584,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
skb->tstamp.tv64 = 0;
skb->pkt_type = PACKET_HOST;
skb->skb_iif = 0;
skb->local_df = 0;
skb_dst_drop(skb);
skb->mark = 0;
secpath_reset(skb);


@ -6,13 +6,6 @@
#include <linux/memcontrol.h>
#include <linux/module.h>
static void memcg_tcp_enter_memory_pressure(struct sock *sk)
{
if (sk->sk_cgrp->memory_pressure)
sk->sk_cgrp->memory_pressure = 1;
}
EXPORT_SYMBOL(memcg_tcp_enter_memory_pressure);
int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
/*


@ -237,6 +237,30 @@ struct packet_skb_cb {
static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);
static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
struct net_device *dev;
rcu_read_lock();
dev = rcu_dereference(po->cached_dev);
if (likely(dev))
dev_hold(dev);
rcu_read_unlock();
return dev;
}
static void packet_cached_dev_assign(struct packet_sock *po,
struct net_device *dev)
{
rcu_assign_pointer(po->cached_dev, dev);
}
static void packet_cached_dev_reset(struct packet_sock *po)
{
RCU_INIT_POINTER(po->cached_dev, NULL);
}
/* register_prot_hook must be invoked with the po->bind_lock held,
* or from a context in which asynchronous accesses to the packet
* socket is not possible (packet_create()).
@ -246,12 +270,10 @@ static void register_prot_hook(struct sock *sk)
struct packet_sock *po = pkt_sk(sk);
if (!po->running) {
if (po->fanout) {
if (po->fanout)
__fanout_link(sk, po);
} else {
else
dev_add_pack(&po->prot_hook);
rcu_assign_pointer(po->cached_dev, po->prot_hook.dev);
}
sock_hold(sk);
po->running = 1;
@ -270,12 +292,11 @@ static void __unregister_prot_hook(struct sock *sk, bool sync)
struct packet_sock *po = pkt_sk(sk);
po->running = 0;
if (po->fanout) {
if (po->fanout)
__fanout_unlink(sk, po);
} else {
else
__dev_remove_pack(&po->prot_hook);
RCU_INIT_POINTER(po->cached_dev, NULL);
}
__sock_put(sk);
@ -2061,19 +2082,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
return tp_len;
}
static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
struct net_device *dev;
rcu_read_lock();
dev = rcu_dereference(po->cached_dev);
if (dev)
dev_hold(dev);
rcu_read_unlock();
return dev;
}
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
{
struct sk_buff *skb;
@ -2090,7 +2098,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
mutex_lock(&po->pg_vec_lock);
if (saddr == NULL) {
if (likely(saddr == NULL)) {
dev = packet_cached_dev_get(po);
proto = po->num;
addr = NULL;
@ -2244,7 +2252,7 @@ static int packet_snd(struct socket *sock,
* Get and verify the address.
*/
if (saddr == NULL) {
if (likely(saddr == NULL)) {
dev = packet_cached_dev_get(po);
proto = po->num;
addr = NULL;
@ -2453,6 +2461,8 @@ static int packet_release(struct socket *sock)
spin_lock(&po->bind_lock);
unregister_prot_hook(sk, false);
packet_cached_dev_reset(po);
if (po->prot_hook.dev) {
dev_put(po->prot_hook.dev);
po->prot_hook.dev = NULL;
@ -2508,14 +2518,17 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc
spin_lock(&po->bind_lock);
unregister_prot_hook(sk, true);
po->num = protocol;
po->prot_hook.type = protocol;
if (po->prot_hook.dev)
dev_put(po->prot_hook.dev);
po->prot_hook.dev = dev;
po->prot_hook.dev = dev;
po->ifindex = dev ? dev->ifindex : 0;
packet_cached_dev_assign(po, dev);
if (protocol == 0)
goto out_unlock;
@ -2628,7 +2641,8 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
po = pkt_sk(sk);
sk->sk_family = PF_PACKET;
po->num = proto;
RCU_INIT_POINTER(po->cached_dev, NULL);
packet_cached_dev_reset(po);
sk->sk_destruct = packet_sock_destruct;
sk_refcnt_debug_inc(sk);
@ -3339,6 +3353,7 @@ static int packet_notifier(struct notifier_block *this,
sk->sk_error_report(sk);
}
if (msg == NETDEV_UNREGISTER) {
packet_cached_dev_reset(po);
po->ifindex = -1;
if (po->prot_hook.dev)
dev_put(po->prot_hook.dev);


@ -270,6 +270,16 @@ int tcf_register_action(struct tc_action_ops *act)
{
struct tc_action_ops *a, **ap;
/* Must supply act, dump, cleanup and init */
if (!act->act || !act->dump || !act->cleanup || !act->init)
return -EINVAL;
/* Supply defaults */
if (!act->lookup)
act->lookup = tcf_hash_search;
if (!act->walk)
act->walk = tcf_generic_walker;
write_lock(&act_mod_lock);
for (ap = &act_base; (a = *ap) != NULL; ap = &a->next) {
if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
@ -381,7 +391,7 @@ int tcf_action_exec(struct sk_buff *skb, const struct tc_action *act,
}
while ((a = act) != NULL) {
repeat:
if (a->ops && a->ops->act) {
if (a->ops) {
ret = a->ops->act(skb, a, res);
if (TC_MUNGED & skb->tc_verd) {
/* copied already, allow trampling */
@ -405,7 +415,7 @@ void tcf_action_destroy(struct tc_action *act, int bind)
struct tc_action *a;
for (a = act; a; a = act) {
if (a->ops && a->ops->cleanup) {
if (a->ops) {
if (a->ops->cleanup(a, bind) == ACT_P_DELETED)
module_put(a->ops->owner);
act = act->next;
@ -424,7 +434,7 @@ tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
int err = -EINVAL;
if (a->ops == NULL || a->ops->dump == NULL)
if (a->ops == NULL)
return err;
return a->ops->dump(skb, a, bind, ref);
}
@ -436,7 +446,7 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
if (a->ops == NULL || a->ops->dump == NULL)
if (a->ops == NULL)
return err;
if (nla_put_string(skb, TCA_KIND, a->ops->kind))
@ -723,8 +733,6 @@ tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 portid)
a->ops = tc_lookup_action(tb[TCA_ACT_KIND]);
if (a->ops == NULL)
goto err_free;
if (a->ops->lookup == NULL)
goto err_mod;
err = -ENOENT;
if (a->ops->lookup(a, index) == 0)
goto err_mod;
@ -1084,12 +1092,6 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
memset(&a, 0, sizeof(struct tc_action));
a.ops = a_o;
if (a_o->walk == NULL) {
WARN(1, "tc_dump_action: %s !capable of dumping table\n",
a_o->kind);
goto out_module_put;
}
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
cb->nlh->nlmsg_type, sizeof(*t), 0);
if (!nlh)
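Since tcf_register_action() now insists on act, dump, cleanup and init, and fills in tcf_hash_search()/tcf_generic_walker() when .lookup or .walk are left unset, action modules can register a smaller ops table; the per-action hunks below drop exactly those two fields. A hypothetical minimal sketch (all "foo" names invented):

static struct tc_action_ops act_foo_ops = {
	.kind		= "foo",
	.hinfo		= &foo_hash_info,	/* hypothetical hash table */
	.type		= TCA_ACT_FOO,		/* hypothetical action id */
	.owner		= THIS_MODULE,
	.act		= tcf_foo,		/* mandatory */
	.dump		= tcf_foo_dump,		/* mandatory */
	.cleanup	= tcf_foo_cleanup,	/* mandatory */
	.init		= tcf_foo_init,		/* mandatory */
	/* .lookup and .walk left unset: tcf_register_action() now supplies
	 * tcf_hash_search() and tcf_generic_walker() as defaults.
	 */
};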


@ -585,9 +585,7 @@ static struct tc_action_ops act_csum_ops = {
.act = tcf_csum,
.dump = tcf_csum_dump,
.cleanup = tcf_csum_cleanup,
.lookup = tcf_hash_search,
.init = tcf_csum_init,
.walk = tcf_generic_walker
};
MODULE_DESCRIPTION("Checksum updating actions");


@ -206,9 +206,7 @@ static struct tc_action_ops act_gact_ops = {
.act = tcf_gact,
.dump = tcf_gact_dump,
.cleanup = tcf_gact_cleanup,
.lookup = tcf_hash_search,
.init = tcf_gact_init,
.walk = tcf_generic_walker
};
MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");


@ -298,9 +298,7 @@ static struct tc_action_ops act_ipt_ops = {
.act = tcf_ipt,
.dump = tcf_ipt_dump,
.cleanup = tcf_ipt_cleanup,
.lookup = tcf_hash_search,
.init = tcf_ipt_init,
.walk = tcf_generic_walker
};
static struct tc_action_ops act_xt_ops = {
@ -312,9 +310,7 @@ static struct tc_action_ops act_xt_ops = {
.act = tcf_ipt,
.dump = tcf_ipt_dump,
.cleanup = tcf_ipt_cleanup,
.lookup = tcf_hash_search,
.init = tcf_ipt_init,
.walk = tcf_generic_walker
};
MODULE_AUTHOR("Jamal Hadi Salim(2002-13)");


@ -271,9 +271,7 @@ static struct tc_action_ops act_mirred_ops = {
.act = tcf_mirred,
.dump = tcf_mirred_dump,
.cleanup = tcf_mirred_cleanup,
.lookup = tcf_hash_search,
.init = tcf_mirred_init,
.walk = tcf_generic_walker
};
MODULE_AUTHOR("Jamal Hadi Salim(2002)");


@ -308,9 +308,7 @@ static struct tc_action_ops act_nat_ops = {
.act = tcf_nat,
.dump = tcf_nat_dump,
.cleanup = tcf_nat_cleanup,
.lookup = tcf_hash_search,
.init = tcf_nat_init,
.walk = tcf_generic_walker
};
MODULE_DESCRIPTION("Stateless NAT actions");


@ -243,9 +243,7 @@ static struct tc_action_ops act_pedit_ops = {
.act = tcf_pedit,
.dump = tcf_pedit_dump,
.cleanup = tcf_pedit_cleanup,
.lookup = tcf_hash_search,
.init = tcf_pedit_init,
.walk = tcf_generic_walker
};
MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");


@ -407,7 +407,6 @@ static struct tc_action_ops act_police_ops = {
.act = tcf_act_police,
.dump = tcf_act_police_dump,
.cleanup = tcf_act_police_cleanup,
.lookup = tcf_hash_search,
.init = tcf_act_police_locate,
.walk = tcf_act_police_walker
};


@ -201,7 +201,6 @@ static struct tc_action_ops act_simp_ops = {
.dump = tcf_simp_dump,
.cleanup = tcf_simp_cleanup,
.init = tcf_simp_init,
.walk = tcf_generic_walker,
};
MODULE_AUTHOR("Jamal Hadi Salim(2005)");


@ -202,7 +202,6 @@ static struct tc_action_ops act_skbedit_ops = {
.dump = tcf_skbedit_dump,
.cleanup = tcf_skbedit_cleanup,
.init = tcf_skbedit_init,
.walk = tcf_generic_walker,
};
MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>");


@ -572,7 +572,7 @@ void sctp_transport_burst_limited(struct sctp_transport *t)
u32 old_cwnd = t->cwnd;
u32 max_burst_bytes;
if (t->burst_limited)
if (t->burst_limited || asoc->max_burst == 0)
return;
max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu);