Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
  [AX.25]: Use kzalloc
  [ATM] net/atm/clip.c: fix PROC_FS=n compile
  [PKT_SCHED]: act_api: Fix module leak while flushing actions
  [NET]: Fix IPv4/DECnet routing rule dumping
  [NET] gso: Fix up GSO packets with broken checksums
  [NET] gso: Add skb_is_gso
  [IRDA]: fix drivers/net/irda/ali-ircc.c:ali_ircc_init()
  [ATM]: fix possible recursive locking in skb_migrate()
  [ATM]: Typo in drivers/atm/Kconfig...
  [TG3]: add amd8131 to "write reorder" chipsets
  [NET]: Fix network device interface printk message priority
Linus Torvalds 2006-07-09 15:50:41 -07:00
commit 09075ef0fd
37 changed files with 242 additions and 91 deletions

@@ -398,7 +398,7 @@ config ATM_FORE200E_USE_TASKLET
default n
help
This defers work to be done by the interrupt handler to a
- tasklet instead of hanlding everything at interrupt time. This
+ tasklet instead of handling everything at interrupt time. This
may improve the responsive of the host.
config ATM_FORE200E_TX_RETRY

@@ -1639,7 +1639,7 @@ bnx2_tx_int(struct bnx2 *bp)
skb = tx_buf->skb;
#ifdef BCM_TSO
/* partial BD completions possible with TSO packets */
- if (skb_shinfo(skb)->gso_size) {
+ if (skb_is_gso(skb)) {
u16 last_idx, last_ring_idx;
last_idx = sw_cons +

@@ -1417,7 +1417,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct cpl_tx_pkt *cpl;
#ifdef NETIF_F_TSO
- if (skb_shinfo(skb)->gso_size) {
+ if (skb_is_gso(skb)) {
int eth_type;
struct cpl_tx_pkt_lso *hdr;

@@ -2524,7 +2524,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
int err;
- if (skb_shinfo(skb)->gso_size) {
+ if (skb_is_gso(skb)) {
if (skb_header_cloned(skb)) {
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (err)
@@ -2649,7 +2649,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
* tso gets written back prematurely before the data is fully
* DMA'd to the controller */
if (!skb->data_len && tx_ring->last_tx_tso &&
- !skb_shinfo(skb)->gso_size) {
+ !skb_is_gso(skb)) {
tx_ring->last_tx_tso = 0;
size -= 4;
}
@@ -2937,8 +2937,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
#ifdef NETIF_F_TSO
/* Controller Erratum workaround */
- if (!skb->data_len && tx_ring->last_tx_tso &&
- !skb_shinfo(skb)->gso_size)
+ if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
count++;
#endif

@@ -1495,7 +1495,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
np->tx_skbuff[nr] = skb;
#ifdef NETIF_F_TSO
- if (skb_shinfo(skb)->gso_size)
+ if (skb_is_gso(skb))
tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
else
#endif

@@ -146,7 +146,7 @@ static int __init ali_ircc_init(void)
{
ali_chip_t *chip;
chipio_t info;
- int ret = -ENODEV;
+ int ret;
int cfg, cfg_base;
int reg, revision;
int i = 0;
@@ -160,6 +160,7 @@ static int __init ali_ircc_init(void)
return ret;
}
+ ret = -ENODEV;
/* Probe for all the ALi chipsets we know about */
for (chip= chips; chip->name; chip++, i++)

@@ -1173,7 +1173,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
uint16_t ipcse, tucse, mss;
int err;
- if(likely(skb_shinfo(skb)->gso_size)) {
+ if (likely(skb_is_gso(skb))) {
if (skb_header_cloned(skb)) {
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (err)

@@ -139,7 +139,7 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
#endif
#ifdef LOOPBACK_TSO
- if (skb_shinfo(skb)->gso_size) {
+ if (skb_is_gso(skb)) {
BUG_ON(skb->protocol != htons(ETH_P_IP));
BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);

@@ -2116,7 +2116,7 @@ abort_linearize:
}
idx = (idx + 1) & tx->mask;
} while (idx != last_idx);
- if (skb_shinfo(skb)->gso_size) {
+ if (skb_is_gso(skb)) {
printk(KERN_ERR
"myri10ge: %s: TSO but wanted to linearize?!?!?\n",
mgp->dev->name);

@@ -1159,7 +1159,7 @@ static unsigned tx_le_req(const struct sk_buff *skb)
count = sizeof(dma_addr_t) / sizeof(u32);
count += skb_shinfo(skb)->nr_frags * count;
- if (skb_shinfo(skb)->gso_size)
+ if (skb_is_gso(skb))
++count;
if (skb->ip_summed == CHECKSUM_HW)

@@ -10078,6 +10078,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
static struct pci_device_id write_reorder_chipsets[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
PCI_DEVICE_ID_AMD_FE_GATE_700C) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD,
+ PCI_DEVICE_ID_AMD_8131_BRIDGE) },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA,
PCI_DEVICE_ID_VIA_8385_0) },
{ },

@@ -805,7 +805,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
* If problems develop with TSO, check this first.
*/
numDesc = skb_shinfo(skb)->nr_frags + 1;
- if(skb_tso_size(skb))
+ if (skb_is_gso(skb))
numDesc++;
/* When checking for free space in the ring, we need to also
@@ -845,7 +845,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
TYPHOON_TX_PF_VLAN_TAG_SHIFT);
}
- if(skb_tso_size(skb)) {
+ if (skb_is_gso(skb)) {
first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
first_txd->numDesc++;

@@ -4457,7 +4457,7 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
queue = card->qdio.out_qs
[qeth_get_priority_queue(card, skb, ipv, cast_type)];
- if (skb_shinfo(skb)->gso_size)
+ if (skb_is_gso(skb))
large_send = card->options.large_send;
/*are we able to do TSO ? If so ,prepare and send it from here */

@@ -549,6 +549,7 @@ struct packet_type {
struct net_device *);
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
int features);
+ int (*gso_send_check)(struct sk_buff *skb);
void *af_packet_priv;
struct list_head list;
};
@@ -1001,13 +1002,14 @@ static inline int net_gso_ok(int features, int gso_type)
static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
- return net_gso_ok(features, skb_shinfo(skb)->gso_size ?
- skb_shinfo(skb)->gso_type : 0);
+ return net_gso_ok(features, skb_shinfo(skb)->gso_type);
}
static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
- return !skb_gso_ok(skb, dev->features);
+ return skb_is_gso(skb) &&
+ (!skb_gso_ok(skb, dev->features) ||
+ unlikely(skb->ip_summed != CHECKSUM_HW));
}
#endif /* __KERNEL__ */
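
The rewritten netif_needs_gso() widens the software-GSO fallback: it now fires only for GSO packets, but for those it also catches skbs whose checksum is not in the CHECKSUM_HW state, since only checksums left in that partial state can be fixed up by the GSO code. A plain-C restatement of the predicate (a sketch, not kernel code; the three flags stand in for skb_is_gso(), skb_gso_ok() and the ip_summed test):

#include <stdbool.h>

/* Software GSO is needed only for GSO packets, and then only if the
 * device cannot segment this gso_type itself or the checksum is not
 * in the partial (CHECKSUM_HW) state the GSO paths expect. */
static bool needs_soft_gso(bool is_gso, bool dev_can_gso, bool csum_is_hw)
{
	return is_gso && (!dev_can_gso || !csum_is_hw);
}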

@@ -1455,5 +1455,10 @@ static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif
+ static inline int skb_is_gso(const struct sk_buff *skb)
+ {
+ return skb_shinfo(skb)->gso_size;
+ }
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
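
This five-line helper is what most of the driver hunks above switch to: one named predicate instead of open-coded skb_shinfo(skb)->gso_size tests scattered across every driver. A minimal userspace sketch of the same refactor (hypothetical pkt types, mirroring the shape of the skb fields):

#include <stdbool.h>

struct pkt_shared_info {
	unsigned short gso_size;	/* 0 means "not a GSO packet" */
};

struct pkt {
	struct pkt_shared_info shinfo;
};

/* Callers write pkt_is_gso(p) instead of p->shinfo.gso_size != 0,
 * so the definition of "is GSO" lives in exactly one place. */
static inline bool pkt_is_gso(const struct pkt *p)
{
	return p->shinfo.gso_size != 0;
}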

@@ -36,6 +36,7 @@
struct net_protocol {
int (*handler)(struct sk_buff *skb);
void (*err_handler)(struct sk_buff *skb, u32 info);
+ int (*gso_send_check)(struct sk_buff *skb);
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
int features);
int no_policy;
@@ -51,6 +52,7 @@ struct inet6_protocol
int type, int code, int offset,
__u32 info);
+ int (*gso_send_check)(struct sk_buff *skb);
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
int features);

@@ -1086,6 +1086,7 @@ extern struct request_sock_ops tcp_request_sock_ops;
extern int tcp_v4_destroy_sock(struct sock *sk);
+ extern int tcp_v4_gso_send_check(struct sk_buff *skb);
extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
#ifdef CONFIG_PROC_FS

@@ -962,7 +962,6 @@ static struct file_operations arp_seq_fops = {
static int __init atm_clip_init(void)
{
- struct proc_dir_entry *p;
neigh_table_init_no_netlink(&clip_tbl);
clip_tbl_hook = &clip_tbl;
@@ -972,9 +971,15 @@ static int __init atm_clip_init(void)
setup_timer(&idle_timer, idle_timer_check, 0);
- p = create_proc_entry("arp", S_IRUGO, atm_proc_root);
- if (p)
- p->proc_fops = &arp_seq_fops;
+ #ifdef CONFIG_PROC_FS
+ {
+ struct proc_dir_entry *p;
+ p = create_proc_entry("arp", S_IRUGO, atm_proc_root);
+ if (p)
+ p->proc_fops = &arp_seq_fops;
+ }
+ #endif
return 0;
}

@@ -25,22 +25,27 @@
/*
* skb_migrate appends the list at "from" to "to", emptying "from" in the
* process. skb_migrate is atomic with respect to all other skb operations on
- * "from" and "to". Note that it locks both lists at the same time, so beware
- * of potential deadlocks.
+ * "from" and "to". Note that it locks both lists at the same time, so to deal
+ * with the lock ordering, the locks are taken in address order.
*
* This function should live in skbuff.c or skbuff.h.
*/
- void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to)
+ void skb_migrate(struct sk_buff_head *from, struct sk_buff_head *to)
{
unsigned long flags;
struct sk_buff *skb_from = (struct sk_buff *) from;
struct sk_buff *skb_to = (struct sk_buff *) to;
struct sk_buff *prev;
- spin_lock_irqsave(&from->lock,flags);
- spin_lock(&to->lock);
+ if ((unsigned long) from < (unsigned long) to) {
+ spin_lock_irqsave(&from->lock, flags);
+ spin_lock_nested(&to->lock, SINGLE_DEPTH_NESTING);
+ } else {
+ spin_lock_irqsave(&to->lock, flags);
+ spin_lock_nested(&from->lock, SINGLE_DEPTH_NESTING);
+ }
prev = from->prev;
from->next->prev = to->prev;
prev->next = skb_to;
@@ -51,7 +56,7 @@ void skb_migrate(struct sk_buff_head *from, struct sk_buff_head *to)
from->prev = skb_from;
from->next = skb_from;
from->qlen = 0;
- spin_unlock_irqrestore(&from->lock,flags);
+ spin_unlock_irqrestore(&from->lock, flags);
}
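
Taking the two queue locks in address order is the standard cure for ABBA deadlock when two callers migrate in opposite directions; spin_lock_nested() merely tells lockdep that the second acquisition of the same lock class is intentional. A self-contained userspace sketch of the ordering trick (pthreads instead of spinlocks; names hypothetical):

#include <pthread.h>

struct queue {
	pthread_mutex_t lock;
	/* list head would live here */
};

/* Every caller agrees on one global order (lower address first), so
 * migrate(a, b) and migrate(b, a) can never deadlock each other. */
static void lock_pair(struct queue *a, struct queue *b)
{
	if ((unsigned long)a < (unsigned long)b) {
		pthread_mutex_lock(&a->lock);
		pthread_mutex_lock(&b->lock);
	} else {
		pthread_mutex_lock(&b->lock);
		pthread_mutex_lock(&a->lock);
	}
}

static void unlock_pair(struct queue *a, struct queue *b)
{
	/* Unlock order is irrelevant to deadlock avoidance. */
	pthread_mutex_unlock(&a->lock);
	pthread_mutex_unlock(&b->lock);
}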

@@ -486,10 +486,9 @@ ax25_cb *ax25_create_cb(void)
{
ax25_cb *ax25;
- if ((ax25 = kmalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL)
+ if ((ax25 = kzalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL)
return NULL;
- memset(ax25, 0x00, sizeof(*ax25));
atomic_set(&ax25->refcount, 1);
skb_queue_head_init(&ax25->write_queue);
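
kzalloc() folds the kmalloc()+memset() pair into one call, so the allocation size and the cleared size can no longer drift apart. The userspace analogue is calloc(); a hedged sketch of the before/after shape (hypothetical conn type):

#include <stdlib.h>
#include <string.h>

struct conn { int refcount; /* ... */ };

/* Before: allocate, then zero by hand; two sites must agree on the size. */
static struct conn *conn_create_old(void)
{
	struct conn *c = malloc(sizeof(*c));

	if (c == NULL)
		return NULL;
	memset(c, 0, sizeof(*c));
	return c;
}

/* After: one call returns zeroed memory, like kzalloc() in the kernel. */
static struct conn *conn_create(void)
{
	return calloc(1, sizeof(struct conn));
}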

@@ -55,15 +55,13 @@ void ax25_dev_device_up(struct net_device *dev)
{
ax25_dev *ax25_dev;
- if ((ax25_dev = kmalloc(sizeof(*ax25_dev), GFP_ATOMIC)) == NULL) {
+ if ((ax25_dev = kzalloc(sizeof(*ax25_dev), GFP_ATOMIC)) == NULL) {
printk(KERN_ERR "AX.25: ax25_dev_device_up - out of memory\n");
return;
}
ax25_unregister_sysctl();
- memset(ax25_dev, 0x00, sizeof(*ax25_dev));
dev->ax25_ptr = ax25_dev;
ax25_dev->dev = dev;
dev_hold(dev);

@@ -35,7 +35,7 @@ static inline unsigned packet_length(const struct sk_buff *skb)
int br_dev_queue_push_xmit(struct sk_buff *skb)
{
/* drop mtu oversized packets except gso */
- if (packet_length(skb) > skb->dev->mtu && !skb_shinfo(skb)->gso_size)
+ if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
kfree_skb(skb);
else {
#ifdef CONFIG_BRIDGE_NETFILTER

@@ -761,7 +761,7 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
if (skb->protocol == htons(ETH_P_IP) &&
skb->len > skb->dev->mtu &&
- !skb_shinfo(skb)->gso_size)
+ !skb_is_gso(skb))
return ip_fragment(skb, br_dev_queue_push_xmit);
else
return br_dev_queue_push_xmit(skb);

@@ -1162,9 +1162,17 @@ int skb_checksum_help(struct sk_buff *skb, int inward)
unsigned int csum;
int ret = 0, offset = skb->h.raw - skb->data;
- if (inward) {
- skb->ip_summed = CHECKSUM_NONE;
- goto out;
- }
+ if (inward)
+ goto out_set_summed;
+ if (unlikely(skb_shinfo(skb)->gso_size)) {
+ static int warned;
+ WARN_ON(!warned);
+ warned = 1;
+ /* Let GSO fix up the checksum. */
+ goto out_set_summed;
+ }
if (skb_cloned(skb)) {
@@ -1181,6 +1189,8 @@ int skb_checksum_help(struct sk_buff *skb, int inward)
BUG_ON(skb->csum + 2 > offset);
*(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
+ out_set_summed:
+ skb->ip_summed = CHECKSUM_NONE;
out:
return ret;
@@ -1201,17 +1211,35 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
struct packet_type *ptype;
int type = skb->protocol;
+ int err;
BUG_ON(skb_shinfo(skb)->frag_list);
- BUG_ON(skb->ip_summed != CHECKSUM_HW);
skb->mac.raw = skb->data;
skb->mac_len = skb->nh.raw - skb->data;
__skb_pull(skb, skb->mac_len);
+ if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
+ static int warned;
+ WARN_ON(!warned);
+ warned = 1;
+ if (skb_header_cloned(skb) &&
+ (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+ return ERR_PTR(err);
+ }
rcu_read_lock();
list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
+ if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
+ err = ptype->gso_send_check(skb);
+ segs = ERR_PTR(err);
+ if (err || skb_gso_ok(skb, features))
+ break;
+ __skb_push(skb, skb->data - skb->nh.raw);
+ }
segs = ptype->gso_segment(skb, features);
break;
}
@@ -1727,7 +1755,7 @@ static int ing_filter(struct sk_buff *skb)
if (dev->qdisc_ingress) {
__u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd);
if (MAX_RED_LOOP < ttl++) {
- printk("Redir loop detected Dropping packet (%s->%s)\n",
+ printk(KERN_WARNING "Redir loop detected Dropping packet (%s->%s)\n",
skb->input_dev->name, skb->dev->name);
return TC_ACT_SHOT;
}
@@ -2922,7 +2950,7 @@ int register_netdevice(struct net_device *dev)
/* Fix illegal SG+CSUM combinations. */
if ((dev->features & NETIF_F_SG) &&
!(dev->features & NETIF_F_ALL_CSUM)) {
- printk("%s: Dropping NETIF_F_SG since no checksum feature.\n",
+ printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
dev->name);
dev->features &= ~NETIF_F_SG;
}
@@ -2930,7 +2958,7 @@ int register_netdevice(struct net_device *dev)
/* TSO requires that SG is present as well. */
if ((dev->features & NETIF_F_TSO) &&
!(dev->features & NETIF_F_SG)) {
- printk("%s: Dropping NETIF_F_TSO since no SG feature.\n",
+ printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
dev->name);
dev->features &= ~NETIF_F_TSO;
}
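
Both new blocks in this file use the same warn-once idiom: a function-local static flag makes the WARN_ON() backtrace fire for the first offending packet only, instead of flooding the log. A userspace sketch of the idiom (fprintf standing in for WARN_ON):

#include <stdio.h>

static void report_bad_gso_skb(void)
{
	/* The static flag persists across calls, so only the first
	 * offender produces a report; like the kernel original this
	 * is racy under concurrency, which is fine for a warning. */
	static int warned;

	if (!warned)
		fprintf(stderr, "warning: GSO skb reached checksum helper\n");
	warned = 1;
}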

@@ -399,9 +399,10 @@ int dn_fib_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) {
if (idx < s_idx)
- continue;
+ goto next;
if (dn_fib_fill_rule(skb, r, cb, NLM_F_MULTI) < 0)
break;
+ next:
idx++;
}
rcu_read_unlock();
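
The rule-dump fix here (and the identical one in the IPv4 fib_rules hunk below) addresses an off-by-skip: `continue` jumped past the `idx++` at the bottom of the loop, so skipped entries never advanced the index and a netlink dump resumed at `s_idx` counted from the wrong place. A self-contained sketch of the corrected pattern (illustrative only):

#include <stdio.h>

/* Dump entries starting at s_idx. idx must advance for every entry,
 * including skipped ones, or the resume offset drifts. */
static void dump_from(const char *items[], int n, int s_idx)
{
	int idx = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (idx < s_idx)
			goto next;	/* 'continue' here would skip idx++ */
		printf("%d: %s\n", idx, items[i]);
next:
		idx++;
	}
}

int main(void)
{
	const char *rules[] = { "rule0", "rule1", "rule2", "rule3" };

	dump_from(rules, 4, 2);	/* resumed dump: prints rule2, rule3 */
	return 0;
}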

@@ -1097,6 +1097,40 @@ int inet_sk_rebuild_header(struct sock *sk)
EXPORT_SYMBOL(inet_sk_rebuild_header);
+ static int inet_gso_send_check(struct sk_buff *skb)
+ {
+ struct iphdr *iph;
+ struct net_protocol *ops;
+ int proto;
+ int ihl;
+ int err = -EINVAL;
+ if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
+ goto out;
+ iph = skb->nh.iph;
+ ihl = iph->ihl * 4;
+ if (ihl < sizeof(*iph))
+ goto out;
+ if (unlikely(!pskb_may_pull(skb, ihl)))
+ goto out;
+ skb->h.raw = __skb_pull(skb, ihl);
+ iph = skb->nh.iph;
+ proto = iph->protocol & (MAX_INET_PROTOS - 1);
+ err = -EPROTONOSUPPORT;
+ rcu_read_lock();
+ ops = rcu_dereference(inet_protos[proto]);
+ if (likely(ops && ops->gso_send_check))
+ err = ops->gso_send_check(skb);
+ rcu_read_unlock();
+ out:
+ return err;
+ }
static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
@@ -1162,6 +1196,7 @@ static struct net_protocol igmp_protocol = {
static struct net_protocol tcp_protocol = {
.handler = tcp_v4_rcv,
.err_handler = tcp_v4_err,
+ .gso_send_check = tcp_v4_gso_send_check,
.gso_segment = tcp_tso_segment,
.no_policy = 1,
};
@@ -1208,6 +1243,7 @@ static int ipv4_proc_init(void);
static struct packet_type ip_packet_type = {
.type = __constant_htons(ETH_P_IP),
.func = ip_rcv,
+ .gso_send_check = inet_gso_send_check,
.gso_segment = inet_gso_segment,
};

@@ -457,13 +457,13 @@ int inet_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
hlist_for_each_entry(r, node, &fib_rules, hlist) {
if (idx < s_idx)
- continue;
+ goto next;
if (inet_fill_rule(skb, r, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
RTM_NEWRULE, NLM_F_MULTI) < 0)
break;
+ next:
idx++;
}
rcu_read_unlock();

@@ -209,7 +209,7 @@ static inline int ip_finish_output(struct sk_buff *skb)
return dst_output(skb);
}
#endif
- if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size)
+ if (skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb))
return ip_fragment(skb, ip_finish_output2);
else
return ip_finish_output2(skb);
@@ -1095,7 +1095,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
while (size > 0) {
int i;
- if (skb_shinfo(skb)->gso_size)
+ if (skb_is_gso(skb))
len = size;
else {

@@ -496,6 +496,24 @@ void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
}
}
+ int tcp_v4_gso_send_check(struct sk_buff *skb)
+ {
+ struct iphdr *iph;
+ struct tcphdr *th;
+ if (!pskb_may_pull(skb, sizeof(*th)))
+ return -EINVAL;
+ iph = skb->nh.iph;
+ th = skb->h.th;
+ th->check = 0;
+ th->check = ~tcp_v4_check(th, skb->len, iph->saddr, iph->daddr, 0);
+ skb->csum = offsetof(struct tcphdr, check);
+ skb->ip_summed = CHECKSUM_HW;
+ return 0;
+ }
/*
* This routine will send an RST to the other tcp.
*
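
tcp_v4_gso_send_check() seeds th->check with the TCP pseudo-header sum, records where the checksum field sits, and marks the skb CHECKSUM_HW, so the device (or the software GSO path) only has to finish the one's-complement sum over the segment data. For reference, a self-contained version of that fold-and-complement arithmetic (an illustration, not the kernel's csum implementation):

#include <stdint.h>
#include <stddef.h>

/* Internet checksum: one's-complement sum over big-endian 16-bit
 * words, folded to 16 bits and complemented. 'seed' plays the role
 * of the pseudo-header sum left behind in th->check. */
static uint16_t csum_finish(const uint8_t *data, size_t len, uint32_t seed)
{
	uint32_t sum = seed;

	while (len > 1) {
		sum += ((uint32_t)data[0] << 8) | data[1];
		data += 2;
		len -= 2;
	}
	if (len)			/* odd trailing byte */
		sum += (uint32_t)data[0] << 8;
	while (sum >> 16)		/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}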

@@ -134,7 +134,7 @@ static int xfrm4_output_finish(struct sk_buff *skb)
}
#endif
- if (!skb_shinfo(skb)->gso_size)
+ if (!skb_is_gso(skb))
return xfrm4_output_finish2(skb);
skb->protocol = htons(ETH_P_IP);

@@ -147,7 +147,7 @@ static int ip6_output2(struct sk_buff *skb)
int ip6_output(struct sk_buff *skb)
{
- if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) ||
+ if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) ||
dst_allfrag(skb->dst))
return ip6_fragment(skb, ip6_output2);
else
@@ -229,7 +229,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
skb->priority = sk->sk_priority;
mtu = dst_mtu(dst);
- if ((skb->len <= mtu) || ipfragok || skb_shinfo(skb)->gso_size) {
+ if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) {
IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
dst_output);

@@ -57,12 +57,71 @@
DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics) __read_mostly;
+ static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb,
+ int proto)
+ {
+ struct inet6_protocol *ops = NULL;
+ for (;;) {
+ struct ipv6_opt_hdr *opth;
+ int len;
+ if (proto != NEXTHDR_HOP) {
+ ops = rcu_dereference(inet6_protos[proto]);
+ if (unlikely(!ops))
+ break;
+ if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
+ break;
+ }
+ if (unlikely(!pskb_may_pull(skb, 8)))
+ break;
+ opth = (void *)skb->data;
+ len = opth->hdrlen * 8 + 8;
+ if (unlikely(!pskb_may_pull(skb, len)))
+ break;
+ proto = opth->nexthdr;
+ __skb_pull(skb, len);
+ }
+ return ops;
+ }
+ static int ipv6_gso_send_check(struct sk_buff *skb)
+ {
+ struct ipv6hdr *ipv6h;
+ struct inet6_protocol *ops;
+ int err = -EINVAL;
+ if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
+ goto out;
+ ipv6h = skb->nh.ipv6h;
+ __skb_pull(skb, sizeof(*ipv6h));
+ err = -EPROTONOSUPPORT;
+ rcu_read_lock();
+ ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
+ if (likely(ops && ops->gso_send_check)) {
+ skb->h.raw = skb->data;
+ err = ops->gso_send_check(skb);
+ }
+ rcu_read_unlock();
+ out:
+ return err;
+ }
static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct ipv6hdr *ipv6h;
struct inet6_protocol *ops;
int proto;
if (unlikely(skb_shinfo(skb)->gso_type &
~(SKB_GSO_UDP |
@@ -76,42 +135,15 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
goto out;
ipv6h = skb->nh.ipv6h;
proto = ipv6h->nexthdr;
__skb_pull(skb, sizeof(*ipv6h));
segs = ERR_PTR(-EPROTONOSUPPORT);
rcu_read_lock();
- for (;;) {
- struct ipv6_opt_hdr *opth;
- int len;
- if (proto != NEXTHDR_HOP) {
- ops = rcu_dereference(inet6_protos[proto]);
- if (unlikely(!ops))
- goto unlock;
- if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
- break;
- }
- if (unlikely(!pskb_may_pull(skb, 8)))
- goto unlock;
- opth = (void *)skb->data;
- len = opth->hdrlen * 8 + 8;
- if (unlikely(!pskb_may_pull(skb, len)))
- goto unlock;
- proto = opth->nexthdr;
- __skb_pull(skb, len);
- }
- skb->h.raw = skb->data;
- if (likely(ops->gso_segment))
+ ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
+ if (likely(ops && ops->gso_segment)) {
+ skb->h.raw = skb->data;
segs = ops->gso_segment(skb, features);
- unlock:
+ }
rcu_read_unlock();
if (unlikely(IS_ERR(segs)))
@@ -130,6 +162,7 @@ out:
static struct packet_type ipv6_packet_type = {
.type = __constant_htons(ETH_P_IPV6),
.func = ipv6_rcv,
+ .gso_send_check = ipv6_gso_send_check,
.gso_segment = ipv6_gso_segment,
};

@@ -552,6 +552,24 @@ static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
}
}
+ static int tcp_v6_gso_send_check(struct sk_buff *skb)
+ {
+ struct ipv6hdr *ipv6h;
+ struct tcphdr *th;
+ if (!pskb_may_pull(skb, sizeof(*th)))
+ return -EINVAL;
+ ipv6h = skb->nh.ipv6h;
+ th = skb->h.th;
+ th->check = 0;
+ th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
+ IPPROTO_TCP, 0);
+ skb->csum = offsetof(struct tcphdr, check);
+ skb->ip_summed = CHECKSUM_HW;
+ return 0;
+ }
static void tcp_v6_send_reset(struct sk_buff *skb)
{
@@ -1603,6 +1621,7 @@ struct proto tcpv6_prot = {
static struct inet6_protocol tcpv6_protocol = {
.handler = tcp_v6_rcv,
.err_handler = tcp_v6_err,
+ .gso_send_check = tcp_v6_gso_send_check,
.gso_segment = tcp_tso_segment,
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

@@ -122,7 +122,7 @@ static int xfrm6_output_finish(struct sk_buff *skb)
{
struct sk_buff *segs;
- if (!skb_shinfo(skb)->gso_size)
+ if (!skb_is_gso(skb))
return xfrm6_output_finish2(skb);
skb->protocol = htons(ETH_P_IP);

@@ -1382,14 +1382,12 @@ static int __init nr_proto_init(void)
return -1;
}
- dev_nr = kmalloc(nr_ndevs * sizeof(struct net_device *), GFP_KERNEL);
+ dev_nr = kzalloc(nr_ndevs * sizeof(struct net_device *), GFP_KERNEL);
if (dev_nr == NULL) {
printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n");
return -1;
}
- memset(dev_nr, 0x00, nr_ndevs * sizeof(struct net_device *));
for (i = 0; i < nr_ndevs; i++) {
char name[IFNAMSIZ];
struct net_device *dev;

@@ -1490,14 +1490,13 @@ static int __init rose_proto_init(void)
rose_callsign = null_ax25_address;
- dev_rose = kmalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL);
+ dev_rose = kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL);
if (dev_rose == NULL) {
printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n");
rc = -ENOMEM;
goto out_proto_unregister;
}
- memset(dev_rose, 0x00, rose_ndevs * sizeof(struct net_device*));
for (i = 0; i < rose_ndevs; i++) {
struct net_device *dev;
char name[IFNAMSIZ];

@@ -602,8 +602,8 @@ static int tca_action_flush(struct rtattr *rta, struct nlmsghdr *n, u32 pid)
return err;
rtattr_failure:
- module_put(a->ops->owner);
nlmsg_failure:
+ module_put(a->ops->owner);
err_out:
err_out:
kfree_skb(skb);
kfree(a);
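
The module leak: a failure while building the netlink message jumped to nlmsg_failure, which fell through to err_out without dropping the module reference taken earlier. Moving the module_put() under nlmsg_failure makes every reference-holding path release it exactly once, with rtattr_failure still falling through into it. This is the usual C goto-ladder rule: each label undoes exactly what was acquired before the jumps that target it. A self-contained sketch:

#include <stdlib.h>

/* Each label releases exactly the resources held by the paths that
 * jump to it; falling through the labels unwinds in reverse order. */
static int build_and_send(void)
{
	char *buf, *msg;

	buf = malloc(64);
	if (buf == NULL)
		goto err_out;	/* nothing held yet */

	msg = malloc(128);
	if (msg == NULL)
		goto free_buf;	/* buf is held here */

	/* ... fill buf and msg, send ... */
	free(msg);
	free(buf);
	return 0;

free_buf:
	free(buf);
err_out:
	return -1;
}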