
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
  [IPV6] addrconf: Fix IPv6 on tuntap tunnels
  [TCP]: Add missing break to TCP option parsing code
  [SCTP] Don't disable PMTU discovery when mtu is small
  [SCTP] Flag a pmtu change request
  [SCTP] Update pmtu handling to be similar to tcp
  [SCTP] Fix leak in sctp_getsockopt_local_addrs when copy_to_user fails
  [SCTP]: Allow unspecified port in sctp_bindx()
  [SCTP]: Correctly set daddr for IPv6 sockets during peeloff
  [TCP]: Set initial_ssthresh default to zero in Cubic and BIC.
  [TCP]: Fix left_out setting during FRTO
  [TCP]: Disable TSO if MD5SIG is enabled.
  [PPP_MPPE]: Fix "osize too small" check.
  [PATCH] mac80211: Don't stop tx queue on master device while scanning.
  [PATCH] mac80211: fix debugfs tx power reduction output
  [PATCH] cfg80211: fix signed macaddress in sysfs
  [IrDA]: f-timer reloading when sending rejected frames.
  [IrDA]: Fix Rx/Tx path race.
Linus Torvalds 2007-06-14 15:08:50 -07:00
commit 1e0e76cf13
19 changed files with 150 additions and 81 deletions


@@ -493,14 +493,14 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
/*
* Make sure we have enough room to decrypt the packet.
* Note that for our test we only subtract 1 byte whereas in
* mppe_compress() we added 2 bytes (+MPPE_OVHD);
* this is to account for possible PFC.
* To account for possible PFC we should only subtract 1
* byte whereas in mppe_compress() we added 2 bytes (+MPPE_OVHD);
* However, we assume no PFC, thus subtracting 2 bytes.
*/
if (osize < isize - MPPE_OVHD - 1) {
if (osize < isize - MPPE_OVHD - 2) {
printk(KERN_DEBUG "mppe_decompress[%d]: osize too small! "
"(have: %d need: %d)\n", state->unit,
osize, isize - MPPE_OVHD - 1);
osize, isize - MPPE_OVHD - 2);
return DECOMP_ERROR;
}
osize = isize - MPPE_OVHD - 2; /* assume no PFC */
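A self-contained illustration of the corrected bound (not the driver code; the names and main() are invented for the example): mppe_compress() grows the packet by MPPE_OVHD bytes, and with no PFC the decrypted data needs isize - MPPE_OVHD - 2 bytes of output, so the old "- 1" check demanded one byte more than the code actually uses and rejected exact-fit buffers.

#include <stdio.h>

#define MPPE_OVHD 2	/* bytes added by mppe_compress() */

/* Return 1 if an output buffer of osize bytes can hold the decrypted
 * packet, assuming no PFC, mirroring the fixed check in the hunk above. */
static int room_to_decrypt(int isize, int osize)
{
	int need = isize - MPPE_OVHD - 2;

	if (osize < need) {
		fprintf(stderr, "osize too small (have %d, need %d)\n",
			osize, need);
		return 0;
	}
	return 1;
}

int main(void)
{
	/* Exact fit: accepted now, wrongly rejected by the old "- 1" bound. */
	printf("%d\n", room_to_decrypt(100, 100 - MPPE_OVHD - 2));
	return 0;
}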


@@ -289,4 +289,21 @@ static inline void irlap_clear_disconnect(struct irlap_cb *self)
self->disconnect_pending = FALSE;
}
/*
* Function irlap_next_state (self, state)
*
* Switches state and provides debug information
*
*/
static inline void irlap_next_state(struct irlap_cb *self, IRLAP_STATE state)
{
/*
if (!self || self->magic != LAP_MAGIC)
return;
IRDA_DEBUG(4, "next LAP state = %s\n", irlap_state[state]);
*/
self->state = state;
}
#endif


@@ -503,6 +503,13 @@ static inline int sctp_frag_point(const struct sctp_sock *sp, int pmtu)
return frag;
}
static inline void sctp_assoc_pending_pmtu(struct sctp_association *asoc)
{
sctp_assoc_sync_pmtu(asoc);
asoc->pmtu_pending = 0;
}
/* Walk through a list of TLV parameters. Don't trust the
* individual parameter lengths and instead depend on
* the chunk length to indicate when to stop. Make sure
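The comment above describes a defensive TLV walk; a generic, self-contained sketch of that pattern follows (illustrative types and names, not the kernel's own parameter-walking macro): every step is bounded by the enclosing chunk length, never by a parameter's own length field alone.

#include <stddef.h>

struct tlv_hdr {
	unsigned short type;
	unsigned short length;	/* covers this header; network byte order on the wire */
};

static void walk_tlvs(const unsigned char *chunk, size_t chunk_len)
{
	size_t off = 0;

	while (off + sizeof(struct tlv_hdr) <= chunk_len) {
		const struct tlv_hdr *p = (const void *)(chunk + off);
		size_t len = p->length;	/* real code would ntohs() this */

		if (len < sizeof(struct tlv_hdr) || off + len > chunk_len)
			break;	/* bogus length: stop at the chunk boundary */
		/* ... process parameter p ... */
		off += (len + 3) & ~(size_t)3;	/* parameters are 4-byte padded */
	}
}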


@@ -912,6 +912,9 @@ struct sctp_transport {
*/
__u16 pathmaxrxt;
/* is the Path MTU update pending on this transport */
__u8 pmtu_pending;
/* PMTU : The current known path MTU. */
__u32 pathmtu;
@@ -1006,6 +1009,7 @@ void sctp_transport_raise_cwnd(struct sctp_transport *, __u32, __u32);
void sctp_transport_lower_cwnd(struct sctp_transport *, sctp_lower_cwnd_t);
unsigned long sctp_transport_timeout(struct sctp_transport *);
void sctp_transport_reset(struct sctp_transport *);
void sctp_transport_update_pmtu(struct sctp_transport *, u32);
/* This is the structure we use to queue packets as they come into
@@ -1565,6 +1569,9 @@ struct sctp_association {
*/
__u16 pathmaxrxt;
/* Flag that path mtu update is pending */
__u8 pmtu_pending;
/* Association : The smallest PMTU discovered for all of the
* PMTU : peer's transport addresses.
*/


@@ -29,7 +29,7 @@ static int fast_convergence = 1;
static int max_increment = 16;
static int low_window = 14;
static int beta = 819; /* = 819/1024 (BICTCP_BETA_SCALE) */
static int initial_ssthresh = 100;
static int initial_ssthresh;
static int smooth_part = 20;
module_param(fast_convergence, int, 0644);


@@ -29,7 +29,7 @@
static int fast_convergence __read_mostly = 1;
static int max_increment __read_mostly = 16;
static int beta __read_mostly = 819; /* = 819/1024 (BICTCP_BETA_SCALE) */
static int initial_ssthresh __read_mostly = 100;
static int initial_ssthresh __read_mostly;
static int bic_scale __read_mostly = 41;
static int tcp_friendliness __read_mostly = 1;
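Both the BIC and CUBIC hunks change the module default from 100 to 0. A minimal sketch of how such a knob is conventionally consumed at connection init, assuming zero means "leave the socket's default ssthresh alone" (illustrative struct and function, not the actual init code):

struct fake_tcp_sock {
	unsigned int snd_ssthresh;
};

static int initial_ssthresh;	/* now defaults to 0 */

static void cc_init(struct fake_tcp_sock *tp)
{
	if (initial_ssthresh)
		tp->snd_ssthresh = initial_ssthresh;	/* clamp only when configured */
	/* with the new default of 0 the connection keeps its existing ssthresh */
}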


@@ -2037,7 +2037,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
tp->left_out = tp->sacked_out;
tcp_sync_left_out(tp);
if (tp->retrans_out == 0)
tp->retrans_stamp = 0;
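For context, an approximate sketch of the helper the fix switches to, as it looked in trees of this era (reconstructed from memory, so treat the exact body as an assumption): unlike the plain assignment it replaces, it also counts lost_out, which is what the FRTO accounting needed.

/* Approximate 2.6.22-era helper; the key point is the lost_out term. */
static inline void tcp_sync_left_out(struct tcp_sock *tp)
{
	tp->left_out = tp->sacked_out + tp->lost_out;
}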
@@ -2932,6 +2932,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
opt_rx->sack_ok) {
TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
}
break;
#ifdef CONFIG_TCP_MD5SIG
case TCPOPT_MD5SIG:
/*


@@ -878,6 +878,7 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
kfree(newkey);
return -ENOMEM;
}
sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
}
if (tcp_alloc_md5sig_pool() == NULL) {
kfree(newkey);
@@ -1007,7 +1008,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
return -EINVAL;
tp->md5sig_info = p;
sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
}
newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);


@@ -2154,6 +2154,15 @@ static void addrconf_dev_config(struct net_device *dev)
ASSERT_RTNL();
if ((dev->type != ARPHRD_ETHER) &&
(dev->type != ARPHRD_FDDI) &&
(dev->type != ARPHRD_IEEE802_TR) &&
(dev->type != ARPHRD_ARCNET) &&
(dev->type != ARPHRD_INFINIBAND)) {
/* Alas, we support only Ethernet autoconfiguration. */
return;
}
idev = addrconf_add_dev(dev);
if (idev == NULL)
return;
@@ -2241,36 +2250,16 @@ static void addrconf_ip6_tnl_config(struct net_device *dev)
ip6_tnl_add_linklocal(idev);
}
static int ipv6_hwtype(struct net_device *dev)
{
if ((dev->type == ARPHRD_ETHER) ||
(dev->type == ARPHRD_LOOPBACK) ||
(dev->type == ARPHRD_SIT) ||
(dev->type == ARPHRD_TUNNEL6) ||
(dev->type == ARPHRD_FDDI) ||
(dev->type == ARPHRD_IEEE802_TR) ||
(dev->type == ARPHRD_ARCNET) ||
(dev->type == ARPHRD_INFINIBAND))
return 1;
return 0;
}
static int addrconf_notify(struct notifier_block *this, unsigned long event,
void * data)
{
struct net_device *dev = (struct net_device *) data;
struct inet6_dev *idev;
struct inet6_dev *idev = __in6_dev_get(dev);
int run_pending = 0;
if (!ipv6_hwtype(dev))
return NOTIFY_OK;
idev = __in6_dev_get(dev);
switch(event) {
case NETDEV_REGISTER:
if (!idev) {
if (!idev && dev->mtu >= IPV6_MIN_MTU) {
idev = ipv6_add_dev(dev);
if (!idev)
printk(KERN_WARNING "IPv6: add_dev failed for %s\n",


@@ -590,6 +590,7 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
kfree(newkey);
return -ENOMEM;
}
sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
}
tcp_alloc_md5sig_pool();
if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
@@ -724,6 +725,7 @@ static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
return -ENOMEM;
tp->md5sig_info = p;
sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
}
newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);


@@ -316,23 +316,6 @@ void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event,
}
}
/*
* Function irlap_next_state (self, state)
*
* Switches state and provides debug information
*
*/
static inline void irlap_next_state(struct irlap_cb *self, IRLAP_STATE state)
{
/*
if (!self || self->magic != LAP_MAGIC)
return;
IRDA_DEBUG(4, "next LAP state = %s\n", irlap_state[state]);
*/
self->state = state;
}
/*
* Function irlap_state_ndm (event, skb, frame)
*
@@ -1086,7 +1069,6 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event,
} else {
/* Final packet of window */
irlap_send_data_primary_poll(self, skb);
irlap_next_state(self, LAP_NRM_P);
/*
* Make sure state machine does not try to send
@@ -1436,14 +1418,14 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
*/
self->remote_busy = FALSE;
/* Stop final timer */
del_timer(&self->final_timer);
/*
* Nr as expected?
*/
ret = irlap_validate_nr_received(self, info->nr);
if (ret == NR_EXPECTED) {
/* Stop final timer */
del_timer(&self->final_timer);
/* Update Nr received */
irlap_update_nr_received(self, info->nr);
@@ -1475,14 +1457,12 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event,
/* Resend rejected frames */
irlap_resend_rejected_frames(self, CMD_FRAME);
/* Final timer ??? Jean II */
irlap_start_final_timer(self, self->final_timeout * 2);
irlap_next_state(self, LAP_NRM_P);
} else if (ret == NR_INVALID) {
IRDA_DEBUG(1, "%s(), Received RR with "
"invalid nr !\n", __FUNCTION__);
del_timer(&self->final_timer);
irlap_next_state(self, LAP_RESET_WAIT);


@@ -798,16 +798,19 @@ void irlap_send_data_primary_poll(struct irlap_cb *self, struct sk_buff *skb)
self->vs = (self->vs + 1) % 8;
self->ack_required = FALSE;
irlap_next_state(self, LAP_NRM_P);
irlap_send_i_frame(self, tx_skb, CMD_FRAME);
} else {
IRDA_DEBUG(4, "%s(), sending unreliable frame\n", __FUNCTION__);
if (self->ack_required) {
irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME);
irlap_next_state(self, LAP_NRM_P);
irlap_send_rr_frame(self, CMD_FRAME);
self->ack_required = FALSE;
} else {
skb->data[1] |= PF_BIT;
irlap_next_state(self, LAP_NRM_P);
irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME);
}
}


@@ -112,7 +112,7 @@ DEBUGFS_READONLY_FILE(wep_iv, 20, "%#06x",
local->wep_iv & 0xffffff);
DEBUGFS_READONLY_FILE(tx_power_reduction, 20, "%d.%d dBm",
local->hw.conf.tx_power_reduction / 10,
local->hw.conf.tx_power_reduction & 10);
local->hw.conf.tx_power_reduction % 10);
DEBUGFS_READONLY_FILE(rate_ctrl_alg, 100, "%s",
local->rate_ctrl ? local->rate_ctrl->ops->name : "<unset>");
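A quick standalone demonstration of why the modulo is needed (not mac80211 code; the sample value is invented): tx_power_reduction is evidently stored in tenths of a dBm, so 125 should print as "12.5 dBm", while the old bitwise AND produces a stray digit.

#include <stdio.h>

int main(void)
{
	int tpr = 125;	/* tenths of a dBm */

	printf("fixed: %d.%d dBm\n", tpr / 10, tpr % 10);	/* 12.5 dBm */
	printf("buggy: %d.%d dBm\n", tpr / 10, tpr & 10);	/* 12.8 dBm */
	return 0;
}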


@@ -2592,11 +2592,17 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw)
read_lock(&local->sub_if_lock);
list_for_each_entry(sdata, &local->sub_if_list, list) {
/* No need to wake the master device. */
if (sdata->dev == local->mdev)
continue;
if (sdata->type == IEEE80211_IF_TYPE_STA) {
if (sdata->u.sta.associated)
ieee80211_send_nullfunc(local, sdata, 0);
ieee80211_sta_timer((unsigned long)sdata);
}
netif_wake_queue(sdata->dev);
}
read_unlock(&local->sub_if_lock);
@@ -2738,6 +2744,12 @@ static int ieee80211_sta_start_scan(struct net_device *dev,
read_lock(&local->sub_if_lock);
list_for_each_entry(sdata, &local->sub_if_list, list) {
/* Don't stop the master interface, otherwise we can't transmit
* probes! */
if (sdata->dev == local->mdev)
continue;
netif_stop_queue(sdata->dev);
if (sdata->type == IEEE80211_IF_TYPE_STA &&
sdata->u.sta.associated)


@@ -1231,6 +1231,10 @@ void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
/* Get the lowest pmtu of all the transports. */
list_for_each(pos, &asoc->peer.transport_addr_list) {
t = list_entry(pos, struct sctp_transport, transports);
if (t->pmtu_pending && t->dst) {
sctp_transport_update_pmtu(t, dst_mtu(t->dst));
t->pmtu_pending = 0;
}
if (!pmtu || (t->pathmtu < pmtu))
pmtu = t->pathmtu;
}


@@ -367,24 +367,18 @@ static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
struct sctp_transport *t, __u32 pmtu)
{
if (sock_owned_by_user(sk) || !t || (t->pathmtu == pmtu))
if (!t || (t->pathmtu == pmtu))
return;
if (sock_owned_by_user(sk)) {
asoc->pmtu_pending = 1;
t->pmtu_pending = 1;
return;
}
if (t->param_flags & SPP_PMTUD_ENABLE) {
if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
printk(KERN_WARNING "%s: Reported pmtu %d too low, "
"using default minimum of %d\n",
__FUNCTION__, pmtu,
SCTP_DEFAULT_MINSEGMENT);
/* Use default minimum segment size and disable
* pmtu discovery on this transport.
*/
t->pathmtu = SCTP_DEFAULT_MINSEGMENT;
t->param_flags = (t->param_flags & ~SPP_PMTUD) |
SPP_PMTUD_DISABLE;
} else {
t->pathmtu = pmtu;
}
/* Update transports view of the MTU */
sctp_transport_update_pmtu(t, pmtu);
/* Update association pmtu. */
sctp_assoc_sync_pmtu(asoc);
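The shape of the change above: when the socket is owned by a user-context caller, the ICMP handler no longer touches the path directly; it only raises pmtu_pending, and the update is applied later under the lock (the sctp_assoc_sync_pmtu hunk earlier in this commit rereads the route MTU at that point). A condensed defer-then-apply sketch with illustrative names, simplified to carry the value in the flag rather than rereading the route:

struct fake_path {
	unsigned int mtu;
	unsigned int pending_mtu;	/* 0 means nothing pending */
};

/* ICMP/softirq side: may not touch the path while the socket is owned. */
static void frag_needed(struct fake_path *p, unsigned int pmtu, int owned_by_user)
{
	if (owned_by_user) {
		p->pending_mtu = pmtu;	/* just flag the request */
		return;
	}
	p->mtu = pmtu;			/* safe to apply immediately */
}

/* Process-context side, called with the lock held. */
static void sync_pending(struct fake_path *p)
{
	if (p->pending_mtu) {
		p->mtu = p->pending_mtu;
		p->pending_mtu = 0;
	}
}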


@@ -333,12 +333,19 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
if (!sp->pf->bind_verify(sp, addr))
return -EADDRNOTAVAIL;
/* We must either be unbound, or bind to the same port. */
if (bp->port && (snum != bp->port)) {
SCTP_DEBUG_PRINTK("sctp_do_bind:"
/* We must either be unbound, or bind to the same port.
* It's OK to allow 0 ports if we are already bound.
* We'll just inherit an already bound port in this case
*/
if (bp->port) {
if (!snum)
snum = bp->port;
else if (snum != bp->port) {
SCTP_DEBUG_PRINTK("sctp_do_bind:"
" New port %d does not match existing port "
"%d.\n", snum, bp->port);
return -EINVAL;
return -EINVAL;
}
}
if (snum && snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
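A tiny standalone restatement of the port policy the comment above spells out (illustrative helper, not the kernel function): a repeat bind with port 0 inherits the port already bound, a matching port is accepted, and a conflicting port is rejected (-EINVAL in the kernel).

/* Returns the port to use, or -1 for the conflict case. */
static int pick_bind_port(int bound, int requested)
{
	if (!bound)
		return requested;	/* first bind: take what was asked */
	if (!requested || requested == bound)
		return bound;		/* 0 or the same port inherits the binding */
	return -1;			/* different port: rejected */
}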
@@ -1655,6 +1662,9 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
goto out_free;
}
if (asoc->pmtu_pending)
sctp_assoc_pending_pmtu(asoc);
/* If fragmentation is disabled and the message length exceeds the
* association fragmentation point, return EMSGSIZE. The I-D
* does not specify what this error is, but this looks like
@@ -3550,6 +3560,7 @@ SCTP_STATIC int sctp_do_peeloff(struct sctp_association *asoc,
struct sock *sk = asoc->base.sk;
struct socket *sock;
struct inet_sock *inetsk;
struct sctp_af *af;
int err = 0;
/* An association cannot be branched off from an already peeled-off
@@ -3571,8 +3582,9 @@ SCTP_STATIC int sctp_do_peeloff(struct sctp_association *asoc,
/* Make peeled-off sockets more like 1-1 accepted sockets.
* Set the daddr and initialize id to something more random
*/
af = sctp_get_af_specific(asoc->peer.primary_addr.sa.sa_family);
af->to_sk_daddr(&asoc->peer.primary_addr, sk);
inetsk = inet_sk(sock->sk);
inetsk->daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr;
inetsk->id = asoc->next_tsn ^ jiffies;
*sockp = sock;
@@ -4343,11 +4355,12 @@ copy_getaddrs:
err = -EFAULT;
goto error;
}
if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num))
return -EFAULT;
if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) {
err = -EFAULT;
goto error;
}
if (put_user(bytes_copied, optlen))
return -EFAULT;
err = -EFAULT;
error:
kfree(addrs);
return err;
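The leak fix above routes the copy_to_user() failures through the same error label instead of returning directly, so the addrs buffer is always freed. A generic self-contained sketch of that cleanup pattern (userspace stand-ins, not the SCTP code):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

static int copy_out(char *dst, size_t len)
{
	char *buf = malloc(len);
	int err = 0;

	if (!buf)
		return -ENOMEM;		/* nothing to clean up yet */
	memset(buf, 0, len);

	if (!dst) {			/* stand-in for a failing copy_to_user() */
		err = -EFAULT;
		goto error;		/* returning here directly would leak buf */
	}
	memcpy(dst, buf, len);
error:
	free(buf);
	return err;
}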


@@ -241,6 +241,45 @@ void sctp_transport_pmtu(struct sctp_transport *transport)
transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}
/* this is a complete rip-off from __sk_dst_check
* the cookie is always 0 since this is how it's used in the
* pmtu code
*/
static struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
{
struct dst_entry *dst = t->dst;
if (dst && dst->obsolete && dst->ops->check(dst, 0) == NULL) {
dst_release(t->dst);
t->dst = NULL;
return NULL;
}
return dst;
}
void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
{
struct dst_entry *dst;
if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
printk(KERN_WARNING "%s: Reported pmtu %d too low, "
"using default minimum of %d\n",
__FUNCTION__, pmtu,
SCTP_DEFAULT_MINSEGMENT);
/* Use default minimum segment size and disable
* pmtu discovery on this transport.
*/
t->pathmtu = SCTP_DEFAULT_MINSEGMENT;
} else {
t->pathmtu = pmtu;
}
dst = sctp_transport_dst_check(t);
if (dst)
dst->ops->update_pmtu(dst, pmtu);
}
/* Caches the dst entry and source address for a transport's destination
* address.
*/


@@ -33,7 +33,7 @@ static ssize_t _show_permaddr(struct device *dev,
struct device_attribute *attr,
char *buf)
{
char *addr = dev_to_rdev(dev)->wiphy.perm_addr;
unsigned char *addr = dev_to_rdev(dev)->wiphy.perm_addr;
return sprintf(buf, "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",
addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);