Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* HEAD:
  [NET]: fix __sk_stream_mem_reclaim
  [Bluetooth] Fix deadlock in the L2CAP layer
  [Bluetooth] Let BT_HIDP depend on INPUT
  [Bluetooth] Avoid NULL pointer dereference with tty->driver
  [Bluetooth] Remaining transitions to use kzalloc()
  [WAN]: converting generic HDLC to use netif_dormant*()
  [IPV4]: Fix error handling for fib_insert_node call
  [NETROM] lockdep: fix false positive
  [ROSE] lockdep: fix false positive
  [AX.25]: Optimize AX.25 socket list lock
  [IPCOMP]: Fix truesize after decompression
  [IPV6]: Use ipv6_addr_src_scope for link address sorting.
  [TCP] tcp_highspeed: Fix AI updates.
  [MAINTAINERS]: Add proper entry for TC classifier
  [NETROM]: Drop lock before calling nr_destroy_socket
  [NETROM]: Fix locking order when establishing a NETROM circuit.
  [AX.25]: Fix locking of ax25 protocol function list.
  [IPV6]: order addresses by scope
Linus Torvalds 2006-07-12 21:19:42 -07:00
commit e47f31787d
32 changed files with 206 additions and 158 deletions

View file

@ -2752,6 +2752,12 @@ P: Christoph Hellwig
M: hch@infradead.org
S: Maintained
TC CLASSIFIER
P: Jamal Hadi Salim
M: hadi@cyberus.ca
L: netdev@vger.kernel.org
S: Maintained
TI OMAP RANDOM NUMBER GENERATOR SUPPORT
P: Deepak Saxena
M: dsaxena@plexity.net

View file

@ -191,7 +191,7 @@ static int hci_uart_flush(struct hci_dev *hdev)
/* Flush any pending characters in the driver and discipline. */
tty_ldisc_flush(tty);
if (tty->driver->flush_buffer)
if (tty->driver && tty->driver->flush_buffer)
tty->driver->flush_buffer(tty);
if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
@ -290,7 +290,7 @@ static int hci_uart_tty_open(struct tty_struct *tty)
if (tty->ldisc.flush_buffer)
tty->ldisc.flush_buffer(tty);
if (tty->driver->flush_buffer)
if (tty->driver && tty->driver->flush_buffer)
tty->driver->flush_buffer(tty);
return 0;

View file

@ -116,27 +116,33 @@ static inline void openwin(card_t *card, u8 page)
#include "hd6457x.c"
static inline void set_carrier(port_t *port)
{
if (!(sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD))
netif_carrier_on(port_to_dev(port));
else
netif_carrier_off(port_to_dev(port));
}
static void sca_msci_intr(port_t *port)
{
struct net_device *dev = port_to_dev(port);
card_t* card = port_to_card(port);
u8 stat = sca_in(MSCI1_OFFSET + ST1, card); /* read MSCI ST1 status */
u8 stat = sca_in(MSCI1_OFFSET + ST1, port); /* read MSCI ST1 status */
/* Reset MSCI TX underrun status bit */
sca_out(stat & ST1_UDRN, MSCI0_OFFSET + ST1, card);
sca_out(stat & ST1_UDRN, MSCI0_OFFSET + ST1, port);
if (stat & ST1_UDRN) {
struct net_device_stats *stats = hdlc_stats(dev);
struct net_device_stats *stats = hdlc_stats(port_to_dev(port));
stats->tx_errors++; /* TX Underrun error detected */
stats->tx_fifo_errors++;
}
/* Reset MSCI CDCD status bit - uses ch#2 DCD input */
sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, card);
sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, port);
if (stat & ST1_CDCD)
hdlc_set_carrier(!(sca_in(MSCI1_OFFSET + ST3, card) & ST3_DCD),
dev);
set_carrier(port);
}
@ -190,7 +196,7 @@ static int c101_open(struct net_device *dev)
sca_out(IE1_UDRN, MSCI0_OFFSET + IE1, port);
sca_out(IE0_TXINT, MSCI0_OFFSET + IE0, port);
hdlc_set_carrier(!(sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD), dev);
set_carrier(port);
printk(KERN_DEBUG "0x%X\n", sca_in(MSCI1_OFFSET + ST3, port));
/* enable MSCI1 CDCD interrupt */
@ -378,7 +384,7 @@ static int __init c101_run(unsigned long irq, unsigned long winbase)
}
sca_init_sync_port(card); /* Set up C101 memory */
hdlc_set_carrier(!(sca_in(MSCI1_OFFSET + ST3, card) & ST3_DCD), dev);
set_carrier(card);
printk(KERN_INFO "%s: Moxa C101 on IRQ%u,"
" using %u TX + %u RX packets rings\n",

View file

@ -168,6 +168,23 @@ static inline u32 buffer_offset(port_t *port, u16 desc, int transmit)
}
static inline void sca_set_carrier(port_t *port)
{
if (!(sca_in(get_msci(port) + ST3, port_to_card(port)) & ST3_DCD)) {
#ifdef DEBUG_LINK
printk(KERN_DEBUG "%s: sca_set_carrier on\n",
port_to_dev(port)->name);
#endif
netif_carrier_on(port_to_dev(port));
} else {
#ifdef DEBUG_LINK
printk(KERN_DEBUG "%s: sca_set_carrier off\n",
port_to_dev(port)->name);
#endif
netif_carrier_off(port_to_dev(port));
}
}
static void sca_init_sync_port(port_t *port)
{
@ -237,9 +254,7 @@ static void sca_init_sync_port(port_t *port)
sca_out(DIR_BOFE, DIR_TX(phy_node(port)), card);
}
}
hdlc_set_carrier(!(sca_in(get_msci(port) + ST3, card) & ST3_DCD),
port_to_dev(port));
sca_set_carrier(port);
}
@ -262,8 +277,7 @@ static inline void sca_msci_intr(port_t *port)
}
if (stat & ST1_CDCD)
hdlc_set_carrier(!(sca_in(msci + ST3, card) & ST3_DCD),
port_to_dev(port));
sca_set_carrier(port);
}
#endif
@ -566,7 +580,7 @@ static void sca_open(struct net_device *dev)
- all DMA interrupts
*/
hdlc_set_carrier(!(sca_in(msci + ST3, card) & ST3_DCD), dev);
sca_set_carrier(port);
#ifdef __HD64570_H
/* MSCI TX INT and RX INT A IRQ enable */

View file

@ -192,9 +192,7 @@ static int cisco_rx(struct sk_buff *skb)
"uptime %ud%uh%um%us)\n",
dev->name, days, hrs,
min, sec);
#if 0
netif_carrier_on(dev);
#endif
netif_dormant_off(dev);
hdlc->state.cisco.up = 1;
}
}
@ -227,9 +225,7 @@ static void cisco_timer(unsigned long arg)
hdlc->state.cisco.settings.timeout * HZ)) {
hdlc->state.cisco.up = 0;
printk(KERN_INFO "%s: Link down\n", dev->name);
#if 0
netif_carrier_off(dev);
#endif
netif_dormant_on(dev);
}
cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ,
@ -265,10 +261,7 @@ static void cisco_stop(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
del_timer_sync(&hdlc->state.cisco.timer);
#if 0
if (netif_carrier_ok(dev))
netif_carrier_off(dev);
#endif
netif_dormant_on(dev);
hdlc->state.cisco.up = 0;
hdlc->state.cisco.request_sent = 0;
}
@ -328,6 +321,7 @@ int hdlc_cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
dev->type = ARPHRD_CISCO;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->addr_len = 0;
netif_dormant_on(dev);
return 0;
}
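
Note on the conversion above: the protocol code no longer touches the carrier flag directly; it only raises or clears the dormant flag for its own (keepalive/LMI) state, while the hardware driver keeps reporting the physical carrier. A minimal, hypothetical sketch of that split, not the hdlc_cisco code itself:

	#include <linux/netdevice.h>

	static void proto_link_state(struct net_device *dev, int proto_up)
	{
		if (proto_up)
			netif_dormant_off(dev);	/* protocol layer is up */
		else
			netif_dormant_on(dev);	/* carrier may still be on, but L2 is not */

		/* The physical carrier is reported separately by the hardware
		 * driver through netif_carrier_on()/netif_carrier_off(); both
		 * conditions must hold for the interface to be IF_OPER_UP.
		 */
	}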

View file

@ -301,7 +301,7 @@ static int pvc_open(struct net_device *dev)
if (pvc->open_count++ == 0) {
hdlc_device *hdlc = dev_to_hdlc(pvc->master);
if (hdlc->state.fr.settings.lmi == LMI_NONE)
pvc->state.active = hdlc->carrier;
pvc->state.active = netif_carrier_ok(pvc->master);
pvc_carrier(pvc->state.active, pvc);
hdlc->state.fr.dce_changed = 1;
@ -545,11 +545,7 @@ static void fr_set_link_state(int reliable, struct net_device *dev)
hdlc->state.fr.reliable = reliable;
if (reliable) {
#if 0
if (!netif_carrier_ok(dev))
netif_carrier_on(dev);
#endif
netif_dormant_off(dev);
hdlc->state.fr.n391cnt = 0; /* Request full status */
hdlc->state.fr.dce_changed = 1;
@ -562,11 +558,7 @@ static void fr_set_link_state(int reliable, struct net_device *dev)
}
}
} else {
#if 0
if (netif_carrier_ok(dev))
netif_carrier_off(dev);
#endif
netif_dormant_on(dev);
while (pvc) { /* Deactivate all PVCs */
pvc_carrier(0, pvc);
pvc->state.exist = pvc->state.active = 0;

View file

@ -34,10 +34,11 @@
#include <linux/inetdevice.h>
#include <linux/lapb.h>
#include <linux/rtnetlink.h>
#include <linux/notifier.h>
#include <linux/hdlc.h>
static const char* version = "HDLC support module revision 1.18";
static const char* version = "HDLC support module revision 1.19";
#undef DEBUG_LINK
@ -73,57 +74,51 @@ static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
static void __hdlc_set_carrier_on(struct net_device *dev)
static inline void hdlc_proto_start(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
if (hdlc->proto.start)
return hdlc->proto.start(dev);
#if 0
#ifdef DEBUG_LINK
if (netif_carrier_ok(dev))
printk(KERN_ERR "hdlc_set_carrier_on(): already on\n");
#endif
netif_carrier_on(dev);
#endif
}
static void __hdlc_set_carrier_off(struct net_device *dev)
static inline void hdlc_proto_stop(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
if (hdlc->proto.stop)
return hdlc->proto.stop(dev);
#if 0
#ifdef DEBUG_LINK
if (!netif_carrier_ok(dev))
printk(KERN_ERR "hdlc_set_carrier_off(): already off\n");
#endif
netif_carrier_off(dev);
#endif
}
void hdlc_set_carrier(int on, struct net_device *dev)
static int hdlc_device_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
struct net_device *dev = ptr;
hdlc_device *hdlc;
unsigned long flags;
on = on ? 1 : 0;
int on;
if (dev->get_stats != hdlc_get_stats)
return NOTIFY_DONE; /* not an HDLC device */
if (event != NETDEV_CHANGE)
return NOTIFY_DONE; /* Only interested in carrier changes */
on = netif_carrier_ok(dev);
#ifdef DEBUG_LINK
printk(KERN_DEBUG "hdlc_set_carrier %i\n", on);
printk(KERN_DEBUG "%s: hdlc_device_event NETDEV_CHANGE, carrier %i\n",
dev->name, on);
#endif
hdlc = dev_to_hdlc(dev);
spin_lock_irqsave(&hdlc->state_lock, flags);
if (hdlc->carrier == on)
goto carrier_exit; /* no change in DCD line level */
#ifdef DEBUG_LINK
printk(KERN_INFO "%s: carrier %s\n", dev->name, on ? "ON" : "off");
#endif
hdlc->carrier = on;
if (!hdlc->open)
@ -131,14 +126,15 @@ void hdlc_set_carrier(int on, struct net_device *dev)
if (hdlc->carrier) {
printk(KERN_INFO "%s: Carrier detected\n", dev->name);
__hdlc_set_carrier_on(dev);
hdlc_proto_start(dev);
} else {
printk(KERN_INFO "%s: Carrier lost\n", dev->name);
__hdlc_set_carrier_off(dev);
hdlc_proto_stop(dev);
}
carrier_exit:
spin_unlock_irqrestore(&hdlc->state_lock, flags);
return NOTIFY_DONE;
}
@ -165,7 +161,7 @@ int hdlc_open(struct net_device *dev)
if (hdlc->carrier) {
printk(KERN_INFO "%s: Carrier detected\n", dev->name);
__hdlc_set_carrier_on(dev);
hdlc_proto_start(dev);
} else
printk(KERN_INFO "%s: No carrier\n", dev->name);
@ -190,7 +186,7 @@ void hdlc_close(struct net_device *dev)
hdlc->open = 0;
if (hdlc->carrier)
__hdlc_set_carrier_off(dev);
hdlc_proto_stop(dev);
spin_unlock_irq(&hdlc->state_lock);
@ -303,7 +299,6 @@ MODULE_LICENSE("GPL v2");
EXPORT_SYMBOL(hdlc_open);
EXPORT_SYMBOL(hdlc_close);
EXPORT_SYMBOL(hdlc_set_carrier);
EXPORT_SYMBOL(hdlc_ioctl);
EXPORT_SYMBOL(hdlc_setup);
EXPORT_SYMBOL(alloc_hdlcdev);
@ -315,9 +310,18 @@ static struct packet_type hdlc_packet_type = {
};
static struct notifier_block hdlc_notifier = {
.notifier_call = hdlc_device_event,
};
static int __init hdlc_module_init(void)
{
int result;
printk(KERN_INFO "%s\n", version);
if ((result = register_netdevice_notifier(&hdlc_notifier)) != 0)
return result;
dev_add_pack(&hdlc_packet_type);
return 0;
}
@ -327,6 +331,7 @@ static int __init hdlc_module_init(void)
static void __exit hdlc_module_exit(void)
{
dev_remove_pack(&hdlc_packet_type);
unregister_netdevice_notifier(&hdlc_notifier);
}
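
For reference, a minimal sketch of the notifier pattern the new hdlc_device_event() follows, using the 2.6-era convention visible above where the notifier's ptr argument is the net_device itself; names here are illustrative:

	#include <linux/kernel.h>
	#include <linux/netdevice.h>
	#include <linux/notifier.h>

	static int carrier_watch_event(struct notifier_block *nb,
				       unsigned long event, void *ptr)
	{
		struct net_device *dev = ptr;

		if (event != NETDEV_CHANGE)
			return NOTIFY_DONE;	/* only carrier transitions matter here */

		printk(KERN_DEBUG "%s: carrier is now %s\n",
		       dev->name, netif_carrier_ok(dev) ? "on" : "off");
		return NOTIFY_DONE;
	}

	static struct notifier_block carrier_watch_notifier = {
		.notifier_call = carrier_watch_event,
	};

	/* register_netdevice_notifier(&carrier_watch_notifier) in module init,
	 * unregister_netdevice_notifier() in module exit, as in the hunk above. */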

View file

@ -149,7 +149,10 @@ static inline void wanxl_cable_intr(port_t *port)
printk(KERN_INFO "%s: %s%s module, %s cable%s%s\n",
port->dev->name, pm, dte, cable, dsr, dcd);
hdlc_set_carrier(value & STATUS_CABLE_DCD, port->dev);
if (value & STATUS_CABLE_DCD)
netif_carrier_on(port->dev);
else
netif_carrier_off(port->dev);
}

View file

@ -224,8 +224,6 @@ static __inline__ void debug_frame(const struct sk_buff *skb)
int hdlc_open(struct net_device *dev);
/* Must be called by hardware driver when HDLC device is being closed */
void hdlc_close(struct net_device *dev);
/* Called by hardware driver when DCD line level changes */
void hdlc_set_carrier(int on, struct net_device *dev);
/* May be used by hardware driver to gain control over HDLC device */
static __inline__ void hdlc_proto_detach(hdlc_device *hdlc)

View file

@ -145,7 +145,7 @@ struct sock *ax25_find_listener(ax25_address *addr, int digi,
ax25_cb *s;
struct hlist_node *node;
spin_lock_bh(&ax25_list_lock);
spin_lock(&ax25_list_lock);
ax25_for_each(s, node, &ax25_list) {
if ((s->iamdigi && !digi) || (!s->iamdigi && digi))
continue;
@ -154,12 +154,12 @@ struct sock *ax25_find_listener(ax25_address *addr, int digi,
/* If device is null we match any device */
if (s->ax25_dev == NULL || s->ax25_dev->dev == dev) {
sock_hold(s->sk);
spin_unlock_bh(&ax25_list_lock);
spin_unlock(&ax25_list_lock);
return s->sk;
}
}
}
spin_unlock_bh(&ax25_list_lock);
spin_unlock(&ax25_list_lock);
return NULL;
}
@ -174,7 +174,7 @@ struct sock *ax25_get_socket(ax25_address *my_addr, ax25_address *dest_addr,
ax25_cb *s;
struct hlist_node *node;
spin_lock_bh(&ax25_list_lock);
spin_lock(&ax25_list_lock);
ax25_for_each(s, node, &ax25_list) {
if (s->sk && !ax25cmp(&s->source_addr, my_addr) &&
!ax25cmp(&s->dest_addr, dest_addr) &&
@ -185,7 +185,7 @@ struct sock *ax25_get_socket(ax25_address *my_addr, ax25_address *dest_addr,
}
}
spin_unlock_bh(&ax25_list_lock);
spin_unlock(&ax25_list_lock);
return sk;
}
@ -235,7 +235,7 @@ void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto)
struct sk_buff *copy;
struct hlist_node *node;
spin_lock_bh(&ax25_list_lock);
spin_lock(&ax25_list_lock);
ax25_for_each(s, node, &ax25_list) {
if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 &&
s->sk->sk_type == SOCK_RAW &&
@ -248,7 +248,7 @@ void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto)
kfree_skb(copy);
}
}
spin_unlock_bh(&ax25_list_lock);
spin_unlock(&ax25_list_lock);
}
/*

View file

@ -80,7 +80,7 @@ void ax25_ds_enquiry_response(ax25_cb *ax25)
ax25_start_t3timer(ax25);
ax25_ds_set_timer(ax25->ax25_dev);
spin_lock_bh(&ax25_list_lock);
spin_lock(&ax25_list_lock);
ax25_for_each(ax25o, node, &ax25_list) {
if (ax25o == ax25)
continue;
@ -106,7 +106,7 @@ void ax25_ds_enquiry_response(ax25_cb *ax25)
if (ax25o->state != AX25_STATE_0)
ax25_start_t3timer(ax25o);
}
spin_unlock_bh(&ax25_list_lock);
spin_unlock(&ax25_list_lock);
}
void ax25_ds_establish_data_link(ax25_cb *ax25)
@ -162,13 +162,13 @@ static int ax25_check_dama_slave(ax25_dev *ax25_dev)
int res = 0;
struct hlist_node *node;
spin_lock_bh(&ax25_list_lock);
spin_lock(&ax25_list_lock);
ax25_for_each(ax25, node, &ax25_list)
if (ax25->ax25_dev == ax25_dev && (ax25->condition & AX25_COND_DAMA_MODE) && ax25->state > AX25_STATE_1) {
res = 1;
break;
}
spin_unlock_bh(&ax25_list_lock);
spin_unlock(&ax25_list_lock);
return res;
}

View file

@ -85,7 +85,7 @@ static void ax25_ds_timeout(unsigned long arg)
return;
}
spin_lock_bh(&ax25_list_lock);
spin_lock(&ax25_list_lock);
ax25_for_each(ax25, node, &ax25_list) {
if (ax25->ax25_dev != ax25_dev || !(ax25->condition & AX25_COND_DAMA_MODE))
continue;
@ -93,7 +93,7 @@ static void ax25_ds_timeout(unsigned long arg)
ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
ax25_disconnect(ax25, ETIMEDOUT);
}
spin_unlock_bh(&ax25_list_lock);
spin_unlock(&ax25_list_lock);
ax25_dev_dama_off(ax25_dev);
}

View file

@ -66,10 +66,10 @@ int ax25_protocol_register(unsigned int pid,
protocol->pid = pid;
protocol->func = func;
write_lock(&protocol_list_lock);
write_lock_bh(&protocol_list_lock);
protocol->next = protocol_list;
protocol_list = protocol;
write_unlock(&protocol_list_lock);
write_unlock_bh(&protocol_list_lock);
return 1;
}
@ -80,16 +80,16 @@ void ax25_protocol_release(unsigned int pid)
{
struct protocol_struct *s, *protocol;
write_lock(&protocol_list_lock);
write_lock_bh(&protocol_list_lock);
protocol = protocol_list;
if (protocol == NULL) {
write_unlock(&protocol_list_lock);
write_unlock_bh(&protocol_list_lock);
return;
}
if (protocol->pid == pid) {
protocol_list = protocol->next;
write_unlock(&protocol_list_lock);
write_unlock_bh(&protocol_list_lock);
kfree(protocol);
return;
}
@ -98,14 +98,14 @@ void ax25_protocol_release(unsigned int pid)
if (protocol->next->pid == pid) {
s = protocol->next;
protocol->next = protocol->next->next;
write_unlock(&protocol_list_lock);
write_unlock_bh(&protocol_list_lock);
kfree(s);
return;
}
protocol = protocol->next;
}
write_unlock(&protocol_list_lock);
write_unlock_bh(&protocol_list_lock);
}
EXPORT_SYMBOL(ax25_protocol_release);
@ -266,13 +266,13 @@ int ax25_protocol_is_registered(unsigned int pid)
struct protocol_struct *protocol;
int res = 0;
read_lock(&protocol_list_lock);
read_lock_bh(&protocol_list_lock);
for (protocol = protocol_list; protocol != NULL; protocol = protocol->next)
if (protocol->pid == pid) {
res = 1;
break;
}
read_unlock(&protocol_list_lock);
read_unlock_bh(&protocol_list_lock);
return res;
}

View file

@ -75,15 +75,13 @@
static struct cmtp_application *cmtp_application_add(struct cmtp_session *session, __u16 appl)
{
struct cmtp_application *app = kmalloc(sizeof(*app), GFP_KERNEL);
struct cmtp_application *app = kzalloc(sizeof(*app), GFP_KERNEL);
BT_DBG("session %p application %p appl %d", session, app, appl);
if (!app)
return NULL;
memset(app, 0, sizeof(*app));
app->state = BT_OPEN;
app->appl = appl;
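
Several hunks in this merge ("Remaining transitions to use kzalloc()") apply the same conversion. A hedged before/after sketch of the pattern, with a made-up structure:

	#include <linux/slab.h>
	#include <linux/string.h>

	struct example {
		int state;
		int id;
	};

	static struct example *example_alloc_old(void)
	{
		struct example *e = kmalloc(sizeof(*e), GFP_KERNEL);

		if (!e)
			return NULL;
		memset(e, 0, sizeof(*e));	/* explicit zeroing after kmalloc() */
		return e;
	}

	static struct example *example_alloc_new(void)
	{
		/* kzalloc() allocates and zeroes in a single call */
		return kzalloc(sizeof(struct example), GFP_KERNEL);
	}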

View file

@ -335,10 +335,9 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
baswap(&src, &bt_sk(sock->sk)->src);
baswap(&dst, &bt_sk(sock->sk)->dst);
session = kmalloc(sizeof(struct cmtp_session), GFP_KERNEL);
session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL);
if (!session)
return -ENOMEM;
memset(session, 0, sizeof(struct cmtp_session));
down_write(&cmtp_session_sem);

View file

@ -336,9 +336,8 @@ void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
/* Entry not in the cache. Add new one. */
if (!(e = kmalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
return;
memset(e, 0, sizeof(struct inquiry_entry));
e->next = cache->list;
cache->list = e;
}
@ -800,12 +799,10 @@ struct hci_dev *hci_alloc_dev(void)
{
struct hci_dev *hdev;
hdev = kmalloc(sizeof(struct hci_dev), GFP_KERNEL);
hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
if (!hdev)
return NULL;
memset(hdev, 0, sizeof(struct hci_dev));
skb_queue_head_init(&hdev->driver_init);
return hdev;

View file

@ -1,7 +1,6 @@
config BT_HIDP
tristate "HIDP protocol support"
depends on BT && BT_L2CAP && (BROKEN || !S390)
select INPUT
depends on BT && BT_L2CAP && INPUT
help
HIDP (Human Interface Device Protocol) is a transport layer
for HID reports. HIDP is required for the Bluetooth Human

View file

@ -582,10 +582,9 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
bacmp(&bt_sk(ctrl_sock->sk)->dst, &bt_sk(intr_sock->sk)->dst))
return -ENOTUNIQ;
session = kmalloc(sizeof(struct hidp_session), GFP_KERNEL);
session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL);
if (!session)
return -ENOMEM;
memset(session, 0, sizeof(struct hidp_session));
session->input = input_allocate_device();
if (!session->input) {

View file

@ -185,7 +185,7 @@ static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
write_lock(&l->lock);
write_lock_bh(&l->lock);
if (sk == l->head)
l->head = next;
@ -193,7 +193,7 @@ static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
l2cap_pi(next)->prev_c = prev;
if (prev)
l2cap_pi(prev)->next_c = next;
write_unlock(&l->lock);
write_unlock_bh(&l->lock);
__sock_put(sk);
}
@ -313,9 +313,9 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
struct l2cap_chan_list *l = &conn->chan_list;
write_lock(&l->lock);
write_lock_bh(&l->lock);
__l2cap_chan_add(conn, sk, parent);
write_unlock(&l->lock);
write_unlock_bh(&l->lock);
}
static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
@ -328,14 +328,14 @@ static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
* 200 - 254 are used by utilities like l2ping, etc.
*/
spin_lock(&conn->lock);
spin_lock_bh(&conn->lock);
if (++conn->tx_ident > 128)
conn->tx_ident = 1;
id = conn->tx_ident;
spin_unlock(&conn->lock);
spin_unlock_bh(&conn->lock);
return id;
}
@ -1416,11 +1416,11 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
if (!sk)
goto response;
write_lock(&list->lock);
write_lock_bh(&list->lock);
/* Check if we already have channel with that dcid */
if (__l2cap_get_chan_by_dcid(list, scid)) {
write_unlock(&list->lock);
write_unlock_bh(&list->lock);
sock_set_flag(sk, SOCK_ZAPPED);
l2cap_sock_kill(sk);
goto response;
@ -1458,7 +1458,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
result = status = 0;
done:
write_unlock(&list->lock);
write_unlock_bh(&list->lock);
response:
bh_unlock_sock(parent);
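
The L2CAP deadlock fix switches these locks to their _bh variants. A hedged sketch of the general reasoning, assuming the same lock is also taken from softirq context (for example the HCI receive path): a process-context holder must disable bottom halves, otherwise a softirq on the same CPU can interrupt it and spin on the lock it already holds.

	#include <linux/spinlock.h>

	static DEFINE_RWLOCK(chan_list_lock);

	static void process_context_update(void)
	{
		write_lock_bh(&chan_list_lock);	/* bottom halves disabled while held */
		/* ... modify the channel list ... */
		write_unlock_bh(&chan_list_lock);
	}

	static void softirq_context_lookup(void)
	{
		read_lock(&chan_list_lock);	/* already running in BH context */
		/* ... walk the channel list ... */
		read_unlock(&chan_list_lock);
	}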

View file

@ -273,10 +273,10 @@ static void rfcomm_dlc_clear_state(struct rfcomm_dlc *d)
struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio)
{
struct rfcomm_dlc *d = kmalloc(sizeof(*d), prio);
struct rfcomm_dlc *d = kzalloc(sizeof(*d), prio);
if (!d)
return NULL;
memset(d, 0, sizeof(*d));
init_timer(&d->timer);
d->timer.function = rfcomm_dlc_timeout;
@ -289,6 +289,7 @@ struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio)
rfcomm_dlc_clear_state(d);
BT_DBG("%p", d);
return d;
}
@ -522,10 +523,10 @@ int rfcomm_dlc_get_modem_status(struct rfcomm_dlc *d, u8 *v24_sig)
/* ---- RFCOMM sessions ---- */
static struct rfcomm_session *rfcomm_session_add(struct socket *sock, int state)
{
struct rfcomm_session *s = kmalloc(sizeof(*s), GFP_KERNEL);
struct rfcomm_session *s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
return NULL;
memset(s, 0, sizeof(*s));
BT_DBG("session %p sock %p", s, sock);

View file

@ -169,10 +169,9 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
BT_DBG("id %d channel %d", req->dev_id, req->channel);
dev = kmalloc(sizeof(struct rfcomm_dev), GFP_KERNEL);
dev = kzalloc(sizeof(struct rfcomm_dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
memset(dev, 0, sizeof(struct rfcomm_dev));
write_lock_bh(&rfcomm_dev_lock);

View file

@ -108,17 +108,14 @@ static void sco_sock_init_timer(struct sock *sk)
static struct sco_conn *sco_conn_add(struct hci_conn *hcon, __u8 status)
{
struct hci_dev *hdev = hcon->hdev;
struct sco_conn *conn;
struct sco_conn *conn = hcon->sco_data;
if ((conn = hcon->sco_data))
if (conn || status)
return conn;
if (status)
return conn;
if (!(conn = kmalloc(sizeof(struct sco_conn), GFP_ATOMIC)))
conn = kzalloc(sizeof(struct sco_conn), GFP_ATOMIC);
if (!conn)
return NULL;
memset(conn, 0, sizeof(struct sco_conn));
spin_lock_init(&conn->lock);
@ -134,6 +131,7 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon, __u8 status)
conn->mtu = 60;
BT_DBG("hcon %p conn %p", hcon, conn);
return conn;
}

View file

@ -196,15 +196,13 @@ EXPORT_SYMBOL(sk_stream_error);
void __sk_stream_mem_reclaim(struct sock *sk)
{
if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM) {
atomic_sub(sk->sk_forward_alloc / SK_STREAM_MEM_QUANTUM,
sk->sk_prot->memory_allocated);
sk->sk_forward_alloc &= SK_STREAM_MEM_QUANTUM - 1;
if (*sk->sk_prot->memory_pressure &&
(atomic_read(sk->sk_prot->memory_allocated) <
sk->sk_prot->sysctl_mem[0]))
*sk->sk_prot->memory_pressure = 0;
}
atomic_sub(sk->sk_forward_alloc / SK_STREAM_MEM_QUANTUM,
sk->sk_prot->memory_allocated);
sk->sk_forward_alloc &= SK_STREAM_MEM_QUANTUM - 1;
if (*sk->sk_prot->memory_pressure &&
(atomic_read(sk->sk_prot->memory_allocated) <
sk->sk_prot->sysctl_mem[0]))
*sk->sk_prot->memory_pressure = 0;
}
EXPORT_SYMBOL(__sk_stream_mem_reclaim);
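
Whichever version runs, the accounting itself is unchanged: forward_alloc is kept in bytes, the global pool in whole quanta, and reclaim returns complete quanta while the socket keeps only the sub-quantum remainder. A standalone illustration of that arithmetic, with assumed sizes (SK_STREAM_MEM_QUANTUM is taken to be one 4 KiB page here):

	#include <stdio.h>

	#define QUANTUM 4096		/* assumed page-sized quantum */

	int main(void)
	{
		int forward_alloc = 10000;	/* bytes reserved by one socket */
		int pool = 25;			/* global pool, in quanta */

		pool -= forward_alloc / QUANTUM;	/* return 2 whole quanta */
		forward_alloc &= QUANTUM - 1;		/* keep 10000 % 4096 = 1808 bytes */

		printf("pool=%d quanta, forward_alloc=%d bytes\n", pool, forward_alloc);
		return 0;
	}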

View file

@ -1252,8 +1252,8 @@ fn_trie_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
*/
if (!fa_head) {
fa_head = fib_insert_node(t, &err, key, plen);
err = 0;
fa_head = fib_insert_node(t, &err, key, plen);
if (err)
goto out_free_new_fa;
}
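
The reordering above matters because fib_insert_node() reports failure through the err pointer; clearing err after the call, as the old code did, threw the result away. A hedged standalone sketch of that convention, with illustrative names:

	#include <errno.h>
	#include <stddef.h>

	static void *insert_node(int *err, int fail)
	{
		if (fail) {
			*err = -ENOMEM;		/* failure is reported via the pointer */
			return NULL;
		}
		return (void *)"node";
	}

	static int caller(int fail)
	{
		int err = 0;			/* clear before the call, as the fix does */
		void *head = insert_node(&err, fail);

		if (err)
			return err;		/* the error now propagates instead of being masked */

		return head != NULL;
	}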

View file

@ -70,7 +70,8 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
if (err)
goto out;
skb_put(skb, dlen - plen);
skb->truesize += dlen - plen;
__skb_put(skb, dlen - plen);
memcpy(skb->data, scratch, dlen);
out:
put_cpu();

View file

@ -139,14 +139,19 @@ static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 rtt,
tp->snd_cwnd++;
}
} else {
/* Update AIMD parameters */
/* Update AIMD parameters.
*
* We want to guarantee that:
* hstcp_aimd_vals[ca->ai-1].cwnd <
* snd_cwnd <=
* hstcp_aimd_vals[ca->ai].cwnd
*/
if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) {
while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd &&
ca->ai < HSTCP_AIMD_MAX - 1)
ca->ai++;
} else if (tp->snd_cwnd < hstcp_aimd_vals[ca->ai].cwnd) {
while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd &&
ca->ai > 0)
} else if (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd) {
while (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd)
ca->ai--;
}
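
The corrected search keeps ca->ai at the index where hstcp_aimd_vals[ai-1].cwnd < snd_cwnd <= hstcp_aimd_vals[ai].cwnd; in the old downward branch the loop condition was immediately false, so ai could never decrease. A standalone sketch of the fixed search, using a shortened, assumed threshold table rather than the full kernel one:

	#include <stdio.h>

	/* Shortened, assumed cwnd thresholds; the real hstcp_aimd_vals[] entries
	 * also carry md and ai values. */
	static const unsigned int thresh[] = { 38, 118, 221, 347, 495 };
	#define NTHRESH (sizeof(thresh) / sizeof(thresh[0]))

	/* Keep the invariant thresh[ai-1] < snd_cwnd <= thresh[ai]. */
	static unsigned int update_ai(unsigned int ai, unsigned int snd_cwnd)
	{
		if (snd_cwnd > thresh[ai]) {
			while (snd_cwnd > thresh[ai] && ai < NTHRESH - 1)
				ai++;
		} else if (ai && snd_cwnd <= thresh[ai - 1]) {
			while (ai && snd_cwnd <= thresh[ai - 1])
				ai--;
		}
		return ai;
	}

	int main(void)
	{
		printf("cwnd=200 -> ai=%u\n", update_ai(0, 200));	/* prints 2 */
		printf("cwnd=50  -> ai=%u\n", update_ai(4, 50));	/* prints 1 */
		return 0;
	}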

View file

@ -508,6 +508,26 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
kfree(ifp);
}
static void
ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
{
struct inet6_ifaddr *ifa, **ifap;
int ifp_scope = ipv6_addr_src_scope(&ifp->addr);
/*
* Each device address list is sorted in order of scope -
* global before linklocal.
*/
for (ifap = &idev->addr_list; (ifa = *ifap) != NULL;
ifap = &ifa->if_next) {
if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr))
break;
}
ifp->if_next = *ifap;
*ifap = ifp;
}
/* On success it returns ifp with increased reference count */
static struct inet6_ifaddr *
@ -573,8 +593,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
write_lock(&idev->lock);
/* Add to inet6_dev unicast addr list. */
ifa->if_next = idev->addr_list;
idev->addr_list = ifa;
ipv6_link_dev_addr(idev, ifa);
#ifdef CONFIG_IPV6_PRIVACY
if (ifa->flags&IFA_F_TEMPORARY) {
@ -987,7 +1006,7 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev,
continue;
} else if (score.scope < hiscore.scope) {
if (score.scope < daddr_scope)
continue;
break; /* addresses sorted by scope */
else {
score.rule = 2;
goto record_it;
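
ipv6_link_dev_addr() above does a sorted insert with a pointer-to-pointer walk: entries are kept in descending order of ipv6_addr_src_scope(), which places global addresses before link-local ones and lets ipv6_dev_get_saddr() stop early (the break that replaces continue further down). A standalone sketch with illustrative types:

	#include <stddef.h>

	struct addr {
		int scope;
		struct addr *if_next;
	};

	static void link_addr(struct addr **head, struct addr *ifp)
	{
		struct addr **ifap, *ifa;

		for (ifap = head; (ifa = *ifap) != NULL; ifap = &ifa->if_next)
			if (ifp->scope >= ifa->scope)
				break;		/* new entry goes before this one */

		ifp->if_next = *ifap;
		*ifap = ifp;
	}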

View file

@ -109,7 +109,8 @@ static int ipcomp6_input(struct xfrm_state *x, struct sk_buff *skb)
goto out_put_cpu;
}
skb_put(skb, dlen - plen);
skb->truesize += dlen - plen;
__skb_put(skb, dlen - plen);
memcpy(skb->data, scratch, dlen);
err = ipch->nexthdr;

View file

@ -65,6 +65,14 @@ static DEFINE_SPINLOCK(nr_list_lock);
static const struct proto_ops nr_proto_ops;
/*
* NETROM network devices are virtual network devices encapsulating NETROM
* frames into AX.25 which will be sent through an AX.25 device, so form a
* special "super class" of normal net devices; split their locks off into a
* separate class since they always nest.
*/
static struct lock_class_key nr_netdev_xmit_lock_key;
/*
* Socket removal during an interrupt is now safe.
*/
@ -986,18 +994,18 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
nr_make->vl = 0;
nr_make->state = NR_STATE_3;
sk_acceptq_added(sk);
nr_insert_socket(make);
skb_queue_head(&sk->sk_receive_queue, skb);
nr_start_heartbeat(make);
nr_start_idletimer(make);
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk, skb->len);
bh_unlock_sock(sk);
nr_insert_socket(make);
nr_start_heartbeat(make);
nr_start_idletimer(make);
return 1;
}
@ -1405,6 +1413,7 @@ static int __init nr_proto_init(void)
free_netdev(dev);
goto fail;
}
lockdep_set_class(&dev->_xmit_lock, &nr_netdev_xmit_lock_key);
dev_nr[i] = dev;
}
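
The new lock_class_key gives the virtual NETROM device's xmit lock its own lockdep class, as the code comment above explains; the same pattern is applied to ROSE below. A hedged sketch of the annotation in isolation, using the dev->_xmit_lock field as it existed at the time of this merge:

	#include <linux/lockdep.h>
	#include <linux/netdevice.h>

	static struct lock_class_key virt_netdev_xmit_lock_key;

	static void virt_dev_annotate(struct net_device *dev)
	{
		/* dev is the virtual device created by the protocol; its frames are
		 * re-queued to a real AX.25 device, so the two xmit locks
		 * legitimately nest and need distinct lockdep classes. */
		lockdep_set_class(&dev->_xmit_lock, &virt_netdev_xmit_lock_key);
	}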

View file

@ -138,8 +138,8 @@ static void nr_heartbeat_expiry(unsigned long param)
if (sock_flag(sk, SOCK_DESTROY) ||
(sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
sock_hold(sk);
nr_destroy_socket(sk);
bh_unlock_sock(sk);
nr_destroy_socket(sk);
sock_put(sk);
return;
}

View file

@ -66,6 +66,14 @@ static struct proto_ops rose_proto_ops;
ax25_address rose_callsign;
/*
* ROSE network devices are virtual network devices encapsulating ROSE
* frames into AX.25 which will be sent through an AX.25 device, so form a
* special "super class" of normal net devices; split their locks off into a
* separate class since they always nest.
*/
static struct lock_class_key rose_netdev_xmit_lock_key;
/*
* Convert a ROSE address into text.
*/
@ -1515,6 +1523,7 @@ static int __init rose_proto_init(void)
free_netdev(dev);
goto fail;
}
lockdep_set_class(&dev->_xmit_lock, &rose_netdev_xmit_lock_key);
dev_rose[i] = dev;
}

View file

@ -884,8 +884,6 @@ static int __init tc_action_init(void)
link_p[RTM_GETACTION-RTM_BASE].dumpit = tc_dump_action;
}
printk("TC classifier action (bugs to netdev@vger.kernel.org cc "
"hadi@cyberus.ca)\n");
return 0;
}