
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (42 commits)
  [IOAT]: Do not dereference THIS_MODULE directly to set unsafe.
  [NETROM]: Fix possible null pointer dereference.
  [NET] netpoll: break recursive loop in netpoll rx path
  [NET] netpoll: don't spin forever sending to stopped queues
  [IRDA]: add some IBM think pads
  [ATM]: atm/mpc.c warning fix
  [NET]: skb_find_text ignores to argument
  [NET]: make net/core/dev.c:netdev_nit static
  [NET]: Fix GSO problems in dev_hard_start_xmit()
  [NET]: Fix CHECKSUM_HW GSO problems.
  [TIPC]: Fix incorrect correction to discovery timer frequency computation.
  [TIPC]: Get rid of dynamically allocated arrays in broadcast code.
  [TIPC]: Fixed link switchover bugs
  [TIPC]: Enhanced & cleaned up system messages; fixed 2 obscure memory leaks.
  [TIPC]: First phase of assert() cleanup
  [TIPC]: Disallow config operations that aren't supported in certain modes.
  [TIPC]: Fixed memory leak in tipc_link_send() when destination is unreachable
  [TIPC]: Added missing warning for out-of-memory condition
  [TIPC]: Withdrawing all names from nameless port now returns success, not error
  [TIPC]: Optimized argument validation done by connect().
  ...
Linus Torvalds 2006-06-26 10:08:13 -07:00
commit 61a46dc9d1
31 changed files with 725 additions and 467 deletions

View File

@ -824,10 +824,9 @@ static int __init ioat_init_module(void)
{
/* it's currently unsafe to unload this module */
/* if forced, worst case is that rmmod hangs */
if (THIS_MODULE != NULL)
THIS_MODULE->unsafe = 1;
__unsafe(THIS_MODULE);
return pci_module_init(&ioat_pci_drv);
pci_module_init(&ioat_pci_drv);
}
module_init(ioat_init_module);
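
The reason for the change above: poking THIS_MODULE->unsafe directly only works when the driver is built as a module with unload support, whereas __unsafe() hides those configuration differences behind one macro. A minimal sketch of that pattern, using a hypothetical mark_module_unsafe() helper rather than the kernel's real module.h definition:

/* Illustrative only -- not the kernel's __unsafe() implementation.
 * The point is that the macro, not the caller, absorbs the cases where
 * module unloading is disabled or the code is built into the kernel
 * (THIS_MODULE == NULL). */
#ifdef CONFIG_MODULE_UNLOAD
#define mark_module_unsafe(mod) \
	do { if (mod) (mod)->unsafe = 1; } while (0)
#else
#define mark_module_unsafe(mod) do { } while (0)
#endif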

View File

@ -115,8 +115,12 @@ static nsc_chip_t chips[] = {
/* Contributed by Jan Frey - IBM A30/A31 */
{ "PC8739x", { 0x2e, 0x4e, 0x0 }, 0x20, 0xea, 0xff,
nsc_ircc_probe_39x, nsc_ircc_init_39x },
{ "IBM", { 0x2e, 0x4e, 0x0 }, 0x20, 0xf4, 0xff,
nsc_ircc_probe_39x, nsc_ircc_init_39x },
/* IBM ThinkPads using PC8738x (T60/X60/Z60) */
{ "IBM-PC8738x", { 0x2e, 0x4e, 0x0 }, 0x20, 0xf4, 0xff,
nsc_ircc_probe_39x, nsc_ircc_init_39x },
/* IBM ThinkPads using PC8394T (T43/R52/?) */
{ "IBM-PC8394T", { 0x2e, 0x4e, 0x0 }, 0x20, 0xf9, 0xff,
nsc_ircc_probe_39x, nsc_ircc_init_39x },
{ NULL }
};

View File

@ -699,7 +699,6 @@ extern int dev_hard_start_xmit(struct sk_buff *skb,
extern void dev_init(void);
extern int netdev_nit;
extern int netdev_budget;
/* Called by rtnetlink.c:rtnl_unlock() */

View File

@ -31,6 +31,7 @@ struct netpoll_info {
int rx_flags;
spinlock_t rx_lock;
struct netpoll *rx_np; /* netpoll that registered an rx_hook */
struct sk_buff_head arp_tx; /* list of arp requests to reply to */
};
void netpoll_poll(struct netpoll *np);

View File

@ -49,10 +49,18 @@
#define TIPC_MEDIA_TYPE_ETH 1
/*
* Destination address structure used by TIPC bearers when sending messages
*
* IMPORTANT: The fields of this structure MUST be stored using the specified
* byte order indicated below, as the structure is exchanged between nodes
* as part of a link setup process.
*/
struct tipc_media_addr {
__u32 type;
__u32 type; /* bearer type (network byte order) */
union {
__u8 eth_addr[6]; /* Ethernet bearer */
__u8 eth_addr[6]; /* 48 bit Ethernet addr (byte array) */
#if 0
/* Prototypes for other possible bearer types */
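
The byte-order note above is easy to get wrong in practice. As an illustration only (not part of this patch), an Ethernet bearer would populate the structure roughly like this, matching what eth_media.c does for its broadcast address later in this merge:

/* Sketch: fill a tipc_media_addr for an Ethernet bearer, respecting
 * the byte-order rule stated in the structure comment. */
static void fill_eth_media_addr(struct tipc_media_addr *a,
				const unsigned char *mac)
{
	a->type = htonl(TIPC_MEDIA_TYPE_ETH);	/* network byte order */
	memcpy(a->dev_addr.eth_addr, mac, 6);	/* raw 48-bit MAC, byte array */
}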

View File

@ -1113,10 +1113,9 @@ static void check_qos_and_open_shortcut(struct k_message *msg, struct mpoa_clien
static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc)
{
unsigned char *ip;
uint32_t dst_ip = msg->content.in_info.in_dst_ip;
in_cache_entry *entry = mpc->in_ops->get(dst_ip, mpc);
dprintk("mpoa: (%s) MPOA_res_reply_rcvd: ip %u.%u.%u.%u\n", mpc->dev->name, NIPQUAD(dst_ip));
ddprintk("mpoa: (%s) MPOA_res_reply_rcvd() entry = %p", mpc->dev->name, entry);
if(entry == NULL){

View File

@ -230,7 +230,7 @@ extern void netdev_unregister_sysfs(struct net_device *);
* For efficiency
*/
int netdev_nit;
static int netdev_nit;
/*
* Add a protocol ID to the list. Now that the input handler is
@ -1325,9 +1325,12 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
nskb->next = NULL;
rc = dev->hard_start_xmit(nskb, dev);
if (unlikely(rc)) {
nskb->next = skb->next;
skb->next = nskb;
return rc;
}
if (unlikely(netif_queue_stopped(dev) && skb->next))
return NETDEV_TX_BUSY;
} while (skb->next);
skb->destructor = DEV_GSO_CB(skb)->destructor;

View File

@ -54,6 +54,7 @@ static atomic_t trapped;
sizeof(struct iphdr) + sizeof(struct ethhdr))
static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);
static void queue_process(void *p)
{
@ -153,6 +154,22 @@ static void poll_napi(struct netpoll *np)
}
}
static void service_arp_queue(struct netpoll_info *npi)
{
struct sk_buff *skb;
if (unlikely(!npi))
return;
skb = skb_dequeue(&npi->arp_tx);
while (skb != NULL) {
arp_reply(skb);
skb = skb_dequeue(&npi->arp_tx);
}
return;
}
void netpoll_poll(struct netpoll *np)
{
if(!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
@ -163,6 +180,8 @@ void netpoll_poll(struct netpoll *np)
if (np->dev->poll)
poll_napi(np);
service_arp_queue(np->dev->npinfo);
zap_completion_queue();
}
@ -279,14 +298,10 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
* network drivers do not expect to be called if the queue is
* stopped.
*/
if (netif_queue_stopped(np->dev)) {
netif_tx_unlock(np->dev);
netpoll_poll(np);
udelay(50);
continue;
}
status = NETDEV_TX_BUSY;
if (!netif_queue_stopped(np->dev))
status = np->dev->hard_start_xmit(skb, np->dev);
status = np->dev->hard_start_xmit(skb, np->dev);
netif_tx_unlock(np->dev);
/* success */
@ -446,7 +461,9 @@ int __netpoll_rx(struct sk_buff *skb)
int proto, len, ulen;
struct iphdr *iph;
struct udphdr *uh;
struct netpoll *np = skb->dev->npinfo->rx_np;
struct netpoll_info *npi = skb->dev->npinfo;
struct netpoll *np = npi->rx_np;
if (!np)
goto out;
@ -456,7 +473,7 @@ int __netpoll_rx(struct sk_buff *skb)
/* check if netpoll clients need ARP */
if (skb->protocol == __constant_htons(ETH_P_ARP) &&
atomic_read(&trapped)) {
arp_reply(skb);
skb_queue_tail(&npi->arp_tx, skb);
return 1;
}
@ -651,6 +668,7 @@ int netpoll_setup(struct netpoll *np)
npinfo->poll_owner = -1;
npinfo->tries = MAX_RETRIES;
spin_lock_init(&npinfo->rx_lock);
skb_queue_head_init(&npinfo->arp_tx);
} else
npinfo = ndev->npinfo;
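
Taken together, the netpoll changes above break the recursive rx loop by never calling arp_reply() from the receive path: incoming ARP requests are parked on npinfo->arp_tx and answered later from netpoll_poll(). A condensed sketch of that producer/consumer shape (standalone illustration with hypothetical function names, not a drop-in replacement for the code above):

/* Producer: runs in the trapped rx path, where replying immediately
 * could recurse back into netpoll.  Just queue the skb and return. */
static int rx_saw_arp(struct netpoll_info *npi, struct sk_buff *skb)
{
	skb_queue_tail(&npi->arp_tx, skb);
	return 1;			/* consumed */
}

/* Consumer: runs from netpoll_poll(), outside the rx path, so calling
 * arp_reply() here cannot re-enter receive processing. */
static void drain_arp(struct netpoll_info *npi)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&npi->arp_tx)) != NULL)
		arp_reply(skb);
}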

View File

@ -1739,12 +1739,15 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
unsigned int to, struct ts_config *config,
struct ts_state *state)
{
unsigned int ret;
config->get_next_block = skb_ts_get_next_block;
config->finish = skb_ts_finish;
skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
return textsearch_find(config, state);
ret = textsearch_find(config, state);
return (ret <= to - from ? ret : UINT_MAX);
}
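
With the fix above, a match that begins past the 'to' offset is reported as UINT_MAX instead of leaking through as a bogus offset. A hypothetical caller (not from this patch) would treat the return value like this:

	unsigned int pos;

	pos = skb_find_text(skb, from, to, config, &state);
	if (pos != UINT_MAX)
		/* hit: pattern starts 'pos' bytes after 'from' in the skb */
		handle_match(skb, from + pos);	/* handle_match() is hypothetical */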
/**

View File

@ -2166,7 +2166,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int sg)
if (!pskb_may_pull(skb, thlen))
goto out;
oldlen = ~htonl(skb->len);
oldlen = (u16)~skb->len;
__skb_pull(skb, thlen);
segs = skb_segment(skb, sg);
@ -2174,7 +2174,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int sg)
goto out;
len = skb_shinfo(skb)->gso_size;
delta = csum_add(oldlen, htonl(thlen + len));
delta = htonl(oldlen + (thlen + len));
skb = segs;
th = skb->h.th;
@ -2183,10 +2183,10 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int sg)
do {
th->fin = th->psh = 0;
if (skb->ip_summed == CHECKSUM_NONE) {
th->check = csum_fold(csum_partial(
skb->h.raw, thlen, csum_add(skb->csum, delta)));
}
th->check = ~csum_fold(th->check + delta);
if (skb->ip_summed != CHECKSUM_HW)
th->check = csum_fold(csum_partial(skb->h.raw, thlen,
skb->csum));
seq += len;
skb = skb->next;
@ -2196,11 +2196,11 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int sg)
th->cwr = 0;
} while (skb->next);
if (skb->ip_summed == CHECKSUM_NONE) {
delta = csum_add(oldlen, htonl(skb->tail - skb->h.raw));
th->check = csum_fold(csum_partial(
skb->h.raw, thlen, csum_add(skb->csum, delta)));
}
delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
th->check = ~csum_fold(th->check + delta);
if (skb->ip_summed != CHECKSUM_HW)
th->check = csum_fold(csum_partial(skb->h.raw, thlen,
skb->csum));
out:
return segs;
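
For readers following the CHECKSUM_HW path above: oldlen holds the one's complement of the original length, so adding it to the new length and folding the sum performs the incremental update of RFC 1624 (HC' = ~(~HC + ~m + m')). A sketch of that update, assuming the checksum covers exactly one 16-bit length value that changed:

/* Sketch only (not from the patch): incrementally patch a 16-bit
 * Internet checksum when a length it covers changes from old_len to
 * new_len, instead of recomputing the sum from scratch. */
static u16 update_check_for_len(u16 check, u32 old_len, u32 new_len)
{
	u32 delta = htonl((u16)~old_len + new_len);

	return ~csum_fold(check + delta);
}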

View File

@ -725,15 +725,17 @@ void nr_link_failed(ax25_cb *ax25, int reason)
struct nr_node *nr_node = NULL;
spin_lock_bh(&nr_neigh_list_lock);
nr_neigh_for_each(s, node, &nr_neigh_list)
nr_neigh_for_each(s, node, &nr_neigh_list) {
if (s->ax25 == ax25) {
nr_neigh_hold(s);
nr_neigh = s;
break;
}
}
spin_unlock_bh(&nr_neigh_list_lock);
if (nr_neigh == NULL) return;
if (nr_neigh == NULL)
return;
nr_neigh->ax25 = NULL;
ax25_cb_put(ax25);
@ -743,11 +745,13 @@ void nr_link_failed(ax25_cb *ax25, int reason)
return;
}
spin_lock_bh(&nr_node_list_lock);
nr_node_for_each(nr_node, node, &nr_node_list)
nr_node_for_each(nr_node, node, &nr_node_list) {
nr_node_lock(nr_node);
if (nr_node->which < nr_node->count && nr_node->routes[nr_node->which].neighbour == nr_neigh)
if (nr_node->which < nr_node->count &&
nr_node->routes[nr_node->which].neighbour == nr_neigh)
nr_node->which++;
nr_node_unlock(nr_node);
}
spin_unlock_bh(&nr_node_list_lock);
nr_neigh_put(nr_neigh);
}

View File

@ -49,13 +49,19 @@
#include "name_table.h"
#include "bcast.h"
#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */
#define BCLINK_WIN_DEFAULT 20 /* bcast link window size (default) */
#define BCLINK_LOG_BUF_SIZE 0
/*
* Loss rate for incoming broadcast frames; used to test retransmission code.
* Set to N to cause every N'th frame to be discarded; 0 => don't discard any.
*/
#define TIPC_BCAST_LOSS_RATE 0
/**
* struct bcbearer_pair - a pair of bearers used by broadcast link
* @primary: pointer to primary bearer
@ -75,7 +81,14 @@ struct bcbearer_pair {
* @bearer: (non-standard) broadcast bearer structure
* @media: (non-standard) broadcast media structure
* @bpairs: array of bearer pairs
* @bpairs_temp: array of bearer pairs used during creation of "bpairs"
* @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
* @remains: temporary node map used by tipc_bcbearer_send()
* @remains_new: temporary node map used tipc_bcbearer_send()
*
* Note: The fields labelled "temporary" are incorporated into the bearer
* to avoid consuming potentially limited stack space through the use of
* large local variables within multicast routines. Concurrent access is
* prevented through use of the spinlock "bc_lock".
*/
struct bcbearer {
@ -83,6 +96,8 @@ struct bcbearer {
struct media media;
struct bcbearer_pair bpairs[MAX_BEARERS];
struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
struct node_map remains;
struct node_map remains_new;
};
/**
@ -165,21 +180,18 @@ static int bclink_ack_allowed(u32 n)
* @after: sequence number of last packet to *not* retransmit
* @to: sequence number of last packet to retransmit
*
* Called with 'node' locked, bc_lock unlocked
* Called with bc_lock locked
*/
static void bclink_retransmit_pkt(u32 after, u32 to)
{
struct sk_buff *buf;
spin_lock_bh(&bc_lock);
buf = bcl->first_out;
while (buf && less_eq(buf_seqno(buf), after)) {
buf = buf->next;
}
if (buf != NULL)
tipc_link_retransmit(bcl, buf, mod(to - after));
spin_unlock_bh(&bc_lock);
tipc_link_retransmit(bcl, buf, mod(to - after));
}
/**
@ -346,8 +358,10 @@ static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 g
for (; buf; buf = buf->next) {
u32 seqno = buf_seqno(buf);
if (mod(seqno - prev) != 1)
if (mod(seqno - prev) != 1) {
buf = NULL;
break;
}
if (seqno == gap_after)
break;
prev = seqno;
@ -399,7 +413,10 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
*/
void tipc_bclink_recv_pkt(struct sk_buff *buf)
{
{
#if (TIPC_BCAST_LOSS_RATE)
static int rx_count = 0;
#endif
struct tipc_msg *msg = buf_msg(buf);
struct node* node = tipc_node_find(msg_prevnode(msg));
u32 next_in;
@ -420,9 +437,13 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
tipc_node_lock(node);
tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
tipc_node_unlock(node);
spin_lock_bh(&bc_lock);
bcl->stats.recv_nacks++;
bcl->owner->next = node; /* remember requestor */
bclink_retransmit_pkt(msg_bcgap_after(msg),
msg_bcgap_to(msg));
bcl->owner->next = NULL;
spin_unlock_bh(&bc_lock);
} else {
tipc_bclink_peek_nack(msg_destnode(msg),
msg_bcast_tag(msg),
@ -433,6 +454,14 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
return;
}
#if (TIPC_BCAST_LOSS_RATE)
if (++rx_count == TIPC_BCAST_LOSS_RATE) {
rx_count = 0;
buf_discard(buf);
return;
}
#endif
tipc_node_lock(node);
receive:
deferred = node->bclink.deferred_head;
@ -531,12 +560,8 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
{
static int send_count = 0;
struct node_map *remains;
struct node_map *remains_new;
struct node_map *remains_tmp;
int bp_index;
int swap_time;
int err;
/* Prepare buffer for broadcasting (if first time trying to send it) */
@ -557,9 +582,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
/* Send buffer over bearers until all targets reached */
remains = kmalloc(sizeof(struct node_map), GFP_ATOMIC);
remains_new = kmalloc(sizeof(struct node_map), GFP_ATOMIC);
*remains = tipc_cltr_bcast_nodes;
bcbearer->remains = tipc_cltr_bcast_nodes;
for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
struct bearer *p = bcbearer->bpairs[bp_index].primary;
@ -568,8 +591,8 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
if (!p)
break; /* no more bearers to try */
tipc_nmap_diff(remains, &p->nodes, remains_new);
if (remains_new->count == remains->count)
tipc_nmap_diff(&bcbearer->remains, &p->nodes, &bcbearer->remains_new);
if (bcbearer->remains_new.count == bcbearer->remains.count)
continue; /* bearer pair doesn't add anything */
if (!p->publ.blocked &&
@ -587,27 +610,17 @@ swap:
bcbearer->bpairs[bp_index].primary = s;
bcbearer->bpairs[bp_index].secondary = p;
update:
if (remains_new->count == 0) {
err = TIPC_OK;
goto out;
}
if (bcbearer->remains_new.count == 0)
return TIPC_OK;
/* swap map */
remains_tmp = remains;
remains = remains_new;
remains_new = remains_tmp;
bcbearer->remains = bcbearer->remains_new;
}
/* Unable to reach all targets */
bcbearer->bearer.publ.blocked = 1;
bcl->stats.bearer_congs++;
err = ~TIPC_OK;
out:
kfree(remains_new);
kfree(remains);
return err;
return ~TIPC_OK;
}
/**
@ -765,7 +778,7 @@ int tipc_bclink_init(void)
bclink = kmalloc(sizeof(*bclink), GFP_ATOMIC);
if (!bcbearer || !bclink) {
nomem:
warn("Memory squeeze; Failed to create multicast link\n");
warn("Multicast link creation failed, no memory\n");
kfree(bcbearer);
bcbearer = NULL;
kfree(bclink);

View File

@ -180,7 +180,7 @@ static inline void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
if (!item->next) {
item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
if (!item->next) {
warn("Memory squeeze: multicast destination port list is incomplete\n");
warn("Incomplete multicast delivery, no memory\n");
return;
}
item->next->next = NULL;

View File

@ -112,39 +112,42 @@ int tipc_register_media(u32 media_type,
goto exit;
if (!media_name_valid(name)) {
warn("Media registration error: illegal name <%s>\n", name);
warn("Media <%s> rejected, illegal name\n", name);
goto exit;
}
if (!bcast_addr) {
warn("Media registration error: no broadcast address supplied\n");
warn("Media <%s> rejected, no broadcast address\n", name);
goto exit;
}
if ((bearer_priority < TIPC_MIN_LINK_PRI) &&
(bearer_priority > TIPC_MAX_LINK_PRI)) {
warn("Media registration error: priority %u\n", bearer_priority);
warn("Media <%s> rejected, illegal priority (%u)\n", name,
bearer_priority);
goto exit;
}
if ((link_tolerance < TIPC_MIN_LINK_TOL) ||
(link_tolerance > TIPC_MAX_LINK_TOL)) {
warn("Media registration error: tolerance %u\n", link_tolerance);
warn("Media <%s> rejected, illegal tolerance (%u)\n", name,
link_tolerance);
goto exit;
}
media_id = media_count++;
if (media_id >= MAX_MEDIA) {
warn("Attempt to register more than %u media\n", MAX_MEDIA);
warn("Media <%s> rejected, media limit reached (%u)\n", name,
MAX_MEDIA);
media_count--;
goto exit;
}
for (i = 0; i < media_id; i++) {
if (media_list[i].type_id == media_type) {
warn("Attempt to register second media with type %u\n",
warn("Media <%s> rejected, duplicate type (%u)\n", name,
media_type);
media_count--;
goto exit;
}
if (!strcmp(name, media_list[i].name)) {
warn("Attempt to re-register media name <%s>\n", name);
warn("Media <%s> rejected, duplicate name\n", name);
media_count--;
goto exit;
}
@ -283,6 +286,9 @@ static struct bearer *bearer_find(const char *name)
struct bearer *b_ptr;
u32 i;
if (tipc_mode != TIPC_NET_MODE)
return NULL;
for (i = 0, b_ptr = tipc_bearers; i < MAX_BEARERS; i++, b_ptr++) {
if (b_ptr->active && (!strcmp(b_ptr->publ.name, name)))
return b_ptr;
@ -475,26 +481,33 @@ int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority)
u32 i;
int res = -EINVAL;
if (tipc_mode != TIPC_NET_MODE)
if (tipc_mode != TIPC_NET_MODE) {
warn("Bearer <%s> rejected, not supported in standalone mode\n",
name);
return -ENOPROTOOPT;
if (!bearer_name_validate(name, &b_name) ||
!tipc_addr_domain_valid(bcast_scope) ||
!in_scope(bcast_scope, tipc_own_addr))
}
if (!bearer_name_validate(name, &b_name)) {
warn("Bearer <%s> rejected, illegal name\n", name);
return -EINVAL;
}
if (!tipc_addr_domain_valid(bcast_scope) ||
!in_scope(bcast_scope, tipc_own_addr)) {
warn("Bearer <%s> rejected, illegal broadcast scope\n", name);
return -EINVAL;
}
if ((priority < TIPC_MIN_LINK_PRI ||
priority > TIPC_MAX_LINK_PRI) &&
(priority != TIPC_MEDIA_LINK_PRI))
(priority != TIPC_MEDIA_LINK_PRI)) {
warn("Bearer <%s> rejected, illegal priority\n", name);
return -EINVAL;
}
write_lock_bh(&tipc_net_lock);
if (!tipc_bearers)
goto failed;
m_ptr = media_find(b_name.media_name);
if (!m_ptr) {
warn("No media <%s>\n", b_name.media_name);
warn("Bearer <%s> rejected, media <%s> not registered\n", name,
b_name.media_name);
goto failed;
}
@ -510,23 +523,24 @@ restart:
continue;
}
if (!strcmp(name, tipc_bearers[i].publ.name)) {
warn("Bearer <%s> already enabled\n", name);
warn("Bearer <%s> rejected, already enabled\n", name);
goto failed;
}
if ((tipc_bearers[i].priority == priority) &&
(++with_this_prio > 2)) {
if (priority-- == 0) {
warn("Third bearer <%s> with priority %u, unable to lower to %u\n",
name, priority + 1, priority);
warn("Bearer <%s> rejected, duplicate priority\n",
name);
goto failed;
}
warn("Third bearer <%s> with priority %u, lowering to %u\n",
warn("Bearer <%s> priority adjustment required %u->%u\n",
name, priority + 1, priority);
goto restart;
}
}
if (bearer_id >= MAX_BEARERS) {
warn("Attempt to enable more than %d bearers\n", MAX_BEARERS);
warn("Bearer <%s> rejected, bearer limit reached (%u)\n",
name, MAX_BEARERS);
goto failed;
}
@ -536,7 +550,7 @@ restart:
strcpy(b_ptr->publ.name, name);
res = m_ptr->enable_bearer(&b_ptr->publ);
if (res) {
warn("Failed to enable bearer <%s>\n", name);
warn("Bearer <%s> rejected, enable failure (%d)\n", name, -res);
goto failed;
}
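
For orientation, the checks above all guard tipc_enable_bearer(); a hypothetical successful call (not shown in this patch) looks roughly like the following, where the bearer name is "<media>:<interface>" and TIPC_MEDIA_LINK_PRI requests the media's default priority:

	/* Illustrative usage sketch; tipc_addr() builds a <zone.cluster.node>
	 * style TIPC address to use as the broadcast scope. */
	int res;

	res = tipc_enable_bearer("eth:eth0", tipc_addr(1, 1, 0),
				 TIPC_MEDIA_LINK_PRI);
	if (res)
		warn("could not enable bearer: %d\n", res);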
@ -573,9 +587,6 @@ int tipc_block_bearer(const char *name)
struct link *l_ptr;
struct link *temp_l_ptr;
if (tipc_mode != TIPC_NET_MODE)
return -ENOPROTOOPT;
read_lock_bh(&tipc_net_lock);
b_ptr = bearer_find(name);
if (!b_ptr) {
@ -584,6 +595,7 @@ int tipc_block_bearer(const char *name)
return -EINVAL;
}
info("Blocking bearer <%s>\n", name);
spin_lock_bh(&b_ptr->publ.lock);
b_ptr->publ.blocked = 1;
list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
@ -595,7 +607,6 @@ int tipc_block_bearer(const char *name)
}
spin_unlock_bh(&b_ptr->publ.lock);
read_unlock_bh(&tipc_net_lock);
info("Blocked bearer <%s>\n", name);
return TIPC_OK;
}
@ -611,15 +622,13 @@ static int bearer_disable(const char *name)
struct link *l_ptr;
struct link *temp_l_ptr;
if (tipc_mode != TIPC_NET_MODE)
return -ENOPROTOOPT;
b_ptr = bearer_find(name);
if (!b_ptr) {
warn("Attempt to disable unknown bearer <%s>\n", name);
return -EINVAL;
}
info("Disabling bearer <%s>\n", name);
tipc_disc_stop_link_req(b_ptr->link_req);
spin_lock_bh(&b_ptr->publ.lock);
b_ptr->link_req = NULL;
@ -635,7 +644,6 @@ static int bearer_disable(const char *name)
tipc_link_delete(l_ptr);
}
spin_unlock_bh(&b_ptr->publ.lock);
info("Disabled bearer <%s>\n", name);
memset(b_ptr, 0, sizeof(struct bearer));
return TIPC_OK;
}

View File

@ -60,8 +60,10 @@ struct cluster *tipc_cltr_create(u32 addr)
int alloc;
c_ptr = (struct cluster *)kmalloc(sizeof(*c_ptr), GFP_ATOMIC);
if (c_ptr == NULL)
if (c_ptr == NULL) {
warn("Cluster creation failure, no memory\n");
return NULL;
}
memset(c_ptr, 0, sizeof(*c_ptr));
c_ptr->addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
@ -70,30 +72,32 @@ struct cluster *tipc_cltr_create(u32 addr)
else
max_nodes = tipc_max_nodes + 1;
alloc = sizeof(void *) * (max_nodes + 1);
c_ptr->nodes = (struct node **)kmalloc(alloc, GFP_ATOMIC);
if (c_ptr->nodes == NULL) {
warn("Cluster creation failure, no memory for node area\n");
kfree(c_ptr);
return NULL;
}
memset(c_ptr->nodes, 0, alloc);
memset(c_ptr->nodes, 0, alloc);
if (in_own_cluster(addr))
tipc_local_nodes = c_ptr->nodes;
c_ptr->highest_slave = LOWEST_SLAVE - 1;
c_ptr->highest_node = 0;
z_ptr = tipc_zone_find(tipc_zone(addr));
if (z_ptr == NULL) {
if (!z_ptr) {
z_ptr = tipc_zone_create(addr);
}
if (z_ptr != NULL) {
tipc_zone_attach_cluster(z_ptr, c_ptr);
c_ptr->owner = z_ptr;
}
else {
if (!z_ptr) {
kfree(c_ptr->nodes);
kfree(c_ptr);
c_ptr = NULL;
return NULL;
}
tipc_zone_attach_cluster(z_ptr, c_ptr);
c_ptr->owner = z_ptr;
return c_ptr;
}

View File

@ -291,13 +291,22 @@ static struct sk_buff *cfg_set_own_addr(void)
if (!tipc_addr_node_valid(addr))
return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
" (node address)");
if (tipc_own_addr)
if (tipc_mode == TIPC_NET_MODE)
return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
" (cannot change node address once assigned)");
tipc_own_addr = addr;
/*
* Must release all spinlocks before calling start_net() because
* Linux version of TIPC calls eth_media_start() which calls
* register_netdevice_notifier() which may block!
*
* Temporarily releasing the lock should be harmless for non-Linux TIPC,
* but Linux version of eth_media_start() should really be reworked
* so that it can be called with spinlocks held.
*/
spin_unlock_bh(&config_lock);
tipc_core_stop_net();
tipc_own_addr = addr;
tipc_core_start_net();
spin_lock_bh(&config_lock);
return tipc_cfg_reply_none();
@ -350,50 +359,21 @@ static struct sk_buff *cfg_set_max_subscriptions(void)
static struct sk_buff *cfg_set_max_ports(void)
{
int orig_mode;
u32 value;
if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
value = *(u32 *)TLV_DATA(req_tlv_area);
value = ntohl(value);
if (value == tipc_max_ports)
return tipc_cfg_reply_none();
if (value != delimit(value, 127, 65535))
return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
" (max ports must be 127-65535)");
if (value == tipc_max_ports)
return tipc_cfg_reply_none();
if (atomic_read(&tipc_user_count) > 2)
if (tipc_mode != TIPC_NOT_RUNNING)
return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
" (cannot change max ports while TIPC users exist)");
spin_unlock_bh(&config_lock);
orig_mode = tipc_get_mode();
if (orig_mode == TIPC_NET_MODE)
tipc_core_stop_net();
tipc_core_stop();
" (cannot change max ports while TIPC is active)");
tipc_max_ports = value;
tipc_core_start();
if (orig_mode == TIPC_NET_MODE)
tipc_core_start_net();
spin_lock_bh(&config_lock);
return tipc_cfg_reply_none();
}
static struct sk_buff *set_net_max(int value, int *parameter)
{
int orig_mode;
if (value != *parameter) {
orig_mode = tipc_get_mode();
if (orig_mode == TIPC_NET_MODE)
tipc_core_stop_net();
*parameter = value;
if (orig_mode == TIPC_NET_MODE)
tipc_core_start_net();
}
return tipc_cfg_reply_none();
}
@ -405,10 +385,16 @@ static struct sk_buff *cfg_set_max_zones(void)
return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
value = *(u32 *)TLV_DATA(req_tlv_area);
value = ntohl(value);
if (value == tipc_max_zones)
return tipc_cfg_reply_none();
if (value != delimit(value, 1, 255))
return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
" (max zones must be 1-255)");
return set_net_max(value, &tipc_max_zones);
if (tipc_mode == TIPC_NET_MODE)
return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
" (cannot change max zones once TIPC has joined a network)");
tipc_max_zones = value;
return tipc_cfg_reply_none();
}
static struct sk_buff *cfg_set_max_clusters(void)
@ -419,8 +405,8 @@ static struct sk_buff *cfg_set_max_clusters(void)
return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
value = *(u32 *)TLV_DATA(req_tlv_area);
value = ntohl(value);
if (value != 1)
return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
if (value != delimit(value, 1, 1))
return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
" (max clusters fixed at 1)");
return tipc_cfg_reply_none();
}
@ -433,10 +419,16 @@ static struct sk_buff *cfg_set_max_nodes(void)
return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
value = *(u32 *)TLV_DATA(req_tlv_area);
value = ntohl(value);
if (value == tipc_max_nodes)
return tipc_cfg_reply_none();
if (value != delimit(value, 8, 2047))
return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
" (max nodes must be 8-2047)");
return set_net_max(value, &tipc_max_nodes);
if (tipc_mode == TIPC_NET_MODE)
return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
" (cannot change max nodes once TIPC has joined a network)");
tipc_max_nodes = value;
return tipc_cfg_reply_none();
}
static struct sk_buff *cfg_set_max_slaves(void)
@ -461,15 +453,16 @@ static struct sk_buff *cfg_set_netid(void)
return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
value = *(u32 *)TLV_DATA(req_tlv_area);
value = ntohl(value);
if (value == tipc_net_id)
return tipc_cfg_reply_none();
if (value != delimit(value, 1, 9999))
return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
" (network id must be 1-9999)");
if (tipc_own_addr)
if (tipc_mode == TIPC_NET_MODE)
return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
" (cannot change network id once part of network)");
return set_net_max(value, &tipc_net_id);
" (cannot change network id once TIPC has joined a network)");
tipc_net_id = value;
return tipc_cfg_reply_none();
}
struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area,
@ -649,7 +642,7 @@ static void cfg_named_msg_event(void *userdata,
if ((size < sizeof(*req_hdr)) ||
(size != TCM_ALIGN(ntohl(req_hdr->tcm_len))) ||
(ntohs(req_hdr->tcm_flags) != TCM_F_REQUEST)) {
warn("discarded invalid configuration message\n");
warn("Invalid configuration message discarded\n");
return;
}

View File

@ -2,7 +2,7 @@
* net/tipc/core.c: TIPC module code
*
* Copyright (c) 2003-2006, Ericsson AB
* Copyright (c) 2005, Wind River Systems
* Copyright (c) 2005-2006, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -57,7 +57,7 @@ void tipc_socket_stop(void);
int tipc_netlink_start(void);
void tipc_netlink_stop(void);
#define MOD_NAME "tipc_start: "
#define TIPC_MOD_VER "1.6.1"
#ifndef CONFIG_TIPC_ZONES
#define CONFIG_TIPC_ZONES 3
@ -198,7 +198,7 @@ static int __init tipc_init(void)
tipc_max_publications = 10000;
tipc_max_subscriptions = 2000;
tipc_max_ports = delimit(CONFIG_TIPC_PORTS, 127, 65536);
tipc_max_zones = delimit(CONFIG_TIPC_ZONES, 1, 511);
tipc_max_zones = delimit(CONFIG_TIPC_ZONES, 1, 255);
tipc_max_clusters = delimit(CONFIG_TIPC_CLUSTERS, 1, 1);
tipc_max_nodes = delimit(CONFIG_TIPC_NODES, 8, 2047);
tipc_max_slaves = delimit(CONFIG_TIPC_SLAVE_NODES, 0, 2047);
@ -224,6 +224,7 @@ module_exit(tipc_exit);
MODULE_DESCRIPTION("TIPC: Transparent Inter Process Communication");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(TIPC_MOD_VER);
/* Native TIPC API for kernel-space applications (see tipc.h) */

View File

@ -2,7 +2,7 @@
* net/tipc/core.h: Include file for TIPC global declarations
*
* Copyright (c) 2005-2006, Ericsson AB
* Copyright (c) 2005, Wind River Systems
* Copyright (c) 2005-2006, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -111,10 +111,6 @@ void tipc_dump(struct print_buf*,const char *fmt, ...);
#else
#ifndef DBG_OUTPUT
#define DBG_OUTPUT NULL
#endif
/*
* TIPC debug support not included:
* - system messages are printed to system console
@ -129,6 +125,19 @@ void tipc_dump(struct print_buf*,const char *fmt, ...);
#define msg_dbg(msg,txt) do {} while (0)
#define dump(fmt,arg...) do {} while (0)
/*
* TIPC_OUTPUT is defined to be the system console, while DBG_OUTPUT is
* the null print buffer. This ensures that any system or debug messages
* that are generated without using the above macros are handled correctly.
*/
#undef TIPC_OUTPUT
#define TIPC_OUTPUT TIPC_CONS
#undef DBG_OUTPUT
#define DBG_OUTPUT NULL
#endif
@ -309,7 +318,7 @@ static inline struct sk_buff *buf_acquire(u32 size)
* buf_discard - frees a TIPC message buffer
* @skb: message buffer
*
* Frees a new buffer. If passed NULL, just returns.
* Frees a message buffer. If passed NULL, just returns.
*/
static inline void buf_discard(struct sk_buff *skb)

View File

@ -2,7 +2,7 @@
* net/tipc/discover.c
*
* Copyright (c) 2003-2006, Ericsson AB
* Copyright (c) 2005, Wind River Systems
* Copyright (c) 2005-2006, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -176,7 +176,6 @@ void tipc_disc_recv_msg(struct sk_buff *buf)
n_ptr = tipc_node_create(orig);
}
if (n_ptr == NULL) {
warn("Memory squeeze; Failed to create node\n");
return;
}
spin_lock_bh(&n_ptr->lock);
@ -191,10 +190,8 @@ void tipc_disc_recv_msg(struct sk_buff *buf)
}
addr = &link->media_addr;
if (memcmp(addr, &media_addr, sizeof(*addr))) {
char addr_string[16];
warn("New bearer address for %s\n",
addr_string_fill(addr_string, orig));
warn("Resetting link <%s>, peer interface address changed\n",
link->name);
memcpy(addr, &media_addr, sizeof(*addr));
tipc_link_reset(link);
}
@ -270,8 +267,8 @@ static void disc_timeout(struct link_req *req)
/* leave timer interval "as is" if already at a "normal" rate */
} else {
req->timer_intv *= 2;
if (req->timer_intv > TIPC_LINK_REQ_SLOW)
req->timer_intv = TIPC_LINK_REQ_SLOW;
if (req->timer_intv > TIPC_LINK_REQ_FAST)
req->timer_intv = TIPC_LINK_REQ_FAST;
if ((req->timer_intv == TIPC_LINK_REQ_FAST) &&
(req->bearer->nodes.count))
req->timer_intv = TIPC_LINK_REQ_SLOW;

View File

@ -2,7 +2,7 @@
* net/tipc/eth_media.c: Ethernet bearer support for TIPC
*
* Copyright (c) 2001-2006, Ericsson AB
* Copyright (c) 2005, Wind River Systems
* Copyright (c) 2005-2006, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -98,17 +98,19 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev,
u32 size;
if (likely(eb_ptr->bearer)) {
size = msg_size((struct tipc_msg *)buf->data);
skb_trim(buf, size);
if (likely(buf->len == size)) {
buf->next = NULL;
tipc_recv_msg(buf, eb_ptr->bearer);
} else {
kfree_skb(buf);
if (likely(!dev->promiscuity) ||
!memcmp(buf->mac.raw,dev->dev_addr,ETH_ALEN) ||
!memcmp(buf->mac.raw,dev->broadcast,ETH_ALEN)) {
size = msg_size((struct tipc_msg *)buf->data);
skb_trim(buf, size);
if (likely(buf->len == size)) {
buf->next = NULL;
tipc_recv_msg(buf, eb_ptr->bearer);
return TIPC_OK;
}
}
} else {
kfree_skb(buf);
}
kfree_skb(buf);
return TIPC_OK;
}
@ -125,8 +127,7 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
/* Find device with specified name */
while (dev && dev->name &&
(memcmp(dev->name, driver_name, strlen(dev->name)))) {
while (dev && dev->name && strncmp(dev->name, driver_name, IFNAMSIZ)) {
dev = dev->next;
}
if (!dev)
@ -252,7 +253,9 @@ int tipc_eth_media_start(void)
if (eth_started)
return -EINVAL;
memset(&bcast_addr, 0xff, sizeof(bcast_addr));
bcast_addr.type = htonl(TIPC_MEDIA_TYPE_ETH);
memset(&bcast_addr.dev_addr, 0xff, ETH_ALEN);
memset(eth_bearers, 0, sizeof(eth_bearers));
res = tipc_register_media(TIPC_MEDIA_TYPE_ETH, "eth",

View File

@ -419,7 +419,7 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
l_ptr = (struct link *)kmalloc(sizeof(*l_ptr), GFP_ATOMIC);
if (!l_ptr) {
warn("Memory squeeze; Failed to create link\n");
warn("Link creation failed, no memory\n");
return NULL;
}
memset(l_ptr, 0, sizeof(*l_ptr));
@ -469,7 +469,7 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
if (!pb) {
kfree(l_ptr);
warn("Memory squeeze; Failed to create link\n");
warn("Link creation failed, no memory for print buffer\n");
return NULL;
}
tipc_printbuf_init(&l_ptr->print_buf, pb, LINK_LOG_BUF_SIZE);
@ -574,7 +574,6 @@ void tipc_link_wakeup_ports(struct link *l_ptr, int all)
break;
list_del_init(&p_ptr->wait_list);
p_ptr->congested_link = NULL;
assert(p_ptr->wakeup);
spin_lock_bh(p_ptr->publ.lock);
p_ptr->publ.congested = 0;
p_ptr->wakeup(&p_ptr->publ);
@ -691,6 +690,7 @@ void tipc_link_reset(struct link *l_ptr)
struct sk_buff *buf;
u32 prev_state = l_ptr->state;
u32 checkpoint = l_ptr->next_in_no;
int was_active_link = tipc_link_is_active(l_ptr);
msg_set_session(l_ptr->pmsg, msg_session(l_ptr->pmsg) + 1);
@ -712,7 +712,7 @@ void tipc_link_reset(struct link *l_ptr)
tipc_printf(TIPC_CONS, "\nReset link <%s>\n", l_ptr->name);
dbg_link_dump();
#endif
if (tipc_node_has_active_links(l_ptr->owner) &&
if (was_active_link && tipc_node_has_active_links(l_ptr->owner) &&
l_ptr->owner->permit_changeover) {
l_ptr->reset_checkpoint = checkpoint;
l_ptr->exp_msg_count = START_CHANGEOVER;
@ -755,7 +755,7 @@ void tipc_link_reset(struct link *l_ptr)
static void link_activate(struct link *l_ptr)
{
l_ptr->next_in_no = 1;
l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
tipc_node_link_up(l_ptr->owner, l_ptr);
tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
link_send_event(tipc_cfg_link_event, l_ptr, 1);
@ -820,6 +820,8 @@ static void link_state_event(struct link *l_ptr, unsigned event)
break;
case RESET_MSG:
dbg_link("RES -> RR\n");
info("Resetting link <%s>, requested by peer\n",
l_ptr->name);
tipc_link_reset(l_ptr);
l_ptr->state = RESET_RESET;
l_ptr->fsm_msg_cnt = 0;
@ -844,6 +846,8 @@ static void link_state_event(struct link *l_ptr, unsigned event)
break;
case RESET_MSG:
dbg_link("RES -> RR\n");
info("Resetting link <%s>, requested by peer "
"while probing\n", l_ptr->name);
tipc_link_reset(l_ptr);
l_ptr->state = RESET_RESET;
l_ptr->fsm_msg_cnt = 0;
@ -875,6 +879,8 @@ static void link_state_event(struct link *l_ptr, unsigned event)
} else { /* Link has failed */
dbg_link("-> RU (%u probes unanswered)\n",
l_ptr->fsm_msg_cnt);
warn("Resetting link <%s>, peer not responding\n",
l_ptr->name);
tipc_link_reset(l_ptr);
l_ptr->state = RESET_UNKNOWN;
l_ptr->fsm_msg_cnt = 0;
@ -1050,7 +1056,7 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
msg_dbg(msg, "TIPC: Congestion, throwing away\n");
buf_discard(buf);
if (imp > CONN_MANAGER) {
warn("Resetting <%s>, send queue full", l_ptr->name);
warn("Resetting link <%s>, send queue full", l_ptr->name);
tipc_link_reset(l_ptr);
}
return dsz;
@ -1135,9 +1141,13 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
if (n_ptr) {
tipc_node_lock(n_ptr);
l_ptr = n_ptr->active_links[selector & 1];
dbg("tipc_link_send: found link %x for dest %x\n", l_ptr, dest);
if (l_ptr) {
dbg("tipc_link_send: found link %x for dest %x\n", l_ptr, dest);
res = tipc_link_send_buf(l_ptr, buf);
} else {
dbg("Attempt to send msg to unreachable node:\n");
msg_dbg(buf_msg(buf),">>>");
buf_discard(buf);
}
tipc_node_unlock(n_ptr);
} else {
@ -1242,8 +1252,6 @@ int tipc_link_send_sections_fast(struct port *sender,
int res;
u32 selector = msg_origport(hdr) & 1;
assert(destaddr != tipc_own_addr);
again:
/*
* Try building message using port's max_pkt hint.
@ -1604,40 +1612,121 @@ void tipc_link_push_queue(struct link *l_ptr)
tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
}
static void link_reset_all(unsigned long addr)
{
struct node *n_ptr;
char addr_string[16];
u32 i;
read_lock_bh(&tipc_net_lock);
n_ptr = tipc_node_find((u32)addr);
if (!n_ptr) {
read_unlock_bh(&tipc_net_lock);
return; /* node no longer exists */
}
tipc_node_lock(n_ptr);
warn("Resetting all links to %s\n",
addr_string_fill(addr_string, n_ptr->addr));
for (i = 0; i < MAX_BEARERS; i++) {
if (n_ptr->links[i]) {
link_print(n_ptr->links[i], TIPC_OUTPUT,
"Resetting link\n");
tipc_link_reset(n_ptr->links[i]);
}
}
tipc_node_unlock(n_ptr);
read_unlock_bh(&tipc_net_lock);
}
static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
{
struct tipc_msg *msg = buf_msg(buf);
warn("Retransmission failure on link <%s>\n", l_ptr->name);
tipc_msg_print(TIPC_OUTPUT, msg, ">RETR-FAIL>");
if (l_ptr->addr) {
/* Handle failure on standard link */
link_print(l_ptr, TIPC_OUTPUT, "Resetting link\n");
tipc_link_reset(l_ptr);
} else {
/* Handle failure on broadcast link */
struct node *n_ptr;
char addr_string[16];
tipc_printf(TIPC_OUTPUT, "Msg seq number: %u, ", msg_seqno(msg));
tipc_printf(TIPC_OUTPUT, "Outstanding acks: %u\n", (u32)TIPC_SKB_CB(buf)->handle);
n_ptr = l_ptr->owner->next;
tipc_node_lock(n_ptr);
addr_string_fill(addr_string, n_ptr->addr);
tipc_printf(TIPC_OUTPUT, "Multicast link info for %s\n", addr_string);
tipc_printf(TIPC_OUTPUT, "Supported: %d, ", n_ptr->bclink.supported);
tipc_printf(TIPC_OUTPUT, "Acked: %u\n", n_ptr->bclink.acked);
tipc_printf(TIPC_OUTPUT, "Last in: %u, ", n_ptr->bclink.last_in);
tipc_printf(TIPC_OUTPUT, "Gap after: %u, ", n_ptr->bclink.gap_after);
tipc_printf(TIPC_OUTPUT, "Gap to: %u\n", n_ptr->bclink.gap_to);
tipc_printf(TIPC_OUTPUT, "Nack sync: %u\n\n", n_ptr->bclink.nack_sync);
tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);
tipc_node_unlock(n_ptr);
l_ptr->stale_count = 0;
}
}
void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
u32 retransmits)
{
struct tipc_msg *msg;
if (!buf)
return;
msg = buf_msg(buf);
dbg("Retransmitting %u in link %x\n", retransmits, l_ptr);
if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr) && buf && !skb_cloned(buf)) {
msg_dbg(buf_msg(buf), ">NO_RETR->BCONG>");
dbg_print_link(l_ptr, " ");
l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
l_ptr->retransm_queue_size = retransmits;
return;
if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
if (!skb_cloned(buf)) {
msg_dbg(msg, ">NO_RETR->BCONG>");
dbg_print_link(l_ptr, " ");
l_ptr->retransm_queue_head = msg_seqno(msg);
l_ptr->retransm_queue_size = retransmits;
return;
} else {
/* Don't retransmit if driver already has the buffer */
}
} else {
/* Detect repeated retransmit failures on uncongested bearer */
if (l_ptr->last_retransmitted == msg_seqno(msg)) {
if (++l_ptr->stale_count > 100) {
link_retransmit_failure(l_ptr, buf);
return;
}
} else {
l_ptr->last_retransmitted = msg_seqno(msg);
l_ptr->stale_count = 1;
}
}
while (retransmits && (buf != l_ptr->next_out) && buf && !skb_cloned(buf)) {
msg = buf_msg(buf);
msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
/* Catch if retransmissions fail repeatedly: */
if (l_ptr->last_retransmitted == msg_seqno(msg)) {
if (++l_ptr->stale_count > 100) {
tipc_msg_print(TIPC_CONS, buf_msg(buf), ">RETR>");
info("...Retransmitted %u times\n",
l_ptr->stale_count);
link_print(l_ptr, TIPC_CONS, "Resetting Link\n");
tipc_link_reset(l_ptr);
break;
}
} else {
l_ptr->stale_count = 0;
}
l_ptr->last_retransmitted = msg_seqno(msg);
msg_dbg(buf_msg(buf), ">RETR>");
buf = buf->next;
retransmits--;
@ -1650,6 +1739,7 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
return;
}
}
l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
}
@ -1720,6 +1810,11 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
link_recv_non_seq(buf);
continue;
}
if (unlikely(!msg_short(msg) &&
(msg_destnode(msg) != tipc_own_addr)))
goto cont;
n_ptr = tipc_node_find(msg_prevnode(msg));
if (unlikely(!n_ptr))
goto cont;
@ -2140,7 +2235,7 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
if (msg_linkprio(msg) &&
(msg_linkprio(msg) != l_ptr->priority)) {
warn("Changing prio <%s>: %u->%u\n",
warn("Resetting link <%s>, priority change %u->%u\n",
l_ptr->name, l_ptr->priority, msg_linkprio(msg));
l_ptr->priority = msg_linkprio(msg);
tipc_link_reset(l_ptr); /* Enforce change to take effect */
@ -2209,17 +2304,22 @@ void tipc_link_tunnel(struct link *l_ptr,
u32 length = msg_size(msg);
tunnel = l_ptr->owner->active_links[selector & 1];
if (!tipc_link_is_up(tunnel))
if (!tipc_link_is_up(tunnel)) {
warn("Link changeover error, "
"tunnel link no longer available\n");
return;
}
msg_set_size(tunnel_hdr, length + INT_H_SIZE);
buf = buf_acquire(length + INT_H_SIZE);
if (!buf)
if (!buf) {
warn("Link changeover error, "
"unable to send tunnel msg\n");
return;
}
memcpy(buf->data, (unchar *)tunnel_hdr, INT_H_SIZE);
memcpy(buf->data + INT_H_SIZE, (unchar *)msg, length);
dbg("%c->%c:", l_ptr->b_ptr->net_plane, tunnel->b_ptr->net_plane);
msg_dbg(buf_msg(buf), ">SEND>");
assert(tunnel);
tipc_link_send_buf(tunnel, buf);
}
@ -2235,23 +2335,27 @@ void tipc_link_changeover(struct link *l_ptr)
u32 msgcount = l_ptr->out_queue_size;
struct sk_buff *crs = l_ptr->first_out;
struct link *tunnel = l_ptr->owner->active_links[0];
int split_bundles = tipc_node_has_redundant_links(l_ptr->owner);
struct tipc_msg tunnel_hdr;
int split_bundles;
if (!tunnel)
return;
if (!l_ptr->owner->permit_changeover)
if (!l_ptr->owner->permit_changeover) {
warn("Link changeover error, "
"peer did not permit changeover\n");
return;
}
msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
ORIGINAL_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
msg_set_msgcnt(&tunnel_hdr, msgcount);
dbg("Link changeover requires %u tunnel messages\n", msgcount);
if (!l_ptr->first_out) {
struct sk_buff *buf;
assert(!msgcount);
buf = buf_acquire(INT_H_SIZE);
if (buf) {
memcpy(buf->data, (unchar *)&tunnel_hdr, INT_H_SIZE);
@ -2261,10 +2365,15 @@ void tipc_link_changeover(struct link *l_ptr)
msg_dbg(&tunnel_hdr, "EMPTY>SEND>");
tipc_link_send_buf(tunnel, buf);
} else {
warn("Memory squeeze; link changeover failed\n");
warn("Link changeover error, "
"unable to send changeover msg\n");
}
return;
}
split_bundles = (l_ptr->owner->active_links[0] !=
l_ptr->owner->active_links[1]);
while (crs) {
struct tipc_msg *msg = buf_msg(crs);
@ -2310,7 +2419,8 @@ void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
outbuf = buf_acquire(length + INT_H_SIZE);
if (outbuf == NULL) {
warn("Memory squeeze; buffer duplication failed\n");
warn("Link changeover error, "
"unable to send duplicate msg\n");
return;
}
memcpy(outbuf->data, (unchar *)&tunnel_hdr, INT_H_SIZE);
@ -2364,11 +2474,15 @@ static int link_recv_changeover_msg(struct link **l_ptr,
u32 msg_count = msg_msgcnt(tunnel_msg);
dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
assert(dest_link != *l_ptr);
if (!dest_link) {
msg_dbg(tunnel_msg, "NOLINK/<REC<");
goto exit;
}
if (dest_link == *l_ptr) {
err("Unexpected changeover message on link <%s>\n",
(*l_ptr)->name);
goto exit;
}
dbg("%c<-%c:", dest_link->b_ptr->net_plane,
(*l_ptr)->b_ptr->net_plane);
*l_ptr = dest_link;
@ -2381,7 +2495,7 @@ static int link_recv_changeover_msg(struct link **l_ptr,
}
*buf = buf_extract(tunnel_buf,INT_H_SIZE);
if (*buf == NULL) {
warn("Memory squeeze; failed to extract msg\n");
warn("Link changeover error, duplicate msg dropped\n");
goto exit;
}
msg_dbg(tunnel_msg, "TNL<REC<");
@ -2393,13 +2507,17 @@ static int link_recv_changeover_msg(struct link **l_ptr,
if (tipc_link_is_up(dest_link)) {
msg_dbg(tunnel_msg, "UP/FIRST/<REC<");
info("Resetting link <%s>, changeover initiated by peer\n",
dest_link->name);
tipc_link_reset(dest_link);
dest_link->exp_msg_count = msg_count;
dbg("Expecting %u tunnelled messages\n", msg_count);
if (!msg_count)
goto exit;
} else if (dest_link->exp_msg_count == START_CHANGEOVER) {
msg_dbg(tunnel_msg, "BLK/FIRST/<REC<");
dest_link->exp_msg_count = msg_count;
dbg("Expecting %u tunnelled messages\n", msg_count);
if (!msg_count)
goto exit;
}
@ -2407,6 +2525,8 @@ static int link_recv_changeover_msg(struct link **l_ptr,
/* Receive original message */
if (dest_link->exp_msg_count == 0) {
warn("Link switchover error, "
"got too many tunnelled messages\n");
msg_dbg(tunnel_msg, "OVERDUE/DROP/<REC<");
dbg_print_link(dest_link, "LINK:");
goto exit;
@ -2422,7 +2542,7 @@ static int link_recv_changeover_msg(struct link **l_ptr,
buf_discard(tunnel_buf);
return 1;
} else {
warn("Memory squeeze; dropped incoming msg\n");
warn("Link changeover error, original msg dropped\n");
}
}
exit:
@ -2444,13 +2564,8 @@ void tipc_link_recv_bundle(struct sk_buff *buf)
while (msgcount--) {
obuf = buf_extract(buf, pos);
if (obuf == NULL) {
char addr_string[16];
warn("Buffer allocation failure;\n");
warn(" incoming message(s) from %s lost\n",
addr_string_fill(addr_string,
msg_orignode(buf_msg(buf))));
return;
warn("Link unable to unbundle message(s)\n");
break;
};
pos += align(msg_size(buf_msg(obuf)));
msg_dbg(buf_msg(obuf), " /");
@ -2508,7 +2623,7 @@ int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
}
fragm = buf_acquire(fragm_sz + INT_H_SIZE);
if (fragm == NULL) {
warn("Memory squeeze; failed to fragment msg\n");
warn("Link unable to fragment message\n");
dsz = -ENOMEM;
goto exit;
}
@ -2623,7 +2738,7 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
set_fragm_size(pbuf,fragm_sz);
set_expected_frags(pbuf,exp_fragm_cnt - 1);
} else {
warn("Memory squeeze; got no defragmenting buffer\n");
warn("Link unable to reassemble fragmented message\n");
}
buf_discard(fbuf);
return 0;

View File

@ -127,7 +127,7 @@ void tipc_named_publish(struct publication *publ)
buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
if (!buf) {
warn("Memory squeeze; failed to distribute publication\n");
warn("Publication distribution failure\n");
return;
}
@ -151,7 +151,7 @@ void tipc_named_withdraw(struct publication *publ)
buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
if (!buf) {
warn("Memory squeeze; failed to distribute withdrawal\n");
warn("Withdrawl distribution failure\n");
return;
}
@ -174,7 +174,6 @@ void tipc_named_node_up(unsigned long node)
u32 rest;
u32 max_item_buf;
assert(in_own_cluster(node));
read_lock_bh(&tipc_nametbl_lock);
max_item_buf = TIPC_MAX_USER_MSG_SIZE / ITEM_SIZE;
max_item_buf *= ITEM_SIZE;
@ -185,8 +184,8 @@ void tipc_named_node_up(unsigned long node)
left = (rest <= max_item_buf) ? rest : max_item_buf;
rest -= left;
buf = named_prepare_buf(PUBLICATION, left, node);
if (buf == NULL) {
warn("Memory Squeeze; could not send publication\n");
if (!buf) {
warn("Bulk publication distribution failure\n");
goto exit;
}
item = (struct distr_item *)msg_data(buf_msg(buf));
@ -221,15 +220,24 @@ exit:
static void node_is_down(struct publication *publ)
{
struct publication *p;
write_lock_bh(&tipc_nametbl_lock);
dbg("node_is_down: withdrawing %u, %u, %u\n",
publ->type, publ->lower, publ->upper);
publ->key += 1222345;
p = tipc_nametbl_remove_publ(publ->type, publ->lower,
publ->node, publ->ref, publ->key);
assert(p == publ);
write_unlock_bh(&tipc_nametbl_lock);
kfree(publ);
if (p != publ) {
err("Unable to remove publication from failed node\n"
"(type=%u, lower=%u, node=0x%x, ref=%u, key=%u)\n",
publ->type, publ->lower, publ->node, publ->ref, publ->key);
}
if (p) {
kfree(p);
}
}
/**
@ -275,9 +283,15 @@ void tipc_named_recv(struct sk_buff *buf)
if (publ) {
tipc_nodesub_unsubscribe(&publ->subscr);
kfree(publ);
} else {
err("Unable to remove publication by node 0x%x\n"
"(type=%u, lower=%u, ref=%u, key=%u)\n",
msg_orignode(msg),
ntohl(item->type), ntohl(item->lower),
ntohl(item->ref), ntohl(item->key));
}
} else {
warn("tipc_named_recv: unknown msg\n");
warn("Unrecognized name table message received\n");
}
item++;
}

View File

@ -71,7 +71,7 @@ struct sub_seq {
* @sseq: pointer to dynamically-sized array of sub-sequences of this 'type';
* sub-sequences are sorted in ascending order
* @alloc: number of sub-sequences currently in array
* @first_free: upper bound of highest sub-sequence + 1
* @first_free: array index of first unused sub-sequence entry
* @ns_list: links to adjacent name sequences in hash chain
* @subscriptions: list of subscriptions for this 'type'
* @lock: spinlock controlling access to name sequence structure
@ -120,7 +120,7 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper,
struct publication *publ =
(struct publication *)kmalloc(sizeof(*publ), GFP_ATOMIC);
if (publ == NULL) {
warn("Memory squeeze; failed to create publication\n");
warn("Publication creation failure, no memory\n");
return NULL;
}
@ -165,7 +165,7 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_hea
struct sub_seq *sseq = tipc_subseq_alloc(1);
if (!nseq || !sseq) {
warn("Memory squeeze; failed to create name sequence\n");
warn("Name sequence creation failed, no memory\n");
kfree(nseq);
kfree(sseq);
return NULL;
@ -175,7 +175,7 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_hea
nseq->lock = SPIN_LOCK_UNLOCKED;
nseq->type = type;
nseq->sseqs = sseq;
dbg("tipc_nameseq_create() nseq = %x type %u, ssseqs %x, ff: %u\n",
dbg("tipc_nameseq_create(): nseq = %p, type %u, ssseqs %p, ff: %u\n",
nseq, type, nseq->sseqs, nseq->first_free);
nseq->alloc = 1;
INIT_HLIST_NODE(&nseq->ns_list);
@ -253,16 +253,16 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
struct sub_seq *sseq;
int created_subseq = 0;
assert(nseq->first_free <= nseq->alloc);
sseq = nameseq_find_subseq(nseq, lower);
dbg("nameseq_ins: for seq %x,<%u,%u>, found sseq %x\n",
dbg("nameseq_ins: for seq %p, {%u,%u}, found sseq %p\n",
nseq, type, lower, sseq);
if (sseq) {
/* Lower end overlaps existing entry => need an exact match */
if ((sseq->lower != lower) || (sseq->upper != upper)) {
warn("Overlapping publ <%u,%u,%u>\n", type, lower, upper);
warn("Cannot publish {%u,%u,%u}, overlap error\n",
type, lower, upper);
return NULL;
}
} else {
@ -277,25 +277,27 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
if ((inspos < nseq->first_free) &&
(upper >= nseq->sseqs[inspos].lower)) {
warn("Overlapping publ <%u,%u,%u>\n", type, lower, upper);
warn("Cannot publish {%u,%u,%u}, overlap error\n",
type, lower, upper);
return NULL;
}
/* Ensure there is space for new sub-sequence */
if (nseq->first_free == nseq->alloc) {
struct sub_seq *sseqs = nseq->sseqs;
nseq->sseqs = tipc_subseq_alloc(nseq->alloc * 2);
if (nseq->sseqs != NULL) {
memcpy(nseq->sseqs, sseqs,
nseq->alloc * sizeof (struct sub_seq));
kfree(sseqs);
dbg("Allocated %u sseqs\n", nseq->alloc);
nseq->alloc *= 2;
} else {
warn("Memory squeeze; failed to create sub-sequence\n");
struct sub_seq *sseqs = tipc_subseq_alloc(nseq->alloc * 2);
if (!sseqs) {
warn("Cannot publish {%u,%u,%u}, no memory\n",
type, lower, upper);
return NULL;
}
dbg("Allocated %u more sseqs\n", nseq->alloc);
memcpy(sseqs, nseq->sseqs,
nseq->alloc * sizeof(struct sub_seq));
kfree(nseq->sseqs);
nseq->sseqs = sseqs;
nseq->alloc *= 2;
}
dbg("Have %u sseqs for type %u\n", nseq->alloc, type);
@ -311,7 +313,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
sseq->upper = upper;
created_subseq = 1;
}
dbg("inserting (%u %u %u) from %x:%u into sseq %x(%u,%u) of seq %x\n",
dbg("inserting {%u,%u,%u} from <0x%x:%u> into sseq %p(%u,%u) of seq %p\n",
type, lower, upper, node, port, sseq,
sseq->lower, sseq->upper, nseq);
@ -320,7 +322,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
publ = publ_create(type, lower, upper, scope, node, port, key);
if (!publ)
return NULL;
dbg("inserting publ %x, node=%x publ->node=%x, subscr->node=%x\n",
dbg("inserting publ %p, node=0x%x publ->node=0x%x, subscr->node=%p\n",
publ, node, publ->node, publ->subscr.node);
if (!sseq->zone_list)
@ -367,45 +369,47 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
/**
* tipc_nameseq_remove_publ -
*
* NOTE: There may be cases where TIPC is asked to remove a publication
* that is not in the name table. For example, if another node issues a
* publication for a name sequence that overlaps an existing name sequence
* the publication will not be recorded, which means the publication won't
* be found when the name sequence is later withdrawn by that node.
* A failed withdraw request simply returns a failure indication and lets the
* caller issue any error or warning messages associated with such a problem.
*/
static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 inst,
u32 node, u32 ref, u32 key)
{
struct publication *publ;
struct publication *curr;
struct publication *prev;
struct sub_seq *sseq = nameseq_find_subseq(nseq, inst);
struct sub_seq *free;
struct subscription *s, *st;
int removed_subseq = 0;
assert(nseq);
if (!sseq) {
int i;
warn("Withdraw unknown <%u,%u>?\n", nseq->type, inst);
assert(nseq->sseqs);
dbg("Dumping subseqs %x for %x, alloc = %u,ff=%u\n",
nseq->sseqs, nseq, nseq->alloc,
nseq->first_free);
for (i = 0; i < nseq->first_free; i++) {
dbg("Subseq %u(%x): lower = %u,upper = %u\n",
i, &nseq->sseqs[i], nseq->sseqs[i].lower,
nseq->sseqs[i].upper);
}
if (!sseq)
return NULL;
}
dbg("nameseq_remove: seq: %x, sseq %x, <%u,%u> key %u\n",
dbg("tipc_nameseq_remove_publ: seq: %p, sseq %p, {%u,%u}, key %u\n",
nseq, sseq, nseq->type, inst, key);
/* Remove publication from zone scope list */
prev = sseq->zone_list;
publ = sseq->zone_list->zone_list_next;
while ((publ->key != key) || (publ->ref != ref) ||
(publ->node && (publ->node != node))) {
prev = publ;
publ = publ->zone_list_next;
assert(prev != sseq->zone_list);
if (prev == sseq->zone_list) {
/* Prevent endless loop if publication not found */
return NULL;
}
}
if (publ != sseq->zone_list)
prev->zone_list_next = publ->zone_list_next;
@ -416,14 +420,24 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
sseq->zone_list = NULL;
}
/* Remove publication from cluster scope list, if present */
if (in_own_cluster(node)) {
prev = sseq->cluster_list;
publ = sseq->cluster_list->cluster_list_next;
while ((publ->key != key) || (publ->ref != ref) ||
(publ->node && (publ->node != node))) {
prev = publ;
publ = publ->cluster_list_next;
assert(prev != sseq->cluster_list);
curr = sseq->cluster_list->cluster_list_next;
while (curr != publ) {
prev = curr;
curr = curr->cluster_list_next;
if (prev == sseq->cluster_list) {
/* Prevent endless loop for malformed list */
err("Unable to de-list cluster publication\n"
"{%u%u}, node=0x%x, ref=%u, key=%u)\n",
publ->type, publ->lower, publ->node,
publ->ref, publ->key);
goto end_cluster;
}
}
if (publ != sseq->cluster_list)
prev->cluster_list_next = publ->cluster_list_next;
@ -434,15 +448,26 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
sseq->cluster_list = NULL;
}
}
end_cluster:
/* Remove publication from node scope list, if present */
if (node == tipc_own_addr) {
prev = sseq->node_list;
publ = sseq->node_list->node_list_next;
while ((publ->key != key) || (publ->ref != ref) ||
(publ->node && (publ->node != node))) {
prev = publ;
publ = publ->node_list_next;
assert(prev != sseq->node_list);
curr = sseq->node_list->node_list_next;
while (curr != publ) {
prev = curr;
curr = curr->node_list_next;
if (prev == sseq->node_list) {
/* Prevent endless loop for malformed list */
err("Unable to de-list node publication\n"
"{%u%u}, node=0x%x, ref=%u, key=%u)\n",
publ->type, publ->lower, publ->node,
publ->ref, publ->key);
goto end_node;
}
}
if (publ != sseq->node_list)
prev->node_list_next = publ->node_list_next;
@ -453,22 +478,18 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
sseq->node_list = NULL;
}
}
assert(!publ->node || (publ->node == node));
assert(publ->ref == ref);
assert(publ->key == key);
end_node:
/*
* Contract subseq list if no more publications:
*/
if (!sseq->node_list && !sseq->cluster_list && !sseq->zone_list) {
/* Contract subseq list if no more publications for that subseq */
if (!sseq->zone_list) {
free = &nseq->sseqs[nseq->first_free--];
memmove(sseq, sseq + 1, (free - (sseq + 1)) * sizeof (*sseq));
removed_subseq = 1;
}
/*
* Any subscriptions waiting ?
*/
/* Notify any waiting subscriptions */
list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
tipc_subscr_report_overlap(s,
publ->lower,
@ -478,6 +499,7 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
publ->node,
removed_subseq);
}
return publ;
}
@ -530,7 +552,7 @@ static struct name_seq *nametbl_find_seq(u32 type)
seq_head = &table.types[hash(type)];
hlist_for_each_entry(ns, seq_node, seq_head, ns_list) {
if (ns->type == type) {
dbg("found %x\n", ns);
dbg("found %p\n", ns);
return ns;
}
}
@ -543,22 +565,21 @@ struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
{
struct name_seq *seq = nametbl_find_seq(type);
dbg("ins_publ: <%u,%x,%x> found %x\n", type, lower, upper, seq);
dbg("tipc_nametbl_insert_publ: {%u,%u,%u} found %p\n", type, lower, upper, seq);
if (lower > upper) {
warn("Failed to publish illegal <%u,%u,%u>\n",
warn("Failed to publish illegal {%u,%u,%u}\n",
type, lower, upper);
return NULL;
}
dbg("Publishing <%u,%u,%u> from %x\n", type, lower, upper, node);
dbg("Publishing {%u,%u,%u} from 0x%x\n", type, lower, upper, node);
if (!seq) {
seq = tipc_nameseq_create(type, &table.types[hash(type)]);
dbg("tipc_nametbl_insert_publ: created %x\n", seq);
dbg("tipc_nametbl_insert_publ: created %p\n", seq);
}
if (!seq)
return NULL;
assert(seq->type == type);
return tipc_nameseq_insert_publ(seq, type, lower, upper,
scope, node, port, key);
}
@ -572,7 +593,7 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
if (!seq)
return NULL;
dbg("Withdrawing <%u,%u> from %x\n", type, lower, node);
dbg("Withdrawing {%u,%u} from 0x%x\n", type, lower, node);
publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key);
if (!seq->first_free && list_empty(&seq->subscriptions)) {
@ -738,12 +759,12 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
struct publication *publ;
if (table.local_publ_count >= tipc_max_publications) {
warn("Failed publish: max %u local publication\n",
warn("Publication failed, local publication limit reached (%u)\n",
tipc_max_publications);
return NULL;
}
if ((type < TIPC_RESERVED_TYPES) && !atomic_read(&rsv_publ_ok)) {
warn("Failed to publish reserved name <%u,%u,%u>\n",
warn("Publication failed, reserved name {%u,%u,%u}\n",
type, lower, upper);
return NULL;
}
@ -767,10 +788,10 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
{
struct publication *publ;
dbg("tipc_nametbl_withdraw:<%d,%d,%d>\n", type, lower, key);
dbg("tipc_nametbl_withdraw: {%u,%u}, key=%u\n", type, lower, key);
write_lock_bh(&tipc_nametbl_lock);
publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
if (publ) {
if (likely(publ)) {
table.local_publ_count--;
if (publ->scope != TIPC_NODE_SCOPE)
tipc_named_withdraw(publ);
@ -780,6 +801,9 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
return 1;
}
write_unlock_bh(&tipc_nametbl_lock);
err("Unable to remove local publication\n"
"(type=%u, lower=%u, ref=%u, key=%u)\n",
type, lower, ref, key);
return 0;
}
@ -787,8 +811,7 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
* tipc_nametbl_subscribe - add a subscription object to the name table
*/
void
tipc_nametbl_subscribe(struct subscription *s)
void tipc_nametbl_subscribe(struct subscription *s)
{
u32 type = s->seq.type;
struct name_seq *seq;
@ -800,11 +823,13 @@ tipc_nametbl_subscribe(struct subscription *s)
}
if (seq){
spin_lock_bh(&seq->lock);
dbg("tipc_nametbl_subscribe:found %x for <%u,%u,%u>\n",
dbg("tipc_nametbl_subscribe:found %p for {%u,%u,%u}\n",
seq, type, s->seq.lower, s->seq.upper);
assert(seq->type == type);
tipc_nameseq_subscribe(seq, s);
spin_unlock_bh(&seq->lock);
} else {
warn("Failed to create subscription for {%u,%u,%u}\n",
s->seq.type, s->seq.lower, s->seq.upper);
}
write_unlock_bh(&tipc_nametbl_lock);
}
@ -813,8 +838,7 @@ tipc_nametbl_subscribe(struct subscription *s)
* tipc_nametbl_unsubscribe - remove a subscription object from name table
*/
void
tipc_nametbl_unsubscribe(struct subscription *s)
void tipc_nametbl_unsubscribe(struct subscription *s)
{
struct name_seq *seq;
@ -1049,35 +1073,20 @@ int tipc_nametbl_init(void)
void tipc_nametbl_stop(void)
{
struct hlist_head *seq_head;
struct hlist_node *seq_node;
struct hlist_node *tmp;
struct name_seq *seq;
u32 i;
if (!table.types)
return;
/* Verify name table is empty, then release it */
write_lock_bh(&tipc_nametbl_lock);
for (i = 0; i < tipc_nametbl_size; i++) {
seq_head = &table.types[i];
hlist_for_each_entry_safe(seq, seq_node, tmp, seq_head, ns_list) {
struct sub_seq *sseq = seq->sseqs;
for (; sseq != &seq->sseqs[seq->first_free]; sseq++) {
struct publication *publ = sseq->zone_list;
assert(publ);
do {
struct publication *next =
publ->zone_list_next;
kfree(publ);
publ = next;
}
while (publ != sseq->zone_list);
}
}
if (!hlist_empty(&table.types[i]))
err("tipc_nametbl_stop(): hash chain %u is non-null\n", i);
}
kfree(table.types);
table.types = NULL;
write_unlock_bh(&tipc_nametbl_lock);
}

View File

@ -61,34 +61,37 @@ struct node *tipc_node_create(u32 addr)
struct node **curr_node;
n_ptr = kmalloc(sizeof(*n_ptr),GFP_ATOMIC);
if (n_ptr != NULL) {
memset(n_ptr, 0, sizeof(*n_ptr));
n_ptr->addr = addr;
n_ptr->lock = SPIN_LOCK_UNLOCKED;
INIT_LIST_HEAD(&n_ptr->nsub);
c_ptr = tipc_cltr_find(addr);
if (c_ptr == NULL)
c_ptr = tipc_cltr_create(addr);
if (c_ptr != NULL) {
n_ptr->owner = c_ptr;
tipc_cltr_attach_node(c_ptr, n_ptr);
n_ptr->last_router = -1;
if (!n_ptr) {
warn("Node creation failed, no memory\n");
return NULL;
}
/* Insert node into ordered list */
for (curr_node = &tipc_nodes; *curr_node;
curr_node = &(*curr_node)->next) {
if (addr < (*curr_node)->addr) {
n_ptr->next = *curr_node;
break;
}
}
(*curr_node) = n_ptr;
} else {
kfree(n_ptr);
n_ptr = NULL;
}
}
c_ptr = tipc_cltr_find(addr);
if (!c_ptr) {
c_ptr = tipc_cltr_create(addr);
}
if (!c_ptr) {
kfree(n_ptr);
return NULL;
}
memset(n_ptr, 0, sizeof(*n_ptr));
n_ptr->addr = addr;
n_ptr->lock = SPIN_LOCK_UNLOCKED;
INIT_LIST_HEAD(&n_ptr->nsub);
n_ptr->owner = c_ptr;
tipc_cltr_attach_node(c_ptr, n_ptr);
n_ptr->last_router = -1;
/* Insert node into ordered list */
for (curr_node = &tipc_nodes; *curr_node;
curr_node = &(*curr_node)->next) {
if (addr < (*curr_node)->addr) {
n_ptr->next = *curr_node;
break;
}
}
(*curr_node) = n_ptr;
return n_ptr;
}
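
The rewritten tipc_node_create() above trades the nested success path for early returns and keeps the address-ordered insertion into the singly-linked tipc_nodes list, done with a pointer-to-pointer walk so the head and interior cases need no special handling. A minimal standalone sketch of that insertion idiom follows; struct n, node_create() and the global list head are illustrative stand-ins, not kernel code.

#include <stdio.h>
#include <stdlib.h>

struct n {
	unsigned int addr;
	struct n *next;
};

static struct n *nodes;		/* head of the address-ordered list */

/* Insert a new node so the list stays sorted by address.  Walking with a
 * pointer-to-pointer means *curr is always the link that may have to be
 * rewritten, whether it is the list head or an interior next pointer. */
static struct n *node_create(unsigned int addr)
{
	struct n *n_ptr;
	struct n **curr;

	n_ptr = calloc(1, sizeof(*n_ptr));
	if (!n_ptr) {
		fprintf(stderr, "node creation failed, no memory\n");
		return NULL;		/* early return, as in the rewrite above */
	}
	n_ptr->addr = addr;

	for (curr = &nodes; *curr; curr = &(*curr)->next) {
		if (addr < (*curr)->addr) {
			n_ptr->next = *curr;
			break;
		}
	}
	*curr = n_ptr;
	return n_ptr;
}

int main(void)
{
	struct n *p;

	node_create(3);
	node_create(1);
	node_create(2);
	for (p = nodes; p; p = p->next)
		printf("%u\n", p->addr);	/* prints 1, 2, 3 */
	return 0;
}
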
@ -122,6 +125,8 @@ void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr)
{
struct link **active = &n_ptr->active_links[0];
n_ptr->working_links++;
info("Established link <%s> on network plane %c\n",
l_ptr->name, l_ptr->b_ptr->net_plane);
@ -132,7 +137,7 @@ void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr)
return;
}
if (l_ptr->priority < active[0]->priority) {
info("Link is standby\n");
info("New link <%s> becomes standby\n", l_ptr->name);
return;
}
tipc_link_send_duplicate(active[0], l_ptr);
@ -140,8 +145,9 @@ void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr)
active[0] = l_ptr;
return;
}
info("Link <%s> on network plane %c becomes standby\n",
active[0]->name, active[0]->b_ptr->net_plane);
info("Old link <%s> becomes standby\n", active[0]->name);
if (active[1] != active[0])
info("Old link <%s> becomes standby\n", active[1]->name);
active[0] = active[1] = l_ptr;
}
@ -181,6 +187,8 @@ void tipc_node_link_down(struct node *n_ptr, struct link *l_ptr)
{
struct link **active;
n_ptr->working_links--;
if (!tipc_link_is_active(l_ptr)) {
info("Lost standby link <%s> on network plane %c\n",
l_ptr->name, l_ptr->b_ptr->net_plane);
@ -210,8 +218,7 @@ int tipc_node_has_active_links(struct node *n_ptr)
int tipc_node_has_redundant_links(struct node *n_ptr)
{
return (tipc_node_has_active_links(n_ptr) &&
(n_ptr->active_links[0] != n_ptr->active_links[1]));
return (n_ptr->working_links > 1);
}
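
tipc_node_has_redundant_links() now keys off the working_links counter maintained by tipc_node_link_up()/tipc_node_link_down(), instead of comparing the two active-link pointers. A trivial standalone model of that bookkeeping is sketched below; the struct and function names are invented for the example.

#include <assert.h>

/* Illustrative per-node state: one counter covering both active and
 * standby links, bumped on link-up and dropped on link-down. */
struct node_stub {
	int working_links;
};

static void link_up(struct node_stub *n)   { n->working_links++; }
static void link_down(struct node_stub *n) { n->working_links--; }

static int has_redundant_links(const struct node_stub *n)
{
	return n->working_links > 1;	/* more than one working link */
}

int main(void)
{
	struct node_stub n = { 0 };

	link_up(&n);
	assert(!has_redundant_links(&n));	/* single link: no redundancy */
	link_up(&n);
	assert(has_redundant_links(&n));	/* second link adds redundancy */
	link_down(&n);
	assert(!has_redundant_links(&n));
	return 0;
}
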
static int tipc_node_has_active_routes(struct node *n_ptr)
@ -234,7 +241,6 @@ struct node *tipc_node_attach_link(struct link *l_ptr)
u32 bearer_id = l_ptr->b_ptr->identity;
char addr_string[16];
assert(bearer_id < MAX_BEARERS);
if (n_ptr->link_cnt >= 2) {
char addr_string[16];
@ -249,7 +255,7 @@ struct node *tipc_node_attach_link(struct link *l_ptr)
n_ptr->link_cnt++;
return n_ptr;
}
err("Attempt to establish second link on <%s> to <%s> \n",
err("Attempt to establish second link on <%s> to %s \n",
l_ptr->b_ptr->publ.name,
addr_string_fill(addr_string, l_ptr->addr));
}
@ -314,7 +320,7 @@ static void node_established_contact(struct node *n_ptr)
struct cluster *c_ptr;
dbg("node_established_contact:-> %x\n", n_ptr->addr);
if (!tipc_node_has_active_routes(n_ptr)) {
if (!tipc_node_has_active_routes(n_ptr) && in_own_cluster(n_ptr->addr)) {
tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);
}

View File

@ -51,6 +51,7 @@
* @nsub: list of "node down" subscriptions monitoring node
* @active_links: pointers to active links to node
* @links: pointers to all links to node
* @working_links: number of working links to node (both active and standby)
* @link_cnt: number of links to node
* @permit_changeover: non-zero if node has redundant links to this system
* @routers: bitmap (used for multicluster communication)
@ -76,6 +77,7 @@ struct node {
struct link *active_links[2];
struct link *links[MAX_BEARERS];
int link_cnt;
int working_links;
int permit_changeover;
u32 routers[512/32];
int last_router;

View File

@ -47,18 +47,19 @@
void tipc_nodesub_subscribe(struct node_subscr *node_sub, u32 addr,
void *usr_handle, net_ev_handler handle_down)
{
node_sub->node = NULL;
if (addr == tipc_own_addr)
return;
if (!tipc_addr_node_valid(addr)) {
warn("node_subscr with illegal %x\n", addr);
if (addr == tipc_own_addr) {
node_sub->node = NULL;
return;
}
node_sub->node = tipc_node_find(addr);
if (!node_sub->node) {
warn("Node subscription rejected, unknown node 0x%x\n", addr);
return;
}
node_sub->handle_node_down = handle_down;
node_sub->usr_handle = usr_handle;
node_sub->node = tipc_node_find(addr);
assert(node_sub->node);
tipc_node_lock(node_sub->node);
list_add_tail(&node_sub->nodesub_list, &node_sub->node->nsub);
tipc_node_unlock(node_sub->node);

View File

@ -168,7 +168,6 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
struct port_list *item = dp;
int cnt = 0;
assert(buf);
msg = buf_msg(buf);
/* Create destination port list, if one wasn't supplied */
@ -196,7 +195,7 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
struct sk_buff *b = skb_clone(buf, GFP_ATOMIC);
if (b == NULL) {
warn("Buffer allocation failure\n");
warn("Unable to deliver multicast message(s)\n");
msg_dbg(msg, "LOST:");
goto exit;
}
@ -228,14 +227,14 @@ u32 tipc_createport_raw(void *usr_handle,
u32 ref;
p_ptr = kmalloc(sizeof(*p_ptr), GFP_ATOMIC);
if (p_ptr == NULL) {
warn("Memory squeeze; failed to create port\n");
if (!p_ptr) {
warn("Port creation failed, no memory\n");
return 0;
}
memset(p_ptr, 0, sizeof(*p_ptr));
ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock);
if (!ref) {
warn("Reference Table Exhausted\n");
warn("Port creation failed, reference table exhausted\n");
kfree(p_ptr);
return 0;
}
@ -810,18 +809,20 @@ static void port_dispatcher_sigh(void *dummy)
void *usr_handle;
int connected;
int published;
u32 message_type;
struct sk_buff *next = buf->next;
struct tipc_msg *msg = buf_msg(buf);
u32 dref = msg_destport(msg);
message_type = msg_type(msg);
if (message_type > TIPC_DIRECT_MSG)
goto reject; /* Unsupported message type */
p_ptr = tipc_port_lock(dref);
if (!p_ptr) {
/* Port deleted while msg in queue */
tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
buf = next;
continue;
}
if (!p_ptr)
goto reject; /* Port deleted while msg in queue */
orig.ref = msg_origport(msg);
orig.node = msg_orignode(msg);
up_ptr = p_ptr->user_port;
@ -832,7 +833,7 @@ static void port_dispatcher_sigh(void *dummy)
if (unlikely(msg_errcode(msg)))
goto err;
switch (msg_type(msg)) {
switch (message_type) {
case TIPC_CONN_MSG:{
tipc_conn_msg_event cb = up_ptr->conn_msg_cb;
@ -874,6 +875,7 @@ static void port_dispatcher_sigh(void *dummy)
&orig);
break;
}
case TIPC_MCAST_MSG:
case TIPC_NAMED_MSG:{
tipc_named_msg_event cb = up_ptr->named_msg_cb;
@ -886,7 +888,8 @@ static void port_dispatcher_sigh(void *dummy)
goto reject;
dseq.type = msg_nametype(msg);
dseq.lower = msg_nameinst(msg);
dseq.upper = dseq.lower;
dseq.upper = (message_type == TIPC_NAMED_MSG)
? dseq.lower : msg_nameupper(msg);
skb_pull(buf, msg_hdr_sz(msg));
cb(usr_handle, dref, &buf, msg_data(msg),
msg_data_sz(msg), msg_importance(msg),
@ -899,7 +902,7 @@ static void port_dispatcher_sigh(void *dummy)
buf = next;
continue;
err:
switch (msg_type(msg)) {
switch (message_type) {
case TIPC_CONN_MSG:{
tipc_conn_shutdown_event cb =
@ -931,6 +934,7 @@ err:
msg_data_sz(msg), msg_errcode(msg), &orig);
break;
}
case TIPC_MCAST_MSG:
case TIPC_NAMED_MSG:{
tipc_named_msg_err_event cb =
up_ptr->named_err_cb;
@ -940,7 +944,8 @@ err:
break;
dseq.type = msg_nametype(msg);
dseq.lower = msg_nameinst(msg);
dseq.upper = dseq.lower;
dseq.upper = (message_type == TIPC_NAMED_MSG)
? dseq.lower : msg_nameupper(msg);
skb_pull(buf, msg_hdr_sz(msg));
cb(usr_handle, dref, &buf, msg_data(msg),
msg_data_sz(msg), msg_errcode(msg), &dseq);
@ -1054,7 +1059,8 @@ int tipc_createport(u32 user_ref,
u32 ref;
up_ptr = (struct user_port *)kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
if (up_ptr == NULL) {
if (!up_ptr) {
warn("Port creation failed, no memory\n");
return -ENOMEM;
}
ref = tipc_createport_raw(NULL, port_dispatcher, port_wakeup, importance);
@ -1165,8 +1171,6 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
return -EINVAL;
if (!p_ptr->publ.published)
goto exit;
if (!seq) {
list_for_each_entry_safe(publ, tpubl,
&p_ptr->publications, pport_list) {
@ -1193,7 +1197,6 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
}
if (list_empty(&p_ptr->publications))
p_ptr->publ.published = 0;
exit:
tipc_port_unlock(p_ptr);
return res;
}

View File

@ -127,7 +127,14 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
u32 next_plus_upper;
u32 reference = 0;
assert(tipc_ref_table.entries && object);
if (!object) {
err("Attempt to acquire reference to non-existent object\n");
return 0;
}
if (!tipc_ref_table.entries) {
err("Reference table not found during acquisition attempt\n");
return 0;
}
write_lock_bh(&ref_table_lock);
if (tipc_ref_table.first_free) {
@ -162,15 +169,28 @@ void tipc_ref_discard(u32 ref)
u32 index;
u32 index_mask;
assert(tipc_ref_table.entries);
assert(ref != 0);
if (!ref) {
err("Attempt to discard reference 0\n");
return;
}
if (!tipc_ref_table.entries) {
err("Reference table not found during discard attempt\n");
return;
}
write_lock_bh(&ref_table_lock);
index_mask = tipc_ref_table.index_mask;
index = ref & index_mask;
entry = &(tipc_ref_table.entries[index]);
assert(entry->object != 0);
assert(entry->data.reference == ref);
if (!entry->object) {
err("Attempt to discard reference to non-existent object\n");
goto exit;
}
if (entry->data.reference != ref) {
err("Attempt to discard non-existent reference\n");
goto exit;
}
/* mark entry as unused */
entry->object = NULL;
@ -184,6 +204,7 @@ void tipc_ref_discard(u32 ref)
/* increment upper bits of entry to invalidate subsequent references */
entry->data.next_plus_upper = (ref & ~index_mask) + (index_mask + 1);
exit:
write_unlock_bh(&ref_table_lock);
}
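
The reference-table hunks above swap assert() calls for explicit checks while keeping the scheme in which the upper bits of each 32-bit reference act as a generation count: discarding a reference bumps those bits, so stale copies of the old reference simply stop matching. The following is a much-simplified standalone model of that idea; the table size, field names, initialisation, and the absence of the free list and per-entry locks are all simplifications for illustration.

#include <stdint.h>
#include <stdio.h>

#define INDEX_BITS	8
#define INDEX_MASK	((1u << INDEX_BITS) - 1u)
#define TABLE_SIZE	(INDEX_MASK + 1u)

struct ref_entry {
	void	*object;	/* NULL while the slot is unused */
	uint32_t reference;	/* reference that is (or will be) handed out */
};

static struct ref_entry ref_table[TABLE_SIZE];

static void ref_table_init(void)
{
	uint32_t i;

	/* start at generation 1 so reference 0 stays invalid */
	for (i = 0; i < TABLE_SIZE; i++)
		ref_table[i].reference = TABLE_SIZE + i;
}

static uint32_t ref_acquire(void *object, uint32_t index)
{
	struct ref_entry *e = &ref_table[index & INDEX_MASK];

	if (!object || e->object)
		return 0;			/* reject instead of asserting */
	e->object = object;
	return e->reference;
}

static void *ref_lookup(uint32_t ref)
{
	struct ref_entry *e = &ref_table[ref & INDEX_MASK];

	return (e->object && e->reference == ref) ? e->object : NULL;
}

static void ref_discard(uint32_t ref)
{
	struct ref_entry *e = &ref_table[ref & INDEX_MASK];

	if (!e->object || e->reference != ref) {
		fprintf(stderr, "attempt to discard stale reference\n");
		return;
	}
	e->object = NULL;
	/* increment the upper bits so the next user of this slot gets a
	 * different reference and old copies can never match again */
	e->reference = (ref & ~INDEX_MASK) + (INDEX_MASK + 1u) + (ref & INDEX_MASK);
}

int main(void)
{
	int obj = 42;
	uint32_t ref;

	ref_table_init();
	ref = ref_acquire(&obj, 7);
	printf("before discard: %s\n", ref_lookup(ref) ? "valid" : "stale");
	ref_discard(ref);
	printf("after discard:  %s\n", ref_lookup(ref) ? "valid" : "stale");
	return 0;
}
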

View File

@ -169,12 +169,6 @@ static int tipc_create(struct socket *sock, int protocol)
struct sock *sk;
u32 ref;
if ((sock->type != SOCK_STREAM) &&
(sock->type != SOCK_SEQPACKET) &&
(sock->type != SOCK_DGRAM) &&
(sock->type != SOCK_RDM))
return -EPROTOTYPE;
if (unlikely(protocol != 0))
return -EPROTONOSUPPORT;
@ -199,6 +193,9 @@ static int tipc_create(struct socket *sock, int protocol)
sock->ops = &msg_ops;
sock->state = SS_READY;
break;
default:
tipc_deleteport(ref);
return -EPROTOTYPE;
}
sk = sk_alloc(AF_TIPC, GFP_KERNEL, &tipc_proto, 1);
@ -426,7 +423,7 @@ static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr)))
return -EFAULT;
if ((ntohs(hdr.tcm_type) & 0xC000) & (!capable(CAP_NET_ADMIN)))
if ((ntohs(hdr.tcm_type) & 0xC000) && (!capable(CAP_NET_ADMIN)))
return -EACCES;
return 0;
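
The one-character change above (& to &&) matters because the bitwise form masks the reserved-type bits against the 0-or-1 result of !capable(CAP_NET_ADMIN), which is almost always zero, so unprivileged senders were effectively never rejected. A tiny standalone demonstration of the difference, with a plain flag standing in for the capability check:

#include <assert.h>

/* 1 if the caller lacks the privilege, 0 if it has it --
 * standing in for !capable(CAP_NET_ADMIN) in the kernel code. */
static int lacks_privilege = 1;

static int check_buggy(unsigned short type)
{
	/* Bitwise AND: (type & 0xC000) is e.g. 0x4000, and 0x4000 & 1 == 0,
	 * so the reserved-type check passes even without privilege. */
	return (type & 0xC000) & lacks_privilege;
}

static int check_fixed(unsigned short type)
{
	/* Logical AND: a reserved type plus missing privilege
	 * correctly denies the request. */
	return (type & 0xC000) && lacks_privilege;
}

int main(void)
{
	unsigned short reserved_type = 0x4000;

	assert(check_buggy(reserved_type) == 0);	/* bug: access allowed */
	assert(check_fixed(reserved_type) != 0);	/* fix: access denied  */
	return 0;
}
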
@ -437,7 +434,7 @@ static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
* @iocb: (unused)
* @sock: socket structure
* @m: message to send
* @total_len: (unused)
* @total_len: length of message
*
* Message must have a destination specified explicitly.
* Used for SOCK_RDM and SOCK_DGRAM messages,
@ -458,7 +455,8 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
if (unlikely(!dest))
return -EDESTADDRREQ;
if (unlikely(dest->family != AF_TIPC))
if (unlikely((m->msg_namelen < sizeof(*dest)) ||
(dest->family != AF_TIPC)))
return -EINVAL;
needs_conn = (sock->state != SS_READY);
@ -470,6 +468,10 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
if ((tsock->p->published) ||
((sock->type == SOCK_STREAM) && (total_len != 0)))
return -EOPNOTSUPP;
if (dest->addrtype == TIPC_ADDR_NAME) {
tsock->p->conn_type = dest->addr.name.name.type;
tsock->p->conn_instance = dest->addr.name.name.instance;
}
}
if (down_interruptible(&tsock->sem))
@ -538,7 +540,7 @@ exit:
* @iocb: (unused)
* @sock: socket structure
* @m: message to send
* @total_len: (unused)
* @total_len: length of message
*
* Used for SOCK_SEQPACKET messages and SOCK_STREAM data.
*
@ -561,15 +563,15 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
return -ERESTARTSYS;
}
if (unlikely(sock->state != SS_CONNECTED)) {
if (sock->state == SS_DISCONNECTING)
res = -EPIPE;
else
res = -ENOTCONN;
goto exit;
}
do {
if (unlikely(sock->state != SS_CONNECTED)) {
if (sock->state == SS_DISCONNECTING)
res = -EPIPE;
else
res = -ENOTCONN;
goto exit;
}
res = tipc_send(tsock->p->ref, m->msg_iovlen, m->msg_iov);
if (likely(res != -ELINKCONG)) {
exit:
@ -597,7 +599,8 @@ exit:
*
* Used for SOCK_STREAM data.
*
* Returns the number of bytes sent on success, or errno otherwise
* Returns the number of bytes sent on success (or partial success),
* or errno if no data sent
*/
@ -611,6 +614,7 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
char __user *curr_start;
int curr_left;
int bytes_to_send;
int bytes_sent;
int res;
if (likely(total_len <= TIPC_MAX_USER_MSG_SIZE))
@ -633,11 +637,11 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
* of small iovec entries into send_packet().
*/
my_msg = *m;
curr_iov = my_msg.msg_iov;
curr_iovlen = my_msg.msg_iovlen;
curr_iov = m->msg_iov;
curr_iovlen = m->msg_iovlen;
my_msg.msg_iov = &my_iov;
my_msg.msg_iovlen = 1;
bytes_sent = 0;
while (curr_iovlen--) {
curr_start = curr_iov->iov_base;
@ -648,16 +652,18 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
? curr_left : TIPC_MAX_USER_MSG_SIZE;
my_iov.iov_base = curr_start;
my_iov.iov_len = bytes_to_send;
if ((res = send_packet(iocb, sock, &my_msg, 0)) < 0)
return res;
if ((res = send_packet(iocb, sock, &my_msg, 0)) < 0) {
return bytes_sent ? bytes_sent : res;
}
curr_left -= bytes_to_send;
curr_start += bytes_to_send;
bytes_sent += bytes_to_send;
}
curr_iov++;
}
return total_len;
return bytes_sent;
}
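
send_stream() now tracks how many bytes earlier chunks delivered and, when a later send_packet() call fails, returns that partial count rather than the error. The sketch below shows the same "partial success wins" convention over a toy chunked sender; MAX_CHUNK, send_one() and the deliberate failure after two chunks are illustrative assumptions, not kernel behaviour.

#include <stdio.h>

#define MAX_CHUNK 66000		/* stands in for TIPC_MAX_USER_MSG_SIZE */

/* Placeholder for send_packet(): sends exactly 'len' bytes or fails with a
 * negative error code (this toy version fails after two calls). */
static int send_one(const char *buf, size_t len)
{
	static int calls;

	(void)buf;
	if (++calls > 2)
		return -1;		/* pretend the link went down */
	return (int)len;
}

/*
 * Send 'total' bytes in MAX_CHUNK-sized pieces.  If a later chunk fails,
 * report the bytes already delivered rather than the error -- the same
 * convention adopted by send_stream() above.
 */
static long send_stream_chunks(const char *buf, size_t total)
{
	size_t left = total;
	long bytes_sent = 0;

	while (left > 0) {
		size_t chunk = left < MAX_CHUNK ? left : MAX_CHUNK;
		int res = send_one(buf, chunk);

		if (res < 0)
			return bytes_sent ? bytes_sent : res;
		buf += chunk;
		left -= chunk;
		bytes_sent += chunk;
	}
	return bytes_sent;
}

int main(void)
{
	static char big[200000];	/* three chunks; the third will fail */

	printf("sent %ld of %zu bytes\n",
	       send_stream_chunks(big, sizeof(big)), sizeof(big));
	return 0;
}
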
/**
@ -727,6 +733,7 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
u32 anc_data[3];
u32 err;
u32 dest_type;
int has_name;
int res;
if (likely(m->msg_controllen == 0))
@ -738,10 +745,10 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
if (unlikely(err)) {
anc_data[0] = err;
anc_data[1] = msg_data_sz(msg);
if ((res = put_cmsg(m, SOL_SOCKET, TIPC_ERRINFO, 8, anc_data)))
if ((res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data)))
return res;
if (anc_data[1] &&
(res = put_cmsg(m, SOL_SOCKET, TIPC_RETDATA, anc_data[1],
(res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
msg_data(msg))))
return res;
}
@ -751,25 +758,28 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
switch (dest_type) {
case TIPC_NAMED_MSG:
has_name = 1;
anc_data[0] = msg_nametype(msg);
anc_data[1] = msg_namelower(msg);
anc_data[2] = msg_namelower(msg);
break;
case TIPC_MCAST_MSG:
has_name = 1;
anc_data[0] = msg_nametype(msg);
anc_data[1] = msg_namelower(msg);
anc_data[2] = msg_nameupper(msg);
break;
case TIPC_CONN_MSG:
has_name = (tport->conn_type != 0);
anc_data[0] = tport->conn_type;
anc_data[1] = tport->conn_instance;
anc_data[2] = tport->conn_instance;
break;
default:
anc_data[0] = 0;
has_name = 0;
}
if (anc_data[0] &&
(res = put_cmsg(m, SOL_SOCKET, TIPC_DESTNAME, 12, anc_data)))
if (has_name &&
(res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data)))
return res;
return 0;
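
With this hunk the destination-name record is emitted on its own SOL_TIPC level and only when a name actually exists (has_name), so a connection whose conn_type is 0 no longer produces a bogus record. On the receiving side the data shows up as ancillary data on recvmsg(); the user-space sketch below, which assumes sd is an already set-up TIPC socket, shows one way it could be read. SOL_TIPC and TIPC_DESTNAME come from the kernel headers (the 271 fallback mirrors linux/socket.h), and error handling is intentionally minimal.

#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/tipc.h>

#ifndef SOL_TIPC
#define SOL_TIPC 271		/* value from linux/socket.h */
#endif

/* Receive one message on a TIPC socket and print the destination name
 * {type,lower,upper} if the kernel attached a TIPC_DESTNAME record. */
static int recv_with_destname(int sd)
{
	char data[1024];
	char control[CMSG_SPACE(3 * sizeof(__u32))];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr m = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = control,
		.msg_controllen = sizeof(control),
	};
	struct cmsghdr *cm;
	ssize_t n;

	n = recvmsg(sd, &m, 0);
	if (n < 0)
		return -1;

	for (cm = CMSG_FIRSTHDR(&m); cm; cm = CMSG_NXTHDR(&m, cm)) {
		if (cm->cmsg_level == SOL_TIPC && cm->cmsg_type == TIPC_DESTNAME) {
			__u32 *name = (__u32 *)CMSG_DATA(cm);

			printf("dest name {%u,%u,%u}\n",
			       name[0], name[1], name[2]);
		}
	}
	return (int)n;
}
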
@ -960,7 +970,7 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
restart:
if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) &&
(flags & MSG_DONTWAIT))) {
res = (sz_copied == 0) ? -EWOULDBLOCK : 0;
res = -EWOULDBLOCK;
goto exit;
}
@ -1051,7 +1061,7 @@ restart:
exit:
up(&tsock->sem);
return res ? res : sz_copied;
return sz_copied ? sz_copied : res;
}
/**
@ -1236,7 +1246,8 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
if (sock->state == SS_READY)
return -EOPNOTSUPP;
/* MOVE THE REST OF THIS ERROR CHECKING TO send_msg()? */
/* Issue Posix-compliant error code if socket is in the wrong state */
if (sock->state == SS_LISTENING)
return -EOPNOTSUPP;
if (sock->state == SS_CONNECTING)
@ -1244,13 +1255,20 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
if (sock->state != SS_UNCONNECTED)
return -EISCONN;
if ((dst->family != AF_TIPC) ||
((dst->addrtype != TIPC_ADDR_NAME) && (dst->addrtype != TIPC_ADDR_ID)))
/*
* Reject connection attempt using multicast address
*
* Note: send_msg() validates the rest of the address fields,
* so there's no need to do it here
*/
if (dst->addrtype == TIPC_ADDR_MCAST)
return -EINVAL;
/* Send a 'SYN-' to destination */
m.msg_name = dest;
m.msg_namelen = destlen;
if ((res = send_msg(NULL, sock, &m, 0)) < 0) {
sock->state = SS_DISCONNECTING;
return res;
@ -1269,10 +1287,6 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
msg = buf_msg(buf);
res = auto_connect(sock, tsock, msg);
if (!res) {
if (dst->addrtype == TIPC_ADDR_NAME) {
tsock->p->conn_type = dst->addr.name.name.type;
tsock->p->conn_instance = dst->addr.name.name.instance;
}
if (!msg_data_sz(msg))
advance_queue(tsock);
}
@ -1386,7 +1400,7 @@ exit:
/**
* shutdown - shutdown socket connection
* @sock: socket structure
* @how: direction to close (always treated as read + write)
* @how: direction to close (unused; always treated as read + write)
*
* Terminates connection (if necessary), then purges socket's receive queue.
*
@ -1469,7 +1483,8 @@ restart:
* Returns 0 on success, errno otherwise
*/
static int setsockopt(struct socket *sock, int lvl, int opt, char *ov, int ol)
static int setsockopt(struct socket *sock,
int lvl, int opt, char __user *ov, int ol)
{
struct tipc_sock *tsock = tipc_sk(sock->sk);
u32 value;
@ -1525,7 +1540,8 @@ static int setsockopt(struct socket *sock, int lvl, int opt, char *ov, int ol)
* Returns 0 on success, errno otherwise
*/
static int getsockopt(struct socket *sock, int lvl, int opt, char *ov, int *ol)
static int getsockopt(struct socket *sock,
int lvl, int opt, char __user *ov, int *ol)
{
struct tipc_sock *tsock = tipc_sk(sock->sk);
int len;

View File

@ -266,7 +266,8 @@ static void subscr_subscribe(struct tipc_subscr *s,
/* Refuse subscription if global limit exceeded */
if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) {
warn("Failed: max %u subscriptions\n", tipc_max_subscriptions);
warn("Subscription rejected, subscription limit reached (%u)\n",
tipc_max_subscriptions);
subscr_terminate(subscriber);
return;
}
@ -274,8 +275,8 @@ static void subscr_subscribe(struct tipc_subscr *s,
/* Allocate subscription object */
sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
if (sub == NULL) {
warn("Memory squeeze; ignoring subscription\n");
if (!sub) {
warn("Subscription rejected, no memory\n");
subscr_terminate(subscriber);
return;
}
@ -298,8 +299,7 @@ static void subscr_subscribe(struct tipc_subscr *s,
if ((((sub->filter != TIPC_SUB_PORTS)
&& (sub->filter != TIPC_SUB_SERVICE)))
|| (sub->seq.lower > sub->seq.upper)) {
warn("Rejecting illegal subscription %u,%u,%u\n",
sub->seq.type, sub->seq.lower, sub->seq.upper);
warn("Subscription rejected, illegal request\n");
kfree(sub);
subscr_terminate(subscriber);
return;
@ -387,7 +387,7 @@ static void subscr_named_msg_event(void *usr_handle,
dbg("subscr_named_msg_event: orig = %x own = %x,\n",
orig->node, tipc_own_addr);
if (size && (size != sizeof(struct tipc_subscr))) {
warn("Received tipc_subscr of invalid size\n");
warn("Subscriber rejected, invalid subscription size\n");
return;
}
@ -395,7 +395,7 @@ static void subscr_named_msg_event(void *usr_handle,
subscriber = kmalloc(sizeof(struct subscriber), GFP_ATOMIC);
if (subscriber == NULL) {
warn("Memory squeeze; ignoring subscriber setup\n");
warn("Subscriber rejected, no memory\n");
return;
}
memset(subscriber, 0, sizeof(struct subscriber));
@ -403,7 +403,7 @@ static void subscr_named_msg_event(void *usr_handle,
INIT_LIST_HEAD(&subscriber->subscriber_list);
subscriber->ref = tipc_ref_acquire(subscriber, &subscriber->lock);
if (subscriber->ref == 0) {
warn("Failed to acquire subscriber reference\n");
warn("Subscriber rejected, reference table exhausted\n");
kfree(subscriber);
return;
}
@ -422,7 +422,7 @@ static void subscr_named_msg_event(void *usr_handle,
NULL,
&subscriber->port_ref);
if (subscriber->port_ref == 0) {
warn("Memory squeeze; failed to create subscription port\n");
warn("Subscriber rejected, unable to create port\n");
tipc_ref_discard(subscriber->ref);
kfree(subscriber);
return;

View File

@ -44,19 +44,24 @@
struct _zone *tipc_zone_create(u32 addr)
{
struct _zone *z_ptr = NULL;
struct _zone *z_ptr;
u32 z_num;
if (!tipc_addr_domain_valid(addr))
if (!tipc_addr_domain_valid(addr)) {
err("Zone creation failed, invalid domain 0x%x\n", addr);
return NULL;
}
z_ptr = (struct _zone *)kmalloc(sizeof(*z_ptr), GFP_ATOMIC);
if (z_ptr != NULL) {
memset(z_ptr, 0, sizeof(*z_ptr));
z_num = tipc_zone(addr);
z_ptr->addr = tipc_addr(z_num, 0, 0);
tipc_net.zones[z_num] = z_ptr;
if (!z_ptr) {
warn("Zone creation failed, insufficient memory\n");
return NULL;
}
memset(z_ptr, 0, sizeof(*z_ptr));
z_num = tipc_zone(addr);
z_ptr->addr = tipc_addr(z_num, 0, 0);
tipc_net.zones[z_num] = z_ptr;
return z_ptr;
}