Merge tag 'batman-adv-for-davem' of git://git.open-mesh.org/linux-merge

Included changes:

* major skb->data pointer usage fix
* internal version update
* added get_ethtool_stats() support
* endianness cleanup
* routing protocol API improvement wrt TT commit code
* fix locking in hash table code
* minor cleanups and fixes
David S. Miller 2012-06-18 20:23:55 -07:00
commit f032537fef
21 changed files with 421 additions and 226 deletions

View File

@ -211,6 +211,11 @@ The debug output can be changed at runtime using the file
will enable debug messages for when routes change.
Counters for different types of packets entering and leaving the
batman-adv module are available through ethtool:
# ethtool --statistics bat0
BATCTL
------
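
The counters listed above are exported through the standard ethtool statistics
interface, so anything that can issue the ETHTOOL_GSTATS ioctl can read them.
The following userspace sketch is not part of this patch set; it only
illustrates roughly what "ethtool --statistics bat0" does with the new
get_sset_count()/get_strings()/get_ethtool_stats() callbacks. The interface
name "bat0" and the minimal error handling are assumptions.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
	struct ethtool_gstrings *strings;
	struct ethtool_stats *stats;
	struct ifreq ifr;
	unsigned int i;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "bat0", IFNAMSIZ - 1);

	/* the ethtool core fills n_stats from the driver's .get_sset_count() */
	ifr.ifr_data = (char *)&drvinfo;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	strings = calloc(1, sizeof(*strings) + drvinfo.n_stats * ETH_GSTRING_LEN);
	stats = calloc(1, sizeof(*stats) + drvinfo.n_stats * sizeof(__u64));
	if (!strings || !stats)
		return 1;

	/* counter names, as declared in bat_counters_strings[] later in this diff */
	strings->cmd = ETHTOOL_GSTRINGS;
	strings->string_set = ETH_SS_STATS;
	strings->len = drvinfo.n_stats;
	ifr.ifr_data = (char *)strings;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	/* counter values, summed over all cpus by batadv_get_ethtool_stats() */
	stats->cmd = ETHTOOL_GSTATS;
	stats->n_stats = drvinfo.n_stats;
	ifr.ifr_data = (char *)stats;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	for (i = 0; i < drvinfo.n_stats; i++)
		printf("%.*s: %llu\n", ETH_GSTRING_LEN,
		       (char *)strings->data + i * ETH_GSTRING_LEN,
		       (unsigned long long)stats->data[i]);

	close(fd);
	return 0;
}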

View File

@ -195,13 +195,13 @@ static int debug_log_setup(struct bat_priv *bat_priv)
d = debugfs_create_file("log", S_IFREG | S_IRUSR,
bat_priv->debug_dir, bat_priv, &log_fops);
if (d)
if (!d)
goto err;
return 0;
err:
return 1;
return -ENOMEM;
}
static void debug_log_cleanup(struct bat_priv *bat_priv)
@ -348,8 +348,11 @@ int debugfs_add_meshif(struct net_device *dev)
if (!bat_priv->debug_dir)
goto out;
bat_socket_setup(bat_priv);
debug_log_setup(bat_priv);
if (bat_socket_setup(bat_priv) < 0)
goto rem_attr;
if (debug_log_setup(bat_priv) < 0)
goto rem_attr;
for (bat_debug = mesh_debuginfos; *bat_debug; ++bat_debug) {
file = debugfs_create_file(((*bat_debug)->attr).name,

View File

@ -34,11 +34,12 @@ static struct neigh_node *bat_iv_ogm_neigh_new(struct hard_iface *hard_iface,
const uint8_t *neigh_addr,
struct orig_node *orig_node,
struct orig_node *orig_neigh,
uint32_t seqno)
__be32 seqno)
{
struct neigh_node *neigh_node;
neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, seqno);
neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr,
ntohl(seqno));
if (!neigh_node)
goto out;
@ -59,7 +60,7 @@ static int bat_iv_ogm_iface_enable(struct hard_iface *hard_iface)
{
struct batman_ogm_packet *batman_ogm_packet;
uint32_t random_seqno;
int res = -1;
int res = -ENOMEM;
/* randomize initial seqno to avoid collision */
get_random_bytes(&random_seqno, sizeof(random_seqno));
@ -196,8 +197,12 @@ static void bat_iv_ogm_send_to_if(struct forw_packet *forw_packet,
/* create clone because function is called more than once */
skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
if (skb)
if (skb) {
batadv_inc_counter(bat_priv, BAT_CNT_MGMT_TX);
batadv_add_counter(bat_priv, BAT_CNT_MGMT_TX_BYTES,
skb->len + ETH_HLEN);
send_skb_packet(skb, hard_iface, broadcast_addr);
}
}
/* send a batman ogm packet */
@ -542,9 +547,6 @@ static void bat_iv_ogm_forward(struct orig_node *orig_node,
"Forwarding packet: tq: %i, ttl: %i\n",
batman_ogm_packet->tq, batman_ogm_packet->header.ttl);
batman_ogm_packet->seqno = htonl(batman_ogm_packet->seqno);
batman_ogm_packet->tt_crc = htons(batman_ogm_packet->tt_crc);
/* switch of primaries first hop flag when forwarding */
batman_ogm_packet->flags &= ~PRIMARIES_FIRST_HOP;
if (is_single_hop_neigh)
@ -557,26 +559,31 @@ static void bat_iv_ogm_forward(struct orig_node *orig_node,
if_incoming, 0, bat_iv_ogm_fwd_send_time());
}
static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
int tt_num_changes)
static void bat_iv_ogm_schedule(struct hard_iface *hard_iface)
{
struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct batman_ogm_packet *batman_ogm_packet;
struct hard_iface *primary_if;
int vis_server;
int vis_server, tt_num_changes = 0;
vis_server = atomic_read(&bat_priv->vis_mode);
primary_if = primary_if_get_selected(bat_priv);
if (hard_iface == primary_if)
tt_num_changes = batadv_tt_append_diff(bat_priv,
&hard_iface->packet_buff,
&hard_iface->packet_len,
BATMAN_OGM_HLEN);
batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
/* change sequence number to network order */
batman_ogm_packet->seqno =
htonl((uint32_t)atomic_read(&hard_iface->seqno));
atomic_inc(&hard_iface->seqno);
batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
batman_ogm_packet->tt_crc = htons((uint16_t)
atomic_read(&bat_priv->tt_crc));
batman_ogm_packet->tt_crc = htons(bat_priv->tt_crc);
if (tt_num_changes >= 0)
batman_ogm_packet->tt_num_changes = tt_num_changes;
@ -592,8 +599,6 @@ static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
else
batman_ogm_packet->gw_flags = NO_FLAGS;
atomic_inc(&hard_iface->seqno);
slide_own_bcast_window(hard_iface);
bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
hard_iface->packet_len, hard_iface, 1,
@ -721,7 +726,7 @@ update_tt:
tt_update_orig(bat_priv, orig_node, tt_buff,
batman_ogm_packet->tt_num_changes,
batman_ogm_packet->ttvn,
batman_ogm_packet->tt_crc);
ntohs(batman_ogm_packet->tt_crc));
if (orig_node->gw_flags != batman_ogm_packet->gw_flags)
gw_node_update(bat_priv, orig_node,
@ -868,13 +873,14 @@ static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
int32_t seq_diff;
int need_update = 0;
int set_mark, ret = -1;
uint32_t seqno = ntohl(batman_ogm_packet->seqno);
orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig);
if (!orig_node)
return 0;
spin_lock_bh(&orig_node->ogm_cnt_lock);
seq_diff = batman_ogm_packet->seqno - orig_node->last_real_seqno;
seq_diff = seqno - orig_node->last_real_seqno;
/* signalize caller that the packet is to be dropped. */
if (!hlist_empty(&orig_node->neigh_list) &&
@ -888,7 +894,7 @@ static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
is_duplicate |= bat_test_bit(tmp_neigh_node->real_bits,
orig_node->last_real_seqno,
batman_ogm_packet->seqno);
seqno);
if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
(tmp_neigh_node->if_incoming == if_incoming))
@ -910,8 +916,8 @@ static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
if (need_update) {
bat_dbg(DBG_BATMAN, bat_priv,
"updating last_seqno: old %u, new %u\n",
orig_node->last_real_seqno, batman_ogm_packet->seqno);
orig_node->last_real_seqno = batman_ogm_packet->seqno;
orig_node->last_real_seqno, seqno);
orig_node->last_real_seqno = seqno;
}
ret = is_duplicate;
@ -967,8 +973,8 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
"Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
ethhdr->h_source, if_incoming->net_dev->name,
if_incoming->net_dev->dev_addr, batman_ogm_packet->orig,
batman_ogm_packet->prev_sender, batman_ogm_packet->seqno,
batman_ogm_packet->ttvn, batman_ogm_packet->tt_crc,
batman_ogm_packet->prev_sender, ntohl(batman_ogm_packet->seqno),
batman_ogm_packet->ttvn, ntohs(batman_ogm_packet->tt_crc),
batman_ogm_packet->tt_num_changes, batman_ogm_packet->tq,
batman_ogm_packet->header.ttl,
batman_ogm_packet->header.version, has_directlink_flag);
@ -1039,7 +1045,7 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
word = &(orig_neigh_node->bcast_own[offset]);
bat_set_bit(word,
if_incoming_seqno -
batman_ogm_packet->seqno - 2);
ntohl(batman_ogm_packet->seqno) - 2);
orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE);
spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
@ -1132,7 +1138,7 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
* seqno and similar ttl as the non-duplicate */
if (is_bidirectional &&
(!is_duplicate ||
((orig_node->last_real_seqno == batman_ogm_packet->seqno) &&
((orig_node->last_real_seqno == ntohl(batman_ogm_packet->seqno)) &&
(orig_node->last_ttl - 3 <= batman_ogm_packet->header.ttl))))
bat_iv_ogm_orig_update(bat_priv, orig_node, ethhdr,
batman_ogm_packet, if_incoming,
@ -1204,6 +1210,10 @@ static int bat_iv_ogm_receive(struct sk_buff *skb,
if (bat_priv->bat_algo_ops->bat_ogm_emit != bat_iv_ogm_emit)
return NET_RX_DROP;
batadv_inc_counter(bat_priv, BAT_CNT_MGMT_RX);
batadv_add_counter(bat_priv, BAT_CNT_MGMT_RX_BYTES,
skb->len + ETH_HLEN);
packet_len = skb_headlen(skb);
ethhdr = (struct ethhdr *)skb_mac_header(skb);
packet_buff = skb->data;
@ -1211,11 +1221,6 @@ static int bat_iv_ogm_receive(struct sk_buff *skb,
/* unpack the aggregated packets and process them one by one */
do {
/* network to host order for our 32bit seqno and the
orig_interval */
batman_ogm_packet->seqno = ntohl(batman_ogm_packet->seqno);
batman_ogm_packet->tt_crc = ntohs(batman_ogm_packet->tt_crc);
tt_buff = packet_buff + buff_pos + BATMAN_OGM_HLEN;
bat_iv_ogm_process(ethhdr, batman_ogm_packet,
@ -1234,7 +1239,7 @@ static int bat_iv_ogm_receive(struct sk_buff *skb,
}
static struct bat_algo_ops batman_iv __read_mostly = {
.name = "BATMAN IV",
.name = "BATMAN_IV",
.bat_iface_enable = bat_iv_ogm_iface_enable,
.bat_iface_disable = bat_iv_ogm_iface_disable,
.bat_iface_update_mac = bat_iv_ogm_iface_update_mac,

View File

@ -445,7 +445,7 @@ BAT_ATTR_SIF_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE,
static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth,
store_gw_bwidth);
#ifdef CONFIG_BATMAN_ADV_DEBUG
BAT_ATTR_SIF_UINT(log_level, S_IRUGO | S_IWUSR, 0, 15, NULL);
BAT_ATTR_SIF_UINT(log_level, S_IRUGO | S_IWUSR, 0, DBG_ALL, NULL);
#endif
static struct bat_attribute *mesh_attrs[] = {
@ -680,7 +680,7 @@ void sysfs_del_hardif(struct kobject **hardif_obj)
int throw_uevent(struct bat_priv *bat_priv, enum uev_type type,
enum uev_action action, const char *data)
{
int ret = -1;
int ret = -ENOMEM;
struct hard_iface *primary_if = NULL;
struct kobject *bat_kobj;
char *uevent_env[4] = { NULL, NULL, NULL, NULL };

View File

@ -258,7 +258,7 @@ static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac,
struct net_device *soft_iface;
uint8_t *hw_src;
struct bla_claim_dst local_claim_dest;
uint32_t zeroip = 0;
__be32 zeroip = 0;
primary_if = primary_if_get_selected(bat_priv);
if (!primary_if)
@ -506,11 +506,11 @@ static void bla_send_announce(struct bat_priv *bat_priv,
struct backbone_gw *backbone_gw)
{
uint8_t mac[ETH_ALEN];
uint16_t crc;
__be16 crc;
memcpy(mac, announce_mac, 4);
crc = htons(backbone_gw->crc);
memcpy(&mac[4], (uint8_t *)&crc, 2);
memcpy(&mac[4], &crc, 2);
bla_send_claim(bat_priv, mac, backbone_gw->vid, CLAIM_TYPE_ANNOUNCE);
@ -627,7 +627,7 @@ static int handle_announce(struct bat_priv *bat_priv,
/* handle as ANNOUNCE frame */
backbone_gw->lasttime = jiffies;
crc = ntohs(*((uint16_t *)(&an_addr[4])));
crc = ntohs(*((__be16 *)(&an_addr[4])));
bat_dbg(DBG_BLA, bat_priv,
"handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %04x\n",
@ -1127,6 +1127,14 @@ out:
bla_start_timer(bat_priv);
}
/* The claim hash and the backbone hash receive the same lock class key
 * because both are initialized by hash_new() with the same key.
 * Reinitializing them with two different keys allows nested locking
 * without generating lockdep warnings.
 */
static struct lock_class_key claim_hash_lock_class_key;
static struct lock_class_key backbone_hash_lock_class_key;
/* initialize all bla structures */
int bla_init(struct bat_priv *bat_priv)
{
@ -1156,18 +1164,23 @@ int bla_init(struct bat_priv *bat_priv)
bat_priv->bcast_duplist_curr = 0;
if (bat_priv->claim_hash)
return 1;
return 0;
bat_priv->claim_hash = hash_new(128);
bat_priv->backbone_hash = hash_new(32);
if (!bat_priv->claim_hash || !bat_priv->backbone_hash)
return -1;
return -ENOMEM;
batadv_hash_set_lock_class(bat_priv->claim_hash,
&claim_hash_lock_class_key);
batadv_hash_set_lock_class(bat_priv->backbone_hash,
&backbone_hash_lock_class_key);
bat_dbg(DBG_BLA, bat_priv, "bla hashes initialized\n");
bla_start_timer(bat_priv);
return 1;
return 0;
}
/**

View File

@ -162,6 +162,9 @@ ssize_t gw_bandwidth_set(struct net_device *net_dev, char *buff, size_t count)
**/
gw_bandwidth_to_kbit((uint8_t)gw_bandwidth_tmp, &down, &up);
if (atomic_read(&bat_priv->gw_bandwidth) == gw_bandwidth_tmp)
return count;
gw_deselect(bat_priv);
bat_info(net_dev,
"Changing gateway bandwidth from: '%i' to: '%ld' (propagating: %d%s/%d%s)\n",

View File

@ -306,10 +306,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
bat_priv = netdev_priv(hard_iface->soft_iface);
ret = bat_priv->bat_algo_ops->bat_iface_enable(hard_iface);
if (ret < 0) {
ret = -ENOMEM;
if (ret < 0)
goto err_dev;
}
hard_iface->if_num = bat_priv->num_ifaces;
bat_priv->num_ifaces++;

View File

@ -69,3 +69,12 @@ free_hash:
kfree(hash);
return NULL;
}
void batadv_hash_set_lock_class(struct hashtable_t *hash,
struct lock_class_key *key)
{
uint32_t i;
for (i = 0; i < hash->size; i++)
lockdep_set_class(&hash->list_locks[i], key);
}

View File

@ -45,6 +45,10 @@ struct hashtable_t {
/* allocates and clears the hash */
struct hashtable_t *hash_new(uint32_t size);
/* set class key for all locks */
void batadv_hash_set_lock_class(struct hashtable_t *hash,
struct lock_class_key *key);
/* free only the hashtable and the hash itself. */
void hash_destroy(struct hashtable_t *hash);
@ -106,26 +110,23 @@ static inline int hash_add(struct hashtable_t *hash,
head = &hash->table[index];
list_lock = &hash->list_locks[index];
rcu_read_lock();
__hlist_for_each_rcu(node, head) {
spin_lock_bh(list_lock);
hlist_for_each(node, head) {
if (!compare(node, data))
continue;
ret = 1;
goto err_unlock;
goto unlock;
}
rcu_read_unlock();
/* no duplicate found in list, add new element */
spin_lock_bh(list_lock);
hlist_add_head_rcu(data_node, head);
spin_unlock_bh(list_lock);
ret = 0;
goto out;
err_unlock:
rcu_read_unlock();
unlock:
spin_unlock_bh(list_lock);
out:
return ret;
}
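
Because this rendering interleaves the removed and added lines, the point of
the hunk above is easy to miss: the duplicate check and the insertion now run
under the same per-bucket spinlock, instead of an rcu-protected scan followed
by a separately locked insert (which let two writers both miss the other's
entry). For readability, this is the resulting tail of hash_add() as
reconstructed from the hunk; it is a sketch, not a verbatim copy of the file:

	head = &hash->table[index];
	list_lock = &hash->list_locks[index];

	spin_lock_bh(list_lock);
	hlist_for_each(node, head) {
		if (!compare(node, data))
			continue;

		ret = 1;
		goto unlock;
	}

	/* no duplicate found in list, add new element */
	hlist_add_head_rcu(data_node, head);
	ret = 0;

unlock:
	spin_unlock_bh(list_lock);
out:
	return ret;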

View File

@ -285,13 +285,13 @@ int bat_socket_setup(struct bat_priv *bat_priv)
d = debugfs_create_file(ICMP_SOCKET, S_IFREG | S_IWUSR | S_IRUSR,
bat_priv->debug_dir, bat_priv, &fops);
if (d)
if (!d)
goto err;
return 0;
err:
return 1;
return -ENOMEM;
}
static void bat_socket_add_packet(struct socket_client *socket_client,

View File

@ -40,7 +40,7 @@
* list traversals just rcu-locked */
struct list_head hardif_list;
static int (*recv_packet_handler[256])(struct sk_buff *, struct hard_iface *);
char bat_routing_algo[20] = "BATMAN IV";
char bat_routing_algo[20] = "BATMAN_IV";
static struct hlist_head bat_algo_list;
unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
@ -92,6 +92,7 @@ static void __exit batman_exit(void)
int mesh_init(struct net_device *soft_iface)
{
struct bat_priv *bat_priv = netdev_priv(soft_iface);
int ret;
spin_lock_init(&bat_priv->forw_bat_list_lock);
spin_lock_init(&bat_priv->forw_bcast_list_lock);
@ -110,30 +111,32 @@ int mesh_init(struct net_device *soft_iface)
INIT_LIST_HEAD(&bat_priv->tt_req_list);
INIT_LIST_HEAD(&bat_priv->tt_roam_list);
if (originator_init(bat_priv) < 1)
ret = originator_init(bat_priv);
if (ret < 0)
goto err;
if (tt_init(bat_priv) < 1)
ret = tt_init(bat_priv);
if (ret < 0)
goto err;
tt_local_add(soft_iface, soft_iface->dev_addr, NULL_IFINDEX);
if (vis_init(bat_priv) < 1)
ret = vis_init(bat_priv);
if (ret < 0)
goto err;
if (bla_init(bat_priv) < 1)
ret = bla_init(bat_priv);
if (ret < 0)
goto err;
atomic_set(&bat_priv->gw_reselect, 0);
atomic_set(&bat_priv->mesh_state, MESH_ACTIVE);
goto end;
return 0;
err:
mesh_free(soft_iface);
return -1;
end:
return 0;
return ret;
}
void mesh_free(struct net_device *soft_iface)
@ -153,6 +156,8 @@ void mesh_free(struct net_device *soft_iface)
bla_free(bat_priv);
free_percpu(bat_priv->bat_counters);
atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
}
@ -317,12 +322,13 @@ static struct bat_algo_ops *bat_algo_get(char *name)
int bat_algo_register(struct bat_algo_ops *bat_algo_ops)
{
struct bat_algo_ops *bat_algo_ops_tmp;
int ret = -1;
int ret;
bat_algo_ops_tmp = bat_algo_get(bat_algo_ops->name);
if (bat_algo_ops_tmp) {
pr_info("Trying to register already registered routing algorithm: %s\n",
bat_algo_ops->name);
ret = -EEXIST;
goto out;
}
@ -335,6 +341,7 @@ int bat_algo_register(struct bat_algo_ops *bat_algo_ops)
!bat_algo_ops->bat_ogm_emit) {
pr_info("Routing algo '%s' does not implement required ops\n",
bat_algo_ops->name);
ret = -EINVAL;
goto out;
}
@ -349,7 +356,7 @@ out:
int bat_algo_select(struct bat_priv *bat_priv, char *name)
{
struct bat_algo_ops *bat_algo_ops;
int ret = -1;
int ret = -EINVAL;
bat_algo_ops = bat_algo_get(name);
if (!bat_algo_ops)
@ -379,14 +386,19 @@ int bat_algo_seq_print_text(struct seq_file *seq, void *offset)
static int param_set_ra(const char *val, const struct kernel_param *kp)
{
struct bat_algo_ops *bat_algo_ops;
char *algo_name = (char *)val;
size_t name_len = strlen(algo_name);
bat_algo_ops = bat_algo_get((char *)val);
if (algo_name[name_len - 1] == '\n')
algo_name[name_len - 1] = '\0';
bat_algo_ops = bat_algo_get(algo_name);
if (!bat_algo_ops) {
pr_err("Routing algorithm '%s' is not supported\n", val);
pr_err("Routing algorithm '%s' is not supported\n", algo_name);
return -EINVAL;
}
return param_set_copystring(val, kp);
return param_set_copystring(algo_name, kp);
}
static const struct kernel_param_ops param_ops_ra = {

View File

@ -28,7 +28,7 @@
#define DRIVER_DEVICE "batman-adv"
#ifndef SOURCE_VERSION
#define SOURCE_VERSION "2012.2.0"
#define SOURCE_VERSION "2012.3.0"
#endif
/* B.A.T.M.A.N. parameters */
@ -138,6 +138,7 @@ enum dbg_level {
#include <linux/kthread.h> /* kernel threads */
#include <linux/pkt_sched.h> /* schedule types */
#include <linux/workqueue.h> /* workqueue */
#include <linux/percpu.h>
#include <linux/slab.h>
#include <net/sock.h> /* struct sock */
#include <linux/jiffies.h>
@ -242,4 +243,30 @@ static inline bool has_timed_out(unsigned long timestamp, unsigned int timeout)
_dummy > smallest_signed_int(_dummy); })
#define seq_after(x, y) seq_before(y, x)
/* Stop preemption on local cpu while incrementing the counter */
static inline void batadv_add_counter(struct bat_priv *bat_priv, size_t idx,
size_t count)
{
int cpu = get_cpu();
per_cpu_ptr(bat_priv->bat_counters, cpu)[idx] += count;
put_cpu();
}
#define batadv_inc_counter(b, i) batadv_add_counter(b, i, 1)
/* Sum and return the cpu-local counters for index 'idx' */
static inline uint64_t batadv_sum_counter(struct bat_priv *bat_priv, size_t idx)
{
uint64_t *counters;
int cpu;
int sum = 0;
for_each_possible_cpu(cpu) {
counters = per_cpu_ptr(bat_priv->bat_counters, cpu);
sum += counters[idx];
}
return sum;
}
#endif /* _NET_BATMAN_ADV_MAIN_H_ */

View File

@ -50,7 +50,7 @@ static int compare_orig(const struct hlist_node *node, const void *data2)
int originator_init(struct bat_priv *bat_priv)
{
if (bat_priv->orig_hash)
return 1;
return 0;
bat_priv->orig_hash = hash_new(1024);
@ -58,10 +58,10 @@ int originator_init(struct bat_priv *bat_priv)
goto err;
start_purge_timer(bat_priv);
return 1;
return 0;
err:
return 0;
return -ENOMEM;
}
void neigh_node_free_ref(struct neigh_node *neigh_node)
@ -488,7 +488,7 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
data_ptr = kmalloc(max_if_num * sizeof(unsigned long) * NUM_WORDS,
GFP_ATOMIC);
if (!data_ptr)
return -1;
return -ENOMEM;
memcpy(data_ptr, orig_node->bcast_own,
(max_if_num - 1) * sizeof(unsigned long) * NUM_WORDS);
@ -497,7 +497,7 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
if (!data_ptr)
return -1;
return -ENOMEM;
memcpy(data_ptr, orig_node->bcast_own_sum,
(max_if_num - 1) * sizeof(uint8_t));
@ -528,7 +528,7 @@ int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num)
ret = orig_node_add_if(orig_node, max_if_num);
spin_unlock_bh(&orig_node->ogm_cnt_lock);
if (ret == -1)
if (ret == -ENOMEM)
goto err;
}
rcu_read_unlock();
@ -554,7 +554,7 @@ static int orig_node_del_if(struct orig_node *orig_node,
chunk_size = sizeof(unsigned long) * NUM_WORDS;
data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
if (!data_ptr)
return -1;
return -ENOMEM;
/* copy first part */
memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);
@ -573,7 +573,7 @@ free_bcast_own:
data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
if (!data_ptr)
return -1;
return -ENOMEM;
memcpy(data_ptr, orig_node->bcast_own_sum,
del_if_num * sizeof(uint8_t));
@ -612,7 +612,7 @@ int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num)
hard_iface->if_num);
spin_unlock_bh(&orig_node->ogm_cnt_lock);
if (ret == -1)
if (ret == -ENOMEM)
goto err;
}
rcu_read_unlock();

View File

@ -105,7 +105,7 @@ enum bla_claimframe {
struct bla_claim_dst {
uint8_t magic[3]; /* FF:43:05 */
uint8_t type; /* bla_claimframe */
uint16_t group; /* group id */
__be16 group; /* group id */
} __packed;
struct batman_header {
@ -117,14 +117,14 @@ struct batman_header {
struct batman_ogm_packet {
struct batman_header header;
uint8_t flags; /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */
uint32_t seqno;
__be32 seqno;
uint8_t orig[ETH_ALEN];
uint8_t prev_sender[ETH_ALEN];
uint8_t gw_flags; /* flags related to gateway class */
uint8_t tq;
uint8_t tt_num_changes;
uint8_t ttvn; /* translation table version number */
uint16_t tt_crc;
__be16 tt_crc;
} __packed;
#define BATMAN_OGM_HLEN sizeof(struct batman_ogm_packet)
@ -134,7 +134,7 @@ struct icmp_packet {
uint8_t msg_type; /* see ICMP message types above */
uint8_t dst[ETH_ALEN];
uint8_t orig[ETH_ALEN];
uint16_t seqno;
__be16 seqno;
uint8_t uid;
uint8_t reserved;
} __packed;
@ -148,7 +148,7 @@ struct icmp_packet_rr {
uint8_t msg_type; /* see ICMP message types above */
uint8_t dst[ETH_ALEN];
uint8_t orig[ETH_ALEN];
uint16_t seqno;
__be16 seqno;
uint8_t uid;
uint8_t rr_cur;
uint8_t rr[BAT_RR_LEN][ETH_ALEN];
@ -167,20 +167,20 @@ struct unicast_frag_packet {
uint8_t flags;
uint8_t align;
uint8_t orig[ETH_ALEN];
uint16_t seqno;
__be16 seqno;
} __packed;
struct bcast_packet {
struct batman_header header;
uint8_t reserved;
uint32_t seqno;
__be32 seqno;
uint8_t orig[ETH_ALEN];
} __packed;
struct vis_packet {
struct batman_header header;
uint8_t vis_type; /* which type of vis-participant sent this? */
uint32_t seqno; /* sequence number */
__be32 seqno; /* sequence number */
uint8_t entries; /* number of entries behind this struct */
uint8_t reserved;
uint8_t vis_orig[ETH_ALEN]; /* originator reporting its neighbors */
@ -206,7 +206,7 @@ struct tt_query_packet {
* if TT_REQUEST: crc associated with the
* ttvn
* if TT_RESPONSE: table_size */
uint16_t tt_data;
__be16 tt_data;
} __packed;
struct roam_adv_packet {
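
The packet.h hunk above carries the "endianness cleanup" part of this series:
fields that travel on the wire are now annotated __be16/__be32 and converted
with ntohs()/ntohl() exactly where they cross the host/wire boundary (see the
matching bat_iv_ogm.c and routing.c hunks). A minimal sketch of the
convention, using a hypothetical struct rather than the real packet layout,
so that sparse (make C=1 CF=-D__CHECK_ENDIAN__) can flag host-order
arithmetic on wire data:

#include <linux/types.h>
#include <asm/byteorder.h>

struct wire_example {			/* hypothetical, not a batman-adv struct */
	__be32 seqno;			/* stays in network order in memory */
	__be16 crc;
};

static inline bool wire_example_seq_newer(const struct wire_example *pkt,
					  uint32_t last_seqno)
{
	/* convert once at the boundary, then compare in host order */
	return (int32_t)(ntohl(pkt->seqno) - last_seqno) > 0;
}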

View File

@ -573,7 +573,7 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
{
struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct tt_query_packet *tt_query;
uint16_t tt_len;
uint16_t tt_size;
struct ethhdr *ethhdr;
/* drop packet if it has not necessary minimum size */
@ -596,10 +596,10 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
tt_query = (struct tt_query_packet *)skb->data;
tt_query->tt_data = ntohs(tt_query->tt_data);
switch (tt_query->flags & TT_QUERY_TYPE_MASK) {
case TT_REQUEST:
batadv_inc_counter(bat_priv, BAT_CNT_TT_REQUEST_RX);
/* If we cannot provide an answer the tt_request is
* forwarded */
if (!send_tt_response(bat_priv, tt_query)) {
@ -607,22 +607,25 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
"Routing TT_REQUEST to %pM [%c]\n",
tt_query->dst,
(tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
tt_query->tt_data = htons(tt_query->tt_data);
return route_unicast_packet(skb, recv_if);
}
break;
case TT_RESPONSE:
batadv_inc_counter(bat_priv, BAT_CNT_TT_RESPONSE_RX);
if (is_my_mac(tt_query->dst)) {
/* packet needs to be linearized to access the TT
* changes */
if (skb_linearize(skb) < 0)
goto out;
/* skb_linearize() possibly changed skb->data */
tt_query = (struct tt_query_packet *)skb->data;
tt_len = tt_query->tt_data * sizeof(struct tt_change);
tt_size = tt_len(ntohs(tt_query->tt_data));
/* Ensure we have all the claimed data */
if (unlikely(skb_headlen(skb) <
sizeof(struct tt_query_packet) + tt_len))
sizeof(struct tt_query_packet) + tt_size))
goto out;
handle_tt_response(bat_priv, tt_query);
@ -631,7 +634,6 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
"Routing TT_RESPONSE to %pM [%c]\n",
tt_query->dst,
(tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
tt_query->tt_data = htons(tt_query->tt_data);
return route_unicast_packet(skb, recv_if);
}
break;
@ -663,6 +665,8 @@ int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
if (is_broadcast_ether_addr(ethhdr->h_source))
goto out;
batadv_inc_counter(bat_priv, BAT_CNT_TT_ROAM_ADV_RX);
roam_adv_packet = (struct roam_adv_packet *)skb->data;
if (!is_my_mac(roam_adv_packet->dst))
@ -870,6 +874,11 @@ static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
/* decrement ttl */
unicast_packet->header.ttl--;
/* Update stats counter */
batadv_inc_counter(bat_priv, BAT_CNT_FORWARD);
batadv_add_counter(bat_priv, BAT_CNT_FORWARD_BYTES,
skb->len + ETH_HLEN);
/* route it */
send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
ret = NET_RX_SUCCESS;

View File

@ -77,62 +77,9 @@ send_skb_err:
return NET_XMIT_DROP;
}
static void realloc_packet_buffer(struct hard_iface *hard_iface,
int new_len)
{
unsigned char *new_buff;
new_buff = kmalloc(new_len, GFP_ATOMIC);
/* keep old buffer if kmalloc should fail */
if (new_buff) {
memcpy(new_buff, hard_iface->packet_buff,
BATMAN_OGM_HLEN);
kfree(hard_iface->packet_buff);
hard_iface->packet_buff = new_buff;
hard_iface->packet_len = new_len;
}
}
/* when calling this function (hard_iface == primary_if) has to be true */
static int prepare_packet_buffer(struct bat_priv *bat_priv,
struct hard_iface *hard_iface)
{
int new_len;
new_len = BATMAN_OGM_HLEN +
tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));
/* if we have too many changes for one packet don't send any
* and wait for the tt table request which will be fragmented */
if (new_len > hard_iface->soft_iface->mtu)
new_len = BATMAN_OGM_HLEN;
realloc_packet_buffer(hard_iface, new_len);
atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));
/* reset the sending counter */
atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);
return tt_changes_fill_buffer(bat_priv,
hard_iface->packet_buff + BATMAN_OGM_HLEN,
hard_iface->packet_len - BATMAN_OGM_HLEN);
}
static int reset_packet_buffer(struct bat_priv *bat_priv,
struct hard_iface *hard_iface)
{
realloc_packet_buffer(hard_iface, BATMAN_OGM_HLEN);
return 0;
}
void schedule_bat_ogm(struct hard_iface *hard_iface)
{
struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct hard_iface *primary_if;
int tt_num_changes = -1;
if ((hard_iface->if_status == IF_NOT_IN_USE) ||
(hard_iface->if_status == IF_TO_BE_REMOVED))
@ -148,26 +95,7 @@ void schedule_bat_ogm(struct hard_iface *hard_iface)
if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
hard_iface->if_status = IF_ACTIVE;
primary_if = primary_if_get_selected(bat_priv);
if (hard_iface == primary_if) {
/* if at least one change happened */
if (atomic_read(&bat_priv->tt_local_changes) > 0) {
tt_commit_changes(bat_priv);
tt_num_changes = prepare_packet_buffer(bat_priv,
hard_iface);
}
/* if the changes have been sent often enough */
if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
tt_num_changes = reset_packet_buffer(bat_priv,
hard_iface);
}
if (primary_if)
hardif_free_ref(primary_if);
bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface, tt_num_changes);
bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}
static void forw_packet_free(struct forw_packet *forw_packet)
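
This send.c hunk is the other half of the "routing protocol API improvement
wrt TT commit code" bullet: the TT buffer handling moves out of the generic
scheduler and behind the bat_algo_ops interface. An abridged reconstruction
of the new call flow, stitched together from the send.c, bat_iv_ogm.c and
translation-table.c hunks in this series (only the TT-related lines are kept,
elisions are marked with "..."):

/* send.c -- the generic scheduler no longer touches the TT buffer */
void schedule_bat_ogm(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	/* ... interface status checks ... */
	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}

/* bat_iv_ogm.c -- the routing algorithm asks the TT code for a diff */
static void bat_iv_ogm_schedule(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hard_iface *primary_if = primary_if_get_selected(bat_priv);
	int tt_num_changes = 0;

	if (hard_iface == primary_if)
		/* commits pending local TT changes and appends the diff to
		 * the interface packet buffer; see batadv_tt_append_diff()
		 * in the translation-table.c hunk further down
		 */
		tt_num_changes = batadv_tt_append_diff(bat_priv,
						       &hard_iface->packet_buff,
						       &hard_iface->packet_len,
						       BATMAN_OGM_HLEN);
	/* ... fill in seqno, ttvn, tt_crc and queue the OGM ... */
}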

View File

@ -45,6 +45,10 @@ static void bat_get_drvinfo(struct net_device *dev,
static u32 bat_get_msglevel(struct net_device *dev);
static void bat_set_msglevel(struct net_device *dev, u32 value);
static u32 bat_get_link(struct net_device *dev);
static void batadv_get_strings(struct net_device *dev, u32 stringset, u8 *data);
static void batadv_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data);
static int batadv_get_sset_count(struct net_device *dev, int stringset);
static const struct ethtool_ops bat_ethtool_ops = {
.get_settings = bat_get_settings,
@ -52,6 +56,9 @@ static const struct ethtool_ops bat_ethtool_ops = {
.get_msglevel = bat_get_msglevel,
.set_msglevel = bat_set_msglevel,
.get_link = bat_get_link,
.get_strings = batadv_get_strings,
.get_ethtool_stats = batadv_get_ethtool_stats,
.get_sset_count = batadv_get_sset_count,
};
int my_skb_head_push(struct sk_buff *skb, unsigned int len)
@ -399,13 +406,18 @@ struct net_device *softif_create(const char *name)
bat_priv->primary_if = NULL;
bat_priv->num_ifaces = 0;
bat_priv->bat_counters = __alloc_percpu(sizeof(uint64_t) * BAT_CNT_NUM,
__alignof__(uint64_t));
if (!bat_priv->bat_counters)
goto unreg_soft_iface;
ret = bat_algo_select(bat_priv, bat_routing_algo);
if (ret < 0)
goto unreg_soft_iface;
goto free_bat_counters;
ret = sysfs_add_meshif(soft_iface);
if (ret < 0)
goto unreg_soft_iface;
goto free_bat_counters;
ret = debugfs_add_meshif(soft_iface);
if (ret < 0)
@ -421,6 +433,8 @@ unreg_debugfs:
debugfs_del_meshif(soft_iface);
unreg_sysfs:
sysfs_del_meshif(soft_iface);
free_bat_counters:
free_percpu(bat_priv->bat_counters);
unreg_soft_iface:
unregister_netdevice(soft_iface);
return NULL;
@ -486,3 +500,51 @@ static u32 bat_get_link(struct net_device *dev)
{
return 1;
}
/* Inspired by drivers/net/ethernet/dlink/sundance.c:1702
* Declare each description string in struct.name[] to get fixed sized buffer
* and compile time checking for strings longer than ETH_GSTRING_LEN.
*/
static const struct {
const char name[ETH_GSTRING_LEN];
} bat_counters_strings[] = {
{ "forward" },
{ "forward_bytes" },
{ "mgmt_tx" },
{ "mgmt_tx_bytes" },
{ "mgmt_rx" },
{ "mgmt_rx_bytes" },
{ "tt_request_tx" },
{ "tt_request_rx" },
{ "tt_response_tx" },
{ "tt_response_rx" },
{ "tt_roam_adv_tx" },
{ "tt_roam_adv_rx" },
};
static void batadv_get_strings(struct net_device *dev, uint32_t stringset,
uint8_t *data)
{
if (stringset == ETH_SS_STATS)
memcpy(data, bat_counters_strings,
sizeof(bat_counters_strings));
}
static void batadv_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats,
uint64_t *data)
{
struct bat_priv *bat_priv = netdev_priv(dev);
int i;
for (i = 0; i < BAT_CNT_NUM; i++)
data[i] = batadv_sum_counter(bat_priv, i);
}
static int batadv_get_sset_count(struct net_device *dev, int stringset)
{
if (stringset == ETH_SS_STATS)
return BAT_CNT_NUM;
return -EOPNOTSUPP;
}

View File

@ -181,14 +181,14 @@ int tt_len(int changes_num)
static int tt_local_init(struct bat_priv *bat_priv)
{
if (bat_priv->tt_local_hash)
return 1;
return 0;
bat_priv->tt_local_hash = hash_new(1024);
if (!bat_priv->tt_local_hash)
return 0;
return -ENOMEM;
return 1;
return 0;
}
void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
@ -275,14 +275,64 @@ out:
tt_global_entry_free_ref(tt_global_entry);
}
int tt_changes_fill_buffer(struct bat_priv *bat_priv,
unsigned char *buff, int buff_len)
static void tt_realloc_packet_buff(unsigned char **packet_buff,
int *packet_buff_len, int min_packet_len,
int new_packet_len)
{
int count = 0, tot_changes = 0;
struct tt_change_node *entry, *safe;
unsigned char *new_buff;
if (buff_len > 0)
tot_changes = buff_len / tt_len(1);
new_buff = kmalloc(new_packet_len, GFP_ATOMIC);
/* keep old buffer if kmalloc should fail */
if (new_buff) {
memcpy(new_buff, *packet_buff, min_packet_len);
kfree(*packet_buff);
*packet_buff = new_buff;
*packet_buff_len = new_packet_len;
}
}
static void tt_prepare_packet_buff(struct bat_priv *bat_priv,
unsigned char **packet_buff,
int *packet_buff_len, int min_packet_len)
{
struct hard_iface *primary_if;
int req_len;
primary_if = primary_if_get_selected(bat_priv);
req_len = min_packet_len;
req_len += tt_len(atomic_read(&bat_priv->tt_local_changes));
/* if we have too many changes for one packet don't send any
* and wait for the tt table request which will be fragmented
*/
if ((!primary_if) || (req_len > primary_if->soft_iface->mtu))
req_len = min_packet_len;
tt_realloc_packet_buff(packet_buff, packet_buff_len,
min_packet_len, req_len);
if (primary_if)
hardif_free_ref(primary_if);
}
static int tt_changes_fill_buff(struct bat_priv *bat_priv,
unsigned char **packet_buff,
int *packet_buff_len, int min_packet_len)
{
struct tt_change_node *entry, *safe;
int count = 0, tot_changes = 0, new_len;
unsigned char *tt_buff;
tt_prepare_packet_buff(bat_priv, packet_buff,
packet_buff_len, min_packet_len);
new_len = *packet_buff_len - min_packet_len;
tt_buff = *packet_buff + min_packet_len;
if (new_len > 0)
tot_changes = new_len / tt_len(1);
spin_lock_bh(&bat_priv->tt_changes_list_lock);
atomic_set(&bat_priv->tt_local_changes, 0);
@ -290,7 +340,7 @@ int tt_changes_fill_buffer(struct bat_priv *bat_priv,
list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
list) {
if (count < tot_changes) {
memcpy(buff + tt_len(count),
memcpy(tt_buff + tt_len(count),
&entry->change, sizeof(struct tt_change));
count++;
}
@ -304,22 +354,20 @@ int tt_changes_fill_buffer(struct bat_priv *bat_priv,
kfree(bat_priv->tt_buff);
bat_priv->tt_buff_len = 0;
bat_priv->tt_buff = NULL;
/* We check whether this new OGM has no changes due to size
* problems */
if (buff_len > 0) {
/**
* if kmalloc() fails we will reply with the full table
/* check whether this new OGM has no changes due to size problems */
if (new_len > 0) {
/* if kmalloc() fails we will reply with the full table
* instead of providing the diff
*/
bat_priv->tt_buff = kmalloc(buff_len, GFP_ATOMIC);
bat_priv->tt_buff = kmalloc(new_len, GFP_ATOMIC);
if (bat_priv->tt_buff) {
memcpy(bat_priv->tt_buff, buff, buff_len);
bat_priv->tt_buff_len = buff_len;
memcpy(bat_priv->tt_buff, tt_buff, new_len);
bat_priv->tt_buff_len = new_len;
}
}
spin_unlock_bh(&bat_priv->tt_buff_lock);
return tot_changes;
return count;
}
int tt_local_seq_print_text(struct seq_file *seq, void *offset)
@ -491,14 +539,14 @@ static void tt_local_table_free(struct bat_priv *bat_priv)
static int tt_global_init(struct bat_priv *bat_priv)
{
if (bat_priv->tt_global_hash)
return 1;
return 0;
bat_priv->tt_global_hash = hash_new(1024);
if (!bat_priv->tt_global_hash)
return 0;
return -ENOMEM;
return 1;
return 0;
}
static void tt_changes_list_free(struct bat_priv *bat_priv)
@ -1105,7 +1153,7 @@ static uint16_t tt_global_crc(struct bat_priv *bat_priv,
}
/* Calculates the checksum of the local table */
uint16_t tt_local_crc(struct bat_priv *bat_priv)
static uint16_t batadv_tt_local_crc(struct bat_priv *bat_priv)
{
uint16_t total = 0, total_one;
struct hashtable_t *hash = bat_priv->tt_local_hash;
@ -1356,6 +1404,8 @@ static int send_tt_request(struct bat_priv *bat_priv,
dst_orig_node->orig, neigh_node->addr,
(full_table ? 'F' : '.'));
batadv_inc_counter(bat_priv, BAT_CNT_TT_REQUEST_TX);
send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
ret = 0;
@ -1416,7 +1466,7 @@ static bool send_other_tt_response(struct bat_priv *bat_priv,
/* I don't have the requested data */
if (orig_ttvn != req_ttvn ||
tt_request->tt_data != req_dst_orig_node->tt_crc)
tt_request->tt_data != htons(req_dst_orig_node->tt_crc))
goto out;
/* If the full table has been explicitly requested */
@ -1480,6 +1530,8 @@ static bool send_other_tt_response(struct bat_priv *bat_priv,
res_dst_orig_node->orig, neigh_node->addr,
req_dst_orig_node->orig, req_ttvn);
batadv_inc_counter(bat_priv, BAT_CNT_TT_RESPONSE_TX);
send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
ret = true;
goto out;
@ -1596,6 +1648,8 @@ static bool send_my_tt_response(struct bat_priv *bat_priv,
orig_node->orig, neigh_node->addr,
(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
batadv_inc_counter(bat_priv, BAT_CNT_TT_RESPONSE_TX);
send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
ret = true;
goto out;
@ -1672,7 +1726,7 @@ static void tt_fill_gtable(struct bat_priv *bat_priv,
_tt_update_changes(bat_priv, orig_node,
(struct tt_change *)(tt_response + 1),
tt_response->tt_data, tt_response->ttvn);
ntohs(tt_response->tt_data), tt_response->ttvn);
spin_lock_bh(&orig_node->tt_buff_lock);
kfree(orig_node->tt_buff);
@ -1727,7 +1781,8 @@ void handle_tt_response(struct bat_priv *bat_priv,
bat_dbg(DBG_TT, bat_priv,
"Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
tt_response->src, tt_response->ttvn, tt_response->tt_data,
tt_response->src, tt_response->ttvn,
ntohs(tt_response->tt_data),
(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
/* we should have never asked a backbone gw */
@ -1741,7 +1796,8 @@ void handle_tt_response(struct bat_priv *bat_priv,
if (tt_response->flags & TT_FULL_TABLE)
tt_fill_gtable(bat_priv, tt_response);
else
tt_update_changes(bat_priv, orig_node, tt_response->tt_data,
tt_update_changes(bat_priv, orig_node,
ntohs(tt_response->tt_data),
tt_response->ttvn,
(struct tt_change *)(tt_response + 1));
@ -1767,11 +1823,15 @@ out:
int tt_init(struct bat_priv *bat_priv)
{
if (!tt_local_init(bat_priv))
return 0;
int ret;
if (!tt_global_init(bat_priv))
return 0;
ret = tt_local_init(bat_priv);
if (ret < 0)
return ret;
ret = tt_global_init(bat_priv);
if (ret < 0)
return ret;
tt_start_timer(bat_priv);
@ -1895,6 +1955,8 @@ static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
"Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
orig_node->orig, client, neigh_node->addr);
batadv_inc_counter(bat_priv, BAT_CNT_TT_ROAM_ADV_TX);
send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
ret = 0;
@ -2011,20 +2073,56 @@ static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
}
void tt_commit_changes(struct bat_priv *bat_priv)
static int tt_commit_changes(struct bat_priv *bat_priv,
unsigned char **packet_buff, int *packet_buff_len,
int packet_min_len)
{
uint16_t changed_num = tt_set_flags(bat_priv->tt_local_hash,
TT_CLIENT_NEW, false);
/* all the reset entries have now to be effectively counted as local
* entries */
uint16_t changed_num = 0;
if (atomic_read(&bat_priv->tt_local_changes) < 1)
return -ENOENT;
changed_num = tt_set_flags(bat_priv->tt_local_hash,
TT_CLIENT_NEW, false);
/* all reset entries have to be counted as local entries */
atomic_add(changed_num, &bat_priv->num_local_tt);
tt_local_purge_pending_clients(bat_priv);
bat_priv->tt_crc = batadv_tt_local_crc(bat_priv);
/* Increment the TTVN only once per OGM interval */
atomic_inc(&bat_priv->ttvn);
bat_dbg(DBG_TT, bat_priv, "Local changes committed, updating to ttvn %u\n",
(uint8_t)atomic_read(&bat_priv->ttvn));
bat_priv->tt_poss_change = false;
/* reset the sending counter */
atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);
return tt_changes_fill_buff(bat_priv, packet_buff,
packet_buff_len, packet_min_len);
}
/* when calling this function (hard_iface == primary_if) has to be true */
int batadv_tt_append_diff(struct bat_priv *bat_priv,
unsigned char **packet_buff, int *packet_buff_len,
int packet_min_len)
{
int tt_num_changes;
/* if at least one change happened */
tt_num_changes = tt_commit_changes(bat_priv, packet_buff,
packet_buff_len, packet_min_len);
/* if the changes have been sent often enough */
if ((tt_num_changes < 0) &&
(!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))) {
tt_realloc_packet_buff(packet_buff, packet_buff_len,
packet_min_len, packet_min_len);
tt_num_changes = 0;
}
return tt_num_changes;
}
bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)

View File

@ -23,8 +23,6 @@
#define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
int tt_len(int changes_num);
int tt_changes_fill_buffer(struct bat_priv *bat_priv,
unsigned char *buff, int buff_len);
int tt_init(struct bat_priv *bat_priv);
void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
int ifindex);
@ -41,18 +39,19 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
struct orig_node *orig_node, const char *message);
struct orig_node *transtable_search(struct bat_priv *bat_priv,
const uint8_t *src, const uint8_t *addr);
uint16_t tt_local_crc(struct bat_priv *bat_priv);
void tt_free(struct bat_priv *bat_priv);
bool send_tt_response(struct bat_priv *bat_priv,
struct tt_query_packet *tt_request);
bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr);
void handle_tt_response(struct bat_priv *bat_priv,
struct tt_query_packet *tt_response);
void tt_commit_changes(struct bat_priv *bat_priv);
bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst);
void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
const unsigned char *tt_buff, uint8_t tt_num_changes,
uint8_t ttvn, uint16_t tt_crc);
int batadv_tt_append_diff(struct bat_priv *bat_priv,
unsigned char **packet_buff, int *packet_buff_len,
int packet_min_len);
bool tt_global_client_is_roaming(struct bat_priv *bat_priv, uint8_t *addr);

View File

@ -148,9 +148,26 @@ struct bcast_duplist_entry {
};
#endif
enum bat_counters {
BAT_CNT_FORWARD,
BAT_CNT_FORWARD_BYTES,
BAT_CNT_MGMT_TX,
BAT_CNT_MGMT_TX_BYTES,
BAT_CNT_MGMT_RX,
BAT_CNT_MGMT_RX_BYTES,
BAT_CNT_TT_REQUEST_TX,
BAT_CNT_TT_REQUEST_RX,
BAT_CNT_TT_RESPONSE_TX,
BAT_CNT_TT_RESPONSE_RX,
BAT_CNT_TT_ROAM_ADV_TX,
BAT_CNT_TT_ROAM_ADV_RX,
BAT_CNT_NUM,
};
struct bat_priv {
atomic_t mesh_state;
struct net_device_stats stats;
uint64_t __percpu *bat_counters; /* Per cpu counters */
atomic_t aggregated_ogms; /* boolean */
atomic_t bonding; /* boolean */
atomic_t fragmentation; /* boolean */
@ -210,7 +227,7 @@ struct bat_priv {
spinlock_t vis_list_lock; /* protects vis_info::recv_list */
atomic_t num_local_tt;
/* Checksum of the local table, recomputed before sending a new OGM */
atomic_t tt_crc;
uint16_t tt_crc;
unsigned char *tt_buff;
int16_t tt_buff_len;
spinlock_t tt_buff_lock; /* protects tt_buff */
@ -388,8 +405,7 @@ struct bat_algo_ops {
/* called when primary interface is selected / changed */
void (*bat_primary_iface_set)(struct hard_iface *hard_iface);
/* prepare a new outgoing OGM for the send queue */
void (*bat_ogm_schedule)(struct hard_iface *hard_iface,
int tt_num_changes);
void (*bat_ogm_schedule)(struct hard_iface *hard_iface);
/* send scheduled OGM */
void (*bat_ogm_emit)(struct forw_packet *forw_packet);
};

View File

@ -207,7 +207,6 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
int vis_server = atomic_read(&bat_priv->vis_mode);
size_t buff_pos, buf_size;
char *buff;
int compare;
primary_if = primary_if_get_selected(bat_priv);
if (!primary_if)
@ -228,14 +227,18 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
entries = (struct vis_info_entry *)
((char *)packet + sizeof(*packet));
vis_data_insert_interface(packet->vis_orig,
&vis_if_list, true);
for (j = 0; j < packet->entries; j++) {
if (entries[j].quality == 0)
continue;
compare =
compare_eth(entries[j].src, packet->vis_orig);
if (compare_eth(entries[j].src,
packet->vis_orig))
continue;
vis_data_insert_interface(entries[j].src,
&vis_if_list,
compare);
false);
}
hlist_for_each_entry(entry, pos, &vis_if_list, list) {
@ -276,14 +279,18 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
entries = (struct vis_info_entry *)
((char *)packet + sizeof(*packet));
vis_data_insert_interface(packet->vis_orig,
&vis_if_list, true);
for (j = 0; j < packet->entries; j++) {
if (entries[j].quality == 0)
continue;
compare =
compare_eth(entries[j].src, packet->vis_orig);
if (compare_eth(entries[j].src,
packet->vis_orig))
continue;
vis_data_insert_interface(entries[j].src,
&vis_if_list,
compare);
false);
}
hlist_for_each_entry(entry, pos, &vis_if_list, list) {
@ -626,7 +633,7 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
best_tq = find_best_vis_server(bat_priv, info);
if (best_tq < 0)
return -1;
return best_tq;
}
for (i = 0; i < hash->size; i++) {
@ -878,7 +885,7 @@ int vis_init(struct bat_priv *bat_priv)
int hash_added;
if (bat_priv->vis_hash)
return 1;
return 0;
spin_lock_bh(&bat_priv->vis_hash_lock);
@ -929,7 +936,7 @@ int vis_init(struct bat_priv *bat_priv)
spin_unlock_bh(&bat_priv->vis_hash_lock);
start_vis_timer(bat_priv);
return 1;
return 0;
free_info:
kfree(bat_priv->my_vis_info);
@ -937,7 +944,7 @@ free_info:
err:
spin_unlock_bh(&bat_priv->vis_hash_lock);
vis_quit(bat_priv);
return 0;
return -ENOMEM;
}
/* Decrease the reference count on a hash item info */