
[INET]: Just rename the TCP hashtable functions/structs to inet_

This is to break down the complexity of the series of patches,
making it very clear that this one just does:

1. Renames tcp_ prefixed hashtable functions and data structures that
   were already mostly generic to inet_, so that they can be shared with
   DCCP and other INET transport protocols (see the short before/after
   sketch after this list).

2. Removes unused functions (__tb_head & tb_head)

3. Removes some leftover prototypes in the headers (tcp_bucket_unlock &
   tcp_v4_build_header)
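
A short caller-side before/after for the renames in item 1, condensed from the
tcp_v4_get_port() and __tcp_put_port() hunks below (behaviour is unchanged;
besides the new names, the kmem cache and the bind table size become explicit
arguments):

	/* before this patch */
	struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(snum)];
	struct tcp_bind_bucket *tb = tcp_bucket_create(head, snum);
	/* ... */
	tcp_bucket_destroy(tb);

	/* after this patch */
	struct inet_bind_hashbucket *head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)];
	struct inet_bind_bucket *tb = inet_bind_bucket_create(tcp_bucket_cachep, head, snum);
	/* ... */
	inet_bind_bucket_destroy(tcp_bucket_cachep, tb);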

Next changesets will move tcp_sk(sk)->bind_hash to inet_sock so that functions
such as tcp_inherit_port, __tcp_inherit_port, tcp_v4_get_port and
__tcp_put_port can be made generic, and will bring others like tcp_destroy_sock
closer to generic code (tcp_orphan_count will go to sk->sk_prot to allow this).

Eventually most of these functions will take the transport protocol's
inet_hashinfo structure as an explicit argument.
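
As a minimal sketch of that direction, assuming a hypothetical
inet_bhash_bucket() helper (not part of this patch) and using only the
struct inet_hashinfo members and the inet_bhashfn() introduced below:

	static inline struct inet_bind_hashbucket *
	inet_bhash_bucket(struct inet_hashinfo *hashinfo, const unsigned short snum)
	{
		/* Pick the bind chain from the protocol's own tables instead
		 * of the TCP-only tcp_bhash/tcp_bhash_size globals.
		 */
		return &hashinfo->bhash[inet_bhashfn(snum, hashinfo->bhash_size)];
	}

	/* A TCP caller would then do, e.g.:
	 *	head = inet_bhash_bucket(&tcp_hashinfo, snum);
	 */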

Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Arnaldo Carvalho de Melo 2005-08-09 19:59:44 -07:00 committed by David S. Miller
parent 304a16180f
commit 0f7ff9274e
7 changed files with 138 additions and 138 deletions

include/linux/tcp.h

@ -258,7 +258,7 @@ struct tcp_sock {
__u32 snd_sml; /* Last byte of the most recently transmitted small packet */
__u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
__u32 lsndtime; /* timestamp of last sent data packet (for restart window) */
struct tcp_bind_bucket *bind_hash;
struct inet_bind_bucket *bind_hash;
/* Delayed ACK control data */
struct {
__u8 pending; /* ACK is pending */

include/net/tcp.h

@ -44,13 +44,13 @@
* New scheme, half the table is for TIME_WAIT, the other half is
* for the rest. I'll experiment with dynamic table growth later.
*/
struct tcp_ehash_bucket {
struct inet_ehash_bucket {
rwlock_t lock;
struct hlist_head chain;
} __attribute__((__aligned__(8)));
/* This is for listening sockets, thus all sockets which possess wildcards. */
#define TCP_LHTABLE_SIZE 32 /* Yes, really, this is all you need. */
#define INET_LHTABLE_SIZE 32 /* Yes, really, this is all you need. */
/* There are a few simple rules, which allow for local port reuse by
* an application. In essence:
@ -83,31 +83,22 @@ struct tcp_ehash_bucket {
* users logged onto your box, isn't it nice to know that new data
* ports are created in O(1) time? I thought so. ;-) -DaveM
*/
struct tcp_bind_bucket {
struct inet_bind_bucket {
unsigned short port;
signed short fastreuse;
struct hlist_node node;
struct hlist_head owners;
};
#define tb_for_each(tb, node, head) hlist_for_each_entry(tb, node, head, node)
#define inet_bind_bucket_for_each(tb, node, head) \
hlist_for_each_entry(tb, node, head, node)
struct tcp_bind_hashbucket {
struct inet_bind_hashbucket {
spinlock_t lock;
struct hlist_head chain;
};
static inline struct tcp_bind_bucket *__tb_head(struct tcp_bind_hashbucket *head)
{
return hlist_entry(head->chain.first, struct tcp_bind_bucket, node);
}
static inline struct tcp_bind_bucket *tb_head(struct tcp_bind_hashbucket *head)
{
return hlist_empty(&head->chain) ? NULL : __tb_head(head);
}
extern struct tcp_hashinfo {
struct inet_hashinfo {
/* This is for sockets with full identity only. Sockets here will
* always be without wildcards and will have the following invariant:
*
@ -116,21 +107,21 @@ extern struct tcp_hashinfo {
* First half of the table is for sockets not in TIME_WAIT, second half
* is for TIME_WAIT sockets only.
*/
struct tcp_ehash_bucket *__tcp_ehash;
struct inet_ehash_bucket *ehash;
/* Ok, let's try this, I give up, we do need a local binding
* TCP hash as well as the others for fast bind/connect.
*/
struct tcp_bind_hashbucket *__tcp_bhash;
struct inet_bind_hashbucket *bhash;
int __tcp_bhash_size;
int __tcp_ehash_size;
int bhash_size;
int ehash_size;
/* All sockets in TCP_LISTEN state will be in here. This is the only
* table where wildcard'd TCP sockets can exist. Hash function here
* is just local port number.
*/
struct hlist_head __tcp_listening_hash[TCP_LHTABLE_SIZE];
struct hlist_head listening_hash[INET_LHTABLE_SIZE];
/* All the above members are written once at bootup and
* never written again _or_ are predominantly read-access.
@ -138,36 +129,39 @@ extern struct tcp_hashinfo {
* Now align to a new cache line as all the following members
* are often dirty.
*/
rwlock_t __tcp_lhash_lock ____cacheline_aligned;
atomic_t __tcp_lhash_users;
wait_queue_head_t __tcp_lhash_wait;
spinlock_t __tcp_portalloc_lock;
} tcp_hashinfo;
rwlock_t lhash_lock ____cacheline_aligned;
atomic_t lhash_users;
wait_queue_head_t lhash_wait;
spinlock_t portalloc_lock;
};
#define tcp_ehash (tcp_hashinfo.__tcp_ehash)
#define tcp_bhash (tcp_hashinfo.__tcp_bhash)
#define tcp_ehash_size (tcp_hashinfo.__tcp_ehash_size)
#define tcp_bhash_size (tcp_hashinfo.__tcp_bhash_size)
#define tcp_listening_hash (tcp_hashinfo.__tcp_listening_hash)
#define tcp_lhash_lock (tcp_hashinfo.__tcp_lhash_lock)
#define tcp_lhash_users (tcp_hashinfo.__tcp_lhash_users)
#define tcp_lhash_wait (tcp_hashinfo.__tcp_lhash_wait)
#define tcp_portalloc_lock (tcp_hashinfo.__tcp_portalloc_lock)
extern struct inet_hashinfo tcp_hashinfo;
#define tcp_ehash (tcp_hashinfo.ehash)
#define tcp_bhash (tcp_hashinfo.bhash)
#define tcp_ehash_size (tcp_hashinfo.ehash_size)
#define tcp_bhash_size (tcp_hashinfo.bhash_size)
#define tcp_listening_hash (tcp_hashinfo.listening_hash)
#define tcp_lhash_lock (tcp_hashinfo.lhash_lock)
#define tcp_lhash_users (tcp_hashinfo.lhash_users)
#define tcp_lhash_wait (tcp_hashinfo.lhash_wait)
#define tcp_portalloc_lock (tcp_hashinfo.portalloc_lock)
extern kmem_cache_t *tcp_bucket_cachep;
extern struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
unsigned short snum);
extern void tcp_bucket_destroy(struct tcp_bind_bucket *tb);
extern void tcp_bucket_unlock(struct sock *sk);
extern struct inet_bind_bucket *
inet_bind_bucket_create(kmem_cache_t *cachep,
struct inet_bind_hashbucket *head,
const unsigned short snum);
extern void inet_bind_bucket_destroy(kmem_cache_t *cachep,
struct inet_bind_bucket *tb);
extern int tcp_port_rover;
/* These are AF independent. */
static __inline__ int tcp_bhashfn(__u16 lport)
static inline int inet_bhashfn(const __u16 lport, const int bhash_size)
{
return (lport & (tcp_bhash_size - 1));
return lport & (bhash_size - 1);
}
extern void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
extern void tcp_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
unsigned short snum);
#if (BITS_PER_LONG == 64)
@ -212,7 +206,7 @@ struct tcp_tw_bucket {
__u32 tw_ts_recent;
long tw_ts_recent_stamp;
unsigned long tw_ttd;
struct tcp_bind_bucket *tw_tb;
struct inet_bind_bucket *tw_tb;
struct hlist_node tw_death_node;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
struct in6_addr tw_v6_daddr;
@ -366,14 +360,14 @@ extern void tcp_tw_deschedule(struct tcp_tw_bucket *tw);
(!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
/* These can have wildcards, don't try too hard. */
static __inline__ int tcp_lhashfn(unsigned short num)
static inline int inet_lhashfn(const unsigned short num)
{
return num & (TCP_LHTABLE_SIZE - 1);
return num & (INET_LHTABLE_SIZE - 1);
}
static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
static inline int inet_sk_listen_hashfn(const struct sock *sk)
{
return tcp_lhashfn(inet_sk(sk)->num);
return inet_lhashfn(inet_sk(sk)->num);
}
#define MAX_TCP_HEADER (128 + MAX_HEADER)
@ -799,9 +793,6 @@ extern void tcp_parse_options(struct sk_buff *skb,
* TCP v4 functions exported for the inet6 API
*/
extern int tcp_v4_build_header(struct sock *sk,
struct sk_buff *skb);
extern void tcp_v4_send_check(struct sock *sk,
struct tcphdr *th, int len,
struct sk_buff *skb);

net/ipv4/tcp.c

@ -272,6 +272,9 @@ int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);
kmem_cache_t *tcp_bucket_cachep;
EXPORT_SYMBOL_GPL(tcp_bucket_cachep);
kmem_cache_t *tcp_timewait_cachep;
atomic_t tcp_orphan_count = ATOMIC_INIT(0);
@ -2259,7 +2262,7 @@ void __init tcp_init(void)
sizeof(skb->cb));
tcp_bucket_cachep = kmem_cache_create("tcp_bind_bucket",
sizeof(struct tcp_bind_bucket),
sizeof(struct inet_bind_bucket),
0, SLAB_HWCACHE_ALIGN,
NULL, NULL);
if (!tcp_bucket_cachep)
@ -2277,9 +2280,9 @@ void __init tcp_init(void)
*
* The methodology is similar to that of the buffer cache.
*/
tcp_ehash = (struct tcp_ehash_bucket *)
tcp_ehash =
alloc_large_system_hash("TCP established",
sizeof(struct tcp_ehash_bucket),
sizeof(struct inet_ehash_bucket),
thash_entries,
(num_physpages >= 128 * 1024) ?
(25 - PAGE_SHIFT) :
@ -2294,9 +2297,9 @@ void __init tcp_init(void)
INIT_HLIST_HEAD(&tcp_ehash[i].chain);
}
tcp_bhash = (struct tcp_bind_hashbucket *)
tcp_bhash =
alloc_large_system_hash("TCP bind",
sizeof(struct tcp_bind_hashbucket),
sizeof(struct inet_bind_hashbucket),
tcp_ehash_size,
(num_physpages >= 128 * 1024) ?
(25 - PAGE_SHIFT) :
@ -2315,7 +2318,7 @@ void __init tcp_init(void)
* on available memory.
*/
for (order = 0; ((1 << order) << PAGE_SHIFT) <
(tcp_bhash_size * sizeof(struct tcp_bind_hashbucket));
(tcp_bhash_size * sizeof(struct inet_bind_hashbucket));
order++)
;
if (order >= 4) {

net/ipv4/tcp_diag.c

@ -590,7 +590,7 @@ static int tcpdiag_dump(struct sk_buff *skb, struct netlink_callback *cb)
if (!(r->tcpdiag_states&(TCPF_LISTEN|TCPF_SYN_RECV)))
goto skip_listen_ht;
tcp_listen_lock();
for (i = s_i; i < TCP_LHTABLE_SIZE; i++) {
for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
struct sock *sk;
struct hlist_node *node;
@ -646,7 +646,7 @@ skip_listen_ht:
return skb->len;
for (i = s_i; i < tcp_ehash_size; i++) {
struct tcp_ehash_bucket *head = &tcp_ehash[i];
struct inet_ehash_bucket *head = &tcp_ehash[i];
struct sock *sk;
struct hlist_node *node;

net/ipv4/tcp_ipv4.c

@ -89,12 +89,11 @@ static struct socket *tcp_socket;
void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
struct sk_buff *skb);
struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = {
.__tcp_lhash_lock = RW_LOCK_UNLOCKED,
.__tcp_lhash_users = ATOMIC_INIT(0),
.__tcp_lhash_wait
= __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.__tcp_lhash_wait),
.__tcp_portalloc_lock = SPIN_LOCK_UNLOCKED
struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
.lhash_lock = RW_LOCK_UNLOCKED,
.lhash_users = ATOMIC_INIT(0),
.lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
.portalloc_lock = SPIN_LOCK_UNLOCKED,
};
/*
@ -105,14 +104,14 @@ struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = {
int sysctl_local_port_range[2] = { 1024, 4999 };
int tcp_port_rover = 1024 - 1;
/* Allocate and initialize a new TCP local port bind bucket.
/* Allocate and initialize a new local port bind bucket.
* The bindhash mutex for snum's hash chain must be held here.
*/
struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
unsigned short snum)
struct inet_bind_bucket *inet_bind_bucket_create(kmem_cache_t *cachep,
struct inet_bind_hashbucket *head,
const unsigned short snum)
{
struct tcp_bind_bucket *tb = kmem_cache_alloc(tcp_bucket_cachep,
SLAB_ATOMIC);
struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, SLAB_ATOMIC);
if (tb) {
tb->port = snum;
tb->fastreuse = 0;
@ -123,20 +122,21 @@ struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
}
/* Caller must hold hashbucket lock for this tb with local BH disabled */
void tcp_bucket_destroy(struct tcp_bind_bucket *tb)
void inet_bind_bucket_destroy(kmem_cache_t *cachep, struct inet_bind_bucket *tb)
{
if (hlist_empty(&tb->owners)) {
__hlist_del(&tb->node);
kmem_cache_free(tcp_bucket_cachep, tb);
kmem_cache_free(cachep, tb);
}
}
/* Caller must disable local BH processing. */
static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child)
{
struct tcp_bind_hashbucket *head =
&tcp_bhash[tcp_bhashfn(inet_sk(child)->num)];
struct tcp_bind_bucket *tb;
struct inet_bind_hashbucket *head =
&tcp_bhash[inet_bhashfn(inet_sk(child)->num,
tcp_bhash_size)];
struct inet_bind_bucket *tb;
spin_lock(&head->lock);
tb = tcp_sk(sk)->bind_hash;
@ -152,15 +152,15 @@ inline void tcp_inherit_port(struct sock *sk, struct sock *child)
local_bh_enable();
}
void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
unsigned short snum)
void tcp_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
const unsigned short snum)
{
inet_sk(sk)->num = snum;
sk_add_bind_node(sk, &tb->owners);
tcp_sk(sk)->bind_hash = tb;
}
static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb)
static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb)
{
const u32 sk_rcv_saddr = tcp_v4_rcv_saddr(sk);
struct sock *sk2;
@ -190,9 +190,9 @@ static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb)
*/
static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
{
struct tcp_bind_hashbucket *head;
struct inet_bind_hashbucket *head;
struct hlist_node *node;
struct tcp_bind_bucket *tb;
struct inet_bind_bucket *tb;
int ret;
local_bh_disable();
@ -211,9 +211,9 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
rover++;
if (rover > high)
rover = low;
head = &tcp_bhash[tcp_bhashfn(rover)];
head = &tcp_bhash[inet_bhashfn(rover, tcp_bhash_size)];
spin_lock(&head->lock);
tb_for_each(tb, node, &head->chain)
inet_bind_bucket_for_each(tb, node, &head->chain)
if (tb->port == rover)
goto next;
break;
@ -238,9 +238,9 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
*/
snum = rover;
} else {
head = &tcp_bhash[tcp_bhashfn(snum)];
head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)];
spin_lock(&head->lock);
tb_for_each(tb, node, &head->chain)
inet_bind_bucket_for_each(tb, node, &head->chain)
if (tb->port == snum)
goto tb_found;
}
@ -261,7 +261,7 @@ tb_found:
}
tb_not_found:
ret = 1;
if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL)
if (!tb && (tb = inet_bind_bucket_create(tcp_bucket_cachep, head, snum)) == NULL)
goto fail_unlock;
if (hlist_empty(&tb->owners)) {
if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
@ -290,15 +290,16 @@ fail:
static void __tcp_put_port(struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(inet->num)];
struct tcp_bind_bucket *tb;
struct inet_bind_hashbucket *head = &tcp_bhash[inet_bhashfn(inet->num,
tcp_bhash_size)];
struct inet_bind_bucket *tb;
spin_lock(&head->lock);
tb = tcp_sk(sk)->bind_hash;
__sk_del_bind_node(sk);
tcp_sk(sk)->bind_hash = NULL;
inet->num = 0;
tcp_bucket_destroy(tb);
inet_bind_bucket_destroy(tcp_bucket_cachep, tb);
spin_unlock(&head->lock);
}
@ -344,7 +345,7 @@ static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible)
BUG_TRAP(sk_unhashed(sk));
if (listen_possible && sk->sk_state == TCP_LISTEN) {
list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
list = &tcp_listening_hash[inet_sk_listen_hashfn(sk)];
lock = &tcp_lhash_lock;
tcp_listen_wlock();
} else {
@ -381,7 +382,7 @@ void tcp_unhash(struct sock *sk)
tcp_listen_wlock();
lock = &tcp_lhash_lock;
} else {
struct tcp_ehash_bucket *head = &tcp_ehash[sk->sk_hashent];
struct inet_ehash_bucket *head = &tcp_ehash[sk->sk_hashent];
lock = &head->lock;
write_lock_bh(&head->lock);
}
@ -401,8 +402,10 @@ void tcp_unhash(struct sock *sk)
* connection. So always assume those are both wildcarded
* during the search since they can never be otherwise.
*/
static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head, u32 daddr,
unsigned short hnum, int dif)
static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head,
const u32 daddr,
const unsigned short hnum,
const int dif)
{
struct sock *result = NULL, *sk;
struct hlist_node *node;
@ -438,14 +441,15 @@ static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head, u32 daddr,
}
/* Optimize the common listener case. */
static inline struct sock *tcp_v4_lookup_listener(u32 daddr,
unsigned short hnum, int dif)
static inline struct sock *tcp_v4_lookup_listener(const u32 daddr,
const unsigned short hnum,
const int dif)
{
struct sock *sk = NULL;
struct hlist_head *head;
read_lock(&tcp_lhash_lock);
head = &tcp_listening_hash[tcp_lhashfn(hnum)];
head = &tcp_listening_hash[inet_lhashfn(hnum)];
if (!hlist_empty(head)) {
struct inet_sock *inet = inet_sk((sk = __sk_head(head)));
@ -470,11 +474,13 @@ sherry_cache:
* Local BH must be disabled here.
*/
static inline struct sock *__tcp_v4_lookup_established(u32 saddr, u16 sport,
u32 daddr, u16 hnum,
int dif)
static inline struct sock *__tcp_v4_lookup_established(const u32 saddr,
const u16 sport,
const u32 daddr,
const u16 hnum,
const int dif)
{
struct tcp_ehash_bucket *head;
struct inet_ehash_bucket *head;
TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
__u32 ports = TCP_COMBINED_PORTS(sport, hnum);
struct sock *sk;
@ -546,7 +552,7 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
__u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, tcp_ehash_size);
struct tcp_ehash_bucket *head = &tcp_ehash[hash];
struct inet_ehash_bucket *head = &tcp_ehash[hash];
struct sock *sk2;
struct hlist_node *node;
struct tcp_tw_bucket *tw;
@ -639,9 +645,9 @@ static inline u32 connect_port_offset(const struct sock *sk)
*/
static inline int tcp_v4_hash_connect(struct sock *sk)
{
unsigned short snum = inet_sk(sk)->num;
struct tcp_bind_hashbucket *head;
struct tcp_bind_bucket *tb;
const unsigned short snum = inet_sk(sk)->num;
struct inet_bind_hashbucket *head;
struct inet_bind_bucket *tb;
int ret;
if (!snum) {
@ -658,14 +664,14 @@ static inline int tcp_v4_hash_connect(struct sock *sk)
local_bh_disable();
for (i = 1; i <= range; i++) {
port = low + (i + offset) % range;
head = &tcp_bhash[tcp_bhashfn(port)];
head = &tcp_bhash[inet_bhashfn(port, tcp_bhash_size)];
spin_lock(&head->lock);
/* Does not bother with rcv_saddr checks,
* because the established check is already
* unique enough.
*/
tb_for_each(tb, node, &head->chain) {
inet_bind_bucket_for_each(tb, node, &head->chain) {
if (tb->port == port) {
BUG_TRAP(!hlist_empty(&tb->owners));
if (tb->fastreuse >= 0)
@ -678,7 +684,7 @@ static inline int tcp_v4_hash_connect(struct sock *sk)
}
}
tb = tcp_bucket_create(head, port);
tb = inet_bind_bucket_create(tcp_bucket_cachep, head, port);
if (!tb) {
spin_unlock(&head->lock);
break;
@ -713,7 +719,7 @@ ok:
goto out;
}
head = &tcp_bhash[tcp_bhashfn(snum)];
head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)];
tb = tcp_sk(sk)->bind_hash;
spin_lock_bh(&head->lock);
if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
@ -2055,7 +2061,7 @@ start_req:
}
read_unlock_bh(&tp->accept_queue.syn_wait_lock);
}
if (++st->bucket < TCP_LHTABLE_SIZE) {
if (++st->bucket < INET_LHTABLE_SIZE) {
sk = sk_head(&tcp_listening_hash[st->bucket]);
goto get_sk;
}
@ -2506,7 +2512,7 @@ void __init tcp_v4_init(struct net_proto_family *ops)
EXPORT_SYMBOL(ipv4_specific);
EXPORT_SYMBOL(tcp_bind_hash);
EXPORT_SYMBOL(tcp_bucket_create);
EXPORT_SYMBOL(inet_bind_bucket_create);
EXPORT_SYMBOL(tcp_hashinfo);
EXPORT_SYMBOL(tcp_inherit_port);
EXPORT_SYMBOL(tcp_listen_wlock);

net/ipv4/tcp_minisocks.c

@ -60,9 +60,9 @@ int tcp_tw_count;
/* Must be called with locally disabled BHs. */
static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
{
struct tcp_ehash_bucket *ehead;
struct tcp_bind_hashbucket *bhead;
struct tcp_bind_bucket *tb;
struct inet_ehash_bucket *ehead;
struct inet_bind_hashbucket *bhead;
struct inet_bind_bucket *tb;
/* Unlink from established hashes. */
ehead = &tcp_ehash[tw->tw_hashent];
@ -76,12 +76,12 @@ static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
write_unlock(&ehead->lock);
/* Disassociate with bind bucket. */
bhead = &tcp_bhash[tcp_bhashfn(tw->tw_num)];
bhead = &tcp_bhash[inet_bhashfn(tw->tw_num, tcp_bhash_size)];
spin_lock(&bhead->lock);
tb = tw->tw_tb;
__hlist_del(&tw->tw_bind_node);
tw->tw_tb = NULL;
tcp_bucket_destroy(tb);
inet_bind_bucket_destroy(tcp_bucket_cachep, tb);
spin_unlock(&bhead->lock);
#ifdef SOCK_REFCNT_DEBUG
@ -296,14 +296,14 @@ kill:
*/
static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
{
struct tcp_ehash_bucket *ehead = &tcp_ehash[sk->sk_hashent];
struct tcp_bind_hashbucket *bhead;
struct inet_ehash_bucket *ehead = &tcp_ehash[sk->sk_hashent];
struct inet_bind_hashbucket *bhead;
/* Step 1: Put TW into bind hash. Original socket stays there too.
Note, that any socket with inet_sk(sk)->num != 0 MUST be bound in
binding cache, even if it is closed.
*/
bhead = &tcp_bhash[tcp_bhashfn(inet_sk(sk)->num)];
bhead = &tcp_bhash[inet_bhashfn(inet_sk(sk)->num, tcp_bhash_size)];
spin_lock(&bhead->lock);
tw->tw_tb = tcp_sk(sk)->bind_hash;
BUG_TRAP(tcp_sk(sk)->bind_hash);

net/ipv6/tcp_ipv6.c

@ -98,11 +98,11 @@ static __inline__ int tcp_v6_sk_hashfn(struct sock *sk)
return tcp_v6_hashfn(laddr, lport, faddr, fport);
}
static inline int tcp_v6_bind_conflict(struct sock *sk,
struct tcp_bind_bucket *tb)
static inline int tcp_v6_bind_conflict(const struct sock *sk,
const struct inet_bind_bucket *tb)
{
struct sock *sk2;
struct hlist_node *node;
const struct sock *sk2;
const struct hlist_node *node;
/* We must walk the whole port owner list in this case. -DaveM */
sk_for_each_bound(sk2, node, &tb->owners) {
@ -126,8 +126,8 @@ static inline int tcp_v6_bind_conflict(struct sock *sk,
*/
static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
{
struct tcp_bind_hashbucket *head;
struct tcp_bind_bucket *tb;
struct inet_bind_hashbucket *head;
struct inet_bind_bucket *tb;
struct hlist_node *node;
int ret;
@ -146,9 +146,9 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
do { rover++;
if (rover > high)
rover = low;
head = &tcp_bhash[tcp_bhashfn(rover)];
head = &tcp_bhash[inet_bhashfn(rover, tcp_bhash_size)];
spin_lock(&head->lock);
tb_for_each(tb, node, &head->chain)
inet_bind_bucket_for_each(tb, node, &head->chain)
if (tb->port == rover)
goto next;
break;
@ -171,9 +171,9 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
/* OK, here is the one we will use. */
snum = rover;
} else {
head = &tcp_bhash[tcp_bhashfn(snum)];
head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)];
spin_lock(&head->lock);
tb_for_each(tb, node, &head->chain)
inet_bind_bucket_for_each(tb, node, &head->chain)
if (tb->port == snum)
goto tb_found;
}
@ -192,7 +192,7 @@ tb_found:
}
tb_not_found:
ret = 1;
if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL)
if (!tb && (tb = inet_bind_bucket_create(tcp_bucket_cachep, head, snum)) == NULL)
goto fail_unlock;
if (hlist_empty(&tb->owners)) {
if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
@ -224,7 +224,7 @@ static __inline__ void __tcp_v6_hash(struct sock *sk)
BUG_TRAP(sk_unhashed(sk));
if (sk->sk_state == TCP_LISTEN) {
list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
list = &tcp_listening_hash[inet_sk_listen_hashfn(sk)];
lock = &tcp_lhash_lock;
tcp_listen_wlock();
} else {
@ -264,7 +264,7 @@ static struct sock *tcp_v6_lookup_listener(struct in6_addr *daddr, unsigned shor
hiscore=0;
read_lock(&tcp_lhash_lock);
sk_for_each(sk, node, &tcp_listening_hash[tcp_lhashfn(hnum)]) {
sk_for_each(sk, node, &tcp_listening_hash[inet_lhashfn(hnum)]) {
if (inet_sk(sk)->num == hnum && sk->sk_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
@ -305,7 +305,7 @@ static inline struct sock *__tcp_v6_lookup_established(struct in6_addr *saddr, u
struct in6_addr *daddr, u16 hnum,
int dif)
{
struct tcp_ehash_bucket *head;
struct inet_ehash_bucket *head;
struct sock *sk;
struct hlist_node *node;
__u32 ports = TCP_COMBINED_PORTS(sport, hnum);
@ -461,7 +461,7 @@ static int __tcp_v6_check_established(struct sock *sk, __u16 lport,
int dif = sk->sk_bound_dev_if;
u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
int hash = tcp_v6_hashfn(daddr, inet->num, saddr, inet->dport);
struct tcp_ehash_bucket *head = &tcp_ehash[hash];
struct inet_ehash_bucket *head = &tcp_ehash[hash];
struct sock *sk2;
struct hlist_node *node;
struct tcp_tw_bucket *tw;
@ -540,8 +540,8 @@ static inline u32 tcpv6_port_offset(const struct sock *sk)
static int tcp_v6_hash_connect(struct sock *sk)
{
unsigned short snum = inet_sk(sk)->num;
struct tcp_bind_hashbucket *head;
struct tcp_bind_bucket *tb;
struct inet_bind_hashbucket *head;
struct inet_bind_bucket *tb;
int ret;
if (!snum) {
@ -558,14 +558,14 @@ static int tcp_v6_hash_connect(struct sock *sk)
local_bh_disable();
for (i = 1; i <= range; i++) {
port = low + (i + offset) % range;
head = &tcp_bhash[tcp_bhashfn(port)];
head = &tcp_bhash[inet_bhashfn(port, tcp_bhash_size)];
spin_lock(&head->lock);
/* Does not bother with rcv_saddr checks,
* because the established check is already
* unique enough.
*/
tb_for_each(tb, node, &head->chain) {
inet_bind_bucket_for_each(tb, node, &head->chain) {
if (tb->port == port) {
BUG_TRAP(!hlist_empty(&tb->owners));
if (tb->fastreuse >= 0)
@ -578,7 +578,7 @@ static int tcp_v6_hash_connect(struct sock *sk)
}
}
tb = tcp_bucket_create(head, port);
tb = inet_bind_bucket_create(tcp_bucket_cachep, head, port);
if (!tb) {
spin_unlock(&head->lock);
break;
@ -613,7 +613,7 @@ ok:
goto out;
}
head = &tcp_bhash[tcp_bhashfn(snum)];
head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)];
tb = tcp_sk(sk)->bind_hash;
spin_lock_bh(&head->lock);