alistair23-linux/include/net/snmp.h
Ingo Molnar 39c715b717 [PATCH] smp_processor_id() cleanup
This patch implements a number of smp_processor_id() cleanup ideas that
Arjan van de Ven and I came up with.

The previous __smp_processor_id/_smp_processor_id/smp_processor_id API
spaghetti was hard to follow both on the implementation side and on the
usage side.

Some of the complexity arose from poorly chosen names, and some from the
fact that not all architectures defined __smp_processor_id.

In the new code, there are two externally visible symbols:

 - smp_processor_id(): debug variant.

 - raw_smp_processor_id(): nondebug variant. Replaces all existing
   uses of _smp_processor_id() and __smp_processor_id(). Defined
   by every SMP architecture in include/asm-*/smp.h.

There is one new internal symbol, dependent on DEBUG_PREEMPT:

 - debug_smp_processor_id(): internal debug variant, mapped to
                             smp_processor_id().

Also, I moved debug_smp_processor_id() from lib/kernel_lock.c into a new
lib/smp_processor_id.c file.  All related comments were updated and/or
clarified.
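
As a rough sketch (simplified, not the exact code that ended up in
include/linux/smp.h and lib/smp_processor_id.c), the mapping and the
debug check look like this:

	#ifdef CONFIG_DEBUG_PREEMPT
	  extern unsigned int debug_smp_processor_id(void);
	# define smp_processor_id() debug_smp_processor_id()
	#else
	# define smp_processor_id() raw_smp_processor_id()
	#endif

	/* simplified shape of the debug variant: */
	unsigned int debug_smp_processor_id(void)
	{
		int this_cpu = raw_smp_processor_id();

		/* safe uses: preemption disabled or interrupts off */
		if (preempt_count() || irqs_disabled())
			return this_cpu;

		/* otherwise the task could migrate between CPUs: complain */
		printk(KERN_ERR "BUG: using smp_processor_id() in preemptible code\n");
		dump_stack();
		return this_cpu;
	}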

I have build/boot tested the following 8 .config combinations on x86:

 {SMP,UP} x {PREEMPT,!PREEMPT} x {DEBUG_PREEMPT,!DEBUG_PREEMPT}

I have also build/boot tested x64 on UP/PREEMPT/DEBUG_PREEMPT.  (Other
architectures are untested, but should work just fine.)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-06-21 18:46:13 -07:00


/*
 *
 *		SNMP MIB entries for the IP subsystem.
 *
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *
 *		We don't choose to implement SNMP in the kernel (this would
 *		be silly as SNMP is a pain in the backside in places). We do
 *		however need to collect the MIB statistics and export them
 *		out of /proc (eventually)
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		$Id: snmp.h,v 1.19 2001/06/14 13:40:46 davem Exp $
 *
 */
#ifndef _SNMP_H
#define _SNMP_H
#include <linux/cache.h>
#include <linux/snmp.h>
/*
 * MIBs are stored in arrays of unsigned long.
 */
/*
 * struct snmp_mib{}
 *  - list of entries for a particular API (such as /proc/net/snmp)
 *  - names of the entries.
 */
struct snmp_mib {
	char *name;
	int entry;
};

#define SNMP_MIB_ITEM(_name,_entry)	{ \
	.name = _name, \
	.entry = _entry, \
}

#define SNMP_MIB_SENTINEL { \
	.name = NULL, \
	.entry = 0, \
}
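/*
 * Illustrative sketch (not part of this header): a /proc table is built
 * from the helpers above as a NULL-terminated array, roughly along the
 * lines of net/ipv4/proc.c:
 *
 *	static struct snmp_mib snmp4_ipstats_list[] = {
 *		SNMP_MIB_ITEM("InReceives", IPSTATS_MIB_INRECEIVES),
 *		SNMP_MIB_ITEM("InHdrErrors", IPSTATS_MIB_INHDRERRORS),
 *		SNMP_MIB_SENTINEL
 *	};
 *
 * The sentinel's NULL .name marks the end of the table for the code
 * that walks it.
 */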
/*
 * We use all unsigned longs. Linux will soon be so reliable that even
 * these will rapidly get too small 8-). Seriously consider the IpInReceives
 * count on the 20Gb/s+ networks people expect in a few years' time!
 */
/*
 * The rule for padding:
 * Best is power of two because then the right structure can be found by a
 * simple shift. The structure should always be cache line aligned.
 * gcc needs n=alignto(cachelinesize, popcnt(sizeof(bla_mib))) shift/add
 * instructions to emulate multiply in case it is not power-of-two.
 * Currently n is always <= 3 for all sizes, so simple cache line alignment
 * is enough.
 *
 * The best solution would be a global CPU-local area; especially on 64-
 * and 128-byte cacheline machines it makes a *lot* of sense -AK
 */
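/*
 * Worked example of the "simple shift" above, assuming (hypothetically) a
 * 64-byte, cache-line-aligned mib struct: the copy for CPU n then lives at
 * base + (n << 6), i.e. one shift instead of a multiply.
 */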
#define __SNMP_MIB_ALIGN__ ____cacheline_aligned
/* IPstats */
#define IPSTATS_MIB_MAX __IPSTATS_MIB_MAX
struct ipstats_mib {
	unsigned long	mibs[IPSTATS_MIB_MAX];
} __SNMP_MIB_ALIGN__;
/* ICMP */
#define ICMP_MIB_DUMMY __ICMP_MIB_MAX
#define ICMP_MIB_MAX (__ICMP_MIB_MAX + 1)
struct icmp_mib {
	unsigned long	mibs[ICMP_MIB_MAX];
} __SNMP_MIB_ALIGN__;
/* ICMP6 (IPv6-ICMP) */
#define ICMP6_MIB_MAX __ICMP6_MIB_MAX
struct icmpv6_mib {
	unsigned long	mibs[ICMP6_MIB_MAX];
} __SNMP_MIB_ALIGN__;
/* TCP */
#define TCP_MIB_MAX __TCP_MIB_MAX
struct tcp_mib {
	unsigned long	mibs[TCP_MIB_MAX];
} __SNMP_MIB_ALIGN__;
/* UDP */
#define UDP_MIB_MAX __UDP_MIB_MAX
struct udp_mib {
	unsigned long	mibs[UDP_MIB_MAX];
} __SNMP_MIB_ALIGN__;
/* SCTP */
#define SCTP_MIB_MAX __SCTP_MIB_MAX
struct sctp_mib {
	unsigned long	mibs[SCTP_MIB_MAX];
} __SNMP_MIB_ALIGN__;
/* Linux */
#define LINUX_MIB_MAX __LINUX_MIB_MAX
struct linux_mib {
	unsigned long	mibs[LINUX_MIB_MAX];
};
/*
 * FIXME: On x86 and some other CPUs the split into user and softirq parts
 * is not needed because addl $1,memory is atomic against interrupts (but
 * atomic_inc would be overkill because of the lock cycles). Wants new
 * nonlocked_atomic_inc() primitives -AK
 */
#define DEFINE_SNMP_STAT(type, name)	\
	__typeof__(type) *name[2]
#define DECLARE_SNMP_STAT(type, name)	\
	extern __typeof__(type) *name[2]
#define SNMP_STAT_BHPTR(name)	(name[0])
#define SNMP_STAT_USRPTR(name)	(name[1])
#define SNMP_INC_STATS_BH(mib, field)	\
	(per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field]++)
#define SNMP_INC_STATS_OFFSET_BH(mib, field, offset)	\
	(per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field + (offset)]++)
#define SNMP_INC_STATS_USER(mib, field)	\
	(per_cpu_ptr(mib[1], raw_smp_processor_id())->mibs[field]++)
#define SNMP_INC_STATS(mib, field)	\
	(per_cpu_ptr(mib[!in_softirq()], raw_smp_processor_id())->mibs[field]++)
#define SNMP_DEC_STATS(mib, field)	\
	(per_cpu_ptr(mib[!in_softirq()], raw_smp_processor_id())->mibs[field]--)
#define SNMP_ADD_STATS_BH(mib, field, addend)	\
	(per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field] += addend)
#define SNMP_ADD_STATS_USER(mib, field, addend)	\
	(per_cpu_ptr(mib[1], raw_smp_processor_id())->mibs[field] += addend)
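/*
 * Illustrative usage sketch (names follow net/ipv4/udp.c, shown here only
 * as an example): a protocol defines its per-cpu statistics once and bumps
 * them with the helpers above, after both per-cpu areas have been
 * allocated:
 *
 *	DEFINE_SNMP_STAT(struct udp_mib, udp_statistics);
 *
 *	SNMP_INC_STATS_BH(udp_statistics, UDP_MIB_INDATAGRAMS);
 *	SNMP_INC_STATS_USER(udp_statistics, UDP_MIB_OUTDATAGRAMS);
 *
 * The _BH variant picks the softirq copy (index 0), the _USER variant the
 * process-context copy (index 1); SNMP_INC_STATS() chooses between them
 * based on in_softirq().
 */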
#endif