author     Eric Dumazet <dada1@cosmosbay.com>      2008-01-03 20:46:48 -0800
committer  David S. Miller <davem@davemloft.net>   2008-01-28 15:00:36 -0800
commit     65f7651788e18fadb2fbb7276af935d7871e1803 (patch)
tree       dcad32d4344d6d11d80061773d9d1dbc9ae92223 /include/net/sock.h
parent     571e7682026fd0e25833d103a3eeb74be29bf199 (diff)
[NET]: prot_inuse cleanups and optimizations
1) Cleanups (all functions are prefixed by sock_prot_inuse)

       sock_prot_inc_use(prot) -> sock_prot_inuse_add(prot, 1)
       sock_prot_dec_use(prot) -> sock_prot_inuse_add(prot, -1)
       sock_prot_inuse()       -> sock_prot_inuse_get()

   New functions: sock_prot_inuse_init() and sock_prot_inuse_free() to abstract pcounter use.

2) If CONFIG_PROC_FS=n, we can zap the 'inuse' member from "struct proto", since nobody wants to read the inuse value. This saves 1372 bytes on i386/SMP and some cpu cycles.

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
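A minimal sketch (not part of this patch) of how point 1 plays out at a protocol's call sites; example_hash() and example_unhash() are hypothetical helpers, and callers are assumed to run with local bh disabled, as the header comment below requires.

/* Illustrative only: hypothetical call sites using the renamed helper.
 * Assumes local bh is disabled by the caller.
 */
#include <net/sock.h>

static void example_hash(struct sock *sk)
{
	/* formerly: sock_prot_inc_use(sk->sk_prot); */
	sock_prot_inuse_add(sk->sk_prot, 1);
}

static void example_unhash(struct sock *sk)
{
	/* formerly: sock_prot_dec_use(sk->sk_prot); */
	sock_prot_inuse_add(sk->sk_prot, -1);
}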
Diffstat (limited to 'include/net/sock.h')
-rw-r--r--   include/net/sock.h   40
1 files changed, 30 insertions, 10 deletions
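For point 2, a rough sketch (again not taken from this diff) of where the new init/free/get helpers would sit; example_register(), example_unregister() and example_report() are hypothetical stand-ins for the registration and /proc reporting paths. With CONFIG_PROC_FS=n, sock_prot_inuse_init()/sock_prot_inuse_free() compile to no-ops and sock_prot_inuse_get() is not provided at all, which is what lets 'inuse' drop out of struct proto.

/* Illustrative only: hypothetical wrappers around the new helpers. */
#include <net/sock.h>

static int example_register(struct proto *prot)
{
	/* allocates the pcounter with CONFIG_PROC_FS=y; returns 0 otherwise */
	return sock_prot_inuse_init(prot);
}

static void example_unregister(struct proto *prot)
{
	/* releases the pcounter; no-op with CONFIG_PROC_FS=n */
	sock_prot_inuse_free(prot);
}

#ifdef CONFIG_PROC_FS
static int example_report(struct proto *prot)
{
	/* reads the aggregated counter; only available with CONFIG_PROC_FS=y */
	return sock_prot_inuse_get(prot);
}
#endif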
diff --git a/include/net/sock.h b/include/net/sock.h
index 3d938f6c6725..786fae858e77 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -548,7 +548,9 @@ struct proto {
int (*get_port)(struct sock *sk, unsigned short snum);
/* Keeping track of sockets in use */
+#ifdef CONFIG_PROC_FS
struct pcounter inuse;
+#endif
/* Memory pressure */
void (*enter_memory_pressure)(void);
@@ -584,9 +586,6 @@ struct proto {
#endif
};
-#define DEFINE_PROTO_INUSE(NAME) DEFINE_PCOUNTER(NAME)
-#define REF_PROTO_INUSE(NAME) PCOUNTER_MEMBER_INITIALIZER(NAME, .inuse)
-
extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);
@@ -615,21 +614,42 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */
+
+#ifdef CONFIG_PROC_FS
+# define DEFINE_PROTO_INUSE(NAME) DEFINE_PCOUNTER(NAME)
+# define REF_PROTO_INUSE(NAME) PCOUNTER_MEMBER_INITIALIZER(NAME, .inuse)
/* Called with local bh disabled */
-static __inline__ void sock_prot_inc_use(struct proto *prot)
+static inline void sock_prot_inuse_add(struct proto *prot, int inc)
{
- pcounter_add(&prot->inuse, 1);
+ pcounter_add(&prot->inuse, inc);
}
-
-static __inline__ void sock_prot_dec_use(struct proto *prot)
+static inline int sock_prot_inuse_init(struct proto *proto)
{
- pcounter_add(&prot->inuse, -1);
+ return pcounter_alloc(&proto->inuse);
}
-
-static __inline__ int sock_prot_inuse(struct proto *proto)
+static inline int sock_prot_inuse_get(struct proto *proto)
{
return pcounter_getval(&proto->inuse);
}
+static inline void sock_prot_inuse_free(struct proto *proto)
+{
+ pcounter_free(&proto->inuse);
+}
+#else
+# define DEFINE_PROTO_INUSE(NAME)
+# define REF_PROTO_INUSE(NAME)
+static void inline sock_prot_inuse_add(struct proto *prot, int inc)
+{
+}
+static int inline sock_prot_inuse_init(struct proto *proto)
+{
+ return 0;
+}
+static void inline sock_prot_inuse_free(struct proto *proto)
+{
+}
+#endif
+
/* With per-bucket locks this operation is not-atomic, so that
* this version is not worse.