author      Eric Dumazet <eric.dumazet@gmail.com>   2011-10-20 17:45:43 -0400
committer   David S. Miller <davem@davemloft.net>   2011-10-20 17:45:43 -0400
commit      05bdd2f14351176d368e8ddc67993690a2d1bfb6 (patch)
tree        06c00c0af56add8602fba296490b4c598418082f /include/linux
parent      20c4cb792de2b5839537a99a469f4529ef1047f5 (diff)
net: constify skbuff and Qdisc elements
Preliminary patch before tcp constification

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/skbuff.h   17
1 file changed, 9 insertions(+), 8 deletions(-)
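
What the constification buys: helpers that only read an skb or a queue can now be called through const pointers without casting const away. A minimal sketch of a caller (first_skb_len() is a hypothetical helper, not kernel API; the queue lock is assumed held, since skb_peek() returns a volatile reference):

#include <linux/skbuff.h>

static unsigned int first_skb_len(const struct sk_buff_head *queue)
{
	const struct sk_buff *skb = skb_peek(queue);

	return skb ? skb->len : 0;
}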
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 1ebf1ea29d60..3411f22e7d16 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -853,9 +853,9 @@ static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
* The reference count is not incremented and the reference is therefore
* volatile. Use with caution.
*/
-static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
+static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
- struct sk_buff *list = ((struct sk_buff *)list_)->next;
+ struct sk_buff *list = ((const struct sk_buff *)list_)->next;
if (list == (struct sk_buff *)list_)
list = NULL;
return list;
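
The cast above works because struct sk_buff_head deliberately begins with the same two pointers as struct sk_buff, so the list head can be cast to a struct sk_buff and compared as the sentinel of the circular list. A simplified layout sketch (the real definitions carry many more fields):

struct sk_buff {
	struct sk_buff	*next;	/* first two members shared with the head */
	struct sk_buff	*prev;
	/* ... many more fields ... */
};

struct sk_buff_head {
	struct sk_buff	*next;	/* must mirror sk_buff so the head can */
	struct sk_buff	*prev;	/* serve as the circular list sentinel */
	__u32		qlen;
	spinlock_t	lock;
};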
@@ -874,9 +874,9 @@ static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
* The reference count is not incremented and the reference is therefore
* volatile. Use with caution.
*/
-static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
+static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
- struct sk_buff *list = ((struct sk_buff *)list_)->prev;
+ struct sk_buff *list = ((const struct sk_buff *)list_)->prev;
if (list == (struct sk_buff *)list_)
list = NULL;
return list;
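
With both peek helpers taking const, read-only queue inspection needs no casts. A minimal sketch (queue_is_singular() is hypothetical; the queue lock is assumed held):

static bool queue_is_singular(const struct sk_buff_head *q)
{
	const struct sk_buff *head = skb_peek(q);

	/* Exactly one element: non-empty, and head and tail coincide. */
	return head && head == skb_peek_tail(q);
}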
@@ -1830,7 +1830,7 @@ static inline dma_addr_t skb_frag_dma_map(struct device *dev,
* Returns true if modifying the header part of the cloned buffer
* does not require the data to be copied.
*/
-static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len)
+static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
{
return !skb_header_cloned(skb) &&
skb_headroom(skb) + len <= skb->hdr_len;
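
A common calling pattern for skb_clone_writable(): rewrite the header in place when that is safe, otherwise unshare it first. A hedged sketch (prepare_header_rewrite() is hypothetical; the fallback via pskb_expand_head() follows the usual kernel idiom):

#include <linux/if_ether.h>

static int prepare_header_rewrite(struct sk_buff *skb)
{
	/* Writable in place if not cloned, or if the clone's private
	 * header area covers the bytes we intend to touch.
	 */
	if (!skb_cloned(skb) || skb_clone_writable(skb, ETH_HLEN))
		return 0;

	/* Otherwise take a private copy of the header portion. */
	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}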
@@ -2451,7 +2451,8 @@ static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
/* LRO sets gso_size but not gso_type, whereas if GSO is really
* wanted then gso_type will be set. */
- struct skb_shared_info *shinfo = skb_shinfo(skb);
+ const struct skb_shared_info *shinfo = skb_shinfo(skb);
+
if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
unlikely(shinfo->gso_type == 0)) {
__skb_warn_lro_forwarding(skb);
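
skb_warn_if_lro() guards paths that must never see LRO-aggregated packets, the forwarding path above all. A minimal sketch of the calling pattern (try_forward() is a hypothetical function):

static int try_forward(struct sk_buff *skb)
{
	/* LRO-merged skbs must not be forwarded: warn and drop. */
	if (skb_warn_if_lro(skb)) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	/* ... hand the skb to the egress device ... */
	return NET_RX_SUCCESS;
}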
@@ -2475,7 +2476,7 @@ static inline void skb_forward_csum(struct sk_buff *skb)
* Instead of forcing ip_summed to CHECKSUM_NONE, we can
* use this helper, to document places where we make this assertion.
*/
-static inline void skb_checksum_none_assert(struct sk_buff *skb)
+static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
#ifdef DEBUG
BUG_ON(skb->ip_summed != CHECKSUM_NONE);
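
A typical user is a driver RX path whose hardware performs no checksum verification; the assertion documents that assumption where a bare assignment would be silent. A minimal sketch (my_driver_rx() is hypothetical):

static void my_driver_rx(struct sk_buff *skb)
{
	/* Freshly allocated RX skbs start at CHECKSUM_NONE; assert it
	 * rather than assigning it again.
	 */
	skb_checksum_none_assert(skb);
	netif_receive_skb(skb);
}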
@@ -2484,7 +2485,7 @@ static inline void skb_checksum_none_assert(struct sk_buff *skb)
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
-static inline bool skb_is_recycleable(struct sk_buff *skb, int skb_size)
+static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
{
if (irqs_disabled())
return false;
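
The remainder of this last hunk is truncated above. skb_is_recycleable() feeds the RX-buffer recycling scheme some drivers run on TX completion; a hedged sketch of that pattern (my_tx_complete(), struct my_ring and MY_RX_BUF_SIZE are hypothetical; skb_recycle() is the companion kernel helper that resets an skb for reuse):

static void my_tx_complete(struct my_ring *ring, struct sk_buff *skb)
{
	if (skb_is_recycleable(skb, MY_RX_BUF_SIZE)) {
		skb_recycle(skb);	/* reset state for reuse as an RX buffer */
		skb_queue_head(&ring->rx_recycle, skb);
	} else {
		dev_kfree_skb_any(skb);
	}
}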