author     David S. Miller <davem@sunset.davemloft.net>   2007-03-07 12:12:44 -0800
committer  David S. Miller <davem@sunset.davemloft.net>   2007-04-25 22:24:02 -0700
commit     fe067e8ab5e0dc5ca3c54634924c628da92090b4 (patch)
tree       98f5a6ebbb770f16682cfc52caea2da1e7eeb73b /net/ipv4/tcp.c
parent     02ea4923b4997d7e1310c027081f46d584b9d714 (diff)
[TCP]: Abstract out all write queue operations.

This allows the write queue implementation to be changed, for example,
to one which allows fast interval searching.

Signed-off-by: David S. Miller <davem@davemloft.net>
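Note: the patch replaces every direct access to sk->sk_send_head and
sk->sk_write_queue in tcp.c with named helpers. Their definitions live in
include/net/tcp.h and are not part of this diff (which is limited to
net/ipv4/tcp.c). What follows is a minimal sketch of the read-side
accessors, assuming they are thin inline wrappers that preserve the old
open-coded semantics:

        /* Sketch only: assumed bodies for the accessors used in the hunks
         * below; the real definitions are in include/net/tcp.h, outside
         * this diff.
         */
        static inline struct sk_buff *tcp_send_head(struct sock *sk)
        {
                /* First skb not yet sent; NULL when nothing is pending. */
                return sk->sk_send_head;
        }

        static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
        {
                /* Replaces the old sk->sk_write_queue.prev access. Callers
                 * check tcp_send_head() before dereferencing the result,
                 * so returning NULL on an empty queue is safe.
                 */
                return skb_peek_tail(&sk->sk_write_queue);
        }

Hiding the queue behind accessors like these is what lets a later patch
swap the plain sk_buff list for a structure supporting fast interval
searching without touching these call sites again.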
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--   net/ipv4/tcp.c   32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3834b10b5115..689f9330f1b9 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -470,10 +470,8 @@ static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
tcb->flags = TCPCB_FLAG_ACK;
tcb->sacked = 0;
skb_header_release(skb);
- __skb_queue_tail(&sk->sk_write_queue, skb);
+ tcp_add_write_queue_tail(sk, skb);
sk_charge_skb(sk, skb);
- if (!sk->sk_send_head)
- sk->sk_send_head = skb;
if (tp->nonagle & TCP_NAGLE_PUSH)
tp->nonagle &= ~TCP_NAGLE_PUSH;
}
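Note: the hunk above folds the tail append and the send-head bookkeeping
into one call. A plausible definition of tcp_add_write_queue_tail(),
assumed to mirror the two lines it removes:

        static inline void tcp_add_write_queue_tail(struct sock *sk,
                                                    struct sk_buff *skb)
        {
                __skb_queue_tail(&sk->sk_write_queue, skb);

                /* If nothing was pending, this skb becomes the send head,
                 * exactly as the removed open-coded check did.
                 */
                if (sk->sk_send_head == NULL)
                        sk->sk_send_head = skb;
        }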
@@ -491,8 +489,8 @@ static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags,
int mss_now, int nonagle)
{
- if (sk->sk_send_head) {
- struct sk_buff *skb = sk->sk_write_queue.prev;
+ if (tcp_send_head(sk)) {
+ struct sk_buff *skb = tcp_write_queue_tail(sk);
if (!(flags & MSG_MORE) || forced_push(tp))
tcp_mark_push(tp, skb);
tcp_mark_urg(tp, flags, skb);
@@ -526,13 +524,13 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
goto do_error;
while (psize > 0) {
- struct sk_buff *skb = sk->sk_write_queue.prev;
+ struct sk_buff *skb = tcp_write_queue_tail(sk);
struct page *page = pages[poffset / PAGE_SIZE];
int copy, i, can_coalesce;
int offset = poffset % PAGE_SIZE;
int size = min_t(size_t, psize, PAGE_SIZE - offset);
- if (!sk->sk_send_head || (copy = size_goal - skb->len) <= 0) {
+ if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
new_segment:
if (!sk_stream_memory_free(sk))
goto wait_for_sndbuf;
@@ -589,7 +587,7 @@ new_segment:
if (forced_push(tp)) {
tcp_mark_push(tp, skb);
__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
- } else if (skb == sk->sk_send_head)
+ } else if (skb == tcp_send_head(sk))
tcp_push_one(sk, mss_now);
continue;
@@ -704,9 +702,9 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
while (seglen > 0) {
int copy;
- skb = sk->sk_write_queue.prev;
+ skb = tcp_write_queue_tail(sk);
- if (!sk->sk_send_head ||
+ if (!tcp_send_head(sk) ||
(copy = size_goal - skb->len) <= 0) {
new_segment:
@@ -833,7 +831,7 @@ new_segment:
if (forced_push(tp)) {
tcp_mark_push(tp, skb);
__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
- } else if (skb == sk->sk_send_head)
+ } else if (skb == tcp_send_head(sk))
tcp_push_one(sk, mss_now);
continue;
@@ -860,9 +858,11 @@ out:
do_fault:
if (!skb->len) {
- if (sk->sk_send_head == skb)
- sk->sk_send_head = NULL;
- __skb_unlink(skb, &sk->sk_write_queue);
+ tcp_unlink_write_queue(skb, sk);
+ /* It is the one place in all of TCP, except connection
+ * reset, where we can be unlinking the send_head.
+ */
+ tcp_check_send_head(sk, skb);
sk_stream_free_skb(sk, skb);
}
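Note: the do_fault path above is, per the new comment, the one place
besides connection reset that may unlink the send head itself. Sketches of
the two helpers it now uses, assuming they keep the removed logic (the
order flips: unlink first, then fix up the head):

        static inline void tcp_unlink_write_queue(struct sk_buff *skb,
                                                  struct sock *sk)
        {
                __skb_unlink(skb, &sk->sk_write_queue);
        }

        static inline void tcp_check_send_head(struct sock *sk,
                                               struct sk_buff *skb_unlinked)
        {
                /* Clear the send head only if it was the skb just
                 * unlinked, matching the removed open-coded check.
                 */
                if (sk->sk_send_head == skb_unlinked)
                        sk->sk_send_head = NULL;
        }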
@@ -1732,7 +1732,7 @@ int tcp_disconnect(struct sock *sk, int flags)
tcp_clear_xmit_timers(sk);
__skb_queue_purge(&sk->sk_receive_queue);
- sk_stream_writequeue_purge(sk);
+ tcp_write_queue_purge(sk);
__skb_queue_purge(&tp->out_of_order_queue);
#ifdef CONFIG_NET_DMA
__skb_queue_purge(&sk->sk_async_wait_queue);
@@ -1758,7 +1758,7 @@ int tcp_disconnect(struct sock *sk, int flags)
tcp_set_ca_state(sk, TCP_CA_Open);
tcp_clear_retrans(tp);
inet_csk_delack_init(sk);
- sk->sk_send_head = NULL;
+ tcp_init_send_head(sk);
tp->rx_opt.saw_tstamp = 0;
tcp_sack_reset(&tp->rx_opt);
__sk_dst_reset(sk);
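Note: tcp_disconnect() swaps the generic sk_stream_writequeue_purge() and
the bare send-head reset for TCP-specific helpers. Assumed bodies, modeled
on the stream-layer code they replace:

        static inline void tcp_write_queue_purge(struct sock *sk)
        {
                struct sk_buff *skb;

                /* Free every queued skb, then give the memory back,
                 * matching what sk_stream_writequeue_purge() did.
                 */
                while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
                        sk_stream_free_skb(sk, skb);
                sk_stream_mem_reclaim(sk);
        }

        static inline void tcp_init_send_head(struct sock *sk)
        {
                sk->sk_send_head = NULL;
        }

Keeping tcp_init_send_head() separate from the purge preserves the
original two-step structure of tcp_disconnect(), where the queue purge and
the send-head reset happen at different points.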