author     Justin Waters <justin.waters@timesys.com>  2008-02-26 13:07:02 -0500
committer  Justin Waters <justin.waters@timesys.com>  2008-02-26 13:07:02 -0500
commit     b80a32b9cc634adfa8eaef33ec981e7febf2ade2 (patch)
tree       f256bce13ba11f514a388160df84e1410bedbe2b /net/ipv4/tcp.c
parent     594133ef22fae0d737bd1b57352cf3f48a192c63 (diff)
Update the i.MX31 Kernel to 2.6.23 (tags: 2.6.23-mx31ads-200802261807, 2.6.23-mx31-200802261807)
This is the result of a brute-force attempt to update the kernel to 2.6.23. Now that we have a git tree, our effort will be a little nicer in the future.

Signed-off-by: Justin Waters <justin.waters@timesys.com>
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--  net/ipv4/tcp.c | 28
1 files changed, 19 insertions, 9 deletions
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 11ff18258892..7e740112b238 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1117,6 +1117,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
long timeo;
struct task_struct *user_recv = NULL;
int copied_early = 0;
+ struct sk_buff *skb;
lock_sock(sk);
@@ -1143,16 +1144,26 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
#ifdef CONFIG_NET_DMA
tp->ucopy.dma_chan = NULL;
preempt_disable();
- if ((len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
- !sysctl_tcp_low_latency && __get_cpu_var(softnet_data).net_dma) {
- preempt_enable_no_resched();
- tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len);
- } else
- preempt_enable_no_resched();
+ skb = skb_peek_tail(&sk->sk_receive_queue);
+ {
+ int available = 0;
+
+ if (skb)
+ available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
+ if ((available < target) &&
+ (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
+ !sysctl_tcp_low_latency &&
+ __get_cpu_var(softnet_data).net_dma) {
+ preempt_enable_no_resched();
+ tp->ucopy.pinned_list =
+ dma_pin_iovec_pages(msg->msg_iov, len);
+ } else {
+ preempt_enable_no_resched();
+ }
+ }
#endif
do {
- struct sk_buff *skb;
u32 offset;
/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
@@ -1440,7 +1451,6 @@ skip_copy:
#ifdef CONFIG_NET_DMA
if (tp->ucopy.dma_chan) {
- struct sk_buff *skb;
dma_cookie_t done, used;
dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
@@ -2421,7 +2431,7 @@ void __init tcp_init(void)
tcp_hashinfo.bind_bucket_cachep =
kmem_cache_create("tcp_bind_bucket",
sizeof(struct inet_bind_bucket), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
/* Size and allocate the main established and bind bucket
* hash tables.
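
For illustration only, here is a small standalone C sketch (not kernel code; should_pin_pages() and the copybreak constant are hypothetical stand-ins) of the arithmetic the tcp_recvmsg() hunk above introduces: page pinning for NET_DMA is attempted only when the bytes already queued past *seq would not satisfy the wakeup target by themselves.

/*
 * Standalone illustration (not kernel code) of the "available bytes"
 * check added to tcp_recvmsg() in the hunk above.  All names and the
 * copybreak value below are hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DMA_COPYBREAK 4096	/* stand-in for sysctl_tcp_dma_copybreak */

/*
 * Decide whether pinning the user iovec pages for DMA is worthwhile.
 * "available" mirrors TCP_SKB_CB(skb)->seq + skb->len - (*seq): how many
 * bytes already sit in the receive queue beyond what the reader consumed.
 */
static bool should_pin_pages(bool queue_nonempty, uint32_t tail_seq,
			     uint32_t tail_len, uint32_t copied_seq,
			     uint32_t target, size_t len,
			     bool peeking, bool low_latency)
{
	uint32_t available = 0;

	if (queue_nonempty)
		available = tail_seq + tail_len - copied_seq;

	/* Pin only if the queued data cannot satisfy the wakeup target,
	 * the request is large enough to amortize the setup cost, and
	 * neither MSG_PEEK nor tcp_low_latency rules DMA out. */
	return available < target &&
	       len > DMA_COPYBREAK &&
	       !peeking && !low_latency;
}

int main(void)
{
	/* Made-up example: 2 KiB already queued, caller asked for 64 KiB
	 * with a 16 KiB wakeup target -> pinning still pays off. */
	printf("pin pages: %s\n",
	       should_pin_pages(true, 1000, 2048, 1000, 16384, 65536,
				false, false) ? "yes" : "no");
	return 0;
}

The intent, presumably, is that when enough data is already queued a plain CPU copy completes immediately, so the overhead of pinning user pages and issuing asynchronous DMA is not worth paying.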