diff options
author | chunx <chunx@nvidia.com> | 2013-07-05 11:42:05 +0800 |
---|---|---|
committer | Harry Hong <hhong@nvidia.com> | 2013-11-27 17:46:56 -0800 |
commit | 5df1d176b1f205956b5fa3a97aa2e8f79e78532f (patch) | |
tree | 67b373f6a7b132f0ca6a5e40c2f237ed8ba4bb2b | |
parent | eac96cb2d67594da324b89c7ef1e27c88b58ee46 (diff) |
active-standby: add cmdline into /proc/net/
Add cmdline into /proc/net/{tcp,tcp6,udp,udp6} files.
Get the process's cmdline from a sock's corresponding inode pointer,
so that the cmdline can be used by the Android active-standby app
to find the corresponding package name.
Resolve a "BUG: scheduling while atomic" issue when
sk_get_waiting_task_cmdline is being called.
Bug 1185001
Bug 1342554
Change-Id: Idc8651e4bb85b8a152dfade9689a719f7d72687d
(cherry picked from commit 5dcfe4f561bd8d1767e0938dfd7565b2b7718478)
Change-Id: I1673d56751a8a95b988b325b3857c8a5fe4c78ce
(cherry picked from commit 4516e7c330bb4c5da5020df0d2cc1cb5e9274d9f)
Signed-off-by: Chun Xu <chunx@nvidia.com>
Reviewed-on: http://git-master/r/336174
Reviewed-by: Harry Hong <hhong@nvidia.com>
Tested-by: Harry Hong <hhong@nvidia.com>
-rw-r--r-- | fs/eventpoll.c | 35 | ||||
-rw-r--r-- | fs/proc/base.c | 4 | ||||
-rw-r--r-- | fs/select.c | 5 | ||||
-rw-r--r-- | include/linux/eventpoll.h | 5 | ||||
-rw-r--r-- | include/linux/poll.h | 5 | ||||
-rw-r--r-- | include/linux/sched.h | 13 | ||||
-rw-r--r-- | include/net/sock.h | 4 | ||||
-rw-r--r-- | net/core/sock.c | 142 | ||||
-rw-r--r-- | net/ipv4/tcp_ipv4.c | 19 | ||||
-rw-r--r-- | net/ipv4/udp.c | 15 | ||||
-rw-r--r-- | net/ipv6/tcp_ipv6.c | 13 | ||||
-rw-r--r-- | net/ipv6/udp.c | 13 | ||||
-rw-r--r-- | net/socket.c | 3 |
13 files changed, 258 insertions, 18 deletions
diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 0863bab42c4d..205b31e10f6b 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -9,6 +9,8 @@ * * Davide Libenzi <davidel@xmailserver.org> * + * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved. + * */ #include <linux/init.h> @@ -893,7 +895,7 @@ static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd) * mechanism. It is called by the stored file descriptors when they * have events to report. */ -static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key) +int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key) { int pwake = 0; unsigned long flags; @@ -990,6 +992,7 @@ static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead, if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) { init_waitqueue_func_entry(&pwq->wait, ep_poll_callback); + pwq->wait.private = get_thread_process(current); pwq->whead = whead; pwq->base = epi; add_wait_queue(whead, &pwq->wait); @@ -1201,6 +1204,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event, spin_lock(&tfile->f_lock); list_add_tail(&epi->fllink, &tfile->f_ep_links); spin_unlock(&tfile->f_lock); + tfile->f_path.dentry->d_inode->i_private = get_thread_process(current); /* * Add the current item to the RB tree. 
All RB tree operations are @@ -1639,6 +1643,35 @@ static void clear_tfile_check_list(void) INIT_LIST_HEAD(&tfile_check_list); } +struct task_struct *get_epoll_file_task(struct file *file) +{ + struct list_head *lh; + struct epitem *epi = NULL; + struct eppoll_entry *pwq = NULL; + struct task_struct *task = NULL; + wait_queue_head_t *whead = NULL; + wait_queue_t *wq = NULL; + + lh = &file->f_ep_links; + if (!list_empty(lh)) { + lh = lh->next; + epi = list_entry(lh, struct epitem, fllink); + lh = &epi->pwqlist; + if (!list_empty(lh)) { + lh = lh->next; + pwq = list_entry(lh, struct eppoll_entry, llink); + lh = &pwq->whead->task_list; + if (!list_empty(lh)) { + lh = lh->next; + wq = list_entry(lh, wait_queue_t, task_list); + task = wq->private; + } + } + } + + return task; +} + /* * Open an eventpoll file descriptor. */ diff --git a/fs/proc/base.c b/fs/proc/base.c index c8cb15dcca08..68375ebe25c3 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -45,6 +45,8 @@ * * Paul Mundt <paul.mundt@nokia.com>: * Overall revision about smaps. + * + * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved. */ #include <asm/uaccess.h> @@ -209,7 +211,7 @@ struct mm_struct *mm_for_maps(struct task_struct *task) return mm_access(task, PTRACE_MODE_READ); } -static int proc_pid_cmdline(struct task_struct *task, char * buffer) +static int proc_pid_cmdline(struct task_struct *task, char *buffer) { int res = 0; unsigned int len; diff --git a/fs/select.c b/fs/select.c index 0baa0a351a1c..a010736dfe20 100644 --- a/fs/select.c +++ b/fs/select.c @@ -12,6 +12,8 @@ * 24 January 2000 * Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation * of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian). + * + * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved. 
*/ #include <linux/kernel.h> @@ -202,7 +204,7 @@ static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key) return default_wake_function(&dummy_wait, mode, sync, key); } -static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key) +int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key) { struct poll_table_entry *entry; @@ -211,6 +213,7 @@ static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key) return 0; return __pollwake(wait, mode, sync, key); } +EXPORT_SYMBOL(pollwake); /* Add a new entry */ static void __pollwait(struct file *filp, wait_queue_head_t *wait_address, diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h index 6f8be328770a..9378240bad9f 100644 --- a/include/linux/eventpoll.h +++ b/include/linux/eventpoll.h @@ -9,6 +9,8 @@ * * Davide Libenzi <davidel@xmailserver.org> * + * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved. + * */ #ifndef _LINUX_EVENTPOLL_H @@ -108,6 +110,9 @@ static inline void eventpoll_release(struct file *file) eventpoll_release_file(file); } +int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key); +struct task_struct *get_epoll_file_task(struct file *file); + #else static inline void eventpoll_init_file(struct file *file) {} diff --git a/include/linux/poll.h b/include/linux/poll.h index 48fe8bc398d1..6769d98f2aeb 100644 --- a/include/linux/poll.h +++ b/include/linux/poll.h @@ -1,3 +1,7 @@ +/* + * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved. 
+ */ + #ifndef _LINUX_POLL_H #define _LINUX_POLL_H @@ -96,6 +100,7 @@ struct poll_wqueues { extern void poll_initwait(struct poll_wqueues *pwq); extern void poll_freewait(struct poll_wqueues *pwq); +extern int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key); extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state, ktime_t *expires, unsigned long slack); extern long select_estimate_accuracy(struct timespec *tv); diff --git a/include/linux/sched.h b/include/linux/sched.h index 43ae5a574fdb..e5f98b9f0daa 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1,3 +1,7 @@ +/* + * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved. + */ + #ifndef _LINUX_SCHED_H #define _LINUX_SCHED_H @@ -1682,6 +1686,15 @@ static inline struct pid *task_session(struct task_struct *task) return task->group_leader->pids[PIDTYPE_SID].pid; } +static inline struct task_struct *get_thread_process(struct task_struct *thread) +{ + struct task_struct *task = thread; + while (task->pid != task->tgid) { + task = task->group_leader; + } + return task; +} + struct pid_namespace; /* diff --git a/include/net/sock.h b/include/net/sock.h index f673ba5b6b1a..8083bbc5808c 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -36,6 +36,8 @@ * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. + * + * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved. 
*/ #ifndef _SOCK_H #define _SOCK_H @@ -1488,6 +1490,8 @@ extern int compat_sock_common_setsockopt(struct socket *sock, int level, extern void sk_common_release(struct sock *sk); +extern char *sk_get_waiting_task_cmdline(struct sock *sk, char *cmdline); + /* * Default socket callbacks and setup code */ diff --git a/net/core/sock.c b/net/core/sock.c index 561eb57f590c..5dc8814db8db 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -87,6 +87,8 @@ * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. + * + * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved. */ #include <linux/capability.h> @@ -136,6 +138,8 @@ #include <net/tcp.h> #endif +#include <linux/eventpoll.h> + static DEFINE_MUTEX(proto_list_mutex); static LIST_HEAD(proto_list); @@ -2352,6 +2356,144 @@ void sk_common_release(struct sock *sk) } EXPORT_SYMBOL(sk_common_release); +char *sk_get_waiting_task_cmdline(struct sock *sk, char *cmdline) +{ + bool softirq_enabled = false; + int res = 0; + unsigned int len; + char *program_name = cmdline; + struct task_struct *task = NULL; + struct mm_struct *mm = NULL; + static char *apk_path_prefix = "/data/data"; + wait_queue_t *wq = NULL; + struct list_head *lh = NULL; + struct socket_wq *sk_wq = NULL; + wait_queue_func_t wait_func; + enum pid_type type; + struct pid *pid = NULL; + struct fown_struct *fown = NULL; + struct file *file; + int preempt_count; + + *program_name = '\0'; + + if (!sk || !sk->sk_wq) + goto out; + lh = sk->sk_wq->wait.task_list.next; + if (!wq_has_sleeper(sk->sk_wq)) { + sk_wq = sk->sk_wq; + if (sk_wq->fasync_list && sk_wq->fasync_list->fa_file) { + fown = &sk_wq->fasync_list->fa_file->f_owner; + pid = fown->pid; + type = fown->pid_type; + do_each_pid_task(pid, type, task) { + if (task) + break; + } while_each_pid_task(pid, type, task); + printk(KERN_DEBUG "Async wakeup process:%p\n", task); + } + } else 
{ + lh = sk->sk_wq->wait.task_list.next; + wq = list_entry(lh, wait_queue_t, task_list); + + wait_func = wq->func; + printk(KERN_DEBUG "Wakeup function:%p\n", wait_func); + if (wait_func == pollwake) + task = ((struct poll_wqueues *) + (wq->private))->polling_task; + else if (wait_func == default_wake_function) + task = (struct task_struct *)(wq->private); + else if (wait_func == ep_poll_callback) + task = (struct task_struct *)(wq->private); + else if (wait_func == autoremove_wake_function) + task = (struct task_struct *)(wq->private); + else + printk(KERN_ERR "Unhandled wakeup:%p\n", wait_func); + + if (task) + task = get_thread_process(task); + } + +#ifdef CONFIG_EPOLL + if (!task) { + file = sk->sk_socket->file; + if (file) + task = get_epoll_file_task(file); + } +#endif + + if (!task && sk && sk->sk_socket) + task = SOCK_INODE(sk->sk_socket)->i_private; + + if (!task) { + printk(KERN_WARNING "Can't find a process for this sock.\n"); + goto out; + } + + mm = get_task_mm(task); + if (mm && mm->arg_end) { + len = mm->arg_end - mm->arg_start; + + if (len > PAGE_SIZE) + len = PAGE_SIZE; + + if (softirq_count()) { + softirq_enabled = true; + local_bh_enable(); + } + if (preempt_count()) { + preempt_count = preempt_count(); + preempt_count() = 0; + } + + res = access_process_vm(task, mm->arg_start, cmdline, len, 0); + + if (res > 0 && cmdline[res-1] != '\0' && len < PAGE_SIZE) { + len = strnlen(cmdline, res); + if (len < res) { + res = len; + } else { + len = mm->env_end - mm->env_start; + if (len > PAGE_SIZE - res) + len = PAGE_SIZE - res; + res += access_process_vm(task, + mm->env_start, cmdline+res, len, 0); + res = strnlen(cmdline, res); + } + } + + if (preempt_count) + preempt_count() = preempt_count; + if (softirq_enabled) + local_bh_disable(); + + if (res > PAGE_SIZE) + cmdline[PAGE_SIZE-1] = '\0'; + + len = strlen(apk_path_prefix); + if (!strncmp(apk_path_prefix, program_name, len)) + program_name += len; + else + program_name = strrchr(cmdline, '/'); + + if 
(program_name == NULL) + program_name = cmdline; + else + program_name++; + } + + if (mm) + mmput(mm); + + len = strlen(program_name); + snprintf(program_name + len, PAGE_SIZE-(program_name-cmdline)-len, + " %d %s", task->pid, task->comm); +out: + return program_name; +} +EXPORT_SYMBOL(sk_get_waiting_task_cmdline); + + #ifdef CONFIG_PROC_FS #define PROTO_INUSE_NR 64 /* should be enough for the first time */ struct prot_inuse { diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index ae03b7b75af6..46b8ce33154f 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -48,6 +48,8 @@ * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind * a single port at the same time. + * + * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved. */ #define pr_fmt(fmt) "TCP: " fmt @@ -2434,6 +2436,9 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len) __u16 destp = ntohs(inet->inet_dport); __u16 srcp = ntohs(inet->inet_sport); int rx_queue; + unsigned long cmdline = __get_free_page(GFP_TEMPORARY); + if (cmdline == NULL) + return; if (icsk->icsk_pending == ICSK_TIME_RETRANS) { timer_active = 1; @@ -2458,7 +2463,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len) rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0); seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX " - "%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n", + "%08X %5d %8d %lu %d %pK %lu %lu %u %u %d %s%n", i, src, srcp, dest, destp, sk->sk_state, tp->write_seq - tp->snd_una, rx_queue, @@ -2474,7 +2479,10 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len) (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong, tp->snd_cwnd, tcp_in_initial_slowstart(tp) ? 
-1 : tp->snd_ssthresh, + sk_get_waiting_task_cmdline(sk, cmdline), len); + + free_page(cmdline); } static void get_timewait4_sock(const struct inet_timewait_sock *tw, @@ -2508,9 +2516,10 @@ static int tcp4_seq_show(struct seq_file *seq, void *v) if (v == SEQ_START_TOKEN) { seq_printf(seq, "%-*s\n", TMPSZ - 1, - " sl local_address rem_address st tx_queue " - "rx_queue tr tm->when retrnsmt uid timeout " - "inode"); + " sl local_address rem_address st tx_queue " + "rx_queue tr tm->when retrnsmt uid timeout " + "inode " + "cmdline"); goto out; } st = seq->private; @@ -2527,7 +2536,7 @@ static int tcp4_seq_show(struct seq_file *seq, void *v) get_timewait4_sock(v, seq, st->num, &len); break; } - seq_printf(seq, "%*s\n", TMPSZ - 1 - len, ""); + seq_printf(seq, "\n"); out: return 0; } diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 0b6136d578f6..c83b937e4914 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -75,6 +75,8 @@ * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. + * + * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved. 
*/ #define pr_fmt(fmt) "UDP: " fmt @@ -2088,15 +2090,20 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f, __be32 src = inet->inet_rcv_saddr; __u16 destp = ntohs(inet->inet_dport); __u16 srcp = ntohs(inet->inet_sport); + unsigned long cmdline = __get_free_page(GFP_TEMPORARY); + if (cmdline == NULL) + return; seq_printf(f, "%5d: %08X:%04X %08X:%04X" - " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d%n", + " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d %s%n", bucket, src, srcp, dest, destp, sp->sk_state, sk_wmem_alloc_get(sp), sk_rmem_alloc_get(sp), 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), atomic_read(&sp->sk_refcnt), sp, - atomic_read(&sp->sk_drops), len); + atomic_read(&sp->sk_drops), + sk_get_waiting_task_cmdline(sp, cmdline), len); + free_page(cmdline); } int udp4_seq_show(struct seq_file *seq, void *v) @@ -2105,13 +2112,13 @@ int udp4_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "%-127s\n", " sl local_address rem_address st tx_queue " "rx_queue tr tm->when retrnsmt uid timeout " - "inode ref pointer drops"); + "inode ref pointer drops cmdline"); else { struct udp_iter_state *state = seq->private; int len; udp4_format_sock(v, seq, state->bucket, &len); - seq_printf(seq, "%*s\n", 127 - len, ""); + seq_printf(seq, "\n"); } return 0; } diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 7ee712167732..2624a8d6a842 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -21,6 +21,8 @@ * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. + * + * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved. 
*/ #include <linux/bottom_half.h> @@ -1945,6 +1947,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) const struct tcp_sock *tp = tcp_sk(sp); const struct inet_connection_sock *icsk = inet_csk(sp); const struct ipv6_pinfo *np = inet6_sk(sp); + unsigned long cmdline = __get_free_page(GFP_TEMPORARY); + if (cmdline == NULL) + return; dest = &np->daddr; src = &np->rcv_saddr; @@ -1967,7 +1972,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " - "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n", + "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d %s\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], srcp, @@ -1987,8 +1992,10 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) jiffies_to_clock_t(icsk->icsk_ack.ato), (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong, tp->snd_cwnd, - tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh + tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh, + sk_get_waiting_task_cmdline(sp, cmdline) ); + free_page(cmdline); } static void get_timewait6_sock(struct seq_file *seq, @@ -2030,7 +2037,7 @@ static int tcp6_seq_show(struct seq_file *seq, void *v) "local_address " "remote_address " "st tx_queue rx_queue tr tm->when retrnsmt" - " uid timeout inode\n"); + " uid timeout inode cmdline\n"); goto out; } st = seq->private; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index f79bfdbc247f..29f6125592e1 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -19,6 +19,8 @@ * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. + * + * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved. 
*/ #include <linux/errno.h> @@ -1397,6 +1399,9 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket struct ipv6_pinfo *np = inet6_sk(sp); const struct in6_addr *dest, *src; __u16 destp, srcp; + unsigned long cmdline = __get_free_page(GFP_TEMPORARY); + if (cmdline == NULL) + return; dest = &np->daddr; src = &np->rcv_saddr; @@ -1404,7 +1409,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket srcp = ntohs(inet->inet_sport); seq_printf(seq, "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " - "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n", + "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d %s\n", bucket, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], srcp, @@ -1417,7 +1422,9 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket sock_i_uid(sp), 0, sock_i_ino(sp), atomic_read(&sp->sk_refcnt), sp, - atomic_read(&sp->sk_drops)); + atomic_read(&sp->sk_drops), + sk_get_waiting_task_cmdline(sp, cmdline)); + free_page(cmdline); } int udp6_seq_show(struct seq_file *seq, void *v) @@ -1428,7 +1435,7 @@ int udp6_seq_show(struct seq_file *seq, void *v) "local_address " "remote_address " "st tx_queue rx_queue tr tm->when retrnsmt" - " uid timeout inode ref pointer drops\n"); + " uid timeout inode ref pointer drops cmdline\n"); else udp6_sock_seq_show(seq, v, ((struct udp_iter_state *)seq->private)->bucket); return 0; diff --git a/net/socket.c b/net/socket.c index 47ce3ea44300..3eaf6884a421 100644 --- a/net/socket.c +++ b/net/socket.c @@ -868,6 +868,7 @@ static ssize_t do_sock_read(struct msghdr *msg, struct kiocb *iocb, msg->msg_iov = (struct iovec *)iov; msg->msg_iovlen = nr_segs; msg->msg_flags = (file->f_flags & O_NONBLOCK) ? 
MSG_DONTWAIT : 0; + SOCK_INODE(sock)->i_private = get_thread_process(current); return __sock_recvmsg(iocb, sock, msg, size, msg->msg_flags); } @@ -1103,6 +1104,7 @@ static unsigned int sock_poll(struct file *file, poll_table *wait) * We can't return errors to poll, so it's either yes or no. */ sock = file->private_data; + SOCK_INODE(sock)->i_private = get_thread_process(current); return sock->ops->poll(file, sock, wait); } @@ -1349,6 +1351,7 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol) retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK)); if (retval < 0) goto out_release; + SOCK_INODE(sock)->i_private = get_thread_process(current); out: /* It may be already another descriptor 8) Not kernel problem. */ |