author		Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2017-10-12 21:21:39 +0200
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2017-10-12 21:21:39 +0200
commit		00449628f3526aef5b682cc4c18f6c422d3be810 (patch)
tree		8a9f434000c32a01fe30a3fca9aa83fa0dc3042a
parent		f82786d7a94f06a35ab273002cedc1385bae8e9f (diff)
Revert "socket, bpf: fix possible use after free"
This reverts commit 02f7e4101092b88e57c73171174976c8a72a3eba, which was
commit 02f7e4101092b88e57c73171174976c8a72a3eba upstream.

Turns out the backport to 4.9 was broken.

Reported-by: Shuah Khan <shuahkh@osg.samsung.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--	net/core/filter.c	15
-rw-r--r--	net/core/sock.c		5
2 files changed, 3 insertions, 17 deletions
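
For context on what is being undone: the change being reverted split sk_filter_charge() so that the filter's refcount was taken with atomic_inc_not_zero() before the memory charge, and dropped again if the charge failed, so that a clone could not revive a filter whose last reference had already gone away. Below is a minimal userspace sketch of that take-a-reference-only-if-still-live pattern, using plain C11 atomics; struct object, object_get_not_zero() and take_and_charge() are illustrative names, not kernel API.

/*
 * Userspace model of the refcount pattern used by the reverted backport:
 * take a reference only if the object is still live (refcnt > 0), and
 * drop it again if the follow-up charge fails.  All names here are
 * illustrative stand-ins, not kernel code.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct object {
	atomic_int refcnt;
};

/* Rough equivalent of atomic_inc_not_zero(): grab a reference unless refcnt == 0. */
static bool object_get_not_zero(struct object *obj)
{
	int old = atomic_load(&obj->refcnt);

	while (old != 0) {
		/* on failure, 'old' is reloaded with the current value */
		if (atomic_compare_exchange_weak(&obj->refcnt, &old, old + 1))
			return true;
	}
	return false;
}

static void object_put(struct object *obj)
{
	if (atomic_fetch_sub(&obj->refcnt, 1) == 1)
		printf("last reference dropped, object would be freed here\n");
}

/* Stand-in for the memory-accounting step; always succeeds in this sketch. */
static bool charge_succeeds(void)
{
	return true;
}

static bool take_and_charge(struct object *obj)
{
	if (!object_get_not_zero(obj))
		return false;		/* object already dying: do not touch it */

	if (!charge_succeeds()) {
		object_put(obj);	/* undo the reference we just took */
		return false;
	}
	return true;
}

int main(void)
{
	struct object live  = { .refcnt = 1 };
	struct object dying = { .refcnt = 0 };

	printf("live:  %s\n", take_and_charge(&live)  ? "charged" : "skipped");
	printf("dying: %s\n", take_and_charge(&dying) ? "charged" : "skipped");
	return 0;
}

The revert below goes back to a plain atomic_inc() inside sk_filter_charge(), because the 4.9 backport of the split version turned out to be broken.
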
diff --git a/net/core/filter.c b/net/core/filter.c
index bfeedbbde214..4eb4ce0aeef4 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -937,31 +937,20 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
 /* try to charge the socket memory if there is space available
  * return true on success
  */
-static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
+bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
 {
 	u32 filter_size = bpf_prog_size(fp->prog->len);
 
 	/* same check as in sock_kmalloc() */
 	if (filter_size <= sysctl_optmem_max &&
 	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
+		atomic_inc(&fp->refcnt);
 		atomic_add(filter_size, &sk->sk_omem_alloc);
 		return true;
 	}
 	return false;
 }
 
-bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
-{
-	if (!atomic_inc_not_zero(&fp->refcnt))
-		return false;
-
-	if (!__sk_filter_charge(sk, fp)) {
-		sk_filter_release(fp);
-		return false;
-	}
-	return true;
-}
-
 static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
 {
 	struct sock_filter *old_prog;
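
The restored sk_filter_charge() above takes the reference unconditionally once the "same check as in sock_kmalloc()" accounting test passes. As a rough illustration of that optmem-style check, here is a small self-contained userspace sketch; fake_sock, fake_charge(), fake_uncharge() and the 20480-byte limit are made-up stand-ins rather than the real kernel structures or sysctl values.

/*
 * Sketch of the optmem-style accounting check: a charge succeeds only if
 * the request itself fits under the limit and the socket's running total
 * stays below it.  Names and constants are invented for illustration.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static const unsigned int optmem_max = 20480;	/* stand-in for sysctl_optmem_max */

struct fake_sock {
	atomic_uint omem_alloc;			/* stand-in for sk->sk_omem_alloc */
};

static bool fake_charge(struct fake_sock *sk, unsigned int size)
{
	/* same shape as the check in sk_filter_charge()/sock_kmalloc() */
	if (size <= optmem_max &&
	    atomic_load(&sk->omem_alloc) + size < optmem_max) {
		atomic_fetch_add(&sk->omem_alloc, size);
		return true;
	}
	return false;
}

static void fake_uncharge(struct fake_sock *sk, unsigned int size)
{
	atomic_fetch_sub(&sk->omem_alloc, size);
}

int main(void)
{
	struct fake_sock sk = { .omem_alloc = 0 };

	printf("charge 4096 bytes:  %d\n", fake_charge(&sk, 4096));	/* fits    -> 1 */
	printf("charge 32768 bytes: %d\n", fake_charge(&sk, 32768));	/* too big -> 0 */
	fake_uncharge(&sk, 4096);
	return 0;
}
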
diff --git a/net/core/sock.c b/net/core/sock.c
index 2a77cc50f021..231c38d91855 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1528,16 +1528,13 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 		sock_reset_flag(newsk, SOCK_DONE);
 		skb_queue_head_init(&newsk->sk_error_queue);
 
-		rcu_read_lock();
-		filter = rcu_dereference(sk->sk_filter);
+		filter = rcu_dereference_protected(newsk->sk_filter, 1);
 		if (filter != NULL)
 			/* though it's an empty new sock, the charging may fail
 			 * if sysctl_optmem_max was changed between creation of
 			 * original socket and cloning
 			 */
 			is_charged = sk_filter_charge(newsk, filter);
-		RCU_INIT_POINTER(newsk->sk_filter, filter);
-		rcu_read_unlock();
 
 		if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
 			/* We need to make sure that we don't uncharge the new