Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_dev.c | 4
-rw-r--r--  net/appletalk/atalk_proc.c | 2
-rw-r--r--  net/appletalk/ddp.c | 37
-rw-r--r--  net/appletalk/sysctl_net_atalk.c | 5
-rw-r--r--  net/atm/lec.c | 6
-rw-r--r--  net/batman-adv/bat_v_elp.c | 6
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c | 16
-rw-r--r--  net/batman-adv/translation-table.c | 32
-rw-r--r--  net/bluetooth/hci_conn.c | 8
-rw-r--r--  net/bluetooth/hidp/sock.c | 1
-rw-r--r--  net/bluetooth/l2cap_core.c | 9
-rw-r--r--  net/bridge/br_if.c | 13
-rw-r--r--  net/bridge/br_input.c | 23
-rw-r--r--  net/bridge/br_multicast.c | 4
-rw-r--r--  net/bridge/br_netfilter_hooks.c | 1
-rw-r--r--  net/bridge/br_netfilter_ipv6.c | 2
-rw-r--r--  net/bridge/br_netlink.c | 2
-rw-r--r--  net/bridge/netfilter/ebtables.c | 3
-rw-r--r--  net/core/dev.c | 16
-rw-r--r--  net/core/failover.c | 6
-rw-r--r--  net/core/fib_rules.c | 6
-rw-r--r--  net/core/filter.c | 8
-rw-r--r--  net/core/flow_dissector.c | 3
-rw-r--r--  net/core/skbuff.c | 10
-rw-r--r--  net/dsa/dsa.c | 11
-rw-r--r--  net/ipv4/fou.c | 4
-rw-r--r--  net/ipv4/ip_output.c | 1
-rw-r--r--  net/ipv4/raw.c | 4
-rw-r--r--  net/ipv4/route.c | 30
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 5
-rw-r--r--  net/ipv4/tcp_input.c | 10
-rw-r--r--  net/ipv4/tcp_ipv4.c | 13
-rw-r--r--  net/ipv4/udp_offload.c | 16
-rw-r--r--  net/ipv6/ip6_fib.c | 4
-rw-r--r--  net/ipv6/ip6_flowlabel.c | 22
-rw-r--r--  net/ipv6/netfilter/ip6t_srh.c | 6
-rw-r--r--  net/ipv6/route.c | 51
-rw-r--r--  net/ipv6/sit.c | 2
-rw-r--r--  net/l2tp/l2tp_core.c | 10
-rw-r--r--  net/mac80211/debugfs_netdev.c | 2
-rw-r--r--  net/mac80211/driver-ops.h | 3
-rw-r--r--  net/mac80211/key.c | 9
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 2
-rw-r--r--  net/mac80211/trace_msg.h | 7
-rw-r--r--  net/mac80211/tx.c | 3
-rw-r--r--  net/ncsi/ncsi-rsp.c | 6
-rw-r--r--  net/netfilter/Kconfig | 1
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 42
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 34
-rw-r--r--  net/netfilter/nf_conntrack_proto.c | 2
-rw-r--r--  net/netfilter/nf_nat_core.c | 11
-rw-r--r--  net/netfilter/nf_tables_api.c | 30
-rw-r--r--  net/netfilter/nfnetlink_log.c | 2
-rw-r--r--  net/netfilter/nfnetlink_queue.c | 2
-rw-r--r--  net/netfilter/nft_dynset.c | 13
-rw-r--r--  net/netfilter/nft_lookup.c | 13
-rw-r--r--  net/netfilter/nft_objref.c | 32
-rw-r--r--  net/netfilter/nft_set_rbtree.c | 7
-rw-r--r--  net/netfilter/xt_time.c | 23
-rw-r--r--  net/netrom/af_netrom.c | 76
-rw-r--r--  net/netrom/nr_loopback.c | 2
-rw-r--r--  net/netrom/nr_route.c | 2
-rw-r--r--  net/netrom/sysctl_net_netrom.c | 5
-rw-r--r--  net/nfc/nci/hci.c | 8
-rw-r--r--  net/packet/af_packet.c | 62
-rw-r--r--  net/rds/af_rds.c | 3
-rw-r--r--  net/rds/bind.c | 2
-rw-r--r--  net/rds/ib_fmr.c | 11
-rw-r--r--  net/rds/ib_rdma.c | 3
-rw-r--r--  net/rose/rose_loopback.c | 27
-rw-r--r--  net/rxrpc/call_object.c | 32
-rw-r--r--  net/rxrpc/conn_client.c | 20
-rw-r--r--  net/rxrpc/input.c | 12
-rw-r--r--  net/rxrpc/local_object.c | 3
-rw-r--r--  net/sched/act_mirred.c | 3
-rw-r--r--  net/sched/sch_cake.c | 57
-rw-r--r--  net/sctp/sm_sideeffect.c | 29
-rw-r--r--  net/sctp/sm_statefuns.c | 35
-rw-r--r--  net/strparser/strparser.c | 12
-rw-r--r--  net/sunrpc/cache.c | 3
-rw-r--r--  net/sunrpc/xprtsock.c | 4
-rw-r--r--  net/tipc/name_table.c | 3
-rw-r--r--  net/tipc/netlink_compat.c | 24
-rw-r--r--  net/tipc/socket.c | 4
-rw-r--r--  net/tls/tls_device.c | 55
-rw-r--r--  net/tls/tls_device_fallback.c | 16
-rw-r--r--  net/tls/tls_main.c | 29
-rw-r--r--  net/tls/tls_sw.c | 18
-rw-r--r--  net/wireless/nl80211.c | 18
-rw-r--r--  net/wireless/reg.c | 39
-rw-r--r--  net/xdp/xdp_umem.c | 19
92 files changed, 894 insertions, 400 deletions
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index b2d9c8f27cd7..1991ce2eb268 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -368,10 +368,12 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
ifrr.ifr_ifru = ifr->ifr_ifru;
switch (cmd) {
+ case SIOCSHWTSTAMP:
+ if (!net_eq(dev_net(dev), &init_net))
+ break;
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
- case SIOCSHWTSTAMP:
case SIOCGHWTSTAMP:
if (netif_device_present(real_dev) && ops->ndo_do_ioctl)
err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd);
diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
index 8006295f8bd7..dda73991bb54 100644
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -255,7 +255,7 @@ out_interface:
goto out;
}
-void __exit atalk_proc_exit(void)
+void atalk_proc_exit(void)
{
remove_proc_entry("interface", atalk_proc_dir);
remove_proc_entry("route", atalk_proc_dir);
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 9b6bc5abe946..795fbc6c06aa 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1910,12 +1910,16 @@ static const char atalk_err_snap[] __initconst =
/* Called by proto.c on kernel start up */
static int __init atalk_init(void)
{
- int rc = proto_register(&ddp_proto, 0);
+ int rc;
- if (rc != 0)
+ rc = proto_register(&ddp_proto, 0);
+ if (rc)
goto out;
- (void)sock_register(&atalk_family_ops);
+ rc = sock_register(&atalk_family_ops);
+ if (rc)
+ goto out_proto;
+
ddp_dl = register_snap_client(ddp_snap_id, atalk_rcv);
if (!ddp_dl)
printk(atalk_err_snap);
@@ -1923,12 +1927,33 @@ static int __init atalk_init(void)
dev_add_pack(&ltalk_packet_type);
dev_add_pack(&ppptalk_packet_type);
- register_netdevice_notifier(&ddp_notifier);
+ rc = register_netdevice_notifier(&ddp_notifier);
+ if (rc)
+ goto out_sock;
+
aarp_proto_init();
- atalk_proc_init();
- atalk_register_sysctl();
+ rc = atalk_proc_init();
+ if (rc)
+ goto out_aarp;
+
+ rc = atalk_register_sysctl();
+ if (rc)
+ goto out_proc;
out:
return rc;
+out_proc:
+ atalk_proc_exit();
+out_aarp:
+ aarp_cleanup_module();
+ unregister_netdevice_notifier(&ddp_notifier);
+out_sock:
+ dev_remove_pack(&ppptalk_packet_type);
+ dev_remove_pack(&ltalk_packet_type);
+ unregister_snap_client(ddp_dl);
+ sock_unregister(PF_APPLETALK);
+out_proto:
+ proto_unregister(&ddp_proto);
+ goto out;
}
module_init(atalk_init);
diff --git a/net/appletalk/sysctl_net_atalk.c b/net/appletalk/sysctl_net_atalk.c
index c744a853fa5f..d945b7c0176d 100644
--- a/net/appletalk/sysctl_net_atalk.c
+++ b/net/appletalk/sysctl_net_atalk.c
@@ -45,9 +45,12 @@ static struct ctl_table atalk_table[] = {
static struct ctl_table_header *atalk_table_header;
-void atalk_register_sysctl(void)
+int __init atalk_register_sysctl(void)
{
atalk_table_header = register_net_sysctl(&init_net, "net/appletalk", atalk_table);
+ if (!atalk_table_header)
+ return -ENOMEM;
+ return 0;
}
void atalk_unregister_sysctl(void)
diff --git a/net/atm/lec.c b/net/atm/lec.c
index d7f5cf5b7594..ad4f829193f0 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -710,7 +710,10 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
static int lec_mcast_attach(struct atm_vcc *vcc, int arg)
{
- if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg])
+ if (arg < 0 || arg >= MAX_LEC_ITF)
+ return -EINVAL;
+ arg = array_index_nospec(arg, MAX_LEC_ITF);
+ if (!dev_lec[arg])
return -EINVAL;
vcc->proto_data = dev_lec[arg];
return lec_mcast_make(netdev_priv(dev_lec[arg]), vcc);
@@ -728,6 +731,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
i = arg;
if (arg >= MAX_LEC_ITF)
return -EINVAL;
+ i = array_index_nospec(arg, MAX_LEC_ITF);
if (!dev_lec[i]) {
int size;
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index ef0dec20c7d8..5da183b2f4c9 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -104,8 +104,10 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo);
- /* free the TID stats immediately */
- cfg80211_sinfo_release_content(&sinfo);
+ if (!ret) {
+ /* free the TID stats immediately */
+ cfg80211_sinfo_release_content(&sinfo);
+ }
dev_put(real_netdev);
if (ret == -ENOENT) {
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 5fdde2947802..cf2bcea7df82 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -803,6 +803,8 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
const u8 *mac, const unsigned short vid)
{
struct batadv_bla_claim search_claim, *claim;
+ struct batadv_bla_claim *claim_removed_entry;
+ struct hlist_node *claim_removed_node;
ether_addr_copy(search_claim.addr, mac);
search_claim.vid = vid;
@@ -813,10 +815,18 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__,
mac, batadv_print_vid(vid));
- batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
- batadv_choose_claim, claim);
- batadv_claim_put(claim); /* reference from the hash is gone */
+ claim_removed_node = batadv_hash_remove(bat_priv->bla.claim_hash,
+ batadv_compare_claim,
+ batadv_choose_claim, claim);
+ if (!claim_removed_node)
+ goto free_claim;
+ /* reference from the hash is gone */
+ claim_removed_entry = hlist_entry(claim_removed_node,
+ struct batadv_bla_claim, hash_entry);
+ batadv_claim_put(claim_removed_entry);
+
+free_claim:
/* don't need the reference from hash_find() anymore */
batadv_claim_put(claim);
}
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 8dcd4968cde7..6ec0e67be560 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -616,14 +616,26 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv,
struct batadv_tt_global_entry *tt_global,
const char *message)
{
+ struct batadv_tt_global_entry *tt_removed_entry;
+ struct hlist_node *tt_removed_node;
+
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Deleting global tt entry %pM (vid: %d): %s\n",
tt_global->common.addr,
batadv_print_vid(tt_global->common.vid), message);
- batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
- batadv_choose_tt, &tt_global->common);
- batadv_tt_global_entry_put(tt_global);
+ tt_removed_node = batadv_hash_remove(bat_priv->tt.global_hash,
+ batadv_compare_tt,
+ batadv_choose_tt,
+ &tt_global->common);
+ if (!tt_removed_node)
+ return;
+
+ /* drop reference of remove hash entry */
+ tt_removed_entry = hlist_entry(tt_removed_node,
+ struct batadv_tt_global_entry,
+ common.hash_entry);
+ batadv_tt_global_entry_put(tt_removed_entry);
}
/**
@@ -1337,9 +1349,10 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
unsigned short vid, const char *message,
bool roaming)
{
+ struct batadv_tt_local_entry *tt_removed_entry;
struct batadv_tt_local_entry *tt_local_entry;
u16 flags, curr_flags = BATADV_NO_FLAGS;
- void *tt_entry_exists;
+ struct hlist_node *tt_removed_node;
tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
if (!tt_local_entry)
@@ -1368,15 +1381,18 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
*/
batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
- tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash,
+ tt_removed_node = batadv_hash_remove(bat_priv->tt.local_hash,
batadv_compare_tt,
batadv_choose_tt,
&tt_local_entry->common);
- if (!tt_entry_exists)
+ if (!tt_removed_node)
goto out;
- /* extra call to free the local tt entry */
- batadv_tt_local_entry_put(tt_local_entry);
+ /* drop reference of remove hash entry */
+ tt_removed_entry = hlist_entry(tt_removed_node,
+ struct batadv_tt_local_entry,
+ common.hash_entry);
+ batadv_tt_local_entry_put(tt_removed_entry);
out:
if (tt_local_entry)
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index bd4978ce8c45..3cf0764d5793 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -1276,6 +1276,14 @@ int hci_conn_check_link_mode(struct hci_conn *conn)
!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
return 0;
+ /* The minimum encryption key size needs to be enforced by the
+ * host stack before establishing any L2CAP connections. The
+ * specification in theory allows a minimum of 1, but to align
+ * BR/EDR and LE transports, a minimum of 7 is chosen.
+ */
+ if (conn->enc_key_size < HCI_MIN_ENC_KEY_SIZE)
+ return 0;
+
return 1;
}
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 9f85a1943be9..2151913892ce 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -75,6 +75,7 @@ static int do_hidp_sock_ioctl(struct socket *sock, unsigned int cmd, void __user
sockfd_put(csock);
return err;
}
+ ca.name[sizeof(ca.name)-1] = 0;
err = hidp_connection_add(&ca, csock, isock);
if (!err && copy_to_user(argp, &ca, sizeof(ca)))
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index ccdc5c67d22a..a3b2e3b5f04b 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -510,12 +510,12 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan)
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
-static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
+static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
chan->sdu = NULL;
chan->sdu_last_frag = NULL;
chan->sdu_len = 0;
- chan->tx_credits = 0;
+ chan->tx_credits = tx_credits;
/* Derive MPS from connection MTU to stop HCI fragmentation */
chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
/* Give enough credits for a full packet */
@@ -1281,7 +1281,7 @@ static void l2cap_le_connect(struct l2cap_chan *chan)
if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
return;
- l2cap_le_flowctl_init(chan);
+ l2cap_le_flowctl_init(chan, 0);
req.psm = chan->psm;
req.scid = cpu_to_le16(chan->scid);
@@ -5531,11 +5531,10 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn,
chan->dcid = scid;
chan->omtu = mtu;
chan->remote_mps = mps;
- chan->tx_credits = __le16_to_cpu(req->credits);
__l2cap_chan_add(conn, chan);
- l2cap_le_flowctl_init(chan);
+ l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));
dcid = chan->scid;
credits = chan->rx_credits;
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 41f0a696a65f..0cb0aa0313a8 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -602,13 +602,15 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
call_netdevice_notifiers(NETDEV_JOIN, dev);
err = dev_set_allmulti(dev, 1);
- if (err)
- goto put_back;
+ if (err) {
+ kfree(p); /* kobject not yet init'd, manually free */
+ goto err1;
+ }
err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
SYSFS_BRIDGE_PORT_ATTR);
if (err)
- goto err1;
+ goto err2;
err = br_sysfs_addif(p);
if (err)
@@ -700,12 +702,9 @@ err3:
sysfs_remove_link(br->ifobj, p->dev->name);
err2:
kobject_put(&p->kobj);
- p = NULL; /* kobject_put frees */
-err1:
dev_set_allmulti(dev, -1);
-put_back:
+err1:
dev_put(dev);
- kfree(p);
return err;
}
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 5ea7e56119c1..ba303ee99b9b 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -197,13 +197,10 @@ static void __br_handle_local_finish(struct sk_buff *skb)
/* note: already called with rcu_read_lock */
static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- struct net_bridge_port *p = br_port_get_rcu(skb->dev);
-
__br_handle_local_finish(skb);
- BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
- br_pass_frame_up(skb);
- return 0;
+ /* return 1 to signal the okfn() was called so it's ok to use the skb */
+ return 1;
}
/*
@@ -280,10 +277,18 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
goto forward;
}
- /* Deliver packet to local host only */
- NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, dev_net(skb->dev),
- NULL, skb, skb->dev, NULL, br_handle_local_finish);
- return RX_HANDLER_CONSUMED;
+ /* The else clause should be hit when nf_hook():
+ * - returns < 0 (drop/error)
+ * - returns = 0 (stolen/nf_queue)
+ * Thus return 1 from the okfn() to signal the skb is ok to pass
+ */
+ if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
+ dev_net(skb->dev), NULL, skb, skb->dev, NULL,
+ br_handle_local_finish) == 1) {
+ return RX_HANDLER_PASS;
+ } else {
+ return RX_HANDLER_CONSUMED;
+ }
}
forward:
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index e4777614a8a0..61ff0d497da6 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1916,7 +1916,8 @@ static void br_multicast_start_querier(struct net_bridge *br,
__br_multicast_open(br, query);
- list_for_each_entry(port, &br->port_list, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &br->port_list, list) {
if (port->state == BR_STATE_DISABLED ||
port->state == BR_STATE_BLOCKING)
continue;
@@ -1928,6 +1929,7 @@ static void br_multicast_start_querier(struct net_bridge *br,
br_multicast_enable(&port->ip6_own_query);
#endif
}
+ rcu_read_unlock();
}
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 40d058378b52..fc605758323b 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -502,6 +502,7 @@ static unsigned int br_nf_pre_routing(void *priv,
nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr;
skb->protocol = htons(ETH_P_IP);
+ skb->transport_header = skb->network_header + ip_hdr(skb)->ihl * 4;
NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
skb->dev, NULL,
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index 564710f88f93..e88d6641647b 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -235,6 +235,8 @@ unsigned int br_nf_pre_routing_ipv6(void *priv,
nf_bridge->ipv6_daddr = ipv6_hdr(skb)->daddr;
skb->protocol = htons(ETH_P_IPV6);
+ skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
+
NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
skb->dev, NULL,
br_nf_pre_routing_finish_ipv6);
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 9c07591b0232..7104cf13da84 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -1441,7 +1441,7 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED,
br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) ||
nla_put_u8(skb, IFLA_BR_VLAN_STATS_PER_PORT,
- br_opt_get(br, IFLA_BR_VLAN_STATS_PER_PORT)))
+ br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)))
return -EMSGSIZE;
#endif
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index f77888ec93f1..0bb4d712b80c 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -2032,7 +2032,8 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
if (match_kern)
match_kern->match_size = ret;
- if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
+ /* rule should have no remaining data after target */
+ if (type == EBT_COMPAT_TARGET && size_left)
return -EINVAL;
match32 = (struct compat_ebt_entry_mwt *) buf;
diff --git a/net/core/dev.c b/net/core/dev.c
index 12824e007e06..7277dd393c00 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1184,7 +1184,21 @@ int dev_change_name(struct net_device *dev, const char *newname)
BUG_ON(!dev_net(dev));
net = dev_net(dev);
- if (dev->flags & IFF_UP)
+
+ /* Some auto-enslaved devices e.g. failover slaves are
+ * special, as userspace might rename the device after
+ * the interface had been brought up and running since
+ * the point kernel initiated auto-enslavement. Allow
+ * live name change even when these slave devices are
+ * up and running.
+ *
+ * Typically, users of these auto-enslaving devices
+ * don't actually care about slave name change, as
+ * they are supposed to operate on master interface
+ * directly.
+ */
+ if (dev->flags & IFF_UP &&
+ likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
return -EBUSY;
write_seqcount_begin(&devnet_rename_seq);
diff --git a/net/core/failover.c b/net/core/failover.c
index 4a92a98ccce9..b5cd3c727285 100644
--- a/net/core/failover.c
+++ b/net/core/failover.c
@@ -80,14 +80,14 @@ static int failover_slave_register(struct net_device *slave_dev)
goto err_upper_link;
}
- slave_dev->priv_flags |= IFF_FAILOVER_SLAVE;
+ slave_dev->priv_flags |= (IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
if (fops && fops->slave_register &&
!fops->slave_register(slave_dev, failover_dev))
return NOTIFY_OK;
netdev_upper_dev_unlink(slave_dev, failover_dev);
- slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE;
+ slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
err_upper_link:
netdev_rx_handler_unregister(slave_dev);
done:
@@ -121,7 +121,7 @@ int failover_slave_unregister(struct net_device *slave_dev)
netdev_rx_handler_unregister(slave_dev);
netdev_upper_dev_unlink(slave_dev, failover_dev);
- slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE;
+ slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
if (fops && fops->slave_unregister &&
!fops->slave_unregister(slave_dev, failover_dev))
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index ffbb827723a2..c49b752ea7eb 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -756,9 +756,9 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err)
goto errout;
- if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
- rule_exists(ops, frh, tb, rule)) {
- err = -EEXIST;
+ if (rule_exists(ops, frh, tb, rule)) {
+ if (nlh->nlmsg_flags & NLM_F_EXCL)
+ err = -EEXIST;
goto errout_free;
}
diff --git a/net/core/filter.c b/net/core/filter.c
index f7d0004fc160..ff07996515f2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2789,7 +2789,7 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
u32 off = skb_mac_header_len(skb);
int ret;
- if (!skb_is_gso_tcp(skb))
+ if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
return -ENOTSUPP;
ret = skb_cow(skb, len_diff);
@@ -2830,7 +2830,7 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
u32 off = skb_mac_header_len(skb);
int ret;
- if (!skb_is_gso_tcp(skb))
+ if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
return -ENOTSUPP;
ret = skb_unclone(skb, GFP_ATOMIC);
@@ -2955,7 +2955,7 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
int ret;
- if (!skb_is_gso_tcp(skb))
+ if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
return -ENOTSUPP;
ret = skb_cow(skb, len_diff);
@@ -2984,7 +2984,7 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
int ret;
- if (!skb_is_gso_tcp(skb))
+ if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
return -ENOTSUPP;
ret = skb_unclone(skb, GFP_ATOMIC);
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 9f2840510e63..afc6e025c85c 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -786,7 +786,10 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
flow_keys.thoff = nhoff;
bpf_compute_data_pointers((struct sk_buff *)skb);
+
+ preempt_disable();
result = BPF_PROG_RUN(attached, skb);
+ preempt_enable();
/* Restore state */
memcpy(cb, &cb_saved, sizeof(cb_saved));
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ef2cd5712098..40796b8bf820 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -5083,7 +5083,8 @@ EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
{
- int mac_len;
+ int mac_len, meta_len;
+ void *meta;
if (skb_cow(skb, skb_headroom(skb)) < 0) {
kfree_skb(skb);
@@ -5095,6 +5096,13 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
mac_len - VLAN_HLEN - ETH_TLEN);
}
+
+ meta_len = skb_metadata_len(skb);
+ if (meta_len) {
+ meta = skb_metadata_end(skb) - meta_len;
+ memmove(meta + VLAN_HLEN, meta, meta_len);
+ }
+
skb->mac_header += VLAN_HLEN;
return skb;
}
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index aee909bcddc4..41d534d3f42b 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -342,15 +342,22 @@ static int __init dsa_init_module(void)
rc = dsa_slave_register_notifier();
if (rc)
- return rc;
+ goto register_notifier_fail;
rc = dsa_legacy_register();
if (rc)
- return rc;
+ goto legacy_register_fail;
dev_add_pack(&dsa_pack_type);
return 0;
+
+legacy_register_fail:
+ dsa_slave_unregister_notifier();
+register_notifier_fail:
+ destroy_workqueue(dsa_owq);
+
+ return rc;
}
module_init(dsa_init_module);
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 79e98e21cdd7..12ce6c526d72 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -121,6 +121,7 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
struct guehdr *guehdr;
void *data;
u16 doffset = 0;
+ u8 proto_ctype;
if (!fou)
return 1;
@@ -212,13 +213,14 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
if (unlikely(guehdr->control))
return gue_control_message(skb, guehdr);
+ proto_ctype = guehdr->proto_ctype;
__skb_pull(skb, sizeof(struct udphdr) + hdrlen);
skb_reset_transport_header(skb);
if (iptunnel_pull_offloads(skb))
goto drop;
- return -guehdr->proto_ctype;
+ return -proto_ctype;
drop:
kfree_skb(skb);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index c80188875f39..e8bb2e85c5a4 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -519,6 +519,7 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
to->pkt_type = from->pkt_type;
to->priority = from->priority;
to->protocol = from->protocol;
+ to->skb_iif = from->skb_iif;
skb_dst_drop(to);
skb_dst_copy(to, from);
to->dev = from->dev;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index c55a5432cf37..dc91c27bb788 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -173,6 +173,7 @@ static int icmp_filter(const struct sock *sk, const struct sk_buff *skb)
static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
{
int sdif = inet_sdif(skb);
+ int dif = inet_iif(skb);
struct sock *sk;
struct hlist_head *head;
int delivered = 0;
@@ -185,8 +186,7 @@ static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
net = dev_net(skb->dev);
sk = __raw_v4_lookup(net, __sk_head(head), iph->protocol,
- iph->saddr, iph->daddr,
- skb->dev->ifindex, sdif);
+ iph->saddr, iph->daddr, dif, sdif);
while (sk) {
delivered = 1;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index e04cdb58a602..3c89ca325947 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1183,11 +1183,39 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
return dst;
}
+static void ipv4_send_dest_unreach(struct sk_buff *skb)
+{
+ struct ip_options opt;
+ int res;
+
+ /* Recompile ip options since IPCB may not be valid anymore.
+ * Also check we have a reasonable ipv4 header.
+ */
+ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
+ ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
+ return;
+
+ memset(&opt, 0, sizeof(opt));
+ if (ip_hdr(skb)->ihl > 5) {
+ if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
+ return;
+ opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
+
+ rcu_read_lock();
+ res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
+ rcu_read_unlock();
+
+ if (res)
+ return;
+ }
+ __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
+}
+
static void ipv4_link_failure(struct sk_buff *skb)
{
struct rtable *rt;
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
+ ipv4_send_dest_unreach(skb);
rt = skb_rtable(skb);
if (rt)
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index ba0fc4b18465..eeb4041fa5f9 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -49,6 +49,7 @@ static int ip_ping_group_range_min[] = { 0, 0 };
static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
static int comp_sack_nr_max = 255;
static u32 u32_max_div_HZ = UINT_MAX / HZ;
+static int one_day_secs = 24 * 3600;
/* obsolete */
static int sysctl_tcp_low_latency __read_mostly;
@@ -1151,7 +1152,9 @@ static struct ctl_table ipv4_net_table[] = {
.data = &init_net.ipv4.sysctl_tcp_min_rtt_wlen,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one_day_secs
},
{
.procname = "tcp_autocorking",
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 7b1ef897b398..95b2e31fff08 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -402,11 +402,12 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
+ int room;
+
+ room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;
/* Check #1 */
- if (tp->rcv_ssthresh < tp->window_clamp &&
- (int)tp->rcv_ssthresh < tcp_space(sk) &&
- !tcp_under_memory_pressure(sk)) {
+ if (room > 0 && !tcp_under_memory_pressure(sk)) {
int incr;
/* Check #2. Increase window, if skb with such overhead
@@ -419,8 +420,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
if (incr) {
incr = max_t(int, incr, 2 * skb->len);
- tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
- tp->window_clamp);
+ tp->rcv_ssthresh += min(room, incr);
inet_csk(sk)->icsk_ack.quick |= 1;
}
}
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 00852f47a73d..9a2ff79a93ad 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1673,7 +1673,9 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
((TCP_SKB_CB(tail)->tcp_flags |
- TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_URG) ||
+ TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
+ !((TCP_SKB_CB(tail)->tcp_flags &
+ TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
((TCP_SKB_CB(tail)->tcp_flags ^
TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
#ifdef CONFIG_TLS_DEVICE
@@ -1692,6 +1694,15 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
if (after(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))
TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
+ /* We have to update both TCP_SKB_CB(tail)->tcp_flags and
+ * thtail->fin, so that the fast path in tcp_rcv_established()
+ * is not entered if we append a packet with a FIN.
+ * SYN, RST, URG are not present.
+ * ACK is set on both packets.
+ * PSH : we do not really care in TCP stack,
+ * at least for 'GRO' packets.
+ */
+ thtail->fin |= th->fin;
TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
if (TCP_SKB_CB(skb)->has_rxtstamp) {
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 64f9715173ac..065334b41d57 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -352,6 +352,7 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
struct sk_buff *pp = NULL;
struct udphdr *uh2;
struct sk_buff *p;
+ unsigned int ulen;
/* requires non zero csum, for symmetry with GSO */
if (!uh->check) {
@@ -359,6 +360,12 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
return NULL;
}
+ /* Do not deal with padded or malicious packets, sorry ! */
+ ulen = ntohs(uh->len);
+ if (ulen <= sizeof(*uh) || ulen != skb_gro_len(skb)) {
+ NAPI_GRO_CB(skb)->flush = 1;
+ return NULL;
+ }
/* pull encapsulating udp header */
skb_gro_pull(skb, sizeof(struct udphdr));
skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
@@ -377,13 +384,14 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
/* Terminate the flow on len mismatch or if it grow "too much".
* Under small packet flood GRO count could elsewhere grow a lot
- * leading to execessive truesize values
+ * leading to excessive truesize values.
+ * On len mismatch merge the first packet shorter than gso_size,
+ * otherwise complete the GRO packet.
*/
- if (!skb_gro_receive(p, skb) &&
+ if (ulen > ntohs(uh2->len) || skb_gro_receive(p, skb) ||
+ ulen != ntohs(uh2->len) ||
NAPI_GRO_CB(p)->count >= UDP_GRO_CNT_MAX)
pp = p;
- else if (uh->len != uh2->len)
- pp = p;
return pp;
}
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 6613d8dbb0e5..91247a6fc67f 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -921,9 +921,7 @@ static void fib6_drop_pcpu_from(struct fib6_info *f6i,
if (pcpu_rt) {
struct fib6_info *from;
- from = rcu_dereference_protected(pcpu_rt->from,
- lockdep_is_held(&table->tb6_lock));
- rcu_assign_pointer(pcpu_rt->from, NULL);
+ from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
fib6_info_release(from);
}
}
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index cb54a8a3c273..be5f3d7ceb96 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -94,15 +94,21 @@ static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
return fl;
}
+static void fl_free_rcu(struct rcu_head *head)
+{
+ struct ip6_flowlabel *fl = container_of(head, struct ip6_flowlabel, rcu);
+
+ if (fl->share == IPV6_FL_S_PROCESS)
+ put_pid(fl->owner.pid);
+ kfree(fl->opt);
+ kfree(fl);
+}
+
static void fl_free(struct ip6_flowlabel *fl)
{
- if (fl) {
- if (fl->share == IPV6_FL_S_PROCESS)
- put_pid(fl->owner.pid);
- kfree(fl->opt);
- kfree_rcu(fl, rcu);
- }
+ if (fl)
+ call_rcu(&fl->rcu, fl_free_rcu);
}
static void fl_release(struct ip6_flowlabel *fl)
@@ -633,9 +639,9 @@ recheck:
if (fl1->share == IPV6_FL_S_EXCL ||
fl1->share != fl->share ||
((fl1->share == IPV6_FL_S_PROCESS) &&
- (fl1->owner.pid == fl->owner.pid)) ||
+ (fl1->owner.pid != fl->owner.pid)) ||
((fl1->share == IPV6_FL_S_USER) &&
- uid_eq(fl1->owner.uid, fl->owner.uid)))
+ !uid_eq(fl1->owner.uid, fl->owner.uid)))
goto release;
err = -ENOMEM;
diff --git a/net/ipv6/netfilter/ip6t_srh.c b/net/ipv6/netfilter/ip6t_srh.c
index 1059894a6f4c..4cb83fb69844 100644
--- a/net/ipv6/netfilter/ip6t_srh.c
+++ b/net/ipv6/netfilter/ip6t_srh.c
@@ -210,6 +210,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par)
psidoff = srhoff + sizeof(struct ipv6_sr_hdr) +
((srh->segments_left + 1) * sizeof(struct in6_addr));
psid = skb_header_pointer(skb, psidoff, sizeof(_psid), &_psid);
+ if (!psid)
+ return false;
if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_PSID,
ipv6_masked_addr_cmp(psid, &srhinfo->psid_msk,
&srhinfo->psid_addr)))
@@ -223,6 +225,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par)
nsidoff = srhoff + sizeof(struct ipv6_sr_hdr) +
((srh->segments_left - 1) * sizeof(struct in6_addr));
nsid = skb_header_pointer(skb, nsidoff, sizeof(_nsid), &_nsid);
+ if (!nsid)
+ return false;
if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_NSID,
ipv6_masked_addr_cmp(nsid, &srhinfo->nsid_msk,
&srhinfo->nsid_addr)))
@@ -233,6 +237,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par)
if (srhinfo->mt_flags & IP6T_SRH_LSID) {
lsidoff = srhoff + sizeof(struct ipv6_sr_hdr);
lsid = skb_header_pointer(skb, lsidoff, sizeof(_lsid), &_lsid);
+ if (!lsid)
+ return false;
if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LSID,
ipv6_masked_addr_cmp(lsid, &srhinfo->lsid_msk,
&srhinfo->lsid_addr)))
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 0086acc16f3c..59c90bba048c 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -379,11 +379,8 @@ static void ip6_dst_destroy(struct dst_entry *dst)
in6_dev_put(idev);
}
- rcu_read_lock();
- from = rcu_dereference(rt->from);
- rcu_assign_pointer(rt->from, NULL);
+ from = xchg((__force struct fib6_info **)&rt->from, NULL);
fib6_info_release(from);
- rcu_read_unlock();
}
static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
@@ -1288,9 +1285,7 @@ static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
/* purge completely the exception to allow releasing the held resources:
* some [sk] cache may keep the dst around for unlimited time
*/
- from = rcu_dereference_protected(rt6_ex->rt6i->from,
- lockdep_is_held(&rt6_exception_lock));
- rcu_assign_pointer(rt6_ex->rt6i->from, NULL);
+ from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL);
fib6_info_release(from);
dst_dev_put(&rt6_ex->rt6i->dst);
@@ -2336,6 +2331,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
rcu_read_lock();
from = rcu_dereference(rt6->from);
+ if (!from) {
+ rcu_read_unlock();
+ return;
+ }
nrt6 = ip6_rt_cache_alloc(from, daddr, saddr);
if (nrt6) {
rt6_do_update_pmtu(nrt6, mtu);
@@ -3399,11 +3398,8 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
rcu_read_lock();
from = rcu_dereference(rt->from);
- /* This fib6_info_hold() is safe here because we hold reference to rt
- * and rt already holds reference to fib6_info.
- */
- fib6_info_hold(from);
- rcu_read_unlock();
+ if (!from)
+ goto out;
nrt = ip6_rt_cache_alloc(from, &msg->dest, NULL);
if (!nrt)
@@ -3415,10 +3411,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
- /* No need to remove rt from the exception table if rt is
- * a cached route because rt6_insert_exception() will
- * takes care of it
- */
+ /* rt6_insert_exception() will take care of duplicated exceptions */
if (rt6_insert_exception(nrt, from)) {
dst_release_immediate(&nrt->dst);
goto out;
@@ -3431,7 +3424,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
out:
- fib6_info_release(from);
+ rcu_read_unlock();
neigh_release(neigh);
}
@@ -4953,16 +4946,20 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
rcu_read_lock();
from = rcu_dereference(rt->from);
-
- if (fibmatch)
- err = rt6_fill_node(net, skb, from, NULL, NULL, NULL, iif,
- RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
- nlh->nlmsg_seq, 0);
- else
- err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
- &fl6.saddr, iif, RTM_NEWROUTE,
- NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
- 0);
+ if (from) {
+ if (fibmatch)
+ err = rt6_fill_node(net, skb, from, NULL, NULL, NULL,
+ iif, RTM_NEWROUTE,
+ NETLINK_CB(in_skb).portid,
+ nlh->nlmsg_seq, 0);
+ else
+ err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
+ &fl6.saddr, iif, RTM_NEWROUTE,
+ NETLINK_CB(in_skb).portid,
+ nlh->nlmsg_seq, 0);
+ } else {
+ err = -ENETUNREACH;
+ }
rcu_read_unlock();
if (err < 0) {
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index b2109b74857d..971d60bf9640 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1084,7 +1084,7 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
if (!tdev && tunnel->parms.link)
tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);
- if (tdev) {
+ if (tdev && !netif_is_l3_master(tdev)) {
int t_hlen = tunnel->hlen + sizeof(struct iphdr);
dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index fed6becc5daf..52b5a2797c0c 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -169,8 +169,8 @@ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
rcu_read_lock_bh();
list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
- if (tunnel->tunnel_id == tunnel_id) {
- l2tp_tunnel_inc_refcount(tunnel);
+ if (tunnel->tunnel_id == tunnel_id &&
+ refcount_inc_not_zero(&tunnel->ref_count)) {
rcu_read_unlock_bh();
return tunnel;
@@ -190,8 +190,8 @@ struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
rcu_read_lock_bh();
list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
- if (++count > nth) {
- l2tp_tunnel_inc_refcount(tunnel);
+ if (++count > nth &&
+ refcount_inc_not_zero(&tunnel->ref_count)) {
rcu_read_unlock_bh();
return tunnel;
}
@@ -909,7 +909,7 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct l2tp_tunnel *tunnel;
- tunnel = l2tp_tunnel(sk);
+ tunnel = rcu_dereference_sk_user_data(sk);
if (tunnel == NULL)
goto pass_up;
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index cff0fb3578c9..deb3faf08337 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -841,7 +841,7 @@ void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
dir = sdata->vif.debugfs_dir;
- if (!dir)
+ if (IS_ERR_OR_NULL(dir))
return;
sprintf(buf, "netdev:%s", sdata->name);
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 3e0d5922a440..a9c1d6e3cdae 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -1166,6 +1166,9 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif);
+ if (local->in_reconfig)
+ return;
+
if (!check_sdata_in_driver(sdata))
return;
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 4700718e010f..37e372896230 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -167,8 +167,10 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
* The driver doesn't know anything about VLAN interfaces.
* Hence, don't send GTKs for VLAN interfaces to the driver.
*/
- if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ ret = 1;
goto out_unsupported;
+ }
}
ret = drv_set_key(key->local, SET_KEY, sdata,
@@ -213,11 +215,8 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
/* all of these we can do in software - if driver can */
if (ret == 1)
return 0;
- if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL)) {
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- return 0;
+ if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL))
return -EINVAL;
- }
return 0;
default:
return -EINVAL;
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 88a6d5e18ccc..ac1f5db52994 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -23,7 +23,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);
static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
{
/* Use last four bytes of hw addr as hash index */
- return jhash_1word(*(u32 *)(addr+2), seed);
+ return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed);
}
static const struct rhashtable_params mesh_rht_params = {
diff --git a/net/mac80211/trace_msg.h b/net/mac80211/trace_msg.h
index 366b9e6f043e..40141df09f25 100644
--- a/net/mac80211/trace_msg.h
+++ b/net/mac80211/trace_msg.h
@@ -1,4 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Portions of this file
+ * Copyright (C) 2019 Intel Corporation
+ */
+
#ifdef CONFIG_MAC80211_MESSAGE_TRACING
#if !defined(__MAC80211_MSG_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ)
@@ -11,7 +16,7 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mac80211_msg
-#define MAX_MSG_LEN 100
+#define MAX_MSG_LEN 120
DECLARE_EVENT_CLASS(mac80211_msg_event,
TP_PROTO(struct va_format *vaf),
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 928f13a208b0..714d80e48a10 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -3214,6 +3214,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
u8 max_subframes = sta->sta.max_amsdu_subframes;
int max_frags = local->hw.max_tx_fragments;
int max_amsdu_len = sta->sta.max_amsdu_len;
+ int orig_truesize;
__be16 len;
void *data;
bool ret = false;
@@ -3254,6 +3255,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
if (!head || skb_is_gso(head))
goto out;
+ orig_truesize = head->truesize;
orig_len = head->len;
if (skb->len + head->len > max_amsdu_len)
@@ -3311,6 +3313,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
*frag_tail = skb;
out_recalc:
+ fq->memory_usage += head->truesize - orig_truesize;
if (head->len != orig_len) {
flow->backlog += head->len - orig_len;
tin->backlog_bytes += head->len - orig_len;
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index dc07fcc7938e..802db01e3075 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/ncsi.h>
@@ -667,7 +668,10 @@ static int ncsi_rsp_handler_oem_bcm_gma(struct ncsi_request *nr)
ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
memcpy(saddr.sa_data, &rsp->data[BCM_MAC_ADDR_OFFSET], ETH_ALEN);
/* Increase mac address by 1 for BMC's address */
- saddr.sa_data[ETH_ALEN - 1]++;
+ eth_addr_inc((u8 *)saddr.sa_data);
+ if (!is_valid_ether_addr((const u8 *)saddr.sa_data))
+ return -ENXIO;
+
ret = ops->ndo_set_mac_address(ndev, &saddr);
if (ret < 0)
netdev_warn(ndev, "NCSI: 'Writing mac address to device failed\n");
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index beb3a69ce1d4..0f0e5806bf77 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -995,6 +995,7 @@ config NETFILTER_XT_TARGET_TEE
depends on NETFILTER_ADVANCED
depends on IPV6 || IPV6=n
depends on !NF_CONNTRACK || NF_CONNTRACK
+ depends on IP6_NF_IPTABLES || !IP6_NF_IPTABLES
select NF_DUP_IPV4
select NF_DUP_IPV6 if IP6_NF_IPTABLES
---help---
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 235205c93e14..df112b27246a 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1647,7 +1647,7 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
if (!cp) {
int v;
- if (!sysctl_schedule_icmp(ipvs))
+ if (ipip || !sysctl_schedule_icmp(ipvs))
return NF_ACCEPT;
if (!ip_vs_try_to_schedule(ipvs, AF_INET, skb, pd, &v, &cp, &ciph))
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 9dd4c2048a2b..d7ac2f82bb6d 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
+#include <linux/siphash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
@@ -424,6 +425,40 @@ nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
+/* Generate a almost-unique pseudo-id for a given conntrack.
+ *
+ * intentionally doesn't re-use any of the seeds used for hash
+ * table location, we assume id gets exposed to userspace.
+ *
+ * Following nf_conn items do not change throughout lifetime
+ * of the nf_conn after it has been committed to main hash table:
+ *
+ * 1. nf_conn address
+ * 2. nf_conn->ext address
+ * 3. nf_conn->master address (normally NULL)
+ * 4. tuple
+ * 5. the associated net namespace
+ */
+u32 nf_ct_get_id(const struct nf_conn *ct)
+{
+ static __read_mostly siphash_key_t ct_id_seed;
+ unsigned long a, b, c, d;
+
+ net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
+
+ a = (unsigned long)ct;
+ b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct));
+ c = (unsigned long)ct->ext;
+ d = (unsigned long)siphash(&ct->tuplehash, sizeof(ct->tuplehash),
+ &ct_id_seed);
+#ifdef CONFIG_64BIT
+ return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
+#else
+ return siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &ct_id_seed);
+#endif
+}
+EXPORT_SYMBOL_GPL(nf_ct_get_id);
+
static void
clean_from_lists(struct nf_conn *ct)
{
@@ -948,12 +983,9 @@ __nf_conntrack_confirm(struct sk_buff *skb)
/* set conntrack timestamp, if enabled. */
tstamp = nf_conn_tstamp_find(ct);
- if (tstamp) {
- if (skb->tstamp == 0)
- __net_timestamp(skb);
+ if (tstamp)
+ tstamp->start = ktime_get_real_ns();
- tstamp->start = ktime_to_ns(skb->tstamp);
- }
/* Since the lookup is lockless, hash insertion must be done after
* starting the timer and setting the CONFIRMED bit. The RCU barriers
* guarantee that no other CPU can find the conntrack before the above
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 1213beb5a714..36619ad8ab8c 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -29,6 +29,7 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
+#include <linux/siphash.h>
#include <linux/netfilter.h>
#include <net/netlink.h>
@@ -485,7 +486,9 @@ nla_put_failure:
static int ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
{
- if (nla_put_be32(skb, CTA_ID, htonl((unsigned long)ct)))
+ __be32 id = (__force __be32)nf_ct_get_id(ct);
+
+ if (nla_put_be32(skb, CTA_ID, id))
goto nla_put_failure;
return 0;
@@ -1286,8 +1289,9 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
}
if (cda[CTA_ID]) {
- u_int32_t id = ntohl(nla_get_be32(cda[CTA_ID]));
- if (id != (u32)(unsigned long)ct) {
+ __be32 id = nla_get_be32(cda[CTA_ID]);
+
+ if (id != (__force __be32)nf_ct_get_id(ct)) {
nf_ct_put(ct);
return -ENOENT;
}
@@ -2694,6 +2698,25 @@ nla_put_failure:
static const union nf_inet_addr any_addr;
+static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp)
+{
+ static __read_mostly siphash_key_t exp_id_seed;
+ unsigned long a, b, c, d;
+
+ net_get_random_once(&exp_id_seed, sizeof(exp_id_seed));
+
+ a = (unsigned long)exp;
+ b = (unsigned long)exp->helper;
+ c = (unsigned long)exp->master;
+ d = (unsigned long)siphash(&exp->tuple, sizeof(exp->tuple), &exp_id_seed);
+
+#ifdef CONFIG_64BIT
+ return (__force __be32)siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &exp_id_seed);
+#else
+ return (__force __be32)siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &exp_id_seed);
+#endif
+}
+
static int
ctnetlink_exp_dump_expect(struct sk_buff *skb,
const struct nf_conntrack_expect *exp)
@@ -2741,7 +2764,7 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
}
#endif
if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
- nla_put_be32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)) ||
+ nla_put_be32(skb, CTA_EXPECT_ID, nf_expect_get_id(exp)) ||
nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class)))
goto nla_put_failure;
@@ -3046,7 +3069,8 @@ static int ctnetlink_get_expect(struct net *net, struct sock *ctnl,
if (cda[CTA_EXPECT_ID]) {
__be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
- if (ntohl(id) != (u32)(unsigned long)exp) {
+
+ if (id != nf_expect_get_id(exp)) {
nf_ct_expect_put(exp);
return -ENOENT;
}
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 859f5d07a915..78361e462e80 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -86,7 +86,7 @@ void nf_l4proto_log_invalid(const struct sk_buff *skb,
struct va_format vaf;
va_list args;
- if (net->ct.sysctl_log_invalid != protonum ||
+ if (net->ct.sysctl_log_invalid != protonum &&
net->ct.sysctl_log_invalid != IPPROTO_RAW)
return;
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index d159e9e7835b..ade527565127 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -358,9 +358,14 @@ static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple,
case IPPROTO_ICMPV6:
/* id is same for either direction... */
keyptr = &tuple->src.u.icmp.id;
- min = range->min_proto.icmp.id;
- range_size = ntohs(range->max_proto.icmp.id) -
- ntohs(range->min_proto.icmp.id) + 1;
+ if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
+ min = 0;
+ range_size = 65536;
+ } else {
+ min = ntohs(range->min_proto.icmp.id);
+ range_size = ntohs(range->max_proto.icmp.id) -
+ ntohs(range->min_proto.icmp.id) + 1;
+ }
goto find_free_id;
#if IS_ENABLED(CONFIG_NF_CT_PROTO_GRE)
case IPPROTO_GRE:
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index acb124ce92ec..25c2b98b9a96 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1502,7 +1502,7 @@ static int nft_chain_parse_hook(struct net *net,
if (IS_ERR(type))
return PTR_ERR(type);
}
- if (!(type->hook_mask & (1 << hook->num)))
+ if (hook->num > NF_MAX_HOOKS || !(type->hook_mask & (1 << hook->num)))
return -EOPNOTSUPP;
if (type->type == NFT_CHAIN_T_NAT &&
@@ -3624,6 +3624,9 @@ err1:
static void nft_set_destroy(struct nft_set *set)
{
+ if (WARN_ON(set->use > 0))
+ return;
+
set->ops->destroy(set);
module_put(to_set_type(set->ops)->owner);
kfree(set->name);
@@ -3664,7 +3667,7 @@ static int nf_tables_delset(struct net *net, struct sock *nlsk,
NL_SET_BAD_ATTR(extack, attr);
return PTR_ERR(set);
}
- if (!list_empty(&set->bindings) ||
+ if (set->use ||
(nlh->nlmsg_flags & NLM_F_NONREC && atomic_read(&set->nelems) > 0)) {
NL_SET_BAD_ATTR(extack, attr);
return -EBUSY;
@@ -3694,6 +3697,9 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *i;
struct nft_set_iter iter;
+ if (set->use == UINT_MAX)
+ return -EOVERFLOW;
+
if (!list_empty(&set->bindings) && nft_set_is_anonymous(set))
return -EBUSY;
@@ -3721,6 +3727,7 @@ bind:
binding->chain = ctx->chain;
list_add_tail_rcu(&binding->list, &set->bindings);
nft_set_trans_bind(ctx, set);
+ set->use++;
return 0;
}
@@ -3740,6 +3747,25 @@ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
}
EXPORT_SYMBOL_GPL(nf_tables_unbind_set);
+void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_set_binding *binding,
+ enum nft_trans_phase phase)
+{
+ switch (phase) {
+ case NFT_TRANS_PREPARE:
+ set->use--;
+ return;
+ case NFT_TRANS_ABORT:
+ case NFT_TRANS_RELEASE:
+ set->use--;
+ /* fall through */
+ default:
+ nf_tables_unbind_set(ctx, set, binding,
+ phase == NFT_TRANS_COMMIT);
+ }
+}
+EXPORT_SYMBOL_GPL(nf_tables_deactivate_set);
+
void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set)
{
if (list_empty(&set->bindings) && nft_set_is_anonymous(set))
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index b1f9c5303f02..0b3347570265 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -540,7 +540,7 @@ __build_packet_message(struct nfnl_log_net *log,
goto nla_put_failure;
}
- if (skb->tstamp) {
+ if (hooknum <= NF_INET_FORWARD && skb->tstamp) {
struct nfulnl_msg_packet_timestamp ts;
struct timespec64 kts = ktime_to_timespec64(skb->tstamp);
ts.sec = cpu_to_be64(kts.tv_sec);
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 0dcc3592d053..e057b2961d31 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -582,7 +582,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
if (nfqnl_put_bridge(entry, skb) < 0)
goto nla_put_failure;
- if (entskb->tstamp) {
+ if (entry->state.hook <= NF_INET_FORWARD && entskb->tstamp) {
struct nfqnl_msg_packet_timestamp ts;
struct timespec64 kts = ktime_to_timespec64(entskb->tstamp);
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index f1172f99752b..eb7f9a5f2aeb 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -241,11 +241,15 @@ static void nft_dynset_deactivate(const struct nft_ctx *ctx,
{
struct nft_dynset *priv = nft_expr_priv(expr);
- if (phase == NFT_TRANS_PREPARE)
- return;
+ nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase);
+}
+
+static void nft_dynset_activate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+{
+ struct nft_dynset *priv = nft_expr_priv(expr);
- nf_tables_unbind_set(ctx, priv->set, &priv->binding,
- phase == NFT_TRANS_COMMIT);
+ priv->set->use++;
}
static void nft_dynset_destroy(const struct nft_ctx *ctx,
@@ -293,6 +297,7 @@ static const struct nft_expr_ops nft_dynset_ops = {
.eval = nft_dynset_eval,
.init = nft_dynset_init,
.destroy = nft_dynset_destroy,
+ .activate = nft_dynset_activate,
.deactivate = nft_dynset_deactivate,
.dump = nft_dynset_dump,
};
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 14496da5141d..161c3451a747 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -127,11 +127,15 @@ static void nft_lookup_deactivate(const struct nft_ctx *ctx,
{
struct nft_lookup *priv = nft_expr_priv(expr);
- if (phase == NFT_TRANS_PREPARE)
- return;
+ nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase);
+}
+
+static void nft_lookup_activate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+{
+ struct nft_lookup *priv = nft_expr_priv(expr);
- nf_tables_unbind_set(ctx, priv->set, &priv->binding,
- phase == NFT_TRANS_COMMIT);
+ priv->set->use++;
}
static void nft_lookup_destroy(const struct nft_ctx *ctx,
@@ -222,6 +226,7 @@ static const struct nft_expr_ops nft_lookup_ops = {
.size = NFT_EXPR_SIZE(sizeof(struct nft_lookup)),
.eval = nft_lookup_eval,
.init = nft_lookup_init,
+ .activate = nft_lookup_activate,
.deactivate = nft_lookup_deactivate,
.destroy = nft_lookup_destroy,
.dump = nft_lookup_dump,
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index ae178e914486..bf92a40dd1b2 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -64,21 +64,34 @@ nla_put_failure:
return -1;
}
-static void nft_objref_destroy(const struct nft_ctx *ctx,
- const struct nft_expr *expr)
+static void nft_objref_deactivate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ enum nft_trans_phase phase)
{
struct nft_object *obj = nft_objref_priv(expr);
+ if (phase == NFT_TRANS_COMMIT)
+ return;
+
obj->use--;
}
+static void nft_objref_activate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+{
+ struct nft_object *obj = nft_objref_priv(expr);
+
+ obj->use++;
+}
+
static struct nft_expr_type nft_objref_type;
static const struct nft_expr_ops nft_objref_ops = {
.type = &nft_objref_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_object *)),
.eval = nft_objref_eval,
.init = nft_objref_init,
- .destroy = nft_objref_destroy,
+ .activate = nft_objref_activate,
+ .deactivate = nft_objref_deactivate,
.dump = nft_objref_dump,
};
@@ -161,11 +174,15 @@ static void nft_objref_map_deactivate(const struct nft_ctx *ctx,
{
struct nft_objref_map *priv = nft_expr_priv(expr);
- if (phase == NFT_TRANS_PREPARE)
- return;
+ nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase);
+}
+
+static void nft_objref_map_activate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+{
+ struct nft_objref_map *priv = nft_expr_priv(expr);
- nf_tables_unbind_set(ctx, priv->set, &priv->binding,
- phase == NFT_TRANS_COMMIT);
+ priv->set->use++;
}
static void nft_objref_map_destroy(const struct nft_ctx *ctx,
@@ -182,6 +199,7 @@ static const struct nft_expr_ops nft_objref_map_ops = {
.size = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)),
.eval = nft_objref_map_eval,
.init = nft_objref_map_init,
+ .activate = nft_objref_map_activate,
.deactivate = nft_objref_map_deactivate,
.destroy = nft_objref_map_destroy,
.dump = nft_objref_map_dump,
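
Editor's note: the dynset, lookup and objref hunks above all make the same change: the deactivate callback now defers the bookkeeping to nf_tables_deactivate_set() (or checks the transaction phase directly) and a matching activate callback re-takes the reference, so the set/object use count survives an aborted transaction. Below is a minimal userspace sketch of that symmetric activate/deactivate pairing; struct set and its use field are stand-ins for the nf_tables internals, not the kernel API.

#include <assert.h>

/* Illustrative only: 'use' counts how many rules reference a set/object. */
struct set { int use; };

static void expr_deactivate(struct set *s) { s->use--; }  /* rule going away */
static void expr_activate(struct set *s)   { s->use++; }  /* rule restored   */

int main(void)
{
	struct set s = { .use = 1 };

	/* A deletion that is later aborted must leave the count unchanged. */
	expr_deactivate(&s);
	expr_activate(&s);
	assert(s.use == 1);
	return 0;
}
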
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index fa61208371f8..321a0036fdf5 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -308,10 +308,6 @@ static void *nft_rbtree_deactivate(const struct net *net,
else if (d > 0)
parent = parent->rb_right;
else {
- if (!nft_set_elem_active(&rbe->ext, genmask)) {
- parent = parent->rb_left;
- continue;
- }
if (nft_rbtree_interval_end(rbe) &&
!nft_rbtree_interval_end(this)) {
parent = parent->rb_left;
@@ -320,6 +316,9 @@ static void *nft_rbtree_deactivate(const struct net *net,
nft_rbtree_interval_end(this)) {
parent = parent->rb_right;
continue;
+ } else if (!nft_set_elem_active(&rbe->ext, genmask)) {
+ parent = parent->rb_left;
+ continue;
}
nft_rbtree_flush(net, set, rbe);
return rbe;
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
index c13bcd0ab491..8dbb4d48f2ed 100644
--- a/net/netfilter/xt_time.c
+++ b/net/netfilter/xt_time.c
@@ -163,19 +163,24 @@ time_mt(const struct sk_buff *skb, struct xt_action_param *par)
s64 stamp;
/*
- * We cannot use get_seconds() instead of __net_timestamp() here.
+ * We need real time here, but we can neither use skb->tstamp
+ * nor __net_timestamp().
+ *
+ * skb->tstamp and skb->skb_mstamp_ns overlap, however, they
+ * use different clock types (real vs monotonic).
+ *
* Suppose you have two rules:
- * 1. match before 13:00
- * 2. match after 13:00
+ * 1. match before 13:00
+ * 2. match after 13:00
+ *
* If you match against processing time (get_seconds) it
* may happen that the same packet matches both rules if
- * it arrived at the right moment before 13:00.
+ * it arrived at the right moment before 13:00, so it would be
+ * better to check skb->tstamp and set it via __net_timestamp()
+ * if needed. This however breaks outgoing packets tx timestamp,
+ * and causes them to get delayed forever by fq packet scheduler.
*/
- if (skb->tstamp == 0)
- __net_timestamp((struct sk_buff *)skb);
-
- stamp = ktime_to_ns(skb->tstamp);
- stamp = div_s64(stamp, NSEC_PER_SEC);
+ stamp = get_seconds();
if (info->flags & XT_TIME_LOCAL_TZ)
/* Adjust for local timezone */
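
Editor's note: the rewritten comment above is the core of this change: skb->tstamp now shares storage with the monotonic TX timestamp, so the match can no longer stamp outgoing packets with wall-clock time without breaking the fq scheduler, and it falls back to the real-time clock at processing time. A small userspace sketch (not kernel code) of why a monotonic stamp cannot be tested against a "before/after 13:00" rule:

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec real, mono;

	clock_gettime(CLOCK_REALTIME, &real);   /* seconds since the epoch */
	clock_gettime(CLOCK_MONOTONIC, &mono);  /* seconds since boot      */

	/* Only the realtime value can be mapped to a time of day. */
	printf("realtime:  %lld\n", (long long)real.tv_sec);
	printf("monotonic: %lld\n", (long long)mono.tv_sec);
	return 0;
}
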
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 1d3144d19903..71ffd1a6dc7c 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1392,18 +1392,22 @@ static int __init nr_proto_init(void)
int i;
int rc = proto_register(&nr_proto, 0);
- if (rc != 0)
- goto out;
+ if (rc)
+ return rc;
if (nr_ndevs > 0x7fffffff/sizeof(struct net_device *)) {
- printk(KERN_ERR "NET/ROM: nr_proto_init - nr_ndevs parameter to large\n");
- return -1;
+ pr_err("NET/ROM: %s - nr_ndevs parameter too large\n",
+ __func__);
+ rc = -EINVAL;
+ goto unregister_proto;
}
dev_nr = kcalloc(nr_ndevs, sizeof(struct net_device *), GFP_KERNEL);
- if (dev_nr == NULL) {
- printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n");
- return -1;
+ if (!dev_nr) {
+ pr_err("NET/ROM: %s - unable to allocate device array\n",
+ __func__);
+ rc = -ENOMEM;
+ goto unregister_proto;
}
for (i = 0; i < nr_ndevs; i++) {
@@ -1413,13 +1417,13 @@ static int __init nr_proto_init(void)
sprintf(name, "nr%d", i);
dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, nr_setup);
if (!dev) {
- printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device structure\n");
+ rc = -ENOMEM;
goto fail;
}
dev->base_addr = i;
- if (register_netdev(dev)) {
- printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register network device\n");
+ rc = register_netdev(dev);
+ if (rc) {
free_netdev(dev);
goto fail;
}
@@ -1427,36 +1431,64 @@ static int __init nr_proto_init(void)
dev_nr[i] = dev;
}
- if (sock_register(&nr_family_ops)) {
- printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register socket family\n");
+ rc = sock_register(&nr_family_ops);
+ if (rc)
goto fail;
- }
- register_netdevice_notifier(&nr_dev_notifier);
+ rc = register_netdevice_notifier(&nr_dev_notifier);
+ if (rc)
+ goto out_sock;
ax25_register_pid(&nr_pid);
ax25_linkfail_register(&nr_linkfail_notifier);
#ifdef CONFIG_SYSCTL
- nr_register_sysctl();
+ rc = nr_register_sysctl();
+ if (rc)
+ goto out_sysctl;
#endif
nr_loopback_init();
- proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops);
- proc_create_seq("nr_neigh", 0444, init_net.proc_net, &nr_neigh_seqops);
- proc_create_seq("nr_nodes", 0444, init_net.proc_net, &nr_node_seqops);
-out:
- return rc;
+ rc = -ENOMEM;
+ if (!proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops))
+ goto proc_remove1;
+ if (!proc_create_seq("nr_neigh", 0444, init_net.proc_net,
+ &nr_neigh_seqops))
+ goto proc_remove2;
+ if (!proc_create_seq("nr_nodes", 0444, init_net.proc_net,
+ &nr_node_seqops))
+ goto proc_remove3;
+
+ return 0;
+
+proc_remove3:
+ remove_proc_entry("nr_neigh", init_net.proc_net);
+proc_remove2:
+ remove_proc_entry("nr", init_net.proc_net);
+proc_remove1:
+
+ nr_loopback_clear();
+ nr_rt_free();
+
+#ifdef CONFIG_SYSCTL
+ nr_unregister_sysctl();
+out_sysctl:
+#endif
+ ax25_linkfail_release(&nr_linkfail_notifier);
+ ax25_protocol_release(AX25_P_NETROM);
+ unregister_netdevice_notifier(&nr_dev_notifier);
+out_sock:
+ sock_unregister(PF_NETROM);
fail:
while (--i >= 0) {
unregister_netdev(dev_nr[i]);
free_netdev(dev_nr[i]);
}
kfree(dev_nr);
+unregister_proto:
proto_unregister(&nr_proto);
- rc = -1;
- goto out;
+ return rc;
}
module_init(nr_proto_init);
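
Editor's note: nr_proto_init() now returns real errno values and, on failure, unwinds every registration it completed in reverse order instead of returning -1 and leaking half-initialised state (the following hunks drop __exit from nr_loopback_clear()/nr_rt_free() and let nr_register_sysctl() return an error for the same reason). A minimal sketch of the same goto-unwind shape, with register_a()/register_b()/unregister_a() as hypothetical placeholders for the individual steps:

#include <errno.h>
#include <stdio.h>

static int register_a(void)    { return 0; }
static int register_b(void)    { return -ENOMEM; }   /* pretend this fails */
static void unregister_a(void) { puts("undo a"); }

/* Each failure jumps to the label that undoes everything done so far,
 * in reverse order, and the first errno is what the caller sees. */
static int subsystem_init(void)
{
	int rc;

	rc = register_a();
	if (rc)
		goto out;
	rc = register_b();
	if (rc)
		goto undo_a;
	return 0;

undo_a:
	unregister_a();
out:
	return rc;
}

int main(void)
{
	printf("init: %d\n", subsystem_init());
	return 0;
}
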
diff --git a/net/netrom/nr_loopback.c b/net/netrom/nr_loopback.c
index 215ad22a9647..93d13f019981 100644
--- a/net/netrom/nr_loopback.c
+++ b/net/netrom/nr_loopback.c
@@ -70,7 +70,7 @@ static void nr_loopback_timer(struct timer_list *unused)
}
}
-void __exit nr_loopback_clear(void)
+void nr_loopback_clear(void)
{
del_timer_sync(&loopback_timer);
skb_queue_purge(&loopback_queue);
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 6485f593e2f0..b76aa668a94b 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -953,7 +953,7 @@ const struct seq_operations nr_neigh_seqops = {
/*
* Free all memory associated with the nodes and routes lists.
*/
-void __exit nr_rt_free(void)
+void nr_rt_free(void)
{
struct nr_neigh *s = NULL;
struct nr_node *t = NULL;
diff --git a/net/netrom/sysctl_net_netrom.c b/net/netrom/sysctl_net_netrom.c
index ba1c368b3f18..771011b84270 100644
--- a/net/netrom/sysctl_net_netrom.c
+++ b/net/netrom/sysctl_net_netrom.c
@@ -146,9 +146,12 @@ static struct ctl_table nr_table[] = {
{ }
};
-void __init nr_register_sysctl(void)
+int __init nr_register_sysctl(void)
{
nr_table_header = register_net_sysctl(&init_net, "net/netrom", nr_table);
+ if (!nr_table_header)
+ return -ENOMEM;
+ return 0;
}
void nr_unregister_sysctl(void)
diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
index ddfc52ac1f9b..c0d323b58e73 100644
--- a/net/nfc/nci/hci.c
+++ b/net/nfc/nci/hci.c
@@ -312,6 +312,10 @@ static void nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe,
create_info = (struct nci_hci_create_pipe_resp *)skb->data;
dest_gate = create_info->dest_gate;
new_pipe = create_info->pipe;
+ if (new_pipe >= NCI_HCI_MAX_PIPES) {
+ status = NCI_HCI_ANY_E_NOK;
+ goto exit;
+ }
/* Save the new created pipe and bind with local gate,
* the description for skb->data[3] is destination gate id
@@ -336,6 +340,10 @@ static void nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe,
goto exit;
}
delete_info = (struct nci_hci_delete_pipe_noti *)skb->data;
+ if (delete_info->pipe >= NCI_HCI_MAX_PIPES) {
+ status = NCI_HCI_ANY_E_NOK;
+ goto exit;
+ }
ndev->hci_dev->pipes[delete_info->pipe].gate =
NCI_HCI_INVALID_GATE;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 8406bf11eef4..b6c23af4a315 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2603,8 +2603,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
void *ph;
DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
+ unsigned char *addr = NULL;
int tp_len, size_max;
- unsigned char *addr;
void *data;
int len_sum = 0;
int status = TP_STATUS_AVAILABLE;
@@ -2615,7 +2615,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
if (likely(saddr == NULL)) {
dev = packet_cached_dev_get(po);
proto = po->num;
- addr = NULL;
} else {
err = -EINVAL;
if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@@ -2625,10 +2624,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
sll_addr)))
goto out;
proto = saddr->sll_protocol;
- addr = saddr->sll_halen ? saddr->sll_addr : NULL;
dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
- if (addr && dev && saddr->sll_halen < dev->addr_len)
- goto out_put;
+ if (po->sk.sk_socket->type == SOCK_DGRAM) {
+ if (dev && msg->msg_namelen < dev->addr_len +
+ offsetof(struct sockaddr_ll, sll_addr))
+ goto out_put;
+ addr = saddr->sll_addr;
+ }
}
err = -ENXIO;
@@ -2800,7 +2802,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
struct sk_buff *skb;
struct net_device *dev;
__be16 proto;
- unsigned char *addr;
+ unsigned char *addr = NULL;
int err, reserve = 0;
struct sockcm_cookie sockc;
struct virtio_net_hdr vnet_hdr = { 0 };
@@ -2817,7 +2819,6 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
if (likely(saddr == NULL)) {
dev = packet_cached_dev_get(po);
proto = po->num;
- addr = NULL;
} else {
err = -EINVAL;
if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@@ -2825,10 +2826,13 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
goto out;
proto = saddr->sll_protocol;
- addr = saddr->sll_halen ? saddr->sll_addr : NULL;
dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
- if (addr && dev && saddr->sll_halen < dev->addr_len)
- goto out_unlock;
+ if (sock->type == SOCK_DGRAM) {
+ if (dev && msg->msg_namelen < dev->addr_len +
+ offsetof(struct sockaddr_ll, sll_addr))
+ goto out_unlock;
+ addr = saddr->sll_addr;
+ }
}
err = -ENXIO;
@@ -3345,20 +3349,29 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
sock_recv_ts_and_drops(msg, sk, skb);
if (msg->msg_name) {
+ int copy_len;
+
/* If the address length field is there to be filled
* in, we fill it in now.
*/
if (sock->type == SOCK_PACKET) {
__sockaddr_check_size(sizeof(struct sockaddr_pkt));
msg->msg_namelen = sizeof(struct sockaddr_pkt);
+ copy_len = msg->msg_namelen;
} else {
struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
msg->msg_namelen = sll->sll_halen +
offsetof(struct sockaddr_ll, sll_addr);
+ copy_len = msg->msg_namelen;
+ if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
+ memset(msg->msg_name +
+ offsetof(struct sockaddr_ll, sll_addr),
+ 0, sizeof(sll->sll_addr));
+ msg->msg_namelen = sizeof(struct sockaddr_ll);
+ }
}
- memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
- msg->msg_namelen);
+ memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
}
if (pkt_sk(sk)->auxdata) {
@@ -4591,14 +4604,29 @@ static void __exit packet_exit(void)
static int __init packet_init(void)
{
- int rc = proto_register(&packet_proto, 0);
+ int rc;
- if (rc != 0)
+ rc = proto_register(&packet_proto, 0);
+ if (rc)
goto out;
+ rc = sock_register(&packet_family_ops);
+ if (rc)
+ goto out_proto;
+ rc = register_pernet_subsys(&packet_net_ops);
+ if (rc)
+ goto out_sock;
+ rc = register_netdevice_notifier(&packet_netdev_notifier);
+ if (rc)
+ goto out_pernet;
- sock_register(&packet_family_ops);
- register_pernet_subsys(&packet_net_ops);
- register_netdevice_notifier(&packet_netdev_notifier);
+ return 0;
+
+out_pernet:
+ unregister_pernet_subsys(&packet_net_ops);
+out_sock:
+ sock_unregister(PF_PACKET);
+out_proto:
+ proto_unregister(&packet_proto);
out:
return rc;
}
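
Editor's note: in the recvmsg hunk above, the link-layer address may be shorter than sizeof(struct sockaddr_ll), so only copy_len bytes are taken from the skb control block and the remainder of the reported structure is explicitly zeroed rather than left uninitialised. A tiny sketch of that copy-then-pad idea (plain userspace C, buffer sizes are made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char stored[8] = { 0xaa, 0xbb, 0xcc, 0xdd };  /* 4 valid bytes */
	unsigned char out[8];
	size_t copy_len = 4;

	memset(out, 0, sizeof(out));        /* pad first, so nothing leaks */
	memcpy(out, stored, copy_len);      /* then copy only what is real */

	for (size_t i = 0; i < sizeof(out); i++)
		printf("%02x ", out[i]);
	printf("\n");
	return 0;
}
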
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index 65387e1e6964..cd7e01ea8144 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -506,6 +506,9 @@ static int rds_connect(struct socket *sock, struct sockaddr *uaddr,
struct rds_sock *rs = rds_sk_to_rs(sk);
int ret = 0;
+ if (addr_len < offsetofend(struct sockaddr, sa_family))
+ return -EINVAL;
+
lock_sock(sk);
switch (uaddr->sa_family) {
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 17c9d9f0c848..0f4398e7f2a7 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -173,6 +173,8 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
/* We allow an RDS socket to be bound to either IPv4 or IPv6
* address.
*/
+ if (addr_len < offsetofend(struct sockaddr, sa_family))
+ return -EINVAL;
if (uaddr->sa_family == AF_INET) {
struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
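
Editor's note: both rds_connect() and rds_bind() now verify that addr_len is at least large enough to contain sa_family before the family byte is read. The check below re-derives offsetofend() (a kernel helper) for a standalone illustration; the rest is the same bounds test in userspace terms:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/socket.h>

/* Reconstructed kernel offsetofend(): end offset of a struct member. */
#define offsetofend(type, member) \
	(offsetof(type, member) + sizeof(((type *)0)->member))

static int check_addr(const struct sockaddr *uaddr, socklen_t addr_len)
{
	if (addr_len < offsetofend(struct sockaddr, sa_family))
		return -EINVAL;          /* too short to even hold the family */

	switch (uaddr->sa_family) {
	case AF_INET:
	case AF_INET6:
		return 0;
	default:
		return -EAFNOSUPPORT;
	}
}

int main(void)
{
	struct sockaddr sa = { .sa_family = AF_INET };

	printf("%d\n", check_addr(&sa, 1));           /* -EINVAL */
	printf("%d\n", check_addr(&sa, sizeof(sa)));  /* 0       */
	return 0;
}
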
diff --git a/net/rds/ib_fmr.c b/net/rds/ib_fmr.c
index e0f70c4051b6..01e764f8f224 100644
--- a/net/rds/ib_fmr.c
+++ b/net/rds/ib_fmr.c
@@ -44,6 +44,17 @@ struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
else
pool = rds_ibdev->mr_1m_pool;
+ if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
+ queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
+
+ /* Switch pools if one of the pool is reaching upper limit */
+ if (atomic_read(&pool->dirty_count) >= pool->max_items * 9 / 10) {
+ if (pool->pool_type == RDS_IB_MR_8K_POOL)
+ pool = rds_ibdev->mr_1m_pool;
+ else
+ pool = rds_ibdev->mr_8k_pool;
+ }
+
ibmr = rds_ib_try_reuse_ibmr(pool);
if (ibmr)
return ibmr;
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 63c8d107adcf..d664e9ade74d 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -454,9 +454,6 @@ struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
struct rds_ib_mr *ibmr = NULL;
int iter = 0;
- if (atomic_read(&pool->dirty_count) >= pool->max_items_soft / 10)
- queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
-
while (1) {
ibmr = rds_ib_reuse_mr(pool);
if (ibmr)
diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
index 7af4f99c4a93..094a6621f8e8 100644
--- a/net/rose/rose_loopback.c
+++ b/net/rose/rose_loopback.c
@@ -16,6 +16,7 @@
#include <linux/init.h>
static struct sk_buff_head loopback_queue;
+#define ROSE_LOOPBACK_LIMIT 1000
static struct timer_list loopback_timer;
static void rose_set_loopback_timer(void);
@@ -35,29 +36,27 @@ static int rose_loopback_running(void)
int rose_loopback_queue(struct sk_buff *skb, struct rose_neigh *neigh)
{
- struct sk_buff *skbn;
+ struct sk_buff *skbn = NULL;
- skbn = skb_clone(skb, GFP_ATOMIC);
+ if (skb_queue_len(&loopback_queue) < ROSE_LOOPBACK_LIMIT)
+ skbn = skb_clone(skb, GFP_ATOMIC);
- kfree_skb(skb);
-
- if (skbn != NULL) {
+ if (skbn) {
+ consume_skb(skb);
skb_queue_tail(&loopback_queue, skbn);
if (!rose_loopback_running())
rose_set_loopback_timer();
+ } else {
+ kfree_skb(skb);
}
return 1;
}
-
static void rose_set_loopback_timer(void)
{
- del_timer(&loopback_timer);
-
- loopback_timer.expires = jiffies + 10;
- add_timer(&loopback_timer);
+ mod_timer(&loopback_timer, jiffies + 10);
}
static void rose_loopback_timer(struct timer_list *unused)
@@ -68,8 +67,12 @@ static void rose_loopback_timer(struct timer_list *unused)
struct sock *sk;
unsigned short frametype;
unsigned int lci_i, lci_o;
+ int count;
- while ((skb = skb_dequeue(&loopback_queue)) != NULL) {
+ for (count = 0; count < ROSE_LOOPBACK_LIMIT; count++) {
+ skb = skb_dequeue(&loopback_queue);
+ if (!skb)
+ return;
if (skb->len < ROSE_MIN_LEN) {
kfree_skb(skb);
continue;
@@ -106,6 +109,8 @@ static void rose_loopback_timer(struct timer_list *unused)
kfree_skb(skb);
}
}
+ if (!skb_queue_empty(&loopback_queue))
+ mod_timer(&loopback_timer, jiffies + 1);
}
void __exit rose_loopback_clear(void)
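
Editor's note: the ROSE loopback path is now bounded on both sides: rose_loopback_queue() refuses to clone once ROSE_LOOPBACK_LIMIT frames are pending, and the timer handler processes at most that many frames per run, re-arming itself one jiffy later if anything is left. A minimal sketch of that bounded enqueue / batched drain pattern (plain counters stand in for the sk_buff queue and the timer):

#include <stdio.h>

#define LOOPBACK_LIMIT 4

static int backlog;        /* queued frames (stand-in for skb_queue_len) */
static int timer_armed;

static void loopback_queue_frame(void)
{
	if (backlog >= LOOPBACK_LIMIT) {
		puts("drop: queue full");       /* enqueue side is capped */
		return;
	}
	backlog++;
	timer_armed = 1;
}

/* Drain at most LOOPBACK_LIMIT frames per timer run, then re-arm if
 * work remains instead of looping until the queue is empty. */
static void loopback_timer(void)
{
	int count;

	for (count = 0; count < LOOPBACK_LIMIT && backlog; count++)
		backlog--;

	timer_armed = backlog > 0;
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		loopback_queue_frame();
	loopback_timer();
	printf("left=%d re-armed=%d\n", backlog, timer_armed);
	return 0;
}
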
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 8aa2937b069f..fe96881a334d 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -604,30 +604,30 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
_enter("");
- if (list_empty(&rxnet->calls))
- return;
+ if (!list_empty(&rxnet->calls)) {
+ write_lock(&rxnet->call_lock);
- write_lock(&rxnet->call_lock);
+ while (!list_empty(&rxnet->calls)) {
+ call = list_entry(rxnet->calls.next,
+ struct rxrpc_call, link);
+ _debug("Zapping call %p", call);
- while (!list_empty(&rxnet->calls)) {
- call = list_entry(rxnet->calls.next, struct rxrpc_call, link);
- _debug("Zapping call %p", call);
+ rxrpc_see_call(call);
+ list_del_init(&call->link);
- rxrpc_see_call(call);
- list_del_init(&call->link);
+ pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
+ call, atomic_read(&call->usage),
+ rxrpc_call_states[call->state],
+ call->flags, call->events);
- pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
- call, atomic_read(&call->usage),
- rxrpc_call_states[call->state],
- call->flags, call->events);
+ write_unlock(&rxnet->call_lock);
+ cond_resched();
+ write_lock(&rxnet->call_lock);
+ }
write_unlock(&rxnet->call_lock);
- cond_resched();
- write_lock(&rxnet->call_lock);
}
- write_unlock(&rxnet->call_lock);
-
atomic_dec(&rxnet->nr_calls);
wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls));
}
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 5cf6d9f4761d..83797b3949e2 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -704,6 +704,7 @@ int rxrpc_connect_call(struct rxrpc_sock *rx,
ret = rxrpc_wait_for_channel(call, gfp);
if (ret < 0) {
+ trace_rxrpc_client(call->conn, ret, rxrpc_client_chan_wait_failed);
rxrpc_disconnect_client_call(call);
goto out;
}
@@ -774,16 +775,22 @@ static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet)
*/
void rxrpc_disconnect_client_call(struct rxrpc_call *call)
{
- unsigned int channel = call->cid & RXRPC_CHANNELMASK;
struct rxrpc_connection *conn = call->conn;
- struct rxrpc_channel *chan = &conn->channels[channel];
+ struct rxrpc_channel *chan = NULL;
struct rxrpc_net *rxnet = conn->params.local->rxnet;
+ unsigned int channel = -1;
+ u32 cid;
+ spin_lock(&conn->channel_lock);
+
+ cid = call->cid;
+ if (cid) {
+ channel = cid & RXRPC_CHANNELMASK;
+ chan = &conn->channels[channel];
+ }
trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
call->conn = NULL;
- spin_lock(&conn->channel_lock);
-
/* Calls that have never actually been assigned a channel can simply be
* discarded. If the conn didn't get used either, it will follow
* immediately unless someone else grabs it in the meantime.
@@ -807,7 +814,10 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
goto out;
}
- ASSERTCMP(rcu_access_pointer(chan->call), ==, call);
+ if (rcu_access_pointer(chan->call) != call) {
+ spin_unlock(&conn->channel_lock);
+ BUG();
+ }
/* If a client call was exposed to the world, we save the result for
* retransmission.
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 9128aa0e40aa..b4ffb81223ad 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -1155,19 +1155,19 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
* handle data received on the local endpoint
* - may be called in interrupt context
*
- * The socket is locked by the caller and this prevents the socket from being
- * shut down and the local endpoint from going away, thus sk_user_data will not
- * be cleared until this function returns.
+ * [!] Note that as this is called from the encap_rcv hook, the socket is not
+ * held locked by the caller and nothing prevents sk_user_data on the UDP from
+ * being cleared in the middle of processing this function.
*
* Called with the RCU read lock held from the IP layer via UDP.
*/
int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
{
+ struct rxrpc_local *local = rcu_dereference_sk_user_data(udp_sk);
struct rxrpc_connection *conn;
struct rxrpc_channel *chan;
struct rxrpc_call *call = NULL;
struct rxrpc_skb_priv *sp;
- struct rxrpc_local *local = udp_sk->sk_user_data;
struct rxrpc_peer *peer = NULL;
struct rxrpc_sock *rx = NULL;
unsigned int channel;
@@ -1175,6 +1175,10 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
_enter("%p", udp_sk);
+ if (unlikely(!local)) {
+ kfree_skb(skb);
+ return 0;
+ }
if (skb->tstamp == 0)
skb->tstamp = ktime_get_real();
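
Editor's note: the new comment above changes the contract: rxrpc_input_packet() runs from the UDP encap_rcv hook without the socket lock, so sk_user_data can be cleared concurrently. The fix reads it exactly once via rcu_dereference_sk_user_data() and bails out if the snapshot is NULL. A small sketch of that read-once-then-check shape (ordinary pointers here, no real RCU):

#include <stdio.h>

struct local_endpoint { int id; };

/* Take one snapshot of a pointer that may be cleared concurrently,
 * test the snapshot, and use only the snapshot afterwards. */
static int input_packet(struct local_endpoint *const *shared)
{
	struct local_endpoint *local = *shared;

	if (!local)
		return 0;            /* endpoint already gone: drop the packet */

	return local->id;
}

int main(void)
{
	struct local_endpoint ep = { .id = 42 };
	struct local_endpoint *ptr = &ep;

	printf("%d\n", input_packet(&ptr));
	ptr = NULL;
	printf("%d\n", input_packet(&ptr));
	return 0;
}
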
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 0906e51d3cfb..10317dbdab5f 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -304,7 +304,8 @@ nomem:
ret = -ENOMEM;
sock_error:
mutex_unlock(&rxnet->local_mutex);
- kfree(local);
+ if (local)
+ call_rcu(&local->rcu, rxrpc_local_rcu);
_leave(" = %d", ret);
return ERR_PTR(ret);
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index c8cf4d10c435..971dc03304f4 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -159,6 +159,9 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
}
m = to_mirred(*a);
+ if (ret == ACT_P_CREATED)
+ INIT_LIST_HEAD(&m->tcfm_list);
+
spin_lock_bh(&m->tcf_lock);
m->tcf_action = parm->action;
m->tcfm_eaction = parm->eaction;
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 73940293700d..7b5ce1343474 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -1508,32 +1508,29 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
return idx + (tin << 16);
}
-static void cake_wash_diffserv(struct sk_buff *skb)
-{
- switch (skb->protocol) {
- case htons(ETH_P_IP):
- ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
- break;
- case htons(ETH_P_IPV6):
- ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
- break;
- default:
- break;
- }
-}
-
static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
{
+ int wlen = skb_network_offset(skb);
u8 dscp;
- switch (skb->protocol) {
+ switch (tc_skb_protocol(skb)) {
case htons(ETH_P_IP):
+ wlen += sizeof(struct iphdr);
+ if (!pskb_may_pull(skb, wlen) ||
+ skb_try_make_writable(skb, wlen))
+ return 0;
+
dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
if (wash && dscp)
ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
return dscp;
case htons(ETH_P_IPV6):
+ wlen += sizeof(struct ipv6hdr);
+ if (!pskb_may_pull(skb, wlen) ||
+ skb_try_make_writable(skb, wlen))
+ return 0;
+
dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
if (wash && dscp)
ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
@@ -1553,25 +1550,27 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
{
struct cake_sched_data *q = qdisc_priv(sch);
u32 tin;
+ u8 dscp;
+
+ /* Tin selection: Default to diffserv-based selection, allow overriding
+ * using firewall marks or skb->priority.
+ */
+ dscp = cake_handle_diffserv(skb,
+ q->rate_flags & CAKE_FLAG_WASH);
- if (TC_H_MAJ(skb->priority) == sch->handle &&
- TC_H_MIN(skb->priority) > 0 &&
- TC_H_MIN(skb->priority) <= q->tin_cnt) {
+ if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
+ tin = 0;
+
+ else if (TC_H_MAJ(skb->priority) == sch->handle &&
+ TC_H_MIN(skb->priority) > 0 &&
+ TC_H_MIN(skb->priority) <= q->tin_cnt)
tin = q->tin_order[TC_H_MIN(skb->priority) - 1];
- if (q->rate_flags & CAKE_FLAG_WASH)
- cake_wash_diffserv(skb);
- } else if (q->tin_mode != CAKE_DIFFSERV_BESTEFFORT) {
- /* extract the Diffserv Precedence field, if it exists */
- /* and clear DSCP bits if washing */
- tin = q->tin_index[cake_handle_diffserv(skb,
- q->rate_flags & CAKE_FLAG_WASH)];
+ else {
+ tin = q->tin_index[dscp];
+
if (unlikely(tin >= q->tin_cnt))
tin = 0;
- } else {
- tin = 0;
- if (q->rate_flags & CAKE_FLAG_WASH)
- cake_wash_diffserv(skb);
}
return &q->tins[tin];
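
Editor's note: cake_handle_diffserv() now pulls and makes writable exactly the header bytes it is about to touch before reading (and, when washing, clearing) the DSCP field, and cake_select_tin() reads that value once up front instead of washing in a separate helper. For reference, DSCP is just the top six bits of the IPv4 TOS / IPv6 traffic-class octet; a trivial standalone illustration:

#include <stdint.h>
#include <stdio.h>

/* DSCP occupies the upper six bits; the low two bits are ECN and are
 * preserved when the DSCP field is "washed" to zero. */
static uint8_t dscp_from_tos(uint8_t tos)  { return tos >> 2; }
static uint8_t washed_tos(uint8_t tos)     { return tos & 0x03; }

int main(void)
{
	uint8_t tos = 0xb8;   /* DSCP 46 (EF), ECN bits clear */

	printf("dscp=%u washed=0x%02x\n", dscp_from_tos(tos), washed_tos(tos));
	return 0;
}
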
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 1d143bc3f73d..4aa03588f87b 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1112,32 +1112,6 @@ static void sctp_cmd_send_msg(struct sctp_association *asoc,
}
-/* Sent the next ASCONF packet currently stored in the association.
- * This happens after the ASCONF_ACK was succeffully processed.
- */
-static void sctp_cmd_send_asconf(struct sctp_association *asoc)
-{
- struct net *net = sock_net(asoc->base.sk);
-
- /* Send the next asconf chunk from the addip chunk
- * queue.
- */
- if (!list_empty(&asoc->addip_chunk_list)) {
- struct list_head *entry = asoc->addip_chunk_list.next;
- struct sctp_chunk *asconf = list_entry(entry,
- struct sctp_chunk, list);
- list_del_init(entry);
-
- /* Hold the chunk until an ASCONF_ACK is received. */
- sctp_chunk_hold(asconf);
- if (sctp_primitive_ASCONF(net, asoc, asconf))
- sctp_chunk_free(asconf);
- else
- asoc->addip_last_asconf = asconf;
- }
-}
-
-
/* These three macros allow us to pull the debugging code out of the
* main flow of sctp_do_sm() to keep attention focused on the real
* functionality there.
@@ -1783,9 +1757,6 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type,
}
sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
break;
- case SCTP_CMD_SEND_NEXT_ASCONF:
- sctp_cmd_send_asconf(asoc);
- break;
case SCTP_CMD_PURGE_ASCONF_QUEUE:
sctp_asconf_queue_teardown(asoc);
break;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index c9ae3404b1bb..713a669d2058 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3824,6 +3824,29 @@ enum sctp_disposition sctp_sf_do_asconf(struct net *net,
return SCTP_DISPOSITION_CONSUME;
}
+static enum sctp_disposition sctp_send_next_asconf(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ struct sctp_association *asoc,
+ const union sctp_subtype type,
+ struct sctp_cmd_seq *commands)
+{
+ struct sctp_chunk *asconf;
+ struct list_head *entry;
+
+ if (list_empty(&asoc->addip_chunk_list))
+ return SCTP_DISPOSITION_CONSUME;
+
+ entry = asoc->addip_chunk_list.next;
+ asconf = list_entry(entry, struct sctp_chunk, list);
+
+ list_del_init(entry);
+ sctp_chunk_hold(asconf);
+ asoc->addip_last_asconf = asconf;
+
+ return sctp_sf_do_prm_asconf(net, ep, asoc, type, asconf, commands);
+}
+
/*
* ADDIP Section 4.3 General rules for address manipulation
* When building TLV parameters for the ASCONF Chunk that will add or
@@ -3915,14 +3938,10 @@ enum sctp_disposition sctp_sf_do_asconf_ack(struct net *net,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
- asconf_ack)) {
- /* Successfully processed ASCONF_ACK. We can
- * release the next asconf if we have one.
- */
- sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF,
- SCTP_NULL());
- return SCTP_DISPOSITION_CONSUME;
- }
+ asconf_ack))
+ return sctp_send_next_asconf(net, ep,
+ (struct sctp_association *)asoc,
+ type, commands);
abort = sctp_make_abort(asoc, asconf_ack,
sizeof(struct sctp_errhdr));
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index da1a676860ca..0f4e42792878 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -140,13 +140,11 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
/* We are going to append to the frags_list of head.
* Need to unshare the frag_list.
*/
- if (skb_has_frag_list(head)) {
- err = skb_unclone(head, GFP_ATOMIC);
- if (err) {
- STRP_STATS_INCR(strp->stats.mem_fail);
- desc->error = err;
- return 0;
- }
+ err = skb_unclone(head, GFP_ATOMIC);
+ if (err) {
+ STRP_STATS_INCR(strp->stats.mem_fail);
+ desc->error = err;
+ return 0;
}
if (unlikely(skb_shinfo(head)->frag_list)) {
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 12bb23b8e0c5..261131dfa1f1 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -54,6 +54,7 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail)
h->last_refresh = now;
}
+static inline int cache_is_valid(struct cache_head *h);
static void cache_fresh_locked(struct cache_head *head, time_t expiry,
struct cache_detail *detail);
static void cache_fresh_unlocked(struct cache_head *head,
@@ -105,6 +106,8 @@ static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
if (cache_is_expired(detail, tmp)) {
hlist_del_init_rcu(&tmp->cache_list);
detail->entries --;
+ if (cache_is_valid(tmp) == -EAGAIN)
+ set_bit(CACHE_NEGATIVE, &tmp->flags);
cache_fresh_locked(tmp, 0, detail);
freeme = tmp;
break;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 7754aa3e434f..f88c2bd1335a 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -486,8 +486,8 @@ xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg,
int flags, struct rpc_rqst *req)
{
struct xdr_buf *buf = &req->rq_private_buf;
- size_t want, read;
- ssize_t ret;
+ size_t want, uninitialized_var(read);
+ ssize_t uninitialized_var(ret);
xs_read_header(transport, buf);
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index bff241f03525..89993afe0fbd 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -909,7 +909,8 @@ static int tipc_nl_service_list(struct net *net, struct tipc_nl_msg *msg,
for (; i < TIPC_NAMETBL_SIZE; i++) {
head = &tn->nametbl->services[i];
- if (*last_type) {
+ if (*last_type ||
+ (!i && *last_key && (*last_lower == *last_key))) {
service = tipc_service_find(net, *last_type);
if (!service)
return -EPIPE;
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 4ad3586da8f0..340a6e7c43a7 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -267,8 +267,14 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
if (msg->rep_type)
tipc_tlv_init(msg->rep, msg->rep_type);
- if (cmd->header)
- (*cmd->header)(msg);
+ if (cmd->header) {
+ err = (*cmd->header)(msg);
+ if (err) {
+ kfree_skb(msg->rep);
+ msg->rep = NULL;
+ return err;
+ }
+ }
arg = nlmsg_new(0, GFP_KERNEL);
if (!arg) {
@@ -397,7 +403,12 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
if (!bearer)
return -EMSGSIZE;
- len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+ len = TLV_GET_DATA_LEN(msg->req);
+ len -= offsetof(struct tipc_bearer_config, name);
+ if (len <= 0)
+ return -EINVAL;
+
+ len = min_t(int, len, TIPC_MAX_BEARER_NAME);
if (!string_is_valid(b->name, len))
return -EINVAL;
@@ -766,7 +777,12 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
- len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+ len = TLV_GET_DATA_LEN(msg->req);
+ len -= offsetof(struct tipc_link_config, name);
+ if (len <= 0)
+ return -EINVAL;
+
+ len = min_t(int, len, TIPC_MAX_LINK_NAME);
if (!string_is_valid(lc->name, len))
return -EINVAL;
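
Editor's note: both compat handlers above had the same flaw: TLV_GET_DATA_LEN() can be smaller than the fixed part of the structure, so clamping it with min_t() alone still let string_is_valid() scan past the payload. The name length is now computed by subtracting the offset of the name field and rejecting anything non-positive before clamping. A standalone sketch of that order of operations (struct link_config here is a simplified stand-in, not the TIPC layout):

#include <stddef.h>
#include <stdio.h>

struct link_config { unsigned int value; char name[60]; };

static int name_len_from_tlv(int data_len, int name_max)
{
	/* Subtract the fixed header first; a non-positive result means the
	 * TLV is too short to contain any name bytes at all. */
	int len = data_len - (int)offsetof(struct link_config, name);

	if (len <= 0)
		return -1;                        /* malformed TLV */
	return len < name_max ? len : name_max;  /* then clamp    */
}

int main(void)
{
	printf("%d\n", name_len_from_tlv(2, 60));    /* too short: -1 */
	printf("%d\n", name_len_from_tlv(70, 60));   /* clamped: 60   */
	return 0;
}
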
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 4dca9161f99b..020477ff91a2 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -734,11 +734,11 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock,
switch (sk->sk_state) {
case TIPC_ESTABLISHED:
- case TIPC_CONNECTING:
if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
revents |= EPOLLOUT;
/* fall thru' */
case TIPC_LISTEN:
+ case TIPC_CONNECTING:
if (!skb_queue_empty(&sk->sk_receive_queue))
revents |= EPOLLIN | EPOLLRDNORM;
break;
@@ -2041,7 +2041,7 @@ static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
if (msg_data_sz(hdr))
return true;
/* Empty ACK-, - wake up sleeping connect() and drop */
- sk->sk_data_ready(sk);
+ sk->sk_state_change(sk);
msg_set_dest_droppable(hdr, 1);
return false;
}
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index d753e362d2d9..7d5136ecee78 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -52,8 +52,11 @@ static DEFINE_SPINLOCK(tls_device_lock);
static void tls_device_free_ctx(struct tls_context *ctx)
{
- if (ctx->tx_conf == TLS_HW)
+ if (ctx->tx_conf == TLS_HW) {
kfree(tls_offload_ctx_tx(ctx));
+ kfree(ctx->tx.rec_seq);
+ kfree(ctx->tx.iv);
+ }
if (ctx->rx_conf == TLS_HW)
kfree(tls_offload_ctx_rx(ctx));
@@ -216,6 +219,13 @@ void tls_device_sk_destruct(struct sock *sk)
}
EXPORT_SYMBOL(tls_device_sk_destruct);
+void tls_device_free_resources_tx(struct sock *sk)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+
+ tls_free_partial_record(sk, tls_ctx);
+}
+
static void tls_append_frag(struct tls_record_info *record,
struct page_frag *pfrag,
int size)
@@ -569,7 +579,7 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
{
struct strp_msg *rxm = strp_msg(skb);
- int err = 0, offset = rxm->offset, copy, nsg;
+ int err = 0, offset = rxm->offset, copy, nsg, data_len, pos;
struct sk_buff *skb_iter, *unused;
struct scatterlist sg[1];
char *orig_buf, *buf;
@@ -600,25 +610,42 @@ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
else
err = 0;
- copy = min_t(int, skb_pagelen(skb) - offset,
- rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE);
+ data_len = rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE;
- if (skb->decrypted)
- skb_store_bits(skb, offset, buf, copy);
+ if (skb_pagelen(skb) > offset) {
+ copy = min_t(int, skb_pagelen(skb) - offset, data_len);
- offset += copy;
- buf += copy;
+ if (skb->decrypted)
+ skb_store_bits(skb, offset, buf, copy);
+
+ offset += copy;
+ buf += copy;
+ }
+ pos = skb_pagelen(skb);
skb_walk_frags(skb, skb_iter) {
- copy = min_t(int, skb_iter->len,
- rxm->full_len - offset + rxm->offset -
- TLS_CIPHER_AES_GCM_128_TAG_SIZE);
+ int frag_pos;
+
+ /* Practically all frags must belong to msg if reencrypt
+ * is needed with current strparser and coalescing logic,
+ * but strparser may "get optimized", so let's be safe.
+ */
+ if (pos + skb_iter->len <= offset)
+ goto done_with_frag;
+ if (pos >= data_len + rxm->offset)
+ break;
+
+ frag_pos = offset - pos;
+ copy = min_t(int, skb_iter->len - frag_pos,
+ data_len + rxm->offset - offset);
if (skb_iter->decrypted)
- skb_store_bits(skb_iter, offset, buf, copy);
+ skb_store_bits(skb_iter, frag_pos, buf, copy);
offset += copy;
buf += copy;
+done_with_frag:
+ pos += skb_iter->len;
}
free_buf:
@@ -874,7 +901,9 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
goto release_netdev;
free_sw_resources:
+ up_read(&device_offload_lock);
tls_sw_free_resources_rx(sk);
+ down_read(&device_offload_lock);
release_ctx:
ctx->priv_ctx_rx = NULL;
release_netdev:
@@ -909,8 +938,6 @@ void tls_device_offload_cleanup_rx(struct sock *sk)
}
out:
up_read(&device_offload_lock);
- kfree(tls_ctx->rx.rec_seq);
- kfree(tls_ctx->rx.iv);
tls_sw_release_resources_rx(sk);
}
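
Editor's note: the reencrypt hunk above replaces per-fragment offset arithmetic with an absolute position counter: pos tracks where each fragment starts in the message, fragments entirely before the decrypted range are skipped, and the copy stops once the authentication tag is reached. The loop below reproduces that bookkeeping over plain integer segment lengths (segment sizes and the byte range are made-up values):

#include <stdio.h>

int main(void)
{
	int seg_len[] = { 100, 60, 40 };   /* stand-ins for skb fragments   */
	int offset = 120, len = 50;        /* byte range to overwrite       */
	int pos = 0;                       /* absolute start of the segment */

	for (int i = 0; i < 3; i++) {
		int start, copy;

		if (pos + seg_len[i] <= offset) {   /* entirely before range */
			pos += seg_len[i];
			continue;
		}
		if (pos >= offset + len)            /* past the end of range */
			break;

		start = offset > pos ? offset - pos : 0;
		copy  = seg_len[i] - start;
		if (copy > offset + len - (pos + start))
			copy = offset + len - (pos + start);

		printf("segment %d: rewrite bytes [%d, %d)\n", i, start, start + copy);
		pos += seg_len[i];
	}
	return 0;
}
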
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
index 450a6dbc5a88..426dd97725e4 100644
--- a/net/tls/tls_device_fallback.c
+++ b/net/tls/tls_device_fallback.c
@@ -193,18 +193,26 @@ static void update_chksum(struct sk_buff *skb, int headln)
static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
{
+ struct sock *sk = skb->sk;
+ int delta;
+
skb_copy_header(nskb, skb);
skb_put(nskb, skb->len);
memcpy(nskb->data, skb->data, headln);
- update_chksum(nskb, headln);
nskb->destructor = skb->destructor;
- nskb->sk = skb->sk;
+ nskb->sk = sk;
skb->destructor = NULL;
skb->sk = NULL;
- refcount_add(nskb->truesize - skb->truesize,
- &nskb->sk->sk_wmem_alloc);
+
+ update_chksum(nskb, headln);
+
+ delta = nskb->truesize - skb->truesize;
+ if (likely(delta < 0))
+ WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
+ else if (delta)
+ refcount_add(delta, &sk->sk_wmem_alloc);
}
/* This function may be called after the user socket is already
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 78cb4a584080..ce5dd79365a7 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -220,6 +220,26 @@ int tls_push_pending_closed_record(struct sock *sk,
return tls_ctx->push_pending_record(sk, flags);
}
+bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
+{
+ struct scatterlist *sg;
+
+ sg = ctx->partially_sent_record;
+ if (!sg)
+ return false;
+
+ while (1) {
+ put_page(sg_page(sg));
+ sk_mem_uncharge(sk, sg->length);
+
+ if (sg_is_last(sg))
+ break;
+ sg++;
+ }
+ ctx->partially_sent_record = NULL;
+ return true;
+}
+
static void tls_write_space(struct sock *sk)
{
struct tls_context *ctx = tls_get_ctx(sk);
@@ -278,13 +298,14 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
kfree(ctx->tx.rec_seq);
kfree(ctx->tx.iv);
tls_sw_free_resources_tx(sk);
+#ifdef CONFIG_TLS_DEVICE
+ } else if (ctx->tx_conf == TLS_HW) {
+ tls_device_free_resources_tx(sk);
+#endif
}
- if (ctx->rx_conf == TLS_SW) {
- kfree(ctx->rx.rec_seq);
- kfree(ctx->rx.iv);
+ if (ctx->rx_conf == TLS_SW)
tls_sw_free_resources_rx(sk);
- }
#ifdef CONFIG_TLS_DEVICE
if (ctx->rx_conf == TLS_HW)
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index bf5b54b513bc..839a0a0b5dfa 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1804,20 +1804,7 @@ void tls_sw_free_resources_tx(struct sock *sk)
/* Free up un-sent records in tx_list. First, free
* the partially sent record if any at head of tx_list.
*/
- if (tls_ctx->partially_sent_record) {
- struct scatterlist *sg = tls_ctx->partially_sent_record;
-
- while (1) {
- put_page(sg_page(sg));
- sk_mem_uncharge(sk, sg->length);
-
- if (sg_is_last(sg))
- break;
- sg++;
- }
-
- tls_ctx->partially_sent_record = NULL;
-
+ if (tls_free_partial_record(sk, tls_ctx)) {
rec = list_first_entry(&ctx->tx_list,
struct tls_rec, list);
list_del(&rec->list);
@@ -1843,6 +1830,9 @@ void tls_sw_release_resources_rx(struct sock *sk)
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+ kfree(tls_ctx->rx.rec_seq);
+ kfree(tls_ctx->rx.iv);
+
if (ctx->aead_recv) {
kfree_skb(ctx->recv_pkt);
ctx->recv_pkt = NULL;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index d91a408db113..156ce708b533 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -13596,7 +13596,8 @@ static const struct genl_ops nl80211_ops[] = {
.policy = nl80211_policy,
.flags = GENL_UNS_ADMIN_PERM,
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
- NL80211_FLAG_NEED_RTNL,
+ NL80211_FLAG_NEED_RTNL |
+ NL80211_FLAG_CLEAR_SKB,
},
{
.cmd = NL80211_CMD_DEAUTHENTICATE,
@@ -13647,7 +13648,8 @@ static const struct genl_ops nl80211_ops[] = {
.policy = nl80211_policy,
.flags = GENL_UNS_ADMIN_PERM,
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
- NL80211_FLAG_NEED_RTNL,
+ NL80211_FLAG_NEED_RTNL |
+ NL80211_FLAG_CLEAR_SKB,
},
{
.cmd = NL80211_CMD_UPDATE_CONNECT_PARAMS,
@@ -13655,7 +13657,8 @@ static const struct genl_ops nl80211_ops[] = {
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
- NL80211_FLAG_NEED_RTNL,
+ NL80211_FLAG_NEED_RTNL |
+ NL80211_FLAG_CLEAR_SKB,
},
{
.cmd = NL80211_CMD_DISCONNECT,
@@ -13684,7 +13687,8 @@ static const struct genl_ops nl80211_ops[] = {
.policy = nl80211_policy,
.flags = GENL_UNS_ADMIN_PERM,
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
- NL80211_FLAG_NEED_RTNL,
+ NL80211_FLAG_NEED_RTNL |
+ NL80211_FLAG_CLEAR_SKB,
},
{
.cmd = NL80211_CMD_DEL_PMKSA,
@@ -14036,7 +14040,8 @@ static const struct genl_ops nl80211_ops[] = {
.policy = nl80211_policy,
.flags = GENL_UNS_ADMIN_PERM,
.internal_flags = NL80211_FLAG_NEED_WIPHY |
- NL80211_FLAG_NEED_RTNL,
+ NL80211_FLAG_NEED_RTNL |
+ NL80211_FLAG_CLEAR_SKB,
},
{
.cmd = NL80211_CMD_SET_QOS_MAP,
@@ -14091,7 +14096,8 @@ static const struct genl_ops nl80211_ops[] = {
.doit = nl80211_set_pmk,
.policy = nl80211_policy,
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
- NL80211_FLAG_NEED_RTNL,
+ NL80211_FLAG_NEED_RTNL |
+ NL80211_FLAG_CLEAR_SKB,
},
{
.cmd = NL80211_CMD_DEL_PMK,
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index dd58b9909ac9..649c89946dec 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1298,6 +1298,16 @@ reg_intersect_dfs_region(const enum nl80211_dfs_regions dfs_region1,
return dfs_region1;
}
+static void reg_wmm_rules_intersect(const struct ieee80211_wmm_ac *wmm_ac1,
+ const struct ieee80211_wmm_ac *wmm_ac2,
+ struct ieee80211_wmm_ac *intersect)
+{
+ intersect->cw_min = max_t(u16, wmm_ac1->cw_min, wmm_ac2->cw_min);
+ intersect->cw_max = max_t(u16, wmm_ac1->cw_max, wmm_ac2->cw_max);
+ intersect->cot = min_t(u16, wmm_ac1->cot, wmm_ac2->cot);
+ intersect->aifsn = max_t(u8, wmm_ac1->aifsn, wmm_ac2->aifsn);
+}
+
/*
* Helper for regdom_intersect(), this does the real
* mathematical intersection fun
@@ -1312,6 +1322,8 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
struct ieee80211_freq_range *freq_range;
const struct ieee80211_power_rule *power_rule1, *power_rule2;
struct ieee80211_power_rule *power_rule;
+ const struct ieee80211_wmm_rule *wmm_rule1, *wmm_rule2;
+ struct ieee80211_wmm_rule *wmm_rule;
u32 freq_diff, max_bandwidth1, max_bandwidth2;
freq_range1 = &rule1->freq_range;
@@ -1322,6 +1334,10 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
power_rule2 = &rule2->power_rule;
power_rule = &intersected_rule->power_rule;
+ wmm_rule1 = &rule1->wmm_rule;
+ wmm_rule2 = &rule2->wmm_rule;
+ wmm_rule = &intersected_rule->wmm_rule;
+
freq_range->start_freq_khz = max(freq_range1->start_freq_khz,
freq_range2->start_freq_khz);
freq_range->end_freq_khz = min(freq_range1->end_freq_khz,
@@ -1365,6 +1381,29 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
intersected_rule->dfs_cac_ms = max(rule1->dfs_cac_ms,
rule2->dfs_cac_ms);
+ if (rule1->has_wmm && rule2->has_wmm) {
+ u8 ac;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ reg_wmm_rules_intersect(&wmm_rule1->client[ac],
+ &wmm_rule2->client[ac],
+ &wmm_rule->client[ac]);
+ reg_wmm_rules_intersect(&wmm_rule1->ap[ac],
+ &wmm_rule2->ap[ac],
+ &wmm_rule->ap[ac]);
+ }
+
+ intersected_rule->has_wmm = true;
+ } else if (rule1->has_wmm) {
+ *wmm_rule = *wmm_rule1;
+ intersected_rule->has_wmm = true;
+ } else if (rule2->has_wmm) {
+ *wmm_rule = *wmm_rule2;
+ intersected_rule->has_wmm = true;
+ } else {
+ intersected_rule->has_wmm = false;
+ }
+
if (!is_valid_reg_rule(intersected_rule))
return -EINVAL;
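
Editor's note: reg_wmm_rules_intersect() keeps the more restrictive value of each WMM parameter when two regulatory rules overlap: the larger contention-window bounds and AIFSN, the smaller TXOP (cot); the surrounding hunk then marks the intersected rule as carrying WMM data whenever at least one side did. The same "most restrictive wins" merge over plain integers (field names mirror the kernel struct, the values are invented):

#include <stdio.h>

struct wmm_ac { unsigned short cw_min, cw_max, cot; unsigned char aifsn; };

static struct wmm_ac wmm_intersect(struct wmm_ac a, struct wmm_ac b)
{
	struct wmm_ac r;

	r.cw_min = a.cw_min > b.cw_min ? a.cw_min : b.cw_min;  /* max */
	r.cw_max = a.cw_max > b.cw_max ? a.cw_max : b.cw_max;  /* max */
	r.cot    = a.cot    < b.cot    ? a.cot    : b.cot;     /* min */
	r.aifsn  = a.aifsn  > b.aifsn  ? a.aifsn  : b.aifsn;   /* max */
	return r;
}

int main(void)
{
	struct wmm_ac x = { 3, 7, 2000, 2 }, y = { 4, 6, 1500, 3 };
	struct wmm_ac r = wmm_intersect(x, y);

	printf("cw_min=%u cw_max=%u cot=%u aifsn=%u\n",
	       r.cw_min, r.cw_max, r.cot, r.aifsn);
	return 0;
}
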
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index 37e1fe180769..9c767c68ed3a 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -189,9 +189,6 @@ static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
static void xdp_umem_release(struct xdp_umem *umem)
{
- struct task_struct *task;
- struct mm_struct *mm;
-
xdp_umem_clear_dev(umem);
if (umem->fq) {
@@ -208,21 +205,10 @@ static void xdp_umem_release(struct xdp_umem *umem)
xdp_umem_unpin_pages(umem);
- task = get_pid_task(umem->pid, PIDTYPE_PID);
- put_pid(umem->pid);
- if (!task)
- goto out;
- mm = get_task_mm(task);
- put_task_struct(task);
- if (!mm)
- goto out;
-
- mmput(mm);
kfree(umem->pages);
umem->pages = NULL;
xdp_umem_unaccount_pages(umem);
-out:
kfree(umem);
}
@@ -351,7 +337,6 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
if (size_chk < 0)
return -EINVAL;
- umem->pid = get_task_pid(current, PIDTYPE_PID);
umem->address = (unsigned long)addr;
umem->chunk_mask = ~((u64)chunk_size - 1);
umem->size = size;
@@ -367,7 +352,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
err = xdp_umem_account_pages(umem);
if (err)
- goto out;
+ return err;
err = xdp_umem_pin_pages(umem);
if (err)
@@ -386,8 +371,6 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
out_account:
xdp_umem_unaccount_pages(umem);
-out:
- put_pid(umem->pid);
return err;
}