Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan.c | 8
-rw-r--r--  net/8021q/vlan.h | 2
-rw-r--r--  net/8021q/vlan_dev.c | 20
-rw-r--r--  net/9p/client.c | 4
-rw-r--r--  net/ax25/ax25_subr.c | 2
-rw-r--r--  net/bluetooth/hci_sock.c | 3
-rw-r--r--  net/bluetooth/smp.c | 35
-rw-r--r--  net/bridge/br_input.c | 1
-rw-r--r--  net/bridge/br_netfilter_hooks.c | 21
-rw-r--r--  net/bridge/br_netlink.c | 38
-rw-r--r--  net/bridge/br_stp_if.c | 2
-rw-r--r--  net/bridge/br_stp_timer.c | 2
-rw-r--r--  net/caif/cfpkt_skbuff.c | 6
-rw-r--r--  net/can/af_can.c | 12
-rw-r--r--  net/can/af_can.h | 3
-rw-r--r--  net/can/bcm.c | 27
-rw-r--r--  net/can/gw.c | 2
-rw-r--r--  net/can/raw.c | 4
-rw-r--r--  net/ceph/messenger.c | 6
-rw-r--r--  net/ceph/osdmap.c | 1
-rw-r--r--  net/core/dev.c | 135
-rw-r--r--  net/core/dev_ioctl.c | 1
-rw-r--r--  net/core/dst.c | 37
-rw-r--r--  net/core/ethtool.c | 9
-rw-r--r--  net/core/neighbour.c | 3
-rw-r--r--  net/core/netpoll.c | 10
-rw-r--r--  net/core/rtnetlink.c | 46
-rw-r--r--  net/core/skbuff.c | 30
-rw-r--r--  net/core/sock.c | 28
-rw-r--r--  net/dccp/ccids/ccid2.c | 1
-rw-r--r--  net/dccp/feat.c | 7
-rw-r--r--  net/dccp/input.c | 3
-rw-r--r--  net/dccp/ipv4.c | 4
-rw-r--r--  net/dccp/ipv6.c | 15
-rw-r--r--  net/dccp/minisocks.c | 25
-rw-r--r--  net/decnet/dn_route.c | 14
-rw-r--r--  net/decnet/netfilter/dn_rtmsg.c | 4
-rw-r--r--  net/dsa/slave.c | 8
-rw-r--r--  net/ethernet/eth.c | 1
-rw-r--r--  net/ipv4/af_inet.c | 2
-rw-r--r--  net/ipv4/arp.c | 12
-rw-r--r--  net/ipv4/cipso_ipv4.c | 4
-rw-r--r--  net/ipv4/fib_frontend.c | 27
-rw-r--r--  net/ipv4/fib_semantics.c | 28
-rw-r--r--  net/ipv4/fib_trie.c | 26
-rw-r--r--  net/ipv4/igmp.c | 22
-rw-r--r--  net/ipv4/inet_connection_sock.c | 2
-rw-r--r--  net/ipv4/ip_output.c | 7
-rw-r--r--  net/ipv4/ip_sockglue.c | 17
-rw-r--r--  net/ipv4/ping.c | 7
-rw-r--r--  net/ipv4/raw.c | 3
-rw-r--r--  net/ipv4/route.c | 18
-rw-r--r--  net/ipv4/syncookies.c | 1
-rw-r--r--  net/ipv4/tcp.c | 16
-rw-r--r--  net/ipv4/tcp_cong.c | 1
-rw-r--r--  net/ipv4/tcp_fastopen.c | 3
-rw-r--r--  net/ipv4/tcp_input.c | 27
-rw-r--r--  net/ipv4/tcp_ipv4.c | 10
-rw-r--r--  net/ipv4/tcp_lp.c | 6
-rw-r--r--  net/ipv4/tcp_minisocks.c | 2
-rw-r--r--  net/ipv4/tcp_output.c | 28
-rw-r--r--  net/ipv4/tcp_timer.c | 9
-rw-r--r--  net/ipv4/udp.c | 2
-rw-r--r--  net/ipv4/udp_offload.c | 2
-rw-r--r--  net/ipv6/addrconf.c | 111
-rw-r--r--  net/ipv6/datagram.c | 14
-rw-r--r--  net/ipv6/fib6_rules.c | 22
-rw-r--r--  net/ipv6/ip6_fib.c | 10
-rw-r--r--  net/ipv6/ip6_gre.c | 41
-rw-r--r--  net/ipv6/ip6_offload.c | 9
-rw-r--r--  net/ipv6/ip6_output.c | 43
-rw-r--r--  net/ipv6/ip6_tunnel.c | 68
-rw-r--r--  net/ipv6/ip6_vti.c | 4
-rw-r--r--  net/ipv6/ip6mr.c | 13
-rw-r--r--  net/ipv6/ndisc.c | 2
-rw-r--r--  net/ipv6/output_core.c | 20
-rw-r--r--  net/ipv6/ping.c | 2
-rw-r--r--  net/ipv6/raw.c | 7
-rw-r--r--  net/ipv6/route.c | 56
-rw-r--r--  net/ipv6/sit.c | 1
-rw-r--r--  net/ipv6/syncookies.c | 1
-rw-r--r--  net/ipv6/tcp_ipv6.c | 45
-rw-r--r--  net/ipv6/udp.c | 4
-rw-r--r--  net/ipv6/udp_offload.c | 8
-rw-r--r--  net/ipv6/xfrm6_mode_ro.c | 2
-rw-r--r--  net/ipv6/xfrm6_mode_transport.c | 2
-rw-r--r--  net/ipx/af_ipx.c | 5
-rw-r--r--  net/irda/irqueue.c | 34
-rw-r--r--  net/key/af_key.c | 110
-rw-r--r--  net/l2tp/l2tp_core.c | 8
-rw-r--r--  net/l2tp/l2tp_core.h | 4
-rw-r--r--  net/l2tp/l2tp_debugfs.c | 10
-rw-r--r--  net/l2tp/l2tp_ip.c | 29
-rw-r--r--  net/l2tp/l2tp_ip6.c | 2
-rw-r--r--  net/l2tp/l2tp_netlink.c | 7
-rw-r--r--  net/l2tp/l2tp_ppp.c | 19
-rw-r--r--  net/llc/llc_conn.c | 3
-rw-r--r--  net/llc/llc_sap.c | 3
-rw-r--r--  net/mac80211/agg-rx.c | 29
-rw-r--r--  net/mac80211/agg-tx.c | 53
-rw-r--r--  net/mac80211/driver-ops.c | 10
-rw-r--r--  net/mac80211/driver-ops.h | 4
-rw-r--r--  net/mac80211/ibss.c | 6
-rw-r--r--  net/mac80211/main.c | 13
-rw-r--r--  net/mac80211/mesh.c | 2
-rw-r--r--  net/mac80211/pm.c | 1
-rw-r--r--  net/mac80211/rx.c | 27
-rw-r--r--  net/mac80211/trace.h | 43
-rw-r--r--  net/mac80211/wpa.c | 9
-rw-r--r--  net/mpls/af_mpls.c | 1
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 19
-rw-r--r--  net/netfilter/nf_conntrack_extend.c | 13
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 4
-rw-r--r--  net/netfilter/xt_IDLETIMER.c | 4
-rw-r--r--  net/netfilter/xt_TCPMSS.c | 6
-rw-r--r--  net/netfilter/xt_qtaguid.c | 24
-rw-r--r--  net/netlink/Kconfig | 9
-rw-r--r--  net/netlink/af_netlink.c | 753
-rw-r--r--  net/netlink/af_netlink.h | 15
-rw-r--r--  net/netlink/diag.c | 39
-rw-r--r--  net/nfc/core.c | 31
-rw-r--r--  net/nfc/llcp_sock.c | 9
-rw-r--r--  net/nfc/nci/core.c | 3
-rw-r--r--  net/nfc/netlink.c | 4
-rw-r--r--  net/openvswitch/conntrack.c | 10
-rw-r--r--  net/openvswitch/flow_netlink.c | 2
-rw-r--r--  net/packet/af_packet.c | 110
-rw-r--r--  net/rds/cong.c | 4
-rw-r--r--  net/rds/tcp_listen.c | 2
-rw-r--r--  net/rpmsg/rpmsg_proto.c | 19
-rw-r--r--  net/rxrpc/ar-key.c | 64
-rw-r--r--  net/sched/act_api.c | 5
-rw-r--r--  net/sched/act_connmark.c | 3
-rw-r--r--  net/sched/act_ipt.c | 2
-rw-r--r--  net/sched/act_mirred.c | 5
-rw-r--r--  net/sched/sch_api.c | 3
-rw-r--r--  net/sched/sch_hhf.c | 8
-rw-r--r--  net/sched/sch_mq.c | 10
-rw-r--r--  net/sched/sch_mqprio.c | 19
-rw-r--r--  net/sched/sch_sfq.c | 3
-rw-r--r--  net/sctp/input.c | 16
-rw-r--r--  net/sctp/ipv6.c | 49
-rw-r--r--  net/sctp/socket.c | 16
-rw-r--r--  net/socket.c | 8
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c | 7
-rw-r--r--  net/sunrpc/auth_gss/gss_rpc_xdr.c | 2
-rw-r--r--  net/sunrpc/clnt.c | 5
-rw-r--r--  net/sunrpc/sunrpc_syms.c | 1
-rw-r--r--  net/tipc/bearer.c | 13
-rw-r--r--  net/tipc/bearer.h | 13
-rw-r--r--  net/tipc/core.c | 1
-rw-r--r--  net/tipc/core.h | 3
-rw-r--r--  net/tipc/name_distr.c | 24
-rw-r--r--  net/tipc/node.c | 28
-rw-r--r--  net/tipc/server.c | 13
-rw-r--r--  net/tipc/socket.c | 56
-rw-r--r--  net/tipc/udp_media.c | 7
-rw-r--r--  net/unix/af_unix.c | 34
-rw-r--r--  net/unix/garbage.c | 17
-rw-r--r--  net/vmw_vsock/vmci_transport.c | 4
-rw-r--r--  net/wireless/nl80211.c | 129
-rw-r--r--  net/xfrm/xfrm_policy.c | 66
-rw-r--r--  net/xfrm/xfrm_user.c | 9
163 files changed, 1695 insertions, 1807 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index ad8d6e6b87ca..5e4199d5a388 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -278,7 +278,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
 	return 0;
 
 out_free_newdev:
-	free_netdev(new_dev);
+	if (new_dev->reg_state == NETREG_UNINITIALIZED)
+		free_netdev(new_dev);
 	return err;
 }
 
@@ -291,6 +292,10 @@ static void vlan_sync_address(struct net_device *dev,
 	if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
 		return;
 
+	/* vlan continues to inherit address of lower device */
+	if (vlan_dev_inherit_address(vlandev, dev))
+		goto out;
+
 	/* vlan address was different from the old address and is equal to
 	 * the new address */
 	if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
@@ -303,6 +308,7 @@ static void vlan_sync_address(struct net_device *dev,
 	    !ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
 		dev_uc_add(dev, vlandev->dev_addr);
 
+out:
 	ether_addr_copy(vlan->real_dev_addr, dev->dev_addr);
 }
 
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 9d010a09ab98..cc1557978066 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -109,6 +109,8 @@ int vlan_check_real_dev(struct net_device *real_dev,
 void vlan_setup(struct net_device *dev);
 int register_vlan_dev(struct net_device *dev);
 void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
+bool vlan_dev_inherit_address(struct net_device *dev,
+			      struct net_device *real_dev);
 
 static inline u32 vlan_get_ingress_priority(struct net_device *dev,
 					    u16 vlan_tci)
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index fded86508117..ca4dc9031073 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -244,6 +244,17 @@ void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)
 	strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23);
 }
 
+bool vlan_dev_inherit_address(struct net_device *dev,
+			      struct net_device *real_dev)
+{
+	if (dev->addr_assign_type != NET_ADDR_STOLEN)
+		return false;
+
+	ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+	return true;
+}
+
 static int vlan_dev_open(struct net_device *dev)
 {
 	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
@@ -254,7 +265,8 @@ static int vlan_dev_open(struct net_device *dev)
 	    !(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
 		return -ENETDOWN;
 
-	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) {
+	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr) &&
+	    !vlan_dev_inherit_address(dev, real_dev)) {
 		err = dev_uc_add(real_dev, dev->dev_addr);
 		if (err < 0)
 			goto out;
@@ -558,8 +570,10 @@ static int vlan_dev_init(struct net_device *dev)
 	/* ipv6 shared card related stuff */
 	dev->dev_id = real_dev->dev_id;
 
-	if (is_zero_ether_addr(dev->dev_addr))
-		eth_hw_addr_inherit(dev, real_dev);
+	if (is_zero_ether_addr(dev->dev_addr)) {
+		ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+		dev->addr_assign_type = NET_ADDR_STOLEN;
+	}
 	if (is_zero_ether_addr(dev->broadcast))
 		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
 
diff --git a/net/9p/client.c b/net/9p/client.c
index ea79ee9a7348..f5feac4ff4ec 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -2101,6 +2101,10 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
 			trace_9p_protocol_dump(clnt, req->rc);
 			goto free_and_error;
 		}
+		if (rsize < count) {
+			pr_err("bogus RREADDIR count (%d > %d)\n", count, rsize);
+			count = rsize;
+		}
 
 		p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);
 
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
index 655a7d4c96e1..983f0b5e14f1 100644
--- a/net/ax25/ax25_subr.c
+++ b/net/ax25/ax25_subr.c
@@ -264,7 +264,7 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
 {
 	ax25_clear_queues(ax25);
 
-	if (!sock_flag(ax25->sk, SOCK_DESTROY))
+	if (!ax25->sk || !sock_flag(ax25->sk, SOCK_DESTROY))
 		ax25_stop_heartbeat(ax25);
 	ax25_stop_t1timer(ax25);
 	ax25_stop_t2timer(ax25);
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index b1eb8c09a660..c842f40c1173 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -1164,7 +1164,8 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 	if (msg->msg_flags & MSG_OOB)
 		return -EOPNOTSUPP;
 
-	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
+	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
+			       MSG_CMSG_COMPAT))
 		return -EINVAL;
 
 	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 4b175df35184..906f88550cd8 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -23,6 +23,7 @@
 #include <linux/debugfs.h>
 #include <linux/crypto.h>
 #include <linux/scatterlist.h>
+#include <crypto/algapi.h>
 #include <crypto/b128ops.h>
 
 #include <net/bluetooth/bluetooth.h>
@@ -524,7 +525,7 @@ bool smp_irk_matches(struct hci_dev *hdev, const u8 irk[16],
 	if (err)
 		return false;
 
-	return !memcmp(bdaddr->b, hash, 3);
+	return !crypto_memneq(bdaddr->b, hash, 3);
 }
 
 int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa)
@@ -577,7 +578,7 @@ int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16])
 		/* This is unlikely, but we need to check that
 		 * we didn't accidentially generate a debug key.
 		 */
-		if (memcmp(smp->local_sk, debug_sk, 32))
+		if (crypto_memneq(smp->local_sk, debug_sk, 32))
 			break;
 	}
 	smp->debug_key = false;
@@ -991,7 +992,7 @@ static u8 smp_random(struct smp_chan *smp)
 	if (ret)
 		return SMP_UNSPECIFIED;
 
-	if (memcmp(smp->pcnf, confirm, sizeof(smp->pcnf)) != 0) {
+	if (crypto_memneq(smp->pcnf, confirm, sizeof(smp->pcnf))) {
 		BT_ERR("Pairing failed (confirmation values mismatch)");
 		return SMP_CONFIRM_FAILED;
 	}
@@ -1491,7 +1492,7 @@ static u8 sc_passkey_round(struct smp_chan *smp, u8 smp_op)
 			   smp->rrnd, r, cfm))
 		return SMP_UNSPECIFIED;
 
-	if (memcmp(smp->pcnf, cfm, 16))
+	if (crypto_memneq(smp->pcnf, cfm, 16))
 		return SMP_CONFIRM_FAILED;
 
 	smp->passkey_round++;
@@ -1875,7 +1876,7 @@ static u8 sc_send_public_key(struct smp_chan *smp)
 			/* This is unlikely, but we need to check that
 			 * we didn't accidentially generate a debug key.
 			 */
-			if (memcmp(smp->local_sk, debug_sk, 32))
+			if (crypto_memneq(smp->local_sk, debug_sk, 32))
 				break;
 		}
 	}
@@ -2140,7 +2141,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
 		if (err)
 			return SMP_UNSPECIFIED;
 
-		if (memcmp(smp->pcnf, cfm, 16))
+		if (crypto_memneq(smp->pcnf, cfm, 16))
 			return SMP_CONFIRM_FAILED;
 	} else {
 		smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
@@ -2621,7 +2622,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
 		if (err)
 			return SMP_UNSPECIFIED;
 
-		if (memcmp(cfm.confirm_val, smp->pcnf, 16))
+		if (crypto_memneq(cfm.confirm_val, smp->pcnf, 16))
 			return SMP_CONFIRM_FAILED;
 	}
 
@@ -2654,7 +2655,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
 	else
 		hcon->pending_sec_level = BT_SECURITY_FIPS;
 
-	if (!memcmp(debug_pk, smp->remote_pk, 64))
+	if (!crypto_memneq(debug_pk, smp->remote_pk, 64))
 		set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags);
 
 	if (smp->method == DSP_PASSKEY) {
@@ -2753,7 +2754,7 @@ static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb)
 	if (err)
 		return SMP_UNSPECIFIED;
 
-	if (memcmp(check->e, e, 16))
+	if (crypto_memneq(check->e, e, 16))
 		return SMP_DHKEY_CHECK_FAILED;
 
 	if (!hcon->out) {
@@ -3463,7 +3464,7 @@ static int __init test_ah(struct crypto_blkcipher *tfm_aes)
 	if (err)
 		return err;
 
-	if (memcmp(res, exp, 3))
+	if (crypto_memneq(res, exp, 3))
 		return -EINVAL;
 
 	return 0;
@@ -3493,7 +3494,7 @@ static int __init test_c1(struct crypto_blkcipher *tfm_aes)
 	if (err)
 		return err;
 
-	if (memcmp(res, exp, 16))
+	if (crypto_memneq(res, exp, 16))
 		return -EINVAL;
 
 	return 0;
@@ -3518,7 +3519,7 @@ static int __init test_s1(struct crypto_blkcipher *tfm_aes)
 	if (err)
 		return err;
 
-	if (memcmp(res, exp, 16))
+	if (crypto_memneq(res, exp, 16))
 		return -EINVAL;
 
 	return 0;
@@ -3550,7 +3551,7 @@ static int __init test_f4(struct crypto_hash *tfm_cmac)
 	if (err)
 		return err;
 
-	if (memcmp(res, exp, 16))
+	if (crypto_memneq(res, exp, 16))
 		return -EINVAL;
 
 	return 0;
@@ -3584,10 +3585,10 @@ static int __init test_f5(struct crypto_hash *tfm_cmac)
 	if (err)
 		return err;
 
-	if (memcmp(mackey, exp_mackey, 16))
+	if (crypto_memneq(mackey, exp_mackey, 16))
 		return -EINVAL;
 
-	if (memcmp(ltk, exp_ltk, 16))
+	if (crypto_memneq(ltk, exp_ltk, 16))
 		return -EINVAL;
 
 	return 0;
@@ -3620,7 +3621,7 @@ static int __init test_f6(struct crypto_hash *tfm_cmac)
 	if (err)
 		return err;
 
-	if (memcmp(res, exp, 16))
+	if (crypto_memneq(res, exp, 16))
 		return -EINVAL;
 
 	return 0;
@@ -3674,7 +3675,7 @@ static int __init test_h6(struct crypto_hash *tfm_cmac)
 	if (err)
 		return err;
 
-	if (memcmp(res, exp, 16))
+	if (crypto_memneq(res, exp, 16))
 		return -EINVAL;
 
 	return 0;
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index f7fba74108a9..e24754a0e052 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -29,6 +29,7 @@ EXPORT_SYMBOL(br_should_route_hook);
 static int
 br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
+	br_drop_fake_rtable(skb);
 	return netif_receive_skb(skb);
 }
 
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 7ddbe7ec81d6..97fc19f001bf 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -516,21 +516,6 @@ static unsigned int br_nf_pre_routing(void *priv,
 }
 
 
-/* PF_BRIDGE/LOCAL_IN ************************************************/
-/* The packet is locally destined, which requires a real
- * dst_entry, so detach the fake one.  On the way up, the
- * packet would pass through PRE_ROUTING again (which already
- * took place when the packet entered the bridge), but we
- * register an IPv4 PRE_ROUTING 'sabotage' hook that will
- * prevent this from happening. */
-static unsigned int br_nf_local_in(void *priv,
-				   struct sk_buff *skb,
-				   const struct nf_hook_state *state)
-{
-	br_drop_fake_rtable(skb);
-	return NF_ACCEPT;
-}
-
 /* PF_BRIDGE/FORWARD *************************************************/
 static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
@@ -901,12 +886,6 @@ static struct nf_hook_ops br_nf_ops[] __read_mostly = {
 		.priority = NF_BR_PRI_BRNF,
 	},
 	{
-		.hook = br_nf_local_in,
-		.pf = NFPROTO_BRIDGE,
-		.hooknum = NF_BR_LOCAL_IN,
-		.priority = NF_BR_PRI_BRNF,
-	},
-	{
 		.hook = br_nf_forward_ip,
 		.pf = NFPROTO_BRIDGE,
 		.hooknum = NF_BR_FORWARD,
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 40197ff8918a..ff8bb41d713f 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -768,23 +768,16 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
 			return -EPROTONOSUPPORT;
 		}
 	}
-#endif
 
-	return 0;
-}
-
-static int br_dev_newlink(struct net *src_net, struct net_device *dev,
-			  struct nlattr *tb[], struct nlattr *data[])
-{
-	struct net_bridge *br = netdev_priv(dev);
+	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
+		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);
 
-	if (tb[IFLA_ADDRESS]) {
-		spin_lock_bh(&br->lock);
-		br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
-		spin_unlock_bh(&br->lock);
+		if (defpvid >= VLAN_VID_MASK)
+			return -EINVAL;
 	}
+#endif
 
-	return register_netdevice(dev);
+	return 0;
 }
 
 static int br_port_slave_changelink(struct net_device *brdev,
@@ -1068,6 +1061,25 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
 	return 0;
 }
 
+static int br_dev_newlink(struct net *src_net, struct net_device *dev,
+			  struct nlattr *tb[], struct nlattr *data[])
+{
+	struct net_bridge *br = netdev_priv(dev);
+	int err;
+
+	if (tb[IFLA_ADDRESS]) {
+		spin_lock_bh(&br->lock);
+		br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
+		spin_unlock_bh(&br->lock);
+	}
+
+	err = br_changelink(dev, tb, data);
+	if (err)
+		return err;
+
+	return register_netdevice(dev);
+}
+
 static size_t br_get_size(const struct net_device *brdev)
 {
 	return nla_total_size(sizeof(u32)) +	/* IFLA_BR_FORWARD_DELAY */
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 8a7ada8bb947..bcb4559e735d 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -166,6 +166,8 @@ static void br_stp_start(struct net_bridge *br)
 		br_debug(br, "using kernel STP\n");
 
 		/* To start timers on any ports left in blocking */
+		if (br->dev->flags & IFF_UP)
+			mod_timer(&br->hello_timer, jiffies + br->hello_time);
 		br_port_state_selection(br);
 	}
 
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index 5f0f5af0ec35..7dbe6a5c31eb 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -40,7 +40,7 @@ static void br_hello_timer_expired(unsigned long arg)
 	if (br->dev->flags & IFF_UP) {
 		br_config_bpdu_generation(br);
 
-		if (br->stp_enabled != BR_USER_STP)
+		if (br->stp_enabled == BR_KERNEL_STP)
 			mod_timer(&br->hello_timer,
 				  round_jiffies(jiffies + br->hello_time));
 	}
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 59ce1fcc220c..71b6ab240dea 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -81,11 +81,7 @@ static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx)
 {
 	struct sk_buff *skb;
 
-	if (likely(in_interrupt()))
-		skb = alloc_skb(len + pfx, GFP_ATOMIC);
-	else
-		skb = alloc_skb(len + pfx, GFP_KERNEL);
-
+	skb = alloc_skb(len + pfx, GFP_ATOMIC);
 	if (unlikely(skb == NULL))
 		return NULL;
 
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 166d436196c1..928f58064098 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -445,6 +445,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
  * @func: callback function on filter match
  * @data: returned parameter for callback function
  * @ident: string for calling module identification
+ * @sk: socket pointer (might be NULL)
  *
  * Description:
  *  Invokes the callback function with the received sk_buff and the given
@@ -468,7 +469,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
  */
 int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
 		    void (*func)(struct sk_buff *, void *), void *data,
-		    char *ident)
+		    char *ident, struct sock *sk)
 {
 	struct receiver *r;
 	struct hlist_head *rl;
@@ -496,6 +497,7 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
 		r->func = func;
 		r->data = data;
 		r->ident = ident;
+		r->sk = sk;
 
 		hlist_add_head_rcu(&r->list, rl);
 		d->entries++;
@@ -520,8 +522,11 @@ EXPORT_SYMBOL(can_rx_register);
 static void can_rx_delete_receiver(struct rcu_head *rp)
 {
 	struct receiver *r = container_of(rp, struct receiver, rcu);
+	struct sock *sk = r->sk;
 
 	kmem_cache_free(rcv_cache, r);
+	if (sk)
+		sock_put(sk);
 }
 
 /**
@@ -596,8 +601,11 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
 	spin_unlock(&can_rcvlists_lock);
 
 	/* schedule the receiver item for deletion */
-	if (r)
+	if (r) {
+		if (r->sk)
+			sock_hold(r->sk);
 		call_rcu(&r->rcu, can_rx_delete_receiver);
+	}
 }
 EXPORT_SYMBOL(can_rx_unregister);
 
diff --git a/net/can/af_can.h b/net/can/af_can.h
index fca0fe9fc45a..b86f5129e838 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -50,13 +50,14 @@
 
 struct receiver {
 	struct hlist_node list;
-	struct rcu_head rcu;
 	canid_t can_id;
 	canid_t mask;
 	unsigned long matches;
 	void (*func)(struct sk_buff *, void *);
 	void *data;
 	char *ident;
+	struct sock *sk;
+	struct rcu_head rcu;
 };
 
 #define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 8ef1afacad82..4ccfd356baed 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -710,14 +710,23 @@ static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id,
 
 static void bcm_remove_op(struct bcm_op *op)
 {
-	hrtimer_cancel(&op->timer);
-	hrtimer_cancel(&op->thrtimer);
-
-	if (op->tsklet.func)
-		tasklet_kill(&op->tsklet);
+	if (op->tsklet.func) {
+		while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) ||
+		       test_bit(TASKLET_STATE_RUN, &op->tsklet.state) ||
+		       hrtimer_active(&op->timer)) {
+			hrtimer_cancel(&op->timer);
+			tasklet_kill(&op->tsklet);
+		}
+	}
 
-	if (op->thrtsklet.func)
-		tasklet_kill(&op->thrtsklet);
+	if (op->thrtsklet.func) {
+		while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) ||
+		       test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) ||
+		       hrtimer_active(&op->thrtimer)) {
+			hrtimer_cancel(&op->thrtimer);
+			tasklet_kill(&op->thrtsklet);
+		}
+	}
 
 	if ((op->frames) && (op->frames != &op->sframe))
 		kfree(op->frames);
@@ -1170,7 +1179,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 			err = can_rx_register(dev, op->can_id,
 					      REGMASK(op->can_id),
 					      bcm_rx_handler, op,
-					      "bcm");
+					      "bcm", sk);
 
 			op->rx_reg_dev = dev;
 			dev_put(dev);
@@ -1179,7 +1188,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
 		} else
 			err = can_rx_register(NULL, op->can_id,
 					      REGMASK(op->can_id),
-					      bcm_rx_handler, op, "bcm");
+					      bcm_rx_handler, op, "bcm", sk);
 		if (err) {
 			/* this bcm rx op is broken -> remove it */
 			list_del(&op->list);
diff --git a/net/can/gw.c b/net/can/gw.c
index 455168718c2e..77c8af4047ef 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -442,7 +442,7 @@ static inline int cgw_register_filter(struct cgw_job *gwj)
 {
 	return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id,
 			       gwj->ccgw.filter.can_mask, can_can_gw_rcv,
-			       gwj, "gw");
+			       gwj, "gw", NULL);
 }
 
 static inline void cgw_unregister_filter(struct cgw_job *gwj)
diff --git a/net/can/raw.c b/net/can/raw.c
index 56af689ca999..e9403a26a1d5 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -190,7 +190,7 @@ static int raw_enable_filters(struct net_device *dev, struct sock *sk,
 	for (i = 0; i < count; i++) {
 		err = can_rx_register(dev, filter[i].can_id,
 				      filter[i].can_mask,
-				      raw_rcv, sk, "raw");
+				      raw_rcv, sk, "raw", sk);
 		if (err) {
 			/* clean up successfully registered filters */
 			while (--i >= 0)
@@ -211,7 +211,7 @@ static int raw_enable_errfilter(struct net_device *dev, struct sock *sk,
 
 	if (err_mask)
 		err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG,
-				      raw_rcv, sk, "raw");
+				      raw_rcv, sk, "raw", sk);
 
 	return err;
 }
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index b8d927c56494..a6b2f2138c9d 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -7,6 +7,7 @@
 #include <linux/kthread.h>
 #include <linux/net.h>
 #include <linux/nsproxy.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/socket.h>
 #include <linux/string.h>
@@ -478,11 +479,16 @@ static int ceph_tcp_connect(struct ceph_connection *con)
 {
 	struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
 	struct socket *sock;
+	unsigned int noio_flag;
 	int ret;
 
 	BUG_ON(con->sock);
+
+	/* sock_create_kern() allocates with GFP_KERNEL */
+	noio_flag = memalloc_noio_save();
 	ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family,
 			       SOCK_STREAM, IPPROTO_TCP, &sock);
+	memalloc_noio_restore(noio_flag);
 	if (ret)
 		return ret;
 	sock->sk->sk_allocation = GFP_NOFS;
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index ddc3573894b0..bc95e48d5cfb 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -1265,7 +1265,6 @@ static int decode_new_up_state_weight(void **p, void *end,
 		if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
 		    (xorstate & CEPH_OSD_EXISTS)) {
 			pr_info("osd%d does not exist\n", osd);
-			map->osd_weight[osd] = CEPH_OSD_IN;
 			ret = set_primary_affinity(map, osd,
 						   CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
 			if (ret)
diff --git a/net/core/dev.c b/net/core/dev.c
index 6f203c7fb166..24d243084aab 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -182,7 +182,7 @@ EXPORT_SYMBOL(dev_base_lock);
 /* protects napi_hash addition/deletion and napi_gen_id */
 static DEFINE_SPINLOCK(napi_hash_lock);
 
-static unsigned int napi_gen_id;
+static unsigned int napi_gen_id = NR_CPUS;
 static DEFINE_HASHTABLE(napi_hash, 8);
 
 static seqcount_t devnet_rename_seq;
@@ -1246,8 +1246,9 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
 	if (!new_ifalias)
 		return -ENOMEM;
 	dev->ifalias = new_ifalias;
+	memcpy(dev->ifalias, alias, len);
+	dev->ifalias[len] = 0;
 
-	strlcpy(dev->ifalias, alias, len+1);
 	return len;
 }
 
@@ -1676,37 +1677,59 @@ EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
 
 static struct static_key netstamp_needed __read_mostly;
 #ifdef HAVE_JUMP_LABEL
-/* We are not allowed to call static_key_slow_dec() from irq context
- * If net_disable_timestamp() is called from irq context, defer the
- * static_key_slow_dec() calls.
- */
 static atomic_t netstamp_needed_deferred;
+static atomic_t netstamp_wanted;
+static void netstamp_clear(struct work_struct *work)
+{
+	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
+	int wanted;
+
+	wanted = atomic_add_return(deferred, &netstamp_wanted);
+	if (wanted > 0)
+		static_key_enable(&netstamp_needed);
+	else
+		static_key_disable(&netstamp_needed);
+}
+static DECLARE_WORK(netstamp_work, netstamp_clear);
 #endif
 
 void net_enable_timestamp(void)
 {
 #ifdef HAVE_JUMP_LABEL
-	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
+	int wanted;
 
-	if (deferred) {
-		while (--deferred)
-			static_key_slow_dec(&netstamp_needed);
-		return;
+	while (1) {
+		wanted = atomic_read(&netstamp_wanted);
+		if (wanted <= 0)
+			break;
+		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
+			return;
 	}
-#endif
+	atomic_inc(&netstamp_needed_deferred);
+	schedule_work(&netstamp_work);
+#else
 	static_key_slow_inc(&netstamp_needed);
+#endif
 }
 EXPORT_SYMBOL(net_enable_timestamp);
 
 void net_disable_timestamp(void)
 {
 #ifdef HAVE_JUMP_LABEL
-	if (in_interrupt()) {
-		atomic_inc(&netstamp_needed_deferred);
-		return;
+	int wanted;
+
+	while (1) {
+		wanted = atomic_read(&netstamp_wanted);
+		if (wanted <= 1)
+			break;
+		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
+			return;
 	}
-#endif
+	atomic_dec(&netstamp_needed_deferred);
+	schedule_work(&netstamp_work);
+#else
 	static_key_slow_dec(&netstamp_needed);
+#endif
 }
 EXPORT_SYMBOL(net_disable_timestamp);
 
@@ -2527,9 +2550,10 @@ EXPORT_SYMBOL(skb_mac_gso_segment);
 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
 {
 	if (tx_path)
-		return skb->ip_summed != CHECKSUM_PARTIAL;
-	else
-		return skb->ip_summed == CHECKSUM_NONE;
+		return skb->ip_summed != CHECKSUM_PARTIAL &&
+		       skb->ip_summed != CHECKSUM_UNNECESSARY;
+
+	return skb->ip_summed == CHECKSUM_NONE;
 }
 
 /**
@@ -2548,11 +2572,12 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
 				  netdev_features_t features, bool tx_path)
 {
+	struct sk_buff *segs;
+
 	if (unlikely(skb_needs_check(skb, tx_path))) {
 		int err;
 
-		skb_warn_bad_offload(skb);
-
+		/* We're going to init ->check field in TCP or UDP header */
 		err = skb_cow_head(skb, 0);
 		if (err < 0)
 			return ERR_PTR(err);
@@ -2567,7 +2592,12 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
 	skb_reset_mac_header(skb);
 	skb_reset_mac_len(skb);
 
-	return skb_mac_gso_segment(skb, features);
+	segs = skb_mac_gso_segment(skb, features);
+
+	if (unlikely(skb_needs_check(skb, tx_path)))
+		skb_warn_bad_offload(skb);
+
+	return segs;
 }
 EXPORT_SYMBOL(__skb_gso_segment);
 
@@ -2650,9 +2680,9 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
 	if (skb->ip_summed != CHECKSUM_NONE &&
 	    !can_checksum_protocol(features, type)) {
 		features &= ~NETIF_F_ALL_CSUM;
-	} else if (illegal_highdma(skb->dev, skb)) {
-		features &= ~NETIF_F_SG;
 	}
+	if (illegal_highdma(skb->dev, skb))
+		features &= ~NETIF_F_SG;
 
 	return features;
 }
@@ -3027,7 +3057,9 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 	int queue_index = 0;
 
 #ifdef CONFIG_XPS
-	if (skb->sender_cpu == 0)
+	u32 sender_cpu = skb->sender_cpu - 1;
+
+	if (sender_cpu >= (u32)NR_CPUS)
 		skb->sender_cpu = raw_smp_processor_id() + 1;
 #endif
 
@@ -4350,6 +4382,12 @@ struct packet_offload *gro_find_complete_by_type(__be16 type)
 }
 EXPORT_SYMBOL(gro_find_complete_by_type);
 
+static void napi_skb_free_stolen_head(struct sk_buff *skb)
+{
+	skb_dst_drop(skb);
+	kmem_cache_free(skbuff_head_cache, skb);
+}
+
 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
 {
 	switch (ret) {
@@ -4363,12 +4401,10 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
 		break;
 
 	case GRO_MERGED_FREE:
-		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
-			skb_dst_drop(skb);
-			kmem_cache_free(skbuff_head_cache, skb);
-		} else {
+		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+			napi_skb_free_stolen_head(skb);
+		else
 			__kfree_skb(skb);
-		}
 		break;
 
 	case GRO_HELD:
@@ -4434,10 +4470,16 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi,
 		break;
 
 	case GRO_DROP:
-	case GRO_MERGED_FREE:
 		napi_reuse_skb(napi, skb);
 		break;
 
+	case GRO_MERGED_FREE:
+		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+			napi_skb_free_stolen_head(skb);
+		else
+			napi_reuse_skb(napi, skb);
+		break;
+
 	case GRO_MERGED:
 		break;
 	}
@@ -4704,25 +4746,22 @@ EXPORT_SYMBOL_GPL(napi_by_id);
 
 void napi_hash_add(struct napi_struct *napi)
 {
-	if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
+	if (test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
+		return;
 
-		spin_lock(&napi_hash_lock);
+	spin_lock(&napi_hash_lock);
 
-		/* 0 is not a valid id, we also skip an id that is taken
-		 * we expect both events to be extremely rare
-		 */
-		napi->napi_id = 0;
-		while (!napi->napi_id) {
-			napi->napi_id = ++napi_gen_id;
-			if (napi_by_id(napi->napi_id))
-				napi->napi_id = 0;
-		}
+	/* 0..NR_CPUS+1 range is reserved for sender_cpu use */
+	do {
+		if (unlikely(++napi_gen_id < NR_CPUS + 1))
+			napi_gen_id = NR_CPUS + 1;
+	} while (napi_by_id(napi_gen_id));
+	napi->napi_id = napi_gen_id;
 
-		hlist_add_head_rcu(&napi->napi_hash_node,
-				   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
+	hlist_add_head_rcu(&napi->napi_hash_node,
+			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
 
-		spin_unlock(&napi_hash_lock);
-	}
+	spin_unlock(&napi_hash_lock);
 }
 EXPORT_SYMBOL_GPL(napi_hash_add);
 
@@ -7030,8 +7069,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
 	} else {
 		netdev_stats_to_stats64(storage, &dev->stats);
 	}
-	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
-	storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
+	storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
+	storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
 	return storage;
 }
 EXPORT_SYMBOL(dev_get_stats);
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 183ef95db502..2bded3ecf03d 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -28,6 +28,7 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg)
 
 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
 		return -EFAULT;
+	ifr.ifr_name[IFNAMSIZ-1] = 0;
 
 	error = netdev_get_name(net, ifr.ifr_name, ifr.ifr_ifindex);
 	if (error)
diff --git a/net/core/dst.c b/net/core/dst.c
index a1656e3b8d72..e72d706f8d0c 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -151,13 +151,13 @@ int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(dst_discard_out);
 
-const u32 dst_default_metrics[RTAX_MAX + 1] = {
+const struct dst_metrics dst_default_metrics = {
 	/* This initializer is needed to force linker to place this variable
 	 * into const section. Otherwise it might end into bss section.
 	 * We really want to avoid false sharing on this variable, and catch
 	 * any writes on it.
 	 */
-	[RTAX_MAX] = 0xdeadbeef,
+	.refcnt = ATOMIC_INIT(1),
 };
 
 void dst_init(struct dst_entry *dst, struct dst_ops *ops,
@@ -169,7 +169,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops,
 	if (dev)
 		dev_hold(dev);
 	dst->ops = ops;
-	dst_init_metrics(dst, dst_default_metrics, true);
+	dst_init_metrics(dst, dst_default_metrics.metrics, true);
 	dst->expires = 0UL;
 	dst->path = dst;
 	dst->from = NULL;
@@ -315,25 +315,30 @@ EXPORT_SYMBOL(dst_release);
 
 u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
 {
-	u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);
+	struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);
 
 	if (p) {
-		u32 *old_p = __DST_METRICS_PTR(old);
+		struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
 		unsigned long prev, new;
 
-		memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
+		atomic_set(&p->refcnt, 1);
+		memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));
 
 		new = (unsigned long) p;
 		prev = cmpxchg(&dst->_metrics, old, new);
 
 		if (prev != old) {
 			kfree(p);
-			p = __DST_METRICS_PTR(prev);
+			p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
 			if (prev & DST_METRICS_READ_ONLY)
 				p = NULL;
+		} else if (prev & DST_METRICS_REFCOUNTED) {
+			if (atomic_dec_and_test(&old_p->refcnt))
+				kfree(old_p);
 		}
 	}
-	return p;
+	BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
+	return (u32 *)p;
 }
 EXPORT_SYMBOL(dst_cow_metrics_generic);
 
@@ -342,7 +347,7 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
 {
 	unsigned long prev, new;
 
-	new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
+	new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
 	prev = cmpxchg(&dst->_metrics, old, new);
 	if (prev == old)
 		kfree(__DST_METRICS_PTR(old));
@@ -457,6 +462,20 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event,
 	spin_lock_bh(&dst_garbage.lock);
 	dst = dst_garbage.list;
 	dst_garbage.list = NULL;
+	/* The code in dst_ifdown places a hold on the loopback device.
+	 * If the gc entry processing is set to expire after a lengthy
+	 * interval, this hold can cause netdev_wait_allrefs() to hang
+	 * out and wait for a long time -- until the the loopback
+	 * interface is released.  If we're really unlucky, it'll emit
+	 * pr_emerg messages to console too.  Reset the interval here,
+	 * so dst cleanups occur in a more timely fashion.
+	 */
+	if (dst_garbage.timer_inc > DST_GC_INC) {
+		dst_garbage.timer_inc = DST_GC_INC;
+		dst_garbage.timer_expires = DST_GC_MIN;
+		mod_delayed_work(system_wq, &dst_gc_work,
+				 dst_garbage.timer_expires);
+	}
 	spin_unlock_bh(&dst_garbage.lock);
 
 	if (last)
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 29edf74846fc..b6bca625b0d2 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -886,9 +886,12 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
 	if (regs.len > reglen)
 		regs.len = reglen;
 
-	regbuf = vzalloc(reglen);
-	if (reglen && !regbuf)
-		return -ENOMEM;
+	regbuf = NULL;
+	if (reglen) {
+		regbuf = vzalloc(reglen);
+		if (!regbuf)
+			return -ENOMEM;
+	}
 
 	ops->get_regs(dev, &regs, regbuf);
 
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 769cece9b00b..ae92131c4f89 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -859,7 +859,8 @@ static void neigh_probe(struct neighbour *neigh)
 	if (skb)
 		skb = skb_clone(skb, GFP_ATOMIC);
 	write_unlock(&neigh->lock);
-	neigh->ops->solicit(neigh, skb);
+	if (neigh->ops->solicit)
+		neigh->ops->solicit(neigh, skb);
 	atomic_inc(&neigh->probes);
 	kfree_skb(skb);
 }
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 94acfc89ad97..440aa9f6e0a8 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -105,15 +105,21 @@ static void queue_process(struct work_struct *work)
 	while ((skb = skb_dequeue(&npinfo->txq))) {
 		struct net_device *dev = skb->dev;
 		struct netdev_queue *txq;
+		unsigned int q_index;
 
 		if (!netif_device_present(dev) || !netif_running(dev)) {
 			kfree_skb(skb);
 			continue;
 		}
 
-		txq = skb_get_tx_queue(dev, skb);
-
 		local_irq_save(flags);
+		/* check if skb->queue_mapping is still valid */
+		q_index = skb_get_queue_mapping(skb);
+		if (unlikely(q_index >= dev->real_num_tx_queues)) {
+			q_index = q_index % dev->real_num_tx_queues;
+			skb_set_queue_mapping(skb, q_index);
+		}
+		txq = netdev_get_tx_queue(dev, q_index);
 		HARD_TX_LOCK(dev, txq, smp_processor_id());
 		if (netif_xmit_frozen_or_stopped(txq) ||
 		    netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index b94e165a4f79..5b3d611d8b5f 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -897,6 +897,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
 	       + nla_total_size(1) /* IFLA_LINKMODE */
 	       + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
 	       + nla_total_size(4) /* IFLA_LINK_NETNSID */
+	       + nla_total_size(4) /* IFLA_GROUP */
 	       + nla_total_size(ext_filter_mask
 			        & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
 	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
@@ -1018,7 +1019,7 @@ static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
 		return err;
 	}
 
-	if (nla_put(skb, IFLA_PHYS_PORT_NAME, strlen(name), name))
+	if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
 		return -EMSGSIZE;
 
 	return 0;
@@ -1089,6 +1090,8 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
 	struct ifla_vf_mac vf_mac;
 	struct ifla_vf_info ivi;
 
+	memset(&ivi, 0, sizeof(ivi));
+
 	/* Not all SR-IOV capable drivers support the
 	 * spoofcheck and "RSS query enable" query. Preset to
 	 * -1 so the user space tool can detect that the driver
@@ -1097,7 +1100,6 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
 	ivi.spoofchk = -1;
 	ivi.rss_query_en = -1;
 	ivi.trusted = -1;
-	memset(ivi.mac, 0, sizeof(ivi.mac));
 	/* The default value for VF link state is "auto"
 	 * IFLA_VF_LINK_STATE_AUTO which equals zero
 	 */
@@ -1370,6 +1372,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
 	[IFLA_PHYS_SWITCH_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
 	[IFLA_LINK_NETNSID]	= { .type = NLA_S32 },
 	[IFLA_PROTO_DOWN]	= { .type = NLA_U8 },
+	[IFLA_GROUP]		= { .type = NLA_U32 },
 };
 
 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@ -1458,13 +1461,13 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 					       cb->nlh->nlmsg_seq, 0,
 					       NLM_F_MULTI,
 					       ext_filter_mask);
-			/* If we ran out of room on the first message,
-			 * we're in trouble
-			 */
-			WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
 
-			if (err < 0)
-				goto out;
+			if (err < 0) {
+				if (likely(skb->len))
+					goto out;
+
+				goto out_err;
+			}
 
 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
@@ -1472,10 +1475,12 @@ cont:
 		}
 	}
 out:
+	err = skb->len;
+out_err:
 	cb->args[1] = idx;
 	cb->args[0] = h;
 
-	return skb->len;
+	return err;
 }
 
 int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len)
@@ -1737,7 +1742,8 @@ static int do_setlink(const struct sk_buff *skb,
 		struct sockaddr *sa;
 		int len;
 
-		len = sizeof(sa_family_t) + dev->addr_len;
+		len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
+						  sizeof(*sa));
 		sa = kmalloc(len, GFP_KERNEL);
 		if (!sa) {
 			err = -ENOMEM;
@@ -3127,8 +3133,12 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
 				err = br_dev->netdev_ops->ndo_bridge_getlink(
 						skb, portid, seq, dev,
 						filter_mask, NLM_F_MULTI);
-				if (err < 0 && err != -EOPNOTSUPP)
-					break;
+				if (err < 0 && err != -EOPNOTSUPP) {
+					if (likely(skb->len))
+						break;
+
+					goto out_err;
+				}
 			}
 			idx++;
 		}
@@ -3139,16 +3149,22 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
3139 seq, dev, 3149 seq, dev,
3140 filter_mask, 3150 filter_mask,
3141 NLM_F_MULTI); 3151 NLM_F_MULTI);
3142 if (err < 0 && err != -EOPNOTSUPP) 3152 if (err < 0 && err != -EOPNOTSUPP) {
3143 break; 3153 if (likely(skb->len))
3154 break;
3155
3156 goto out_err;
3157 }
3144 } 3158 }
3145 idx++; 3159 idx++;
3146 } 3160 }
3147 } 3161 }
3162 err = skb->len;
3163out_err:
3148 rcu_read_unlock(); 3164 rcu_read_unlock();
3149 cb->args[0] = idx; 3165 cb->args[0] = idx;
3150 3166
3151 return skb->len; 3167 return err;
3152} 3168}
3153 3169
3154static inline size_t bridge_nlmsg_size(void) 3170static inline size_t bridge_nlmsg_size(void)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4968b5ddea69..73dfd7729bc9 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3678,13 +3678,14 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
3678 if (!skb_may_tx_timestamp(sk, false)) 3678 if (!skb_may_tx_timestamp(sk, false))
3679 return; 3679 return;
3680 3680
3681 /* take a reference to prevent skb_orphan() from freeing the socket */ 3681 /* Take a reference to prevent skb_orphan() from freeing the socket,
3682 sock_hold(sk); 3682 * but only if the socket refcount is not zero.
3683 3683 */
3684 *skb_hwtstamps(skb) = *hwtstamps; 3684 if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
3685 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); 3685 *skb_hwtstamps(skb) = *hwtstamps;
3686 3686 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
3687 sock_put(sk); 3687 sock_put(sk);
3688 }
3688} 3689}
3689EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); 3690EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
3690 3691
@@ -3735,7 +3736,7 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
3735{ 3736{
3736 struct sock *sk = skb->sk; 3737 struct sock *sk = skb->sk;
3737 struct sock_exterr_skb *serr; 3738 struct sock_exterr_skb *serr;
3738 int err; 3739 int err = 1;
3739 3740
3740 skb->wifi_acked_valid = 1; 3741 skb->wifi_acked_valid = 1;
3741 skb->wifi_acked = acked; 3742 skb->wifi_acked = acked;
@@ -3745,14 +3746,15 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
3745 serr->ee.ee_errno = ENOMSG; 3746 serr->ee.ee_errno = ENOMSG;
3746 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 3747 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
3747 3748
3748 /* take a reference to prevent skb_orphan() from freeing the socket */ 3749 /* Take a reference to prevent skb_orphan() from freeing the socket,
3749 sock_hold(sk); 3750 * but only if the socket refcount is not zero.
3750 3751 */
3751 err = sock_queue_err_skb(sk, skb); 3752 if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
3753 err = sock_queue_err_skb(sk, skb);
3754 sock_put(sk);
3755 }
3752 if (err) 3756 if (err)
3753 kfree_skb(skb); 3757 kfree_skb(skb);
3754
3755 sock_put(sk);
3756} 3758}
3757EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 3759EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
3758 3760
diff --git a/net/core/sock.c b/net/core/sock.c
index f367df38c264..2871364e4420 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1459,6 +1459,11 @@ void sk_destruct(struct sock *sk)
1459 pr_debug("%s: optmem leakage (%d bytes) detected\n", 1459 pr_debug("%s: optmem leakage (%d bytes) detected\n",
1460 __func__, atomic_read(&sk->sk_omem_alloc)); 1460 __func__, atomic_read(&sk->sk_omem_alloc));
1461 1461
1462 if (sk->sk_frag.page) {
1463 put_page(sk->sk_frag.page);
1464 sk->sk_frag.page = NULL;
1465 }
1466
1462 if (sk->sk_peer_cred) 1467 if (sk->sk_peer_cred)
1463 put_cred(sk->sk_peer_cred); 1468 put_cred(sk->sk_peer_cred);
1464 put_pid(sk->sk_peer_pid); 1469 put_pid(sk->sk_peer_pid);
@@ -1552,6 +1557,12 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1552 is_charged = sk_filter_charge(newsk, filter); 1557 is_charged = sk_filter_charge(newsk, filter);
1553 1558
1554 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { 1559 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
1560 /* We need to make sure that we don't uncharge the new
1561 * socket if we couldn't charge it in the first place
1562 * as otherwise we uncharge the parent's filter.
1563 */
1564 if (!is_charged)
1565 RCU_INIT_POINTER(newsk->sk_filter, NULL);
1555 /* It is still raw copy of parent, so invalidate 1566 /* It is still raw copy of parent, so invalidate
1556 * destructor and make plain sk_free() */ 1567 * destructor and make plain sk_free() */
1557 newsk->sk_destruct = NULL; 1568 newsk->sk_destruct = NULL;
@@ -1679,17 +1690,17 @@ EXPORT_SYMBOL(skb_set_owner_w);
1679 1690
1680void skb_orphan_partial(struct sk_buff *skb) 1691void skb_orphan_partial(struct sk_buff *skb)
1681{ 1692{
1682 /* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
1683 * so we do not completely orphan skb, but transfert all
1684 * accounted bytes but one, to avoid unexpected reorders.
1685 */
1686 if (skb->destructor == sock_wfree 1693 if (skb->destructor == sock_wfree
1687#ifdef CONFIG_INET 1694#ifdef CONFIG_INET
1688 || skb->destructor == tcp_wfree 1695 || skb->destructor == tcp_wfree
1689#endif 1696#endif
1690 ) { 1697 ) {
1691 atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc); 1698 struct sock *sk = skb->sk;
1692 skb->truesize = 1; 1699
1700 if (atomic_inc_not_zero(&sk->sk_refcnt)) {
1701 atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
1702 skb->destructor = sock_efree;
1703 }
1693 } else { 1704 } else {
1694 skb_orphan(skb); 1705 skb_orphan(skb);
1695 } 1706 }
@@ -2694,11 +2705,6 @@ void sk_common_release(struct sock *sk)
2694 2705
2695 sk_refcnt_debug_release(sk); 2706 sk_refcnt_debug_release(sk);
2696 2707
2697 if (sk->sk_frag.page) {
2698 put_page(sk->sk_frag.page);
2699 sk->sk_frag.page = NULL;
2700 }
2701
2702 sock_put(sk); 2708 sock_put(sk);
2703} 2709}
2704EXPORT_SYMBOL(sk_common_release); 2710EXPORT_SYMBOL(sk_common_release);
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index f053198e730c..5e3a7302f774 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -749,6 +749,7 @@ static void ccid2_hc_tx_exit(struct sock *sk)
749 for (i = 0; i < hc->tx_seqbufc; i++) 749 for (i = 0; i < hc->tx_seqbufc; i++)
750 kfree(hc->tx_seqbuf[i]); 750 kfree(hc->tx_seqbuf[i]);
751 hc->tx_seqbufc = 0; 751 hc->tx_seqbufc = 0;
752 dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
752} 753}
753 754
754static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) 755static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index 1704948e6a12..f227f002c73d 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -1471,9 +1471,12 @@ int dccp_feat_init(struct sock *sk)
1471 * singleton values (which always leads to failure). 1471 * singleton values (which always leads to failure).
1472 * These settings can still (later) be overridden via sockopts. 1472 * These settings can still (later) be overridden via sockopts.
1473 */ 1473 */
1474 if (ccid_get_builtin_ccids(&tx.val, &tx.len) || 1474 if (ccid_get_builtin_ccids(&tx.val, &tx.len))
1475 ccid_get_builtin_ccids(&rx.val, &rx.len))
1476 return -ENOBUFS; 1475 return -ENOBUFS;
1476 if (ccid_get_builtin_ccids(&rx.val, &rx.len)) {
1477 kfree(tx.val);
1478 return -ENOBUFS;
1479 }
1477 1480
1478 if (!dccp_feat_prefer(sysctl_dccp_tx_ccid, tx.val, tx.len) || 1481 if (!dccp_feat_prefer(sysctl_dccp_tx_ccid, tx.val, tx.len) ||
1479 !dccp_feat_prefer(sysctl_dccp_rx_ccid, rx.val, rx.len)) 1482 !dccp_feat_prefer(sysctl_dccp_rx_ccid, rx.val, rx.len))
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 3bd14e885396..dbe2573f6ba1 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -606,7 +606,8 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
606 if (inet_csk(sk)->icsk_af_ops->conn_request(sk, 606 if (inet_csk(sk)->icsk_af_ops->conn_request(sk,
607 skb) < 0) 607 skb) < 0)
608 return 1; 608 return 1;
609 goto discard; 609 consume_skb(skb);
610 return 0;
610 } 611 }
611 if (dh->dccph_type == DCCP_PKT_RESET) 612 if (dh->dccph_type == DCCP_PKT_RESET)
612 goto discard; 613 goto discard;
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 0759f5b9180e..e217f17997a4 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -289,7 +289,8 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
289 289
290 switch (type) { 290 switch (type) {
291 case ICMP_REDIRECT: 291 case ICMP_REDIRECT:
292 dccp_do_redirect(skb, sk); 292 if (!sock_owned_by_user(sk))
293 dccp_do_redirect(skb, sk);
293 goto out; 294 goto out;
294 case ICMP_SOURCE_QUENCH: 295 case ICMP_SOURCE_QUENCH:
295 /* Just silently ignore these. */ 296 /* Just silently ignore these. */
@@ -634,6 +635,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
634 goto drop_and_free; 635 goto drop_and_free;
635 636
636 inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); 637 inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
638 reqsk_put(req);
637 return 0; 639 return 0;
638 640
639drop_and_free: 641drop_and_free:
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 27c4e81efa24..09a9ab65f4e1 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -122,10 +122,12 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
122 np = inet6_sk(sk); 122 np = inet6_sk(sk);
123 123
124 if (type == NDISC_REDIRECT) { 124 if (type == NDISC_REDIRECT) {
125 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); 125 if (!sock_owned_by_user(sk)) {
126 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
126 127
127 if (dst) 128 if (dst)
128 dst->ops->redirect(dst, sk, skb); 129 dst->ops->redirect(dst, sk, skb);
130 }
129 goto out; 131 goto out;
130 } 132 }
131 133
@@ -374,6 +376,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
374 goto drop_and_free; 376 goto drop_and_free;
375 377
376 inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); 378 inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
379 reqsk_put(req);
377 return 0; 380 return 0;
378 381
379drop_and_free: 382drop_and_free:
@@ -420,6 +423,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
420 newsk->sk_backlog_rcv = dccp_v4_do_rcv; 423 newsk->sk_backlog_rcv = dccp_v4_do_rcv;
421 newnp->pktoptions = NULL; 424 newnp->pktoptions = NULL;
422 newnp->opt = NULL; 425 newnp->opt = NULL;
426 newnp->ipv6_mc_list = NULL;
427 newnp->ipv6_ac_list = NULL;
428 newnp->ipv6_fl_list = NULL;
423 newnp->mcast_oif = inet6_iif(skb); 429 newnp->mcast_oif = inet6_iif(skb);
424 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; 430 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
425 431
@@ -484,6 +490,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
484 /* Clone RX bits */ 490 /* Clone RX bits */
485 newnp->rxopt.all = np->rxopt.all; 491 newnp->rxopt.all = np->rxopt.all;
486 492
493 newnp->ipv6_mc_list = NULL;
494 newnp->ipv6_ac_list = NULL;
495 newnp->ipv6_fl_list = NULL;
487 newnp->pktoptions = NULL; 496 newnp->pktoptions = NULL;
488 newnp->opt = NULL; 497 newnp->opt = NULL;
489 newnp->mcast_oif = inet6_iif(skb); 498 newnp->mcast_oif = inet6_iif(skb);
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 1994f8af646b..68eed344b471 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -122,6 +122,7 @@ struct sock *dccp_create_openreq_child(const struct sock *sk,
122 /* It is still raw copy of parent, so invalidate 122 /* It is still raw copy of parent, so invalidate
123 * destructor and make plain sk_free() */ 123 * destructor and make plain sk_free() */
124 newsk->sk_destruct = NULL; 124 newsk->sk_destruct = NULL;
125 bh_unlock_sock(newsk);
125 sk_free(newsk); 126 sk_free(newsk);
126 return NULL; 127 return NULL;
127 } 128 }
@@ -145,6 +146,13 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
145 struct dccp_request_sock *dreq = dccp_rsk(req); 146 struct dccp_request_sock *dreq = dccp_rsk(req);
146 bool own_req; 147 bool own_req;
147 148
149 /* TCP/DCCP listeners became lockless.
150 * DCCP stores complex state in its request_sock, so we need
151 * a protection for them, now this code runs without being protected
152 * by the parent (listener) lock.
153 */
154 spin_lock_bh(&dreq->dreq_lock);
155
148 /* Check for retransmitted REQUEST */ 156 /* Check for retransmitted REQUEST */
149 if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) { 157 if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {
150 158
@@ -159,7 +167,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
159 inet_rtx_syn_ack(sk, req); 167 inet_rtx_syn_ack(sk, req);
160 } 168 }
161 /* Network Duplicate, discard packet */ 169 /* Network Duplicate, discard packet */
162 return NULL; 170 goto out;
163 } 171 }
164 172
165 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR; 173 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
@@ -185,20 +193,20 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
185 193
186 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, 194 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
187 req, &own_req); 195 req, &own_req);
188 if (!child) 196 if (child) {
189 goto listen_overflow; 197 child = inet_csk_complete_hashdance(sk, child, req, own_req);
190 198 goto out;
191 return inet_csk_complete_hashdance(sk, child, req, own_req); 199 }
192 200
193listen_overflow:
194 dccp_pr_debug("listen_overflow!\n");
195 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY; 201 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
196drop: 202drop:
197 if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET) 203 if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
198 req->rsk_ops->send_reset(sk, skb); 204 req->rsk_ops->send_reset(sk, skb);
199 205
200 inet_csk_reqsk_queue_drop(sk, req); 206 inet_csk_reqsk_queue_drop(sk, req);
201 return NULL; 207out:
208 spin_unlock_bh(&dreq->dreq_lock);
209 return child;
202} 210}
203 211
204EXPORT_SYMBOL_GPL(dccp_check_req); 212EXPORT_SYMBOL_GPL(dccp_check_req);
@@ -249,6 +257,7 @@ int dccp_reqsk_init(struct request_sock *req,
249{ 257{
250 struct dccp_request_sock *dreq = dccp_rsk(req); 258 struct dccp_request_sock *dreq = dccp_rsk(req);
251 259
260 spin_lock_init(&dreq->dreq_lock);
252 inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport; 261 inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport;
253 inet_rsk(req)->ir_num = ntohs(dccp_hdr(skb)->dccph_dport); 262 inet_rsk(req)->ir_num = ntohs(dccp_hdr(skb)->dccph_dport);
254 inet_rsk(req)->acked = 0; 263 inet_rsk(req)->acked = 0;
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index b1dc096d22f8..403593bd2b83 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -188,12 +188,6 @@ static inline void dnrt_free(struct dn_route *rt)
188 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); 188 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
189} 189}
190 190
191static inline void dnrt_drop(struct dn_route *rt)
192{
193 dst_release(&rt->dst);
194 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
195}
196
197static void dn_dst_check_expire(unsigned long dummy) 191static void dn_dst_check_expire(unsigned long dummy)
198{ 192{
199 int i; 193 int i;
@@ -248,7 +242,7 @@ static int dn_dst_gc(struct dst_ops *ops)
248 } 242 }
249 *rtp = rt->dst.dn_next; 243 *rtp = rt->dst.dn_next;
250 rt->dst.dn_next = NULL; 244 rt->dst.dn_next = NULL;
251 dnrt_drop(rt); 245 dnrt_free(rt);
252 break; 246 break;
253 } 247 }
254 spin_unlock_bh(&dn_rt_hash_table[i].lock); 248 spin_unlock_bh(&dn_rt_hash_table[i].lock);
@@ -350,7 +344,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou
350 dst_use(&rth->dst, now); 344 dst_use(&rth->dst, now);
351 spin_unlock_bh(&dn_rt_hash_table[hash].lock); 345 spin_unlock_bh(&dn_rt_hash_table[hash].lock);
352 346
353 dnrt_drop(rt); 347 dst_free(&rt->dst);
354 *rp = rth; 348 *rp = rth;
355 return 0; 349 return 0;
356 } 350 }
@@ -380,7 +374,7 @@ static void dn_run_flush(unsigned long dummy)
380 for(; rt; rt = next) { 374 for(; rt; rt = next) {
381 next = rcu_dereference_raw(rt->dst.dn_next); 375 next = rcu_dereference_raw(rt->dst.dn_next);
382 RCU_INIT_POINTER(rt->dst.dn_next, NULL); 376 RCU_INIT_POINTER(rt->dst.dn_next, NULL);
383 dst_free((struct dst_entry *)rt); 377 dnrt_free(rt);
384 } 378 }
385 379
386nothing_to_declare: 380nothing_to_declare:
@@ -1187,7 +1181,7 @@ make_route:
1187 if (dev_out->flags & IFF_LOOPBACK) 1181 if (dev_out->flags & IFF_LOOPBACK)
1188 flags |= RTCF_LOCAL; 1182 flags |= RTCF_LOCAL;
1189 1183
1190 rt = dst_alloc(&dn_dst_ops, dev_out, 1, DST_OBSOLETE_NONE, DST_HOST); 1184 rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST);
1191 if (rt == NULL) 1185 if (rt == NULL)
1192 goto e_nobufs; 1186 goto e_nobufs;
1193 1187
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 85f2fdc360c2..29246bc9a7b4 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -102,7 +102,9 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
102{ 102{
103 struct nlmsghdr *nlh = nlmsg_hdr(skb); 103 struct nlmsghdr *nlh = nlmsg_hdr(skb);
104 104
105 if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) 105 if (skb->len < sizeof(*nlh) ||
106 nlh->nlmsg_len < sizeof(*nlh) ||
107 skb->len < nlh->nlmsg_len)
106 return; 108 return;
107 109
108 if (!netlink_capable(skb, CAP_NET_ADMIN)) 110 if (!netlink_capable(skb, CAP_NET_ADMIN))
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 7bc787b095c8..554c2a961ad5 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1006,10 +1006,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
1006 /* Use already configured phy mode */ 1006 /* Use already configured phy mode */
1007 if (p->phy_interface == PHY_INTERFACE_MODE_NA) 1007 if (p->phy_interface == PHY_INTERFACE_MODE_NA)
1008 p->phy_interface = p->phy->interface; 1008 p->phy_interface = p->phy->interface;
1009 phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, 1009 return phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
1010 p->phy_interface); 1010 p->phy_interface);
1011
1012 return 0;
1013} 1011}
1014 1012
1015static int dsa_slave_phy_setup(struct dsa_slave_priv *p, 1013static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
@@ -1101,6 +1099,8 @@ int dsa_slave_suspend(struct net_device *slave_dev)
1101{ 1099{
1102 struct dsa_slave_priv *p = netdev_priv(slave_dev); 1100 struct dsa_slave_priv *p = netdev_priv(slave_dev);
1103 1101
1102 netif_device_detach(slave_dev);
1103
1104 if (p->phy) { 1104 if (p->phy) {
1105 phy_stop(p->phy); 1105 phy_stop(p->phy);
1106 p->old_pause = -1; 1106 p->old_pause = -1;
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index de85d4e1cf43..52dcd414c2af 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -353,6 +353,7 @@ void ether_setup(struct net_device *dev)
353 dev->header_ops = &eth_header_ops; 353 dev->header_ops = &eth_header_ops;
354 dev->type = ARPHRD_ETHER; 354 dev->type = ARPHRD_ETHER;
355 dev->hard_header_len = ETH_HLEN; 355 dev->hard_header_len = ETH_HLEN;
356 dev->min_header_len = ETH_HLEN;
356 dev->mtu = ETH_DATA_LEN; 357 dev->mtu = ETH_DATA_LEN;
357 dev->addr_len = ETH_ALEN; 358 dev->addr_len = ETH_ALEN;
358 dev->tx_queue_len = 1000; /* Ethernet wants good queues */ 359 dev->tx_queue_len = 1000; /* Ethernet wants good queues */
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 68bf7bdf7fdb..b25a1b1ee657 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1029,7 +1029,7 @@ static struct inet_protosw inetsw_array[] =
1029 .type = SOCK_DGRAM, 1029 .type = SOCK_DGRAM,
1030 .protocol = IPPROTO_ICMP, 1030 .protocol = IPPROTO_ICMP,
1031 .prot = &ping_prot, 1031 .prot = &ping_prot,
1032 .ops = &inet_dgram_ops, 1032 .ops = &inet_sockraw_ops,
1033 .flags = INET_PROTOSW_REUSE, 1033 .flags = INET_PROTOSW_REUSE,
1034 }, 1034 },
1035 1035
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 59b3e0e8fd51..711b4dfa17c3 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1250,7 +1250,7 @@ void __init arp_init(void)
1250/* 1250/*
1251 * ax25 -> ASCII conversion 1251 * ax25 -> ASCII conversion
1252 */ 1252 */
1253static char *ax2asc2(ax25_address *a, char *buf) 1253static void ax2asc2(ax25_address *a, char *buf)
1254{ 1254{
1255 char c, *s; 1255 char c, *s;
1256 int n; 1256 int n;
@@ -1272,10 +1272,10 @@ static char *ax2asc2(ax25_address *a, char *buf)
1272 *s++ = n + '0'; 1272 *s++ = n + '0';
1273 *s++ = '\0'; 1273 *s++ = '\0';
1274 1274
1275 if (*buf == '\0' || *buf == '-') 1275 if (*buf == '\0' || *buf == '-') {
1276 return "*"; 1276 buf[0] = '*';
1277 1277 buf[1] = '\0';
1278 return buf; 1278 }
1279} 1279}
1280#endif /* CONFIG_AX25 */ 1280#endif /* CONFIG_AX25 */
1281 1281
@@ -1309,7 +1309,7 @@ static void arp_format_neigh_entry(struct seq_file *seq,
1309 } 1309 }
1310#endif 1310#endif
1311 sprintf(tbuf, "%pI4", n->primary_key); 1311 sprintf(tbuf, "%pI4", n->primary_key);
1312 seq_printf(seq, "%-16s 0x%-10x0x%-10x%s * %s\n", 1312 seq_printf(seq, "%-16s 0x%-10x0x%-10x%-17s * %s\n",
1313 tbuf, hatype, arp_state_to_flags(n), hbuffer, dev->name); 1313 tbuf, hatype, arp_state_to_flags(n), hbuffer, dev->name);
1314 read_unlock(&n->lock); 1314 read_unlock(&n->lock);
1315} 1315}
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index bdb2a07ec363..6cc3e1d602fb 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1657,6 +1657,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
1657 goto validate_return_locked; 1657 goto validate_return_locked;
1658 } 1658 }
1659 1659
1660 if (opt_iter + 1 == opt_len) {
1661 err_offset = opt_iter;
1662 goto validate_return_locked;
1663 }
1660 tag_len = tag[1]; 1664 tag_len = tag[1];
1661 if (tag_len > (opt_len - opt_iter)) { 1665 if (tag_len > (opt_len - opt_iter)) {
1662 err_offset = opt_iter + 1; 1666 err_offset = opt_iter + 1;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 7e30c7b50a28..ee94bd32d6dc 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -758,7 +758,7 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
758 unsigned int e = 0, s_e; 758 unsigned int e = 0, s_e;
759 struct fib_table *tb; 759 struct fib_table *tb;
760 struct hlist_head *head; 760 struct hlist_head *head;
761 int dumped = 0; 761 int dumped = 0, err;
762 762
763 if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) && 763 if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) &&
764 ((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED) 764 ((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED)
@@ -778,20 +778,27 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
778 if (dumped) 778 if (dumped)
779 memset(&cb->args[2], 0, sizeof(cb->args) - 779 memset(&cb->args[2], 0, sizeof(cb->args) -
780 2 * sizeof(cb->args[0])); 780 2 * sizeof(cb->args[0]));
781 if (fib_table_dump(tb, skb, cb) < 0) 781 err = fib_table_dump(tb, skb, cb);
782 goto out; 782 if (err < 0) {
783 if (likely(skb->len))
784 goto out;
785
786 goto out_err;
787 }
783 dumped = 1; 788 dumped = 1;
784next: 789next:
785 e++; 790 e++;
786 } 791 }
787 } 792 }
788out: 793out:
794 err = skb->len;
795out_err:
789 rcu_read_unlock(); 796 rcu_read_unlock();
790 797
791 cb->args[1] = e; 798 cb->args[1] = e;
792 cb->args[0] = h; 799 cb->args[0] = h;
793 800
794 return skb->len; 801 return err;
795} 802}
796 803
797/* Prepare and feed intra-kernel routing request. 804/* Prepare and feed intra-kernel routing request.
@@ -1081,7 +1088,8 @@ static void nl_fib_input(struct sk_buff *skb)
1081 1088
1082 net = sock_net(skb->sk); 1089 net = sock_net(skb->sk);
1083 nlh = nlmsg_hdr(skb); 1090 nlh = nlmsg_hdr(skb);
1084 if (skb->len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len || 1091 if (skb->len < nlmsg_total_size(sizeof(*frn)) ||
1092 skb->len < nlh->nlmsg_len ||
1085 nlmsg_len(nlh) < sizeof(*frn)) 1093 nlmsg_len(nlh) < sizeof(*frn))
1086 return; 1094 return;
1087 1095
@@ -1312,13 +1320,14 @@ static struct pernet_operations fib_net_ops = {
1312 1320
1313void __init ip_fib_init(void) 1321void __init ip_fib_init(void)
1314{ 1322{
1315 rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL); 1323 fib_trie_init();
1316 rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
1317 rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
1318 1324
1319 register_pernet_subsys(&fib_net_ops); 1325 register_pernet_subsys(&fib_net_ops);
1326
1320 register_netdevice_notifier(&fib_netdev_notifier); 1327 register_netdevice_notifier(&fib_netdev_notifier);
1321 register_inetaddr_notifier(&fib_inetaddr_notifier); 1328 register_inetaddr_notifier(&fib_inetaddr_notifier);
1322 1329
1323 fib_trie_init(); 1330 rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
1331 rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
1332 rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
1324} 1333}
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 840b450aab46..b2504712259f 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -204,6 +204,7 @@ static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
204static void free_fib_info_rcu(struct rcu_head *head) 204static void free_fib_info_rcu(struct rcu_head *head)
205{ 205{
206 struct fib_info *fi = container_of(head, struct fib_info, rcu); 206 struct fib_info *fi = container_of(head, struct fib_info, rcu);
207 struct dst_metrics *m;
207 208
208 change_nexthops(fi) { 209 change_nexthops(fi) {
209 if (nexthop_nh->nh_dev) 210 if (nexthop_nh->nh_dev)
@@ -214,8 +215,9 @@ static void free_fib_info_rcu(struct rcu_head *head)
214 rt_fibinfo_free(&nexthop_nh->nh_rth_input); 215 rt_fibinfo_free(&nexthop_nh->nh_rth_input);
215 } endfor_nexthops(fi); 216 } endfor_nexthops(fi);
216 217
217 if (fi->fib_metrics != (u32 *) dst_default_metrics) 218 m = fi->fib_metrics;
218 kfree(fi->fib_metrics); 219 if (m != &dst_default_metrics && atomic_dec_and_test(&m->refcnt))
220 kfree(m);
219 kfree(fi); 221 kfree(fi);
220} 222}
221 223
@@ -982,11 +984,11 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
982 val = 255; 984 val = 255;
983 if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK)) 985 if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
984 return -EINVAL; 986 return -EINVAL;
985 fi->fib_metrics[type - 1] = val; 987 fi->fib_metrics->metrics[type - 1] = val;
986 } 988 }
987 989
988 if (ecn_ca) 990 if (ecn_ca)
989 fi->fib_metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA; 991 fi->fib_metrics->metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
990 992
991 return 0; 993 return 0;
992} 994}
@@ -1044,11 +1046,12 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
1044 goto failure; 1046 goto failure;
1045 fib_info_cnt++; 1047 fib_info_cnt++;
1046 if (cfg->fc_mx) { 1048 if (cfg->fc_mx) {
1047 fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL); 1049 fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
1048 if (!fi->fib_metrics) 1050 if (!fi->fib_metrics)
1049 goto failure; 1051 goto failure;
1052 atomic_set(&fi->fib_metrics->refcnt, 1);
1050 } else 1053 } else
1051 fi->fib_metrics = (u32 *) dst_default_metrics; 1054 fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;
1052 1055
1053 fi->fib_net = net; 1056 fi->fib_net = net;
1054 fi->fib_protocol = cfg->fc_protocol; 1057 fi->fib_protocol = cfg->fc_protocol;
@@ -1251,7 +1254,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
1251 if (fi->fib_priority && 1254 if (fi->fib_priority &&
1252 nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority)) 1255 nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
1253 goto nla_put_failure; 1256 goto nla_put_failure;
1254 if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0) 1257 if (rtnetlink_put_metrics(skb, fi->fib_metrics->metrics) < 0)
1255 goto nla_put_failure; 1258 goto nla_put_failure;
1256 1259
1257 if (fi->fib_prefsrc && 1260 if (fi->fib_prefsrc &&
@@ -1277,8 +1280,9 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
1277 nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid)) 1280 nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
1278 goto nla_put_failure; 1281 goto nla_put_failure;
1279#endif 1282#endif
1280 if (fi->fib_nh->nh_lwtstate) 1283 if (fi->fib_nh->nh_lwtstate &&
1281 lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate); 1284 lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate) < 0)
1285 goto nla_put_failure;
1282 } 1286 }
1283#ifdef CONFIG_IP_ROUTE_MULTIPATH 1287#ifdef CONFIG_IP_ROUTE_MULTIPATH
1284 if (fi->fib_nhs > 1) { 1288 if (fi->fib_nhs > 1) {
@@ -1314,8 +1318,10 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
1314 nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid)) 1318 nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
1315 goto nla_put_failure; 1319 goto nla_put_failure;
1316#endif 1320#endif
1317 if (nh->nh_lwtstate) 1321 if (nh->nh_lwtstate &&
1318 lwtunnel_fill_encap(skb, nh->nh_lwtstate); 1322 lwtunnel_fill_encap(skb, nh->nh_lwtstate) < 0)
1323 goto nla_put_failure;
1324
1319 /* length of rtnetlink header + attributes */ 1325 /* length of rtnetlink header + attributes */
1320 rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh; 1326 rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
1321 } endfor_nexthops(fi); 1327 } endfor_nexthops(fi);
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 7c52afb98c42..5c598f99a500 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1906,6 +1906,8 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
1906 1906
1907 /* rcu_read_lock is hold by caller */ 1907 /* rcu_read_lock is hold by caller */
1908 hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { 1908 hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
1909 int err;
1910
1909 if (i < s_i) { 1911 if (i < s_i) {
1910 i++; 1912 i++;
1911 continue; 1913 continue;
@@ -1916,17 +1918,14 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
1916 continue; 1918 continue;
1917 } 1919 }
1918 1920
1919 if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid, 1921 err = fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
1920 cb->nlh->nlmsg_seq, 1922 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
1921 RTM_NEWROUTE, 1923 tb->tb_id, fa->fa_type,
1922 tb->tb_id, 1924 xkey, KEYLENGTH - fa->fa_slen,
1923 fa->fa_type, 1925 fa->fa_tos, fa->fa_info, NLM_F_MULTI);
1924 xkey, 1926 if (err < 0) {
1925 KEYLENGTH - fa->fa_slen,
1926 fa->fa_tos,
1927 fa->fa_info, NLM_F_MULTI) < 0) {
1928 cb->args[4] = i; 1927 cb->args[4] = i;
1929 return -1; 1928 return err;
1930 } 1929 }
1931 i++; 1930 i++;
1932 } 1931 }
@@ -1948,10 +1947,13 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
1948 t_key key = cb->args[3]; 1947 t_key key = cb->args[3];
1949 1948
1950 while ((l = leaf_walk_rcu(&tp, key)) != NULL) { 1949 while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
1951 if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) { 1950 int err;
1951
1952 err = fn_trie_dump_leaf(l, tb, skb, cb);
1953 if (err < 0) {
1952 cb->args[3] = key; 1954 cb->args[3] = key;
1953 cb->args[2] = count; 1955 cb->args[2] = count;
1954 return -1; 1956 return err;
1955 } 1957 }
1956 1958
1957 ++count; 1959 ++count;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 17adfdaf5795..3809d523d012 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1102,6 +1102,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
1102 pmc = kzalloc(sizeof(*pmc), GFP_KERNEL); 1102 pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
1103 if (!pmc) 1103 if (!pmc)
1104 return; 1104 return;
1105 spin_lock_init(&pmc->lock);
1105 spin_lock_bh(&im->lock); 1106 spin_lock_bh(&im->lock);
1106 pmc->interface = im->interface; 1107 pmc->interface = im->interface;
1107 in_dev_hold(in_dev); 1108 in_dev_hold(in_dev);
@@ -2026,21 +2027,26 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
2026 2027
2027static void ip_mc_clear_src(struct ip_mc_list *pmc) 2028static void ip_mc_clear_src(struct ip_mc_list *pmc)
2028{ 2029{
2029 struct ip_sf_list *psf, *nextpsf; 2030 struct ip_sf_list *psf, *nextpsf, *tomb, *sources;
2030 2031
2031 for (psf = pmc->tomb; psf; psf = nextpsf) { 2032 spin_lock_bh(&pmc->lock);
2033 tomb = pmc->tomb;
2034 pmc->tomb = NULL;
2035 sources = pmc->sources;
2036 pmc->sources = NULL;
2037 pmc->sfmode = MCAST_EXCLUDE;
2038 pmc->sfcount[MCAST_INCLUDE] = 0;
2039 pmc->sfcount[MCAST_EXCLUDE] = 1;
2040 spin_unlock_bh(&pmc->lock);
2041
2042 for (psf = tomb; psf; psf = nextpsf) {
2032 nextpsf = psf->sf_next; 2043 nextpsf = psf->sf_next;
2033 kfree(psf); 2044 kfree(psf);
2034 } 2045 }
2035 pmc->tomb = NULL; 2046 for (psf = sources; psf; psf = nextpsf) {
2036 for (psf = pmc->sources; psf; psf = nextpsf) {
2037 nextpsf = psf->sf_next; 2047 nextpsf = psf->sf_next;
2038 kfree(psf); 2048 kfree(psf);
2039 } 2049 }
2040 pmc->sources = NULL;
2041 pmc->sfmode = MCAST_EXCLUDE;
2042 pmc->sfcount[MCAST_INCLUDE] = 0;
2043 pmc->sfcount[MCAST_EXCLUDE] = 1;
2044} 2050}
2045 2051
2046/* Join a multicast group 2052/* Join a multicast group
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index c97a2108cd61..a7e7aa1f6a47 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -669,6 +669,8 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
669 inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num); 669 inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
670 newsk->sk_write_space = sk_stream_write_space; 670 newsk->sk_write_space = sk_stream_write_space;
671 671
672 inet_sk(newsk)->mc_list = NULL;
673
672 newsk->sk_mark = inet_rsk(req)->ir_mark; 674 newsk->sk_mark = inet_rsk(req)->ir_mark;
673 atomic64_set(&newsk->sk_cookie, 675 atomic64_set(&newsk->sk_cookie,
674 atomic64_read(&inet_rsk(req)->ir_cookie)); 676 atomic64_read(&inet_rsk(req)->ir_cookie));
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 661bda968594..62e41d38da78 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -922,10 +922,12 @@ static int __ip_append_data(struct sock *sk,
922 csummode = CHECKSUM_PARTIAL; 922 csummode = CHECKSUM_PARTIAL;
923 923
924 cork->length += length; 924 cork->length += length;
925 if (((length > mtu) || (skb && skb_is_gso(skb))) && 925 if ((skb && skb_is_gso(skb)) ||
926 (((length + (skb ? skb->len : fragheaderlen)) > mtu) &&
927 (skb_queue_len(queue) <= 1) &&
926 (sk->sk_protocol == IPPROTO_UDP) && 928 (sk->sk_protocol == IPPROTO_UDP) &&
927 (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len && 929 (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
928 (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) { 930 (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx)) {
929 err = ip_ufo_append_data(sk, queue, getfrag, from, length, 931 err = ip_ufo_append_data(sk, queue, getfrag, from, length,
930 hh_len, fragheaderlen, transhdrlen, 932 hh_len, fragheaderlen, transhdrlen,
931 maxfraglen, flags); 933 maxfraglen, flags);
@@ -1241,6 +1243,7 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
1241 return -EINVAL; 1243 return -EINVAL;
1242 1244
1243 if ((size + skb->len > mtu) && 1245 if ((size + skb->len > mtu) &&
1246 (skb_queue_len(&sk->sk_write_queue) == 1) &&
1244 (sk->sk_protocol == IPPROTO_UDP) && 1247 (sk->sk_protocol == IPPROTO_UDP) &&
1245 (rt->dst.dev->features & NETIF_F_UFO)) { 1248 (rt->dst.dev->features & NETIF_F_UFO)) {
1246 if (skb->ip_summed != CHECKSUM_PARTIAL) 1249 if (skb->ip_summed != CHECKSUM_PARTIAL)
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 9ce202549e7a..f300d1cbfa91 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -105,10 +105,10 @@ static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
105 if (skb->ip_summed != CHECKSUM_COMPLETE) 105 if (skb->ip_summed != CHECKSUM_COMPLETE)
106 return; 106 return;
107 107
108 if (offset != 0) 108 if (offset != 0) {
109 csum = csum_sub(csum, 109 int tend_off = skb_transport_offset(skb) + tlen;
110 csum_partial(skb->data + tlen, 110 csum = csum_sub(csum, skb_checksum(skb, tend_off, offset, 0));
111 offset, 0)); 111 }
112 112
113 put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum); 113 put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
114} 114}
@@ -1192,7 +1192,14 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
1192 pktinfo->ipi_ifindex = 0; 1192 pktinfo->ipi_ifindex = 0;
1193 pktinfo->ipi_spec_dst.s_addr = 0; 1193 pktinfo->ipi_spec_dst.s_addr = 0;
1194 } 1194 }
1195 skb_dst_drop(skb); 1195 /* We need to keep the dst for __ip_options_echo()
1196 * We could restrict the test to opt.ts_needtime || opt.srr,
1197 * but the following is good enough as IP options are not often used.
1198 */
1199 if (unlikely(IPCB(skb)->opt.optlen))
1200 skb_dst_force(skb);
1201 else
1202 skb_dst_drop(skb);
1196} 1203}
1197 1204
1198int ip_setsockopt(struct sock *sk, int level, 1205int ip_setsockopt(struct sock *sk, int level,
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 495fefe6a898..a989aba861e0 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -154,17 +154,18 @@ void ping_hash(struct sock *sk)
154void ping_unhash(struct sock *sk) 154void ping_unhash(struct sock *sk)
155{ 155{
156 struct inet_sock *isk = inet_sk(sk); 156 struct inet_sock *isk = inet_sk(sk);
157
157 pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num); 158 pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
159 write_lock_bh(&ping_table.lock);
158 if (sk_hashed(sk)) { 160 if (sk_hashed(sk)) {
159 write_lock_bh(&ping_table.lock);
160 hlist_nulls_del(&sk->sk_nulls_node); 161 hlist_nulls_del(&sk->sk_nulls_node);
161 sk_nulls_node_init(&sk->sk_nulls_node); 162 sk_nulls_node_init(&sk->sk_nulls_node);
162 sock_put(sk); 163 sock_put(sk);
163 isk->inet_num = 0; 164 isk->inet_num = 0;
164 isk->inet_sport = 0; 165 isk->inet_sport = 0;
165 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 166 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
166 write_unlock_bh(&ping_table.lock);
167 } 167 }
168 write_unlock_bh(&ping_table.lock);
168} 169}
169EXPORT_SYMBOL_GPL(ping_unhash); 170EXPORT_SYMBOL_GPL(ping_unhash);
170 171
@@ -645,6 +646,8 @@ static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh,
645{ 646{
646 struct sk_buff *skb = skb_peek(&sk->sk_write_queue); 647 struct sk_buff *skb = skb_peek(&sk->sk_write_queue);
647 648
649 if (!skb)
650 return 0;
648 pfh->wcheck = csum_partial((char *)&pfh->icmph, 651 pfh->wcheck = csum_partial((char *)&pfh->icmph,
649 sizeof(struct icmphdr), pfh->wcheck); 652 sizeof(struct icmphdr), pfh->wcheck);
650 pfh->icmph.checksum = csum_fold(pfh->wcheck); 653 pfh->icmph.checksum = csum_fold(pfh->wcheck);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 6287418c1dfe..ca1031411aa7 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -354,6 +354,9 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
354 rt->dst.dev->mtu); 354 rt->dst.dev->mtu);
355 return -EMSGSIZE; 355 return -EMSGSIZE;
356 } 356 }
357 if (length < sizeof(struct iphdr))
358 return -EINVAL;
359
357 if (flags&MSG_PROBE) 360 if (flags&MSG_PROBE)
358 goto out; 361 goto out;
359 362
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 3708ff083211..fd15e55b28d1 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -792,6 +792,7 @@ static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buf
792 struct rtable *rt; 792 struct rtable *rt;
793 struct flowi4 fl4; 793 struct flowi4 fl4;
794 const struct iphdr *iph = (const struct iphdr *) skb->data; 794 const struct iphdr *iph = (const struct iphdr *) skb->data;
795 struct net *net = dev_net(skb->dev);
795 int oif = skb->dev->ifindex; 796 int oif = skb->dev->ifindex;
796 u8 tos = RT_TOS(iph->tos); 797 u8 tos = RT_TOS(iph->tos);
797 u8 prot = iph->protocol; 798 u8 prot = iph->protocol;
@@ -799,7 +800,7 @@ static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buf
799 800
800 rt = (struct rtable *) dst; 801 rt = (struct rtable *) dst;
801 802
802 __build_flow_key(sock_net(sk), &fl4, sk, iph, oif, tos, prot, mark, 0); 803 __build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
803 __ip_do_redirect(rt, skb, &fl4, true); 804 __ip_do_redirect(rt, skb, &fl4, true);
804} 805}
805 806
@@ -1361,8 +1362,12 @@ static void rt_add_uncached_list(struct rtable *rt)
1361 1362
1362static void ipv4_dst_destroy(struct dst_entry *dst) 1363static void ipv4_dst_destroy(struct dst_entry *dst)
1363{ 1364{
1365 struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
1364 struct rtable *rt = (struct rtable *) dst; 1366 struct rtable *rt = (struct rtable *) dst;
1365 1367
1368 if (p != &dst_default_metrics && atomic_dec_and_test(&p->refcnt))
1369 kfree(p);
1370
1366 if (!list_empty(&rt->rt_uncached)) { 1371 if (!list_empty(&rt->rt_uncached)) {
1367 struct uncached_list *ul = rt->rt_uncached_list; 1372 struct uncached_list *ul = rt->rt_uncached_list;
1368 1373
@@ -1414,7 +1419,11 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
1414 rt->rt_gateway = nh->nh_gw; 1419 rt->rt_gateway = nh->nh_gw;
1415 rt->rt_uses_gateway = 1; 1420 rt->rt_uses_gateway = 1;
1416 } 1421 }
1417 dst_init_metrics(&rt->dst, fi->fib_metrics, true); 1422 dst_init_metrics(&rt->dst, fi->fib_metrics->metrics, true);
1423 if (fi->fib_metrics != &dst_default_metrics) {
1424 rt->dst._metrics |= DST_METRICS_REFCOUNTED;
1425 atomic_inc(&fi->fib_metrics->refcnt);
1426 }
1418#ifdef CONFIG_IP_ROUTE_CLASSID 1427#ifdef CONFIG_IP_ROUTE_CLASSID
1419 rt->dst.tclassid = nh->nh_tclassid; 1428 rt->dst.tclassid = nh->nh_tclassid;
1420#endif 1429#endif
@@ -1963,6 +1972,7 @@ int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1963{ 1972{
1964 int res; 1973 int res;
1965 1974
1975 tos &= IPTOS_RT_MASK;
1966 rcu_read_lock(); 1976 rcu_read_lock();
1967 1977
1968 /* Multicast recognition logic is moved from route cache to here. 1978 /* Multicast recognition logic is moved from route cache to here.
@@ -2435,7 +2445,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
2435 r->rtm_dst_len = 32; 2445 r->rtm_dst_len = 32;
2436 r->rtm_src_len = 0; 2446 r->rtm_src_len = 0;
2437 r->rtm_tos = fl4->flowi4_tos; 2447 r->rtm_tos = fl4->flowi4_tos;
2438 r->rtm_table = table_id; 2448 r->rtm_table = table_id < 256 ? table_id : RT_TABLE_COMPAT;
2439 if (nla_put_u32(skb, RTA_TABLE, table_id)) 2449 if (nla_put_u32(skb, RTA_TABLE, table_id))
2440 goto nla_put_failure; 2450 goto nla_put_failure;
2441 r->rtm_type = rt->rt_type; 2451 r->rtm_type = rt->rt_type;
@@ -2569,7 +2579,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
2569 skb_reset_network_header(skb); 2579 skb_reset_network_header(skb);
2570 2580
2571 /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */ 2581 /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
2572 ip_hdr(skb)->protocol = IPPROTO_ICMP; 2582 ip_hdr(skb)->protocol = IPPROTO_UDP;
2573 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr)); 2583 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2574 2584
2575 src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0; 2585 src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 2dc982b15df8..a2e1142145df 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -337,6 +337,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
337 treq = tcp_rsk(req); 337 treq = tcp_rsk(req);
338 treq->rcv_isn = ntohl(th->seq) - 1; 338 treq->rcv_isn = ntohl(th->seq) - 1;
339 treq->snt_isn = cookie; 339 treq->snt_isn = cookie;
340 treq->txhash = net_tx_rndhash();
340 req->mss = mss; 341 req->mss = mss;
341 ireq->ir_num = ntohs(th->dest); 342 ireq->ir_num = ntohs(th->dest);
342 ireq->ir_rmt_port = th->source; 343 ireq->ir_rmt_port = th->source;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index dc173e0d2184..48e6509426b0 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -783,6 +783,12 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
783 ret = -EAGAIN; 783 ret = -EAGAIN;
784 break; 784 break;
785 } 785 }
786 /* if __tcp_splice_read() got nothing while we have
787 * an skb in receive queue, we do not want to loop.
788 * This might happen with URG data.
789 */
790 if (!skb_queue_empty(&sk->sk_receive_queue))
791 break;
786 sk_wait_data(sk, &timeo, NULL); 792 sk_wait_data(sk, &timeo, NULL);
787 if (signal_pending(current)) { 793 if (signal_pending(current)) {
788 ret = sock_intr_errno(timeo); 794 ret = sock_intr_errno(timeo);
@@ -1065,9 +1071,12 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
1065 int *copied, size_t size) 1071 int *copied, size_t size)
1066{ 1072{
1067 struct tcp_sock *tp = tcp_sk(sk); 1073 struct tcp_sock *tp = tcp_sk(sk);
1074 struct sockaddr *uaddr = msg->msg_name;
1068 int err, flags; 1075 int err, flags;
1069 1076
1070 if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE)) 1077 if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) ||
1078 (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
1079 uaddr->sa_family == AF_UNSPEC))
1071 return -EOPNOTSUPP; 1080 return -EOPNOTSUPP;
1072 if (tp->fastopen_req) 1081 if (tp->fastopen_req)
1073 return -EALREADY; /* Another Fast Open is in progress */ 1082 return -EALREADY; /* Another Fast Open is in progress */
@@ -1080,7 +1089,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
1080 tp->fastopen_req->size = size; 1089 tp->fastopen_req->size = size;
1081 1090
1082 flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0; 1091 flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
1083 err = __inet_stream_connect(sk->sk_socket, msg->msg_name, 1092 err = __inet_stream_connect(sk->sk_socket, uaddr,
1084 msg->msg_namelen, flags); 1093 msg->msg_namelen, flags);
1085 *copied = tp->fastopen_req->copied; 1094 *copied = tp->fastopen_req->copied;
1086 tcp_free_fastopen_req(tp); 1095 tcp_free_fastopen_req(tp);
@@ -2254,6 +2263,9 @@ int tcp_disconnect(struct sock *sk, int flags)
2254 tcp_init_send_head(sk); 2263 tcp_init_send_head(sk);
2255 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); 2264 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
2256 __sk_dst_reset(sk); 2265 __sk_dst_reset(sk);
2266 dst_release(sk->sk_rx_dst);
2267 sk->sk_rx_dst = NULL;
2268 tcp_saved_syn_free(tp);
2257 2269
2258 WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); 2270 WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
2259 2271
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 882caa4e72bc..aafe68134763 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -183,6 +183,7 @@ void tcp_init_congestion_control(struct sock *sk)
183{ 183{
184 const struct inet_connection_sock *icsk = inet_csk(sk); 184 const struct inet_connection_sock *icsk = inet_csk(sk);
185 185
186 tcp_sk(sk)->prior_ssthresh = 0;
186 if (icsk->icsk_ca_ops->init) 187 if (icsk->icsk_ca_ops->init)
187 icsk->icsk_ca_ops->init(sk); 188 icsk->icsk_ca_ops->init(sk);
188 if (tcp_ca_needs_ecn(sk)) 189 if (tcp_ca_needs_ecn(sk))
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 55be6ac70cff..fca618272a01 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -112,7 +112,7 @@ static bool tcp_fastopen_cookie_gen(struct request_sock *req,
112 struct tcp_fastopen_cookie tmp; 112 struct tcp_fastopen_cookie tmp;
113 113
114 if (__tcp_fastopen_cookie_gen(&ip6h->saddr, &tmp)) { 114 if (__tcp_fastopen_cookie_gen(&ip6h->saddr, &tmp)) {
115 struct in6_addr *buf = (struct in6_addr *) tmp.val; 115 struct in6_addr *buf = &tmp.addr;
116 int i; 116 int i;
117 117
118 for (i = 0; i < 4; i++) 118 for (i = 0; i < 4; i++)
@@ -161,6 +161,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
161 * scaled. So correct it appropriately. 161 * scaled. So correct it appropriately.
162 */ 162 */
163 tp->snd_wnd = ntohs(tcp_hdr(skb)->window); 163 tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
164 tp->max_window = tp->snd_wnd;
164 165
165 /* Activate the retrans timer so that SYNACK can be retransmitted. 166 /* Activate the retrans timer so that SYNACK can be retransmitted.
166 * The request socket is not added to the ehash 167 * The request socket is not added to the ehash
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 35e97ff3054a..b6d99c308bef 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1135,13 +1135,14 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
1135 */ 1135 */
1136 if (pkt_len > mss) { 1136 if (pkt_len > mss) {
1137 unsigned int new_len = (pkt_len / mss) * mss; 1137 unsigned int new_len = (pkt_len / mss) * mss;
1138 if (!in_sack && new_len < pkt_len) { 1138 if (!in_sack && new_len < pkt_len)
1139 new_len += mss; 1139 new_len += mss;
1140 if (new_len >= skb->len)
1141 return 0;
1142 }
1143 pkt_len = new_len; 1140 pkt_len = new_len;
1144 } 1141 }
1142
1143 if (pkt_len >= skb->len && !in_sack)
1144 return 0;
1145
1145 err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC); 1146 err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC);
1146 if (err < 0) 1147 if (err < 0)
1147 return err; 1148 return err;
@@ -2165,8 +2166,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
2165{ 2166{
2166 struct tcp_sock *tp = tcp_sk(sk); 2167 struct tcp_sock *tp = tcp_sk(sk);
2167 struct sk_buff *skb; 2168 struct sk_buff *skb;
2168 int cnt, oldcnt; 2169 int cnt, oldcnt, lost;
2169 int err;
2170 unsigned int mss; 2170 unsigned int mss;
2171 /* Use SACK to deduce losses of new sequences sent during recovery */ 2171 /* Use SACK to deduce losses of new sequences sent during recovery */
2172 const u32 loss_high = tcp_is_sack(tp) ? tp->snd_nxt : tp->high_seq; 2172 const u32 loss_high = tcp_is_sack(tp) ? tp->snd_nxt : tp->high_seq;
@@ -2206,9 +2206,10 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
2206 break; 2206 break;
2207 2207
2208 mss = tcp_skb_mss(skb); 2208 mss = tcp_skb_mss(skb);
2209 err = tcp_fragment(sk, skb, (packets - oldcnt) * mss, 2209 /* If needed, chop off the prefix to mark as lost. */
2210 mss, GFP_ATOMIC); 2210 lost = (packets - oldcnt) * mss;
2211 if (err < 0) 2211 if (lost < skb->len &&
2212 tcp_fragment(sk, skb, lost, mss, GFP_ATOMIC) < 0)
2212 break; 2213 break;
2213 cnt = packets; 2214 cnt = packets;
2214 } 2215 }
@@ -2503,8 +2504,8 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
2503 struct tcp_sock *tp = tcp_sk(sk); 2504 struct tcp_sock *tp = tcp_sk(sk);
2504 2505
2505 /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */ 2506 /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
2506 if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || 2507 if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
2507 (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) { 2508 (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
2508 tp->snd_cwnd = tp->snd_ssthresh; 2509 tp->snd_cwnd = tp->snd_ssthresh;
2509 tp->snd_cwnd_stamp = tcp_time_stamp; 2510 tp->snd_cwnd_stamp = tcp_time_stamp;
2510 } 2511 }
@@ -3220,7 +3221,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
3220 int delta; 3221 int delta;
3221 3222
3222 /* Non-retransmitted hole got filled? That's reordering */ 3223 /* Non-retransmitted hole got filled? That's reordering */
3223 if (reord < prior_fackets) 3224 if (reord < prior_fackets && reord <= tp->fackets_out)
3224 tcp_update_reordering(sk, tp->fackets_out - reord, 0); 3225 tcp_update_reordering(sk, tp->fackets_out - reord, 0);
3225 3226
3226 delta = tcp_is_fack(tp) ? pkts_acked : 3227 delta = tcp_is_fack(tp) ? pkts_acked :
@@ -5436,6 +5437,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
5436 struct inet_connection_sock *icsk = inet_csk(sk); 5437 struct inet_connection_sock *icsk = inet_csk(sk);
5437 5438
5438 tcp_set_state(sk, TCP_ESTABLISHED); 5439 tcp_set_state(sk, TCP_ESTABLISHED);
5440 icsk->icsk_ack.lrcvtime = tcp_time_stamp;
5439 5441
5440 if (skb) { 5442 if (skb) {
5441 icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); 5443 icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
@@ -5648,7 +5650,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5648 * to stand against the temptation 8) --ANK 5650 * to stand against the temptation 8) --ANK
5649 */ 5651 */
5650 inet_csk_schedule_ack(sk); 5652 inet_csk_schedule_ack(sk);
5651 icsk->icsk_ack.lrcvtime = tcp_time_stamp;
5652 tcp_enter_quickack_mode(sk); 5653 tcp_enter_quickack_mode(sk);
5653 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 5654 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
5654 TCP_DELACK_MAX, TCP_RTO_MAX); 5655 TCP_DELACK_MAX, TCP_RTO_MAX);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 25309b137c43..a84f74af22f7 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -271,10 +271,13 @@ EXPORT_SYMBOL(tcp_v4_connect);
271 */ 271 */
272void tcp_v4_mtu_reduced(struct sock *sk) 272void tcp_v4_mtu_reduced(struct sock *sk)
273{ 273{
274 struct dst_entry *dst;
275 struct inet_sock *inet = inet_sk(sk); 274 struct inet_sock *inet = inet_sk(sk);
276 u32 mtu = tcp_sk(sk)->mtu_info; 275 struct dst_entry *dst;
276 u32 mtu;
277 277
278 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
279 return;
280 mtu = tcp_sk(sk)->mtu_info;
278 dst = inet_csk_update_pmtu(sk, mtu); 281 dst = inet_csk_update_pmtu(sk, mtu);
279 if (!dst) 282 if (!dst)
280 return; 283 return;
@@ -420,7 +423,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
420 423
421 switch (type) { 424 switch (type) {
422 case ICMP_REDIRECT: 425 case ICMP_REDIRECT:
423 do_redirect(icmp_skb, sk); 426 if (!sock_owned_by_user(sk))
427 do_redirect(icmp_skb, sk);
424 goto out; 428 goto out;
425 case ICMP_SOURCE_QUENCH: 429 case ICMP_SOURCE_QUENCH:
426 /* Just silently ignore these. */ 430 /* Just silently ignore these. */
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index 1e70fa8fa793..3861dedd5365 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -264,13 +264,15 @@ static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, s32 rtt_us)
264{ 264{
265 struct tcp_sock *tp = tcp_sk(sk); 265 struct tcp_sock *tp = tcp_sk(sk);
266 struct lp *lp = inet_csk_ca(sk); 266 struct lp *lp = inet_csk_ca(sk);
267 u32 delta;
267 268
268 if (rtt_us > 0) 269 if (rtt_us > 0)
269 tcp_lp_rtt_sample(sk, rtt_us); 270 tcp_lp_rtt_sample(sk, rtt_us);
270 271
271 /* calc inference */ 272 /* calc inference */
272 if (tcp_time_stamp > tp->rx_opt.rcv_tsecr) 273 delta = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
273 lp->inference = 3 * (tcp_time_stamp - tp->rx_opt.rcv_tsecr); 274 if ((s32)delta > 0)
275 lp->inference = 3 * delta;
274 276
275 /* test if within inference */ 277 /* test if within inference */
276 if (lp->last_drop && (tcp_time_stamp - lp->last_drop < lp->inference)) 278 if (lp->last_drop && (tcp_time_stamp - lp->last_drop < lp->inference))
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 9475a2748a9a..4c1c94fa8f08 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -472,6 +472,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
472 newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); 472 newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
473 newtp->rtt_min[0].rtt = ~0U; 473 newtp->rtt_min[0].rtt = ~0U;
474 newicsk->icsk_rto = TCP_TIMEOUT_INIT; 474 newicsk->icsk_rto = TCP_TIMEOUT_INIT;
475 newicsk->icsk_ack.lrcvtime = tcp_time_stamp;
475 476
476 newtp->packets_out = 0; 477 newtp->packets_out = 0;
477 newtp->retrans_out = 0; 478 newtp->retrans_out = 0;
@@ -546,6 +547,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
546 newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; 547 newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
547 newtp->rx_opt.mss_clamp = req->mss; 548 newtp->rx_opt.mss_clamp = req->mss;
548 tcp_ecn_openreq_child(newtp, req); 549 tcp_ecn_openreq_child(newtp, req);
550 newtp->fastopen_req = NULL;
549 newtp->fastopen_rsk = NULL; 551 newtp->fastopen_rsk = NULL;
550 newtp->syn_data_acked = 0; 552 newtp->syn_data_acked = 0;
551 newtp->rack.mstamp.v64 = 0; 553 newtp->rack.mstamp.v64 = 0;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index ca3731721d81..4e88f93f71c8 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1221,7 +1221,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
1221 * eventually). The difference is that pulled data not copied, but 1221 * eventually). The difference is that pulled data not copied, but
1222 * immediately discarded. 1222 * immediately discarded.
1223 */ 1223 */
1224static void __pskb_trim_head(struct sk_buff *skb, int len) 1224static int __pskb_trim_head(struct sk_buff *skb, int len)
1225{ 1225{
1226 struct skb_shared_info *shinfo; 1226 struct skb_shared_info *shinfo;
1227 int i, k, eat; 1227 int i, k, eat;
@@ -1231,7 +1231,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
1231 __skb_pull(skb, eat); 1231 __skb_pull(skb, eat);
1232 len -= eat; 1232 len -= eat;
1233 if (!len) 1233 if (!len)
1234 return; 1234 return 0;
1235 } 1235 }
1236 eat = len; 1236 eat = len;
1237 k = 0; 1237 k = 0;
@@ -1257,23 +1257,28 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
1257 skb_reset_tail_pointer(skb); 1257 skb_reset_tail_pointer(skb);
1258 skb->data_len -= len; 1258 skb->data_len -= len;
1259 skb->len = skb->data_len; 1259 skb->len = skb->data_len;
1260 return len;
1260} 1261}
1261 1262
1262/* Remove acked data from a packet in the transmit queue. */ 1263/* Remove acked data from a packet in the transmit queue. */
1263int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) 1264int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
1264{ 1265{
1266 u32 delta_truesize;
1267
1265 if (skb_unclone(skb, GFP_ATOMIC)) 1268 if (skb_unclone(skb, GFP_ATOMIC))
1266 return -ENOMEM; 1269 return -ENOMEM;
1267 1270
1268 __pskb_trim_head(skb, len); 1271 delta_truesize = __pskb_trim_head(skb, len);
1269 1272
1270 TCP_SKB_CB(skb)->seq += len; 1273 TCP_SKB_CB(skb)->seq += len;
1271 skb->ip_summed = CHECKSUM_PARTIAL; 1274 skb->ip_summed = CHECKSUM_PARTIAL;
1272 1275
1273 skb->truesize -= len; 1276 if (delta_truesize) {
1274 sk->sk_wmem_queued -= len; 1277 skb->truesize -= delta_truesize;
1275 sk_mem_uncharge(sk, len); 1278 sk->sk_wmem_queued -= delta_truesize;
1276 sock_set_flag(sk, SOCK_QUEUE_SHRUNK); 1279 sk_mem_uncharge(sk, delta_truesize);
1280 sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
1281 }
1277 1282
1278 /* Any change of skb->len requires recalculation of tso factor. */ 1283 /* Any change of skb->len requires recalculation of tso factor. */
1279 if (tcp_skb_pcount(skb) > 1) 1284 if (tcp_skb_pcount(skb) > 1)
@@ -2383,9 +2388,11 @@ u32 __tcp_select_window(struct sock *sk)
2383 int full_space = min_t(int, tp->window_clamp, allowed_space); 2388 int full_space = min_t(int, tp->window_clamp, allowed_space);
2384 int window; 2389 int window;
2385 2390
2386 if (mss > full_space) 2391 if (unlikely(mss > full_space)) {
2387 mss = full_space; 2392 mss = full_space;
2388 2393 if (mss <= 0)
2394 return 0;
2395 }
2389 if (free_space < (full_space >> 1)) { 2396 if (free_space < (full_space >> 1)) {
2390 icsk->icsk_ack.quick = 0; 2397 icsk->icsk_ack.quick = 0;
2391 2398
@@ -3249,6 +3256,9 @@ int tcp_connect(struct sock *sk)
3249 struct sk_buff *buff; 3256 struct sk_buff *buff;
3250 int err; 3257 int err;
3251 3258
3259 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
3260 return -EHOSTUNREACH; /* Routing failure or similar. */
3261
3252 tcp_connect_init(sk); 3262 tcp_connect_init(sk);
3253 3263
3254 if (unlikely(tp->repair)) { 3264 if (unlikely(tp->repair)) {
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 193ba1fa8a9a..1ec12a4f327e 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -223,7 +223,8 @@ void tcp_delack_timer_handler(struct sock *sk)
223 223
224 sk_mem_reclaim_partial(sk); 224 sk_mem_reclaim_partial(sk);
225 225
226 if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) 226 if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
227 !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
227 goto out; 228 goto out;
228 229
229 if (time_after(icsk->icsk_ack.timeout, jiffies)) { 230 if (time_after(icsk->icsk_ack.timeout, jiffies)) {
@@ -504,7 +505,8 @@ void tcp_write_timer_handler(struct sock *sk)
504 struct inet_connection_sock *icsk = inet_csk(sk); 505 struct inet_connection_sock *icsk = inet_csk(sk);
505 int event; 506 int event;
506 507
507 if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending) 508 if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
509 !icsk->icsk_pending)
508 goto out; 510 goto out;
509 511
510 if (time_after(icsk->icsk_timeout, jiffies)) { 512 if (time_after(icsk->icsk_timeout, jiffies)) {
@@ -604,7 +606,8 @@ static void tcp_keepalive_timer (unsigned long data)
604 goto death; 606 goto death;
605 } 607 }
606 608
607 if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE) 609 if (!sock_flag(sk, SOCK_KEEPOPEN) ||
610 ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
608 goto out; 611 goto out;
609 612
610 elapsed = keepalive_time_when(tp); 613 elapsed = keepalive_time_when(tp);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index ad3d1534c524..9ee5087b9b5e 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -819,7 +819,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
819 if (is_udplite) /* UDP-Lite */ 819 if (is_udplite) /* UDP-Lite */
820 csum = udplite_csum(skb); 820 csum = udplite_csum(skb);
821 821
822 else if (sk->sk_no_check_tx) { /* UDP csum disabled */ 822 else if (sk->sk_no_check_tx && !skb_is_gso(skb)) { /* UDP csum off */
823 823
824 skb->ip_summed = CHECKSUM_NONE; 824 skb->ip_summed = CHECKSUM_NONE;
825 goto send; 825 goto send;
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 6396f1c80ae9..6dfc3daf7c21 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -231,7 +231,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
231 if (uh->check == 0) 231 if (uh->check == 0)
232 uh->check = CSUM_MANGLED_0; 232 uh->check = CSUM_MANGLED_0;
233 233
234 skb->ip_summed = CHECKSUM_NONE; 234 skb->ip_summed = CHECKSUM_UNNECESSARY;
235 235
236 /* Fragment the skb. IP headers of the fragments are updated in 236 /* Fragment the skb. IP headers of the fragments are updated in
237 * inet_gso_segment() 237 * inet_gso_segment()
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 1e541578a66d..2d2241006d35 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -112,6 +112,27 @@ static inline u32 cstamp_delta(unsigned long cstamp)
112 return (cstamp - INITIAL_JIFFIES) * 100UL / HZ; 112 return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
113} 113}
114 114
115static inline s32 rfc3315_s14_backoff_init(s32 irt)
116{
117 /* multiply 'initial retransmission time' by 0.9 .. 1.1 */
118 u64 tmp = (900000 + prandom_u32() % 200001) * (u64)irt;
119 do_div(tmp, 1000000);
120 return (s32)tmp;
121}
122
123static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
124{
125 /* multiply 'retransmission timeout' by 1.9 .. 2.1 */
126 u64 tmp = (1900000 + prandom_u32() % 200001) * (u64)rt;
127 do_div(tmp, 1000000);
128 if ((s32)tmp > mrt) {
129 /* multiply 'maximum retransmission time' by 0.9 .. 1.1 */
130 tmp = (900000 + prandom_u32() % 200001) * (u64)mrt;
131 do_div(tmp, 1000000);
132 }
133 return (s32)tmp;
134}
135
115#ifdef CONFIG_SYSCTL 136#ifdef CONFIG_SYSCTL
116static int addrconf_sysctl_register(struct inet6_dev *idev); 137static int addrconf_sysctl_register(struct inet6_dev *idev);
117static void addrconf_sysctl_unregister(struct inet6_dev *idev); 138static void addrconf_sysctl_unregister(struct inet6_dev *idev);
@@ -187,6 +208,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
187 .dad_transmits = 1, 208 .dad_transmits = 1,
188 .rtr_solicits = MAX_RTR_SOLICITATIONS, 209 .rtr_solicits = MAX_RTR_SOLICITATIONS,
189 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL, 210 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
211 .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
190 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY, 212 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
191 .use_tempaddr = 0, 213 .use_tempaddr = 0,
192 .temp_valid_lft = TEMP_VALID_LIFETIME, 214 .temp_valid_lft = TEMP_VALID_LIFETIME,
@@ -202,6 +224,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
202 .accept_ra_rtr_pref = 1, 224 .accept_ra_rtr_pref = 1,
203 .rtr_probe_interval = 60 * HZ, 225 .rtr_probe_interval = 60 * HZ,
204#ifdef CONFIG_IPV6_ROUTE_INFO 226#ifdef CONFIG_IPV6_ROUTE_INFO
227 .accept_ra_rt_info_min_plen = 0,
205 .accept_ra_rt_info_max_plen = 0, 228 .accept_ra_rt_info_max_plen = 0,
206#endif 229#endif
207#endif 230#endif
@@ -232,6 +255,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
232 .dad_transmits = 1, 255 .dad_transmits = 1,
233 .rtr_solicits = MAX_RTR_SOLICITATIONS, 256 .rtr_solicits = MAX_RTR_SOLICITATIONS,
234 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL, 257 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
258 .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
235 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY, 259 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
236 .use_tempaddr = 0, 260 .use_tempaddr = 0,
237 .temp_valid_lft = TEMP_VALID_LIFETIME, 261 .temp_valid_lft = TEMP_VALID_LIFETIME,
@@ -247,6 +271,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
247 .accept_ra_rtr_pref = 1, 271 .accept_ra_rtr_pref = 1,
248 .rtr_probe_interval = 60 * HZ, 272 .rtr_probe_interval = 60 * HZ,
249#ifdef CONFIG_IPV6_ROUTE_INFO 273#ifdef CONFIG_IPV6_ROUTE_INFO
274 .accept_ra_rt_info_min_plen = 0,
250 .accept_ra_rt_info_max_plen = 0, 275 .accept_ra_rt_info_max_plen = 0,
251#endif 276#endif
252#endif 277#endif
@@ -293,9 +318,9 @@ static void addrconf_mod_rs_timer(struct inet6_dev *idev,
293static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp, 318static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
294 unsigned long delay) 319 unsigned long delay)
295{ 320{
296 if (!delayed_work_pending(&ifp->dad_work)) 321 in6_ifa_hold(ifp);
297 in6_ifa_hold(ifp); 322 if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
298 mod_delayed_work(addrconf_wq, &ifp->dad_work, delay); 323 in6_ifa_put(ifp);
299} 324}
300 325
301static int snmp6_alloc_dev(struct inet6_dev *idev) 326static int snmp6_alloc_dev(struct inet6_dev *idev)
@@ -1774,17 +1799,7 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add
1774 1799
1775static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed) 1800static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
1776{ 1801{
1777 if (ifp->flags&IFA_F_PERMANENT) { 1802 if (ifp->flags&IFA_F_TEMPORARY) {
1778 spin_lock_bh(&ifp->lock);
1779 addrconf_del_dad_work(ifp);
1780 ifp->flags |= IFA_F_TENTATIVE;
1781 if (dad_failed)
1782 ifp->flags |= IFA_F_DADFAILED;
1783 spin_unlock_bh(&ifp->lock);
1784 if (dad_failed)
1785 ipv6_ifa_notify(0, ifp);
1786 in6_ifa_put(ifp);
1787 } else if (ifp->flags&IFA_F_TEMPORARY) {
1788 struct inet6_ifaddr *ifpub; 1803 struct inet6_ifaddr *ifpub;
1789 spin_lock_bh(&ifp->lock); 1804 spin_lock_bh(&ifp->lock);
1790 ifpub = ifp->ifpub; 1805 ifpub = ifp->ifpub;
@@ -1797,6 +1812,16 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
1797 spin_unlock_bh(&ifp->lock); 1812 spin_unlock_bh(&ifp->lock);
1798 } 1813 }
1799 ipv6_del_addr(ifp); 1814 ipv6_del_addr(ifp);
1815 } else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) {
1816 spin_lock_bh(&ifp->lock);
1817 addrconf_del_dad_work(ifp);
1818 ifp->flags |= IFA_F_TENTATIVE;
1819 if (dad_failed)
1820 ifp->flags |= IFA_F_DADFAILED;
1821 spin_unlock_bh(&ifp->lock);
1822 if (dad_failed)
1823 ipv6_ifa_notify(0, ifp);
1824 in6_ifa_put(ifp);
1800 } else { 1825 } else {
1801 ipv6_del_addr(ifp); 1826 ipv6_del_addr(ifp);
1802 } 1827 }
@@ -3170,6 +3195,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
3170{ 3195{
3171 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3196 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3172 struct inet6_dev *idev = __in6_dev_get(dev); 3197 struct inet6_dev *idev = __in6_dev_get(dev);
3198 struct net *net = dev_net(dev);
3173 int run_pending = 0; 3199 int run_pending = 0;
3174 int err; 3200 int err;
3175 3201
@@ -3185,7 +3211,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
3185 case NETDEV_CHANGEMTU: 3211 case NETDEV_CHANGEMTU:
3186 /* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */ 3212 /* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */
3187 if (dev->mtu < IPV6_MIN_MTU) { 3213 if (dev->mtu < IPV6_MIN_MTU) {
3188 addrconf_ifdown(dev, 1); 3214 addrconf_ifdown(dev, dev != net->loopback_dev);
3189 break; 3215 break;
3190 } 3216 }
3191 3217
@@ -3238,9 +3264,15 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
3238 } 3264 }
3239 3265
3240 if (idev) { 3266 if (idev) {
3241 if (idev->if_flags & IF_READY) 3267 if (idev->if_flags & IF_READY) {
3242 /* device is already configured. */ 3268 /* device is already configured -
3269 * but resend MLD reports, we might
3270 * have roamed and need to update
3271 * multicast snooping switches
3272 */
3273 ipv6_mc_up(idev);
3243 break; 3274 break;
3275 }
3244 idev->if_flags |= IF_READY; 3276 idev->if_flags |= IF_READY;
3245 } 3277 }
3246 3278
@@ -3292,7 +3324,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
3292 * IPV6_MIN_MTU stop IPv6 on this interface. 3324 * IPV6_MIN_MTU stop IPv6 on this interface.
3293 */ 3325 */
3294 if (dev->mtu < IPV6_MIN_MTU) 3326 if (dev->mtu < IPV6_MIN_MTU)
3295 addrconf_ifdown(dev, 1); 3327 addrconf_ifdown(dev, dev != net->loopback_dev);
3296 } 3328 }
3297 break; 3329 break;
3298 3330
@@ -3333,6 +3365,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
3333 */ 3365 */
3334static struct notifier_block ipv6_dev_notf = { 3366static struct notifier_block ipv6_dev_notf = {
3335 .notifier_call = addrconf_notify, 3367 .notifier_call = addrconf_notify,
3368 .priority = ADDRCONF_NOTIFY_PRIORITY,
3336}; 3369};
3337 3370
3338static void addrconf_type_change(struct net_device *dev, unsigned long event) 3371static void addrconf_type_change(struct net_device *dev, unsigned long event)
@@ -3485,7 +3518,7 @@ static void addrconf_rs_timer(unsigned long data)
3485 if (idev->if_flags & IF_RA_RCVD) 3518 if (idev->if_flags & IF_RA_RCVD)
3486 goto out; 3519 goto out;
3487 3520
3488 if (idev->rs_probes++ < idev->cnf.rtr_solicits) { 3521 if (idev->rs_probes++ < idev->cnf.rtr_solicits || idev->cnf.rtr_solicits < 0) {
3489 write_unlock(&idev->lock); 3522 write_unlock(&idev->lock);
3490 if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE)) 3523 if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
3491 ndisc_send_rs(dev, &lladdr, 3524 ndisc_send_rs(dev, &lladdr,
@@ -3494,11 +3527,13 @@ static void addrconf_rs_timer(unsigned long data)
3494 goto put; 3527 goto put;
3495 3528
3496 write_lock(&idev->lock); 3529 write_lock(&idev->lock);
3530 idev->rs_interval = rfc3315_s14_backoff_update(
3531 idev->rs_interval, idev->cnf.rtr_solicit_max_interval);
3497 /* The wait after the last probe can be shorter */ 3532 /* The wait after the last probe can be shorter */
3498 addrconf_mod_rs_timer(idev, (idev->rs_probes == 3533 addrconf_mod_rs_timer(idev, (idev->rs_probes ==
3499 idev->cnf.rtr_solicits) ? 3534 idev->cnf.rtr_solicits) ?
3500 idev->cnf.rtr_solicit_delay : 3535 idev->cnf.rtr_solicit_delay :
3501 idev->cnf.rtr_solicit_interval); 3536 idev->rs_interval);
3502 } else { 3537 } else {
3503 /* 3538 /*
3504 * Note: we do not support deprecated "all on-link" 3539 * Note: we do not support deprecated "all on-link"
@@ -3726,7 +3761,7 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
3726 send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp); 3761 send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
3727 send_rs = send_mld && 3762 send_rs = send_mld &&
3728 ipv6_accept_ra(ifp->idev) && 3763 ipv6_accept_ra(ifp->idev) &&
3729 ifp->idev->cnf.rtr_solicits > 0 && 3764 ifp->idev->cnf.rtr_solicits != 0 &&
3730 (dev->flags&IFF_LOOPBACK) == 0; 3765 (dev->flags&IFF_LOOPBACK) == 0;
3731 read_unlock_bh(&ifp->idev->lock); 3766 read_unlock_bh(&ifp->idev->lock);
3732 3767
@@ -3748,10 +3783,11 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
3748 3783
3749 write_lock_bh(&ifp->idev->lock); 3784 write_lock_bh(&ifp->idev->lock);
3750 spin_lock(&ifp->lock); 3785 spin_lock(&ifp->lock);
3786 ifp->idev->rs_interval = rfc3315_s14_backoff_init(
3787 ifp->idev->cnf.rtr_solicit_interval);
3751 ifp->idev->rs_probes = 1; 3788 ifp->idev->rs_probes = 1;
3752 ifp->idev->if_flags |= IF_RS_SENT; 3789 ifp->idev->if_flags |= IF_RS_SENT;
3753 addrconf_mod_rs_timer(ifp->idev, 3790 addrconf_mod_rs_timer(ifp->idev, ifp->idev->rs_interval);
3754 ifp->idev->cnf.rtr_solicit_interval);
3755 spin_unlock(&ifp->lock); 3791 spin_unlock(&ifp->lock);
3756 write_unlock_bh(&ifp->idev->lock); 3792 write_unlock_bh(&ifp->idev->lock);
3757 } 3793 }
@@ -4668,6 +4704,8 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
4668 array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits; 4704 array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits;
4669 array[DEVCONF_RTR_SOLICIT_INTERVAL] = 4705 array[DEVCONF_RTR_SOLICIT_INTERVAL] =
4670 jiffies_to_msecs(cnf->rtr_solicit_interval); 4706 jiffies_to_msecs(cnf->rtr_solicit_interval);
4707 array[DEVCONF_RTR_SOLICIT_MAX_INTERVAL] =
4708 jiffies_to_msecs(cnf->rtr_solicit_max_interval);
4671 array[DEVCONF_RTR_SOLICIT_DELAY] = 4709 array[DEVCONF_RTR_SOLICIT_DELAY] =
4672 jiffies_to_msecs(cnf->rtr_solicit_delay); 4710 jiffies_to_msecs(cnf->rtr_solicit_delay);
4673 array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version; 4711 array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
@@ -4689,6 +4727,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
4689 array[DEVCONF_RTR_PROBE_INTERVAL] = 4727 array[DEVCONF_RTR_PROBE_INTERVAL] =
4690 jiffies_to_msecs(cnf->rtr_probe_interval); 4728 jiffies_to_msecs(cnf->rtr_probe_interval);
4691#ifdef CONFIG_IPV6_ROUTE_INFO 4729#ifdef CONFIG_IPV6_ROUTE_INFO
4730 array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] = cnf->accept_ra_rt_info_min_plen;
4692 array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen; 4731 array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
4693#endif 4732#endif
4694#endif 4733#endif
@@ -4876,7 +4915,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
4876 return -EINVAL; 4915 return -EINVAL;
4877 if (!ipv6_accept_ra(idev)) 4916 if (!ipv6_accept_ra(idev))
4878 return -EINVAL; 4917 return -EINVAL;
4879 if (idev->cnf.rtr_solicits <= 0) 4918 if (idev->cnf.rtr_solicits == 0)
4880 return -EINVAL; 4919 return -EINVAL;
4881 4920
4882 write_lock_bh(&idev->lock); 4921 write_lock_bh(&idev->lock);
@@ -4901,8 +4940,10 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
4901 4940
4902 if (update_rs) { 4941 if (update_rs) {
4903 idev->if_flags |= IF_RS_SENT; 4942 idev->if_flags |= IF_RS_SENT;
4943 idev->rs_interval = rfc3315_s14_backoff_init(
4944 idev->cnf.rtr_solicit_interval);
4904 idev->rs_probes = 1; 4945 idev->rs_probes = 1;
4905 addrconf_mod_rs_timer(idev, idev->cnf.rtr_solicit_interval); 4946 addrconf_mod_rs_timer(idev, idev->rs_interval);
4906 } 4947 }
4907 4948
4908 /* Well, that's kinda nasty ... */ 4949 /* Well, that's kinda nasty ... */
@@ -5272,8 +5313,7 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
5272 struct net_device *dev; 5313 struct net_device *dev;
5273 struct inet6_dev *idev; 5314 struct inet6_dev *idev;
5274 5315
5275 rcu_read_lock(); 5316 for_each_netdev(net, dev) {
5276 for_each_netdev_rcu(net, dev) {
5277 idev = __in6_dev_get(dev); 5317 idev = __in6_dev_get(dev);
5278 if (idev) { 5318 if (idev) {
5279 int changed = (!idev->cnf.disable_ipv6) ^ (!newf); 5319 int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
@@ -5282,7 +5322,6 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
5282 dev_disable_change(idev); 5322 dev_disable_change(idev);
5283 } 5323 }
5284 } 5324 }
5285 rcu_read_unlock();
5286} 5325}
5287 5326
5288static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf) 5327static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
@@ -5542,6 +5581,13 @@ static struct addrconf_sysctl_table
5542 .proc_handler = proc_dointvec_jiffies, 5581 .proc_handler = proc_dointvec_jiffies,
5543 }, 5582 },
5544 { 5583 {
5584 .procname = "router_solicitation_max_interval",
5585 .data = &ipv6_devconf.rtr_solicit_max_interval,
5586 .maxlen = sizeof(int),
5587 .mode = 0644,
5588 .proc_handler = proc_dointvec_jiffies,
5589 },
5590 {
5545 .procname = "router_solicitation_delay", 5591 .procname = "router_solicitation_delay",
5546 .data = &ipv6_devconf.rtr_solicit_delay, 5592 .data = &ipv6_devconf.rtr_solicit_delay,
5547 .maxlen = sizeof(int), 5593 .maxlen = sizeof(int),
@@ -5651,6 +5697,13 @@ static struct addrconf_sysctl_table
5651 }, 5697 },
5652#ifdef CONFIG_IPV6_ROUTE_INFO 5698#ifdef CONFIG_IPV6_ROUTE_INFO
5653 { 5699 {
5700 .procname = "accept_ra_rt_info_min_plen",
5701 .data = &ipv6_devconf.accept_ra_rt_info_min_plen,
5702 .maxlen = sizeof(int),
5703 .mode = 0644,
5704 .proc_handler = proc_dointvec,
5705 },
5706 {
5654 .procname = "accept_ra_rt_info_max_plen", 5707 .procname = "accept_ra_rt_info_max_plen",
5655 .data = &ipv6_devconf.accept_ra_rt_info_max_plen, 5708 .data = &ipv6_devconf.accept_ra_rt_info_max_plen,
5656 .maxlen = sizeof(int), 5709 .maxlen = sizeof(int),
@@ -5977,6 +6030,8 @@ int __init addrconf_init(void)
5977 goto errlo; 6030 goto errlo;
5978 } 6031 }
5979 6032
6033 ip6_route_init_special_entries();
6034
5980 for (i = 0; i < IN6_ADDR_HSIZE; i++) 6035 for (i = 0; i < IN6_ADDR_HSIZE; i++)
5981 INIT_HLIST_HEAD(&inet6_addr_lst[i]); 6036 INIT_HLIST_HEAD(&inet6_addr_lst[i]);
5982 6037
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index cf2dfb222230..56528e9f3e01 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -76,18 +76,22 @@ static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int a
76 } 76 }
77 } 77 }
78 78
79 addr_type = ipv6_addr_type(&usin->sin6_addr); 79 if (ipv6_addr_any(&usin->sin6_addr)) {
80
81 if (addr_type == IPV6_ADDR_ANY) {
82 /* 80 /*
83 * connect to self 81 * connect to self
84 */ 82 */
85 usin->sin6_addr.s6_addr[15] = 0x01; 83 if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
84 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
85 &usin->sin6_addr);
86 else
87 usin->sin6_addr = in6addr_loopback;
86 } 88 }
87 89
90 addr_type = ipv6_addr_type(&usin->sin6_addr);
91
88 daddr = &usin->sin6_addr; 92 daddr = &usin->sin6_addr;
89 93
90 if (addr_type == IPV6_ADDR_MAPPED) { 94 if (addr_type & IPV6_ADDR_MAPPED) {
91 struct sockaddr_in sin; 95 struct sockaddr_in sin;
92 96
93 if (__ipv6_only_sock(sk)) { 97 if (__ipv6_only_sock(sk)) {
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index ed33abf57abd..9ac4f0cef27d 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -32,7 +32,6 @@ struct fib6_rule {
32struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, 32struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
33 int flags, pol_lookup_t lookup) 33 int flags, pol_lookup_t lookup)
34{ 34{
35 struct rt6_info *rt;
36 struct fib_lookup_arg arg = { 35 struct fib_lookup_arg arg = {
37 .lookup_ptr = lookup, 36 .lookup_ptr = lookup,
38 .flags = FIB_LOOKUP_NOREF, 37 .flags = FIB_LOOKUP_NOREF,
@@ -41,21 +40,11 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
41 fib_rules_lookup(net->ipv6.fib6_rules_ops, 40 fib_rules_lookup(net->ipv6.fib6_rules_ops,
42 flowi6_to_flowi(fl6), flags, &arg); 41 flowi6_to_flowi(fl6), flags, &arg);
43 42
44 rt = arg.result; 43 if (arg.result)
44 return arg.result;
45 45
46 if (!rt) { 46 dst_hold(&net->ipv6.ip6_null_entry->dst);
47 dst_hold(&net->ipv6.ip6_null_entry->dst); 47 return &net->ipv6.ip6_null_entry->dst;
48 return &net->ipv6.ip6_null_entry->dst;
49 }
50
51 if (rt->rt6i_flags & RTF_REJECT &&
52 rt->dst.error == -EAGAIN) {
53 ip6_rt_put(rt);
54 rt = net->ipv6.ip6_null_entry;
55 dst_hold(&rt->dst);
56 }
57
58 return &rt->dst;
59} 48}
60 49
61static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, 50static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
@@ -116,7 +105,8 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
116 flp6->saddr = saddr; 105 flp6->saddr = saddr;
117 } 106 }
118 err = rt->dst.error; 107 err = rt->dst.error;
119 goto out; 108 if (err != -EAGAIN)
109 goto out;
120 } 110 }
121again: 111again:
122 ip6_rt_put(rt); 112 ip6_rt_put(rt);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 34cf46d74554..f60e8caea767 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -290,8 +290,7 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
290 struct rt6_info *rt; 290 struct rt6_info *rt;
291 291
292 rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags); 292 rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
293 if (rt->rt6i_flags & RTF_REJECT && 293 if (rt->dst.error == -EAGAIN) {
294 rt->dst.error == -EAGAIN) {
295 ip6_rt_put(rt); 294 ip6_rt_put(rt);
296 rt = net->ipv6.ip6_null_entry; 295 rt = net->ipv6.ip6_null_entry;
297 dst_hold(&rt->dst); 296 dst_hold(&rt->dst);
@@ -768,10 +767,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
768 goto next_iter; 767 goto next_iter;
769 } 768 }
770 769
771 if (iter->dst.dev == rt->dst.dev && 770 if (rt6_duplicate_nexthop(iter, rt)) {
772 iter->rt6i_idev == rt->rt6i_idev &&
773 ipv6_addr_equal(&iter->rt6i_gateway,
774 &rt->rt6i_gateway)) {
775 if (rt->rt6i_nsiblings) 771 if (rt->rt6i_nsiblings)
776 rt->rt6i_nsiblings = 0; 772 rt->rt6i_nsiblings = 0;
777 if (!(iter->rt6i_flags & RTF_EXPIRES)) 773 if (!(iter->rt6i_flags & RTF_EXPIRES))
@@ -903,6 +899,8 @@ add:
903 ins = &rt->dst.rt6_next; 899 ins = &rt->dst.rt6_next;
904 iter = *ins; 900 iter = *ins;
905 while (iter) { 901 while (iter) {
902 if (iter->rt6i_metric > rt->rt6i_metric)
903 break;
906 if (rt6_qualify_for_ecmp(iter)) { 904 if (rt6_qualify_for_ecmp(iter)) {
907 *ins = iter->dst.rt6_next; 905 *ins = iter->dst.rt6_next;
908 fib6_purge_rt(iter, fn, info->nl_net); 906 fib6_purge_rt(iter, fn, info->nl_net);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index eba61b42cd42..ab0efaca4a78 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -55,6 +55,7 @@
55#include <net/ip6_fib.h> 55#include <net/ip6_fib.h>
56#include <net/ip6_route.h> 56#include <net/ip6_route.h>
57#include <net/ip6_tunnel.h> 57#include <net/ip6_tunnel.h>
58#include <net/gre.h>
58 59
59 60
60static bool log_ecn_error = true; 61static bool log_ecn_error = true;
@@ -367,35 +368,37 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
367 368
368 369
369static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 370static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
370 u8 type, u8 code, int offset, __be32 info) 371 u8 type, u8 code, int offset, __be32 info)
371{ 372{
372 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data; 373 const struct gre_base_hdr *greh;
373 __be16 *p = (__be16 *)(skb->data + offset); 374 const struct ipv6hdr *ipv6h;
374 int grehlen = offset + 4; 375 int grehlen = sizeof(*greh);
375 struct ip6_tnl *t; 376 struct ip6_tnl *t;
377 int key_off = 0;
376 __be16 flags; 378 __be16 flags;
379 __be32 key;
377 380
378 flags = p[0]; 381 if (!pskb_may_pull(skb, offset + grehlen))
379 if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) { 382 return;
380 if (flags&(GRE_VERSION|GRE_ROUTING)) 383 greh = (const struct gre_base_hdr *)(skb->data + offset);
381 return; 384 flags = greh->flags;
382 if (flags&GRE_KEY) { 385 if (flags & (GRE_VERSION | GRE_ROUTING))
383 grehlen += 4; 386 return;
384 if (flags&GRE_CSUM) 387 if (flags & GRE_CSUM)
385 grehlen += 4; 388 grehlen += 4;
386 } 389 if (flags & GRE_KEY) {
390 key_off = grehlen + offset;
391 grehlen += 4;
387 } 392 }
388 393
389 /* If only 8 bytes returned, keyed message will be dropped here */ 394 if (!pskb_may_pull(skb, offset + grehlen))
390 if (!pskb_may_pull(skb, grehlen))
391 return; 395 return;
392 ipv6h = (const struct ipv6hdr *)skb->data; 396 ipv6h = (const struct ipv6hdr *)skb->data;
393 p = (__be16 *)(skb->data + offset); 397 greh = (const struct gre_base_hdr *)(skb->data + offset);
398 key = key_off ? *(__be32 *)(skb->data + key_off) : 0;
394 399
395 t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr, 400 t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
396 flags & GRE_KEY ? 401 key, greh->protocol);
397 *(((__be32 *)p) + (grehlen / 4) - 1) : 0,
398 p[1]);
399 if (!t) 402 if (!t)
400 return; 403 return;
401 404
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 225f5f7f26ba..9e2ea4ae840d 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -62,7 +62,6 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
62 const struct net_offload *ops; 62 const struct net_offload *ops;
63 int proto; 63 int proto;
64 struct frag_hdr *fptr; 64 struct frag_hdr *fptr;
65 unsigned int unfrag_ip6hlen;
66 u8 *prevhdr; 65 u8 *prevhdr;
67 int offset = 0; 66 int offset = 0;
68 bool encap, udpfrag; 67 bool encap, udpfrag;
@@ -121,8 +120,12 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
121 skb->network_header = (u8 *)ipv6h - skb->head; 120 skb->network_header = (u8 *)ipv6h - skb->head;
122 121
123 if (udpfrag) { 122 if (udpfrag) {
124 unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); 123 int err = ip6_find_1stfragopt(skb, &prevhdr);
125 fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen); 124 if (err < 0) {
125 kfree_skb_list(segs);
126 return ERR_PTR(err);
127 }
128 fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
126 fptr->frag_off = htons(offset); 129 fptr->frag_off = htons(offset);
127 if (skb->next) 130 if (skb->next)
128 fptr->frag_off |= htons(IP6_MF); 131 fptr->frag_off |= htons(IP6_MF);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 58900c21e4e4..e22339fad10b 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -571,7 +571,10 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
571 int ptr, offset = 0, err = 0; 571 int ptr, offset = 0, err = 0;
572 u8 *prevhdr, nexthdr = 0; 572 u8 *prevhdr, nexthdr = 0;
573 573
574 hlen = ip6_find_1stfragopt(skb, &prevhdr); 574 err = ip6_find_1stfragopt(skb, &prevhdr);
575 if (err < 0)
576 goto fail;
577 hlen = err;
575 nexthdr = *prevhdr; 578 nexthdr = *prevhdr;
576 579
577 mtu = ip6_skb_dst_mtu(skb); 580 mtu = ip6_skb_dst_mtu(skb);
@@ -644,8 +647,6 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
644 *prevhdr = NEXTHDR_FRAGMENT; 647 *prevhdr = NEXTHDR_FRAGMENT;
645 tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC); 648 tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
646 if (!tmp_hdr) { 649 if (!tmp_hdr) {
647 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
648 IPSTATS_MIB_FRAGFAILS);
649 err = -ENOMEM; 650 err = -ENOMEM;
650 goto fail; 651 goto fail;
651 } 652 }
@@ -742,13 +743,14 @@ slow_path:
742 * Fragment the datagram. 743 * Fragment the datagram.
743 */ 744 */
744 745
745 *prevhdr = NEXTHDR_FRAGMENT;
746 troom = rt->dst.dev->needed_tailroom; 746 troom = rt->dst.dev->needed_tailroom;
747 747
748 /* 748 /*
749 * Keep copying data until we run out. 749 * Keep copying data until we run out.
750 */ 750 */
751 while (left > 0) { 751 while (left > 0) {
752 u8 *fragnexthdr_offset;
753
752 len = left; 754 len = left;
753 /* IF: it doesn't fit, use 'mtu' - the data space left */ 755 /* IF: it doesn't fit, use 'mtu' - the data space left */
754 if (len > mtu) 756 if (len > mtu)
@@ -763,8 +765,6 @@ slow_path:
763 frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) + 765 frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
764 hroom + troom, GFP_ATOMIC); 766 hroom + troom, GFP_ATOMIC);
765 if (!frag) { 767 if (!frag) {
766 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
767 IPSTATS_MIB_FRAGFAILS);
768 err = -ENOMEM; 768 err = -ENOMEM;
769 goto fail; 769 goto fail;
770 } 770 }
@@ -793,6 +793,10 @@ slow_path:
793 */ 793 */
794 skb_copy_from_linear_data(skb, skb_network_header(frag), hlen); 794 skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
795 795
796 fragnexthdr_offset = skb_network_header(frag);
797 fragnexthdr_offset += prevhdr - skb_network_header(skb);
798 *fragnexthdr_offset = NEXTHDR_FRAGMENT;
799
796 /* 800 /*
797 * Build fragment header. 801 * Build fragment header.
798 */ 802 */
@@ -996,6 +1000,11 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
996 } 1000 }
997 } 1001 }
998#endif 1002#endif
1003 if (ipv6_addr_v4mapped(&fl6->saddr) &&
1004 !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) {
1005 err = -EAFNOSUPPORT;
1006 goto out_err_release;
1007 }
999 1008
1000 return 0; 1009 return 0;
1001 1010
@@ -1348,11 +1357,12 @@ emsgsize:
1348 */ 1357 */
1349 1358
1350 cork->length += length; 1359 cork->length += length;
1351 if (((length > mtu) || 1360 if ((skb && skb_is_gso(skb)) ||
1352 (skb && skb_is_gso(skb))) && 1361 (((length + (skb ? skb->len : headersize)) > mtu) &&
1362 (skb_queue_len(queue) <= 1) &&
1353 (sk->sk_protocol == IPPROTO_UDP) && 1363 (sk->sk_protocol == IPPROTO_UDP) &&
1354 (rt->dst.dev->features & NETIF_F_UFO) && 1364 (rt->dst.dev->features & NETIF_F_UFO) &&
1355 (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) { 1365 (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk))) {
1356 err = ip6_ufo_append_data(sk, queue, getfrag, from, length, 1366 err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
1357 hh_len, fragheaderlen, exthdrlen, 1367 hh_len, fragheaderlen, exthdrlen,
1358 transhdrlen, mtu, flags, fl6); 1368 transhdrlen, mtu, flags, fl6);
@@ -1424,6 +1434,11 @@ alloc_new_skb:
1424 */ 1434 */
1425 alloclen += sizeof(struct frag_hdr); 1435 alloclen += sizeof(struct frag_hdr);
1426 1436
1437 copy = datalen - transhdrlen - fraggap;
1438 if (copy < 0) {
1439 err = -EINVAL;
1440 goto error;
1441 }
1427 if (transhdrlen) { 1442 if (transhdrlen) {
1428 skb = sock_alloc_send_skb(sk, 1443 skb = sock_alloc_send_skb(sk,
1429 alloclen + hh_len, 1444 alloclen + hh_len,
@@ -1473,13 +1488,9 @@ alloc_new_skb:
1473 data += fraggap; 1488 data += fraggap;
1474 pskb_trim_unique(skb_prev, maxfraglen); 1489 pskb_trim_unique(skb_prev, maxfraglen);
1475 } 1490 }
1476 copy = datalen - transhdrlen - fraggap; 1491 if (copy > 0 &&
1477 1492 getfrag(from, data + transhdrlen, offset,
1478 if (copy < 0) { 1493 copy, fraggap, skb) < 0) {
1479 err = -EINVAL;
1480 kfree_skb(skb);
1481 goto error;
1482 } else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
1483 err = -EFAULT; 1494 err = -EFAULT;
1484 kfree_skb(skb); 1495 kfree_skb(skb);
1485 goto error; 1496 goto error;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index c9bd1ee1f145..600975c5eacf 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -479,18 +479,19 @@ ip6_tnl_dev_uninit(struct net_device *dev)
479 479
480__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw) 480__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
481{ 481{
482 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw; 482 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
483 __u8 nexthdr = ipv6h->nexthdr; 483 unsigned int nhoff = raw - skb->data;
484 __u16 off = sizeof(*ipv6h); 484 unsigned int off = nhoff + sizeof(*ipv6h);
485 u8 next, nexthdr = ipv6h->nexthdr;
485 486
486 while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) { 487 while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
487 __u16 optlen = 0;
488 struct ipv6_opt_hdr *hdr; 488 struct ipv6_opt_hdr *hdr;
489 if (raw + off + sizeof(*hdr) > skb->data && 489 u16 optlen;
490 !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr))) 490
491 if (!pskb_may_pull(skb, off + sizeof(*hdr)))
491 break; 492 break;
492 493
493 hdr = (struct ipv6_opt_hdr *) (raw + off); 494 hdr = (struct ipv6_opt_hdr *)(skb->data + off);
494 if (nexthdr == NEXTHDR_FRAGMENT) { 495 if (nexthdr == NEXTHDR_FRAGMENT) {
495 struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr; 496 struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
496 if (frag_hdr->frag_off) 497 if (frag_hdr->frag_off)
@@ -501,20 +502,29 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
501 } else { 502 } else {
502 optlen = ipv6_optlen(hdr); 503 optlen = ipv6_optlen(hdr);
503 } 504 }
505 /* cache hdr->nexthdr, since pskb_may_pull() might
506 * invalidate hdr
507 */
508 next = hdr->nexthdr;
504 if (nexthdr == NEXTHDR_DEST) { 509 if (nexthdr == NEXTHDR_DEST) {
505 __u16 i = off + 2; 510 u16 i = 2;
511
512 /* Remember : hdr is no longer valid at this point. */
513 if (!pskb_may_pull(skb, off + optlen))
514 break;
515
506 while (1) { 516 while (1) {
507 struct ipv6_tlv_tnl_enc_lim *tel; 517 struct ipv6_tlv_tnl_enc_lim *tel;
508 518
509 /* No more room for encapsulation limit */ 519 /* No more room for encapsulation limit */
510 if (i + sizeof (*tel) > off + optlen) 520 if (i + sizeof(*tel) > optlen)
511 break; 521 break;
512 522
513 tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i]; 523 tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
514 /* return index of option if found and valid */ 524 /* return index of option if found and valid */
515 if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT && 525 if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
516 tel->length == 1) 526 tel->length == 1)
517 return i; 527 return i + off - nhoff;
518 /* else jump to next option */ 528 /* else jump to next option */
519 if (tel->type) 529 if (tel->type)
520 i += tel->length + 2; 530 i += tel->length + 2;
@@ -522,7 +532,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
522 i++; 532 i++;
523 } 533 }
524 } 534 }
525 nexthdr = hdr->nexthdr; 535 nexthdr = next;
526 off += optlen; 536 off += optlen;
527 } 537 }
528 return 0; 538 return 0;
@@ -1039,7 +1049,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
1039 struct ip6_tnl *t = netdev_priv(dev); 1049 struct ip6_tnl *t = netdev_priv(dev);
1040 struct net *net = t->net; 1050 struct net *net = t->net;
1041 struct net_device_stats *stats = &t->dev->stats; 1051 struct net_device_stats *stats = &t->dev->stats;
1042 struct ipv6hdr *ipv6h = ipv6_hdr(skb); 1052 struct ipv6hdr *ipv6h;
1043 struct ipv6_tel_txoption opt; 1053 struct ipv6_tel_txoption opt;
1044 struct dst_entry *dst = NULL, *ndst = NULL; 1054 struct dst_entry *dst = NULL, *ndst = NULL;
1045 struct net_device *tdev; 1055 struct net_device *tdev;
@@ -1051,26 +1061,28 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
1051 1061
1052 /* NBMA tunnel */ 1062 /* NBMA tunnel */
1053 if (ipv6_addr_any(&t->parms.raddr)) { 1063 if (ipv6_addr_any(&t->parms.raddr)) {
1054 struct in6_addr *addr6; 1064 if (skb->protocol == htons(ETH_P_IPV6)) {
1055 struct neighbour *neigh; 1065 struct in6_addr *addr6;
1056 int addr_type; 1066 struct neighbour *neigh;
1067 int addr_type;
1057 1068
1058 if (!skb_dst(skb)) 1069 if (!skb_dst(skb))
1059 goto tx_err_link_failure; 1070 goto tx_err_link_failure;
1060 1071
1061 neigh = dst_neigh_lookup(skb_dst(skb), 1072 neigh = dst_neigh_lookup(skb_dst(skb),
1062 &ipv6_hdr(skb)->daddr); 1073 &ipv6_hdr(skb)->daddr);
1063 if (!neigh) 1074 if (!neigh)
1064 goto tx_err_link_failure; 1075 goto tx_err_link_failure;
1065 1076
1066 addr6 = (struct in6_addr *)&neigh->primary_key; 1077 addr6 = (struct in6_addr *)&neigh->primary_key;
1067 addr_type = ipv6_addr_type(addr6); 1078 addr_type = ipv6_addr_type(addr6);
1068 1079
1069 if (addr_type == IPV6_ADDR_ANY) 1080 if (addr_type == IPV6_ADDR_ANY)
1070 addr6 = &ipv6_hdr(skb)->daddr; 1081 addr6 = &ipv6_hdr(skb)->daddr;
1071 1082
1072 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); 1083 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
1073 neigh_release(neigh); 1084 neigh_release(neigh);
1085 }
1074 } else if (!(t->parms.flags & 1086 } else if (!(t->parms.flags &
1075 (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) { 1087 (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
1076 /* enable the cache only only if the routing decision does 1088 /* enable the cache only only if the routing decision does
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 24fb9c0efd00..5b7433887eda 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -681,6 +681,10 @@ vti6_parm_to_user(struct ip6_tnl_parm2 *u, const struct __ip6_tnl_parm *p)
681 u->link = p->link; 681 u->link = p->link;
682 u->i_key = p->i_key; 682 u->i_key = p->i_key;
683 u->o_key = p->o_key; 683 u->o_key = p->o_key;
684 if (u->i_key)
685 u->i_flags |= GRE_KEY;
686 if (u->o_key)
687 u->o_flags |= GRE_KEY;
684 u->proto = p->proto; 688 u->proto = p->proto;
685 689
686 memcpy(u->name, p->name, sizeof(u->name)); 690 memcpy(u->name, p->name, sizeof(u->name));
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index d9843e5a667f..8361d73ab653 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -774,7 +774,8 @@ failure:
774 * Delete a VIF entry 774 * Delete a VIF entry
775 */ 775 */
776 776
777static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head) 777static int mif6_delete(struct mr6_table *mrt, int vifi, int notify,
778 struct list_head *head)
778{ 779{
779 struct mif_device *v; 780 struct mif_device *v;
780 struct net_device *dev; 781 struct net_device *dev;
@@ -820,7 +821,7 @@ static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
820 dev->ifindex, &in6_dev->cnf); 821 dev->ifindex, &in6_dev->cnf);
821 } 822 }
822 823
823 if (v->flags & MIFF_REGISTER) 824 if ((v->flags & MIFF_REGISTER) && !notify)
824 unregister_netdevice_queue(dev, head); 825 unregister_netdevice_queue(dev, head);
825 826
826 dev_put(dev); 827 dev_put(dev);
@@ -1330,7 +1331,6 @@ static int ip6mr_device_event(struct notifier_block *this,
1330 struct mr6_table *mrt; 1331 struct mr6_table *mrt;
1331 struct mif_device *v; 1332 struct mif_device *v;
1332 int ct; 1333 int ct;
1333 LIST_HEAD(list);
1334 1334
1335 if (event != NETDEV_UNREGISTER) 1335 if (event != NETDEV_UNREGISTER)
1336 return NOTIFY_DONE; 1336 return NOTIFY_DONE;
@@ -1339,10 +1339,9 @@ static int ip6mr_device_event(struct notifier_block *this,
1339 v = &mrt->vif6_table[0]; 1339 v = &mrt->vif6_table[0];
1340 for (ct = 0; ct < mrt->maxvif; ct++, v++) { 1340 for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1341 if (v->dev == dev) 1341 if (v->dev == dev)
1342 mif6_delete(mrt, ct, &list); 1342 mif6_delete(mrt, ct, 1, NULL);
1343 } 1343 }
1344 } 1344 }
1345 unregister_netdevice_many(&list);
1346 1345
1347 return NOTIFY_DONE; 1346 return NOTIFY_DONE;
1348} 1347}
@@ -1551,7 +1550,7 @@ static void mroute_clean_tables(struct mr6_table *mrt, bool all)
1551 for (i = 0; i < mrt->maxvif; i++) { 1550 for (i = 0; i < mrt->maxvif; i++) {
1552 if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC)) 1551 if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
1553 continue; 1552 continue;
1554 mif6_delete(mrt, i, &list); 1553 mif6_delete(mrt, i, 0, &list);
1555 } 1554 }
1556 unregister_netdevice_many(&list); 1555 unregister_netdevice_many(&list);
1557 1556
@@ -1704,7 +1703,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
1704 if (copy_from_user(&mifi, optval, sizeof(mifi_t))) 1703 if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
1705 return -EFAULT; 1704 return -EFAULT;
1706 rtnl_lock(); 1705 rtnl_lock();
1707 ret = mif6_delete(mrt, mifi, NULL); 1706 ret = mif6_delete(mrt, mifi, 0, NULL);
1708 rtnl_unlock(); 1707 rtnl_unlock();
1709 return ret; 1708 return ret;
1710 1709
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 84afb9a77278..3452f9037ad4 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1358,6 +1358,8 @@ skip_linkparms:
1358 if (ri->prefix_len == 0 && 1358 if (ri->prefix_len == 0 &&
1359 !in6_dev->cnf.accept_ra_defrtr) 1359 !in6_dev->cnf.accept_ra_defrtr)
1360 continue; 1360 continue;
1361 if (ri->prefix_len < in6_dev->cnf.accept_ra_rt_info_min_plen)
1362 continue;
1361 if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen) 1363 if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen)
1362 continue; 1364 continue;
1363 rt6_route_rcv(skb->dev, (u8 *)p, (p->nd_opt_len) << 3, 1365 rt6_route_rcv(skb->dev, (u8 *)p, (p->nd_opt_len) << 3,
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 1d184322a7b1..f9f02581c4ca 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -78,15 +78,15 @@ EXPORT_SYMBOL(ipv6_select_ident);
78 78
79int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) 79int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
80{ 80{
81 u16 offset = sizeof(struct ipv6hdr); 81 unsigned int offset = sizeof(struct ipv6hdr);
82 struct ipv6_opt_hdr *exthdr =
83 (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
84 unsigned int packet_len = skb_tail_pointer(skb) - 82 unsigned int packet_len = skb_tail_pointer(skb) -
85 skb_network_header(skb); 83 skb_network_header(skb);
86 int found_rhdr = 0; 84 int found_rhdr = 0;
87 *nexthdr = &ipv6_hdr(skb)->nexthdr; 85 *nexthdr = &ipv6_hdr(skb)->nexthdr;
88 86
89 while (offset + 1 <= packet_len) { 87 while (offset <= packet_len) {
88 struct ipv6_opt_hdr *exthdr;
89 unsigned int len;
90 90
91 switch (**nexthdr) { 91 switch (**nexthdr) {
92 92
@@ -107,13 +107,19 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
107 return offset; 107 return offset;
108 } 108 }
109 109
110 offset += ipv6_optlen(exthdr); 110 if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)
111 *nexthdr = &exthdr->nexthdr; 111 return -EINVAL;
112
112 exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) + 113 exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
113 offset); 114 offset);
115 len = ipv6_optlen(exthdr);
116 if (len + offset >= IPV6_MAXPLEN)
117 return -EINVAL;
118 offset += len;
119 *nexthdr = &exthdr->nexthdr;
114 } 120 }
115 121
116 return offset; 122 return -EINVAL;
117} 123}
118EXPORT_SYMBOL(ip6_find_1stfragopt); 124EXPORT_SYMBOL(ip6_find_1stfragopt);
119 125
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 1737fc0f2988..4c753f4a3712 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -50,7 +50,7 @@ static struct inet_protosw pingv6_protosw = {
50 .type = SOCK_DGRAM, 50 .type = SOCK_DGRAM,
51 .protocol = IPPROTO_ICMPV6, 51 .protocol = IPPROTO_ICMPV6,
52 .prot = &pingv6_prot, 52 .prot = &pingv6_prot,
53 .ops = &inet6_dgram_ops, 53 .ops = &inet6_sockraw_ops,
54 .flags = INET_PROTOSW_REUSE, 54 .flags = INET_PROTOSW_REUSE,
55}; 55};
56 56
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index d503b7f373a3..0ef8e114c8ab 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -630,6 +630,8 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
630 ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu); 630 ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu);
631 return -EMSGSIZE; 631 return -EMSGSIZE;
632 } 632 }
633 if (length < sizeof(struct ipv6hdr))
634 return -EINVAL;
633 if (flags&MSG_PROBE) 635 if (flags&MSG_PROBE)
634 goto out; 636 goto out;
635 637
@@ -1145,8 +1147,7 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
1145 spin_lock_bh(&sk->sk_receive_queue.lock); 1147 spin_lock_bh(&sk->sk_receive_queue.lock);
1146 skb = skb_peek(&sk->sk_receive_queue); 1148 skb = skb_peek(&sk->sk_receive_queue);
1147 if (skb) 1149 if (skb)
1148 amount = skb_tail_pointer(skb) - 1150 amount = skb->len;
1149 skb_transport_header(skb);
1150 spin_unlock_bh(&sk->sk_receive_queue.lock); 1151 spin_unlock_bh(&sk->sk_receive_queue.lock);
1151 return put_user(amount, (int __user *)arg); 1152 return put_user(amount, (int __user *)arg);
1152 } 1153 }
@@ -1303,7 +1304,7 @@ void raw6_proc_exit(void)
1303#endif /* CONFIG_PROC_FS */ 1304#endif /* CONFIG_PROC_FS */
1304 1305
1305/* Same as inet6_dgram_ops, sans udp_poll. */ 1306/* Same as inet6_dgram_ops, sans udp_poll. */
1306static const struct proto_ops inet6_sockraw_ops = { 1307const struct proto_ops inet6_sockraw_ops = {
1307 .family = PF_INET6, 1308 .family = PF_INET6,
1308 .owner = THIS_MODULE, 1309 .owner = THIS_MODULE,
1309 .release = inet6_release, 1310 .release = inet6_release,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 3c5d6bb3b850..d3f87ceb3408 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1759,6 +1759,10 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
1759 int addr_type; 1759 int addr_type;
1760 int err = -EINVAL; 1760 int err = -EINVAL;
1761 1761
1762 /* RTF_PCPU is an internal flag; can not be set by userspace */
1763 if (cfg->fc_flags & RTF_PCPU)
1764 goto out;
1765
1762 if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128) 1766 if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
1763 goto out; 1767 goto out;
1764#ifndef CONFIG_IPV6_SUBTREES 1768#ifndef CONFIG_IPV6_SUBTREES
@@ -2085,6 +2089,8 @@ static int ip6_route_del(struct fib6_config *cfg)
2085 continue; 2089 continue;
2086 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric) 2090 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
2087 continue; 2091 continue;
2092 if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol)
2093 continue;
2088 dst_hold(&rt->dst); 2094 dst_hold(&rt->dst);
2089 read_unlock_bh(&table->tb6_lock); 2095 read_unlock_bh(&table->tb6_lock);
2090 2096
@@ -2819,17 +2825,11 @@ static int ip6_route_info_append(struct list_head *rt6_nh_list,
2819 struct rt6_info *rt, struct fib6_config *r_cfg) 2825 struct rt6_info *rt, struct fib6_config *r_cfg)
2820{ 2826{
2821 struct rt6_nh *nh; 2827 struct rt6_nh *nh;
2822 struct rt6_info *rtnh;
2823 int err = -EEXIST; 2828 int err = -EEXIST;
2824 2829
2825 list_for_each_entry(nh, rt6_nh_list, next) { 2830 list_for_each_entry(nh, rt6_nh_list, next) {
2826 /* check if rt6_info already exists */ 2831 /* check if rt6_info already exists */
2827 rtnh = nh->rt6_info; 2832 if (rt6_duplicate_nexthop(nh->rt6_info, rt))
2828
2829 if (rtnh->dst.dev == rt->dst.dev &&
2830 rtnh->rt6i_idev == rt->rt6i_idev &&
2831 ipv6_addr_equal(&rtnh->rt6i_gateway,
2832 &rt->rt6i_gateway))
2833 return err; 2833 return err;
2834 } 2834 }
2835 2835
@@ -3188,7 +3188,8 @@ static int rt6_fill_node(struct net *net,
3188 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags))) 3188 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
3189 goto nla_put_failure; 3189 goto nla_put_failure;
3190 3190
3191 lwtunnel_fill_encap(skb, rt->dst.lwtstate); 3191 if (lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0)
3192 goto nla_put_failure;
3192 3193
3193 nlmsg_end(skb, nlh); 3194 nlmsg_end(skb, nlh);
3194 return 0; 3195 return 0;
@@ -3354,7 +3355,10 @@ static int ip6_route_dev_notify(struct notifier_block *this,
3354 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3355 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3355 struct net *net = dev_net(dev); 3356 struct net *net = dev_net(dev);
3356 3357
3357 if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) { 3358 if (!(dev->flags & IFF_LOOPBACK))
3359 return NOTIFY_OK;
3360
3361 if (event == NETDEV_REGISTER) {
3358 net->ipv6.ip6_null_entry->dst.dev = dev; 3362 net->ipv6.ip6_null_entry->dst.dev = dev;
3359 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev); 3363 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
3360#ifdef CONFIG_IPV6_MULTIPLE_TABLES 3364#ifdef CONFIG_IPV6_MULTIPLE_TABLES
@@ -3363,6 +3367,12 @@ static int ip6_route_dev_notify(struct notifier_block *this,
3363 net->ipv6.ip6_blk_hole_entry->dst.dev = dev; 3367 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
3364 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev); 3368 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
3365#endif 3369#endif
3370 } else if (event == NETDEV_UNREGISTER) {
3371 in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev);
3372#ifdef CONFIG_IPV6_MULTIPLE_TABLES
3373 in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev);
3374 in6_dev_put(net->ipv6.ip6_blk_hole_entry->rt6i_idev);
3375#endif
3366 } 3376 }
3367 3377
3368 return NOTIFY_OK; 3378 return NOTIFY_OK;
@@ -3669,9 +3679,24 @@ static struct pernet_operations ip6_route_net_late_ops = {
3669 3679
3670static struct notifier_block ip6_route_dev_notifier = { 3680static struct notifier_block ip6_route_dev_notifier = {
3671 .notifier_call = ip6_route_dev_notify, 3681 .notifier_call = ip6_route_dev_notify,
3672 .priority = 0, 3682 .priority = ADDRCONF_NOTIFY_PRIORITY - 10,
3673}; 3683};
3674 3684
3685void __init ip6_route_init_special_entries(void)
3686{
3687 /* Registering of the loopback is done before this portion of code,
3688 * the loopback reference in rt6_info will not be taken, do it
3689 * manually for init_net */
3690 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
3691 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3692 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3693 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
3694 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3695 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
3696 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3697 #endif
3698}
3699
3675int __init ip6_route_init(void) 3700int __init ip6_route_init(void)
3676{ 3701{
3677 int ret; 3702 int ret;
@@ -3698,17 +3723,6 @@ int __init ip6_route_init(void)
3698 3723
3699 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep; 3724 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
3700 3725
3701 /* Registering of the loopback is done before this portion of code,
3702 * the loopback reference in rt6_info will not be taken, do it
3703 * manually for init_net */
3704 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
3705 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3706 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3707 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
3708 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3709 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
3710 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3711 #endif
3712 ret = fib6_init(); 3726 ret = fib6_init();
3713 if (ret) 3727 if (ret)
3714 goto out_register_subsys; 3728 goto out_register_subsys;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 3da2b16356eb..184f0fe35dc6 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1389,6 +1389,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
1389 tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst); 1389 tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
1390 if (!tunnel->dst_cache) { 1390 if (!tunnel->dst_cache) {
1391 free_percpu(dev->tstats); 1391 free_percpu(dev->tstats);
1392 dev->tstats = NULL;
1392 return -ENOMEM; 1393 return -ENOMEM;
1393 } 1394 }
1394 1395
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 336843ca4e6b..7f3667635431 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -210,6 +210,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
210 treq->snt_synack.v64 = 0; 210 treq->snt_synack.v64 = 0;
211 treq->rcv_isn = ntohl(th->seq) - 1; 211 treq->rcv_isn = ntohl(th->seq) - 1;
212 treq->snt_isn = cookie; 212 treq->snt_isn = cookie;
213 treq->txhash = net_tx_rndhash();
213 214
214 /* 215 /*
215 * We need to lookup the dst_entry to get the correct window size. 216 * We need to lookup the dst_entry to get the correct window size.
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 8ed00c8a128a..4618f52a4abe 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -149,8 +149,13 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
149 * connect() to INADDR_ANY means loopback (BSD'ism). 149 * connect() to INADDR_ANY means loopback (BSD'ism).
150 */ 150 */
151 151
152 if (ipv6_addr_any(&usin->sin6_addr)) 152 if (ipv6_addr_any(&usin->sin6_addr)) {
153 usin->sin6_addr.s6_addr[15] = 0x1; 153 if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
154 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
155 &usin->sin6_addr);
156 else
157 usin->sin6_addr = in6addr_loopback;
158 }
154 159
155 addr_type = ipv6_addr_type(&usin->sin6_addr); 160 addr_type = ipv6_addr_type(&usin->sin6_addr);
156 161
@@ -189,7 +194,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
189 * TCP over IPv4 194 * TCP over IPv4
190 */ 195 */
191 196
192 if (addr_type == IPV6_ADDR_MAPPED) { 197 if (addr_type & IPV6_ADDR_MAPPED) {
193 u32 exthdrlen = icsk->icsk_ext_hdr_len; 198 u32 exthdrlen = icsk->icsk_ext_hdr_len;
194 struct sockaddr_in sin; 199 struct sockaddr_in sin;
195 200
@@ -377,10 +382,12 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
377 np = inet6_sk(sk); 382 np = inet6_sk(sk);
378 383
379 if (type == NDISC_REDIRECT) { 384 if (type == NDISC_REDIRECT) {
380 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); 385 if (!sock_owned_by_user(sk)) {
386 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
381 387
382 if (dst) 388 if (dst)
383 dst->ops->redirect(dst, sk, skb); 389 dst->ops->redirect(dst, sk, skb);
390 }
384 goto out; 391 goto out;
385 } 392 }
386 393
@@ -976,6 +983,16 @@ drop:
976 return 0; /* don't send reset */ 983 return 0; /* don't send reset */
977} 984}
978 985
986static void tcp_v6_restore_cb(struct sk_buff *skb)
987{
988 /* We need to move header back to the beginning if xfrm6_policy_check()
989 * and tcp_v6_fill_cb() are going to be called again.
990 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
991 */
992 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
993 sizeof(struct inet6_skb_parm));
994}
995
979static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, 996static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
980 struct request_sock *req, 997 struct request_sock *req,
981 struct dst_entry *dst, 998 struct dst_entry *dst,
@@ -1023,6 +1040,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
1023 newtp->af_specific = &tcp_sock_ipv6_mapped_specific; 1040 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1024#endif 1041#endif
1025 1042
1043 newnp->ipv6_mc_list = NULL;
1026 newnp->ipv6_ac_list = NULL; 1044 newnp->ipv6_ac_list = NULL;
1027 newnp->ipv6_fl_list = NULL; 1045 newnp->ipv6_fl_list = NULL;
1028 newnp->pktoptions = NULL; 1046 newnp->pktoptions = NULL;
@@ -1092,6 +1110,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
1092 First: no IPv4 options. 1110 First: no IPv4 options.
1093 */ 1111 */
1094 newinet->inet_opt = NULL; 1112 newinet->inet_opt = NULL;
1113 newnp->ipv6_mc_list = NULL;
1095 newnp->ipv6_ac_list = NULL; 1114 newnp->ipv6_ac_list = NULL;
1096 newnp->ipv6_fl_list = NULL; 1115 newnp->ipv6_fl_list = NULL;
1097 1116
@@ -1165,8 +1184,10 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
1165 sk_gfp_atomic(sk, GFP_ATOMIC)); 1184 sk_gfp_atomic(sk, GFP_ATOMIC));
1166 consume_skb(ireq->pktopts); 1185 consume_skb(ireq->pktopts);
1167 ireq->pktopts = NULL; 1186 ireq->pktopts = NULL;
1168 if (newnp->pktoptions) 1187 if (newnp->pktoptions) {
1188 tcp_v6_restore_cb(newnp->pktoptions);
1169 skb_set_owner_r(newnp->pktoptions, newsk); 1189 skb_set_owner_r(newnp->pktoptions, newsk);
1190 }
1170 } 1191 }
1171 } 1192 }
1172 1193
@@ -1181,16 +1202,6 @@ out:
1181 return NULL; 1202 return NULL;
1182} 1203}
1183 1204
1184static void tcp_v6_restore_cb(struct sk_buff *skb)
1185{
1186 /* We need to move header back to the beginning if xfrm6_policy_check()
1187 * and tcp_v6_fill_cb() are going to be called again.
1188 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1189 */
1190 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1191 sizeof(struct inet6_skb_parm));
1192}
1193
1194/* The socket must have its spinlock held when we get 1205
1195 * here, unless it is a TCP_LISTEN socket. 1206 * here, unless it is a TCP_LISTEN socket.
1196 * 1207 *
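
The tcp_v6_connect() change above keeps address families consistent: when userspace connects to the unspecified address, a socket already bound to a v4-mapped address now gets ::ffff:127.0.0.1 rather than plain ::1. A hedged user-space sketch of that selection logic (standard socket headers only, not the kernel path):

/* Illustrative (non-kernel) sketch: connecting to the unspecified
 * address picks a loopback matching the family the socket is
 * effectively bound to. */
#include <arpa/inet.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool v4mapped(const struct in6_addr *a)
{
    return IN6_IS_ADDR_V4MAPPED(a);
}

static void pick_loopback(const struct in6_addr *bound, struct in6_addr *dst)
{
    if (v4mapped(bound)) {
        /* ::ffff:127.0.0.1, staying in the mapped-v4 world */
        memset(dst, 0, sizeof(*dst));
        dst->s6_addr[10] = 0xff;
        dst->s6_addr[11] = 0xff;
        dst->s6_addr[12] = 127;
        dst->s6_addr[15] = 1;
    } else {
        *dst = in6addr_loopback;    /* plain ::1 */
    }
}

int main(void)
{
    struct in6_addr bound, dst;
    char buf[INET6_ADDRSTRLEN];

    inet_pton(AF_INET6, "::ffff:192.0.2.1", &bound);
    pick_loopback(&bound, &dst);
    printf("%s\n", inet_ntop(AF_INET6, &dst, buf, sizeof(buf)));
    return 0;
}
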
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 156a13c7ada8..cedced59ce15 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1136,6 +1136,10 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1136 if (addr_len < SIN6_LEN_RFC2133) 1136 if (addr_len < SIN6_LEN_RFC2133)
1137 return -EINVAL; 1137 return -EINVAL;
1138 daddr = &sin6->sin6_addr; 1138 daddr = &sin6->sin6_addr;
1139 if (ipv6_addr_any(daddr) &&
1140 ipv6_addr_v4mapped(&np->saddr))
1141 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
1142 daddr);
1139 break; 1143 break;
1140 case AF_INET: 1144 case AF_INET:
1141 goto do_udp_sendmsg; 1145 goto do_udp_sendmsg;
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 7441e1e63893..2e3c12eeca07 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -29,6 +29,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
29 u8 frag_hdr_sz = sizeof(struct frag_hdr); 29 u8 frag_hdr_sz = sizeof(struct frag_hdr);
30 __wsum csum; 30 __wsum csum;
31 int tnl_hlen; 31 int tnl_hlen;
32 int err;
32 33
33 mss = skb_shinfo(skb)->gso_size; 34 mss = skb_shinfo(skb)->gso_size;
34 if (unlikely(skb->len <= mss)) 35 if (unlikely(skb->len <= mss))
@@ -85,7 +86,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
85 if (uh->check == 0) 86 if (uh->check == 0)
86 uh->check = CSUM_MANGLED_0; 87 uh->check = CSUM_MANGLED_0;
87 88
88 skb->ip_summed = CHECKSUM_NONE; 89 skb->ip_summed = CHECKSUM_UNNECESSARY;
89 90
90 /* Check if there is enough headroom to insert fragment header. */ 91 /* Check if there is enough headroom to insert fragment header. */
91 tnl_hlen = skb_tnl_header_len(skb); 92 tnl_hlen = skb_tnl_header_len(skb);
@@ -97,7 +98,10 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
97 /* Find the unfragmentable header and shift it left by frag_hdr_sz 98 /* Find the unfragmentable header and shift it left by frag_hdr_sz
98 * bytes to insert fragment header. 99 * bytes to insert fragment header.
99 */ 100 */
100 unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); 101 err = ip6_find_1stfragopt(skb, &prevhdr);
102 if (err < 0)
103 return ERR_PTR(err);
104 unfrag_ip6hlen = err;
101 nexthdr = *prevhdr; 105 nexthdr = *prevhdr;
102 *prevhdr = NEXTHDR_FRAGMENT; 106 *prevhdr = NEXTHDR_FRAGMENT;
103 unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) + 107 unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
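
The udp_offload.c hunk reflects ip6_find_1stfragopt() growing a failure mode: callers must now check for a negative errno before treating the result as an offset. A small illustrative sketch of the pattern, with a hypothetical stand-in for the helper:

/* A helper that used to "always succeed" now returns a negative
 * errno, so the caller checks before using the value as an offset. */
#include <errno.h>
#include <stdio.h>

/* hypothetical stand-in for ip6_find_1stfragopt() */
static int find_frag_offset(const unsigned char *pkt, int len)
{
    (void)pkt;             /* a real parser would walk the headers */
    if (len < 40)          /* malformed: shorter than an IPv6 header */
        return -EINVAL;
    return 40;             /* offset where the fragment header goes */
}

static int insert_frag_header(const unsigned char *pkt, int len)
{
    int err = find_frag_offset(pkt, len);
    if (err < 0)
        return err;        /* propagate instead of using garbage */
    printf("fragment header at offset %d\n", err);
    return 0;
}

int main(void)
{
    unsigned char pkt[64] = { 0 };
    insert_frag_header(pkt, sizeof(pkt));
    insert_frag_header(pkt, 8);    /* rejected, no out-of-bounds use */
    return 0;
}
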
diff --git a/net/ipv6/xfrm6_mode_ro.c b/net/ipv6/xfrm6_mode_ro.c
index 0e015906f9ca..07d36573f50b 100644
--- a/net/ipv6/xfrm6_mode_ro.c
+++ b/net/ipv6/xfrm6_mode_ro.c
@@ -47,6 +47,8 @@ static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
47 iph = ipv6_hdr(skb); 47 iph = ipv6_hdr(skb);
48 48
49 hdr_len = x->type->hdr_offset(x, skb, &prevhdr); 49 hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
50 if (hdr_len < 0)
51 return hdr_len;
50 skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data); 52 skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
51 skb_set_network_header(skb, -x->props.header_len); 53 skb_set_network_header(skb, -x->props.header_len);
52 skb->transport_header = skb->network_header + hdr_len; 54 skb->transport_header = skb->network_header + hdr_len;
diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c
index 4e344105b3fd..1d3bbe6e1183 100644
--- a/net/ipv6/xfrm6_mode_transport.c
+++ b/net/ipv6/xfrm6_mode_transport.c
@@ -28,6 +28,8 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
28 iph = ipv6_hdr(skb); 28 iph = ipv6_hdr(skb);
29 29
30 hdr_len = x->type->hdr_offset(x, skb, &prevhdr); 30 hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
31 if (hdr_len < 0)
32 return hdr_len;
31 skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data); 33 skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
32 skb_set_network_header(skb, -x->props.header_len); 34 skb_set_network_header(skb, -x->props.header_len);
33 skb->transport_header = skb->network_header + hdr_len; 35 skb->transport_header = skb->network_header + hdr_len;
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 48d0dc89b58d..e735f781e4f3 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1168,11 +1168,10 @@ static int ipxitf_ioctl(unsigned int cmd, void __user *arg)
1168 sipx->sipx_network = ipxif->if_netnum; 1168 sipx->sipx_network = ipxif->if_netnum;
1169 memcpy(sipx->sipx_node, ipxif->if_node, 1169 memcpy(sipx->sipx_node, ipxif->if_node,
1170 sizeof(sipx->sipx_node)); 1170 sizeof(sipx->sipx_node));
1171 rc = -EFAULT; 1171 rc = 0;
1172 if (copy_to_user(arg, &ifr, sizeof(ifr))) 1172 if (copy_to_user(arg, &ifr, sizeof(ifr)))
1173 break; 1173 rc = -EFAULT;
1174 ipxitf_put(ipxif); 1174 ipxitf_put(ipxif);
1175 rc = 0;
1176 break; 1175 break;
1177 } 1176 }
1178 case SIOCAIPXITFCRT: 1177 case SIOCAIPXITFCRT:
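
The af_ipx.c hunk reorders error handling so the interface reference is always dropped: the old code broke out of the switch on a copy_to_user() failure with ipxitf_put() never called. A user-space sketch of the corrected shape (all names illustrative):

/* Assume success, record -EFAULT only if the copy-out fails, and
 * always drop the reference before leaving. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

struct itf { int refcnt; };

static void itf_put(struct itf *i) { i->refcnt--; }

/* stand-in for copy_to_user(); nonzero return means failure */
static int copy_out(void *dst, const void *src, size_t n, int fail)
{
    if (fail)
        return 1;
    memcpy(dst, src, n);
    return 0;
}

static int itf_ioctl(struct itf *i, void *uarg, int simulate_fault)
{
    int data = 1234, rc = 0;

    if (copy_out(uarg, &data, sizeof(data), simulate_fault))
        rc = -EFAULT;
    itf_put(i);          /* released on both success and failure */
    return rc;
}

int main(void)
{
    struct itf i = { 1 };
    int out;

    printf("ok path: %d, refcnt %d\n", itf_ioctl(&i, &out, 0), i.refcnt);
    i.refcnt = 1;
    printf("fault path: %d, refcnt %d\n", itf_ioctl(&i, &out, 1), i.refcnt);
    return 0;
}
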
diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
index acbe61c7e683..160dc89335e2 100644
--- a/net/irda/irqueue.c
+++ b/net/irda/irqueue.c
@@ -383,9 +383,6 @@ EXPORT_SYMBOL(hashbin_new);
383 * for deallocating this structure if it's complex. If not the user can 383 * for deallocating this structure if it's complex. If not the user can
384 * just supply kfree, which should take care of the job. 384 * just supply kfree, which should take care of the job.
385 */ 385 */
386#ifdef CONFIG_LOCKDEP
387static int hashbin_lock_depth = 0;
388#endif
389int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func) 386int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
390{ 387{
391 irda_queue_t* queue; 388 irda_queue_t* queue;
@@ -396,22 +393,27 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
396 IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;); 393 IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;);
397 394
398 /* Synchronize */ 395 /* Synchronize */
399 if ( hashbin->hb_type & HB_LOCK ) { 396 if (hashbin->hb_type & HB_LOCK)
400 spin_lock_irqsave_nested(&hashbin->hb_spinlock, flags, 397 spin_lock_irqsave(&hashbin->hb_spinlock, flags);
401 hashbin_lock_depth++);
402 }
403 398
404 /* 399 /*
405 * Free the entries in the hashbin, TODO: use hashbin_clear when 400 * Free the entries in the hashbin, TODO: use hashbin_clear when
406 * it has been shown to work 401 * it has been shown to work
407 */ 402 */
408 for (i = 0; i < HASHBIN_SIZE; i ++ ) { 403 for (i = 0; i < HASHBIN_SIZE; i ++ ) {
409 queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]); 404 while (1) {
410 while (queue ) { 405 queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
411 if (free_func) 406
412 (*free_func)(queue); 407 if (!queue)
413 queue = dequeue_first( 408 break;
414 (irda_queue_t**) &hashbin->hb_queue[i]); 409
410 if (free_func) {
411 if (hashbin->hb_type & HB_LOCK)
412 spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
413 free_func(queue);
414 if (hashbin->hb_type & HB_LOCK)
415 spin_lock_irqsave(&hashbin->hb_spinlock, flags);
416 }
415 } 417 }
416 } 418 }
417 419
@@ -420,12 +422,8 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
420 hashbin->magic = ~HB_MAGIC; 422 hashbin->magic = ~HB_MAGIC;
421 423
422 /* Release lock */ 424 /* Release lock */
423 if ( hashbin->hb_type & HB_LOCK) { 425 if (hashbin->hb_type & HB_LOCK)
424 spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); 426 spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
425#ifdef CONFIG_LOCKDEP
426 hashbin_lock_depth--;
427#endif
428 }
429 427
430 /* 428 /*
431 * Free the hashbin structure 429 * Free the hashbin structure
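
The irqueue.c rework replaces a lockdep-silencing nested spinlock with a simpler rule: never invoke the caller-supplied free_func with the hashbin lock held, dropping and re-taking the lock around each callback. A pthread-based analogue of the pattern (simplified, not the IrDA API):

/* The user callback may take other locks or even delete containers,
 * so it must run with the container lock released. */
#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; };

struct bin {
    pthread_mutex_t lock;
    struct node *head;
};

static struct node *dequeue_first(struct bin *b)
{
    struct node *n = b->head;
    if (n)
        b->head = n->next;
    return n;
}

static void bin_delete(struct bin *b, void (*free_func)(struct node *))
{
    pthread_mutex_lock(&b->lock);
    for (;;) {
        struct node *n = dequeue_first(b);
        if (!n)
            break;
        if (free_func) {
            /* run the callback unlocked, then re-take the lock */
            pthread_mutex_unlock(&b->lock);
            free_func(n);
            pthread_mutex_lock(&b->lock);
        }
    }
    pthread_mutex_unlock(&b->lock);
}

static void node_free(struct node *n) { free(n); }

int main(void)
{
    struct bin b = { PTHREAD_MUTEX_INITIALIZER, NULL };
    for (int i = 0; i < 3; i++) {
        struct node *n = malloc(sizeof(*n));
        n->next = b.head;
        b.head = n;
    }
    bin_delete(&b, node_free);
    return 0;
}
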
diff --git a/net/key/af_key.c b/net/key/af_key.c
index f9c9ecb0cdd3..2e1050ec2cf0 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -63,8 +63,13 @@ struct pfkey_sock {
63 } u; 63 } u;
64 struct sk_buff *skb; 64 struct sk_buff *skb;
65 } dump; 65 } dump;
66 struct mutex dump_lock;
66}; 67};
67 68
69static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
70 xfrm_address_t *saddr, xfrm_address_t *daddr,
71 u16 *family);
72
68static inline struct pfkey_sock *pfkey_sk(struct sock *sk) 73static inline struct pfkey_sock *pfkey_sk(struct sock *sk)
69{ 74{
70 return (struct pfkey_sock *)sk; 75 return (struct pfkey_sock *)sk;
@@ -139,6 +144,7 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
139{ 144{
140 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); 145 struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
141 struct sock *sk; 146 struct sock *sk;
147 struct pfkey_sock *pfk;
142 int err; 148 int err;
143 149
144 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 150 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
@@ -153,6 +159,9 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
153 if (sk == NULL) 159 if (sk == NULL)
154 goto out; 160 goto out;
155 161
162 pfk = pfkey_sk(sk);
163 mutex_init(&pfk->dump_lock);
164
156 sock->ops = &pfkey_ops; 165 sock->ops = &pfkey_ops;
157 sock_init_data(sock, sk); 166 sock_init_data(sock, sk);
158 167
@@ -281,13 +290,23 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
281 struct sadb_msg *hdr; 290 struct sadb_msg *hdr;
282 int rc; 291 int rc;
283 292
293 mutex_lock(&pfk->dump_lock);
294 if (!pfk->dump.dump) {
295 rc = 0;
296 goto out;
297 }
298
284 rc = pfk->dump.dump(pfk); 299 rc = pfk->dump.dump(pfk);
285 if (rc == -ENOBUFS) 300 if (rc == -ENOBUFS) {
286 return 0; 301 rc = 0;
302 goto out;
303 }
287 304
288 if (pfk->dump.skb) { 305 if (pfk->dump.skb) {
289 if (!pfkey_can_dump(&pfk->sk)) 306 if (!pfkey_can_dump(&pfk->sk)) {
290 return 0; 307 rc = 0;
308 goto out;
309 }
291 310
292 hdr = (struct sadb_msg *) pfk->dump.skb->data; 311 hdr = (struct sadb_msg *) pfk->dump.skb->data;
293 hdr->sadb_msg_seq = 0; 312 hdr->sadb_msg_seq = 0;
@@ -298,6 +317,9 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
298 } 317 }
299 318
300 pfkey_terminate_dump(pfk); 319 pfkey_terminate_dump(pfk);
320
321out:
322 mutex_unlock(&pfk->dump_lock);
301 return rc; 323 return rc;
302} 324}
303 325
@@ -1135,6 +1157,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
1135 goto out; 1157 goto out;
1136 } 1158 }
1137 1159
1160 err = -ENOBUFS;
1138 key = ext_hdrs[SADB_EXT_KEY_AUTH - 1]; 1161 key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
1139 if (sa->sadb_sa_auth) { 1162 if (sa->sadb_sa_auth) {
1140 int keysize = 0; 1163 int keysize = 0;
@@ -1146,8 +1169,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
1146 if (key) 1169 if (key)
1147 keysize = (key->sadb_key_bits + 7) / 8; 1170 keysize = (key->sadb_key_bits + 7) / 8;
1148 x->aalg = kmalloc(sizeof(*x->aalg) + keysize, GFP_KERNEL); 1171 x->aalg = kmalloc(sizeof(*x->aalg) + keysize, GFP_KERNEL);
1149 if (!x->aalg) 1172 if (!x->aalg) {
1173 err = -ENOMEM;
1150 goto out; 1174 goto out;
1175 }
1151 strcpy(x->aalg->alg_name, a->name); 1176 strcpy(x->aalg->alg_name, a->name);
1152 x->aalg->alg_key_len = 0; 1177 x->aalg->alg_key_len = 0;
1153 if (key) { 1178 if (key) {
@@ -1166,8 +1191,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
1166 goto out; 1191 goto out;
1167 } 1192 }
1168 x->calg = kmalloc(sizeof(*x->calg), GFP_KERNEL); 1193 x->calg = kmalloc(sizeof(*x->calg), GFP_KERNEL);
1169 if (!x->calg) 1194 if (!x->calg) {
1195 err = -ENOMEM;
1170 goto out; 1196 goto out;
1197 }
1171 strcpy(x->calg->alg_name, a->name); 1198 strcpy(x->calg->alg_name, a->name);
1172 x->props.calgo = sa->sadb_sa_encrypt; 1199 x->props.calgo = sa->sadb_sa_encrypt;
1173 } else { 1200 } else {
@@ -1181,8 +1208,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
1181 if (key) 1208 if (key)
1182 keysize = (key->sadb_key_bits + 7) / 8; 1209 keysize = (key->sadb_key_bits + 7) / 8;
1183 x->ealg = kmalloc(sizeof(*x->ealg) + keysize, GFP_KERNEL); 1210 x->ealg = kmalloc(sizeof(*x->ealg) + keysize, GFP_KERNEL);
1184 if (!x->ealg) 1211 if (!x->ealg) {
1212 err = -ENOMEM;
1185 goto out; 1213 goto out;
1214 }
1186 strcpy(x->ealg->alg_name, a->name); 1215 strcpy(x->ealg->alg_name, a->name);
1187 x->ealg->alg_key_len = 0; 1216 x->ealg->alg_key_len = 0;
1188 if (key) { 1217 if (key) {
@@ -1227,8 +1256,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
1227 struct xfrm_encap_tmpl *natt; 1256 struct xfrm_encap_tmpl *natt;
1228 1257
1229 x->encap = kmalloc(sizeof(*x->encap), GFP_KERNEL); 1258 x->encap = kmalloc(sizeof(*x->encap), GFP_KERNEL);
1230 if (!x->encap) 1259 if (!x->encap) {
1260 err = -ENOMEM;
1231 goto out; 1261 goto out;
1262 }
1232 1263
1233 natt = x->encap; 1264 natt = x->encap;
1234 n_type = ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1]; 1265 n_type = ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1];
@@ -1793,19 +1824,26 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
1793 struct xfrm_address_filter *filter = NULL; 1824 struct xfrm_address_filter *filter = NULL;
1794 struct pfkey_sock *pfk = pfkey_sk(sk); 1825 struct pfkey_sock *pfk = pfkey_sk(sk);
1795 1826
1796 if (pfk->dump.dump != NULL) 1827 mutex_lock(&pfk->dump_lock);
1828 if (pfk->dump.dump != NULL) {
1829 mutex_unlock(&pfk->dump_lock);
1797 return -EBUSY; 1830 return -EBUSY;
1831 }
1798 1832
1799 proto = pfkey_satype2proto(hdr->sadb_msg_satype); 1833 proto = pfkey_satype2proto(hdr->sadb_msg_satype);
1800 if (proto == 0) 1834 if (proto == 0) {
1835 mutex_unlock(&pfk->dump_lock);
1801 return -EINVAL; 1836 return -EINVAL;
1837 }
1802 1838
1803 if (ext_hdrs[SADB_X_EXT_FILTER - 1]) { 1839 if (ext_hdrs[SADB_X_EXT_FILTER - 1]) {
1804 struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1]; 1840 struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1];
1805 1841
1806 filter = kmalloc(sizeof(*filter), GFP_KERNEL); 1842 filter = kmalloc(sizeof(*filter), GFP_KERNEL);
1807 if (filter == NULL) 1843 if (filter == NULL) {
1844 mutex_unlock(&pfk->dump_lock);
1808 return -ENOMEM; 1845 return -ENOMEM;
1846 }
1809 1847
1810 memcpy(&filter->saddr, &xfilter->sadb_x_filter_saddr, 1848 memcpy(&filter->saddr, &xfilter->sadb_x_filter_saddr,
1811 sizeof(xfrm_address_t)); 1849 sizeof(xfrm_address_t));
@@ -1821,6 +1859,7 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
1821 pfk->dump.dump = pfkey_dump_sa; 1859 pfk->dump.dump = pfkey_dump_sa;
1822 pfk->dump.done = pfkey_dump_sa_done; 1860 pfk->dump.done = pfkey_dump_sa_done;
1823 xfrm_state_walk_init(&pfk->dump.u.state, proto, filter); 1861 xfrm_state_walk_init(&pfk->dump.u.state, proto, filter);
1862 mutex_unlock(&pfk->dump_lock);
1824 1863
1825 return pfkey_do_dump(pfk); 1864 return pfkey_do_dump(pfk);
1826} 1865}
@@ -1913,19 +1952,14 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
1913 1952
1914 /* addresses present only in tunnel mode */ 1953 /* addresses present only in tunnel mode */
1915 if (t->mode == XFRM_MODE_TUNNEL) { 1954 if (t->mode == XFRM_MODE_TUNNEL) {
1916 u8 *sa = (u8 *) (rq + 1); 1955 int err;
1917 int family, socklen;
1918
1919 family = pfkey_sockaddr_extract((struct sockaddr *)sa,
1920 &t->saddr);
1921 if (!family)
1922 return -EINVAL;
1923 1956
1924 socklen = pfkey_sockaddr_len(family); 1957 err = parse_sockaddr_pair(
1925 if (pfkey_sockaddr_extract((struct sockaddr *)(sa + socklen), 1958 (struct sockaddr *)(rq + 1),
1926 &t->id.daddr) != family) 1959 rq->sadb_x_ipsecrequest_len - sizeof(*rq),
1927 return -EINVAL; 1960 &t->saddr, &t->id.daddr, &t->encap_family);
1928 t->encap_family = family; 1961 if (err)
1962 return err;
1929 } else 1963 } else
1930 t->encap_family = xp->family; 1964 t->encap_family = xp->family;
1931 1965
@@ -1945,7 +1979,11 @@ parse_ipsecrequests(struct xfrm_policy *xp, struct sadb_x_policy *pol)
1945 if (pol->sadb_x_policy_len * 8 < sizeof(struct sadb_x_policy)) 1979 if (pol->sadb_x_policy_len * 8 < sizeof(struct sadb_x_policy))
1946 return -EINVAL; 1980 return -EINVAL;
1947 1981
1948 while (len >= sizeof(struct sadb_x_ipsecrequest)) { 1982 while (len >= sizeof(*rq)) {
1983 if (len < rq->sadb_x_ipsecrequest_len ||
1984 rq->sadb_x_ipsecrequest_len < sizeof(*rq))
1985 return -EINVAL;
1986
1949 if ((err = parse_ipsecrequest(xp, rq)) < 0) 1987 if ((err = parse_ipsecrequest(xp, rq)) < 0)
1950 return err; 1988 return err;
1951 len -= rq->sadb_x_ipsecrequest_len; 1989 len -= rq->sadb_x_ipsecrequest_len;
@@ -2408,7 +2446,6 @@ out:
2408 return err; 2446 return err;
2409} 2447}
2410 2448
2411#ifdef CONFIG_NET_KEY_MIGRATE
2412static int pfkey_sockaddr_pair_size(sa_family_t family) 2449static int pfkey_sockaddr_pair_size(sa_family_t family)
2413{ 2450{
2414 return PFKEY_ALIGN8(pfkey_sockaddr_len(family) * 2); 2451 return PFKEY_ALIGN8(pfkey_sockaddr_len(family) * 2);
@@ -2420,7 +2457,7 @@ static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
2420{ 2457{
2421 int af, socklen; 2458 int af, socklen;
2422 2459
2423 if (ext_len < pfkey_sockaddr_pair_size(sa->sa_family)) 2460 if (ext_len < 2 || ext_len < pfkey_sockaddr_pair_size(sa->sa_family))
2424 return -EINVAL; 2461 return -EINVAL;
2425 2462
2426 af = pfkey_sockaddr_extract(sa, saddr); 2463 af = pfkey_sockaddr_extract(sa, saddr);
@@ -2436,6 +2473,7 @@ static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
2436 return 0; 2473 return 0;
2437} 2474}
2438 2475
2476#ifdef CONFIG_NET_KEY_MIGRATE
2439static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len, 2477static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
2440 struct xfrm_migrate *m) 2478 struct xfrm_migrate *m)
2441{ 2479{
@@ -2443,13 +2481,14 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
2443 struct sadb_x_ipsecrequest *rq2; 2481 struct sadb_x_ipsecrequest *rq2;
2444 int mode; 2482 int mode;
2445 2483
2446 if (len <= sizeof(struct sadb_x_ipsecrequest) || 2484 if (len < sizeof(*rq1) ||
2447 len < rq1->sadb_x_ipsecrequest_len) 2485 len < rq1->sadb_x_ipsecrequest_len ||
2486 rq1->sadb_x_ipsecrequest_len < sizeof(*rq1))
2448 return -EINVAL; 2487 return -EINVAL;
2449 2488
2450 /* old endpoints */ 2489
2451 err = parse_sockaddr_pair((struct sockaddr *)(rq1 + 1), 2490 err = parse_sockaddr_pair((struct sockaddr *)(rq1 + 1),
2452 rq1->sadb_x_ipsecrequest_len, 2491 rq1->sadb_x_ipsecrequest_len - sizeof(*rq1),
2453 &m->old_saddr, &m->old_daddr, 2492 &m->old_saddr, &m->old_daddr,
2454 &m->old_family); 2493 &m->old_family);
2455 if (err) 2494 if (err)
@@ -2458,13 +2497,14 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
2458 rq2 = (struct sadb_x_ipsecrequest *)((u8 *)rq1 + rq1->sadb_x_ipsecrequest_len); 2497 rq2 = (struct sadb_x_ipsecrequest *)((u8 *)rq1 + rq1->sadb_x_ipsecrequest_len);
2459 len -= rq1->sadb_x_ipsecrequest_len; 2498 len -= rq1->sadb_x_ipsecrequest_len;
2460 2499
2461 if (len <= sizeof(struct sadb_x_ipsecrequest) || 2500 if (len <= sizeof(*rq2) ||
2462 len < rq2->sadb_x_ipsecrequest_len) 2501 len < rq2->sadb_x_ipsecrequest_len ||
2502 rq2->sadb_x_ipsecrequest_len < sizeof(*rq2))
2463 return -EINVAL; 2503 return -EINVAL;
2464 2504
2465 /* new endpoints */ 2505 /* new endpoints */
2466 err = parse_sockaddr_pair((struct sockaddr *)(rq2 + 1), 2506 err = parse_sockaddr_pair((struct sockaddr *)(rq2 + 1),
2467 rq2->sadb_x_ipsecrequest_len, 2507 rq2->sadb_x_ipsecrequest_len - sizeof(*rq2),
2468 &m->new_saddr, &m->new_daddr, 2508 &m->new_saddr, &m->new_daddr,
2469 &m->new_family); 2509 &m->new_family);
2470 if (err) 2510 if (err)
@@ -2679,14 +2719,18 @@ static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, const struct sadb
2679{ 2719{
2680 struct pfkey_sock *pfk = pfkey_sk(sk); 2720 struct pfkey_sock *pfk = pfkey_sk(sk);
2681 2721
2682 if (pfk->dump.dump != NULL) 2722 mutex_lock(&pfk->dump_lock);
2723 if (pfk->dump.dump != NULL) {
2724 mutex_unlock(&pfk->dump_lock);
2683 return -EBUSY; 2725 return -EBUSY;
2726 }
2684 2727
2685 pfk->dump.msg_version = hdr->sadb_msg_version; 2728 pfk->dump.msg_version = hdr->sadb_msg_version;
2686 pfk->dump.msg_portid = hdr->sadb_msg_pid; 2729 pfk->dump.msg_portid = hdr->sadb_msg_pid;
2687 pfk->dump.dump = pfkey_dump_sp; 2730 pfk->dump.dump = pfkey_dump_sp;
2688 pfk->dump.done = pfkey_dump_sp_done; 2731 pfk->dump.done = pfkey_dump_sp_done;
2689 xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN); 2732 xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN);
2733 mutex_unlock(&pfk->dump_lock);
2690 2734
2691 return pfkey_do_dump(pfk); 2735 return pfkey_do_dump(pfk);
2692} 2736}
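
The af_key.c changes close a race on the per-socket dump state: both installing a dump (pfkey_dump(), pfkey_spddump()) and consuming it (pfkey_do_dump()) now run under pfk->dump_lock, so two requests cannot both pass the EBUSY check and corrupt the shared walk. A user-space analogue of the locking shape:

/* Check-and-install and consume both happen under one mutex. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct dumper {
    pthread_mutex_t dump_lock;
    int (*dump)(struct dumper *);   /* NULL when no dump in flight */
};

static int do_walk(struct dumper *d) { (void)d; return 0; }

static int start_dump(struct dumper *d)
{
    pthread_mutex_lock(&d->dump_lock);
    if (d->dump) {                      /* tested under the lock */
        pthread_mutex_unlock(&d->dump_lock);
        return -EBUSY;
    }
    d->dump = do_walk;
    pthread_mutex_unlock(&d->dump_lock);
    return 0;
}

static int run_dump(struct dumper *d)
{
    int rc = 0;

    pthread_mutex_lock(&d->dump_lock);
    if (d->dump) {
        rc = d->dump(d);
        d->dump = NULL;                 /* terminate the dump */
    }
    pthread_mutex_unlock(&d->dump_lock);
    return rc;
}

int main(void)
{
    struct dumper d = { PTHREAD_MUTEX_INITIALIZER, NULL };
    printf("start: %d\n", start_dump(&d));        /* 0 */
    printf("start again: %d\n", start_dump(&d));  /* -EBUSY */
    printf("run: %d\n", run_dump(&d));            /* 0 */
    return 0;
}
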
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index ec17cbe8a02b..d3dec414fd44 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -278,7 +278,8 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn
278} 278}
279EXPORT_SYMBOL_GPL(l2tp_session_find); 279EXPORT_SYMBOL_GPL(l2tp_session_find);
280 280
281struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth) 281struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
282 bool do_ref)
282{ 283{
283 int hash; 284 int hash;
284 struct l2tp_session *session; 285 struct l2tp_session *session;
@@ -288,6 +289,9 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
288 for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { 289 for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
289 hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) { 290 hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
290 if (++count > nth) { 291 if (++count > nth) {
292 l2tp_session_inc_refcount(session);
293 if (do_ref && session->ref)
294 session->ref(session);
291 read_unlock_bh(&tunnel->hlist_lock); 295 read_unlock_bh(&tunnel->hlist_lock);
292 return session; 296 return session;
293 } 297 }
@@ -298,7 +302,7 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
298 302
299 return NULL; 303 return NULL;
300} 304}
301EXPORT_SYMBOL_GPL(l2tp_session_find_nth); 305EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
302 306
303/* Lookup a session by interface name. 307/* Lookup a session by interface name.
304 * This is very inefficient but is only used by management interfaces. 308 * This is very inefficient but is only used by management interfaces.
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 5871537af387..555d962a62d2 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -243,7 +243,8 @@ out:
243struct l2tp_session *l2tp_session_find(struct net *net, 243struct l2tp_session *l2tp_session_find(struct net *net,
244 struct l2tp_tunnel *tunnel, 244 struct l2tp_tunnel *tunnel,
245 u32 session_id); 245 u32 session_id);
246struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth); 246struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
247 bool do_ref);
247struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname); 248struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
248struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id); 249struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
249struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth); 250struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
@@ -273,6 +274,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb,
273int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, 274int l2tp_nl_register_ops(enum l2tp_pwtype pw_type,
274 const struct l2tp_nl_cmd_ops *ops); 275 const struct l2tp_nl_cmd_ops *ops);
275void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type); 276void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
277int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg);
276 278
277/* Session reference counts. Incremented when code obtains a reference 279/* Session reference counts. Incremented when code obtains a reference
278 * to a session. 280 * to a session.
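
The rename from l2tp_session_find_nth() to l2tp_session_get_nth() encodes the contract change: the reference is taken while the hlist lock is still held, and every caller owns (and must drop) that reference. A non-kernel sketch of a get-style lookup:

/* A lookup returning a pointer out of a lock-protected list takes a
 * reference inside the lock, so the object cannot be freed while the
 * caller still uses it. Toy refcount, not the kernel's. */
#include <pthread.h>
#include <stdio.h>

struct session {
    int refcount;
    int id;
    struct session *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct session *sessions;

static struct session *session_get_nth(int nth)
{
    struct session *s;
    int count = 0;

    pthread_mutex_lock(&list_lock);
    for (s = sessions; s; s = s->next) {
        if (count++ == nth) {
            s->refcount++;   /* taken before the lock is dropped */
            break;
        }
    }
    pthread_mutex_unlock(&list_lock);
    return s;                /* caller owns one reference, or NULL */
}

static void session_put(struct session *s)
{
    pthread_mutex_lock(&list_lock);
    s->refcount--;           /* real code would free at zero */
    pthread_mutex_unlock(&list_lock);
}

int main(void)
{
    struct session a = { 1, 42, NULL };
    sessions = &a;

    struct session *s = session_get_nth(0);
    if (s) {
        printf("session %d, refcount %d\n", s->id, s->refcount);
        session_put(s);
    }
    return 0;
}
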
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index 2d6760a2ae34..d100aed3d06f 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -53,7 +53,7 @@ static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd)
53 53
54static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd) 54static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)
55{ 55{
56 pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx); 56 pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
57 pd->session_idx++; 57 pd->session_idx++;
58 58
59 if (pd->session == NULL) { 59 if (pd->session == NULL) {
@@ -238,10 +238,14 @@ static int l2tp_dfs_seq_show(struct seq_file *m, void *v)
238 } 238 }
239 239
240 /* Show the tunnel or session context */ 240 /* Show the tunnel or session context */
241 if (pd->session == NULL) 241 if (!pd->session) {
242 l2tp_dfs_seq_tunnel_show(m, pd->tunnel); 242 l2tp_dfs_seq_tunnel_show(m, pd->tunnel);
243 else 243 } else {
244 l2tp_dfs_seq_session_show(m, pd->session); 244 l2tp_dfs_seq_session_show(m, pd->session);
245 if (pd->session->deref)
246 pd->session->deref(pd->session);
247 l2tp_session_dec_refcount(pd->session);
248 }
245 249
246out: 250out:
247 return 0; 251 return 0;
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index d0e906d39642..48ab93842322 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -11,6 +11,7 @@
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 13
14#include <asm/ioctls.h>
14#include <linux/icmp.h> 15#include <linux/icmp.h>
15#include <linux/module.h> 16#include <linux/module.h>
16#include <linux/skbuff.h> 17#include <linux/skbuff.h>
@@ -382,7 +383,7 @@ static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
382drop: 383drop:
383 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS); 384 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
384 kfree_skb(skb); 385 kfree_skb(skb);
385 return -1; 386 return 0;
386} 387}
387 388
388/* Userspace will call sendmsg() on the tunnel socket to send L2TP 389/* Userspace will call sendmsg() on the tunnel socket to send L2TP
@@ -555,6 +556,30 @@ out:
555 return err ? err : copied; 556 return err ? err : copied;
556} 557}
557 558
559int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg)
560{
561 struct sk_buff *skb;
562 int amount;
563
564 switch (cmd) {
565 case SIOCOUTQ:
566 amount = sk_wmem_alloc_get(sk);
567 break;
568 case SIOCINQ:
569 spin_lock_bh(&sk->sk_receive_queue.lock);
570 skb = skb_peek(&sk->sk_receive_queue);
571 amount = skb ? skb->len : 0;
572 spin_unlock_bh(&sk->sk_receive_queue.lock);
573 break;
574
575 default:
576 return -ENOIOCTLCMD;
577 }
578
579 return put_user(amount, (int __user *)arg);
580}
581EXPORT_SYMBOL(l2tp_ioctl);
582
558static struct proto l2tp_ip_prot = { 583static struct proto l2tp_ip_prot = {
559 .name = "L2TP/IP", 584 .name = "L2TP/IP",
560 .owner = THIS_MODULE, 585 .owner = THIS_MODULE,
@@ -563,7 +588,7 @@ static struct proto l2tp_ip_prot = {
563 .bind = l2tp_ip_bind, 588 .bind = l2tp_ip_bind,
564 .connect = l2tp_ip_connect, 589 .connect = l2tp_ip_connect,
565 .disconnect = l2tp_ip_disconnect, 590 .disconnect = l2tp_ip_disconnect,
566 .ioctl = udp_ioctl, 591 .ioctl = l2tp_ioctl,
567 .destroy = l2tp_ip_destroy_sock, 592 .destroy = l2tp_ip_destroy_sock,
568 .setsockopt = ip_setsockopt, 593 .setsockopt = ip_setsockopt,
569 .getsockopt = ip_getsockopt, 594 .getsockopt = ip_getsockopt,
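
With udp_ioctl() swapped for a protocol-specific l2tp_ioctl(), SIOCINQ/SIOCOUTQ report queue sizes without UDP's header-length assumptions. A short user-space usage sketch of those ioctls (shown on an ordinary datagram socket; the mechanics are the same):

#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/sockios.h>  /* SIOCINQ, SIOCOUTQ */

int main(void)
{
    int inq = 0, outq = 0;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);  /* stand-in socket */

    if (fd < 0)
        return 1;
    if (ioctl(fd, SIOCINQ, &inq) == 0)
        printf("bytes queued for read: %d\n", inq);
    if (ioctl(fd, SIOCOUTQ, &outq) == 0)
        printf("bytes not yet sent:    %d\n", outq);
    close(fd);
    return 0;
}
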
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 0289208b0346..c8f483cd2ca9 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -715,7 +715,7 @@ static struct proto l2tp_ip6_prot = {
715 .bind = l2tp_ip6_bind, 715 .bind = l2tp_ip6_bind,
716 .connect = l2tp_ip6_connect, 716 .connect = l2tp_ip6_connect,
717 .disconnect = l2tp_ip6_disconnect, 717 .disconnect = l2tp_ip6_disconnect,
718 .ioctl = udp_ioctl, 718 .ioctl = l2tp_ioctl,
719 .destroy = l2tp_ip6_destroy_sock, 719 .destroy = l2tp_ip6_destroy_sock,
720 .setsockopt = ipv6_setsockopt, 720 .setsockopt = ipv6_setsockopt,
721 .getsockopt = ipv6_getsockopt, 721 .getsockopt = ipv6_getsockopt,
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 2caaa84ce92d..665cc74df5c5 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -827,7 +827,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
827 goto out; 827 goto out;
828 } 828 }
829 829
830 session = l2tp_session_find_nth(tunnel, si); 830 session = l2tp_session_get_nth(tunnel, si, false);
831 if (session == NULL) { 831 if (session == NULL) {
832 ti++; 832 ti++;
833 tunnel = NULL; 833 tunnel = NULL;
@@ -837,8 +837,11 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
837 837
838 if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid, 838 if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid,
839 cb->nlh->nlmsg_seq, NLM_F_MULTI, 839 cb->nlh->nlmsg_seq, NLM_F_MULTI,
840 session, L2TP_CMD_SESSION_GET) < 0) 840 session, L2TP_CMD_SESSION_GET) < 0) {
841 l2tp_session_dec_refcount(session);
841 break; 842 break;
843 }
844 l2tp_session_dec_refcount(session);
842 845
843 si++; 846 si++;
844 } 847 }
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 1ad18c55064c..8ab9c5d74416 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -467,6 +467,10 @@ static void pppol2tp_session_close(struct l2tp_session *session)
467static void pppol2tp_session_destruct(struct sock *sk) 467static void pppol2tp_session_destruct(struct sock *sk)
468{ 468{
469 struct l2tp_session *session = sk->sk_user_data; 469 struct l2tp_session *session = sk->sk_user_data;
470
471 skb_queue_purge(&sk->sk_receive_queue);
472 skb_queue_purge(&sk->sk_write_queue);
473
470 if (session) { 474 if (session) {
471 sk->sk_user_data = NULL; 475 sk->sk_user_data = NULL;
472 BUG_ON(session->magic != L2TP_SESSION_MAGIC); 476 BUG_ON(session->magic != L2TP_SESSION_MAGIC);
@@ -505,9 +509,6 @@ static int pppol2tp_release(struct socket *sock)
505 l2tp_session_queue_purge(session); 509 l2tp_session_queue_purge(session);
506 sock_put(sk); 510 sock_put(sk);
507 } 511 }
508 skb_queue_purge(&sk->sk_receive_queue);
509 skb_queue_purge(&sk->sk_write_queue);
510
511 release_sock(sk); 512 release_sock(sk);
512 513
513 /* This will delete the session context via 514 /* This will delete the session context via
@@ -1574,7 +1575,7 @@ static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)
1574 1575
1575static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd) 1576static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
1576{ 1577{
1577 pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx); 1578 pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
1578 pd->session_idx++; 1579 pd->session_idx++;
1579 1580
1580 if (pd->session == NULL) { 1581 if (pd->session == NULL) {
@@ -1701,10 +1702,14 @@ static int pppol2tp_seq_show(struct seq_file *m, void *v)
1701 1702
1702 /* Show the tunnel or session context. 1703 /* Show the tunnel or session context.
1703 */ 1704 */
1704 if (pd->session == NULL) 1705 if (!pd->session) {
1705 pppol2tp_seq_tunnel_show(m, pd->tunnel); 1706 pppol2tp_seq_tunnel_show(m, pd->tunnel);
1706 else 1707 } else {
1707 pppol2tp_seq_session_show(m, pd->session); 1708 pppol2tp_seq_session_show(m, pd->session);
1709 if (pd->session->deref)
1710 pd->session->deref(pd->session);
1711 l2tp_session_dec_refcount(pd->session);
1712 }
1708 1713
1709out: 1714out:
1710 return 0; 1715 return 0;
@@ -1863,4 +1868,4 @@ MODULE_DESCRIPTION("PPP over L2TP over UDP");
1863MODULE_LICENSE("GPL"); 1868MODULE_LICENSE("GPL");
1864MODULE_VERSION(PPPOL2TP_DRV_VERSION); 1869MODULE_VERSION(PPPOL2TP_DRV_VERSION);
1865MODULE_ALIAS("pppox-proto-" __stringify(PX_PROTO_OL2TP)); 1870MODULE_ALIAS("pppox-proto-" __stringify(PX_PROTO_OL2TP));
1866MODULE_ALIAS_L2TP_PWTYPE(11); 1871MODULE_ALIAS_L2TP_PWTYPE(7);
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 3e821daf9dd4..8bc5a1bd2d45 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -821,7 +821,10 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
821 * another trick required to cope with how the PROCOM state 821 * another trick required to cope with how the PROCOM state
822 * machine works. -acme 822 * machine works. -acme
823 */ 823 */
824 skb_orphan(skb);
825 sock_hold(sk);
824 skb->sk = sk; 826 skb->sk = sk;
827 skb->destructor = sock_efree;
825 } 828 }
826 if (!sock_owned_by_user(sk)) 829 if (!sock_owned_by_user(sk))
827 llc_conn_rcv(sk, skb); 830 llc_conn_rcv(sk, skb);
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index d0e1e804ebd7..5404d0d195cc 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -290,7 +290,10 @@ static void llc_sap_rcv(struct llc_sap *sap, struct sk_buff *skb,
290 290
291 ev->type = LLC_SAP_EV_TYPE_PDU; 291 ev->type = LLC_SAP_EV_TYPE_PDU;
292 ev->reason = 0; 292 ev->reason = 0;
293 skb_orphan(skb);
294 sock_hold(sk);
293 skb->sk = sk; 295 skb->sk = sk;
296 skb->destructor = sock_efree;
294 llc_sap_state_process(sap, skb); 297 llc_sap_state_process(sap, skb);
295} 298}
296 299
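
Both LLC hunks fix skb-to-socket attachment: orphan the skb first, take a hold on the socket, and install sock_efree as the destructor so the hold is dropped when the skb is freed, instead of leaving skb->sk as a bare pointer that can outlive the socket. An illustrative analogue of that ownership protocol (simplified, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct sock { int refcnt; };

struct buf {
    struct sock *sk;
    void (*destructor)(struct buf *);
};

static void sock_hold(struct sock *sk) { sk->refcnt++; }
static void sock_put(struct sock *sk)  { sk->refcnt--; }

static void sock_release_ref(struct buf *b) { sock_put(b->sk); }

static void buf_orphan(struct buf *b)
{
    if (b->destructor)
        b->destructor(b);
    b->sk = NULL;
    b->destructor = NULL;
}

static void buf_set_owner(struct buf *b, struct sock *sk)
{
    buf_orphan(b);               /* release the previous owner, if any */
    sock_hold(sk);               /* pin the new owner */
    b->sk = sk;
    b->destructor = sock_release_ref;
}

static void buf_free(struct buf *b)
{
    buf_orphan(b);               /* runs the destructor */
    free(b);
}

int main(void)
{
    struct sock sk = { 1 };
    struct buf *b = calloc(1, sizeof(*b));

    buf_set_owner(b, &sk);
    printf("refcnt while queued: %d\n", sk.refcnt);  /* 2 */
    buf_free(b);
    printf("refcnt after free:   %d\n", sk.refcnt);  /* 1 */
    return 0;
}
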
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index bd06799e2e69..3bbbcaf1ec0b 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -7,6 +7,7 @@
7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net> 8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 * Copyright 2007-2010, Intel Corporation 9 * Copyright 2007-2010, Intel Corporation
10 * Copyright(c) 2015 Intel Deutschland GmbH
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as 13 * it under the terms of the GNU General Public License version 2 as
@@ -61,6 +62,14 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
61{ 62{
62 struct ieee80211_local *local = sta->local; 63 struct ieee80211_local *local = sta->local;
63 struct tid_ampdu_rx *tid_rx; 64 struct tid_ampdu_rx *tid_rx;
65 struct ieee80211_ampdu_params params = {
66 .sta = &sta->sta,
67 .action = IEEE80211_AMPDU_RX_STOP,
68 .tid = tid,
69 .amsdu = false,
70 .timeout = 0,
71 .ssn = 0,
72 };
64 73
65 lockdep_assert_held(&sta->ampdu_mlme.mtx); 74 lockdep_assert_held(&sta->ampdu_mlme.mtx);
66 75
@@ -78,8 +87,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
78 initiator == WLAN_BACK_RECIPIENT ? "recipient" : "initiator", 87
79 (int)reason); 88 (int)reason);
80 89
81 if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP, 90 if (drv_ampdu_action(local, sta->sdata, &params))
82 &sta->sta, tid, NULL, 0, false))
83 sdata_info(sta->sdata, 91 sdata_info(sta->sdata,
84 "HW problem - can not stop rx aggregation for %pM tid %d\n", 92 "HW problem - can not stop rx aggregation for %pM tid %d\n",
85 sta->sta.addr, tid); 93 sta->sta.addr, tid);
@@ -259,6 +267,15 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
259{ 267{
260 struct ieee80211_local *local = sta->sdata->local; 268 struct ieee80211_local *local = sta->sdata->local;
261 struct tid_ampdu_rx *tid_agg_rx; 269 struct tid_ampdu_rx *tid_agg_rx;
270 struct ieee80211_ampdu_params params = {
271 .sta = &sta->sta,
272 .action = IEEE80211_AMPDU_RX_START,
273 .tid = tid,
274 .amsdu = false,
275 .timeout = timeout,
276 .ssn = start_seq_num,
277 };
278
262 int i, ret = -EOPNOTSUPP; 279 int i, ret = -EOPNOTSUPP;
263 u16 status = WLAN_STATUS_REQUEST_DECLINED; 280 u16 status = WLAN_STATUS_REQUEST_DECLINED;
264 281
@@ -301,7 +318,10 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
301 if (buf_size > sta->sta.max_rx_aggregation_subframes) 318 if (buf_size > sta->sta.max_rx_aggregation_subframes)
302 buf_size = sta->sta.max_rx_aggregation_subframes; 319 buf_size = sta->sta.max_rx_aggregation_subframes;
303 320
304 ht_dbg(sta->sdata, "AddBA Req buf_size=%d\n", buf_size); 321 params.buf_size = buf_size;
322
323 ht_dbg(sta->sdata, "AddBA Req buf_size=%d for %pM\n",
324 buf_size, sta->sta.addr);
305 325
306 if (sta->ampdu_mlme.tid_rx[tid]) { 326 if (sta->ampdu_mlme.tid_rx[tid]) {
307 ht_dbg_ratelimited(sta->sdata, 327 ht_dbg_ratelimited(sta->sdata,
@@ -346,8 +366,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
346 for (i = 0; i < buf_size; i++) 366 for (i = 0; i < buf_size; i++)
347 __skb_queue_head_init(&tid_agg_rx->reorder_buf[i]); 367 __skb_queue_head_init(&tid_agg_rx->reorder_buf[i]);
348 368
349 ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START, 369 ret = drv_ampdu_action(local, sta->sdata, &params);
350 &sta->sta, tid, &start_seq_num, 0, false);
351 ht_dbg(sta->sdata, "Rx A-MPDU request on %pM tid %d result %d\n", 370 ht_dbg(sta->sdata, "Rx A-MPDU request on %pM tid %d result %d\n",
352 sta->sta.addr, tid, ret); 371 sta->sta.addr, tid, ret);
353 if (ret) { 372 if (ret) {
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index ff757181b0a8..4932e9f243a2 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -7,6 +7,7 @@
7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net> 8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 * Copyright 2007-2010, Intel Corporation 9 * Copyright 2007-2010, Intel Corporation
10 * Copyright(c) 2015 Intel Deutschland GmbH
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as 13 * it under the terms of the GNU General Public License version 2 as
@@ -295,7 +296,14 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
295{ 296{
296 struct ieee80211_local *local = sta->local; 297 struct ieee80211_local *local = sta->local;
297 struct tid_ampdu_tx *tid_tx; 298 struct tid_ampdu_tx *tid_tx;
298 enum ieee80211_ampdu_mlme_action action; 299 struct ieee80211_ampdu_params params = {
300 .sta = &sta->sta,
301 .tid = tid,
302 .buf_size = 0,
303 .amsdu = false,
304 .timeout = 0,
305 .ssn = 0,
306 };
299 int ret; 307 int ret;
300 308
301 lockdep_assert_held(&sta->ampdu_mlme.mtx); 309 lockdep_assert_held(&sta->ampdu_mlme.mtx);
@@ -304,10 +312,10 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
304 case AGG_STOP_DECLINED: 312 case AGG_STOP_DECLINED:
305 case AGG_STOP_LOCAL_REQUEST: 313 case AGG_STOP_LOCAL_REQUEST:
306 case AGG_STOP_PEER_REQUEST: 314 case AGG_STOP_PEER_REQUEST:
307 action = IEEE80211_AMPDU_TX_STOP_CONT; 315 params.action = IEEE80211_AMPDU_TX_STOP_CONT;
308 break; 316 break;
309 case AGG_STOP_DESTROY_STA: 317 case AGG_STOP_DESTROY_STA:
310 action = IEEE80211_AMPDU_TX_STOP_FLUSH; 318 params.action = IEEE80211_AMPDU_TX_STOP_FLUSH;
311 break; 319 break;
312 default: 320 default:
313 WARN_ON_ONCE(1); 321 WARN_ON_ONCE(1);
@@ -330,9 +338,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
330 spin_unlock_bh(&sta->lock); 338 spin_unlock_bh(&sta->lock);
331 if (reason != AGG_STOP_DESTROY_STA) 339 if (reason != AGG_STOP_DESTROY_STA)
332 return -EALREADY; 340 return -EALREADY;
333 ret = drv_ampdu_action(local, sta->sdata, 341 params.action = IEEE80211_AMPDU_TX_STOP_FLUSH_CONT;
334 IEEE80211_AMPDU_TX_STOP_FLUSH_CONT, 342 ret = drv_ampdu_action(local, sta->sdata, &params);
335 &sta->sta, tid, NULL, 0, false);
336 WARN_ON_ONCE(ret); 343 WARN_ON_ONCE(ret);
337 return 0; 344 return 0;
338 } 345 }
@@ -381,8 +388,7 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
381 WLAN_BACK_INITIATOR; 388 WLAN_BACK_INITIATOR;
382 tid_tx->tx_stop = reason == AGG_STOP_LOCAL_REQUEST; 389 tid_tx->tx_stop = reason == AGG_STOP_LOCAL_REQUEST;
383 390
384 ret = drv_ampdu_action(local, sta->sdata, action, 391 ret = drv_ampdu_action(local, sta->sdata, &params);
385 &sta->sta, tid, NULL, 0, false);
386 392
387 /* HW shall not deny going back to legacy */ 393 /* HW shall not deny going back to legacy */
388 if (WARN_ON(ret)) { 394 if (WARN_ON(ret)) {
@@ -445,7 +451,14 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
445 struct tid_ampdu_tx *tid_tx; 451 struct tid_ampdu_tx *tid_tx;
446 struct ieee80211_local *local = sta->local; 452 struct ieee80211_local *local = sta->local;
447 struct ieee80211_sub_if_data *sdata = sta->sdata; 453 struct ieee80211_sub_if_data *sdata = sta->sdata;
448 u16 start_seq_num; 454 struct ieee80211_ampdu_params params = {
455 .sta = &sta->sta,
456 .action = IEEE80211_AMPDU_TX_START,
457 .tid = tid,
458 .buf_size = 0,
459 .amsdu = false,
460 .timeout = 0,
461 };
449 int ret; 462 int ret;
450 463
451 tid_tx = rcu_dereference_protected_tid_tx(sta, tid); 464 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
@@ -467,10 +480,8 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
467 */ 480 */
468 synchronize_net(); 481 synchronize_net();
469 482
470 start_seq_num = sta->tid_seq[tid] >> 4; 483 params.ssn = sta->tid_seq[tid] >> 4;
471 484 ret = drv_ampdu_action(local, sdata, &params);
472 ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
473 &sta->sta, tid, &start_seq_num, 0, false);
474 if (ret) { 485 if (ret) {
475 ht_dbg(sdata, 486 ht_dbg(sdata,
476 "BA request denied - HW unavailable for %pM tid %d\n", 487 "BA request denied - HW unavailable for %pM tid %d\n",
@@ -499,7 +510,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
499 510
500 /* send AddBA request */ 511 /* send AddBA request */
501 ieee80211_send_addba_request(sdata, sta->sta.addr, tid, 512 ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
502 tid_tx->dialog_token, start_seq_num, 513 tid_tx->dialog_token, params.ssn,
503 IEEE80211_MAX_AMPDU_BUF, 514 IEEE80211_MAX_AMPDU_BUF,
504 tid_tx->timeout); 515 tid_tx->timeout);
505} 516}
@@ -684,18 +695,24 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
684 struct sta_info *sta, u16 tid) 695 struct sta_info *sta, u16 tid)
685{ 696{
686 struct tid_ampdu_tx *tid_tx; 697 struct tid_ampdu_tx *tid_tx;
698 struct ieee80211_ampdu_params params = {
699 .sta = &sta->sta,
700 .action = IEEE80211_AMPDU_TX_OPERATIONAL,
701 .tid = tid,
702 .timeout = 0,
703 .ssn = 0,
704 };
687 705
688 lockdep_assert_held(&sta->ampdu_mlme.mtx); 706 lockdep_assert_held(&sta->ampdu_mlme.mtx);
689 707
690 tid_tx = rcu_dereference_protected_tid_tx(sta, tid); 708 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
709 params.buf_size = tid_tx->buf_size;
710 params.amsdu = tid_tx->amsdu;
691 711
692 ht_dbg(sta->sdata, "Aggregation is on for %pM tid %d\n", 712 ht_dbg(sta->sdata, "Aggregation is on for %pM tid %d\n",
693 sta->sta.addr, tid); 713 sta->sta.addr, tid);
694 714
695 drv_ampdu_action(local, sta->sdata, 715 drv_ampdu_action(local, sta->sdata, &params);
696 IEEE80211_AMPDU_TX_OPERATIONAL,
697 &sta->sta, tid, NULL, tid_tx->buf_size,
698 tid_tx->amsdu);
699 716
700 /* 717 /*
701 * synchronize with TX path, while splicing the TX path 718 * synchronize with TX path, while splicing the TX path
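
The mac80211 series above collapses drv_ampdu_action()'s long argument list (action, sta, tid, ssn, buf_size, amsdu) into one struct ieee80211_ampdu_params. A minimal sketch of the refactoring pattern itself (illustrative types, not the mac80211 definitions): designated initializers zero unset fields, and new fields no longer touch every call site.

#include <stdbool.h>
#include <stdio.h>

enum ampdu_action { AMPDU_RX_START, AMPDU_RX_STOP, AMPDU_TX_START };

struct ampdu_params {
    enum ampdu_action action;
    unsigned tid;
    unsigned ssn;
    unsigned buf_size;
    unsigned timeout;
    bool amsdu;
};

static int drv_ampdu_action(const struct ampdu_params *p)
{
    printf("action=%d tid=%u ssn=%u buf=%u amsdu=%d timeout=%u\n",
           p->action, p->tid, p->ssn, p->buf_size, p->amsdu, p->timeout);
    return 0;
}

int main(void)
{
    struct ampdu_params params = {
        .action = AMPDU_RX_START,
        .tid = 5,
        .ssn = 100,
        .buf_size = 64,
        /* .timeout and .amsdu default to 0/false */
    };
    return drv_ampdu_action(&params);
}
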
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
index ca1fe5576103..c258f1041d33 100644
--- a/net/mac80211/driver-ops.c
+++ b/net/mac80211/driver-ops.c
@@ -284,9 +284,7 @@ int drv_switch_vif_chanctx(struct ieee80211_local *local,
284 284
285int drv_ampdu_action(struct ieee80211_local *local, 285int drv_ampdu_action(struct ieee80211_local *local,
286 struct ieee80211_sub_if_data *sdata, 286 struct ieee80211_sub_if_data *sdata,
287 enum ieee80211_ampdu_mlme_action action, 287 struct ieee80211_ampdu_params *params)
288 struct ieee80211_sta *sta, u16 tid,
289 u16 *ssn, u8 buf_size, bool amsdu)
290{ 288{
291 int ret = -EOPNOTSUPP; 289 int ret = -EOPNOTSUPP;
292 290
@@ -296,12 +294,10 @@ int drv_ampdu_action(struct ieee80211_local *local,
296 if (!check_sdata_in_driver(sdata)) 294 if (!check_sdata_in_driver(sdata))
297 return -EIO; 295 return -EIO;
298 296
299 trace_drv_ampdu_action(local, sdata, action, sta, tid, 297 trace_drv_ampdu_action(local, sdata, params);
300 ssn, buf_size, amsdu);
301 298
302 if (local->ops->ampdu_action) 299 if (local->ops->ampdu_action)
303 ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action, 300 ret = local->ops->ampdu_action(&local->hw, &sdata->vif, params);
304 sta, tid, ssn, buf_size, amsdu);
305 301
306 trace_drv_return_int(local, ret); 302 trace_drv_return_int(local, ret);
307 303
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 154ce4b13406..18b0d65baff0 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -585,9 +585,7 @@ static inline int drv_tx_last_beacon(struct ieee80211_local *local)
585 585
586int drv_ampdu_action(struct ieee80211_local *local, 586int drv_ampdu_action(struct ieee80211_local *local,
587 struct ieee80211_sub_if_data *sdata, 587 struct ieee80211_sub_if_data *sdata,
588 enum ieee80211_ampdu_mlme_action action, 588 struct ieee80211_ampdu_params *params);
589 struct ieee80211_sta *sta, u16 tid,
590 u16 *ssn, u8 buf_size, bool amsdu);
591 589
592static inline int drv_get_survey(struct ieee80211_local *local, int idx, 590static inline int drv_get_survey(struct ieee80211_local *local, int idx,
593 struct survey_info *survey) 591 struct survey_info *survey)
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 980e9e9b6684..24ba31601fc9 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -66,6 +66,8 @@ ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata,
66 2 + (IEEE80211_MAX_SUPP_RATES - 8) + 66 2 + (IEEE80211_MAX_SUPP_RATES - 8) +
67 2 + sizeof(struct ieee80211_ht_cap) + 67 2 + sizeof(struct ieee80211_ht_cap) +
68 2 + sizeof(struct ieee80211_ht_operation) + 68 2 + sizeof(struct ieee80211_ht_operation) +
69 2 + sizeof(struct ieee80211_vht_cap) +
70 2 + sizeof(struct ieee80211_vht_operation) +
69 ifibss->ie_len; 71 ifibss->ie_len;
70 presp = kzalloc(sizeof(*presp) + frame_len, GFP_KERNEL); 72 presp = kzalloc(sizeof(*presp) + frame_len, GFP_KERNEL);
71 if (!presp) 73 if (!presp)
@@ -486,14 +488,14 @@ int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata,
486 struct beacon_data *presp, *old_presp; 488 struct beacon_data *presp, *old_presp;
487 struct cfg80211_bss *cbss; 489 struct cfg80211_bss *cbss;
488 const struct cfg80211_bss_ies *ies; 490 const struct cfg80211_bss_ies *ies;
489 u16 capability = 0; 491 u16 capability = WLAN_CAPABILITY_IBSS;
490 u64 tsf; 492 u64 tsf;
491 int ret = 0; 493 int ret = 0;
492 494
493 sdata_assert_lock(sdata); 495 sdata_assert_lock(sdata);
494 496
495 if (ifibss->privacy) 497 if (ifibss->privacy)
496 capability = WLAN_CAPABILITY_PRIVACY; 498 capability |= WLAN_CAPABILITY_PRIVACY;
497 499
498 cbss = cfg80211_get_bss(sdata->local->hw.wiphy, ifibss->chandef.chan, 500 cbss = cfg80211_get_bss(sdata->local->hw.wiphy, ifibss->chandef.chan,
499 ifibss->bssid, ifibss->ssid, 501 ifibss->bssid, ifibss->ssid,
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 175ffcf7fb06..2ee53dc1ddf7 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -891,12 +891,17 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
891 supp_ht = supp_ht || sband->ht_cap.ht_supported; 891 supp_ht = supp_ht || sband->ht_cap.ht_supported;
892 supp_vht = supp_vht || sband->vht_cap.vht_supported; 892 supp_vht = supp_vht || sband->vht_cap.vht_supported;
893 893
894 if (sband->ht_cap.ht_supported) 894 if (!sband->ht_cap.ht_supported)
895 local->rx_chains = 895 continue;
896 max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
897 local->rx_chains);
898 896
899 /* TODO: consider VHT for RX chains, hopefully it's the same */ 897 /* TODO: consider VHT for RX chains, hopefully it's the same */
898 local->rx_chains =
899 max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
900 local->rx_chains);
901
902 /* no need to mask, SM_PS_DISABLED has all bits set */
903 sband->ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
904 IEEE80211_HT_CAP_SM_PS_SHIFT;
900 } 905 }
901 906
902 /* if low-level driver supports AP, we also support VLAN */ 907 /* if low-level driver supports AP, we also support VLAN */
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index f7bb6829b415..9063e8e736ad 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -355,7 +355,7 @@ int mesh_add_vendor_ies(struct ieee80211_sub_if_data *sdata,
355 /* fast-forward to vendor IEs */ 355 /* fast-forward to vendor IEs */
356 offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0); 356 offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0);
357 357
358 if (offset) { 358 if (offset < ifmsh->ie_len) {
359 len = ifmsh->ie_len - offset; 359 len = ifmsh->ie_len - offset;
360 data = ifmsh->ie + offset; 360 data = ifmsh->ie + offset;
361 if (skb_tailroom(skb) < len) 361 if (skb_tailroom(skb) < len)
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 00a43a70e1fc..0402fa45b343 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -168,6 +168,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
168 break; 168 break;
169 } 169 }
170 170
171 flush_delayed_work(&sdata->dec_tailroom_needed_wk);
171 drv_remove_interface(local, sdata); 172 drv_remove_interface(local, sdata);
172 } 173 }
173 174
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 2b528389409f..3bcabc2ba4a6 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1455,12 +1455,16 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1455 */ 1455 */
1456 if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) && 1456 if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
1457 !ieee80211_has_morefrags(hdr->frame_control) && 1457 !ieee80211_has_morefrags(hdr->frame_control) &&
1458 !ieee80211_is_back_req(hdr->frame_control) &&
1458 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && 1459 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1459 (rx->sdata->vif.type == NL80211_IFTYPE_AP || 1460 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1460 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && 1461 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
1461 /* PM bit is only checked in frames where it isn't reserved, 1462 /*
1463 * PM bit is only checked in frames where it isn't reserved,
1462 * in AP mode it's reserved in non-bufferable management frames 1464 * in AP mode it's reserved in non-bufferable management frames
1463 * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field) 1465 * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field)
1466 * BAR frames should be ignored as specified in
1467 * IEEE 802.11-2012 10.2.1.2.
1464 */ 1468 */
1465 (!ieee80211_is_mgmt(hdr->frame_control) || 1469 (!ieee80211_is_mgmt(hdr->frame_control) ||
1466 ieee80211_is_bufferable_mmpdu(hdr->frame_control))) { 1470 ieee80211_is_bufferable_mmpdu(hdr->frame_control))) {
@@ -3396,6 +3400,27 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
3396 !ether_addr_equal(bssid, hdr->addr1)) 3400 !ether_addr_equal(bssid, hdr->addr1))
3397 return false; 3401 return false;
3398 } 3402 }
3403
3404 /*
3405 * 802.11-2016 Table 9-26 says that for data frames, A1 must be
3406 * the BSSID - we've checked that already but may have accepted
3407 * the wildcard (ff:ff:ff:ff:ff:ff).
3408 *
3409 * It also says:
3410 * The BSSID of the Data frame is determined as follows:
3411 * a) If the STA is contained within an AP or is associated
3412 * with an AP, the BSSID is the address currently in use
3413 * by the STA contained in the AP.
3414 *
3415 * So we should not accept data frames with an address that's
3416 * multicast.
3417 *
3418 * Accepting it also opens a security problem because stations
3419 * could encrypt it with the GTK and inject traffic that way.
3420 */
3421 if (ieee80211_is_data(hdr->frame_control) && multicast)
3422 return false;
3423
3399 return true; 3424 return true;
3400 case NL80211_IFTYPE_WDS: 3425 case NL80211_IFTYPE_WDS:
3401 if (bssid || !ieee80211_is_data(hdr->frame_control)) 3426 if (bssid || !ieee80211_is_data(hdr->frame_control))
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 56c6d6cfa5a1..913e959b03cf 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -80,7 +80,23 @@
80#define KEY_PR_FMT " cipher:0x%x, flags=%#x, keyidx=%d, hw_key_idx=%d" 80#define KEY_PR_FMT " cipher:0x%x, flags=%#x, keyidx=%d, hw_key_idx=%d"
81#define KEY_PR_ARG __entry->cipher, __entry->flags, __entry->keyidx, __entry->hw_key_idx 81#define KEY_PR_ARG __entry->cipher, __entry->flags, __entry->keyidx, __entry->hw_key_idx
82 82
83 83#define AMPDU_ACTION_ENTRY __field(enum ieee80211_ampdu_mlme_action, \
84 ieee80211_ampdu_mlme_action) \
85 STA_ENTRY \
86 __field(u16, tid) \
87 __field(u16, ssn) \
88 __field(u8, buf_size) \
89 __field(bool, amsdu) \
90 __field(u16, timeout)
91#define AMPDU_ACTION_ASSIGN STA_NAMED_ASSIGN(params->sta); \
92 __entry->tid = params->tid; \
93 __entry->ssn = params->ssn; \
94 __entry->buf_size = params->buf_size; \
95 __entry->amsdu = params->amsdu; \
96 __entry->timeout = params->timeout;
97#define AMPDU_ACTION_PR_FMT STA_PR_FMT " tid %d, ssn %d, buf_size %u, amsdu %d, timeout %d"
98#define AMPDU_ACTION_PR_ARG STA_PR_ARG, __entry->tid, __entry->ssn, \
99 __entry->buf_size, __entry->amsdu, __entry->timeout
84 100
85/* 101/*
86 * Tracing for driver callbacks. 102 * Tracing for driver callbacks.
@@ -970,38 +986,25 @@ DEFINE_EVENT(local_only_evt, drv_tx_last_beacon,
 TRACE_EVENT(drv_ampdu_action,
 	TP_PROTO(struct ieee80211_local *local,
 		 struct ieee80211_sub_if_data *sdata,
-		 enum ieee80211_ampdu_mlme_action action,
-		 struct ieee80211_sta *sta, u16 tid,
-		 u16 *ssn, u8 buf_size, bool amsdu),
+		 struct ieee80211_ampdu_params *params),
 
-	TP_ARGS(local, sdata, action, sta, tid, ssn, buf_size, amsdu),
+	TP_ARGS(local, sdata, params),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
-		STA_ENTRY
-		__field(u32, action)
-		__field(u16, tid)
-		__field(u16, ssn)
-		__field(u8, buf_size)
-		__field(bool, amsdu)
 		VIF_ENTRY
+		AMPDU_ACTION_ENTRY
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
 		VIF_ASSIGN;
-		STA_ASSIGN;
-		__entry->action = action;
-		__entry->tid = tid;
-		__entry->ssn = ssn ? *ssn : 0;
-		__entry->buf_size = buf_size;
-		__entry->amsdu = amsdu;
+		AMPDU_ACTION_ASSIGN;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d buf:%d amsdu:%d",
-		LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action,
-		__entry->tid, __entry->buf_size, __entry->amsdu
+		LOCAL_PR_FMT VIF_PR_FMT AMPDU_ACTION_PR_FMT,
+		LOCAL_PR_ARG, VIF_PR_ARG, AMPDU_ACTION_PR_ARG
 	)
 );
 
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index d824c38971ed..e19ea1c53afa 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -16,6 +16,7 @@
 #include <asm/unaligned.h>
 #include <net/mac80211.h>
 #include <crypto/aes.h>
+#include <crypto/algapi.h>
 
 #include "ieee80211_i.h"
 #include "michael.h"
@@ -152,7 +153,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
 	data_len = skb->len - hdrlen - MICHAEL_MIC_LEN;
 	key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
 	michael_mic(key, hdr, data, data_len, mic);
-	if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0)
+	if (crypto_memneq(mic, data + data_len, MICHAEL_MIC_LEN))
 		goto mic_fail;
 
 	/* remove Michael MIC from payload */
@@ -1044,7 +1045,7 @@ ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
 	bip_aad(skb, aad);
 	ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad,
 			   skb->data + 24, skb->len - 24, mic);
-	if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+	if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
 		key->u.aes_cmac.icverrors++;
 		return RX_DROP_UNUSABLE;
 	}
@@ -1094,7 +1095,7 @@ ieee80211_crypto_aes_cmac_256_decrypt(struct ieee80211_rx_data *rx)
 	bip_aad(skb, aad);
 	ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad,
 			       skb->data + 24, skb->len - 24, mic);
-	if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+	if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
 		key->u.aes_cmac.icverrors++;
 		return RX_DROP_UNUSABLE;
 	}
@@ -1198,7 +1199,7 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
 	if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce,
 			       skb->data + 24, skb->len - 24,
 			       mic) < 0 ||
-	    memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+	    crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
 		key->u.aes_gmac.icverrors++;
 		return RX_DROP_UNUSABLE;
 	}
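All four wpa.c hunks swap memcmp() for crypto_memneq() when verifying a MIC. The point is timing: memcmp() may return at the first mismatching byte, so response time leaks how long a correct prefix the attacker supplied, while crypto_memneq() touches every byte regardless. A sketch of a constant-time inequality test along the same lines (the kernel's crypto_memneq() is the authoritative version; ct_memneq is a hypothetical name):

#include <stddef.h>

static int ct_memneq(const void *a, const void *b, size_t n)
{
	const unsigned char *pa = a, *pb = b;
	unsigned char diff = 0;
	size_t i;

	/* accumulate differences; never branch on secret data */
	for (i = 0; i < n; i++)
		diff |= pa[i] ^ pb[i];

	return diff != 0;	/* nonzero iff the buffers differ */
}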
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 881bc2072809..52cfc4478511 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1567,6 +1567,7 @@ static void mpls_net_exit(struct net *net)
 	for (index = 0; index < platform_labels; index++) {
 		struct mpls_route *rt = rtnl_dereference(platform_label[index]);
 		RCU_INIT_POINTER(platform_label[index], NULL);
+		mpls_notify_route(net, index, rt, NULL, NULL);
 		mpls_rt_free(rt);
 	}
 	rtnl_unlock();
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 4da560005b0e..dd1649caa2b2 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -845,10 +845,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
 {
 	unsigned int verdict = NF_DROP;
 
-	if (IP_VS_FWD_METHOD(cp) != 0) {
-		pr_err("shouldn't reach here, because the box is on the "
-		       "half connection in the tun/dr module.\n");
-	}
+	if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
+		goto ignore_cp;
 
 	/* Ensure the checksum is correct */
 	if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
@@ -882,6 +880,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
 		ip_vs_notrack(skb);
 	else
 		ip_vs_update_conntrack(skb, cp, 0);
+
+ignore_cp:
 	verdict = NF_ACCEPT;
 
 out:
@@ -1242,8 +1242,11 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
 	 */
 	cp = pp->conn_out_get(ipvs, af, skb, &iph);
 
-	if (likely(cp))
+	if (likely(cp)) {
+		if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
+			goto ignore_cp;
 		return handle_response(af, skb, pd, cp, &iph, hooknum);
+	}
 	if (sysctl_nat_icmp_send(ipvs) &&
 	    (pp->protocol == IPPROTO_TCP ||
 	     pp->protocol == IPPROTO_UDP ||
@@ -1285,9 +1288,15 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
 		}
 	}
 }
+
+out:
 	IP_VS_DBG_PKT(12, af, pp, skb, iph.off,
 		      "ip_vs_out: packet continues traversal as normal");
 	return NF_ACCEPT;
+
+ignore_cp:
+	__ip_vs_conn_put(cp);
+	goto out;
 }
 
 /*
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 1a9545965c0d..531ca55f1af6 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -53,7 +53,11 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id,
 
 	rcu_read_lock();
 	t = rcu_dereference(nf_ct_ext_types[id]);
-	BUG_ON(t == NULL);
+	if (!t) {
+		rcu_read_unlock();
+		return NULL;
+	}
+
 	off = ALIGN(sizeof(struct nf_ct_ext), t->align);
 	len = off + t->len + var_alloc_len;
 	alloc_size = t->alloc_size + var_alloc_len;
@@ -88,7 +92,10 @@ void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id,
 
 	rcu_read_lock();
 	t = rcu_dereference(nf_ct_ext_types[id]);
-	BUG_ON(t == NULL);
+	if (!t) {
+		rcu_read_unlock();
+		return NULL;
+	}
 
 	newoff = ALIGN(old->len, t->align);
 	newlen = newoff + t->len + var_alloc_len;
@@ -186,6 +193,6 @@ void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
 	RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL);
 	update_alloc_size(type);
 	mutex_unlock(&nf_ct_ext_type_mutex);
-	rcu_barrier(); /* Wait for completion of call_rcu()'s */
+	synchronize_rcu();
 }
 EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);
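The two nf_conntrack_extend.c hunks replace BUG_ON(t == NULL) with a graceful failure, and the failure path must also drop the RCU read lock it just took. A userspace model of the pattern, with a plain mutex standing in for rcu_read_lock()/rcu_read_unlock() (hypothetical names, illustration only):

#include <pthread.h>
#include <stddef.h>

struct ext_type { size_t len; };

#define EXT_MAX 16

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ext_type *ext_types[EXT_MAX];	/* slots may be NULL */

static struct ext_type *ext_type_get(unsigned int id)
{
	struct ext_type *t = NULL;

	pthread_mutex_lock(&reg_lock);
	if (id < EXT_MAX)
		t = ext_types[id];
	if (!t) {
		/* type unregistered concurrently: release the lock and
		 * fail the lookup instead of crashing the whole system */
		pthread_mutex_unlock(&reg_lock);
		return NULL;
	}
	pthread_mutex_unlock(&reg_lock);
	return t;
}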
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 9f5272968abb..e565b2becb14 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -45,6 +45,8 @@
 #include <net/netfilter/nf_conntrack_zones.h>
 #include <net/netfilter/nf_conntrack_timestamp.h>
 #include <net/netfilter/nf_conntrack_labels.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
+#include <net/netfilter/nf_conntrack_synproxy.h>
 #ifdef CONFIG_NF_NAT_NEEDED
 #include <net/netfilter/nf_nat_core.h>
 #include <net/netfilter/nf_nat_l4proto.h>
@@ -1798,6 +1800,8 @@ ctnetlink_create_conntrack(struct net *net,
 	nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
 	nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
 	nf_ct_labels_ext_add(ct);
+	nfct_seqadj_ext_add(ct);
+	nfct_synproxy_ext_add(ct);
 
 	/* we must add conntrack extensions before confirmation. */
 	ct->status |= IPS_CONFIRMED;
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index 0975c993a94e..f11aa28b96ce 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -49,6 +49,7 @@
 #include <linux/notifier.h>
 #include <net/net_namespace.h>
 #include <net/sock.h>
+#include <net/inet_sock.h>
 
 struct idletimer_tg_attr {
 	struct attribute attr;
@@ -355,7 +356,7 @@ static void reset_timer(const struct idletimer_tg_info *info,
 	/* Stores the uid resposible for waking up the radio */
 	if (skb && (skb->sk)) {
 		timer->uid = from_kuid_munged(current_user_ns(),
-					      sock_i_uid(skb->sk));
+					      sock_i_uid(skb_to_full_sk(skb)));
 	}
 
 	/* checks if there is a pending inactive notification*/
@@ -456,6 +457,7 @@ static void idletimer_tg_destroy(const struct xt_tgdtor_param *par)
 		del_timer_sync(&info->timer->timer);
 		sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
 		unregister_pm_notifier(&info->timer->pm_nb);
+		cancel_work_sync(&info->timer->work);
 		kfree(info->timer->attr.attr.name);
 		kfree(info->timer);
 	} else {
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index b7c43def0dc6..00f798b20b20 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -104,7 +104,7 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 	tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
 	tcp_hdrlen = tcph->doff * 4;
 
-	if (len < tcp_hdrlen)
+	if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr))
 		return -1;
 
 	if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
@@ -156,6 +156,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 	if (len > tcp_hdrlen)
 		return 0;
 
+	/* tcph->doff has 4 bits, do not wrap it to 0 */
+	if (tcp_hdrlen >= 15 * 4)
+		return 0;
+
 	/*
 	 * MSS Option not found ?! add it..
 	 */
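The guard makes sense once the arithmetic is spelled out: tcph->doff is a 4-bit count of 32-bit words, so the largest encodable TCP header is 15 * 4 = 60 bytes. Appending a 4-byte MSS option to an already-maximal header would need doff = 16, which wraps to 0 and corrupts the header. A small self-contained demonstration of the bound:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t doff = 15;		/* maximum: 15 32-bit words */
	unsigned int hdrlen = doff * 4;	/* = 60 bytes */

	assert(hdrlen == 15 * 4);

	/* one more 4-byte option would need doff = 16, but the field
	 * is 4 bits wide, so the value wraps to 0 */
	uint8_t wrapped = (uint8_t)((doff + 1) & 0xf);
	assert(wrapped == 0);

	/* hence the new rule: never grow a header already at 60 bytes */
	return 0;
}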
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
index 3bf0c59dab2f..0f5628a59917 100644
--- a/net/netfilter/xt_qtaguid.c
+++ b/net/netfilter/xt_qtaguid.c
@@ -1814,8 +1814,11 @@ ret_res:
 }
 
 #ifdef DDEBUG
-/* This function is not in xt_qtaguid_print.c because of locks visibility */
-static void prdebug_full_state(int indent_level, const char *fmt, ...)
+/*
+ * This function is not in xt_qtaguid_print.c because of locks visibility.
+ * The lock of sock_tag_list must be aquired before calling this function
+ */
+static void prdebug_full_state_locked(int indent_level, const char *fmt, ...)
 {
 	va_list args;
 	char *fmt_buff;
@@ -1836,16 +1839,12 @@ static void prdebug_full_state(int indent_level, const char *fmt, ...)
 	kfree(buff);
 	va_end(args);
 
-	spin_lock_bh(&sock_tag_list_lock);
 	prdebug_sock_tag_tree(indent_level, &sock_tag_tree);
-	spin_unlock_bh(&sock_tag_list_lock);
 
-	spin_lock_bh(&sock_tag_list_lock);
 	spin_lock_bh(&uid_tag_data_tree_lock);
 	prdebug_uid_tag_data_tree(indent_level, &uid_tag_data_tree);
 	prdebug_proc_qtu_data_tree(indent_level, &proc_qtu_data_tree);
 	spin_unlock_bh(&uid_tag_data_tree_lock);
-	spin_unlock_bh(&sock_tag_list_lock);
 
 	spin_lock_bh(&iface_stat_list_lock);
 	prdebug_iface_stat_list(indent_level, &iface_stat_list);
@@ -1854,7 +1853,7 @@ static void prdebug_full_state(int indent_level, const char *fmt, ...)
 	pr_debug("qtaguid: %s(): }\n", __func__);
 }
 #else
-static void prdebug_full_state(int indent_level, const char *fmt, ...) {}
+static void prdebug_full_state_locked(int indent_level, const char *fmt, ...) {}
 #endif
 
 struct proc_ctrl_print_info {
@@ -1977,8 +1976,11 @@ static int qtaguid_ctrl_proc_show(struct seq_file *m, void *v)
 			(u64)atomic64_read(&qtu_events.match_no_sk),
 			(u64)atomic64_read(&qtu_events.match_no_sk_file));
 
-		/* Count the following as part of the last item_index */
-		prdebug_full_state(0, "proc ctrl");
+		/* Count the following as part of the last item_index. No need
+		 * to lock the sock_tag_list here since it is already locked when
+		 * starting the seq_file operation
+		 */
+		prdebug_full_state_locked(0, "proc ctrl");
 	}
 
 	return 0;
@@ -2887,8 +2889,10 @@ static int qtudev_release(struct inode *inode, struct file *file)
 
 	sock_tag_tree_erase(&st_to_free_tree);
 
-	prdebug_full_state(0, "%s(): pid=%u tgid=%u", __func__,
+	spin_lock_bh(&sock_tag_list_lock);
+	prdebug_full_state_locked(0, "%s(): pid=%u tgid=%u", __func__,
 			   current->pid, current->tgid);
+	spin_unlock_bh(&sock_tag_list_lock);
 	return 0;
 }
 
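The rename to prdebug_full_state_locked() follows the usual kernel convention: a _locked suffix documents that the caller must already hold the relevant lock, so the function no longer takes it itself. That removes the old double-acquisition of sock_tag_list_lock and lets the seq_file path, which already holds it, call in safely. A minimal sketch of the split with hypothetical names (a pthread mutex standing in for the spinlock):

#include <pthread.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static int list_len;

/* caller must hold list_lock */
static int list_len_locked(void)
{
	return list_len;
}

/* convenience wrapper for callers that do not hold the lock */
static int list_len_get(void)
{
	int len;

	pthread_mutex_lock(&list_lock);
	len = list_len_locked();
	pthread_mutex_unlock(&list_lock);
	return len;
}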
diff --git a/net/netlink/Kconfig b/net/netlink/Kconfig
index 2c5e95e9bfbd..5d6e8c05b3d4 100644
--- a/net/netlink/Kconfig
+++ b/net/netlink/Kconfig
@@ -2,15 +2,6 @@
 # Netlink Sockets
 #
 
-config NETLINK_MMAP
-	bool "NETLINK: mmaped IO"
-	---help---
-	  This option enables support for memory mapped netlink IO. This
-	  reduces overhead by avoiding copying data between kernel- and
-	  userspace.
-
-	  If unsure, say N.
-
 config NETLINK_DIAG
 	tristate "NETLINK: socket monitoring interface"
 	default n
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 360700a2f46c..acfb16fdcd55 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -225,7 +225,7 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb,
 
 	dev_hold(dev);
 
-	if (netlink_skb_is_mmaped(skb) || is_vmalloc_addr(skb->head))
+	if (is_vmalloc_addr(skb->head))
 		nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
 	else
 		nskb = skb_clone(skb, GFP_ATOMIC);
@@ -300,610 +300,8 @@ static void netlink_rcv_wake(struct sock *sk)
 	wake_up_interruptible(&nlk->wait);
 }
 
-#ifdef CONFIG_NETLINK_MMAP
-static bool netlink_rx_is_mmaped(struct sock *sk)
-{
-	return nlk_sk(sk)->rx_ring.pg_vec != NULL;
-}
-
-static bool netlink_tx_is_mmaped(struct sock *sk)
-{
-	return nlk_sk(sk)->tx_ring.pg_vec != NULL;
-}
-
-static __pure struct page *pgvec_to_page(const void *addr)
-{
-	if (is_vmalloc_addr(addr))
-		return vmalloc_to_page(addr);
-	else
-		return virt_to_page(addr);
-}
-
-static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
-{
-	unsigned int i;
-
-	for (i = 0; i < len; i++) {
-		if (pg_vec[i] != NULL) {
-			if (is_vmalloc_addr(pg_vec[i]))
-				vfree(pg_vec[i]);
-			else
-				free_pages((unsigned long)pg_vec[i], order);
-		}
-	}
-	kfree(pg_vec);
-}
-
-static void *alloc_one_pg_vec_page(unsigned long order)
-{
-	void *buffer;
-	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
-			  __GFP_NOWARN | __GFP_NORETRY;
-
-	buffer = (void *)__get_free_pages(gfp_flags, order);
-	if (buffer != NULL)
-		return buffer;
-
-	buffer = vzalloc((1 << order) * PAGE_SIZE);
-	if (buffer != NULL)
-		return buffer;
-
-	gfp_flags &= ~__GFP_NORETRY;
-	return (void *)__get_free_pages(gfp_flags, order);
-}
-
-static void **alloc_pg_vec(struct netlink_sock *nlk,
-			   struct nl_mmap_req *req, unsigned int order)
-{
-	unsigned int block_nr = req->nm_block_nr;
-	unsigned int i;
-	void **pg_vec;
-
-	pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
-	if (pg_vec == NULL)
-		return NULL;
-
-	for (i = 0; i < block_nr; i++) {
-		pg_vec[i] = alloc_one_pg_vec_page(order);
-		if (pg_vec[i] == NULL)
-			goto err1;
-	}
-
-	return pg_vec;
-err1:
-	free_pg_vec(pg_vec, order, block_nr);
-	return NULL;
-}
-
-
-static void
-__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
-		   unsigned int order)
-{
-	struct netlink_sock *nlk = nlk_sk(sk);
-	struct sk_buff_head *queue;
-	struct netlink_ring *ring;
-
-	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
-	ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
-
-	spin_lock_bh(&queue->lock);
-
-	ring->frame_max = req->nm_frame_nr - 1;
-	ring->head = 0;
-	ring->frame_size = req->nm_frame_size;
-	ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;
-
-	swap(ring->pg_vec_len, req->nm_block_nr);
-	swap(ring->pg_vec_order, order);
-	swap(ring->pg_vec, pg_vec);
-
-	__skb_queue_purge(queue);
-	spin_unlock_bh(&queue->lock);
-
-	WARN_ON(atomic_read(&nlk->mapped));
-
-	if (pg_vec)
-		free_pg_vec(pg_vec, order, req->nm_block_nr);
-}
-
-static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
-			    bool tx_ring)
-{
-	struct netlink_sock *nlk = nlk_sk(sk);
-	struct netlink_ring *ring;
-	void **pg_vec = NULL;
-	unsigned int order = 0;
-
-	ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
-
-	if (atomic_read(&nlk->mapped))
-		return -EBUSY;
-	if (atomic_read(&ring->pending))
-		return -EBUSY;
-
-	if (req->nm_block_nr) {
-		if (ring->pg_vec != NULL)
-			return -EBUSY;
-
-		if ((int)req->nm_block_size <= 0)
-			return -EINVAL;
-		if (!PAGE_ALIGNED(req->nm_block_size))
-			return -EINVAL;
-		if (req->nm_frame_size < NL_MMAP_HDRLEN)
-			return -EINVAL;
-		if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
-			return -EINVAL;
-
-		ring->frames_per_block = req->nm_block_size /
-					 req->nm_frame_size;
-		if (ring->frames_per_block == 0)
-			return -EINVAL;
-		if (ring->frames_per_block * req->nm_block_nr !=
-		    req->nm_frame_nr)
-			return -EINVAL;
-
-		order = get_order(req->nm_block_size);
-		pg_vec = alloc_pg_vec(nlk, req, order);
-		if (pg_vec == NULL)
-			return -ENOMEM;
-	} else {
-		if (req->nm_frame_nr)
-			return -EINVAL;
-	}
-
-	mutex_lock(&nlk->pg_vec_lock);
-	if (atomic_read(&nlk->mapped) == 0) {
-		__netlink_set_ring(sk, req, tx_ring, pg_vec, order);
-		mutex_unlock(&nlk->pg_vec_lock);
-		return 0;
-	}
-
-	mutex_unlock(&nlk->pg_vec_lock);
-
-	if (pg_vec)
-		free_pg_vec(pg_vec, order, req->nm_block_nr);
-
-	return -EBUSY;
-}
-
-static void netlink_mm_open(struct vm_area_struct *vma)
-{
-	struct file *file = vma->vm_file;
-	struct socket *sock = file->private_data;
-	struct sock *sk = sock->sk;
-
-	if (sk)
-		atomic_inc(&nlk_sk(sk)->mapped);
-}
-
-static void netlink_mm_close(struct vm_area_struct *vma)
-{
-	struct file *file = vma->vm_file;
-	struct socket *sock = file->private_data;
-	struct sock *sk = sock->sk;
-
-	if (sk)
-		atomic_dec(&nlk_sk(sk)->mapped);
-}
-
-static const struct vm_operations_struct netlink_mmap_ops = {
-	.open = netlink_mm_open,
-	.close = netlink_mm_close,
-};
-
-static int netlink_mmap(struct file *file, struct socket *sock,
-			struct vm_area_struct *vma)
-{
-	struct sock *sk = sock->sk;
-	struct netlink_sock *nlk = nlk_sk(sk);
-	struct netlink_ring *ring;
-	unsigned long start, size, expected;
-	unsigned int i;
-	int err = -EINVAL;
-
-	if (vma->vm_pgoff)
-		return -EINVAL;
-
-	mutex_lock(&nlk->pg_vec_lock);
-
-	expected = 0;
-	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
-		if (ring->pg_vec == NULL)
-			continue;
-		expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
-	}
-
-	if (expected == 0)
-		goto out;
-
-	size = vma->vm_end - vma->vm_start;
-	if (size != expected)
-		goto out;
-
-	start = vma->vm_start;
-	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
-		if (ring->pg_vec == NULL)
-			continue;
-
-		for (i = 0; i < ring->pg_vec_len; i++) {
-			struct page *page;
-			void *kaddr = ring->pg_vec[i];
-			unsigned int pg_num;
-
-			for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
-				page = pgvec_to_page(kaddr);
-				err = vm_insert_page(vma, start, page);
-				if (err < 0)
-					goto out;
-				start += PAGE_SIZE;
-				kaddr += PAGE_SIZE;
-			}
-		}
-	}
-
-	atomic_inc(&nlk->mapped);
-	vma->vm_ops = &netlink_mmap_ops;
-	err = 0;
-out:
-	mutex_unlock(&nlk->pg_vec_lock);
-	return err;
-}
-
-static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
-{
-#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
-	struct page *p_start, *p_end;
-
-	/* First page is flushed through netlink_{get,set}_status */
-	p_start = pgvec_to_page(hdr + PAGE_SIZE);
-	p_end = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
-	while (p_start <= p_end) {
-		flush_dcache_page(p_start);
-		p_start++;
-	}
-#endif
-}
-
-static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
-{
-	smp_rmb();
-	flush_dcache_page(pgvec_to_page(hdr));
-	return hdr->nm_status;
-}
-
-static void netlink_set_status(struct nl_mmap_hdr *hdr,
-			       enum nl_mmap_status status)
-{
-	smp_mb();
-	hdr->nm_status = status;
-	flush_dcache_page(pgvec_to_page(hdr));
-}
-
-static struct nl_mmap_hdr *
-__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
-{
-	unsigned int pg_vec_pos, frame_off;
-
-	pg_vec_pos = pos / ring->frames_per_block;
-	frame_off = pos % ring->frames_per_block;
-
-	return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
-}
-
-static struct nl_mmap_hdr *
-netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
-		     enum nl_mmap_status status)
-{
-	struct nl_mmap_hdr *hdr;
-
-	hdr = __netlink_lookup_frame(ring, pos);
-	if (netlink_get_status(hdr) != status)
-		return NULL;
-
-	return hdr;
-}
-
-static struct nl_mmap_hdr *
-netlink_current_frame(const struct netlink_ring *ring,
-		      enum nl_mmap_status status)
-{
-	return netlink_lookup_frame(ring, ring->head, status);
-}
-
-static void netlink_increment_head(struct netlink_ring *ring)
-{
-	ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
-}
-
-static void netlink_forward_ring(struct netlink_ring *ring)
-{
-	unsigned int head = ring->head;
-	const struct nl_mmap_hdr *hdr;
-
-	do {
-		hdr = __netlink_lookup_frame(ring, ring->head);
-		if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
-			break;
-		if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
-			break;
-		netlink_increment_head(ring);
-	} while (ring->head != head);
-}
-
-static bool netlink_has_valid_frame(struct netlink_ring *ring)
-{
-	unsigned int head = ring->head, pos = head;
-	const struct nl_mmap_hdr *hdr;
-
-	do {
-		hdr = __netlink_lookup_frame(ring, pos);
-		if (hdr->nm_status == NL_MMAP_STATUS_VALID)
-			return true;
-		pos = pos != 0 ? pos - 1 : ring->frame_max;
-	} while (pos != head);
-
-	return false;
-}
-
-static bool netlink_dump_space(struct netlink_sock *nlk)
-{
-	struct netlink_ring *ring = &nlk->rx_ring;
-	struct nl_mmap_hdr *hdr;
-	unsigned int n;
-
-	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
-	if (hdr == NULL)
-		return false;
-
-	n = ring->head + ring->frame_max / 2;
-	if (n > ring->frame_max)
-		n -= ring->frame_max;
-
-	hdr = __netlink_lookup_frame(ring, n);
-
-	return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
-}
-
-static unsigned int netlink_poll(struct file *file, struct socket *sock,
-				 poll_table *wait)
-{
-	struct sock *sk = sock->sk;
-	struct netlink_sock *nlk = nlk_sk(sk);
-	unsigned int mask;
-	int err;
-
-	if (nlk->rx_ring.pg_vec != NULL) {
-		/* Memory mapped sockets don't call recvmsg(), so flow control
-		 * for dumps is performed here. A dump is allowed to continue
-		 * if at least half the ring is unused.
-		 */
-		while (nlk->cb_running && netlink_dump_space(nlk)) {
-			err = netlink_dump(sk);
-			if (err < 0) {
-				sk->sk_err = -err;
-				sk->sk_error_report(sk);
-				break;
-			}
-		}
-		netlink_rcv_wake(sk);
-	}
-
-	mask = datagram_poll(file, sock, wait);
-
-	/* We could already have received frames in the normal receive
-	 * queue, that will show up as NL_MMAP_STATUS_COPY in the ring,
-	 * so if mask contains pollin/etc already, there's no point
-	 * walking the ring.
-	 */
-	if ((mask & (POLLIN | POLLRDNORM)) != (POLLIN | POLLRDNORM)) {
-		spin_lock_bh(&sk->sk_receive_queue.lock);
-		if (nlk->rx_ring.pg_vec) {
-			if (netlink_has_valid_frame(&nlk->rx_ring))
-				mask |= POLLIN | POLLRDNORM;
-		}
-		spin_unlock_bh(&sk->sk_receive_queue.lock);
-	}
-
-	spin_lock_bh(&sk->sk_write_queue.lock);
-	if (nlk->tx_ring.pg_vec) {
-		if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
-			mask |= POLLOUT | POLLWRNORM;
-	}
-	spin_unlock_bh(&sk->sk_write_queue.lock);
-
-	return mask;
-}
-
-static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
-{
-	return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
-}
-
-static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
-				   struct netlink_ring *ring,
-				   struct nl_mmap_hdr *hdr)
-{
-	unsigned int size;
-	void *data;
-
-	size = ring->frame_size - NL_MMAP_HDRLEN;
-	data = (void *)hdr + NL_MMAP_HDRLEN;
-
-	skb->head = data;
-	skb->data = data;
-	skb_reset_tail_pointer(skb);
-	skb->end = skb->tail + size;
-	skb->len = 0;
-
-	skb->destructor = netlink_skb_destructor;
-	NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
-	NETLINK_CB(skb).sk = sk;
-}
-
-static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
-				u32 dst_portid, u32 dst_group,
-				struct scm_cookie *scm)
-{
-	struct netlink_sock *nlk = nlk_sk(sk);
-	struct netlink_ring *ring;
-	struct nl_mmap_hdr *hdr;
-	struct sk_buff *skb;
-	unsigned int maxlen;
-	int err = 0, len = 0;
-
-	mutex_lock(&nlk->pg_vec_lock);
-
-	ring = &nlk->tx_ring;
-	maxlen = ring->frame_size - NL_MMAP_HDRLEN;
-
-	do {
-		unsigned int nm_len;
-
-		hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
-		if (hdr == NULL) {
-			if (!(msg->msg_flags & MSG_DONTWAIT) &&
-			    atomic_read(&nlk->tx_ring.pending))
-				schedule();
-			continue;
-		}
-
-		nm_len = ACCESS_ONCE(hdr->nm_len);
-		if (nm_len > maxlen) {
-			err = -EINVAL;
-			goto out;
-		}
-
-		netlink_frame_flush_dcache(hdr, nm_len);
-
-		skb = alloc_skb(nm_len, GFP_KERNEL);
-		if (skb == NULL) {
-			err = -ENOBUFS;
-			goto out;
-		}
-		__skb_put(skb, nm_len);
-		memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
-		netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
-
-		netlink_increment_head(ring);
-
-		NETLINK_CB(skb).portid = nlk->portid;
-		NETLINK_CB(skb).dst_group = dst_group;
-		NETLINK_CB(skb).creds = scm->creds;
-
-		err = security_netlink_send(sk, skb);
-		if (err) {
-			kfree_skb(skb);
-			goto out;
-		}
-
-		if (unlikely(dst_group)) {
-			atomic_inc(&skb->users);
-			netlink_broadcast(sk, skb, dst_portid, dst_group,
-					  GFP_KERNEL);
-		}
-		err = netlink_unicast(sk, skb, dst_portid,
-				      msg->msg_flags & MSG_DONTWAIT);
-		if (err < 0)
-			goto out;
-		len += err;
-
-	} while (hdr != NULL ||
-		 (!(msg->msg_flags & MSG_DONTWAIT) &&
-		  atomic_read(&nlk->tx_ring.pending)));
-
-	if (len > 0)
-		err = len;
-out:
-	mutex_unlock(&nlk->pg_vec_lock);
-	return err;
-}
-
-static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
-{
-	struct nl_mmap_hdr *hdr;
-
-	hdr = netlink_mmap_hdr(skb);
-	hdr->nm_len = skb->len;
-	hdr->nm_group = NETLINK_CB(skb).dst_group;
-	hdr->nm_pid = NETLINK_CB(skb).creds.pid;
-	hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
-	hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
-	netlink_frame_flush_dcache(hdr, hdr->nm_len);
-	netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
-
-	NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
-	kfree_skb(skb);
-}
-
-static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
-{
-	struct netlink_sock *nlk = nlk_sk(sk);
-	struct netlink_ring *ring = &nlk->rx_ring;
-	struct nl_mmap_hdr *hdr;
-
-	spin_lock_bh(&sk->sk_receive_queue.lock);
-	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
-	if (hdr == NULL) {
-		spin_unlock_bh(&sk->sk_receive_queue.lock);
-		kfree_skb(skb);
-		netlink_overrun(sk);
-		return;
-	}
-	netlink_increment_head(ring);
-	__skb_queue_tail(&sk->sk_receive_queue, skb);
-	spin_unlock_bh(&sk->sk_receive_queue.lock);
-
-	hdr->nm_len = skb->len;
-	hdr->nm_group = NETLINK_CB(skb).dst_group;
-	hdr->nm_pid = NETLINK_CB(skb).creds.pid;
-	hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
-	hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
-	netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
-}
-
-#else /* CONFIG_NETLINK_MMAP */
-#define netlink_rx_is_mmaped(sk)	false
-#define netlink_tx_is_mmaped(sk)	false
-#define netlink_mmap			sock_no_mmap
-#define netlink_poll			datagram_poll
-#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, scm)	0
-#endif /* CONFIG_NETLINK_MMAP */
-
 static void netlink_skb_destructor(struct sk_buff *skb)
 {
-#ifdef CONFIG_NETLINK_MMAP
-	struct nl_mmap_hdr *hdr;
-	struct netlink_ring *ring;
-	struct sock *sk;
-
-	/* If a packet from the kernel to userspace was freed because of an
-	 * error without being delivered to userspace, the kernel must reset
-	 * the status. In the direction userspace to kernel, the status is
-	 * always reset here after the packet was processed and freed.
-	 */
-	if (netlink_skb_is_mmaped(skb)) {
-		hdr = netlink_mmap_hdr(skb);
-		sk = NETLINK_CB(skb).sk;
-
-		if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
-			netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
-			ring = &nlk_sk(sk)->tx_ring;
-		} else {
-			if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
-				hdr->nm_len = 0;
-				netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
-			}
-			ring = &nlk_sk(sk)->rx_ring;
-		}
-
-		WARN_ON(atomic_read(&ring->pending) == 0);
-		atomic_dec(&ring->pending);
-		sock_put(sk);
-
-		skb->head = NULL;
-	}
-#endif
 	if (is_vmalloc_addr(skb->head)) {
 		if (!skb->cloned ||
 		    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
@@ -936,18 +334,6 @@ static void netlink_sock_destruct(struct sock *sk)
 	}
 
 	skb_queue_purge(&sk->sk_receive_queue);
-#ifdef CONFIG_NETLINK_MMAP
-	if (1) {
-		struct nl_mmap_req req;
-
-		memset(&req, 0, sizeof(req));
-		if (nlk->rx_ring.pg_vec)
-			__netlink_set_ring(sk, &req, false, NULL, 0);
-		memset(&req, 0, sizeof(req));
-		if (nlk->tx_ring.pg_vec)
-			__netlink_set_ring(sk, &req, true, NULL, 0);
-	}
-#endif /* CONFIG_NETLINK_MMAP */
 
 	if (!sock_flag(sk, SOCK_DEAD)) {
 		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
@@ -1201,9 +587,6 @@ static int __netlink_create(struct net *net, struct socket *sock,
 		mutex_init(nlk->cb_mutex);
 	}
 	init_waitqueue_head(&nlk->wait);
-#ifdef CONFIG_NETLINK_MMAP
-	mutex_init(&nlk->pg_vec_lock);
-#endif
 
 	sk->sk_destruct = netlink_sock_destruct;
 	sk->sk_protocol = protocol;
@@ -1745,8 +1128,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
 	nlk = nlk_sk(sk);
 
 	if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-	     test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
-	    !netlink_skb_is_mmaped(skb)) {
+	     test_bit(NETLINK_S_CONGESTED, &nlk->state))) {
 		DECLARE_WAITQUEUE(wait, current);
 		if (!*timeo) {
 			if (!ssk || netlink_is_kernel(ssk))
@@ -1784,14 +1166,7 @@ static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
 
 	netlink_deliver_tap(skb);
 
-#ifdef CONFIG_NETLINK_MMAP
-	if (netlink_skb_is_mmaped(skb))
-		netlink_queue_mmaped_skb(sk, skb);
-	else if (netlink_rx_is_mmaped(sk))
-		netlink_ring_set_copied(sk, skb);
-	else
-#endif /* CONFIG_NETLINK_MMAP */
-		skb_queue_tail(&sk->sk_receive_queue, skb);
+	skb_queue_tail(&sk->sk_receive_queue, skb);
 	sk->sk_data_ready(sk);
 	return len;
 }
@@ -1815,9 +1190,6 @@ static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
 	int delta;
 
 	WARN_ON(skb->sk != NULL);
-	if (netlink_skb_is_mmaped(skb))
-		return skb;
-
 	delta = skb->end - skb->tail;
 	if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
 		return skb;
@@ -1897,71 +1269,6 @@ struct sk_buff *__netlink_alloc_skb(struct sock *ssk, unsigned int size,
 				 unsigned int ldiff, u32 dst_portid,
 				 gfp_t gfp_mask)
 {
-#ifdef CONFIG_NETLINK_MMAP
-	unsigned int maxlen, linear_size;
-	struct sock *sk = NULL;
-	struct sk_buff *skb;
-	struct netlink_ring *ring;
-	struct nl_mmap_hdr *hdr;
-
-	sk = netlink_getsockbyportid(ssk, dst_portid);
-	if (IS_ERR(sk))
-		goto out;
-
-	ring = &nlk_sk(sk)->rx_ring;
-	/* fast-path without atomic ops for common case: non-mmaped receiver */
-	if (ring->pg_vec == NULL)
-		goto out_put;
-
-	/* We need to account the full linear size needed as a ring
-	 * slot cannot have non-linear parts.
-	 */
-	linear_size = size + ldiff;
-	if (ring->frame_size - NL_MMAP_HDRLEN < linear_size)
-		goto out_put;
-
-	skb = alloc_skb_head(gfp_mask);
-	if (skb == NULL)
-		goto err1;
-
-	spin_lock_bh(&sk->sk_receive_queue.lock);
-	/* check again under lock */
-	if (ring->pg_vec == NULL)
-		goto out_free;
-
-	/* check again under lock */
-	maxlen = ring->frame_size - NL_MMAP_HDRLEN;
-	if (maxlen < linear_size)
-		goto out_free;
-
-	netlink_forward_ring(ring);
-	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
-	if (hdr == NULL)
-		goto err2;
-
-	netlink_ring_setup_skb(skb, sk, ring, hdr);
-	netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
-	atomic_inc(&ring->pending);
-	netlink_increment_head(ring);
-
-	spin_unlock_bh(&sk->sk_receive_queue.lock);
-	return skb;
-
-err2:
-	kfree_skb(skb);
-	spin_unlock_bh(&sk->sk_receive_queue.lock);
-	netlink_overrun(sk);
-err1:
-	sock_put(sk);
-	return NULL;
-
-out_free:
-	kfree_skb(skb);
-	spin_unlock_bh(&sk->sk_receive_queue.lock);
-out_put:
-	sock_put(sk);
-out:
-#endif
 	return alloc_skb(size, gfp_mask);
 }
 EXPORT_SYMBOL_GPL(__netlink_alloc_skb);
@@ -2242,8 +1549,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
 	if (level != SOL_NETLINK)
 		return -ENOPROTOOPT;
 
-	if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
-	    optlen >= sizeof(int) &&
+	if (optlen >= sizeof(int) &&
 	    get_user(val, (unsigned int __user *)optval))
 		return -EFAULT;
 
@@ -2296,25 +1602,6 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
 		}
 		err = 0;
 		break;
-#ifdef CONFIG_NETLINK_MMAP
-	case NETLINK_RX_RING:
-	case NETLINK_TX_RING: {
-		struct nl_mmap_req req;
-
-		/* Rings might consume more memory than queue limits, require
-		 * CAP_NET_ADMIN.
-		 */
-		if (!capable(CAP_NET_ADMIN))
-			return -EPERM;
-		if (optlen < sizeof(req))
-			return -EINVAL;
-		if (copy_from_user(&req, optval, sizeof(req)))
-			return -EFAULT;
-		err = netlink_set_ring(sk, &req,
-				       optname == NETLINK_TX_RING);
-		break;
-	}
-#endif /* CONFIG_NETLINK_MMAP */
 	case NETLINK_LISTEN_ALL_NSID:
 		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST))
 			return -EPERM;
@@ -2484,18 +1771,6 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 		smp_rmb();
 	}
 
-	/* It's a really convoluted way for userland to ask for mmaped
-	 * sendmsg(), but that's what we've got...
-	 */
-	if (netlink_tx_is_mmaped(sk) &&
-	    iter_is_iovec(&msg->msg_iter) &&
-	    msg->msg_iter.nr_segs == 1 &&
-	    msg->msg_iter.iov->iov_base == NULL) {
-		err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
-					   &scm);
-		goto out;
-	}
-
 	err = -EMSGSIZE;
 	if (len > sk->sk_sndbuf - 32)
 		goto out;
@@ -2812,8 +2087,7 @@ static int netlink_dump(struct sock *sk)
 		goto errout_skb;
 	}
 
-	if (!netlink_rx_is_mmaped(sk) &&
-	    atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
+	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 		goto errout_skb;
 
 	/* NLMSG_GOODSIZE is small to avoid high order allocations being
@@ -2833,7 +2107,7 @@ static int netlink_dump(struct sock *sk)
 	if (!skb) {
 		alloc_size = alloc_min_size;
 		skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
-					(GFP_KERNEL & ~__GFP_DIRECT_RECLAIM));
+					GFP_KERNEL);
 	}
 	if (!skb)
 		goto errout_skb;
@@ -2902,16 +2176,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
 	struct netlink_sock *nlk;
 	int ret;
 
-	/* Memory mapped dump requests need to be copied to avoid looping
-	 * on the pending state in netlink_mmap_sendmsg() while the CB hold
-	 * a reference to the skb.
-	 */
-	if (netlink_skb_is_mmaped(skb)) {
-		skb = skb_copy(skb, GFP_KERNEL);
-		if (skb == NULL)
-			return -ENOBUFS;
-	} else
-		atomic_inc(&skb->users);
+	atomic_inc(&skb->users);
 
 	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
 	if (sk == NULL) {
@@ -3255,7 +2520,7 @@ static const struct proto_ops netlink_ops = {
 	.socketpair =	sock_no_socketpair,
 	.accept =	sock_no_accept,
 	.getname =	netlink_getname,
-	.poll =		netlink_poll,
+	.poll =		datagram_poll,
 	.ioctl =	sock_no_ioctl,
 	.listen =	sock_no_listen,
 	.shutdown =	sock_no_shutdown,
@@ -3263,7 +2528,7 @@ static const struct proto_ops netlink_ops = {
 	.getsockopt =	netlink_getsockopt,
 	.sendmsg =	netlink_sendmsg,
 	.recvmsg =	netlink_recvmsg,
-	.mmap =		netlink_mmap,
+	.mmap =		sock_no_mmap,
 	.sendpage =	sock_no_sendpage,
 };
 
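With the mmap code gone, every netlink socket is back on the plain skb queue: .poll is datagram_poll and .mmap is sock_no_mmap, so mapping ring buffers over a netlink fd now simply fails. A userspace probe of that behaviour (a sketch; sock_no_mmap has historically returned -ENODEV, but treat the exact errno as an assumption):

#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return 1;

	/* ring mappings are no longer supported on netlink sockets */
	void *ring = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0);
	if (ring == MAP_FAILED)
		printf("mmap on netlink fd rejected, errno=%d (expected)\n",
		       errno);

	close(fd);
	return 0;
}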
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index df32cb92d9fc..ea4600aea6b0 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -45,12 +45,6 @@ struct netlink_sock {
 	int			(*netlink_bind)(struct net *net, int group);
 	void			(*netlink_unbind)(struct net *net, int group);
 	struct module		*module;
-#ifdef CONFIG_NETLINK_MMAP
-	struct mutex		pg_vec_lock;
-	struct netlink_ring	rx_ring;
-	struct netlink_ring	tx_ring;
-	atomic_t		mapped;
-#endif /* CONFIG_NETLINK_MMAP */
 
 	struct rhash_head	node;
 	struct rcu_head		rcu;
@@ -62,15 +56,6 @@ static inline struct netlink_sock *nlk_sk(struct sock *sk)
 	return container_of(sk, struct netlink_sock, sk);
 }
 
-static inline bool netlink_skb_is_mmaped(const struct sk_buff *skb)
-{
-#ifdef CONFIG_NETLINK_MMAP
-	return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
-#else
-	return false;
-#endif /* CONFIG_NETLINK_MMAP */
-}
-
 struct netlink_table {
 	struct rhashtable	hash;
 	struct hlist_head	mc_list;
diff --git a/net/netlink/diag.c b/net/netlink/diag.c
index 3ee63a3cff30..8dd836a8dd60 100644
--- a/net/netlink/diag.c
+++ b/net/netlink/diag.c
@@ -8,41 +8,6 @@
 
 #include "af_netlink.h"
 
-#ifdef CONFIG_NETLINK_MMAP
-static int sk_diag_put_ring(struct netlink_ring *ring, int nl_type,
-			    struct sk_buff *nlskb)
-{
-	struct netlink_diag_ring ndr;
-
-	ndr.ndr_block_size = ring->pg_vec_pages << PAGE_SHIFT;
-	ndr.ndr_block_nr = ring->pg_vec_len;
-	ndr.ndr_frame_size = ring->frame_size;
-	ndr.ndr_frame_nr = ring->frame_max + 1;
-
-	return nla_put(nlskb, nl_type, sizeof(ndr), &ndr);
-}
-
-static int sk_diag_put_rings_cfg(struct sock *sk, struct sk_buff *nlskb)
-{
-	struct netlink_sock *nlk = nlk_sk(sk);
-	int ret;
-
-	mutex_lock(&nlk->pg_vec_lock);
-	ret = sk_diag_put_ring(&nlk->rx_ring, NETLINK_DIAG_RX_RING, nlskb);
-	if (!ret)
-		ret = sk_diag_put_ring(&nlk->tx_ring, NETLINK_DIAG_TX_RING,
-				       nlskb);
-	mutex_unlock(&nlk->pg_vec_lock);
-
-	return ret;
-}
-#else
-static int sk_diag_put_rings_cfg(struct sock *sk, struct sk_buff *nlskb)
-{
-	return 0;
-}
-#endif
-
 static int sk_diag_dump_groups(struct sock *sk, struct sk_buff *nlskb)
 {
 	struct netlink_sock *nlk = nlk_sk(sk);
@@ -87,10 +52,6 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
 	    sock_diag_put_meminfo(sk, skb, NETLINK_DIAG_MEMINFO))
 		goto out_nlmsg_trim;
 
-	if ((req->ndiag_show & NDIAG_SHOW_RING_CFG) &&
-	    sk_diag_put_rings_cfg(sk, skb))
-		goto out_nlmsg_trim;
-
 	nlmsg_end(skb, nlh);
 	return 0;
 
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 1fe3d3b362c0..c5a2c7e733b3 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -969,6 +969,8 @@ static void nfc_release(struct device *d)
 		kfree(se);
 	}
 
+	ida_simple_remove(&nfc_index_ida, dev->idx);
+
 	kfree(dev);
 }
 
@@ -1043,6 +1045,7 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
 				    int tx_headroom, int tx_tailroom)
 {
 	struct nfc_dev *dev;
+	int rc;
 
 	if (!ops->start_poll || !ops->stop_poll || !ops->activate_target ||
 	    !ops->deactivate_target || !ops->im_transceive)
@@ -1055,6 +1058,15 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
 	if (!dev)
 		return NULL;
 
+	rc = ida_simple_get(&nfc_index_ida, 0, 0, GFP_KERNEL);
+	if (rc < 0)
+		goto err_free_dev;
+	dev->idx = rc;
+
+	dev->dev.class = &nfc_class;
+	dev_set_name(&dev->dev, "nfc%d", dev->idx);
+	device_initialize(&dev->dev);
+
 	dev->ops = ops;
 	dev->supported_protocols = supported_protocols;
 	dev->tx_headroom = tx_headroom;
@@ -1077,6 +1089,11 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
 	}
 
 	return dev;
+
+err_free_dev:
+	kfree(dev);
+
+	return ERR_PTR(rc);
 }
 EXPORT_SYMBOL(nfc_allocate_device);
 
@@ -1091,14 +1108,6 @@ int nfc_register_device(struct nfc_dev *dev)
 
 	pr_debug("dev_name=%s\n", dev_name(&dev->dev));
 
-	dev->idx = ida_simple_get(&nfc_index_ida, 0, 0, GFP_KERNEL);
-	if (dev->idx < 0)
-		return dev->idx;
-
-	dev->dev.class = &nfc_class;
-	dev_set_name(&dev->dev, "nfc%d", dev->idx);
-	device_initialize(&dev->dev);
-
 	mutex_lock(&nfc_devlist_mutex);
 	nfc_devlist_generation++;
 	rc = device_add(&dev->dev);
@@ -1136,12 +1145,10 @@ EXPORT_SYMBOL(nfc_register_device);
  */
 void nfc_unregister_device(struct nfc_dev *dev)
 {
-	int rc, id;
+	int rc;
 
 	pr_debug("dev_name=%s\n", dev_name(&dev->dev));
 
-	id = dev->idx;
-
 	if (dev->rfkill) {
 		rfkill_unregister(dev->rfkill);
 		rfkill_destroy(dev->rfkill);
@@ -1166,8 +1173,6 @@ void nfc_unregister_device(struct nfc_dev *dev)
 	nfc_devlist_generation++;
 	device_del(&dev->dev);
 	mutex_unlock(&nfc_devlist_mutex);
-
-	ida_simple_remove(&nfc_index_ida, id);
 }
 EXPORT_SYMBOL(nfc_unregister_device);
 
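The nfc/core.c hunks move ID allocation and device_initialize() from nfc_register_device() into nfc_allocate_device(), and free the ID in nfc_release() rather than in nfc_unregister_device(). The identifier now lives exactly as long as the device refcount, so a still-referenced device cannot have its index reused after unregister. A userspace-style model of that ownership rule (hypothetical helpers, illustration only):

#include <stdlib.h>

struct dev {
	int idx;	/* identity allocated together with the object */
};

static int next_idx;

static struct dev *dev_alloc(void)
{
	struct dev *d = calloc(1, sizeof(*d));

	if (!d)
		return NULL;
	d->idx = next_idx++;	/* acquired at allocation time */
	return d;
}

static void dev_release(struct dev *d)
{
	/* the index is given back only when the object itself dies,
	 * mirroring ida_simple_remove() moving into nfc_release() */
	free(d);
}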
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index ecf0a0196f18..9c222a106c7f 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -76,7 +76,8 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
 	struct sockaddr_nfc_llcp llcp_addr;
 	int len, ret = 0;
 
-	if (!addr || addr->sa_family != AF_NFC)
+	if (!addr || alen < offsetofend(struct sockaddr, sa_family) ||
+	    addr->sa_family != AF_NFC)
 		return -EINVAL;
 
 	pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family);
@@ -150,7 +151,8 @@ static int llcp_raw_sock_bind(struct socket *sock, struct sockaddr *addr,
 	struct sockaddr_nfc_llcp llcp_addr;
 	int len, ret = 0;
 
-	if (!addr || addr->sa_family != AF_NFC)
+	if (!addr || alen < offsetofend(struct sockaddr, sa_family) ||
+	    addr->sa_family != AF_NFC)
 		return -EINVAL;
 
 	pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family);
@@ -655,8 +657,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
 
 	pr_debug("sock %p sk %p flags 0x%x\n", sock, sk, flags);
 
-	if (!addr || len < sizeof(struct sockaddr_nfc) ||
-	    addr->sa_family != AF_NFC)
+	if (!addr || len < sizeof(*addr) || addr->sa_family != AF_NFC)
 		return -EINVAL;
 
 	if (addr->service_name_len == 0 && addr->dsap == 0)
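All three llcp_sock.c fixes enforce the same rule: validate the caller-supplied address length before reading any field, because dereferencing addr->sa_family when alen is shorter than the field is an out-of-bounds read. A userspace sketch of the validation order (offsetofend() is a kernel macro, re-derived here; check_bind_addr is a hypothetical name):

#include <stddef.h>
#include <sys/socket.h>

#define my_offsetofend(type, member) \
	(offsetof(type, member) + sizeof(((type *)0)->member))

static int check_bind_addr(const struct sockaddr *addr, socklen_t alen,
			   sa_family_t expected)
{
	/* length first, then only the bytes the length guarantees */
	if (!addr || alen < my_offsetofend(struct sockaddr, sa_family))
		return -1;
	if (addr->sa_family != expected)
		return -1;
	return 0;
}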
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 10c99a578421..67583ad7f610 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -1084,8 +1084,7 @@ struct nci_dev *nci_allocate_device(struct nci_ops *ops,
 	return ndev;
 
 free_nfc:
-	kfree(ndev->nfc_dev);
-
+	nfc_free_device(ndev->nfc_dev);
 free_nci:
 	kfree(ndev);
 	return NULL;
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index f58c1fba1026..12dfb457275d 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -873,7 +873,9 @@ static int nfc_genl_activate_target(struct sk_buff *skb, struct genl_info *info)
 	u32 device_idx, target_idx, protocol;
 	int rc;
 
-	if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
+	if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
+	    !info->attrs[NFC_ATTR_TARGET_INDEX] ||
+	    !info->attrs[NFC_ATTR_PROTOCOLS])
 		return -EINVAL;
 
 	device_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
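The netlink.c hunk adds presence checks for every attribute the handler goes on to read: calling nla_get_u32() on an attribute the sender omitted dereferences a NULL entry in info->attrs[]. The general rule, as a small sketch over a hypothetical attribute table (illustration only, not the genetlink API):

#include <stddef.h>
#include <stdint.h>

struct attr { uint32_t value; };

static int get_required_u32(struct attr *const attrs[], unsigned int id,
			    uint32_t *out)
{
	if (!attrs[id])		/* absent: reject the request, don't crash */
		return -1;
	*out = attrs[id]->value;
	return 0;
}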
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index e004067ec24a..6a2507f24b0f 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -501,7 +501,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
 
 	/* The conntrack module expects to be working at L3. */
 	nh_ofs = skb_network_offset(skb);
-	skb_pull(skb, nh_ofs);
+	skb_pull_rcsum(skb, nh_ofs);
 
 	if (key->ip.frag != OVS_FRAG_TYPE_NONE) {
 		err = handle_fragments(net, key, info->zone.id, skb);
@@ -527,6 +527,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
 			    &info->labels.mask);
 err:
 	skb_push(skb, nh_ofs);
+	skb_postpush_rcsum(skb, skb->data, nh_ofs);
 	if (err)
 		kfree_skb(skb);
 	return err;
@@ -576,8 +577,8 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
 
 	nla_for_each_nested(a, attr, rem) {
 		int type = nla_type(a);
-		int maxlen = ovs_ct_attr_lens[type].maxlen;
-		int minlen = ovs_ct_attr_lens[type].minlen;
+		int maxlen;
+		int minlen;
 
 		if (type > OVS_CT_ATTR_MAX) {
 			OVS_NLERR(log,
@@ -585,6 +586,9 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
 				  type, OVS_CT_ATTR_MAX);
 			return -EINVAL;
 		}
+
+		maxlen = ovs_ct_attr_lens[type].maxlen;
+		minlen = ovs_ct_attr_lens[type].minlen;
 		if (nla_len(a) < minlen || nla_len(a) > maxlen) {
 			OVS_NLERR(log,
 			    "Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)",
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index d1bd4a45ca2d..d26b28def310 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -588,7 +588,7 @@ static int ip_tun_from_nlattr(const struct nlattr *attr,
 			ipv4 = true;
 			break;
 		case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
-			SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.dst,
+			SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.src,
 					nla_get_in6_addr(a), is_mask);
 			ipv6 = true;
 			break;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f223d1c80ccf..148ec130d99d 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1497,6 +1497,8 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po)
 	f->arr[f->num_members] = sk;
 	smp_wmb();
 	f->num_members++;
+	if (f->num_members == 1)
+		dev_add_pack(&f->prot_hook);
 	spin_unlock(&f->lock);
 }
 
@@ -1513,6 +1515,8 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1513 BUG_ON(i >= f->num_members); 1515 BUG_ON(i >= f->num_members);
1514 f->arr[i] = f->arr[f->num_members - 1]; 1516 f->arr[i] = f->arr[f->num_members - 1];
1515 f->num_members--; 1517 f->num_members--;
1518 if (f->num_members == 0)
1519 __dev_remove_pack(&f->prot_hook);
1516 spin_unlock(&f->lock); 1520 spin_unlock(&f->lock);
1517} 1521}
1518 1522
@@ -1623,6 +1627,7 @@ static void fanout_release_data(struct packet_fanout *f)
1623 1627
1624static int fanout_add(struct sock *sk, u16 id, u16 type_flags) 1628static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1625{ 1629{
1630 struct packet_rollover *rollover = NULL;
1626 struct packet_sock *po = pkt_sk(sk); 1631 struct packet_sock *po = pkt_sk(sk);
1627 struct packet_fanout *f, *match; 1632 struct packet_fanout *f, *match;
1628 u8 type = type_flags & 0xff; 1633 u8 type = type_flags & 0xff;
@@ -1645,23 +1650,28 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1645 return -EINVAL; 1650 return -EINVAL;
1646 } 1651 }
1647 1652
1653 mutex_lock(&fanout_mutex);
1654
1655 err = -EINVAL;
1648 if (!po->running) 1656 if (!po->running)
1649 return -EINVAL; 1657 goto out;
1650 1658
1659 err = -EALREADY;
1651 if (po->fanout) 1660 if (po->fanout)
1652 return -EALREADY; 1661 goto out;
1653 1662
1654 if (type == PACKET_FANOUT_ROLLOVER || 1663 if (type == PACKET_FANOUT_ROLLOVER ||
1655 (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) { 1664 (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1656 po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL); 1665 err = -ENOMEM;
1657 if (!po->rollover) 1666 rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
1658 return -ENOMEM; 1667 if (!rollover)
1659 atomic_long_set(&po->rollover->num, 0); 1668 goto out;
1660 atomic_long_set(&po->rollover->num_huge, 0); 1669 atomic_long_set(&rollover->num, 0);
1661 atomic_long_set(&po->rollover->num_failed, 0); 1670 atomic_long_set(&rollover->num_huge, 0);
1671 atomic_long_set(&rollover->num_failed, 0);
1672 po->rollover = rollover;
1662 } 1673 }
1663 1674
1664 mutex_lock(&fanout_mutex);
1665 match = NULL; 1675 match = NULL;
1666 list_for_each_entry(f, &fanout_list, list) { 1676 list_for_each_entry(f, &fanout_list, list) {
1667 if (f->id == id && 1677 if (f->id == id &&
@@ -1691,7 +1701,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1691 match->prot_hook.func = packet_rcv_fanout; 1701 match->prot_hook.func = packet_rcv_fanout;
1692 match->prot_hook.af_packet_priv = match; 1702 match->prot_hook.af_packet_priv = match;
1693 match->prot_hook.id_match = match_fanout_group; 1703 match->prot_hook.id_match = match_fanout_group;
1694 dev_add_pack(&match->prot_hook);
1695 list_add(&match->list, &fanout_list); 1704 list_add(&match->list, &fanout_list);
1696 } 1705 }
1697 err = -EINVAL; 1706 err = -EINVAL;
@@ -1708,36 +1717,40 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1708 } 1717 }
1709 } 1718 }
1710out: 1719out:
1711 mutex_unlock(&fanout_mutex); 1720 if (err && rollover) {
1712 if (err) { 1721 kfree(rollover);
1713 kfree(po->rollover);
1714 po->rollover = NULL; 1722 po->rollover = NULL;
1715 } 1723 }
1724 mutex_unlock(&fanout_mutex);
1716 return err; 1725 return err;
1717} 1726}
1718 1727
1719static void fanout_release(struct sock *sk) 1728/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
1729 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
1730 * It is the responsibility of the caller to call fanout_release_data() and
1731 * free the returned packet_fanout (after synchronize_net())
1732 */
1733static struct packet_fanout *fanout_release(struct sock *sk)
1720{ 1734{
1721 struct packet_sock *po = pkt_sk(sk); 1735 struct packet_sock *po = pkt_sk(sk);
1722 struct packet_fanout *f; 1736 struct packet_fanout *f;
1723 1737
1738 mutex_lock(&fanout_mutex);
1724 f = po->fanout; 1739 f = po->fanout;
1725 if (!f) 1740 if (f) {
1726 return; 1741 po->fanout = NULL;
1727 1742
1728 mutex_lock(&fanout_mutex); 1743 if (atomic_dec_and_test(&f->sk_ref))
1729 po->fanout = NULL; 1744 list_del(&f->list);
1745 else
1746 f = NULL;
1730 1747
1731 if (atomic_dec_and_test(&f->sk_ref)) { 1748 if (po->rollover)
1732 list_del(&f->list); 1749 kfree_rcu(po->rollover, rcu);
1733 dev_remove_pack(&f->prot_hook);
1734 fanout_release_data(f);
1735 kfree(f);
1736 } 1750 }
1737 mutex_unlock(&fanout_mutex); 1751 mutex_unlock(&fanout_mutex);
1738 1752
1739 if (po->rollover) 1753 return f;
1740 kfree_rcu(po->rollover, rcu);
1741} 1754}
1742 1755
1743static bool packet_extra_vlan_len_allowed(const struct net_device *dev, 1756static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
@@ -2637,7 +2650,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2637 int vnet_hdr_len; 2650 int vnet_hdr_len;
2638 struct packet_sock *po = pkt_sk(sk); 2651 struct packet_sock *po = pkt_sk(sk);
2639 unsigned short gso_type = 0; 2652 unsigned short gso_type = 0;
2640 int hlen, tlen; 2653 int hlen, tlen, linear;
2641 int extra_len = 0; 2654 int extra_len = 0;
2642 ssize_t n; 2655 ssize_t n;
2643 2656
@@ -2741,8 +2754,9 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2741 err = -ENOBUFS; 2754 err = -ENOBUFS;
2742 hlen = LL_RESERVED_SPACE(dev); 2755 hlen = LL_RESERVED_SPACE(dev);
2743 tlen = dev->needed_tailroom; 2756 tlen = dev->needed_tailroom;
2744 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, 2757 linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
2745 __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len), 2758 linear = max(linear, min_t(int, len, dev->hard_header_len));
2759 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
2746 msg->msg_flags & MSG_DONTWAIT, &err); 2760 msg->msg_flags & MSG_DONTWAIT, &err);
2747 if (skb == NULL) 2761 if (skb == NULL)
2748 goto out_unlock; 2762 goto out_unlock;
@@ -2845,6 +2859,7 @@ static int packet_release(struct socket *sock)
2845{ 2859{
2846 struct sock *sk = sock->sk; 2860 struct sock *sk = sock->sk;
2847 struct packet_sock *po; 2861 struct packet_sock *po;
2862 struct packet_fanout *f;
2848 struct net *net; 2863 struct net *net;
2849 union tpacket_req_u req_u; 2864 union tpacket_req_u req_u;
2850 2865
@@ -2884,9 +2899,14 @@ static int packet_release(struct socket *sock)
2884 packet_set_ring(sk, &req_u, 1, 1); 2899 packet_set_ring(sk, &req_u, 1, 1);
2885 } 2900 }
2886 2901
2887 fanout_release(sk); 2902 f = fanout_release(sk);
2888 2903
2889 synchronize_net(); 2904 synchronize_net();
2905
2906 if (f) {
2907 fanout_release_data(f);
2908 kfree(f);
2909 }
2890 /* 2910 /*
2891 * Now the socket is dead. No more input will appear. 2911 * Now the socket is dead. No more input will appear.
2892 */ 2912 */
@@ -3001,7 +3021,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3001 int addr_len) 3021 int addr_len)
3002{ 3022{
3003 struct sock *sk = sock->sk; 3023 struct sock *sk = sock->sk;
3004 char name[15]; 3024 char name[sizeof(uaddr->sa_data) + 1];
3005 3025
3006 /* 3026 /*
3007 * Check legality 3027 * Check legality
@@ -3009,7 +3029,11 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3009 3029
3010 if (addr_len != sizeof(struct sockaddr)) 3030 if (addr_len != sizeof(struct sockaddr))
3011 return -EINVAL; 3031 return -EINVAL;
3012 strlcpy(name, uaddr->sa_data, sizeof(name)); 3032 /* uaddr->sa_data comes from the userspace, it's not guaranteed to be
3033 * zero-terminated.
3034 */
3035 memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
3036 name[sizeof(uaddr->sa_data)] = 0;
3013 3037
3014 return packet_do_bind(sk, name, 0, pkt_sk(sk)->num); 3038 return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
3015} 3039}
@@ -3598,12 +3622,19 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3598 3622
3599 if (optlen != sizeof(val)) 3623 if (optlen != sizeof(val))
3600 return -EINVAL; 3624 return -EINVAL;
3601 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3602 return -EBUSY;
3603 if (copy_from_user(&val, optval, sizeof(val))) 3625 if (copy_from_user(&val, optval, sizeof(val)))
3604 return -EFAULT; 3626 return -EFAULT;
3605 po->tp_reserve = val; 3627 if (val > INT_MAX)
3606 return 0; 3628 return -EINVAL;
3629 lock_sock(sk);
3630 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3631 ret = -EBUSY;
3632 } else {
3633 po->tp_reserve = val;
3634 ret = 0;
3635 }
3636 release_sock(sk);
3637 return ret;
3607 } 3638 }
3608 case PACKET_LOSS: 3639 case PACKET_LOSS:
3609 { 3640 {
@@ -3860,7 +3891,6 @@ static int packet_notifier(struct notifier_block *this,
3860 } 3891 }
3861 if (msg == NETDEV_UNREGISTER) { 3892 if (msg == NETDEV_UNREGISTER) {
3862 packet_cached_dev_reset(po); 3893 packet_cached_dev_reset(po);
3863 fanout_release(sk);
3864 po->ifindex = -1; 3894 po->ifindex = -1;
3865 if (po->prot_hook.dev) 3895 if (po->prot_hook.dev)
3866 dev_put(po->prot_hook.dev); 3896 dev_put(po->prot_hook.dev);
@@ -4115,8 +4145,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4115 if (unlikely(!PAGE_ALIGNED(req->tp_block_size))) 4145 if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4116 goto out; 4146 goto out;
4117 if (po->tp_version >= TPACKET_V3 && 4147 if (po->tp_version >= TPACKET_V3 &&
4118 (int)(req->tp_block_size - 4148 req->tp_block_size <=
4119 BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0) 4149 BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
4120 goto out; 4150 goto out;
4121 if (unlikely(req->tp_frame_size < po->tp_hdrlen + 4151 if (unlikely(req->tp_frame_size < po->tp_hdrlen +
4122 po->tp_reserve)) 4152 po->tp_reserve))
@@ -4127,6 +4157,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4127 rb->frames_per_block = req->tp_block_size / req->tp_frame_size; 4157 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4128 if (unlikely(rb->frames_per_block == 0)) 4158 if (unlikely(rb->frames_per_block == 0))
4129 goto out; 4159 goto out;
4160 if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
4161 goto out;
4130 if (unlikely((rb->frames_per_block * req->tp_block_nr) != 4162 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4131 req->tp_frame_nr)) 4163 req->tp_frame_nr))
4132 goto out; 4164 goto out;
@@ -4198,7 +4230,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4198 register_prot_hook(sk); 4230 register_prot_hook(sk);
4199 } 4231 }
4200 spin_unlock(&po->bind_lock); 4232 spin_unlock(&po->bind_lock);
4201 if (closing && (po->tp_version > TPACKET_V2)) { 4233 if (pg_vec && (po->tp_version > TPACKET_V2)) {
4202 /* Because we don't support block-based V3 on tx-ring */ 4234 /* Because we don't support block-based V3 on tx-ring */
4203 if (!tx_ring) 4235 if (!tx_ring)
4204 prb_shutdown_retire_blk_timer(po, rb_queue); 4236 prb_shutdown_retire_blk_timer(po, rb_queue);
diff --git a/net/rds/cong.c b/net/rds/cong.c
index e6144b8246fd..6641bcf7c185 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -299,7 +299,7 @@ void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
299 i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS; 299 i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
300 off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS; 300 off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
301 301
302 __set_bit_le(off, (void *)map->m_page_addrs[i]); 302 set_bit_le(off, (void *)map->m_page_addrs[i]);
303} 303}
304 304
305void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port) 305void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
@@ -313,7 +313,7 @@ void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
313 i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS; 313 i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
314 off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS; 314 off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
315 315
316 __clear_bit_le(off, (void *)map->m_page_addrs[i]); 316 clear_bit_le(off, (void *)map->m_page_addrs[i]);
317} 317}
318 318
319static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port) 319static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 0936a4a32b47..e353e3255206 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -78,7 +78,7 @@ int rds_tcp_accept_one(struct socket *sock)
78 struct inet_sock *inet; 78 struct inet_sock *inet;
79 struct rds_tcp_connection *rs_tcp; 79 struct rds_tcp_connection *rs_tcp;
80 80
81 ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family, 81 ret = sock_create_lite(sock->sk->sk_family,
82 sock->sk->sk_type, sock->sk->sk_protocol, 82 sock->sk->sk_type, sock->sk->sk_protocol,
83 &new_sock); 83 &new_sock);
84 if (ret) 84 if (ret)
diff --git a/net/rpmsg/rpmsg_proto.c b/net/rpmsg/rpmsg_proto.c
index 1c30815556eb..010a9aed6dbf 100644
--- a/net/rpmsg/rpmsg_proto.c
+++ b/net/rpmsg/rpmsg_proto.c
@@ -543,6 +543,11 @@ static void __rpmsg_proto_cb(struct device *dev, int from_vproc_id, void *data,
543 len, true); 543 len, true);
544#endif 544#endif
545 545
546 if (!sk) {
547 dev_warn(dev, "callback for deleted socket (from %d)\n", src);
548 return;
549 }
550
546 lock_sock(sk); 551 lock_sock(sk);
547 552
548 switch (sk->sk_state) { 553 switch (sk->sk_state) {
@@ -590,6 +595,15 @@ static void rpmsg_proto_cb(struct rpmsg_channel *rpdev, void *data, int len,
590{ 595{
591 int id = rpmsg_sock_get_proc_id(rpdev); 596 int id = rpmsg_sock_get_proc_id(rpdev);
592 597
598 /* published rpmsg channels from remote side reuse their end-point's
599 * private field for storing the list of connected sockets, so cannot
600 * process messages.
601 */
602 if (rpdev->src >= RPMSG_RESERVED_ADDRESSES) {
603 dev_err(&rpdev->dev, "rpmsg_proto device not designed to receive any messages\n");
604 return;
605 }
606
593 __rpmsg_proto_cb(&rpdev->dev, id, data, len, priv, src); 607 __rpmsg_proto_cb(&rpdev->dev, id, data, len, priv, src);
594} 608}
595 609
@@ -609,8 +623,11 @@ static int rpmsg_proto_probe(struct rpmsg_channel *rpdev)
609 dev_err(dev, "id %d already associated to different vrp\n", 623 dev_err(dev, "id %d already associated to different vrp\n",
610 id); 624 id);
611 625
612 if (dst == RPMSG_ADDR_ANY) 626 if (dst == RPMSG_ADDR_ANY) {
627 /* Set announce to false and avoid extra delay when binding. */
628 rpdev->announce = false;
613 return 0; 629 return 0;
630 }
614 631
615 /* associate id/vrp for later lookup in rpmsg_sock_bind() */ 632 /* associate id/vrp for later lookup in rpmsg_sock_bind() */
616 if (!vrp) { 633 if (!vrp) {
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index da3cc09f683e..91d43ab3a961 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -215,7 +215,7 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
215 unsigned int *_toklen) 215 unsigned int *_toklen)
216{ 216{
217 const __be32 *xdr = *_xdr; 217 const __be32 *xdr = *_xdr;
218 unsigned int toklen = *_toklen, n_parts, loop, tmp; 218 unsigned int toklen = *_toklen, n_parts, loop, tmp, paddedlen;
219 219
220 /* there must be at least one name, and at least #names+1 length 220 /* there must be at least one name, and at least #names+1 length
221 * words */ 221 * words */
@@ -245,16 +245,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
245 toklen -= 4; 245 toklen -= 4;
246 if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX) 246 if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX)
247 return -EINVAL; 247 return -EINVAL;
248 if (tmp > toklen) 248 paddedlen = (tmp + 3) & ~3;
249 if (paddedlen > toklen)
249 return -EINVAL; 250 return -EINVAL;
250 princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL); 251 princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL);
251 if (!princ->name_parts[loop]) 252 if (!princ->name_parts[loop])
252 return -ENOMEM; 253 return -ENOMEM;
253 memcpy(princ->name_parts[loop], xdr, tmp); 254 memcpy(princ->name_parts[loop], xdr, tmp);
254 princ->name_parts[loop][tmp] = 0; 255 princ->name_parts[loop][tmp] = 0;
255 tmp = (tmp + 3) & ~3; 256 toklen -= paddedlen;
256 toklen -= tmp; 257 xdr += paddedlen >> 2;
257 xdr += tmp >> 2;
258 } 258 }
259 259
260 if (toklen < 4) 260 if (toklen < 4)
@@ -263,16 +263,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
263 toklen -= 4; 263 toklen -= 4;
264 if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX) 264 if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX)
265 return -EINVAL; 265 return -EINVAL;
266 if (tmp > toklen) 266 paddedlen = (tmp + 3) & ~3;
267 if (paddedlen > toklen)
267 return -EINVAL; 268 return -EINVAL;
268 princ->realm = kmalloc(tmp + 1, GFP_KERNEL); 269 princ->realm = kmalloc(tmp + 1, GFP_KERNEL);
269 if (!princ->realm) 270 if (!princ->realm)
270 return -ENOMEM; 271 return -ENOMEM;
271 memcpy(princ->realm, xdr, tmp); 272 memcpy(princ->realm, xdr, tmp);
272 princ->realm[tmp] = 0; 273 princ->realm[tmp] = 0;
273 tmp = (tmp + 3) & ~3; 274 toklen -= paddedlen;
274 toklen -= tmp; 275 xdr += paddedlen >> 2;
275 xdr += tmp >> 2;
276 276
277 _debug("%s/...@%s", princ->name_parts[0], princ->realm); 277 _debug("%s/...@%s", princ->name_parts[0], princ->realm);
278 278
@@ -291,7 +291,7 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
291 unsigned int *_toklen) 291 unsigned int *_toklen)
292{ 292{
293 const __be32 *xdr = *_xdr; 293 const __be32 *xdr = *_xdr;
294 unsigned int toklen = *_toklen, len; 294 unsigned int toklen = *_toklen, len, paddedlen;
295 295
296 /* there must be at least one tag and one length word */ 296 /* there must be at least one tag and one length word */
297 if (toklen <= 8) 297 if (toklen <= 8)
@@ -305,15 +305,17 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
305 toklen -= 8; 305 toklen -= 8;
306 if (len > max_data_size) 306 if (len > max_data_size)
307 return -EINVAL; 307 return -EINVAL;
308 paddedlen = (len + 3) & ~3;
309 if (paddedlen > toklen)
310 return -EINVAL;
308 td->data_len = len; 311 td->data_len = len;
309 312
310 if (len > 0) { 313 if (len > 0) {
311 td->data = kmemdup(xdr, len, GFP_KERNEL); 314 td->data = kmemdup(xdr, len, GFP_KERNEL);
312 if (!td->data) 315 if (!td->data)
313 return -ENOMEM; 316 return -ENOMEM;
314 len = (len + 3) & ~3; 317 toklen -= paddedlen;
315 toklen -= len; 318 xdr += paddedlen >> 2;
316 xdr += len >> 2;
317 } 319 }
318 320
319 _debug("tag %x len %x", td->tag, td->data_len); 321 _debug("tag %x len %x", td->tag, td->data_len);
@@ -385,7 +387,7 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
385 const __be32 **_xdr, unsigned int *_toklen) 387 const __be32 **_xdr, unsigned int *_toklen)
386{ 388{
387 const __be32 *xdr = *_xdr; 389 const __be32 *xdr = *_xdr;
388 unsigned int toklen = *_toklen, len; 390 unsigned int toklen = *_toklen, len, paddedlen;
389 391
390 /* there must be at least one length word */ 392 /* there must be at least one length word */
391 if (toklen <= 4) 393 if (toklen <= 4)
@@ -397,6 +399,9 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
397 toklen -= 4; 399 toklen -= 4;
398 if (len > AFSTOKEN_K5_TIX_MAX) 400 if (len > AFSTOKEN_K5_TIX_MAX)
399 return -EINVAL; 401 return -EINVAL;
402 paddedlen = (len + 3) & ~3;
403 if (paddedlen > toklen)
404 return -EINVAL;
400 *_tktlen = len; 405 *_tktlen = len;
401 406
402 _debug("ticket len %u", len); 407 _debug("ticket len %u", len);
@@ -405,9 +410,8 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
405 *_ticket = kmemdup(xdr, len, GFP_KERNEL); 410 *_ticket = kmemdup(xdr, len, GFP_KERNEL);
406 if (!*_ticket) 411 if (!*_ticket)
407 return -ENOMEM; 412 return -ENOMEM;
408 len = (len + 3) & ~3; 413 toklen -= paddedlen;
409 toklen -= len; 414 xdr += paddedlen >> 2;
410 xdr += len >> 2;
411 } 415 }
412 416
413 *_xdr = xdr; 417 *_xdr = xdr;
@@ -550,7 +554,7 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
550{ 554{
551 const __be32 *xdr = prep->data, *token; 555 const __be32 *xdr = prep->data, *token;
552 const char *cp; 556 const char *cp;
553 unsigned int len, tmp, loop, ntoken, toklen, sec_ix; 557 unsigned int len, paddedlen, loop, ntoken, toklen, sec_ix;
554 size_t datalen = prep->datalen; 558 size_t datalen = prep->datalen;
555 int ret; 559 int ret;
556 560
@@ -576,22 +580,21 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
576 if (len < 1 || len > AFSTOKEN_CELL_MAX) 580 if (len < 1 || len > AFSTOKEN_CELL_MAX)
577 goto not_xdr; 581 goto not_xdr;
578 datalen -= 4; 582 datalen -= 4;
579 tmp = (len + 3) & ~3; 583 paddedlen = (len + 3) & ~3;
580 if (tmp > datalen) 584 if (paddedlen > datalen)
581 goto not_xdr; 585 goto not_xdr;
582 586
583 cp = (const char *) xdr; 587 cp = (const char *) xdr;
584 for (loop = 0; loop < len; loop++) 588 for (loop = 0; loop < len; loop++)
585 if (!isprint(cp[loop])) 589 if (!isprint(cp[loop]))
586 goto not_xdr; 590 goto not_xdr;
587 if (len < tmp) 591 for (; loop < paddedlen; loop++)
588 for (; loop < tmp; loop++) 592 if (cp[loop])
589 if (cp[loop]) 593 goto not_xdr;
590 goto not_xdr;
591 _debug("cellname: [%u/%u] '%*.*s'", 594 _debug("cellname: [%u/%u] '%*.*s'",
592 len, tmp, len, len, (const char *) xdr); 595 len, paddedlen, len, len, (const char *) xdr);
593 datalen -= tmp; 596 datalen -= paddedlen;
594 xdr += tmp >> 2; 597 xdr += paddedlen >> 2;
595 598
596 /* get the token count */ 599 /* get the token count */
597 if (datalen < 12) 600 if (datalen < 12)
@@ -612,10 +615,11 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
612 sec_ix = ntohl(*xdr); 615 sec_ix = ntohl(*xdr);
613 datalen -= 4; 616 datalen -= 4;
614 _debug("token: [%x/%zx] %x", toklen, datalen, sec_ix); 617 _debug("token: [%x/%zx] %x", toklen, datalen, sec_ix);
615 if (toklen < 20 || toklen > datalen) 618 paddedlen = (toklen + 3) & ~3;
619 if (toklen < 20 || toklen > datalen || paddedlen > datalen)
616 goto not_xdr; 620 goto not_xdr;
617 datalen -= (toklen + 3) & ~3; 621 datalen -= paddedlen;
618 xdr += (toklen + 3) >> 2; 622 xdr += paddedlen >> 2;
619 623
620 } while (--loop > 0); 624 } while (--loop > 0);
621 625
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 06e7c4a37245..694a06f1e0d5 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -820,10 +820,8 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
820 goto out_module_put; 820 goto out_module_put;
821 821
822 err = a.ops->walk(skb, &dcb, RTM_DELACTION, &a); 822 err = a.ops->walk(skb, &dcb, RTM_DELACTION, &a);
823 if (err < 0) 823 if (err <= 0)
824 goto out_module_put; 824 goto out_module_put;
825 if (err == 0)
826 goto noflush_out;
827 825
828 nla_nest_end(skb, nest); 826 nla_nest_end(skb, nest);
829 827
@@ -840,7 +838,6 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
840out_module_put: 838out_module_put:
841 module_put(a.ops->owner); 839 module_put(a.ops->owner);
842err_out: 840err_out:
843noflush_out:
844 kfree_skb(skb); 841 kfree_skb(skb);
845 return err; 842 return err;
846} 843}
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index bb41699c6c49..7ecb14f3db54 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -109,6 +109,9 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
109 if (ret < 0) 109 if (ret < 0)
110 return ret; 110 return ret;
111 111
112 if (!tb[TCA_CONNMARK_PARMS])
113 return -EINVAL;
114
112 parm = nla_data(tb[TCA_CONNMARK_PARMS]); 115 parm = nla_data(tb[TCA_CONNMARK_PARMS]);
113 116
114 if (!tcf_hash_check(parm->index, a, bind)) { 117 if (!tcf_hash_check(parm->index, a, bind)) {
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index d05869646515..0915d448ba23 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -42,8 +42,8 @@ static int ipt_init_target(struct xt_entry_target *t, char *table, unsigned int
42 return PTR_ERR(target); 42 return PTR_ERR(target);
43 43
44 t->u.kernel.target = target; 44 t->u.kernel.target = target;
45 memset(&par, 0, sizeof(par));
45 par.table = table; 46 par.table = table;
46 par.entryinfo = NULL;
47 par.target = target; 47 par.target = target;
48 par.targinfo = t->data; 48 par.targinfo = t->data;
49 par.hook_mask = hook; 49 par.hook_mask = hook;
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index e384d6aefa3a..1090a52c03cd 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -36,14 +36,15 @@ static DEFINE_SPINLOCK(mirred_list_lock);
36static void tcf_mirred_release(struct tc_action *a, int bind) 36static void tcf_mirred_release(struct tc_action *a, int bind)
37{ 37{
38 struct tcf_mirred *m = to_mirred(a); 38 struct tcf_mirred *m = to_mirred(a);
39 struct net_device *dev = rcu_dereference_protected(m->tcfm_dev, 1); 39 struct net_device *dev;
40 40
41 /* We could be called either in a RCU callback or with RTNL lock held. */ 41 /* We could be called either in a RCU callback or with RTNL lock held. */
42 spin_lock_bh(&mirred_list_lock); 42 spin_lock_bh(&mirred_list_lock);
43 list_del(&m->tcfm_list); 43 list_del(&m->tcfm_list);
44 spin_unlock_bh(&mirred_list_lock); 44 dev = rcu_dereference_protected(m->tcfm_dev, 1);
45 if (dev) 45 if (dev)
46 dev_put(dev); 46 dev_put(dev);
47 spin_unlock_bh(&mirred_list_lock);
47} 48}
48 49
49static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = { 50static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 95b560f0b253..6d340cd6e2a7 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1004,6 +1004,9 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
1004 1004
1005 return sch; 1005 return sch;
1006 } 1006 }
1007 /* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
1008 if (ops->destroy)
1009 ops->destroy(sch);
1007err_out3: 1010err_out3:
1008 dev_put(dev); 1011 dev_put(dev);
1009 kfree((char *) sch - sch->padded); 1012 kfree((char *) sch - sch->padded);
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index 13d6f83ec491..45d4b2f22f62 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -636,7 +636,9 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
636 q->hhf_arrays[i] = hhf_zalloc(HHF_ARRAYS_LEN * 636 q->hhf_arrays[i] = hhf_zalloc(HHF_ARRAYS_LEN *
637 sizeof(u32)); 637 sizeof(u32));
638 if (!q->hhf_arrays[i]) { 638 if (!q->hhf_arrays[i]) {
639 hhf_destroy(sch); 639 /* Note: hhf_destroy() will be called
640 * by our caller.
641 */
640 return -ENOMEM; 642 return -ENOMEM;
641 } 643 }
642 } 644 }
@@ -647,7 +649,9 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
647 q->hhf_valid_bits[i] = hhf_zalloc(HHF_ARRAYS_LEN / 649 q->hhf_valid_bits[i] = hhf_zalloc(HHF_ARRAYS_LEN /
648 BITS_PER_BYTE); 650 BITS_PER_BYTE);
649 if (!q->hhf_valid_bits[i]) { 651 if (!q->hhf_valid_bits[i]) {
650 hhf_destroy(sch); 652 /* Note: hhf_destroy() will be called
653 * by our caller.
654 */
651 return -ENOMEM; 655 return -ENOMEM;
652 } 656 }
653 } 657 }
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index 3e82f047caaf..d9c84328e7eb 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -52,7 +52,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
52 /* pre-allocate qdiscs, attachment can't fail */ 52 /* pre-allocate qdiscs, attachment can't fail */
53 priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]), 53 priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
54 GFP_KERNEL); 54 GFP_KERNEL);
55 if (priv->qdiscs == NULL) 55 if (!priv->qdiscs)
56 return -ENOMEM; 56 return -ENOMEM;
57 57
58 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { 58 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
@@ -60,18 +60,14 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
60 qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops, 60 qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops,
61 TC_H_MAKE(TC_H_MAJ(sch->handle), 61 TC_H_MAKE(TC_H_MAJ(sch->handle),
62 TC_H_MIN(ntx + 1))); 62 TC_H_MIN(ntx + 1)));
63 if (qdisc == NULL) 63 if (!qdisc)
64 goto err; 64 return -ENOMEM;
65 priv->qdiscs[ntx] = qdisc; 65 priv->qdiscs[ntx] = qdisc;
66 qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; 66 qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
67 } 67 }
68 68
69 sch->flags |= TCQ_F_MQROOT; 69 sch->flags |= TCQ_F_MQROOT;
70 return 0; 70 return 0;
71
72err:
73 mq_destroy(sch);
74 return -ENOMEM;
75} 71}
76 72
77static void mq_attach(struct Qdisc *sch) 73static void mq_attach(struct Qdisc *sch)
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index ad70ecf57ce7..66bccc5ff4ea 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -117,20 +117,17 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
117 /* pre-allocate qdisc, attachment can't fail */ 117 /* pre-allocate qdisc, attachment can't fail */
118 priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]), 118 priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
119 GFP_KERNEL); 119 GFP_KERNEL);
120 if (priv->qdiscs == NULL) { 120 if (!priv->qdiscs)
121 err = -ENOMEM; 121 return -ENOMEM;
122 goto err;
123 }
124 122
125 for (i = 0; i < dev->num_tx_queues; i++) { 123 for (i = 0; i < dev->num_tx_queues; i++) {
126 dev_queue = netdev_get_tx_queue(dev, i); 124 dev_queue = netdev_get_tx_queue(dev, i);
127 qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops, 125 qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops,
128 TC_H_MAKE(TC_H_MAJ(sch->handle), 126 TC_H_MAKE(TC_H_MAJ(sch->handle),
129 TC_H_MIN(i + 1))); 127 TC_H_MIN(i + 1)));
130 if (qdisc == NULL) { 128 if (!qdisc)
131 err = -ENOMEM; 129 return -ENOMEM;
132 goto err; 130
133 }
134 priv->qdiscs[i] = qdisc; 131 priv->qdiscs[i] = qdisc;
135 qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; 132 qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
136 } 133 }
@@ -143,7 +140,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
143 priv->hw_owned = 1; 140 priv->hw_owned = 1;
144 err = dev->netdev_ops->ndo_setup_tc(dev, qopt->num_tc); 141 err = dev->netdev_ops->ndo_setup_tc(dev, qopt->num_tc);
145 if (err) 142 if (err)
146 goto err; 143 return err;
147 } else { 144 } else {
148 netdev_set_num_tc(dev, qopt->num_tc); 145 netdev_set_num_tc(dev, qopt->num_tc);
149 for (i = 0; i < qopt->num_tc; i++) 146 for (i = 0; i < qopt->num_tc; i++)
@@ -157,10 +154,6 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
157 154
158 sch->flags |= TCQ_F_MQROOT; 155 sch->flags |= TCQ_F_MQROOT;
159 return 0; 156 return 0;
160
161err:
162 mqprio_destroy(sch);
163 return err;
164} 157}
165 158
166static void mqprio_attach(struct Qdisc *sch) 159static void mqprio_attach(struct Qdisc *sch)
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 498f0a2cb47f..4431e2833e45 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -742,9 +742,10 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
742 q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor); 742 q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
743 q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows); 743 q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
744 if (!q->ht || !q->slots) { 744 if (!q->ht || !q->slots) {
745 sfq_destroy(sch); 745 /* Note: sfq_destroy() will be called by our caller */
746 return -ENOMEM; 746 return -ENOMEM;
747 } 747 }
748
748 for (i = 0; i < q->divisor; i++) 749 for (i = 0; i < q->divisor; i++)
749 q->ht[i] = SFQ_EMPTY_SLOT; 750 q->ht[i] = SFQ_EMPTY_SLOT;
750 751
diff --git a/net/sctp/input.c b/net/sctp/input.c
index b6493b3f11a9..2d7859c03fd2 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -472,15 +472,14 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
472 struct sctp_association **app, 472 struct sctp_association **app,
473 struct sctp_transport **tpp) 473 struct sctp_transport **tpp)
474{ 474{
475 struct sctp_init_chunk *chunkhdr, _chunkhdr;
475 union sctp_addr saddr; 476 union sctp_addr saddr;
476 union sctp_addr daddr; 477 union sctp_addr daddr;
477 struct sctp_af *af; 478 struct sctp_af *af;
478 struct sock *sk = NULL; 479 struct sock *sk = NULL;
479 struct sctp_association *asoc; 480 struct sctp_association *asoc;
480 struct sctp_transport *transport = NULL; 481 struct sctp_transport *transport = NULL;
481 struct sctp_init_chunk *chunkhdr;
482 __u32 vtag = ntohl(sctphdr->vtag); 482 __u32 vtag = ntohl(sctphdr->vtag);
483 int len = skb->len - ((void *)sctphdr - (void *)skb->data);
484 483
485 *app = NULL; *tpp = NULL; 484 *app = NULL; *tpp = NULL;
486 485
@@ -515,13 +514,16 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
515 * discard the packet. 514 * discard the packet.
516 */ 515 */
517 if (vtag == 0) { 516 if (vtag == 0) {
518 chunkhdr = (void *)sctphdr + sizeof(struct sctphdr); 517 /* chunk header + first 4 octects of init header */
519 if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t) 518 chunkhdr = skb_header_pointer(skb, skb_transport_offset(skb) +
520 + sizeof(__be32) || 519 sizeof(struct sctphdr),
520 sizeof(struct sctp_chunkhdr) +
521 sizeof(__be32), &_chunkhdr);
522 if (!chunkhdr ||
521 chunkhdr->chunk_hdr.type != SCTP_CID_INIT || 523 chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
522 ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) { 524 ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag)
523 goto out; 525 goto out;
524 } 526
525 } else if (vtag != asoc->c.peer_vtag) { 527 } else if (vtag != asoc->c.peer_vtag) {
526 goto out; 528 goto out;
527 } 529 }
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index ce46f1c7f133..7527c168e471 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -239,12 +239,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
239 struct sctp_bind_addr *bp; 239 struct sctp_bind_addr *bp;
240 struct ipv6_pinfo *np = inet6_sk(sk); 240 struct ipv6_pinfo *np = inet6_sk(sk);
241 struct sctp_sockaddr_entry *laddr; 241 struct sctp_sockaddr_entry *laddr;
242 union sctp_addr *baddr = NULL;
243 union sctp_addr *daddr = &t->ipaddr; 242 union sctp_addr *daddr = &t->ipaddr;
244 union sctp_addr dst_saddr; 243 union sctp_addr dst_saddr;
245 struct in6_addr *final_p, final; 244 struct in6_addr *final_p, final;
246 __u8 matchlen = 0; 245 __u8 matchlen = 0;
247 __u8 bmatchlen;
248 sctp_scope_t scope; 246 sctp_scope_t scope;
249 247
250 memset(fl6, 0, sizeof(struct flowi6)); 248 memset(fl6, 0, sizeof(struct flowi6));
@@ -311,23 +309,37 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
311 */ 309 */
312 rcu_read_lock(); 310 rcu_read_lock();
313 list_for_each_entry_rcu(laddr, &bp->address_list, list) { 311 list_for_each_entry_rcu(laddr, &bp->address_list, list) {
314 if (!laddr->valid) 312 struct dst_entry *bdst;
313 __u8 bmatchlen;
314
315 if (!laddr->valid ||
316 laddr->state != SCTP_ADDR_SRC ||
317 laddr->a.sa.sa_family != AF_INET6 ||
318 scope > sctp_scope(&laddr->a))
315 continue; 319 continue;
316 if ((laddr->state == SCTP_ADDR_SRC) && 320
317 (laddr->a.sa.sa_family == AF_INET6) && 321 fl6->saddr = laddr->a.v6.sin6_addr;
318 (scope <= sctp_scope(&laddr->a))) { 322 fl6->fl6_sport = laddr->a.v6.sin6_port;
319 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
320 if (!baddr || (matchlen < bmatchlen)) {
321 baddr = &laddr->a;
322 matchlen = bmatchlen;
323 }
324 }
325 }
326 if (baddr) {
327 fl6->saddr = baddr->v6.sin6_addr;
328 fl6->fl6_sport = baddr->v6.sin6_port;
329 final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); 323 final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
330 dst = ip6_dst_lookup_flow(sk, fl6, final_p); 324 bdst = ip6_dst_lookup_flow(sk, fl6, final_p);
325
326 if (!IS_ERR(bdst) &&
327 ipv6_chk_addr(dev_net(bdst->dev),
328 &laddr->a.v6.sin6_addr, bdst->dev, 1)) {
329 if (!IS_ERR_OR_NULL(dst))
330 dst_release(dst);
331 dst = bdst;
332 break;
333 }
334
335 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
336 if (matchlen > bmatchlen)
337 continue;
338
339 if (!IS_ERR_OR_NULL(dst))
340 dst_release(dst);
341 dst = bdst;
342 matchlen = bmatchlen;
331 } 343 }
332 rcu_read_unlock(); 344 rcu_read_unlock();
333 345
@@ -662,6 +674,9 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
662 newnp = inet6_sk(newsk); 674 newnp = inet6_sk(newsk);
663 675
664 memcpy(newnp, np, sizeof(struct ipv6_pinfo)); 676 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
677 newnp->ipv6_mc_list = NULL;
678 newnp->ipv6_ac_list = NULL;
679 newnp->ipv6_fl_list = NULL;
665 680
666 rcu_read_lock(); 681 rcu_read_lock();
667 opt = rcu_dereference(np->opt); 682 opt = rcu_dereference(np->opt);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index b5fd4ab56156..3ebf3b652d60 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -235,8 +235,12 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
235 sctp_assoc_t id) 235 sctp_assoc_t id)
236{ 236{
237 struct sctp_association *addr_asoc = NULL, *id_asoc = NULL; 237 struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
238 struct sctp_transport *transport; 238 struct sctp_af *af = sctp_get_af_specific(addr->ss_family);
239 union sctp_addr *laddr = (union sctp_addr *)addr; 239 union sctp_addr *laddr = (union sctp_addr *)addr;
240 struct sctp_transport *transport;
241
242 if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len))
243 return NULL;
240 244
241 addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, 245 addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
242 laddr, 246 laddr,
@@ -4422,6 +4426,12 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
4422 if (!asoc) 4426 if (!asoc)
4423 return -EINVAL; 4427 return -EINVAL;
4424 4428
4429 /* If there is a thread waiting on more sndbuf space for
4430 * sending on this asoc, it cannot be peeled.
4431 */
4432 if (waitqueue_active(&asoc->wait))
4433 return -EBUSY;
4434
4425 /* An association cannot be branched off from an already peeled-off 4435 /* An association cannot be branched off from an already peeled-off
4426 * socket, nor is this supported for tcp style sockets. 4436 * socket, nor is this supported for tcp style sockets.
4427 */ 4437 */
@@ -6388,6 +6398,9 @@ int sctp_inet_listen(struct socket *sock, int backlog)
6388 if (sock->state != SS_UNCONNECTED) 6398 if (sock->state != SS_UNCONNECTED)
6389 goto out; 6399 goto out;
6390 6400
6401 if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED))
6402 goto out;
6403
6391 /* If backlog is zero, disable listening. */ 6404 /* If backlog is zero, disable listening. */
6392 if (!backlog) { 6405 if (!backlog) {
6393 if (sctp_sstate(sk, CLOSED)) 6406 if (sctp_sstate(sk, CLOSED))
@@ -6960,7 +6973,6 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
6960 */ 6973 */
6961 release_sock(sk); 6974 release_sock(sk);
6962 current_timeo = schedule_timeout(current_timeo); 6975 current_timeo = schedule_timeout(current_timeo);
6963 BUG_ON(sk != asoc->base.sk);
6964 lock_sock(sk); 6976 lock_sock(sk);
6965 6977
6966 *timeo_p = current_timeo; 6978 *timeo_p = current_timeo;
diff --git a/net/socket.c b/net/socket.c
index 66d984ac2991..18aff3d804ec 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -520,11 +520,11 @@ static ssize_t sockfs_listxattr(struct dentry *dentry, char *buffer,
520 return used; 520 return used;
521} 521}
522 522
523int sockfs_setattr(struct dentry *dentry, struct iattr *iattr) 523static int sockfs_setattr(struct dentry *dentry, struct iattr *iattr)
524{ 524{
525 int err = simple_setattr(dentry, iattr); 525 int err = simple_setattr(dentry, iattr);
526 526
527 if (!err) { 527 if (!err && (iattr->ia_valid & ATTR_UID)) {
528 struct socket *sock = SOCKET_I(d_inode(dentry)); 528 struct socket *sock = SOCKET_I(d_inode(dentry));
529 529
530 sock->sk->sk_uid = iattr->ia_uid; 530 sock->sk->sk_uid = iattr->ia_uid;
@@ -2199,8 +2199,10 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
2199 return err; 2199 return err;
2200 2200
2201 err = sock_error(sock->sk); 2201 err = sock_error(sock->sk);
2202 if (err) 2202 if (err) {
2203 datagrams = err;
2203 goto out_put; 2204 goto out_put;
2205 }
2204 2206
2205 entry = mmsg; 2207 entry = mmsg;
2206 compat_entry = (struct compat_mmsghdr __user *)mmsg; 2208 compat_entry = (struct compat_mmsghdr __user *)mmsg;
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 06095cc8815e..1f0687d8e3d7 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -541,9 +541,13 @@ gss_setup_upcall(struct gss_auth *gss_auth, struct rpc_cred *cred)
541 return gss_new; 541 return gss_new;
542 gss_msg = gss_add_msg(gss_new); 542 gss_msg = gss_add_msg(gss_new);
543 if (gss_msg == gss_new) { 543 if (gss_msg == gss_new) {
544 int res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg); 544 int res;
545 atomic_inc(&gss_msg->count);
546 res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
545 if (res) { 547 if (res) {
546 gss_unhash_msg(gss_new); 548 gss_unhash_msg(gss_new);
549 atomic_dec(&gss_msg->count);
550 gss_release_msg(gss_new);
547 gss_msg = ERR_PTR(res); 551 gss_msg = ERR_PTR(res);
548 } 552 }
549 } else 553 } else
@@ -836,6 +840,7 @@ gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
836 warn_gssd(); 840 warn_gssd();
837 gss_release_msg(gss_msg); 841 gss_release_msg(gss_msg);
838 } 842 }
843 gss_release_msg(gss_msg);
839} 844}
840 845
841static void gss_pipe_dentry_destroy(struct dentry *dir, 846static void gss_pipe_dentry_destroy(struct dentry *dir,
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index eeeba5adee6d..2410d557ae39 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -260,7 +260,7 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
260 if (!oa->data) 260 if (!oa->data)
261 return -ENOMEM; 261 return -ENOMEM;
262 262
263 creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL); 263 creds = kzalloc(sizeof(struct svc_cred), GFP_KERNEL);
264 if (!creds) { 264 if (!creds) {
265 kfree(oa->data); 265 kfree(oa->data);
266 return -ENOMEM; 266 return -ENOMEM;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 7a93922457ff..f28aeb2cfd32 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -337,6 +337,11 @@ out:
337 337
338static DEFINE_IDA(rpc_clids); 338static DEFINE_IDA(rpc_clids);
339 339
340void rpc_cleanup_clids(void)
341{
342 ida_destroy(&rpc_clids);
343}
344
340static int rpc_alloc_clid(struct rpc_clnt *clnt) 345static int rpc_alloc_clid(struct rpc_clnt *clnt)
341{ 346{
342 int clid; 347 int clid;
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index ee5d3d253102..3142f38d1104 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -119,6 +119,7 @@ out:
119static void __exit 119static void __exit
120cleanup_sunrpc(void) 120cleanup_sunrpc(void)
121{ 121{
122 rpc_cleanup_clids();
122 rpcauth_remove_module(); 123 rpcauth_remove_module();
123 cleanup_socket_xprt(); 124 cleanup_socket_xprt();
124 svc_cleanup_xprt_sock(); 125 svc_cleanup_xprt_sock();
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 648f2a67f314..cb1381513c82 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -381,6 +381,10 @@ int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
381 dev = dev_get_by_name(net, driver_name); 381 dev = dev_get_by_name(net, driver_name);
382 if (!dev) 382 if (!dev)
383 return -ENODEV; 383 return -ENODEV;
384 if (tipc_mtu_bad(dev, 0)) {
385 dev_put(dev);
386 return -EINVAL;
387 }
384 388
385 /* Associate TIPC bearer with L2 bearer */ 389 /* Associate TIPC bearer with L2 bearer */
386 rcu_assign_pointer(b->media_ptr, dev); 390 rcu_assign_pointer(b->media_ptr, dev);
@@ -570,14 +574,19 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
570 if (!b_ptr) 574 if (!b_ptr)
571 return NOTIFY_DONE; 575 return NOTIFY_DONE;
572 576
573 b_ptr->mtu = dev->mtu;
574
575 switch (evt) { 577 switch (evt) {
576 case NETDEV_CHANGE: 578 case NETDEV_CHANGE:
577 if (netif_carrier_ok(dev)) 579 if (netif_carrier_ok(dev))
578 break; 580 break;
579 case NETDEV_GOING_DOWN: 581 case NETDEV_GOING_DOWN:
582 tipc_reset_bearer(net, b_ptr);
583 break;
580 case NETDEV_CHANGEMTU: 584 case NETDEV_CHANGEMTU:
585 if (tipc_mtu_bad(dev, 0)) {
586 bearer_disable(net, b_ptr);
587 break;
588 }
589 b_ptr->mtu = dev->mtu;
581 tipc_reset_bearer(net, b_ptr); 590 tipc_reset_bearer(net, b_ptr);
582 break; 591 break;
583 case NETDEV_CHANGEADDR: 592 case NETDEV_CHANGEADDR:
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 552185bc4773..5f11e18b1fa1 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -39,6 +39,7 @@
39 39
40#include "netlink.h" 40#include "netlink.h"
41#include "core.h" 41#include "core.h"
42#include "msg.h"
42#include <net/genetlink.h> 43#include <net/genetlink.h>
43 44
44#define MAX_MEDIA 3 45#define MAX_MEDIA 3
@@ -61,6 +62,9 @@
61#define TIPC_MEDIA_TYPE_IB 2 62#define TIPC_MEDIA_TYPE_IB 2
62#define TIPC_MEDIA_TYPE_UDP 3 63#define TIPC_MEDIA_TYPE_UDP 3
63 64
65/* minimum bearer MTU */
66#define TIPC_MIN_BEARER_MTU (MAX_H_SIZE + INT_H_SIZE)
67
64/** 68/**
65 * struct tipc_node_map - set of node identifiers 69 * struct tipc_node_map - set of node identifiers
66 * @count: # of nodes in set 70 * @count: # of nodes in set
@@ -226,4 +230,13 @@ void tipc_bearer_xmit(struct net *net, u32 bearer_id,
226void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id, 230void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
227 struct sk_buff_head *xmitq); 231 struct sk_buff_head *xmitq);
228 232
233/* check if device MTU is too low for tipc headers */
234static inline bool tipc_mtu_bad(struct net_device *dev, unsigned int reserve)
235{
236 if (dev->mtu >= TIPC_MIN_BEARER_MTU + reserve)
237 return false;
238 netdev_warn(dev, "MTU too low for tipc bearer\n");
239 return true;
240}
241
229#endif /* _TIPC_BEARER_H */ 242#endif /* _TIPC_BEARER_H */
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 03a842870c52..e2bdb07a49a2 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -69,6 +69,7 @@ static int __net_init tipc_init_net(struct net *net)
69 if (err) 69 if (err)
70 goto out_nametbl; 70 goto out_nametbl;
71 71
72 INIT_LIST_HEAD(&tn->dist_queue);
72 err = tipc_topsrv_start(net); 73 err = tipc_topsrv_start(net);
73 if (err) 74 if (err)
74 goto out_subscr; 75 goto out_subscr;
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 18e95a8020cd..fe3b89e9cde4 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -103,6 +103,9 @@ struct tipc_net {
103 spinlock_t nametbl_lock; 103 spinlock_t nametbl_lock;
104 struct name_table *nametbl; 104 struct name_table *nametbl;
105 105
106 /* Name dist queue */
107 struct list_head dist_queue;
108
106 /* Topology subscription server */ 109 /* Topology subscription server */
107 struct tipc_server *topsrv; 110 struct tipc_server *topsrv;
108 atomic_t subscription_count; 111 atomic_t subscription_count;
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index f51c8bdbea1c..c4c151bc000c 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -40,11 +40,6 @@
40 40
41int sysctl_tipc_named_timeout __read_mostly = 2000; 41int sysctl_tipc_named_timeout __read_mostly = 2000;
42 42
43/**
44 * struct tipc_dist_queue - queue holding deferred name table updates
45 */
46static struct list_head tipc_dist_queue = LIST_HEAD_INIT(tipc_dist_queue);
47
48struct distr_queue_item { 43struct distr_queue_item {
49 struct distr_item i; 44 struct distr_item i;
50 u32 dtype; 45 u32 dtype;
@@ -67,6 +62,8 @@ static void publ_to_item(struct distr_item *i, struct publication *p)
67 62
68/** 63/**
69 * named_prepare_buf - allocate & initialize a publication message 64 * named_prepare_buf - allocate & initialize a publication message
65 *
66 * The buffer returned is of size INT_H_SIZE + payload size
70 */ 67 */
71static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size, 68static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
72 u32 dest) 69 u32 dest)
@@ -171,9 +168,9 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
171 struct publication *publ; 168 struct publication *publ;
172 struct sk_buff *skb = NULL; 169 struct sk_buff *skb = NULL;
173 struct distr_item *item = NULL; 170 struct distr_item *item = NULL;
174 uint msg_dsz = (tipc_node_get_mtu(net, dnode, 0) / ITEM_SIZE) * 171 u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0) - INT_H_SIZE) /
175 ITEM_SIZE; 172 ITEM_SIZE) * ITEM_SIZE;
176 uint msg_rem = msg_dsz; 173 u32 msg_rem = msg_dsz;
177 174
178 list_for_each_entry(publ, pls, local_list) { 175 list_for_each_entry(publ, pls, local_list) {
179 /* Prepare next buffer: */ 176 /* Prepare next buffer: */
@@ -340,9 +337,11 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
340 * tipc_named_add_backlog - add a failed name table update to the backlog 337 * tipc_named_add_backlog - add a failed name table update to the backlog
341 * 338 *
342 */ 339 */
343static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node) 340static void tipc_named_add_backlog(struct net *net, struct distr_item *i,
341 u32 type, u32 node)
344{ 342{
345 struct distr_queue_item *e; 343 struct distr_queue_item *e;
344 struct tipc_net *tn = net_generic(net, tipc_net_id);
346 unsigned long now = get_jiffies_64(); 345 unsigned long now = get_jiffies_64();
347 346
348 e = kzalloc(sizeof(*e), GFP_ATOMIC); 347 e = kzalloc(sizeof(*e), GFP_ATOMIC);
@@ -352,7 +351,7 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
352 e->node = node; 351 e->node = node;
353 e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout); 352 e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout);
354 memcpy(e, i, sizeof(*i)); 353 memcpy(e, i, sizeof(*i));
355 list_add_tail(&e->next, &tipc_dist_queue); 354 list_add_tail(&e->next, &tn->dist_queue);
356} 355}
357 356
358/** 357/**
@@ -362,10 +361,11 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
362void tipc_named_process_backlog(struct net *net) 361void tipc_named_process_backlog(struct net *net)
363{ 362{
364 struct distr_queue_item *e, *tmp; 363 struct distr_queue_item *e, *tmp;
364 struct tipc_net *tn = net_generic(net, tipc_net_id);
365 char addr[16]; 365 char addr[16];
366 unsigned long now = get_jiffies_64(); 366 unsigned long now = get_jiffies_64();
367 367
368 list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) { 368 list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
369 if (time_after(e->expires, now)) { 369 if (time_after(e->expires, now)) {
370 if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype)) 370 if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype))
371 continue; 371 continue;
@@ -405,7 +405,7 @@ void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq)
405 node = msg_orignode(msg); 405 node = msg_orignode(msg);
406 while (count--) { 406 while (count--) {
407 if (!tipc_update_nametbl(net, item, node, mtype)) 407 if (!tipc_update_nametbl(net, item, node, mtype))
408 tipc_named_add_backlog(item, mtype, node); 408 tipc_named_add_backlog(net, item, mtype, node);
409 item++; 409 item++;
410 } 410 }
411 kfree_skb(skb); 411 kfree_skb(skb);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 3926b561f873..2df0b98d4a32 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -102,9 +102,10 @@ static unsigned int tipc_hashfn(u32 addr)
102 102
103static void tipc_node_kref_release(struct kref *kref) 103static void tipc_node_kref_release(struct kref *kref)
104{ 104{
105 struct tipc_node *node = container_of(kref, struct tipc_node, kref); 105 struct tipc_node *n = container_of(kref, struct tipc_node, kref);
106 106
107 tipc_node_delete(node); 107 kfree(n->bc_entry.link);
108 kfree_rcu(n, rcu);
108} 109}
109 110
110void tipc_node_put(struct tipc_node *node) 111void tipc_node_put(struct tipc_node *node)
@@ -216,21 +217,20 @@ static void tipc_node_delete(struct tipc_node *node)
216{ 217{
217 list_del_rcu(&node->list); 218 list_del_rcu(&node->list);
218 hlist_del_rcu(&node->hash); 219 hlist_del_rcu(&node->hash);
219 kfree(node->bc_entry.link); 220 tipc_node_put(node);
220 kfree_rcu(node, rcu); 221
222 del_timer_sync(&node->timer);
223 tipc_node_put(node);
221} 224}
222 225
223void tipc_node_stop(struct net *net) 226void tipc_node_stop(struct net *net)
224{ 227{
225 struct tipc_net *tn = net_generic(net, tipc_net_id); 228 struct tipc_net *tn = tipc_net(net);
226 struct tipc_node *node, *t_node; 229 struct tipc_node *node, *t_node;
227 230
228 spin_lock_bh(&tn->node_list_lock); 231 spin_lock_bh(&tn->node_list_lock);
229 list_for_each_entry_safe(node, t_node, &tn->node_list, list) { 232 list_for_each_entry_safe(node, t_node, &tn->node_list, list)
230 if (del_timer(&node->timer)) 233 tipc_node_delete(node);
231 tipc_node_put(node);
232 tipc_node_put(node);
233 }
234 spin_unlock_bh(&tn->node_list_lock); 234 spin_unlock_bh(&tn->node_list_lock);
235} 235}
236 236
@@ -313,9 +313,7 @@ static void tipc_node_timeout(unsigned long data)
313 if (rc & TIPC_LINK_DOWN_EVT) 313 if (rc & TIPC_LINK_DOWN_EVT)
314 tipc_node_link_down(n, bearer_id, false); 314 tipc_node_link_down(n, bearer_id, false);
315 } 315 }
316 if (!mod_timer(&n->timer, jiffies + n->keepalive_intv)) 316 mod_timer(&n->timer, jiffies + n->keepalive_intv);
317 tipc_node_get(n);
318 tipc_node_put(n);
319} 317}
320 318
321/** 319/**
@@ -730,7 +728,7 @@ static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
730 state = SELF_UP_PEER_UP; 728 state = SELF_UP_PEER_UP;
731 break; 729 break;
732 case SELF_LOST_CONTACT_EVT: 730 case SELF_LOST_CONTACT_EVT:
733 state = SELF_DOWN_PEER_LEAVING; 731 state = SELF_DOWN_PEER_DOWN;
734 break; 732 break;
735 case SELF_ESTABL_CONTACT_EVT: 733 case SELF_ESTABL_CONTACT_EVT:
736 case PEER_LOST_CONTACT_EVT: 734 case PEER_LOST_CONTACT_EVT:
@@ -749,7 +747,7 @@ static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
749 state = SELF_UP_PEER_UP; 747 state = SELF_UP_PEER_UP;
750 break; 748 break;
751 case PEER_LOST_CONTACT_EVT: 749 case PEER_LOST_CONTACT_EVT:
752 state = SELF_LEAVING_PEER_DOWN; 750 state = SELF_DOWN_PEER_DOWN;
753 break; 751 break;
754 case SELF_LOST_CONTACT_EVT: 752 case SELF_LOST_CONTACT_EVT:
755 case PEER_ESTABL_CONTACT_EVT: 753 case PEER_ESTABL_CONTACT_EVT:
diff --git a/net/tipc/server.c b/net/tipc/server.c
index 922e04a43396..50f5b0ca7b3c 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -452,6 +452,11 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid,
452 if (!con) 452 if (!con)
453 return -EINVAL; 453 return -EINVAL;
454 454
455 if (!test_bit(CF_CONNECTED, &con->flags)) {
456 conn_put(con);
457 return 0;
458 }
459
455 e = tipc_alloc_entry(data, len); 460 e = tipc_alloc_entry(data, len);
456 if (!e) { 461 if (!e) {
457 conn_put(con); 462 conn_put(con);
@@ -465,12 +470,8 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid,
465 list_add_tail(&e->list, &con->outqueue); 470 list_add_tail(&e->list, &con->outqueue);
466 spin_unlock_bh(&con->outqueue_lock); 471 spin_unlock_bh(&con->outqueue_lock);
467 472
468 if (test_bit(CF_CONNECTED, &con->flags)) { 473 if (!queue_work(s->send_wq, &con->swork))
469 if (!queue_work(s->send_wq, &con->swork))
470 conn_put(con);
471 } else {
472 conn_put(con); 474 conn_put(con);
473 }
474 return 0; 475 return 0;
475} 476}
476 477
@@ -494,7 +495,7 @@ static void tipc_send_to_sock(struct tipc_conn *con)
494 int ret; 495 int ret;
495 496
496 spin_lock_bh(&con->outqueue_lock); 497 spin_lock_bh(&con->outqueue_lock);
497 while (1) { 498 while (test_bit(CF_CONNECTED, &con->flags)) {
498 e = list_entry(con->outqueue.next, struct outqueue_entry, 499 e = list_entry(con->outqueue.next, struct outqueue_entry,
499 list); 500 list);
500 if ((struct list_head *) e == &con->outqueue) 501 if ((struct list_head *) e == &con->outqueue)
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index b26b7a127773..65171f8e8c45 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -777,9 +777,11 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
777 * @tsk: receiving socket 777 * @tsk: receiving socket
778 * @skb: pointer to message buffer. 778 * @skb: pointer to message buffer.
779 */ 779 */
780static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb) 780static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
781 struct sk_buff_head *xmitq)
781{ 782{
782 struct sock *sk = &tsk->sk; 783 struct sock *sk = &tsk->sk;
784 u32 onode = tsk_own_node(tsk);
783 struct tipc_msg *hdr = buf_msg(skb); 785 struct tipc_msg *hdr = buf_msg(skb);
784 int mtyp = msg_type(hdr); 786 int mtyp = msg_type(hdr);
785 int conn_cong; 787 int conn_cong;
@@ -792,7 +794,8 @@ static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
792 794
793 if (mtyp == CONN_PROBE) { 795 if (mtyp == CONN_PROBE) {
794 msg_set_type(hdr, CONN_PROBE_REPLY); 796 msg_set_type(hdr, CONN_PROBE_REPLY);
795 tipc_sk_respond(sk, skb, TIPC_OK); 797 if (tipc_msg_reverse(onode, &skb, TIPC_OK))
798 __skb_queue_tail(xmitq, skb);
796 return; 799 return;
797 } else if (mtyp == CONN_ACK) { 800 } else if (mtyp == CONN_ACK) {
798 conn_cong = tsk_conn_cong(tsk); 801 conn_cong = tsk_conn_cong(tsk);
@@ -1647,7 +1650,8 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
1647 * 1650 *
1648 * Returns true if message was added to socket receive queue, otherwise false 1651 * Returns true if message was added to socket receive queue, otherwise false
1649 */ 1652 */
1650static bool filter_rcv(struct sock *sk, struct sk_buff *skb) 1653static bool filter_rcv(struct sock *sk, struct sk_buff *skb,
1654 struct sk_buff_head *xmitq)
1651{ 1655{
1652 struct socket *sock = sk->sk_socket; 1656 struct socket *sock = sk->sk_socket;
1653 struct tipc_sock *tsk = tipc_sk(sk); 1657 struct tipc_sock *tsk = tipc_sk(sk);
@@ -1657,7 +1661,7 @@ static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
1657 int usr = msg_user(hdr); 1661 int usr = msg_user(hdr);
1658 1662
1659 if (unlikely(msg_user(hdr) == CONN_MANAGER)) { 1663 if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
1660 tipc_sk_proto_rcv(tsk, skb); 1664 tipc_sk_proto_rcv(tsk, skb, xmitq);
1661 return false; 1665 return false;
1662 } 1666 }
1663 1667
@@ -1700,7 +1704,8 @@ static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
1700 return true; 1704 return true;
1701 1705
1702reject: 1706reject:
1703 tipc_sk_respond(sk, skb, err); 1707 if (tipc_msg_reverse(tsk_own_node(tsk), &skb, err))
1708 __skb_queue_tail(xmitq, skb);
1704 return false; 1709 return false;
1705} 1710}
1706 1711
@@ -1716,9 +1721,24 @@ reject:
1716static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb) 1721static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
1717{ 1722{
1718 unsigned int truesize = skb->truesize; 1723 unsigned int truesize = skb->truesize;
1724 struct sk_buff_head xmitq;
1725 u32 dnode, selector;
1719 1726
1720 if (likely(filter_rcv(sk, skb))) 1727 __skb_queue_head_init(&xmitq);
1728
1729 if (likely(filter_rcv(sk, skb, &xmitq))) {
1721 atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt); 1730 atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
1731 return 0;
1732 }
1733
1734 if (skb_queue_empty(&xmitq))
1735 return 0;
1736
1737 /* Send response/rejected message */
1738 skb = __skb_dequeue(&xmitq);
1739 dnode = msg_destnode(buf_msg(skb));
1740 selector = msg_origport(buf_msg(skb));
1741 tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
1722 return 0; 1742 return 0;
1723} 1743}
1724 1744
@@ -1732,12 +1752,13 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
1732 * Caller must hold socket lock 1752 * Caller must hold socket lock
1733 */ 1753 */
1734static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, 1754static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
1735 u32 dport) 1755 u32 dport, struct sk_buff_head *xmitq)
1736{ 1756{
1757 unsigned long time_limit = jiffies + 2;
1758 struct sk_buff *skb;
1737 unsigned int lim; 1759 unsigned int lim;
1738 atomic_t *dcnt; 1760 atomic_t *dcnt;
1739 struct sk_buff *skb; 1761 u32 onode;
1740 unsigned long time_limit = jiffies + 2;
1741 1762
1742 while (skb_queue_len(inputq)) { 1763 while (skb_queue_len(inputq)) {
1743 if (unlikely(time_after_eq(jiffies, time_limit))) 1764 if (unlikely(time_after_eq(jiffies, time_limit)))
@@ -1749,20 +1770,22 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
1749 1770
1750 /* Add message directly to receive queue if possible */ 1771 /* Add message directly to receive queue if possible */
1751 if (!sock_owned_by_user(sk)) { 1772 if (!sock_owned_by_user(sk)) {
1752 filter_rcv(sk, skb); 1773 filter_rcv(sk, skb, xmitq);
1753 continue; 1774 continue;
1754 } 1775 }
1755 1776
1756 /* Try backlog, compensating for double-counted bytes */ 1777 /* Try backlog, compensating for double-counted bytes */
1757 dcnt = &tipc_sk(sk)->dupl_rcvcnt; 1778 dcnt = &tipc_sk(sk)->dupl_rcvcnt;
1758 if (sk->sk_backlog.len) 1779 if (!sk->sk_backlog.len)
1759 atomic_set(dcnt, 0); 1780 atomic_set(dcnt, 0);
1760 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt); 1781 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
1761 if (likely(!sk_add_backlog(sk, skb, lim))) 1782 if (likely(!sk_add_backlog(sk, skb, lim)))
1762 continue; 1783 continue;
1763 1784
1764 /* Overload => reject message back to sender */ 1785 /* Overload => reject message back to sender */
1765 tipc_sk_respond(sk, skb, TIPC_ERR_OVERLOAD); 1786 onode = tipc_own_addr(sock_net(sk));
1787 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
1788 __skb_queue_tail(xmitq, skb);
1766 break; 1789 break;
1767 } 1790 }
1768} 1791}
@@ -1775,12 +1798,14 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
1775 */ 1798 */
1776void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq) 1799void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
1777{ 1800{
1801 struct sk_buff_head xmitq;
1778 u32 dnode, dport = 0; 1802 u32 dnode, dport = 0;
1779 int err; 1803 int err;
1780 struct tipc_sock *tsk; 1804 struct tipc_sock *tsk;
1781 struct sock *sk; 1805 struct sock *sk;
1782 struct sk_buff *skb; 1806 struct sk_buff *skb;
1783 1807
1808 __skb_queue_head_init(&xmitq);
1784 while (skb_queue_len(inputq)) { 1809 while (skb_queue_len(inputq)) {
1785 dport = tipc_skb_peek_port(inputq, dport); 1810 dport = tipc_skb_peek_port(inputq, dport);
1786 tsk = tipc_sk_lookup(net, dport); 1811 tsk = tipc_sk_lookup(net, dport);
@@ -1788,9 +1813,14 @@ void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
1788 if (likely(tsk)) { 1813 if (likely(tsk)) {
1789 sk = &tsk->sk; 1814 sk = &tsk->sk;
1790 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) { 1815 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
1791 tipc_sk_enqueue(inputq, sk, dport); 1816 tipc_sk_enqueue(inputq, sk, dport, &xmitq);
1792 spin_unlock_bh(&sk->sk_lock.slock); 1817 spin_unlock_bh(&sk->sk_lock.slock);
1793 } 1818 }
1819 /* Send pending response/rejected messages, if any */
1820 while ((skb = __skb_dequeue(&xmitq))) {
1821 dnode = msg_destnode(buf_msg(skb));
1822 tipc_node_xmit_skb(net, skb, dnode, dport);
1823 }
1794 sock_put(sk); 1824 sock_put(sk);
1795 continue; 1825 continue;
1796 } 1826 }
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 6af78c6276b4..78d6b78de29d 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -52,7 +52,7 @@
52/* IANA assigned UDP port */ 52/* IANA assigned UDP port */
53#define UDP_PORT_DEFAULT 6118 53#define UDP_PORT_DEFAULT 6118
54 54
55#define UDP_MIN_HEADROOM 28 55#define UDP_MIN_HEADROOM 48
56 56
57static const struct nla_policy tipc_nl_udp_policy[TIPC_NLA_UDP_MAX + 1] = { 57static const struct nla_policy tipc_nl_udp_policy[TIPC_NLA_UDP_MAX + 1] = {
58 [TIPC_NLA_UDP_UNSPEC] = {.type = NLA_UNSPEC}, 58 [TIPC_NLA_UDP_UNSPEC] = {.type = NLA_UNSPEC},
@@ -376,6 +376,11 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
376 udp_conf.local_ip.s_addr = htonl(INADDR_ANY); 376 udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
377 udp_conf.use_udp_checksums = false; 377 udp_conf.use_udp_checksums = false;
378 ub->ifindex = dev->ifindex; 378 ub->ifindex = dev->ifindex;
379 if (tipc_mtu_bad(dev, sizeof(struct iphdr) +
380 sizeof(struct udphdr))) {
381 err = -EINVAL;
382 goto err;
383 }
379 b->mtu = dev->mtu - sizeof(struct iphdr) 384 b->mtu = dev->mtu - sizeof(struct iphdr)
380 - sizeof(struct udphdr); 385 - sizeof(struct udphdr);
381#if IS_ENABLED(CONFIG_IPV6) 386#if IS_ENABLED(CONFIG_IPV6)
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 73f75258ce46..e05ec54ac53f 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -994,9 +994,11 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
994 unsigned int hash; 994 unsigned int hash;
995 struct unix_address *addr; 995 struct unix_address *addr;
996 struct hlist_head *list; 996 struct hlist_head *list;
997 struct path path = { NULL, NULL };
997 998
998 err = -EINVAL; 999 err = -EINVAL;
999 if (sunaddr->sun_family != AF_UNIX) 1000 if (addr_len < offsetofend(struct sockaddr_un, sun_family) ||
1001 sunaddr->sun_family != AF_UNIX)
1000 goto out; 1002 goto out;
1001 1003
1002 if (addr_len == sizeof(short)) { 1004 if (addr_len == sizeof(short)) {
@@ -1009,9 +1011,20 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1009 goto out; 1011 goto out;
1010 addr_len = err; 1012 addr_len = err;
1011 1013
1014 if (sun_path[0]) {
1015 umode_t mode = S_IFSOCK |
1016 (SOCK_INODE(sock)->i_mode & ~current_umask());
1017 err = unix_mknod(sun_path, mode, &path);
1018 if (err) {
1019 if (err == -EEXIST)
1020 err = -EADDRINUSE;
1021 goto out;
1022 }
1023 }
1024
1012 err = mutex_lock_interruptible(&u->bindlock); 1025 err = mutex_lock_interruptible(&u->bindlock);
1013 if (err) 1026 if (err)
1014 goto out; 1027 goto out_put;
1015 1028
1016 err = -EINVAL; 1029 err = -EINVAL;
1017 if (u->addr) 1030 if (u->addr)
@@ -1028,16 +1041,6 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1028 atomic_set(&addr->refcnt, 1); 1041 atomic_set(&addr->refcnt, 1);
1029 1042
1030 if (sun_path[0]) { 1043 if (sun_path[0]) {
1031 struct path path;
1032 umode_t mode = S_IFSOCK |
1033 (SOCK_INODE(sock)->i_mode & ~current_umask());
1034 err = unix_mknod(sun_path, mode, &path);
1035 if (err) {
1036 if (err == -EEXIST)
1037 err = -EADDRINUSE;
1038 unix_release_addr(addr);
1039 goto out_up;
1040 }
1041 addr->hash = UNIX_HASH_SIZE; 1044 addr->hash = UNIX_HASH_SIZE;
1042 hash = d_real_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1); 1045 hash = d_real_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
1043 spin_lock(&unix_table_lock); 1046 spin_lock(&unix_table_lock);
@@ -1064,6 +1067,9 @@ out_unlock:
1064 spin_unlock(&unix_table_lock); 1067 spin_unlock(&unix_table_lock);
1065out_up: 1068out_up:
1066 mutex_unlock(&u->bindlock); 1069 mutex_unlock(&u->bindlock);
1070out_put:
1071 if (err)
1072 path_put(&path);
1067out: 1073out:
1068 return err; 1074 return err;
1069} 1075}
@@ -1103,6 +1109,10 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
1103 unsigned int hash; 1109 unsigned int hash;
1104 int err; 1110 int err;
1105 1111
1112 err = -EINVAL;
1113 if (alen < offsetofend(struct sockaddr, sa_family))
1114 goto out;
1115
1106 if (addr->sa_family != AF_UNSPEC) { 1116 if (addr->sa_family != AF_UNSPEC) {
1107 err = unix_mkname(sunaddr, alen, &hash); 1117 err = unix_mkname(sunaddr, alen, &hash);
1108 if (err < 0) 1118 if (err < 0)
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 6a0d48525fcf..c36757e72844 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -146,6 +146,7 @@ void unix_notinflight(struct user_struct *user, struct file *fp)
146 if (s) { 146 if (s) {
147 struct unix_sock *u = unix_sk(s); 147 struct unix_sock *u = unix_sk(s);
148 148
149 BUG_ON(!atomic_long_read(&u->inflight));
149 BUG_ON(list_empty(&u->link)); 150 BUG_ON(list_empty(&u->link));
150 151
151 if (atomic_long_dec_and_test(&u->inflight)) 152 if (atomic_long_dec_and_test(&u->inflight))
@@ -341,6 +342,14 @@ void unix_gc(void)
341 } 342 }
342 list_del(&cursor); 343 list_del(&cursor);
343 344
345 /* Now gc_candidates contains only garbage. Restore original
346 * inflight counters for these as well, and remove the skbuffs
347 * which are creating the cycle(s).
348 */
349 skb_queue_head_init(&hitlist);
350 list_for_each_entry(u, &gc_candidates, link)
351 scan_children(&u->sk, inc_inflight, &hitlist);
352
344 /* not_cycle_list contains those sockets which do not make up a 353 /* not_cycle_list contains those sockets which do not make up a
345 * cycle. Restore these to the inflight list. 354 * cycle. Restore these to the inflight list.
346 */ 355 */
@@ -350,14 +359,6 @@ void unix_gc(void)
350 list_move_tail(&u->link, &gc_inflight_list); 359 list_move_tail(&u->link, &gc_inflight_list);
351 } 360 }
352 361
353 /* Now gc_candidates contains only garbage. Restore original
354 * inflight counters for these as well, and remove the skbuffs
355 * which are creating the cycle(s).
356 */
357 skb_queue_head_init(&hitlist);
358 list_for_each_entry(u, &gc_candidates, link)
359 scan_children(&u->sk, inc_inflight, &hitlist);
360
361 spin_unlock(&unix_gc_lock); 362 spin_unlock(&unix_gc_lock);
362 363
363 /* Here we are. Hitlist is filled. Die. */ 364 /* Here we are. Hitlist is filled. Die. */
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 0a369bb440e7..662bdd20a748 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -842,7 +842,7 @@ static void vmci_transport_peer_detach_cb(u32 sub_id,
842 * qp_handle. 842 * qp_handle.
843 */ 843 */
844 if (vmci_handle_is_invalid(e_payload->handle) || 844 if (vmci_handle_is_invalid(e_payload->handle) ||
845 vmci_handle_is_equal(trans->qp_handle, e_payload->handle)) 845 !vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
846 return; 846 return;
847 847
848 /* We don't ask for delayed CBs when we subscribe to this event (we 848 /* We don't ask for delayed CBs when we subscribe to this event (we
@@ -2154,7 +2154,7 @@ module_exit(vmci_transport_exit);
2154 2154
2155MODULE_AUTHOR("VMware, Inc."); 2155MODULE_AUTHOR("VMware, Inc.");
2156MODULE_DESCRIPTION("VMCI transport for Virtual Sockets"); 2156MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
2157MODULE_VERSION("1.0.2.0-k"); 2157MODULE_VERSION("1.0.3.0-k");
2158MODULE_LICENSE("GPL v2"); 2158MODULE_LICENSE("GPL v2");
2159MODULE_ALIAS("vmware_vsock"); 2159MODULE_ALIAS("vmware_vsock");
2160MODULE_ALIAS_NETPROTO(PF_VSOCK); 2160MODULE_ALIAS_NETPROTO(PF_VSOCK);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 1f0de6d74daa..de10e3c0e2a4 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -302,8 +302,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
302 [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 }, 302 [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
303 [NL80211_ATTR_PID] = { .type = NLA_U32 }, 303 [NL80211_ATTR_PID] = { .type = NLA_U32 },
304 [NL80211_ATTR_4ADDR] = { .type = NLA_U8 }, 304 [NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
305 [NL80211_ATTR_PMKID] = { .type = NLA_BINARY, 305 [NL80211_ATTR_PMKID] = { .len = WLAN_PMKID_LEN },
306 .len = WLAN_PMKID_LEN },
307 [NL80211_ATTR_DURATION] = { .type = NLA_U32 }, 306 [NL80211_ATTR_DURATION] = { .type = NLA_U32 },
308 [NL80211_ATTR_COOKIE] = { .type = NLA_U64 }, 307 [NL80211_ATTR_COOKIE] = { .type = NLA_U64 },
309 [NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED }, 308 [NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED },
@@ -359,6 +358,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
359 [NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 }, 358 [NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 },
360 [NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 }, 359 [NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 },
361 [NL80211_ATTR_P2P_OPPPS] = { .type = NLA_U8 }, 360 [NL80211_ATTR_P2P_OPPPS] = { .type = NLA_U8 },
361 [NL80211_ATTR_LOCAL_MESH_POWER_MODE] = {. type = NLA_U32 },
362 [NL80211_ATTR_ACL_POLICY] = {. type = NLA_U32 }, 362 [NL80211_ATTR_ACL_POLICY] = {. type = NLA_U32 },
363 [NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED }, 363 [NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED },
364 [NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 }, 364 [NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 },
@@ -492,21 +492,17 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
492{ 492{
493 int err; 493 int err;
494 494
495 rtnl_lock();
496
497 if (!cb->args[0]) { 495 if (!cb->args[0]) {
498 err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, 496 err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
499 nl80211_fam.attrbuf, nl80211_fam.maxattr, 497 nl80211_fam.attrbuf, nl80211_fam.maxattr,
500 nl80211_policy); 498 nl80211_policy);
501 if (err) 499 if (err)
502 goto out_unlock; 500 return err;
503 501
504 *wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk), 502 *wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk),
505 nl80211_fam.attrbuf); 503 nl80211_fam.attrbuf);
506 if (IS_ERR(*wdev)) { 504 if (IS_ERR(*wdev))
507 err = PTR_ERR(*wdev); 505 return PTR_ERR(*wdev);
508 goto out_unlock;
509 }
510 *rdev = wiphy_to_rdev((*wdev)->wiphy); 506 *rdev = wiphy_to_rdev((*wdev)->wiphy);
511 /* 0 is the first index - add 1 to parse only once */ 507 /* 0 is the first index - add 1 to parse only once */
512 cb->args[0] = (*rdev)->wiphy_idx + 1; 508 cb->args[0] = (*rdev)->wiphy_idx + 1;
@@ -516,10 +512,8 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
516 struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1); 512 struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
517 struct wireless_dev *tmp; 513 struct wireless_dev *tmp;
518 514
519 if (!wiphy) { 515 if (!wiphy)
520 err = -ENODEV; 516 return -ENODEV;
521 goto out_unlock;
522 }
523 *rdev = wiphy_to_rdev(wiphy); 517 *rdev = wiphy_to_rdev(wiphy);
524 *wdev = NULL; 518 *wdev = NULL;
525 519
@@ -530,21 +524,11 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
530 } 524 }
531 } 525 }
532 526
533 if (!*wdev) { 527 if (!*wdev)
534 err = -ENODEV; 528 return -ENODEV;
535 goto out_unlock;
536 }
537 } 529 }
538 530
539 return 0; 531 return 0;
540 out_unlock:
541 rtnl_unlock();
542 return err;
543}
544
545static void nl80211_finish_wdev_dump(struct cfg80211_registered_device *rdev)
546{
547 rtnl_unlock();
548} 532}
549 533
550/* IE validation */ 534/* IE validation */
@@ -3884,9 +3868,10 @@ static int nl80211_dump_station(struct sk_buff *skb,
3884 int sta_idx = cb->args[2]; 3868 int sta_idx = cb->args[2];
3885 int err; 3869 int err;
3886 3870
3871 rtnl_lock();
3887 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); 3872 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
3888 if (err) 3873 if (err)
3889 return err; 3874 goto out_err;
3890 3875
3891 if (!wdev->netdev) { 3876 if (!wdev->netdev) {
3892 err = -EINVAL; 3877 err = -EINVAL;
@@ -3922,7 +3907,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
3922 cb->args[2] = sta_idx; 3907 cb->args[2] = sta_idx;
3923 err = skb->len; 3908 err = skb->len;
3924 out_err: 3909 out_err:
3925 nl80211_finish_wdev_dump(rdev); 3910 rtnl_unlock();
3926 3911
3927 return err; 3912 return err;
3928} 3913}
@@ -4639,9 +4624,10 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
4639 int path_idx = cb->args[2]; 4624 int path_idx = cb->args[2];
4640 int err; 4625 int err;
4641 4626
4627 rtnl_lock();
4642 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); 4628 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
4643 if (err) 4629 if (err)
4644 return err; 4630 goto out_err;
4645 4631
4646 if (!rdev->ops->dump_mpath) { 4632 if (!rdev->ops->dump_mpath) {
4647 err = -EOPNOTSUPP; 4633 err = -EOPNOTSUPP;
@@ -4675,7 +4661,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
4675 cb->args[2] = path_idx; 4661 cb->args[2] = path_idx;
4676 err = skb->len; 4662 err = skb->len;
4677 out_err: 4663 out_err:
4678 nl80211_finish_wdev_dump(rdev); 4664 rtnl_unlock();
4679 return err; 4665 return err;
4680} 4666}
4681 4667
@@ -4835,9 +4821,10 @@ static int nl80211_dump_mpp(struct sk_buff *skb,
4835 int path_idx = cb->args[2]; 4821 int path_idx = cb->args[2];
4836 int err; 4822 int err;
4837 4823
4824 rtnl_lock();
4838 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); 4825 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
4839 if (err) 4826 if (err)
4840 return err; 4827 goto out_err;
4841 4828
4842 if (!rdev->ops->dump_mpp) { 4829 if (!rdev->ops->dump_mpp) {
4843 err = -EOPNOTSUPP; 4830 err = -EOPNOTSUPP;
@@ -4870,7 +4857,7 @@ static int nl80211_dump_mpp(struct sk_buff *skb,
4870 cb->args[2] = path_idx; 4857 cb->args[2] = path_idx;
4871 err = skb->len; 4858 err = skb->len;
4872 out_err: 4859 out_err:
4873 nl80211_finish_wdev_dump(rdev); 4860 rtnl_unlock();
4874 return err; 4861 return err;
4875} 4862}
4876 4863
@@ -5718,6 +5705,10 @@ static int validate_scan_freqs(struct nlattr *freqs)
5718 struct nlattr *attr1, *attr2; 5705 struct nlattr *attr1, *attr2;
5719 int n_channels = 0, tmp1, tmp2; 5706 int n_channels = 0, tmp1, tmp2;
5720 5707
5708 nla_for_each_nested(attr1, freqs, tmp1)
5709 if (nla_len(attr1) != sizeof(u32))
5710 return 0;
5711
5721 nla_for_each_nested(attr1, freqs, tmp1) { 5712 nla_for_each_nested(attr1, freqs, tmp1) {
5722 n_channels++; 5713 n_channels++;
5723 /* 5714 /*
@@ -6806,9 +6797,12 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb)
6806 int start = cb->args[2], idx = 0; 6797 int start = cb->args[2], idx = 0;
6807 int err; 6798 int err;
6808 6799
6800 rtnl_lock();
6809 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); 6801 err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
6810 if (err) 6802 if (err) {
6803 rtnl_unlock();
6811 return err; 6804 return err;
6805 }
6812 6806
6813 wdev_lock(wdev); 6807 wdev_lock(wdev);
6814 spin_lock_bh(&rdev->bss_lock); 6808 spin_lock_bh(&rdev->bss_lock);
@@ -6831,7 +6825,7 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb)
6831 wdev_unlock(wdev); 6825 wdev_unlock(wdev);
6832 6826
6833 cb->args[2] = idx; 6827 cb->args[2] = idx;
6834 nl80211_finish_wdev_dump(rdev); 6828 rtnl_unlock();
6835 6829
6836 return skb->len; 6830 return skb->len;
6837} 6831}
@@ -6915,9 +6909,10 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb)
6915 int res; 6909 int res;
6916 bool radio_stats; 6910 bool radio_stats;
6917 6911
6912 rtnl_lock();
6918 res = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); 6913 res = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
6919 if (res) 6914 if (res)
6920 return res; 6915 goto out_err;
6921 6916
6922 /* prepare_wdev_dump parsed the attributes */ 6917 /* prepare_wdev_dump parsed the attributes */
6923 radio_stats = nl80211_fam.attrbuf[NL80211_ATTR_SURVEY_RADIO_STATS]; 6918 radio_stats = nl80211_fam.attrbuf[NL80211_ATTR_SURVEY_RADIO_STATS];
@@ -6958,7 +6953,7 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb)
6958 cb->args[2] = survey_idx; 6953 cb->args[2] = survey_idx;
6959 res = skb->len; 6954 res = skb->len;
6960 out_err: 6955 out_err:
6961 nl80211_finish_wdev_dump(rdev); 6956 rtnl_unlock();
6962 return res; 6957 return res;
6963} 6958}
6964 6959
@@ -10158,17 +10153,13 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
10158 void *data = NULL; 10153 void *data = NULL;
10159 unsigned int data_len = 0; 10154 unsigned int data_len = 0;
10160 10155
10161 rtnl_lock();
10162
10163 if (cb->args[0]) { 10156 if (cb->args[0]) {
10164 /* subtract the 1 again here */ 10157 /* subtract the 1 again here */
10165 struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1); 10158 struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
10166 struct wireless_dev *tmp; 10159 struct wireless_dev *tmp;
10167 10160
10168 if (!wiphy) { 10161 if (!wiphy)
10169 err = -ENODEV; 10162 return -ENODEV;
10170 goto out_unlock;
10171 }
10172 *rdev = wiphy_to_rdev(wiphy); 10163 *rdev = wiphy_to_rdev(wiphy);
10173 *wdev = NULL; 10164 *wdev = NULL;
10174 10165
@@ -10189,13 +10180,11 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
10189 nl80211_fam.attrbuf, nl80211_fam.maxattr, 10180 nl80211_fam.attrbuf, nl80211_fam.maxattr,
10190 nl80211_policy); 10181 nl80211_policy);
10191 if (err) 10182 if (err)
10192 goto out_unlock; 10183 return err;
10193 10184
10194 if (!nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_ID] || 10185 if (!nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_ID] ||
10195 !nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_SUBCMD]) { 10186 !nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_SUBCMD])
10196 err = -EINVAL; 10187 return -EINVAL;
10197 goto out_unlock;
10198 }
10199 10188
10200 *wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk), 10189 *wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk),
10201 nl80211_fam.attrbuf); 10190 nl80211_fam.attrbuf);
@@ -10204,10 +10193,8 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
10204 10193
10205 *rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk), 10194 *rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk),
10206 nl80211_fam.attrbuf); 10195 nl80211_fam.attrbuf);
10207 if (IS_ERR(*rdev)) { 10196 if (IS_ERR(*rdev))
10208 err = PTR_ERR(*rdev); 10197 return PTR_ERR(*rdev);
10209 goto out_unlock;
10210 }
10211 10198
10212 vid = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_ID]); 10199 vid = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_ID]);
10213 subcmd = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_SUBCMD]); 10200 subcmd = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_SUBCMD]);
@@ -10220,19 +10207,15 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
10220 if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd) 10207 if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd)
10221 continue; 10208 continue;
10222 10209
10223 if (!vcmd->dumpit) { 10210 if (!vcmd->dumpit)
10224 err = -EOPNOTSUPP; 10211 return -EOPNOTSUPP;
10225 goto out_unlock;
10226 }
10227 10212
10228 vcmd_idx = i; 10213 vcmd_idx = i;
10229 break; 10214 break;
10230 } 10215 }
10231 10216
10232 if (vcmd_idx < 0) { 10217 if (vcmd_idx < 0)
10233 err = -EOPNOTSUPP; 10218 return -EOPNOTSUPP;
10234 goto out_unlock;
10235 }
10236 10219
10237 if (nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_DATA]) { 10220 if (nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_DATA]) {
10238 data = nla_data(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_DATA]); 10221 data = nla_data(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_DATA]);
@@ -10249,9 +10232,6 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
10249 10232
10250 /* keep rtnl locked in successful case */ 10233 /* keep rtnl locked in successful case */
10251 return 0; 10234 return 0;
10252 out_unlock:
10253 rtnl_unlock();
10254 return err;
10255} 10235}
10256 10236
10257static int nl80211_vendor_cmd_dump(struct sk_buff *skb, 10237static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
@@ -10266,9 +10246,10 @@ static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
10266 int err; 10246 int err;
10267 struct nlattr *vendor_data; 10247 struct nlattr *vendor_data;
10268 10248
10249 rtnl_lock();
10269 err = nl80211_prepare_vendor_dump(skb, cb, &rdev, &wdev); 10250 err = nl80211_prepare_vendor_dump(skb, cb, &rdev, &wdev);
10270 if (err) 10251 if (err)
10271 return err; 10252 goto out;
10272 10253
10273 vcmd_idx = cb->args[2]; 10254 vcmd_idx = cb->args[2];
10274 data = (void *)cb->args[3]; 10255 data = (void *)cb->args[3];
@@ -10277,18 +10258,26 @@ static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
10277 10258
10278 if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV | 10259 if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV |
10279 WIPHY_VENDOR_CMD_NEED_NETDEV)) { 10260 WIPHY_VENDOR_CMD_NEED_NETDEV)) {
10280 if (!wdev) 10261 if (!wdev) {
10281 return -EINVAL; 10262 err = -EINVAL;
10263 goto out;
10264 }
10282 if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV && 10265 if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV &&
10283 !wdev->netdev) 10266 !wdev->netdev) {
10284 return -EINVAL; 10267 err = -EINVAL;
10268 goto out;
10269 }
10285 10270
10286 if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) { 10271 if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) {
10287 if (wdev->netdev && 10272 if (wdev->netdev &&
10288 !netif_running(wdev->netdev)) 10273 !netif_running(wdev->netdev)) {
10289 return -ENETDOWN; 10274 err = -ENETDOWN;
10290 if (!wdev->netdev && !wdev->p2p_started) 10275 goto out;
10291 return -ENETDOWN; 10276 }
10277 if (!wdev->netdev && !wdev->p2p_started) {
10278 err = -ENETDOWN;
10279 goto out;
10280 }
10292 } 10281 }
10293 } 10282 }
10294 10283
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index b5e665b3cfb0..77055a362041 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1216,7 +1216,7 @@ static inline int policy_to_flow_dir(int dir)
1216} 1216}
1217 1217
1218static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir, 1218static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
1219 const struct flowi *fl) 1219 const struct flowi *fl, u16 family)
1220{ 1220{
1221 struct xfrm_policy *pol; 1221 struct xfrm_policy *pol;
1222 struct net *net = sock_net(sk); 1222 struct net *net = sock_net(sk);
@@ -1225,8 +1225,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
1225 read_lock_bh(&net->xfrm.xfrm_policy_lock); 1225 read_lock_bh(&net->xfrm.xfrm_policy_lock);
1226 pol = rcu_dereference(sk->sk_policy[dir]); 1226 pol = rcu_dereference(sk->sk_policy[dir]);
1227 if (pol != NULL) { 1227 if (pol != NULL) {
1228 bool match = xfrm_selector_match(&pol->selector, fl, 1228 bool match = xfrm_selector_match(&pol->selector, fl, family);
1229 sk->sk_family);
1230 int err = 0; 1229 int err = 0;
1231 1230
1232 if (match) { 1231 if (match) {
@@ -1776,43 +1775,6 @@ free_dst:
1776 goto out; 1775 goto out;
1777} 1776}
1778 1777
1779#ifdef CONFIG_XFRM_SUB_POLICY
1780static int xfrm_dst_alloc_copy(void **target, const void *src, int size)
1781{
1782 if (!*target) {
1783 *target = kmalloc(size, GFP_ATOMIC);
1784 if (!*target)
1785 return -ENOMEM;
1786 }
1787
1788 memcpy(*target, src, size);
1789 return 0;
1790}
1791#endif
1792
1793static int xfrm_dst_update_parent(struct dst_entry *dst,
1794 const struct xfrm_selector *sel)
1795{
1796#ifdef CONFIG_XFRM_SUB_POLICY
1797 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1798 return xfrm_dst_alloc_copy((void **)&(xdst->partner),
1799 sel, sizeof(*sel));
1800#else
1801 return 0;
1802#endif
1803}
1804
1805static int xfrm_dst_update_origin(struct dst_entry *dst,
1806 const struct flowi *fl)
1807{
1808#ifdef CONFIG_XFRM_SUB_POLICY
1809 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1810 return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
1811#else
1812 return 0;
1813#endif
1814}
1815
1816static int xfrm_expand_policies(const struct flowi *fl, u16 family, 1778static int xfrm_expand_policies(const struct flowi *fl, u16 family,
1817 struct xfrm_policy **pols, 1779 struct xfrm_policy **pols,
1818 int *num_pols, int *num_xfrms) 1780 int *num_pols, int *num_xfrms)
@@ -1884,16 +1846,6 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
1884 1846
1885 xdst = (struct xfrm_dst *)dst; 1847 xdst = (struct xfrm_dst *)dst;
1886 xdst->num_xfrms = err; 1848 xdst->num_xfrms = err;
1887 if (num_pols > 1)
1888 err = xfrm_dst_update_parent(dst, &pols[1]->selector);
1889 else
1890 err = xfrm_dst_update_origin(dst, fl);
1891 if (unlikely(err)) {
1892 dst_free(dst);
1893 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
1894 return ERR_PTR(err);
1895 }
1896
1897 xdst->num_pols = num_pols; 1849 xdst->num_pols = num_pols;
1898 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols); 1850 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
1899 xdst->policy_genid = atomic_read(&pols[0]->genid); 1851 xdst->policy_genid = atomic_read(&pols[0]->genid);
@@ -2221,7 +2173,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
2221 sk = sk_const_to_full_sk(sk); 2173 sk = sk_const_to_full_sk(sk);
2222 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { 2174 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
2223 num_pols = 1; 2175 num_pols = 1;
2224 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); 2176 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family);
2225 err = xfrm_expand_policies(fl, family, pols, 2177 err = xfrm_expand_policies(fl, family, pols,
2226 &num_pols, &num_xfrms); 2178 &num_pols, &num_xfrms);
2227 if (err < 0) 2179 if (err < 0)
@@ -2500,7 +2452,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
2500 pol = NULL; 2452 pol = NULL;
2501 sk = sk_to_full_sk(sk); 2453 sk = sk_to_full_sk(sk);
2502 if (sk && sk->sk_policy[dir]) { 2454 if (sk && sk->sk_policy[dir]) {
2503 pol = xfrm_sk_policy_lookup(sk, dir, &fl); 2455 pol = xfrm_sk_policy_lookup(sk, dir, &fl, family);
2504 if (IS_ERR(pol)) { 2456 if (IS_ERR(pol)) {
2505 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 2457 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2506 return 0; 2458 return 0;
@@ -3030,6 +2982,11 @@ static int __net_init xfrm_net_init(struct net *net)
3030{ 2982{
3031 int rv; 2983 int rv;
3032 2984
2985 /* Initialize the per-net locks here */
2986 spin_lock_init(&net->xfrm.xfrm_state_lock);
2987 rwlock_init(&net->xfrm.xfrm_policy_lock);
2988 mutex_init(&net->xfrm.xfrm_cfg_mutex);
2989
3033 rv = xfrm_statistics_init(net); 2990 rv = xfrm_statistics_init(net);
3034 if (rv < 0) 2991 if (rv < 0)
3035 goto out_statistics; 2992 goto out_statistics;
@@ -3046,11 +3003,6 @@ static int __net_init xfrm_net_init(struct net *net)
3046 if (rv < 0) 3003 if (rv < 0)
3047 goto out; 3004 goto out;
3048 3005
3049 /* Initialize the per-net locks here */
3050 spin_lock_init(&net->xfrm.xfrm_state_lock);
3051 rwlock_init(&net->xfrm.xfrm_policy_lock);
3052 mutex_init(&net->xfrm.xfrm_cfg_mutex);
3053
3054 return 0; 3006 return 0;
3055 3007
3056out: 3008out:
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 805681a7d356..7a5a64e70b4d 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -412,7 +412,14 @@ static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_es
412 up = nla_data(rp); 412 up = nla_data(rp);
413 ulen = xfrm_replay_state_esn_len(up); 413 ulen = xfrm_replay_state_esn_len(up);
414 414
415 if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen) 415 /* Check the overall length and the internal bitmap length to avoid
416 * potential overflow. */
417 if (nla_len(rp) < ulen ||
418 xfrm_replay_state_esn_len(replay_esn) != ulen ||
419 replay_esn->bmp_len != up->bmp_len)
420 return -EINVAL;
421
422 if (up->replay_window > up->bmp_len * sizeof(__u32) * 8)
416 return -EINVAL; 423 return -EINVAL;
417 424
418 return 0; 425 return 0;