From: David S. Miller
Date: Mon, 10 Dec 2018 05:27:48 +0000 (-0800)
Subject: Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
X-Git-Tag: v5.0-rc1~129^2~125
X-Git-Url: https://asedeno.scripts.mit.edu/gitweb/?a=commitdiff_plain;h=4cc1feeb6ffc2799f8badb4dea77c637d340cb0d;p=linux.git

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Several conflicts, seemingly all over the place.

I used Stephen Rothwell's sample resolutions for many of these, if not
just to double check my own work, so definitely the credit largely
goes to him.

The NFP conflict consisted of a bug fix (moving operations past the
rhashtable operation) while changing the initial argument in the
function call in the moved code.

The net/dsa/master.c conflict had to do with a bug fix making
dsa_master_set_mtu() static intermixed with the fix for the location
of the tagging attribute.

cls_flower had a conflict because the dup reject fix from Or
overlapped with the addition of port range classification.

__set_phy_supported()'s conflict was relatively easy to resolve
because Andrew fixed it in both trees, so it was just a matter of
taking the net-next copy.  Or at least I think it was :-)

Joe Stringer's fix to the handling of netns id 0 in bpf_sk_lookup()
intermixed with changes to how the sdif and caller_net are calculated
in these code paths in net-next.

The remaining BPF conflicts were largely about the addition of the
__bpf_md_ptr stuff in 'net' overlapping with adjustments and additions
to the relevant data structure where the MD pointer macros are used.

Signed-off-by: David S. Miller
---

4cc1feeb6ffc2799f8badb4dea77c637d340cb0d
diff --cc drivers/net/ethernet/netronome/nfp/flower/offload.c
index 545d94168874,2f49eb75f3cc..21499a5b3b6b
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@@ -469,10 -480,14 +464,15 @@@ nfp_flower_add_offload(struct nfp_app *
  	err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
  				     nfp_flower_table_params);
  	if (err)
- 		goto err_destroy_flow;
+ 		goto err_release_metadata;
+ 
 -	err = nfp_flower_xmit_flow(netdev, flow_pay,
++	err = nfp_flower_xmit_flow(app, flow_pay,
 +				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
 +	if (err)
 +		goto err_remove_rhash;
  
- 	port->tc_offload_cnt++;
+ 	if (port)
+ 		port->tc_offload_cnt++;
  
  	/* Deallocate flow payload when flower rule has been destroyed.
  	 */
  	kfree(key_layer);
diff --cc drivers/net/tun.c
index 6760b86547df,005020042be9..ea528248d7d0
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@@ -2401,17 -2381,11 +2401,18 @@@ static void tun_sock_write_space(struc
  		kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
  }
  
 +static void tun_put_page(struct tun_page *tpage)
 +{
 +	if (tpage->page)
 +		__page_frag_cache_drain(tpage->page, tpage->count);
 +}
 +
  static int tun_xdp_one(struct tun_struct *tun,
  		       struct tun_file *tfile,
 -		       struct xdp_buff *xdp, int *flush)
 +		       struct xdp_buff *xdp, int *flush,
 +		       struct tun_page *tpage)
  {
+ 	unsigned int datasize = xdp->data_end - xdp->data;
  	struct tun_xdp_hdr *hdr = xdp->data_hard_start;
  	struct virtio_net_hdr *gso = &hdr->gso;
  	struct tun_pcpu_stats *stats;
diff --cc include/net/sctp/structs.h
index 7eaa294d938d,feada358d872..003020eb6e66
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@@ -2077,10 -2073,10 +2077,12 @@@ struct sctp_association
  	int sent_cnt_removable;
  
 +	__u16 subscribe;
 +
  	__u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1];
  	__u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1];
+ 
+ 	struct rcu_head rcu;
  };
diff --cc include/uapi/linux/bpf.h
index 597afdbc1ab9,72c453a8bf50..ec8b40ff386e
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@@ -2481,8 -2473,7 +2494,8 @@@ struct __sk_buff
  	/* ... here. */
  	__u32 data_meta;
- 	struct bpf_flow_keys *flow_keys;
+ 	__bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
 +	__u64 tstamp;
  };
  
  struct bpf_tunnel_key {
diff --cc net/core/filter.c
index bd0df75dc7b6,8d2c629501e2..8659b40172d1
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@@ -5067,26 -4887,26 +5067,27 @@@ __bpf_sk_lookup(struct sk_buff *skb, st
  	struct sock *sk = NULL;
  	u8 family = AF_UNSPEC;
  	struct net *net;
 +	int sdif;
  
  	family = len == sizeof(tuple->ipv4) ? AF_INET : AF_INET6;
- 	if (unlikely(family == AF_UNSPEC || netns_id > U32_MAX || flags))
+ 	if (unlikely(family == AF_UNSPEC || flags ||
+ 		     !((s32)netns_id < 0 || netns_id <= S32_MAX)))
  		goto out;
  
 -	if (skb->dev)
 -		caller_net = dev_net(skb->dev);
 +	if (family == AF_INET)
 +		sdif = inet_sdif(skb);
  	else
 -		caller_net = sock_net(skb->sk);
 +		sdif = inet6_sdif(skb);
+ 
- 	if (netns_id) {
+ 	if ((s32)netns_id < 0) {
+ 		net = caller_net;
 -		sk = sk_lookup(net, tuple, skb, family, proto);
++		sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
+ 	} else {
  		net = get_net_ns_by_id(caller_net, netns_id);
  		if (unlikely(!net))
  			goto out;
 -		sk = sk_lookup(net, tuple, skb, family, proto);
 +		sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
  		put_net(net);
- 	} else {
- 		net = caller_net;
- 		sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
  	}
  
  	if (sk)
@@@ -5737,14 -5436,10 +5738,14 @@@ static bool bpf_skb_is_valid_access(in
  		if (size != size_default)
  			return false;
  		break;
- 	case bpf_ctx_range(struct __sk_buff, flow_keys):
- 		if (size != sizeof(struct bpf_flow_keys *))
+ 	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
+ 		if (size != sizeof(__u64))
  			return false;
  		break;
 +	case bpf_ctx_range(struct __sk_buff, tstamp):
 +		if (size != sizeof(__u64))
 +			return false;
 +		break;
  	default:
  		/* Only narrow read access allowed for now.
  		 */
  		if (type == BPF_WRITE) {
@@@ -5770,9 -5465,8 +5771,9 @@@ static bool sk_filter_is_valid_access(i
  	case bpf_ctx_range(struct __sk_buff, data):
  	case bpf_ctx_range(struct __sk_buff, data_meta):
  	case bpf_ctx_range(struct __sk_buff, data_end):
- 	case bpf_ctx_range(struct __sk_buff, flow_keys):
+ 	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
  	case bpf_ctx_range_till(struct __sk_buff, family, local_port):
 +	case bpf_ctx_range(struct __sk_buff, tstamp):
  		return false;
  	}
  
@@@ -5841,8 -5531,7 +5842,8 @@@ static bool lwt_is_valid_access(int off
  	case bpf_ctx_range(struct __sk_buff, tc_classid):
  	case bpf_ctx_range_till(struct __sk_buff, family, local_port):
  	case bpf_ctx_range(struct __sk_buff, data_meta):
- 	case bpf_ctx_range(struct __sk_buff, flow_keys):
+ 	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
 +	case bpf_ctx_range(struct __sk_buff, tstamp):
  		return false;
  	}
  
@@@ -6271,8 -5959,7 +6272,8 @@@ static bool sk_skb_is_valid_access(int
  	switch (off) {
  	case bpf_ctx_range(struct __sk_buff, tc_classid):
  	case bpf_ctx_range(struct __sk_buff, data_meta):
- 	case bpf_ctx_range(struct __sk_buff, flow_keys):
+ 	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
 +	case bpf_ctx_range(struct __sk_buff, tstamp):
  		return false;
  	}
  
diff --cc net/dsa/master.c
index d7d5145aa235,5e8c9bef78bd..71bb15f491c8
--- a/net/dsa/master.c
+++ b/net/dsa/master.c
@@@ -158,36 -158,31 +158,59 @@@ static void dsa_master_ethtool_teardown
  	cpu_dp->orig_ethtool_ops = NULL;
  }
  
+ static ssize_t tagging_show(struct device *d, struct device_attribute *attr,
+ 			    char *buf)
+ {
+ 	struct net_device *dev = to_net_dev(d);
+ 	struct dsa_port *cpu_dp = dev->dsa_ptr;
+ 
+ 	return sprintf(buf, "%s\n",
+ 		       dsa_tag_protocol_to_str(cpu_dp->tag_ops));
+ }
+ static DEVICE_ATTR_RO(tagging);
+ 
+ static struct attribute *dsa_slave_attrs[] = {
+ 	&dev_attr_tagging.attr,
+ 	NULL
+ };
+ 
+ static const struct attribute_group dsa_group = {
+ 	.name = "dsa",
+ 	.attrs = dsa_slave_attrs,
+ };
+ 
 +static void dsa_master_set_mtu(struct net_device *dev, struct dsa_port *cpu_dp)
 +{
 +	unsigned int mtu = ETH_DATA_LEN + cpu_dp->tag_ops->overhead;
 +	int err;
 +
 +	rtnl_lock();
 +	if (mtu <= dev->max_mtu) {
 +		err = dev_set_mtu(dev, mtu);
 +		if (err)
 +			netdev_dbg(dev, "Unable to set MTU to include for DSA overheads\n");
 +	}
 +	rtnl_unlock();
 +}
 +
 +static void dsa_master_reset_mtu(struct net_device *dev)
 +{
 +	int err;
 +
 +	rtnl_lock();
 +	err = dev_set_mtu(dev, ETH_DATA_LEN);
 +	if (err)
 +		netdev_dbg(dev,
 +			   "Unable to reset MTU to exclude DSA overheads\n");
 +	rtnl_unlock();
 +}
 +
  int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
  {
+ 	int ret;
+ 
 +	dsa_master_set_mtu(dev, cpu_dp);
 +
  	/* If we use a tagging format that doesn't have an ethertype
  	 * field, make sure that all packets from this point on get
  	 * sent to the tag format's receive function.
@@@ -201,8 -204,8 +232,9 @@@
  
  void dsa_master_teardown(struct net_device *dev)
  {
+ 	sysfs_remove_group(&dev->dev.kobj, &dsa_group);
  	dsa_master_ethtool_teardown(dev);
 +	dsa_master_reset_mtu(dev);
  
  	dev->dsa_ptr = NULL;
diff --cc net/ipv4/tcp_output.c
index d3b691f3a9e8,d1676d8a6ed7..c31badfee806
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@@ -1904,18 -1904,16 +1904,17 @@@ static int tso_fragment(struct sock *sk
   * This algorithm is from John Heffner.
   */
  static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 -				 bool *is_cwnd_limited, u32 max_segs)
+ 				 bool *is_cwnd_limited,
+ 				 bool *is_rwnd_limited,
+ 				 u32 max_segs)
  {
  	const struct inet_connection_sock *icsk = inet_csk(sk);
 -	u32 age, send_win, cong_win, limit, in_flight;
 +	u32 send_win, cong_win, limit, in_flight;
  	struct tcp_sock *tp = tcp_sk(sk);
  	struct sk_buff *head;
  	int win_divisor;
 +	s64 delta;
  
- 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
- 		goto send_now;
- 
  	if (icsk->icsk_ca_state >= TCP_CA_Recovery)
  		goto send_now;
  
@@@ -1976,15 -1967,32 +1975,32 @@@
  	head = tcp_rtx_queue_head(sk);
  	if (!head)
  		goto send_now;
 -	age = tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(head));
 +	delta = tp->tcp_clock_cache - head->tstamp;
  	/* If next ACK is likely to come too late (half srtt), do not defer */
 -	if (age < (tp->srtt_us >> 4))
 +	if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0)
  		goto send_now;
  
- 	/* Ok, it looks like it is advisable to defer. */
+ 	/* Ok, it looks like it is advisable to defer.
+ 	 * Three cases are tracked :
+ 	 * 1) We are cwnd-limited
+ 	 * 2) We are rwnd-limited
+ 	 * 3) We are application limited.
+ 	 */
+ 	if (cong_win < send_win) {
+ 		if (cong_win <= skb->len) {
+ 			*is_cwnd_limited = true;
+ 			return true;
+ 		}
+ 	} else {
+ 		if (send_win <= skb->len) {
+ 			*is_rwnd_limited = true;
+ 			return true;
+ 		}
+ 	}
  
- 	if (cong_win < send_win && cong_win <= skb->len)
- 		*is_cwnd_limited = true;
+ 	/* If this packet won't get more data, do not wait. */
+ 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+ 		goto send_now;
  
  	return true;
diff --cc net/sched/cls_flower.c
index 85e9f8e1da10,71312d7bd8f4..544811dded60
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@@ -1355,18 -1238,16 +1355,16 @@@ static int fl_change(struct net *net, s
  	if (err)
  		goto errout_idr;
  
- 	if (!tc_skip_sw(fnew->flags)) {
- 		if (!fold && __fl_lookup(fnew->mask, &fnew->mkey)) {
- 			err = -EEXIST;
- 			goto errout_mask;
- 		}
- 
- 		err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
- 					     fnew->mask->filter_ht_params);
- 		if (err)
- 			goto errout_mask;
 -	if (!fold && fl_lookup(fnew->mask, &fnew->mkey)) {
++	if (!fold && __fl_lookup(fnew->mask, &fnew->mkey)) {
 +		err = -EEXIST;
 +		goto errout_mask;
  	}
  
 +	err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
 +				     fnew->mask->filter_ht_params);
 +	if (err)
 +		goto errout_mask;
 +
  	if (!tc_skip_hw(fnew->flags)) {
  		err = fl_hw_replace_filter(tp, fnew, extack);
  		if (err)
diff --cc tools/include/uapi/linux/bpf.h
index 597afdbc1ab9,72c453a8bf50..ec8b40ff386e
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@@ -2481,8 -2473,7 +2494,8 @@@ struct __sk_buff
  	/* ... here. */
  	__u32 data_meta;
- 	struct bpf_flow_keys *flow_keys;
+ 	__bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
 +	__u64 tstamp;
  };
  
  struct bpf_tunnel_key {
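
A note on the __bpf_md_ptr() conflicts resolved above: the macro brought in
from 'net' wraps pointer members of UAPI context structs such as struct
__sk_buff so that each one always occupies an 8-byte, 8-byte-aligned slot,
keeping the structure layout identical for 32-bit and 64-bit user space
(which is also why the verifier-side size checks above compare against
sizeof(__u64)). A rough sketch of the idea follows; it is not necessarily the
exact header text, and example_ctx is a hypothetical struct used only for
illustration:

	/* Sketch: pad a pointer field to a stable 64-bit slot in a UAPI struct. */
	#define __bpf_md_ptr(type, name)	\
	union {					\
		type name;			\
		__u64 :64;			\
	} __attribute__((aligned(8)))

	struct example_ctx {
		__u32 data_meta;
		__bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
		__u64 tstamp;
	};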