asedeno.scripts.mit.edu Git - linux.git/commitdiff
hv_netvsc: empty current transmit aggregation if flow blocked
author: Stephen Hemminger <stephen@networkplumber.org>
Wed, 13 Dec 2017 00:48:40 +0000 (16:48 -0800)
committer: David S. Miller <davem@davemloft.net>
Wed, 13 Dec 2017 20:57:39 +0000 (15:57 -0500)
If the transmit queue is known full, then don't keep aggregating
data. And the cp_partial flag which indicates that the current
aggregation buffer is full can be folded in to avoid more
conditionals.

Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c

index b10c99751e3ac98812bc8220f300c9c5b6a212c4..0db3bd1ea06f5a71eb6fc67ba00abb066c7f3414 100644 (file)
@@ -194,7 +194,7 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
                                        const struct netvsc_device_info *info);
 int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx);
 void netvsc_device_remove(struct hv_device *device);
-int netvsc_send(struct net_device_context *ndc,
+int netvsc_send(struct net_device *net,
                struct hv_netvsc_packet *packet,
                struct rndis_message *rndis_msg,
                struct hv_page_buffer *page_buffer,
index 83fa55336c1b95f5cf1842845f9dffb538fc3ad0..17e529af79dcd2664e19651c4065a8b4fec7b597 100644 (file)
@@ -707,7 +707,7 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
                                    struct hv_netvsc_packet *packet,
                                    struct rndis_message *rndis_msg,
                                    struct hv_page_buffer *pb,
-                                   struct sk_buff *skb)
+                                   bool xmit_more)
 {
        char *start = net_device->send_buf;
        char *dest = start + (section_index * net_device->send_section_size)
@@ -720,7 +720,7 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
 
        /* Add padding */
        remain = packet->total_data_buflen & (net_device->pkt_align - 1);
-       if (skb->xmit_more && remain && !packet->cp_partial) {
+       if (xmit_more && remain) {
                padding = net_device->pkt_align - remain;
                rndis_msg->msg_len += padding;
                packet->total_data_buflen += padding;
@@ -829,12 +829,13 @@ static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
 }
 
 /* RCU already held by caller */
-int netvsc_send(struct net_device_context *ndev_ctx,
+int netvsc_send(struct net_device *ndev,
                struct hv_netvsc_packet *packet,
                struct rndis_message *rndis_msg,
                struct hv_page_buffer *pb,
                struct sk_buff *skb)
 {
+       struct net_device_context *ndev_ctx = netdev_priv(ndev);
        struct netvsc_device *net_device
                = rcu_dereference_bh(ndev_ctx->nvdev);
        struct hv_device *device = ndev_ctx->device_ctx;
@@ -845,7 +846,7 @@ int netvsc_send(struct net_device_context *ndev_ctx,
        struct multi_send_data *msdp;
        struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
        struct sk_buff *msd_skb = NULL;
-       bool try_batch;
+       bool try_batch, xmit_more;
 
        /* If device is rescinded, return error and packet will get dropped. */
        if (unlikely(!net_device || net_device->destroy))
@@ -896,10 +897,17 @@ int netvsc_send(struct net_device_context *ndev_ctx,
                }
        }
 
+       /* Keep aggregating only if stack says more data is coming
+        * and not doing mixed modes send and not flow blocked
+        */
+       xmit_more = skb->xmit_more &&
+               !packet->cp_partial &&
+               !netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
+
        if (section_index != NETVSC_INVALID_INDEX) {
                netvsc_copy_to_send_buf(net_device,
                                        section_index, msd_len,
-                                       packet, rndis_msg, pb, skb);
+                                       packet, rndis_msg, pb, xmit_more);
 
                packet->send_buf_index = section_index;
 
@@ -919,7 +927,7 @@ int netvsc_send(struct net_device_context *ndev_ctx,
                if (msdp->skb)
                        dev_consume_skb_any(msdp->skb);
 
-               if (skb->xmit_more && !packet->cp_partial) {
+               if (xmit_more) {
                        msdp->skb = skb;
                        msdp->pkt = packet;
                        msdp->count++;
index 5ab81611d6d9c420c65421dee6a6ff27dfe3cd07..c5584c2d440e033b649193b39435d0a458aaf694 100644 (file)
@@ -626,7 +626,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
        /* timestamp packet in software */
        skb_tx_timestamp(skb);
 
-       ret = netvsc_send(net_device_ctx, packet, rndis_msg, pb, skb);
+       ret = netvsc_send(net, packet, rndis_msg, pb, skb);
        if (likely(ret == 0))
                return NETDEV_TX_OK;
 
index 035976949177a92f4aa84ec4306474e7b528b6da..91a67c5297f7b5987161b621103277ca724f4637 100644 (file)
@@ -215,7 +215,6 @@ static int rndis_filter_send_request(struct rndis_device *dev,
        struct hv_netvsc_packet *packet;
        struct hv_page_buffer page_buf[2];
        struct hv_page_buffer *pb = page_buf;
-       struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
        int ret;
 
        /* Setup the packet to send it */
@@ -243,7 +242,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
        }
 
        rcu_read_lock_bh();
-       ret = netvsc_send(net_device_ctx, packet, NULL, pb, NULL);
+       ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL);
        rcu_read_unlock_bh();
 
        return ret;