net: allow fallback function to pass netdev
author    Alexander Duyck <alexander.h.duyck@intel.com>
          Mon, 9 Jul 2018 16:20:04 +0000 (12:20 -0400)
committer Jeff Kirsher <jeffrey.t.kirsher@intel.com>
          Mon, 9 Jul 2018 20:57:25 +0000 (13:57 -0700)
For most of these calls we can just pass NULL through to the fallback
function as the sb_dev. The only cases where we cannot are the cases where
we might be dealing with either an upper device or a driver that would
have configured things to support an sb_dev itself.
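
For reference, the core of the change is the fallback typedef itself; the
include/linux/netdevice.h hunk below grows the signature from two arguments
to three:

	/* before: the fallback had no way to learn about a subordinate device */
	typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
					       struct sk_buff *skb);

	/* after: callers pass the subordinate device, or NULL if there is none */
	typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
					       struct sk_buff *skb,
					       struct net_device *sb_dev);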

The only driver with a significant change in this patch set is ixgbe, where
we can drop the redundant functionality that existed in both the
ndo_select_queue function and the fallback function passed through to it.
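
Concretely, the ixgbe_main.c hunk below makes the driver forward sb_dev to
the fallback and apply its own FCoE queue steering only when no subordinate
device was requested; condensed, the selection path becomes:

	case htons(ETH_P_FIP):
		adapter = netdev_priv(dev);

		/* steer FCoE frames locally only when the stack did not
		 * ask for a subordinate device
		 */
		if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
			break;
		/* fall through */
	default:
		/* forward sb_dev instead of hard-coding NULL */
		return fallback(dev, skb, sb_dev);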

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
14 files changed:
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/net_failover.c
drivers/net/xen-netback/interface.c
include/linux/netdevice.h
net/core/dev.c
net/packet/af_packet.c

diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index e3befb1f9204afbde095e43f82b5c074abdfc3f3..c673ac2df65bdf3f9b4d03403be705b581505657 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2224,7 +2224,7 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
        if (skb_rx_queue_recorded(skb))
                qid = skb_get_rx_queue(skb);
        else
-               qid = fallback(dev, skb);
+               qid = fallback(dev, skb, NULL);
 
        return qid;
 }
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 32f548e6431db4a7b5b836ce3bfff62c7f504a67..eb890c4b3b2d2764563fc50d708079ac4a411d7d 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2116,7 +2116,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
        unsigned int q, port;
 
        if (!netdev_uses_dsa(dev))
-               return fallback(dev, skb);
+               return fallback(dev, skb, NULL);
 
        /* DSA tagging layer will have configured the correct queue */
        q = BRCM_TAG_GET_QUEUE(queue);
@@ -2124,7 +2124,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
        tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
 
        if (unlikely(!tx_ring))
-               return fallback(dev, skb);
+               return fallback(dev, skb, NULL);
 
        return tx_ring->index;
 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e4e1cf907ac65697e6f50bd582410080191e7c34..5a727d4729da7348075b75101154cca3cf515073 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1933,7 +1933,8 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
        }
 
        /* select a non-FCoE queue */
-       return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
+       return fallback(dev, skb, NULL) %
+              (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
 }
 
 void bnx2x_set_num_queues(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 5dc5e5604f051ae7a3e1f1eebc2f186d5a9b8680..40cf8dc9f16324330788f6bc8a593b441d47d44f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -973,7 +973,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
                return txq;
        }
 
-       return fallback(dev, skb) % dev->real_num_tx_queues;
+       return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
 }
 
 static int closest_timer(const struct sge *s, int time)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index ff7a74ec8f11f20ab7eda2f9af3424ef1f3ec4fc..948b3e0d18f4d89d9eeee371cacd1753eb734d91 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -2033,7 +2033,7 @@ hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
            is_multicast_ether_addr(eth_hdr->h_dest))
                return 0;
        else
-               return fallback(ndev, skb);
+               return fallback(ndev, skb, NULL);
 }
 
 static const struct net_device_ops hns_nic_netdev_ops = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 8c7a68c57afab07e6198e14d08b1881efe969b34..bd6d9ea27b4bb26f843e2d20776e9e52ae1bd111 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -8237,11 +8237,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
        case htons(ETH_P_FIP):
                adapter = netdev_priv(dev);
 
-               if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+               if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
                        break;
                /* fall through */
        default:
-               return fallback(dev, skb);
+               return fallback(dev, skb, sb_dev);
        }
 
        f = &adapter->ring_feature[RING_F_FCOE];
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index df2996618cd16a6573ab01cae0748202eb699a3b..1857ee0f0871d48285a6d3711f7c3e9a1e08a05f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -695,9 +695,9 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
        u16 rings_p_up = priv->num_tx_rings_p_up;
 
        if (netdev_get_num_tc(dev))
-               return fallback(dev, skb);
+               return fallback(dev, skb, NULL);
 
-       return fallback(dev, skb) % rings_p_up;
+       return fallback(dev, skb, NULL) % rings_p_up;
 }
 
 static void mlx4_bf_copy(void __iomem *dst, const void *src,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index dfcc3710b65f0f86d5ffb20a3303804a21c39f49..9106ea45e3cb7a4f9dac56c4b9ce93e84064331c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -115,7 +115,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
                       select_queue_fallback_t fallback)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
-       int channel_ix = fallback(dev, skb);
+       int channel_ix = fallback(dev, skb, NULL);
        u16 num_channels;
        int up = 0;
 
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 98c0107d6ca17744f26765c655f7f3d72271d5d4..cf4f40a04194a7fd31e619f41b56a0878186dab1 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -345,7 +345,7 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
                        txq = vf_ops->ndo_select_queue(vf_netdev, skb,
                                                       sb_dev, fallback);
                else
-                       txq = fallback(vf_netdev, skb);
+                       txq = fallback(vf_netdev, skb, NULL);
 
                /* Record the queue selected by VF so that it can be
                 * used for common case where VF has more queues than
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index 78b549698b7b3ff92babc97a8643d50dd3006cc1..d00d42c845b76207afa21fe5d45686fd91323b30 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -131,7 +131,7 @@ static u16 net_failover_select_queue(struct net_device *dev,
                        txq = ops->ndo_select_queue(primary_dev, skb,
                                                    sb_dev, fallback);
                else
-                       txq = fallback(primary_dev, skb);
+                       txq = fallback(primary_dev, skb, NULL);
 
                qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
 
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 19c4c585f472340428856889634c0428f77d810f..92274c2372008a57ba12ca960bafa84cd2eac7b3 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -155,7 +155,7 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
        unsigned int size = vif->hash.size;
 
        if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
-               return fallback(dev, skb) % dev->real_num_tx_queues;
+               return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
 
        xenvif_set_skb_hash(vif, skb);
 
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index bbf062c1ca8a31e9bc15666eb431801f6f64fdd0..2daf2fa6554f02b4987e222f5f9f7702b3d1db5c 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -793,7 +793,8 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
 }
 
 typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
-                                      struct sk_buff *skb);
+                                      struct sk_buff *skb,
+                                      struct net_device *sb_dev);
 
 enum tc_setup_type {
        TC_SETUP_QDISC_MQPRIO,
diff --git a/net/core/dev.c b/net/core/dev.c
index a051ce27198bbd39447bc7112a76a6e41f8062d9..e18d81837a6ce956ccc532a4f44904a792c71e99 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3633,8 +3633,8 @@ u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(dev_pick_tx_cpu_id);
 
-static u16 ___netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
-                            struct net_device *sb_dev)
+static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
+                           struct net_device *sb_dev)
 {
        struct sock *sk = skb->sk;
        int queue_index = sk_tx_queue_get(sk);
@@ -3659,12 +3659,6 @@ static u16 ___netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
        return queue_index;
 }
 
-static u16 __netdev_pick_tx(struct net_device *dev,
-                           struct sk_buff *skb)
-{
-       return ___netdev_pick_tx(dev, skb, NULL);
-}
-
 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
                                    struct sk_buff *skb,
                                    struct net_device *sb_dev)
@@ -3685,7 +3679,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
                        queue_index = ops->ndo_select_queue(dev, skb, sb_dev,
                                                            __netdev_pick_tx);
                else
-                       queue_index = ___netdev_pick_tx(dev, skb, sb_dev);
+                       queue_index = __netdev_pick_tx(dev, skb, sb_dev);
 
                queue_index = netdev_cap_txqueue(dev, queue_index);
        }
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f37d087ae652db771ee469623d64be8e2007e250..00189a3b07f2161dbdde3a451af1b01f51162afd 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -275,9 +275,10 @@ static bool packet_use_direct_xmit(const struct packet_sock *po)
        return po->xmit == packet_direct_xmit;
 }
 
-static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
+                                 struct net_device *sb_dev)
 {
-       return dev_pick_tx_cpu_id(dev, skb, NULL, NULL);
+       return dev_pick_tx_cpu_id(dev, skb, sb_dev, NULL);
 }
 
 static u16 packet_pick_tx_queue(struct sk_buff *skb)
@@ -291,7 +292,7 @@ static u16 packet_pick_tx_queue(struct sk_buff *skb)
                                                    __packet_pick_tx_queue);
                queue_index = netdev_cap_txqueue(dev, queue_index);
        } else {
-               queue_index = __packet_pick_tx_queue(dev, skb);
+               queue_index = __packet_pick_tx_queue(dev, skb, NULL);
        }
 
        return queue_index;