net: allow ndo_select_queue to pass netdev
author     Alexander Duyck <alexander.h.duyck@intel.com>
           Mon, 9 Jul 2018 16:19:59 +0000 (12:19 -0400)
committer  Jeff Kirsher <jeffrey.t.kirsher@intel.com>
           Mon, 9 Jul 2018 20:41:34 +0000 (13:41 -0700)
This patch changes ndo_select_queue so that instead of passing a void
pointer as accel_priv we pass a net_device pointer as sb_dev. Making this
change allows us to eventually pass the subordinate device through to the
fallback function, so that the actual code in the ndo_select_queue call
can stay as focused as possible on the exception cases.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
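
As a quick illustration of the new hook shape, here is a minimal sketch of a
driver callback (a hypothetical "foo" driver, not part of this patch) that
handles only the exception case where a subordinate device is supplied and
defers everything else to the fallback, mirroring the ixgbe change further
down:

/* Sketch only: "foo_select_queue" is an invented example, not kernel code. */
#include <linux/kernel.h>	/* reciprocal_scale() */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev,
			    select_queue_fallback_t fallback)
{
	/* Exception case: the stack handed us a subordinate device. */
	if (sb_dev) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);

		/* Spread flows across the queue range owned by sb_dev's TC. */
		return sb_dev->tc_to_txq[tc].offset +
		       reciprocal_scale(skb_get_hash(skb),
					sb_dev->tc_to_txq[tc].count);
	}

	/* Common case: defer to the core's default queue selection. */
	return fallback(dev, skb);
}

Most of the callbacks touched by this patch simply forward the new argument
unchanged (see the netvsc and net_failover hunks below); only drivers such as
ixgbe, which already made use of accel_priv, interpret sb_dev themselves.
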
29 files changed:
drivers/infiniband/hw/hfi1/vnic_main.c
drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
drivers/net/bonding/bond_main.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/sun/ldmvsw.c
drivers/net/ethernet/sun/sunvnet.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/net_failover.c
drivers/net/team/team.c
drivers/net/tun.c
drivers/net/wireless/marvell/mwifiex/main.c
drivers/net/xen-netback/interface.c
drivers/net/xen-netfront.c
drivers/staging/rtl8188eu/os_dep/os_intfs.c
drivers/staging/rtl8723bs/os_dep/os_intfs.c
include/linux/netdevice.h
net/core/dev.c
net/mac80211/iface.c

diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c
index 5d65582fe4d92f4840cdb84db8ae5d6f3dcf8652..616fc9b6fad8f41e28f5114f4d6afd4bd8e8788e 100644
--- a/drivers/infiniband/hw/hfi1/vnic_main.c
+++ b/drivers/infiniband/hw/hfi1/vnic_main.c
@@ -423,7 +423,7 @@ static netdev_tx_t hfi1_netdev_start_xmit(struct sk_buff *skb,
 
 static u16 hfi1_vnic_select_queue(struct net_device *netdev,
                                  struct sk_buff *skb,
-                                 void *accel_priv,
+                                 struct net_device *sb_dev,
                                  select_queue_fallback_t fallback)
 {
        struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
index 0c8aec62a42539fc90d67000361653fbf846ae3b..61558788b3fadb7546660f3b907820b264e3b1aa 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
@@ -95,7 +95,7 @@ static netdev_tx_t opa_netdev_start_xmit(struct sk_buff *skb,
 }
 
 static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
-                                void *accel_priv,
+                                struct net_device *sb_dev,
                                 select_queue_fallback_t fallback)
 {
        struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
@@ -107,7 +107,7 @@ static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
        mdata->entropy = opa_vnic_calc_entropy(skb);
        mdata->vl = opa_vnic_get_vl(adapter, skb);
        rc = adapter->rn_ops->ndo_select_queue(netdev, skb,
-                                              accel_priv, fallback);
+                                              sb_dev, fallback);
        skb_pull(skb, sizeof(*mdata));
        return rc;
 }
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 63e3844c5becf5e973e10fa2aa533f668ac8e30b..9a2ea3c1f9495312d2ac0ee47917c5797fe7b9ed 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4094,7 +4094,8 @@ static inline int bond_slave_override(struct bonding *bond,
 
 
 static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
-                            void *accel_priv, select_queue_fallback_t fallback)
+                            struct net_device *sb_dev,
+                            select_queue_fallback_t fallback)
 {
        /* This helper function exists to help dev_pick_tx get the correct
         * destination queue.  Using a helper function skips a call to
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index f2af87d70594fca1b3c42085858fb323da295506..e3befb1f9204afbde095e43f82b5c074abdfc3f3 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2213,7 +2213,8 @@ static void ena_netpoll(struct net_device *netdev)
 #endif /* CONFIG_NET_POLL_CONTROLLER */
 
 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
-                           void *accel_priv, select_queue_fallback_t fallback)
+                           struct net_device *sb_dev,
+                           select_queue_fallback_t fallback)
 {
        u16 qid;
        /* we suspect that this is good for in--kernel network services that
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index d5fca2e5a9bc34ad6edfa295e378dfe12078c0e5..32f548e6431db4a7b5b836ce3bfff62c7f504a67 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2107,7 +2107,7 @@ static const struct ethtool_ops bcm_sysport_ethtool_ops = {
 };
 
 static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
-                                   void *accel_priv,
+                                   struct net_device *sb_dev,
                                    select_queue_fallback_t fallback)
 {
        struct bcm_sysport_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index af7b5a4d8ba044800b0eb229d8c989c564515e94..e4e1cf907ac65697e6f50bd582410080191e7c34 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1910,7 +1910,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
 }
 
 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
-                      void *accel_priv, select_queue_fallback_t fallback)
+                      struct net_device *sb_dev,
+                      select_queue_fallback_t fallback)
 {
        struct bnx2x *bp = netdev_priv(dev);
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index a8ce5c55bbb0ca29b5cca28171cd94733e7260eb..0e508e5defce315f2e5254ca238afe26b523054a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -497,7 +497,8 @@ int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
 
 /* select_queue callback */
 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
-                      void *accel_priv, select_queue_fallback_t fallback);
+                      struct net_device *sb_dev,
+                      select_queue_fallback_t fallback);
 
 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 0d91716a25666f2479cc4499a2bdf755779dd962..5dc5e5604f051ae7a3e1f1eebc2f186d5a9b8680 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -930,7 +930,8 @@ static int setup_sge_queues(struct adapter *adap)
 }
 
 static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
-                            void *accel_priv, select_queue_fallback_t fallback)
+                            struct net_device *sb_dev,
+                            select_queue_fallback_t fallback)
 {
        int txq;
 
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index ef9ef703d13a0e0efff11404afb41532f571283f..ff7a74ec8f11f20ab7eda2f9af3424ef1f3ec4fc 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -2022,7 +2022,8 @@ static void hns_nic_get_stats64(struct net_device *ndev,
 
 static u16
 hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
-                    void *accel_priv, select_queue_fallback_t fallback)
+                    struct net_device *sb_dev,
+                    select_queue_fallback_t fallback)
 {
        struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
        struct hns_nic_priv *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index abb176df2e7f0f397b04260c8eaa0f0d94ccf81c..8c7a68c57afab07e6198e14d08b1881efe969b34 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -8210,15 +8210,16 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
 
 #ifdef IXGBE_FCOE
 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
-                             void *accel_priv, select_queue_fallback_t fallback)
+                             struct net_device *sb_dev,
+                             select_queue_fallback_t fallback)
 {
        struct ixgbe_adapter *adapter;
        struct ixgbe_ring_feature *f;
        int txq;
 
-       if (accel_priv) {
+       if (sb_dev) {
                u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
-               struct net_device *vdev = accel_priv;
+               struct net_device *vdev = sb_dev;
 
                txq = vdev->tc_to_txq[tc].offset;
                txq += reciprocal_scale(skb_get_hash(skb),
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 0227786308af5d70bdfbb19da3fb8d5760d0651f..df2996618cd16a6573ab01cae0748202eb699a3b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -688,7 +688,8 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
 }
 
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
-                        void *accel_priv, select_queue_fallback_t fallback)
+                        struct net_device *sb_dev,
+                        select_queue_fallback_t fallback)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        u16 rings_p_up = priv->num_tx_rings_p_up;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index ace6545f82e6b343d26acd6d0bb4c55cd6ae4809..c3228b89df463597de1cb546754ea1b8aa4d876d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -699,7 +699,8 @@ void mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
 
 void mlx4_en_tx_irq(struct mlx4_cq *mcq);
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
-                        void *accel_priv, select_queue_fallback_t fallback);
+                        struct net_device *sb_dev,
+                        select_queue_fallback_t fallback);
 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
 netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
                               struct mlx4_en_rx_alloc *frame,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index e2b7586ed7a0b2595f3676c3df072cce382a5411..e1b237ccdf56d9fa17a75e6444d6e718d3a705c6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -865,7 +865,8 @@ struct mlx5e_profile {
 void mlx5e_build_ptys2ethtool_map(void);
 
 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
-                      void *accel_priv, select_queue_fallback_t fallback);
+                      struct net_device *sb_dev,
+                      select_queue_fallback_t fallback);
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
 netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                          struct mlx5e_tx_wqe *wqe, u16 pi);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index f0739dae7b56961313f59bfd1c83c94a083b31aa..dfcc3710b65f0f86d5ffb20a3303804a21c39f49 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -111,7 +111,8 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb
 #endif
 
 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
-                      void *accel_priv, select_queue_fallback_t fallback)
+                      struct net_device *sb_dev,
+                      select_queue_fallback_t fallback)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
        int channel_ix = fallback(dev, skb);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 68f122140966d4de381b47fa192246eb7606707a..4a7f54c8e7aaad7e9fb26ba4296e0666825f0986 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1656,7 +1656,8 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 }
 
 static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
-                            void *accel_priv, select_queue_fallback_t fallback)
+                            struct net_device *sb_dev,
+                            select_queue_fallback_t fallback)
 {
        /* If skb needs TX timestamp, it is handled in network control queue */
        return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index a5dd627fe2f9237a1af445c9ce2409fd0976c76c..d42f47f6c632fe8618348d40fc609bfed5deef4a 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -101,7 +101,8 @@ static struct vnet_port *vsw_tx_port_find(struct sk_buff *skb,
 }
 
 static u16 vsw_select_queue(struct net_device *dev, struct sk_buff *skb,
-                           void *accel_priv, select_queue_fallback_t fallback)
+                           struct net_device *sb_dev,
+                           select_queue_fallback_t fallback)
 {
        struct vnet_port *port = netdev_priv(dev);
 
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index a94f50442613e9f77cec6aff24fbf19a5a33756b..12539b357a78402dfc80a4a654761051a2fa6409 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -234,7 +234,8 @@ static struct vnet_port *vnet_tx_port_find(struct sk_buff *skb,
 }
 
 static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
-                            void *accel_priv, select_queue_fallback_t fallback)
+                            struct net_device *sb_dev,
+                            select_queue_fallback_t fallback)
 {
        struct vnet *vp = netdev_priv(dev);
        struct vnet_port *port = __tx_port_find(vp, skb);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index dd1d6e115145d4c14fb25d1883d1e42614e211a9..98c0107d6ca17744f26765c655f7f3d72271d5d4 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -329,7 +329,7 @@ static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
 }
 
 static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
-                              void *accel_priv,
+                              struct net_device *sb_dev,
                               select_queue_fallback_t fallback)
 {
        struct net_device_context *ndc = netdev_priv(ndev);
@@ -343,7 +343,7 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
 
                if (vf_ops->ndo_select_queue)
                        txq = vf_ops->ndo_select_queue(vf_netdev, skb,
-                                                      accel_priv, fallback);
+                                                      sb_dev, fallback);
                else
                        txq = fallback(vf_netdev, skb);
 
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index 4f390fa557e4ba0c897b20faefaa85b03f4ec70a..78b549698b7b3ff92babc97a8643d50dd3006cc1 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -115,7 +115,8 @@ static netdev_tx_t net_failover_start_xmit(struct sk_buff *skb,
 }
 
 static u16 net_failover_select_queue(struct net_device *dev,
-                                    struct sk_buff *skb, void *accel_priv,
+                                    struct sk_buff *skb,
+                                    struct net_device *sb_dev,
                                     select_queue_fallback_t fallback)
 {
        struct net_failover_info *nfo_info = netdev_priv(dev);
@@ -128,7 +129,7 @@ static u16 net_failover_select_queue(struct net_device *dev,
 
                if (ops->ndo_select_queue)
                        txq = ops->ndo_select_queue(primary_dev, skb,
-                                                   accel_priv, fallback);
+                                                   sb_dev, fallback);
                else
                        txq = fallback(primary_dev, skb);
 
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index b070959737ffe744f08683926a486c66ee08bb4a..3a95eaae0c98e9a18c378d34aa8109d5fe24762c 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1707,7 +1707,8 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
 }
 
 static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
-                            void *accel_priv, select_queue_fallback_t fallback)
+                            struct net_device *sb_dev,
+                            select_queue_fallback_t fallback)
 {
        /*
         * This helper function exists to help dev_pick_tx get the correct
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a192a017cc68878360505b93df151de3d0b9b730..76f0f4131197a8d6ddcdba5869745e89cc11dc3a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -607,7 +607,8 @@ static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
 }
 
 static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
-                           void *accel_priv, select_queue_fallback_t fallback)
+                           struct net_device *sb_dev,
+                           select_queue_fallback_t fallback)
 {
        struct tun_struct *tun = netdev_priv(dev);
        u16 ret;
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 510f6b8e717d7f52eb2cdbb6e334e062f49053af..fa3e8ddfe9a93f78382aaa5031fcd15a29b7be09 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -1279,7 +1279,8 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
 
 static u16
 mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
-                               void *accel_priv, select_queue_fallback_t fallback)
+                               struct net_device *sb_dev,
+                               select_queue_fallback_t fallback)
 {
        skb->priority = cfg80211_classify8021d(skb, NULL);
        return mwifiex_1d_to_wmm_queue[skb->priority];
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 78ebe494fef02b8d31505262f8c551e27aee7dc5..19c4c585f472340428856889634c0428f77d810f 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -148,7 +148,7 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
 }
 
 static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
-                              void *accel_priv,
+                              struct net_device *sb_dev,
                               select_queue_fallback_t fallback)
 {
        struct xenvif *vif = netdev_priv(dev);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index a57daecf1d574fc1a6e25ca5eb043c1617fe2dcc..d67cd379d156df18ca224c9e15e173d676d84440 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -545,7 +545,8 @@ static int xennet_count_skb_slots(struct sk_buff *skb)
 }
 
 static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
-                              void *accel_priv, select_queue_fallback_t fallback)
+                              struct net_device *sb_dev,
+                              select_queue_fallback_t fallback)
 {
        unsigned int num_queues = dev->real_num_tx_queues;
        u32 hash;
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index add1ba00f3e9ab7c5111f6c3846fa582ed243e53..38e85c8a85c8ba690ae60ef4df9181e959eee0f1 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -253,7 +253,8 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
 }
 
 static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
-                           void *accel_priv, select_queue_fallback_t fallback)
+                           struct net_device *sb_dev,
+                           select_queue_fallback_t fallback)
 {
        struct adapter  *padapter = rtw_netdev_priv(dev);
        struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index ace68f023b49db7aec2fd147830e5e2e3cf3c34f..181642358e3fe1b29339f1c285f3e3a44dbe7f67 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -403,10 +403,9 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
 }
 
 
-static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb
-                               , void *accel_priv
-                               , select_queue_fallback_t fallback
-)
+static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
+                           struct net_device *sb_dev,
+                           select_queue_fallback_t fallback)
 {
        struct adapter  *padapter = rtw_netdev_priv(dev);
        struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 46f4c44ce3e4a79acc289d9ee437fc4cad835e1f..bbf062c1ca8a31e9bc15666eb431801f6f64fdd0 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -957,7 +957,8 @@ struct dev_ifalias {
  *     those the driver believes to be appropriate.
  *
  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
- *                         void *accel_priv, select_queue_fallback_t fallback);
+ *                         struct net_device *sb_dev,
+ *                         select_queue_fallback_t fallback);
  *     Called to decide which queue to use when device supports multiple
  *     transmit queues.
  *
@@ -1229,7 +1230,7 @@ struct net_device_ops {
                                                      netdev_features_t features);
        u16                     (*ndo_select_queue)(struct net_device *dev,
                                                    struct sk_buff *skb,
-                                                   void *accel_priv,
+                                                   struct net_device *sb_dev,
                                                    select_queue_fallback_t fallback);
        void                    (*ndo_change_rx_flags)(struct net_device *dev,
                                                       int flags);
@@ -2568,9 +2569,11 @@ void dev_close_many(struct list_head *head, bool unlink);
 void dev_disable_lro(struct net_device *dev);
 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
-                    void *accel_priv, select_queue_fallback_t fallback);
+                    struct net_device *sb_dev,
+                    select_queue_fallback_t fallback);
 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
-                      void *accel_priv, select_queue_fallback_t fallback);
+                      struct net_device *sb_dev,
+                      select_queue_fallback_t fallback);
 int dev_queue_xmit(struct sk_buff *skb);
 int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
 int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
diff --git a/net/core/dev.c b/net/core/dev.c
index b5e538032d5e4cace1bd4948e249f5e00958e311..a051ce27198bbd39447bc7112a76a6e41f8062d9 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3618,14 +3618,16 @@ static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
 }
 
 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
-                    void *accel_priv, select_queue_fallback_t fallback)
+                    struct net_device *sb_dev,
+                    select_queue_fallback_t fallback)
 {
        return 0;
 }
 EXPORT_SYMBOL(dev_pick_tx_zero);
 
 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
-                      void *accel_priv, select_queue_fallback_t fallback)
+                      struct net_device *sb_dev,
+                      select_queue_fallback_t fallback)
 {
        return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
 }
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 555e389b7dfa34ebf494c9f2432fb6409eff74a9..5e6cf2cee965264dd45cda775b370b6dcb022413 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1130,7 +1130,7 @@ static void ieee80211_uninit(struct net_device *dev)
 
 static u16 ieee80211_netdev_select_queue(struct net_device *dev,
                                         struct sk_buff *skb,
-                                        void *accel_priv,
+                                        struct net_device *sb_dev,
                                         select_queue_fallback_t fallback)
 {
        return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
@@ -1176,7 +1176,7 @@ static const struct net_device_ops ieee80211_dataif_ops = {
 
 static u16 ieee80211_monitor_select_queue(struct net_device *dev,
                                          struct sk_buff *skb,
-                                         void *accel_priv,
+                                         struct net_device *sb_dev,
                                          select_queue_fallback_t fallback)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);