asedeno.scripts.mit.edu Git - linux.git/commitdiff
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author David S. Miller <davem@davemloft.net>
Wed, 16 Aug 2017 03:23:23 +0000 (20:23 -0700)
committer David S. Miller <davem@davemloft.net>
Wed, 16 Aug 2017 03:23:23 +0000 (20:23 -0700)
22 files changed:
MAINTAINERS
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/netronome/nfp/flower/cmsg.c
drivers/net/ethernet/sfc/mcdi_port.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
include/linux/net.h
include/net/udp.h
net/core/filter.c
net/ipv4/fib_semantics.c
net/ipv4/route.c
net/ipv4/tcp_ipv4.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/key/af_key.c
net/sched/sch_api.c
net/sched/sch_atm.c

diff --combined MAINTAINERS
index 2db0f8cd4002f5fd2ba389d1aed53e29de79032a,6f7721d1634c2eb7247538f2cb4d85fa1be1a458..0e967b3ca1c612ad4b3e82d29a6a2cb26102df63
@@@ -2477,7 -2477,7 +2477,7 @@@ Q:      https://patchwork.open-mesh.org/proj
  S:    Maintained
  F:    Documentation/ABI/testing/sysfs-class-net-batman-adv
  F:    Documentation/ABI/testing/sysfs-class-net-mesh
 -F:    Documentation/networking/batman-adv.txt
 +F:    Documentation/networking/batman-adv.rst
  F:    include/uapi/linux/batman_adv.h
  F:    net/batman-adv/
  
@@@ -5101,7 -5101,6 +5101,7 @@@ F:      include/linux/of_net.
  F:    include/linux/phy.h
  F:    include/linux/phy_fixed.h
  F:    include/linux/platform_data/mdio-gpio.h
 +F:    include/linux/platform_data/mdio-bcm-unimac.h
  F:    include/trace/events/mdio.h
  F:    include/uapi/linux/mdio.h
  F:    include/uapi/linux/mii.h
@@@ -6148,14 -6147,6 +6148,14 @@@ S:    Maintaine
  F:    drivers/net/ethernet/hisilicon/
  F:    Documentation/devicetree/bindings/net/hisilicon*.txt
  
 +HISILICON NETWORK SUBSYSTEM 3 DRIVER (HNS3)
 +M:    Yisen Zhuang <yisen.zhuang@huawei.com>
 +M:    Salil Mehta <salil.mehta@huawei.com>
 +L:    netdev@vger.kernel.org
 +W:    http://www.hisilicon.com
 +S:    Maintained
 +F:    drivers/net/ethernet/hisilicon/hns3/
 +
  HISILICON ROCE DRIVER
  M:    Lijun Ou <oulijun@huawei.com>
  M:    Wei Hu(Xavier) <xavier.huwei@huawei.com>
@@@ -6266,7 -6257,6 +6266,7 @@@ M:      Haiyang Zhang <haiyangz@microsoft.co
  M:    Stephen Hemminger <sthemmin@microsoft.com>
  L:    devel@linuxdriverproject.org
  S:    Maintained
 +F:    Documentation/networking/netvsc.txt
  F:    arch/x86/include/asm/mshyperv.h
  F:    arch/x86/include/uapi/asm/hyperv.h
  F:    arch/x86/kernel/cpu/mshyperv.c
@@@ -8434,9 -8424,7 +8434,9 @@@ F:      include/uapi/linux/uvcvideo.
  
  MEDIATEK ETHERNET DRIVER
  M:    Felix Fietkau <nbd@openwrt.org>
 -M:    John Crispin <blogic@openwrt.org>
 +M:    John Crispin <john@phrozen.org>
 +M:    Sean Wang <sean.wang@mediatek.com>
 +M:    Nelson Chang <nelson.chang@mediatek.com>
  L:    netdev@vger.kernel.org
  S:    Maintained
  F:    drivers/net/ethernet/mediatek/
@@@ -14016,6 -14004,7 +14016,7 @@@ F:   drivers/block/virtio_blk.
  F:    include/linux/virtio*.h
  F:    include/uapi/linux/virtio_*.h
  F:    drivers/crypto/virtio/
+ F:    mm/balloon_compaction.c
  
  VIRTIO CRYPTO DRIVER
  M:    Gonglei <arei.gonglei@huawei.com>
index daa37750d152bac67671222166a79216111687c8,09ea62ee96d38b3d99bb48cbd0f72dcc715f8315..b9bff1d9801fc6d7f1aba8ad32a08335c4ab486c
@@@ -338,12 -338,10 +338,12 @@@ struct adapter_params 
        unsigned int sf_nsec;             /* # of flash sectors */
        unsigned int sf_fw_start;         /* start of FW image in flash */
  
 -      unsigned int fw_vers;
 -      unsigned int bs_vers;           /* bootstrap version */
 -      unsigned int tp_vers;
 -      unsigned int er_vers;           /* expansion ROM version */
 +      unsigned int fw_vers;             /* firmware version */
 +      unsigned int bs_vers;             /* bootstrap version */
 +      unsigned int tp_vers;             /* TP microcode version */
 +      unsigned int er_vers;             /* expansion ROM version */
 +      unsigned int scfg_vers;           /* Serial Configuration version */
 +      unsigned int vpd_vers;            /* VPD Version */
        u8 api_vers[7];
  
        unsigned short mtus[NMTUS];
@@@ -531,6 -529,7 +531,7 @@@ enum {                                 
        USING_SOFT_PARAMS  = (1 << 6),
        MASTER_PF          = (1 << 7),
        FW_OFLD_CONN       = (1 << 9),
+       ROOT_NO_RELAXED_ORDERING = (1 << 10),
  };
  
  enum {
@@@ -1405,15 -1404,10 +1406,15 @@@ int t4_fw_upgrade(struct adapter *adap
  int t4_fl_pkt_align(struct adapter *adap);
  unsigned int t4_flash_cfg_addr(struct adapter *adapter);
  int t4_check_fw_version(struct adapter *adap);
 +int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size);
  int t4_get_fw_version(struct adapter *adapter, u32 *vers);
  int t4_get_bs_version(struct adapter *adapter, u32 *vers);
  int t4_get_tp_version(struct adapter *adapter, u32 *vers);
  int t4_get_exprom_version(struct adapter *adapter, u32 *vers);
 +int t4_get_scfg_version(struct adapter *adapter, u32 *vers);
 +int t4_get_vpd_version(struct adapter *adapter, u32 *vers);
 +int t4_get_version_info(struct adapter *adapter);
 +void t4_dump_version_info(struct adapter *adapter);
  int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
               const u8 *fw_data, unsigned int fw_size,
               struct fw_hdr *card_fw, enum dev_state state, int *reset);
index afa6fd688facf9dfb892dd0f15d55551458bb3cb,33bb8678833adc6f83551d992f201f6314e68762..77538cd8184a9b6b36f4b351d47efc54dc25bab0
@@@ -2889,29 -2889,14 +2889,29 @@@ static int cxgb_set_tx_maxrate(struct n
        return err;
  }
  
 -static int cxgb_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
 -                       __be16 proto, struct tc_to_netdev *tc)
 +static int cxgb_setup_tc_cls_u32(struct net_device *dev,
 +                               struct tc_cls_u32_offload *cls_u32)
  {
 -      struct port_info *pi = netdev2pinfo(dev);
 -      struct adapter *adap = netdev2adap(dev);
 +      if (is_classid_clsact_ingress(cls_u32->common.classid) ||
 +          cls_u32->common.chain_index)
 +              return -EOPNOTSUPP;
  
 -      if (chain_index)
 +      switch (cls_u32->command) {
 +      case TC_CLSU32_NEW_KNODE:
 +      case TC_CLSU32_REPLACE_KNODE:
 +              return cxgb4_config_knode(dev, cls_u32);
 +      case TC_CLSU32_DELETE_KNODE:
 +              return cxgb4_delete_knode(dev, cls_u32);
 +      default:
                return -EOPNOTSUPP;
 +      }
 +}
 +
 +static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
 +                       void *type_data)
 +{
 +      struct port_info *pi = netdev2pinfo(dev);
 +      struct adapter *adap = netdev2adap(dev);
  
        if (!(adap->flags & FULL_INIT_DONE)) {
                dev_err(adap->pdev_dev,
                return -EINVAL;
        }
  
 -      if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) &&
 -          tc->type == TC_SETUP_CLSU32) {
 -              switch (tc->cls_u32->command) {
 -              case TC_CLSU32_NEW_KNODE:
 -              case TC_CLSU32_REPLACE_KNODE:
 -                      return cxgb4_config_knode(dev, proto, tc->cls_u32);
 -              case TC_CLSU32_DELETE_KNODE:
 -                      return cxgb4_delete_knode(dev, proto, tc->cls_u32);
 -              default:
 -                      return -EOPNOTSUPP;
 -              }
 +      switch (type) {
 +      case TC_SETUP_CLSU32:
 +              return cxgb_setup_tc_cls_u32(dev, type_data);
 +      default:
 +              return -EOPNOTSUPP;
        }
 -
 -      return -EOPNOTSUPP;
  }
  
  static netdev_features_t cxgb_fix_features(struct net_device *dev,
@@@ -3617,8 -3610,11 +3617,8 @@@ static int adap_init0(struct adapter *a
         * later reporting and B. to warn if the currently loaded firmware
         * is excessively mismatched relative to the driver.)
         */
 -      t4_get_fw_version(adap, &adap->params.fw_vers);
 -      t4_get_bs_version(adap, &adap->params.bs_vers);
 -      t4_get_tp_version(adap, &adap->params.tp_vers);
 -      t4_get_exprom_version(adap, &adap->params.er_vers);
  
 +      t4_get_version_info(adap);
        ret = t4_check_fw_version(adap);
        /* If firmware is too old (not supported by driver) force an update. */
        if (ret)
@@@ -4564,8 -4560,56 +4564,8 @@@ static void cxgb4_check_pcie_caps(struc
  /* Dump basic information about the adapter */
  static void print_adapter_info(struct adapter *adapter)
  {
 -      /* Device information */
 -      dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n",
 -               adapter->params.vpd.id,
 -               CHELSIO_CHIP_RELEASE(adapter->params.chip));
 -      dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n",
 -               adapter->params.vpd.sn, adapter->params.vpd.pn);
 -
 -      /* Firmware Version */
 -      if (!adapter->params.fw_vers)
 -              dev_warn(adapter->pdev_dev, "No firmware loaded\n");
 -      else
 -              dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n",
 -                       FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
 -                       FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
 -                       FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
 -                       FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers));
 -
 -      /* Bootstrap Firmware Version. (Some adapters don't have Bootstrap
 -       * Firmware, so dev_info() is more appropriate here.)
 -       */
 -      if (!adapter->params.bs_vers)
 -              dev_info(adapter->pdev_dev, "No bootstrap loaded\n");
 -      else
 -              dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n",
 -                       FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers),
 -                       FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers),
 -                       FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers),
 -                       FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers));
 -
 -      /* TP Microcode Version */
 -      if (!adapter->params.tp_vers)
 -              dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n");
 -      else
 -              dev_info(adapter->pdev_dev,
 -                       "TP Microcode version: %u.%u.%u.%u\n",
 -                       FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
 -                       FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
 -                       FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
 -                       FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
 -
 -      /* Expansion ROM version */
 -      if (!adapter->params.er_vers)
 -              dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n");
 -      else
 -              dev_info(adapter->pdev_dev,
 -                       "Expansion ROM version: %u.%u.%u.%u\n",
 -                       FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers),
 -                       FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers),
 -                       FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers),
 -                       FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers));
 +      /* Hardware/Firmware/etc. Version/Revision IDs */
 +      t4_dump_version_info(adapter);
  
        /* Software/Hardware configuration */
        dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
@@@ -4610,11 -4654,6 +4610,6 @@@ static void print_port_info(const struc
                    dev->name, adap->params.vpd.id, adap->name, buf);
  }
  
- static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
- {
-       pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
- }
  /*
   * Free the following resources:
   * - memory used for tables
@@@ -4864,7 -4903,6 +4859,6 @@@ static int init_one(struct pci_dev *pde
        }
  
        pci_enable_pcie_error_reporting(pdev);
-       enable_pcie_relaxed_ordering(pdev);
        pci_set_master(pdev);
        pci_save_state(pdev);
  
        adapter->msg_enable = DFLT_MSG_ENABLE;
        memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
  
+       /* If possible, we use the PCIe Relaxed Ordering Attribute to deliver
+        * Ingress Packet Data to Free List Buffers in order to allow for
+        * chipset performance optimizations between the Root Complex and
+        * Memory Controllers.  (Messages to the associated Ingress Queue
+        * notifying new Packet Placement in the Free List Buffers will be
+        * sent without the Relaxed Ordering Attribute, thus guaranteeing that
+        * all preceding PCIe Transaction Layer Packets will be processed
+        * first.)  But some Root Complexes have various issues with Upstream
+        * Transaction Layer Packets with the Relaxed Ordering Attribute set.
+        * PCIe devices under such Root Complexes have the Relaxed Ordering
+        * bit cleared in their configuration space, so we check our PCIe
+        * configuration space to see if it's flagged with advice against
+        * using Relaxed Ordering.
+        */
+       if (!pcie_relaxed_ordering_enabled(pdev))
+               adapter->flags |= ROOT_NO_RELAXED_ORDERING;
        spin_lock_init(&adapter->stats_lock);
        spin_lock_init(&adapter->tid_release_lock);
        spin_lock_init(&adapter->win0_lock);
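For context, the hunks above stop force-enabling Relaxed Ordering and instead gate it on the new pcie_relaxed_ordering_enabled() helper. Below is a minimal sketch of that probe-time pattern; the example_adapter structure, flag name, and function are hypothetical stand-ins, and only pcie_relaxed_ordering_enabled() and the flag-bit idea come from the patch itself:

    #include <linux/pci.h>

    struct example_adapter {
            unsigned int flags;             /* driver status flags */
    };

    /* Hypothetical stand-in for the ROOT_NO_RELAXED_ORDERING bit above. */
    #define EXAMPLE_NO_RELAXED_ORDERING     (1 << 10)

    static void example_probe_relaxed_ordering(struct pci_dev *pdev,
                                               struct example_adapter *adap)
    {
            /* Record whether the Root Complex advises against Relaxed
             * Ordering; the data path can then avoid setting the attribute
             * on upstream TLPs.
             */
            if (!pcie_relaxed_ordering_enabled(pdev))
                    adap->flags |= EXAMPLE_NO_RELAXED_ORDERING;
    }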
index a69d68ba3d0cb49a8f30f24641e7f274a06c915e,b0837b58c3a1084e8261ff3bf6d6057c4d8e7f54..aa46b23cdfb1ff2e6eb68cc01c8570ca177c2606
@@@ -75,39 -75,6 +75,39 @@@ nfp_flower_cmsg_alloc(struct nfp_app *a
        return skb;
  }
  
 +struct sk_buff *
 +nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports)
 +{
 +      struct nfp_flower_cmsg_mac_repr *msg;
 +      struct sk_buff *skb;
 +      unsigned int size;
 +
 +      size = sizeof(*msg) + num_ports * sizeof(msg->ports[0]);
 +      skb = nfp_flower_cmsg_alloc(app, size, NFP_FLOWER_CMSG_TYPE_MAC_REPR);
 +      if (!skb)
 +              return NULL;
 +
 +      msg = nfp_flower_cmsg_get_data(skb);
 +      memset(msg->reserved, 0, sizeof(msg->reserved));
 +      msg->num_ports = num_ports;
 +
 +      return skb;
 +}
 +
 +void
 +nfp_flower_cmsg_mac_repr_add(struct sk_buff *skb, unsigned int idx,
 +                           unsigned int nbi, unsigned int nbi_port,
 +                           unsigned int phys_port)
 +{
 +      struct nfp_flower_cmsg_mac_repr *msg;
 +
 +      msg = nfp_flower_cmsg_get_data(skb);
 +      msg->ports[idx].idx = idx;
 +      msg->ports[idx].info = nbi & NFP_FLOWER_CMSG_MAC_REPR_NBI;
 +      msg->ports[idx].nbi_port = nbi_port;
 +      msg->ports[idx].phys_port = phys_port;
 +}
 +
  int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok)
  {
        struct nfp_flower_cmsg_portmod *msg;
@@@ -148,14 -115,10 +148,10 @@@ nfp_flower_cmsg_portmod_rx(struct nfp_a
                return;
        }
  
-       if (link) {
+       if (link)
                netif_carrier_on(netdev);
-               rtnl_lock();
-               dev_set_mtu(netdev, be16_to_cpu(msg->mtu));
-               rtnl_unlock();
-       } else {
+       else
                netif_carrier_off(netdev);
-       }
        rcu_read_unlock();
  }
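The two helpers added above are meant to be used as a pair: nfp_flower_cmsg_mac_repr_start() allocates one control message sized for num_ports entries, and nfp_flower_cmsg_mac_repr_add() fills one entry per representor. A usage sketch follows; the idx/nbi/port values are illustrative, and it assumes the message is handed off via nfp_ctrl_tx() as other cmsg helpers in this file do:

    static int example_announce_mac_reprs(struct nfp_app *app,
                                          unsigned int num_ports)
    {
            struct sk_buff *skb;
            unsigned int i;

            skb = nfp_flower_cmsg_mac_repr_start(app, num_ports);
            if (!skb)
                    return -ENOMEM;

            for (i = 0; i < num_ports; i++)
                    /* idx, nbi, nbi_port, phys_port: illustrative values */
                    nfp_flower_cmsg_mac_repr_add(skb, i, 0, i, i);

            nfp_ctrl_tx(app->ctrl, skb);    /* assumed send path */
            return 0;
    }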
  
index d3f96a8f743bf98f2e149da099782558ed216ad3,990a63d7fcb7213fa5c5d6e40a10ec5102af892a..c7407d129c7d5fd11ecdeacae21d5c164613244a
@@@ -746,171 -746,59 +746,171 @@@ static const char *efx_mcdi_phy_test_na
        return NULL;
  }
  
 -#define SFP_PAGE_SIZE 128
 -#define SFP_NUM_PAGES 2
 -static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx,
 -                                        struct ethtool_eeprom *ee, u8 *data)
 +#define SFP_PAGE_SIZE         128
 +#define SFF_DIAG_TYPE_OFFSET  92
 +#define SFF_DIAG_ADDR_CHANGE  BIT(2)
 +#define SFF_8079_NUM_PAGES    2
 +#define SFF_8472_NUM_PAGES    4
 +#define SFF_8436_NUM_PAGES    5
 +#define SFF_DMT_LEVEL_OFFSET  94
 +
 +/** efx_mcdi_phy_get_module_eeprom_page() - Get a single page of module eeprom
 + * @efx:      NIC context
 + * @page:     EEPROM page number
 + * @data:     Destination data pointer
 + * @offset:   Offset within the page to copy from into data
 + * @space:    Space available in data
 + *
 + * Return:
 + *   >=0 - amount of data copied
 + *   <0  - error
 + */
 +static int efx_mcdi_phy_get_module_eeprom_page(struct efx_nic *efx,
 +                                             unsigned int page,
 +                                             u8 *data, ssize_t offset,
 +                                             ssize_t space)
  {
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX);
        MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN);
        size_t outlen;
 -      int rc;
        unsigned int payload_len;
 -      unsigned int space_remaining = ee->len;
 -      unsigned int page;
 -      unsigned int page_off;
        unsigned int to_copy;
 -      u8 *user_data = data;
 +      int rc;
  
 -      BUILD_BUG_ON(SFP_PAGE_SIZE * SFP_NUM_PAGES != ETH_MODULE_SFF_8079_LEN);
 +      if (offset > SFP_PAGE_SIZE)
 +              return -EINVAL;
  
 -      page_off = ee->offset % SFP_PAGE_SIZE;
 -      page = ee->offset / SFP_PAGE_SIZE;
 +      to_copy = min(space, SFP_PAGE_SIZE - offset);
  
 -      while (space_remaining && (page < SFP_NUM_PAGES)) {
 -              MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page);
 +      MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page);
 +      rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_PHY_MEDIA_INFO,
 +                              inbuf, sizeof(inbuf),
 +                              outbuf, sizeof(outbuf),
 +                              &outlen);
  
 -              rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_MEDIA_INFO,
 -                                inbuf, sizeof(inbuf),
 -                                outbuf, sizeof(outbuf),
 -                                &outlen);
 -              if (rc)
 -                      return rc;
 +      if (rc)
 +              return rc;
  
 -              if (outlen < (MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST +
 -                            SFP_PAGE_SIZE))
 -                      return -EIO;
 +      if (outlen < (MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST +
 +                      SFP_PAGE_SIZE))
 +              return -EIO;
  
 -              payload_len = MCDI_DWORD(outbuf,
 -                                       GET_PHY_MEDIA_INFO_OUT_DATALEN);
 -              if (payload_len != SFP_PAGE_SIZE)
 -                      return -EIO;
 +      payload_len = MCDI_DWORD(outbuf, GET_PHY_MEDIA_INFO_OUT_DATALEN);
 +      if (payload_len != SFP_PAGE_SIZE)
 +              return -EIO;
  
 -              /* Copy as much as we can into data */
 -              payload_len -= page_off;
 -              to_copy = (space_remaining < payload_len) ?
 -                      space_remaining : payload_len;
 +      memcpy(data, MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA) + offset,
 +             to_copy);
  
 -              memcpy(user_data,
 -                     MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA) + page_off,
 -                     to_copy);
 +      return to_copy;
 +}
  
 -              space_remaining -= to_copy;
 -              user_data += to_copy;
 -              page_off = 0;
 -              page++;
 +static int efx_mcdi_phy_get_module_eeprom_byte(struct efx_nic *efx,
 +                                             unsigned int page,
 +                                             u8 byte)
 +{
 +      int rc;
 +      u8 data;
 +
 +      rc = efx_mcdi_phy_get_module_eeprom_page(efx, page, &data, byte, 1);
 +      if (rc == 1)
 +              return data;
 +
 +      return rc;
 +}
 +
 +static int efx_mcdi_phy_diag_type(struct efx_nic *efx)
 +{
 +      /* Page zero of the EEPROM includes the diagnostic type at byte 92. */
 +      return efx_mcdi_phy_get_module_eeprom_byte(efx, 0,
 +                                                 SFF_DIAG_TYPE_OFFSET);
 +}
 +
 +static int efx_mcdi_phy_sff_8472_level(struct efx_nic *efx)
 +{
 +      /* Page zero of the EEPROM includes the DMT level at byte 94. */
 +      return efx_mcdi_phy_get_module_eeprom_byte(efx, 0,
 +                                                 SFF_DMT_LEVEL_OFFSET);
 +}
 +
 +static u32 efx_mcdi_phy_module_type(struct efx_nic *efx)
 +{
 +      struct efx_mcdi_phy_data *phy_data = efx->phy_data;
 +
 +      if (phy_data->media != MC_CMD_MEDIA_QSFP_PLUS)
 +              return phy_data->media;
 +
 +      /* A QSFP+ NIC may actually have an SFP+ module attached.
 +       * The ID is page 0, byte 0.
 +       */
 +      switch (efx_mcdi_phy_get_module_eeprom_byte(efx, 0, 0)) {
 +      case 0x3:
 +              return MC_CMD_MEDIA_SFP_PLUS;
 +      case 0xc:
 +      case 0xd:
 +              return MC_CMD_MEDIA_QSFP_PLUS;
 +      default:
 +              return 0;
 +      }
 +}
 +
 +static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx,
 +                                        struct ethtool_eeprom *ee, u8 *data)
 +{
 +      int rc;
 +      ssize_t space_remaining = ee->len;
 +      unsigned int page_off;
 +      bool ignore_missing;
 +      int num_pages;
 +      int page;
 +
 +      switch (efx_mcdi_phy_module_type(efx)) {
 +      case MC_CMD_MEDIA_SFP_PLUS:
 +              num_pages = efx_mcdi_phy_sff_8472_level(efx) > 0 ?
 +                              SFF_8472_NUM_PAGES : SFF_8079_NUM_PAGES;
 +              page = 0;
 +              ignore_missing = false;
 +              break;
 +      case MC_CMD_MEDIA_QSFP_PLUS:
 +              num_pages = SFF_8436_NUM_PAGES;
 +              page = -1; /* We obtain the lower page by asking for -1. */
 +              ignore_missing = true; /* Ignore missing pages after page 0. */
 +              break;
 +      default:
 +              return -EOPNOTSUPP;
 +      }
 +
 +      page_off = ee->offset % SFP_PAGE_SIZE;
 +      page += ee->offset / SFP_PAGE_SIZE;
 +
 +      while (space_remaining && (page < num_pages)) {
 +              rc = efx_mcdi_phy_get_module_eeprom_page(efx, page,
 +                                                       data, page_off,
 +                                                       space_remaining);
 +
 +              if (rc > 0) {
 +                      space_remaining -= rc;
 +                      data += rc;
 +                      page_off = 0;
 +                      page++;
 +              } else if (rc == 0) {
 +                      space_remaining = 0;
 +              } else if (ignore_missing && (page > 0)) {
 +                      int intended_size = SFP_PAGE_SIZE - page_off;
 +
 +                      space_remaining -= intended_size;
 +                      if (space_remaining < 0) {
 +                              space_remaining = 0;
 +                      } else {
 +                              memset(data, 0, intended_size);
 +                              data += intended_size;
 +                              page_off = 0;
 +                              page++;
 +                              rc = 0;
 +                      }
 +              } else {
 +                      return rc;
 +              }
        }
  
        return 0;
  static int efx_mcdi_phy_get_module_info(struct efx_nic *efx,
                                        struct ethtool_modinfo *modinfo)
  {
 -      struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
 +      int sff_8472_level;
 +      int diag_type;
  
 -      switch (phy_cfg->media) {
 +      switch (efx_mcdi_phy_module_type(efx)) {
        case MC_CMD_MEDIA_SFP_PLUS:
 -              modinfo->type = ETH_MODULE_SFF_8079;
 -              modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
 -              return 0;
 +              sff_8472_level = efx_mcdi_phy_sff_8472_level(efx);
 +
 +              /* If we can't read the diagnostics level we have none. */
 +              if (sff_8472_level < 0)
 +                      return -EOPNOTSUPP;
 +
 +              /* Check if this module requires the (unsupported) address
 +               * change operation.
 +               */
 +              diag_type = efx_mcdi_phy_diag_type(efx);
 +
 +              if ((sff_8472_level == 0) ||
 +                  (diag_type & SFF_DIAG_ADDR_CHANGE)) {
 +                      modinfo->type = ETH_MODULE_SFF_8079;
 +                      modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
 +              } else {
 +                      modinfo->type = ETH_MODULE_SFF_8472;
 +                      modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
 +              }
 +              break;
 +
 +      case MC_CMD_MEDIA_QSFP_PLUS:
 +              modinfo->type = ETH_MODULE_SFF_8436;
 +              modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
 +              break;
 +
        default:
                return -EOPNOTSUPP;
        }
 +
 +      return 0;
  }
  
  static const struct efx_phy_operations efx_mcdi_phy_ops = {
@@@ -1076,7 -938,6 +1076,6 @@@ enum efx_stats_action 
  static int efx_mcdi_mac_stats(struct efx_nic *efx,
                              enum efx_stats_action action, int clear)
  {
-       struct efx_ef10_nic_data *nic_data = efx->nic_data;
        MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
        int rc;
        int change = action == EFX_STATS_PULL ? 0 : 1;
                              MAC_STATS_IN_PERIODIC_NOEVENT, 1,
                              MAC_STATS_IN_PERIOD_MS, period);
        MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
-       MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id);
+       if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
+               struct efx_ef10_nic_data *nic_data = efx->nic_data;
+               MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id);
+       }
  
        rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
                                NULL, 0, NULL);
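The rewritten efx_mcdi_phy_get_module_eeprom() above first converts the flat ethtool (offset, len) request into a starting page plus an intra-page offset, then walks pages with efx_mcdi_phy_get_module_eeprom_page(). A standalone sketch of just that arithmetic, using the SFP_PAGE_SIZE value from the patch:

    #include <stdio.h>

    #define SFP_PAGE_SIZE 128

    int main(void)
    {
            unsigned int ee_offset = 300;   /* example flat ethtool offset */
            unsigned int page_off = ee_offset % SFP_PAGE_SIZE;  /* -> 44 */
            unsigned int page = ee_offset / SFP_PAGE_SIZE;      /* -> 2  */

            /* For QSFP+ (SFF-8436) the walk starts from page -1 (the lower
             * page), so the quotient is added to that base instead.
             */
            printf("page=%u page_off=%u\n", page, page_off);
            return 0;
    }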
index 875cf3a60adbca974a323c0c716481b6c544ba44,82863e9273eb66ce9058a5974d2f35988290e1dc..4b2a454cf7b63fda5514cf38f71b6ef6e8410f8e
@@@ -78,7 -78,7 +78,7 @@@
  #include "iwl-eeprom-parse.h"
  
  #include "mvm.h"
 -#include "fw-dbg.h"
 +#include "fw/dbg.h"
  #include "iwl-phy-db.h"
  
  #define MVM_UCODE_ALIVE_TIMEOUT       HZ
@@@ -144,6 -144,134 +144,6 @@@ static int iwl_mvm_send_dqa_cmd(struct 
        return ret;
  }
  
 -void iwl_free_fw_paging(struct iwl_mvm *mvm)
 -{
 -      int i;
 -
 -      if (!mvm->fw_paging_db[0].fw_paging_block)
 -              return;
 -
 -      for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
 -              struct iwl_fw_paging *paging = &mvm->fw_paging_db[i];
 -
 -              if (!paging->fw_paging_block) {
 -                      IWL_DEBUG_FW(mvm,
 -                                   "Paging: block %d already freed, continue to next page\n",
 -                                   i);
 -
 -                      continue;
 -              }
 -              dma_unmap_page(mvm->trans->dev, paging->fw_paging_phys,
 -                             paging->fw_paging_size, DMA_BIDIRECTIONAL);
 -
 -              __free_pages(paging->fw_paging_block,
 -                           get_order(paging->fw_paging_size));
 -              paging->fw_paging_block = NULL;
 -      }
 -      kfree(mvm->trans->paging_download_buf);
 -      mvm->trans->paging_download_buf = NULL;
 -      mvm->trans->paging_db = NULL;
 -
 -      memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
 -}
 -
 -static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
 -{
 -      int sec_idx, idx;
 -      u32 offset = 0;
 -
 -      /*
 -       * find where is the paging image start point:
 -       * if CPU2 exist and it's in paging format, then the image looks like:
 -       * CPU1 sections (2 or more)
 -       * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between CPU1 to CPU2
 -       * CPU2 sections (not paged)
 -       * PAGING_SEPARATOR_SECTION delimiter - separate between CPU2
 -       * non paged to CPU2 paging sec
 -       * CPU2 paging CSS
 -       * CPU2 paging image (including instruction and data)
 -       */
 -      for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) {
 -              if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
 -                      sec_idx++;
 -                      break;
 -              }
 -      }
 -
 -      /*
 -       * If paging is enabled there should be at least 2 more sections left
 -       * (one for CSS and one for Paging data)
 -       */
 -      if (sec_idx >= image->num_sec - 1) {
 -              IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n");
 -              iwl_free_fw_paging(mvm);
 -              return -EINVAL;
 -      }
 -
 -      /* copy the CSS block to the dram */
 -      IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
 -                   sec_idx);
 -
 -      memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
 -             image->sec[sec_idx].data,
 -             mvm->fw_paging_db[0].fw_paging_size);
 -      dma_sync_single_for_device(mvm->trans->dev,
 -                                 mvm->fw_paging_db[0].fw_paging_phys,
 -                                 mvm->fw_paging_db[0].fw_paging_size,
 -                                 DMA_BIDIRECTIONAL);
 -
 -      IWL_DEBUG_FW(mvm,
 -                   "Paging: copied %d CSS bytes to first block\n",
 -                   mvm->fw_paging_db[0].fw_paging_size);
 -
 -      sec_idx++;
 -
 -      /*
 -       * copy the paging blocks to the dram
 -       * loop index start from 1 since that CSS block already copied to dram
 -       * and CSS index is 0.
 -       * loop stop at num_of_paging_blk since that last block is not full.
 -       */
 -      for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
 -              struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];
 -
 -              memcpy(page_address(block->fw_paging_block),
 -                     image->sec[sec_idx].data + offset,
 -                     block->fw_paging_size);
 -              dma_sync_single_for_device(mvm->trans->dev,
 -                                         block->fw_paging_phys,
 -                                         block->fw_paging_size,
 -                                         DMA_BIDIRECTIONAL);
 -
 -
 -              IWL_DEBUG_FW(mvm,
 -                           "Paging: copied %d paging bytes to block %d\n",
 -                           mvm->fw_paging_db[idx].fw_paging_size,
 -                           idx);
 -
 -              offset += mvm->fw_paging_db[idx].fw_paging_size;
 -      }
 -
 -      /* copy the last paging block */
 -      if (mvm->num_of_pages_in_last_blk > 0) {
 -              struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];
 -
 -              memcpy(page_address(block->fw_paging_block),
 -                     image->sec[sec_idx].data + offset,
 -                     FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);
 -              dma_sync_single_for_device(mvm->trans->dev,
 -                                         block->fw_paging_phys,
 -                                         block->fw_paging_size,
 -                                         DMA_BIDIRECTIONAL);
 -
 -              IWL_DEBUG_FW(mvm,
 -                           "Paging: copied %d pages in the last block %d\n",
 -                           mvm->num_of_pages_in_last_blk, idx);
 -      }
 -
 -      return 0;
 -}
 -
  void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
                                   struct iwl_rx_cmd_buffer *rxb)
  {
                               le32_to_cpu(dump_data[i]));
  }
  
 -static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
 -                                 const struct fw_img *image)
 -{
 -      struct page *block;
 -      dma_addr_t phys = 0;
 -      int blk_idx, order, num_of_pages, size, dma_enabled;
 -
 -      if (mvm->fw_paging_db[0].fw_paging_block)
 -              return 0;
 -
 -      dma_enabled = is_device_dma_capable(mvm->trans->dev);
 -
 -      /* ensure BLOCK_2_EXP_SIZE is power of 2 of PAGING_BLOCK_SIZE */
 -      BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);
 -
 -      num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
 -      mvm->num_of_paging_blk =
 -              DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP);
 -      mvm->num_of_pages_in_last_blk =
 -              num_of_pages -
 -              NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);
 -
 -      IWL_DEBUG_FW(mvm,
 -                   "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
 -                   mvm->num_of_paging_blk,
 -                   mvm->num_of_pages_in_last_blk);
 -
 -      /*
 -       * Allocate CSS and paging blocks in dram.
 -       */
 -      for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
 -              /* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */
 -              size = blk_idx ? PAGING_BLOCK_SIZE : FW_PAGING_SIZE;
 -              order = get_order(size);
 -              block = alloc_pages(GFP_KERNEL, order);
 -              if (!block) {
 -                      /* free all the previous pages since we failed */
 -                      iwl_free_fw_paging(mvm);
 -                      return -ENOMEM;
 -              }
 -
 -              mvm->fw_paging_db[blk_idx].fw_paging_block = block;
 -              mvm->fw_paging_db[blk_idx].fw_paging_size = size;
 -
 -              if (dma_enabled) {
 -                      phys = dma_map_page(mvm->trans->dev, block, 0,
 -                                          PAGE_SIZE << order,
 -                                          DMA_BIDIRECTIONAL);
 -                      if (dma_mapping_error(mvm->trans->dev, phys)) {
 -                              /*
 -                               * free the previous pages and the current one
 -                               * since we failed to map_page.
 -                               */
 -                              iwl_free_fw_paging(mvm);
 -                              return -ENOMEM;
 -                      }
 -                      mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
 -              } else {
 -                      mvm->fw_paging_db[blk_idx].fw_paging_phys =
 -                              PAGING_ADDR_SIG |
 -                              blk_idx << BLOCK_2_EXP_SIZE;
 -              }
 -
 -              if (!blk_idx)
 -                      IWL_DEBUG_FW(mvm,
 -                                   "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
 -                                   order);
 -              else
 -                      IWL_DEBUG_FW(mvm,
 -                                   "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
 -                                   order);
 -      }
 -
 -      return 0;
 -}
 -
 -static int iwl_save_fw_paging(struct iwl_mvm *mvm,
 -                            const struct fw_img *fw)
 -{
 -      int ret;
 -
 -      ret = iwl_alloc_fw_paging_mem(mvm, fw);
 -      if (ret)
 -              return ret;
 -
 -      return iwl_fill_paging_mem(mvm, fw);
 -}
 -
 -/* send paging cmd to FW in case CPU2 has paging image */
 -static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
 -{
 -      struct iwl_fw_paging_cmd paging_cmd = {
 -              .flags = cpu_to_le32(PAGING_CMD_IS_SECURED |
 -                                   PAGING_CMD_IS_ENABLED |
 -                                   (mvm->num_of_pages_in_last_blk <<
 -                                    PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
 -              .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
 -              .block_num = cpu_to_le32(mvm->num_of_paging_blk),
 -      };
 -      int blk_idx;
 -
 -      /* loop for for all paging blocks + CSS block */
 -      for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
 -              dma_addr_t addr = mvm->fw_paging_db[blk_idx].fw_paging_phys;
 -              __le32 phy_addr;
 -
 -              addr = addr >> PAGE_2_EXP_SIZE;
 -              phy_addr = cpu_to_le32(addr);
 -              paging_cmd.device_phy_addr[blk_idx] = phy_addr;
 -      }
 -
 -      return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
 -                                                  IWL_ALWAYS_LONG_GROUP, 0),
 -                                  0, sizeof(paging_cmd), &paging_cmd);
 -}
 -
 -/*
 - * Send paging item cmd to FW in case CPU2 has paging image
 - */
 -static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
 -{
 -      int ret;
 -      struct iwl_fw_get_item_cmd fw_get_item_cmd = {
 -              .item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
 -      };
 -
 -      struct iwl_fw_get_item_resp *item_resp;
 -      struct iwl_host_cmd cmd = {
 -              .id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
 -              .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
 -              .data = { &fw_get_item_cmd, },
 -      };
 -
 -      cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);
 -
 -      ret = iwl_mvm_send_cmd(mvm, &cmd);
 -      if (ret) {
 -              IWL_ERR(mvm,
 -                      "Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
 -                      ret);
 -              return ret;
 -      }
 -
 -      item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
 -      if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
 -              IWL_ERR(mvm,
 -                      "Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
 -                      le32_to_cpu(item_resp->item_id));
 -              ret = -EIO;
 -              goto exit;
 -      }
 -
 -      /* Add an extra page for headers */
 -      mvm->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE +
 -                                                FW_PAGING_SIZE,
 -                                                GFP_KERNEL);
 -      if (!mvm->trans->paging_download_buf) {
 -              ret = -ENOMEM;
 -              goto exit;
 -      }
 -      mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
 -      mvm->trans->paging_db = mvm->fw_paging_db;
 -      IWL_DEBUG_FW(mvm,
 -                   "Paging: got paging request address (paging_req_addr 0x%08x)\n",
 -                   mvm->trans->paging_req_addr);
 -
 -exit:
 -      iwl_free_resp(&cmd);
 -
 -      return ret;
 -}
 -
  static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
                         struct iwl_rx_packet *pkt, void *data)
  {
@@@ -244,6 -544,48 +244,6 @@@ static bool iwl_wait_phy_db_entry(struc
        return false;
  }
  
 -static int iwl_mvm_init_paging(struct iwl_mvm *mvm)
 -{
 -      const struct fw_img *fw = &mvm->fw->img[mvm->cur_ucode];
 -      int ret;
 -
 -      /*
 -       * Configure and operate fw paging mechanism.
 -       * The driver configures the paging flow only once.
 -       * The CPU2 paging image is included in the IWL_UCODE_INIT image.
 -       */
 -      if (!fw->paging_mem_size)
 -              return 0;
 -
 -      /*
 -       * When dma is not enabled, the driver needs to copy / write
 -       * the downloaded / uploaded page to / from the smem.
 -       * This gets the location of the place were the pages are
 -       * stored.
 -       */
 -      if (!is_device_dma_capable(mvm->trans->dev)) {
 -              ret = iwl_trans_get_paging_item(mvm);
 -              if (ret) {
 -                      IWL_ERR(mvm, "failed to get FW paging item\n");
 -                      return ret;
 -              }
 -      }
 -
 -      ret = iwl_save_fw_paging(mvm, fw);
 -      if (ret) {
 -              IWL_ERR(mvm, "failed to save the FW paging image\n");
 -              return ret;
 -      }
 -
 -      ret = iwl_send_paging_cmd(mvm, fw);
 -      if (ret) {
 -              IWL_ERR(mvm, "failed to send the paging cmd\n");
 -              iwl_free_fw_paging(mvm);
 -              return ret;
 -      }
 -
 -      return 0;
 -}
  static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
                                         enum iwl_ucode_type ucode_type)
  {
        struct iwl_mvm_alive_data alive_data;
        const struct fw_img *fw;
        int ret, i;
 -      enum iwl_ucode_type old_type = mvm->cur_ucode;
 +      enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
        static const u16 alive_cmd[] = { MVM_ALIVE };
        struct iwl_sf_region st_fwrd_space;
  
                fw = iwl_get_ucode_image(mvm->fw, ucode_type);
        if (WARN_ON(!fw))
                return -EINVAL;
 -      mvm->cur_ucode = ucode_type;
 +      iwl_fw_set_current_image(&mvm->fwrt, ucode_type);
        clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
  
        iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
  
        ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
        if (ret) {
 -              mvm->cur_ucode = old_type;
 +              iwl_fw_set_current_image(&mvm->fwrt, old_type);
                iwl_remove_notification(&mvm->notif_wait, &alive_wait);
                return ret;
        }
                                "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
                                iwl_read_prph(trans, SB_CPU_1_STATUS),
                                iwl_read_prph(trans, SB_CPU_2_STATUS));
 -              mvm->cur_ucode = old_type;
 +              iwl_fw_set_current_image(&mvm->fwrt, old_type);
                return ret;
        }
  
        if (!alive_data.valid) {
                IWL_ERR(mvm, "Loaded ucode is not valid!\n");
 -              mvm->cur_ucode = old_type;
 +              iwl_fw_set_current_image(&mvm->fwrt, old_type);
                return -EIO;
        }
  
         */
  
        memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
 -      if (iwl_mvm_is_dqa_supported(mvm))
 -              mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
 -      else
 -              mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;
 +      mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
  
        for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
                atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
@@@ -429,7 -774,7 +429,7 @@@ error
  static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
  {
        struct iwl_phy_cfg_cmd phy_cfg_cmd;
 -      enum iwl_ucode_type ucode_type = mvm->cur_ucode;
 +      enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;
  
        /* Set parameters */
        phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
@@@ -454,7 -799,7 +454,7 @@@ int iwl_run_init_mvm_ucode(struct iwl_m
        };
        int ret;
  
 -      if (iwl_mvm_has_new_tx_api(mvm))
 +      if (iwl_mvm_has_unified_ucode(mvm))
                return iwl_run_unified_mvm_ucode(mvm, true);
  
        lockdep_assert_held(&mvm->mutex);
@@@ -565,6 -910,95 +565,6 @@@ out
        return ret;
  }
  
 -static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
 -                                        struct iwl_rx_packet *pkt)
 -{
 -      struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
 -      int i, lmac;
 -      int lmac_num = le32_to_cpu(mem_cfg->lmac_num);
 -
 -      if (WARN_ON(lmac_num > ARRAY_SIZE(mem_cfg->lmac_smem)))
 -              return;
 -
 -      mvm->smem_cfg.num_lmacs = lmac_num;
 -      mvm->smem_cfg.num_txfifo_entries =
 -              ARRAY_SIZE(mem_cfg->lmac_smem[0].txfifo_size);
 -      mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo2_size);
 -
 -      for (lmac = 0; lmac < lmac_num; lmac++) {
 -              struct iwl_shared_mem_lmac_cfg *lmac_cfg =
 -                      &mem_cfg->lmac_smem[lmac];
 -
 -              for (i = 0; i < ARRAY_SIZE(lmac_cfg->txfifo_size); i++)
 -                      mvm->smem_cfg.lmac[lmac].txfifo_size[i] =
 -                              le32_to_cpu(lmac_cfg->txfifo_size[i]);
 -              mvm->smem_cfg.lmac[lmac].rxfifo1_size =
 -                      le32_to_cpu(lmac_cfg->rxfifo1_size);
 -      }
 -}
 -
 -static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm,
 -                                   struct iwl_rx_packet *pkt)
 -{
 -      struct iwl_shared_mem_cfg_v2 *mem_cfg = (void *)pkt->data;
 -      int i;
 -
 -      mvm->smem_cfg.num_lmacs = 1;
 -
 -      mvm->smem_cfg.num_txfifo_entries = ARRAY_SIZE(mem_cfg->txfifo_size);
 -      for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
 -              mvm->smem_cfg.lmac[0].txfifo_size[i] =
 -                      le32_to_cpu(mem_cfg->txfifo_size[i]);
 -
 -      mvm->smem_cfg.lmac[0].rxfifo1_size =
 -              le32_to_cpu(mem_cfg->rxfifo_size[0]);
 -      mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo_size[1]);
 -
 -      /* new API has more data, from rxfifo_addr field and on */
 -      if (fw_has_capa(&mvm->fw->ucode_capa,
 -                      IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
 -              BUILD_BUG_ON(sizeof(mvm->smem_cfg.internal_txfifo_size) !=
 -                           sizeof(mem_cfg->internal_txfifo_size));
 -
 -              for (i = 0;
 -                   i < ARRAY_SIZE(mvm->smem_cfg.internal_txfifo_size);
 -                   i++)
 -                      mvm->smem_cfg.internal_txfifo_size[i] =
 -                              le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
 -      }
 -}
 -
 -static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
 -{
 -      struct iwl_host_cmd cmd = {
 -              .flags = CMD_WANT_SKB,
 -              .data = { NULL, },
 -              .len = { 0, },
 -      };
 -      struct iwl_rx_packet *pkt;
 -
 -      lockdep_assert_held(&mvm->mutex);
 -
 -      if (fw_has_capa(&mvm->fw->ucode_capa,
 -                      IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
 -              cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
 -      else
 -              cmd.id = SHARED_MEM_CFG;
 -
 -      if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
 -              return;
 -
 -      pkt = cmd.resp_pkt;
 -      if (iwl_mvm_has_new_tx_api(mvm))
 -              iwl_mvm_parse_shared_mem_a000(mvm, pkt);
 -      else
 -              iwl_mvm_parse_shared_mem(mvm, pkt);
 -
 -      IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
 -
 -      iwl_free_resp(&cmd);
 -}
 -
  static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
  {
        struct iwl_ltr_config_cmd cmd = {
@@@ -614,8 -1048,8 +614,8 @@@ static union acpi_object *iwl_mvm_sar_f
                                                    union acpi_object *data,
                                                    int data_size)
  {
 +      union acpi_object *wifi_pkg = NULL;
        int i;
 -      union acpi_object *wifi_pkg;
  
        /*
         * We need at least two packages, one for the revision and one
@@@ -841,8 -1275,10 +841,10 @@@ static int iwl_mvm_sar_get_wgds_table(s
  
                        entry = &wifi_pkg->package.elements[idx++];
                        if ((entry->type != ACPI_TYPE_INTEGER) ||
-                           (entry->integer.value > U8_MAX))
-                               return -EINVAL;
+                           (entry->integer.value > U8_MAX)) {
+                               ret = -EINVAL;
+                               goto out_free;
+                       }
  
                        mvm->geo_profiles[i].values[j] = entry->integer.value;
                }
@@@ -1031,7 -1467,7 +1033,7 @@@ static int iwl_mvm_load_rt_fw(struct iw
  {
        int ret;
  
 -      if (iwl_mvm_has_new_tx_api(mvm))
 +      if (iwl_mvm_has_unified_ucode(mvm))
                return iwl_run_unified_mvm_ucode(mvm, false);
  
        ret = iwl_run_init_mvm_ucode(mvm, false);
        if (ret)
                return ret;
  
 -      return iwl_mvm_init_paging(mvm);
 +      return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img);
  }
  
  int iwl_mvm_up(struct iwl_mvm *mvm)
                goto error;
        }
  
 -      iwl_mvm_get_shared_mem_conf(mvm);
 +      iwl_get_shared_mem_conf(&mvm->fwrt);
  
        ret = iwl_mvm_sf_update(mvm, NULL, false);
        if (ret)
                IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
  
 -      mvm->fw_dbg_conf = FW_DBG_INVALID;
 +      mvm->fwrt.dump.conf = FW_DBG_INVALID;
        /* if we have a destination, assume EARLY START */
        if (mvm->fw->dbg_dest_tlv)
 -              mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE;
 -      iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE);
 +              mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE;
 +      iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE);
  
        ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
        if (ret)
                goto error;
  
 -      /* Send phy db control command and then phy db calibration*/
 -      if (!iwl_mvm_has_new_tx_api(mvm)) {
 +      if (!iwl_mvm_has_unified_ucode(mvm)) {
 +              /* Send phy db control command and then phy db calibration */
                ret = iwl_send_phy_db_data(mvm->phy_db);
                if (ret)
                        goto error;
  
        /* Init RSS configuration */
        /* TODO - remove a000 disablement when we have RXQ config API */
 -      if (iwl_mvm_has_new_rx_api(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
 +      if (iwl_mvm_has_new_rx_api(mvm) &&
 +          mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_A000) {
                ret = iwl_send_rss_cfg_cmd(mvm);
                if (ret) {
                        IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
        /* reset quota debouncing buffer - 0xff will yield invalid data */
        memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));
  
 -      /* Enable DQA-mode if required */
 -      if (iwl_mvm_is_dqa_supported(mvm)) {
 -              ret = iwl_mvm_send_dqa_cmd(mvm);
 -              if (ret)
 -                      goto error;
 -      } else {
 -              IWL_DEBUG_FW(mvm, "Working in non-DQA mode\n");
 -      }
 +      ret = iwl_mvm_send_dqa_cmd(mvm);
 +      if (ret)
 +              goto error;
  
        /* Add auxiliary station for scanning */
        ret = iwl_mvm_add_aux_sta(mvm);
index 2d1404c9fbf4de1bf6813f0eab537e0c2963a04b,ce901be5fba87e3674f06b030cfae8759e063c3b..66f534aab2404ea9b63129da1e9bea806910e59e
@@@ -87,6 -87,7 +87,6 @@@
  #include "fw/error-dump.h"
  #include "iwl-prph.h"
  #include "iwl-nvm-parse.h"
 -#include "fw-dbg.h"
  
  static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
        {
@@@ -445,18 -446,8 +445,18 @@@ int iwl_mvm_mac_setup_register(struct i
        ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
        if (iwl_mvm_has_new_rx_api(mvm))
                ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
 -      if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_STA_PM_NOTIF))
 +
 +      if (fw_has_capa(&mvm->fw->ucode_capa,
 +                      IWL_UCODE_TLV_CAPA_STA_PM_NOTIF)) {
                ieee80211_hw_set(hw, AP_LINK_PS);
 +      } else if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) {
 +              /*
 +               * we absolutely need this for the new TX API since that comes
 +               * with many more queues than the current code can deal with
 +               * for station powersave
 +               */
 +              return -EINVAL;
 +      }
  
        if (mvm->trans->num_rx_queues > 1)
                ieee80211_hw_set(hw, USES_RSS);
        if (mvm->trans->max_skb_frags)
                hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
  
 -      if (!iwl_mvm_is_dqa_supported(mvm))
 -              hw->queues = mvm->first_agg_queue;
 -      else
 -              hw->queues = IEEE80211_MAX_QUEUES;
 +      hw->queues = IEEE80211_MAX_QUEUES;
        hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
        hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
                                    IEEE80211_RADIOTAP_MCS_HAVE_STBC;
@@@ -805,7 -799,7 +805,7 @@@ static void iwl_mvm_mac_tx(struct ieee8
                goto drop;
        }
  
 -      if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
 +      if (info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
            !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
            !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
                goto drop;
        /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */
        if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER &&
                     ieee80211_is_mgmt(hdr->frame_control) &&
 -                   !ieee80211_is_deauth(hdr->frame_control) &&
 -                   !ieee80211_is_disassoc(hdr->frame_control) &&
 -                   !ieee80211_is_action(hdr->frame_control)))
 +                   !ieee80211_is_bufferable_mmpdu(hdr->frame_control)))
                sta = NULL;
  
        if (sta) {
@@@ -849,11 -845,11 +849,11 @@@ static inline bool iwl_enable_tx_ampdu(
        return true;
  }
  
 -#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \
 -      do {                                                    \
 -              if (!(le16_to_cpu(_tid_bm) & BIT(_tid)))        \
 -                      break;                                  \
 -              iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt); \
 +#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...)         \
 +      do {                                                            \
 +              if (!(le16_to_cpu(_tid_bm) & BIT(_tid)))                \
 +                      break;                                          \
 +              iwl_fw_dbg_collect_trig(&(_mvm)->fwrt, _trig, _fmt);    \
        } while (0)
  
  static void
@@@ -870,8 -866,7 +870,8 @@@ iwl_mvm_ampdu_check_trigger(struct iwl_
        trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
        ba_trig = (void *)trig->data;
  
 -      if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
 +      if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
 +                                         ieee80211_vif_to_wdev(vif), trig))
                return;
  
        switch (action) {
@@@ -1034,8 -1029,8 +1034,8 @@@ static void iwl_mvm_restart_cleanup(str
         * on D3->D0 transition
         */
        if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
 -              mvm->fw_dump_desc = &iwl_mvm_dump_desc_assert;
 -              iwl_mvm_fw_error_dump(mvm);
 +              mvm->fwrt.dump.desc = &iwl_dump_desc_assert;
 +              iwl_fw_error_dump(&mvm->fwrt);
        }
  
        /* cleanup all stale references (scan, roc), but keep the
  
        iwl_mvm_reset_phy_ctxts(mvm);
        memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
 -      memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
        memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
 -      memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
        memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
        memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
  
  
        mvm->vif_count = 0;
        mvm->rx_ba_sessions = 0;
 -      mvm->fw_dbg_conf = FW_DBG_INVALID;
 +      mvm->fwrt.dump.conf = FW_DBG_INVALID;
  
        /* keep statistics ticking */
        iwl_mvm_accu_radio_stats(mvm);
@@@ -1258,16 -1255,16 +1258,16 @@@ static void iwl_mvm_mac_stop(struct iee
         * Lock and clear the firmware running bit here already, so that
         * new commands coming in elsewhere, e.g. from debugfs, will not
         * be able to proceed. This is important here because one of those
 -       * debugfs files causes the fw_dump_wk to be triggered, and if we
 +       * debugfs files causes the firmware dump to be triggered, and if we
         * don't stop debugfs accesses before canceling that it could be
         * retriggered after we flush it but before we've cleared the bit.
         */
        clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
  
 -      cancel_delayed_work_sync(&mvm->fw_dump_wk);
 +      iwl_fw_cancel_dump(&mvm->fwrt);
        cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
        cancel_delayed_work_sync(&mvm->scan_timeout_dwork);
 -      iwl_mvm_free_fw_dump_desc(mvm);
 +      iwl_fw_free_dump_desc(&mvm->fwrt);
  
        mutex_lock(&mvm->mutex);
        __iwl_mvm_mac_stop(mvm);
@@@ -1373,15 -1370,17 +1373,15 @@@ static int iwl_mvm_mac_add_interface(st
                        goto out_release;
                }
  
 -              if (iwl_mvm_is_dqa_supported(mvm)) {
 -                      /*
 -                       * Only queue for this station is the mcast queue,
 -                       * which shouldn't be in TFD mask anyway
 -                       */
 -                      ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta,
 -                                                     0, vif->type,
 -                                                     IWL_STA_MULTICAST);
 -                      if (ret)
 -                              goto out_release;
 -              }
 +              /*
 +               * The only queue for this station is the mcast queue,
 +               * which shouldn't be in TFD mask anyway
 +               */
 +              ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta,
 +                                             0, vif->type,
 +                                             IWL_STA_MULTICAST);
 +              if (ret)
 +                      goto out_release;
  
                iwl_mvm_vif_dbgfs_register(mvm, vif);
                goto out_unlock;
                if (ret)
                        goto out_unref_phy;
  
 -              ret = iwl_mvm_add_bcast_sta(mvm, vif);
 +              ret = iwl_mvm_add_p2p_bcast_sta(mvm, vif);
                if (ret)
                        goto out_unbind;
  
   out_release:
        if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
                mvm->vif_count--;
 -
 -      iwl_mvm_mac_ctxt_release(mvm, vif);
   out_unlock:
        mutex_unlock(&mvm->mutex);
  
  static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
                                        struct ieee80211_vif *vif)
  {
 -      u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif);
 -
 -      if (tfd_msk && !iwl_mvm_is_dqa_supported(mvm)) {
 -              /*
 -               * mac80211 first removes all the stations of the vif and
 -               * then removes the vif. When it removes a station it also
 -               * flushes the AMPDU session. So by now, all the AMPDU sessions
 -               * of all the stations of this vif are closed, and the queues
 -               * of these AMPDU sessions are properly closed.
 -               * We still need to take care of the shared queues of the vif.
 -               * Flush them here.
 -               * For DQA mode there is no need - broacast and multicast queue
 -               * are flushed separately.
 -               */
 -              mutex_lock(&mvm->mutex);
 -              iwl_mvm_flush_tx_path(mvm, tfd_msk, 0);
 -              mutex_unlock(&mvm->mutex);
 -
 -              /*
 -               * There are transports that buffer a few frames in the host.
 -               * For these, the flush above isn't enough since while we were
 -               * flushing, the transport might have sent more frames to the
 -               * device. To solve this, wait here until the transport is
 -               * empty. Technically, this could have replaced the flush
 -               * above, but flush is much faster than draining. So flush
 -               * first, and drain to make sure we have no frames in the
 -               * transport anymore.
 -               * If a station still had frames on the shared queues, it is
 -               * already marked as draining, so to complete the draining, we
 -               * just need to wait until the transport is empty.
 -               */
 -              iwl_trans_wait_tx_queues_empty(mvm->trans, tfd_msk);
 -      }
 -
        if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
                /*
                 * Flush the ROC worker which will flush the OFFCHANNEL queue.
                 * queue are sent in ROC session.
                 */
                flush_work(&mvm->roc_done_wk);
 -      } else {
 -              /*
 -               * By now, all the AC queues are empty. The AGG queues are
 -               * empty too. We already got all the Tx responses for all the
 -               * packets in the queues. The drain work can have been
 -               * triggered. Flush it.
 -               */
 -              flush_work(&mvm->sta_drained_wk);
        }
  }
  
@@@ -1513,7 -1556,7 +1513,7 @@@ static void iwl_mvm_mac_remove_interfac
  
        if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
                mvm->p2p_device_vif = NULL;
 -              iwl_mvm_rm_bcast_sta(mvm, vif);
 +              iwl_mvm_rm_p2p_bcast_sta(mvm, vif);
                iwl_mvm_binding_remove_vif(mvm, vif);
                iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
                mvmvif->phy_ctxt = NULL;
        iwl_mvm_mac_ctxt_remove(mvm, vif);
  
  out_release:
 -      iwl_mvm_mac_ctxt_release(mvm, vif);
        mutex_unlock(&mvm->mutex);
  }
  
@@@ -2361,18 -2405,15 +2361,18 @@@ static void __iwl_mvm_mac_sta_notify(st
        unsigned long txqs = 0, tids = 0;
        int tid;
  
 +      /*
 +       * If we have TVQM then we get too high queue numbers - luckily
 +       * we really shouldn't get here with that because such hardware
 +       * should have firmware supporting buffer station offload.
 +       */
 +      if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
 +              return;
 +
        spin_lock_bh(&mvmsta->lock);
        for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
                struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
  
 -              if (!iwl_mvm_is_dqa_supported(mvm) &&
 -                  tid_data->state != IWL_AGG_ON &&
 -                  tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
 -                      continue;
 -
                if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE)
                        continue;
  
  
        switch (cmd) {
        case STA_NOTIFY_SLEEP:
 -              if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
 -                      ieee80211_sta_block_awake(hw, sta, true);
 -
                for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
                        ieee80211_sta_set_buffered(sta, tid, true);
  
@@@ -2528,8 -2572,7 +2528,8 @@@ iwl_mvm_tdls_check_trigger(struct iwl_m
  
        trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TDLS);
        tdls_trig = (void *)trig->data;
 -      if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
 +      if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
 +                                         ieee80211_vif_to_wdev(vif), trig))
                return;
  
        if (!(tdls_trig->action_bitmap & BIT(action)))
            memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0)
                return;
  
 -      iwl_mvm_fw_dbg_collect_trig(mvm, trig,
 -                                  "TDLS event occurred, peer %pM, action %d",
 -                                  peer_addr, action);
 +      iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
 +                              "TDLS event occurred, peer %pM, action %d",
 +                              peer_addr, action);
  }
  
  static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
        spin_lock_bh(&mvm_sta->lock);
        for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
                tid_data = &mvm_sta->tid_data[i];
-               while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames)))
+               while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) {
+                       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+                       /*
+                        * The first deferred frame should've stopped the MAC
+                        * queues, so we should never get a second deferred
+                        * frame for the RA/TID.
+                        */
+                       iwl_mvm_start_mac_queues(mvm, info->hw_queue);
                        ieee80211_free_txskb(mvm->hw, skb);
+               }
        }
        spin_unlock_bh(&mvm_sta->lock);
  }
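
The purge loop above drains a per-station deferred-frame list under the station lock, restarting the MAC queue each frame had stopped before freeing it. A condensed user-space sketch of that pop, restart, free pattern (all names hypothetical):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a deferred frame: each entry that was
 * deferred also stopped a (numbered) transmit queue. */
struct frame {
	int stopped_queue;
	struct frame *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct frame *deferred;

static void start_queue(int q)
{
	printf("restarting queue %d\n", q);
}

/* Mirrors the purge loop: under the lock, pop every deferred frame,
 * restart the queue it had stopped, then free it. */
static void purge_deferred(void)
{
	pthread_mutex_lock(&lock);
	while (deferred) {
		struct frame *f = deferred;

		deferred = f->next;
		start_queue(f->stopped_queue);
		free(f);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	struct frame *f = malloc(sizeof(*f));

	f->stopped_queue = 5;
	f->next = NULL;
	deferred = f;
	purge_deferred();
	return 0;
}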
@@@ -2578,6 -2631,9 +2588,6 @@@ static int iwl_mvm_mac_sta_state(struc
        if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
                return -EINVAL;
  
 -      /* if a STA is being removed, reuse its ID */
 -      flush_work(&mvm->sta_drained_wk);
 -
        /*
         * If we are in a STA removal flow and in DQA mode:
         *
         * make sure the worker is no longer handling frames for this STA.
         */
        if (old_state == IEEE80211_STA_NONE &&
 -          new_state == IEEE80211_STA_NOTEXIST &&
 -          iwl_mvm_is_dqa_supported(mvm)) {
 +          new_state == IEEE80211_STA_NOTEXIST) {
                iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
                flush_work(&mvm->add_stream_wk);
  
@@@ -3835,9 -3892,7 +3845,9 @@@ static int iwl_mvm_pre_channel_switch(s
        IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
                           chsw->chandef.center_freq1);
  
 -      iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH);
 +      iwl_fw_dbg_trigger_simple_stop(&mvm->fwrt,
 +                                     ieee80211_vif_to_wdev(vif),
 +                                     FW_DBG_TRIGGER_CHANNEL_SWITCH);
  
        switch (vif->type) {
        case NL80211_IFTYPE_AP:
@@@ -3974,7 -4029,8 +3984,7 @@@ static void iwl_mvm_mac_flush(struct ie
                return;
  
        /* Make sure we're done with the deferred traffic before flushing */
 -      if (iwl_mvm_is_dqa_supported(mvm))
 -              flush_work(&mvm->add_stream_wk);
 +      flush_work(&mvm->add_stream_wk);
  
        mutex_lock(&mvm->mutex);
        mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@@ -4111,11 -4167,11 +4121,11 @@@ static void iwl_mvm_event_mlme_callback
                                        struct ieee80211_vif *vif,
                                        const struct ieee80211_event *event)
  {
 -#define CHECK_MLME_TRIGGER(_cnt, _fmt...)                     \
 -      do {                                                    \
 -              if ((trig_mlme->_cnt) && --(trig_mlme->_cnt))   \
 -                      break;                                  \
 -              iwl_mvm_fw_dbg_collect_trig(mvm, trig, _fmt);   \
 +#define CHECK_MLME_TRIGGER(_cnt, _fmt...)                             \
 +      do {                                                            \
 +              if ((trig_mlme->_cnt) && --(trig_mlme->_cnt))           \
 +                      break;                                          \
 +              iwl_fw_dbg_collect_trig(&(mvm)->fwrt, trig, _fmt);      \
        } while (0)
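
CHECK_MLME_TRIGGER counts occurrences before collecting: a zero count fires every time, while a count of N fires on the Nth event and on every event after it, since the count sticks at zero. A small stand-alone sketch of that countdown idiom (names hypothetical):

#include <stdio.h>

/* Mirrors the occurrence-counting test above: if the count is
 * nonzero and the pre-decrement leaves it nonzero, skip; once it
 * hits zero, collect on this and every later event. */
#define CHECK_TRIGGER(cnt, msg)                         \
	do {                                            \
		if ((cnt) && --(cnt))                   \
			break;                          \
		printf("collect: %s\n", (msg));         \
	} while (0)

int main(void)
{
	unsigned int count = 3;
	int i;

	for (i = 1; i <= 4; i++)
		CHECK_TRIGGER(count, "mlme event");  /* fires on events 3 and 4 */

	return 0;
}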
  
        struct iwl_fw_dbg_trigger_tlv *trig;
  
        trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
        trig_mlme = (void *)trig->data;
 -      if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
 +      if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
 +                                         ieee80211_vif_to_wdev(vif), trig))
                return;
  
        if (event->u.mlme.data == ASSOC_EVENT) {
@@@ -4168,17 -4223,16 +4178,17 @@@ static void iwl_mvm_event_bar_rx_callba
  
        trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
        ba_trig = (void *)trig->data;
 -      if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
 +      if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
 +                                         ieee80211_vif_to_wdev(vif), trig))
                return;
  
        if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
                return;
  
 -      iwl_mvm_fw_dbg_collect_trig(mvm, trig,
 -                                  "BAR received from %pM, tid %d, ssn %d",
 -                                  event->u.ba.sta->addr, event->u.ba.tid,
 -                                  event->u.ba.ssn);
 +      iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
 +                              "BAR received from %pM, tid %d, ssn %d",
 +                              event->u.ba.sta->addr, event->u.ba.tid,
 +                              event->u.ba.ssn);
  }
  
  static void
@@@ -4194,16 -4248,15 +4204,16 @@@ iwl_mvm_event_frame_timeout_callback(st
  
        trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
        ba_trig = (void *)trig->data;
 -      if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
 +      if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
 +                                         ieee80211_vif_to_wdev(vif), trig))
                return;
  
        if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid)))
                return;
  
 -      iwl_mvm_fw_dbg_collect_trig(mvm, trig,
 -                                  "Frame from %pM timed out, tid %d",
 -                                  event->u.ba.sta->addr, event->u.ba.tid);
 +      iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
 +                              "Frame from %pM timed out, tid %d",
 +                              event->u.ba.sta->addr, event->u.ba.tid);
  }
  
  static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
@@@ -4237,8 -4290,7 +4247,8 @@@ void iwl_mvm_sync_rx_queues_internal(st
        lockdep_assert_held(&mvm->mutex);
  
        /* TODO - remove a000 disablement when we have RXQ config API */
 -      if (!iwl_mvm_has_new_rx_api(mvm) || iwl_mvm_has_new_tx_api(mvm))
 +      if (!iwl_mvm_has_new_rx_api(mvm) ||
 +          mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_A000)
                return;
  
        notif->cookie = mvm->queue_sync_cookie;
index 6b8e57b7234ac442a18ffbff9d89b068e63b22e9,71c8b800ffa99874bd4120e59e2aa7f51212d2da..67ffd9774712b26c4a25d921a043959e12cfd113
@@@ -63,6 -63,7 +63,6 @@@
  #include "iwl-trans.h"
  #include "mvm.h"
  #include "fw-api.h"
 -#include "fw-dbg.h"
  
  static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
                                   int queue, struct ieee80211_sta *sta)
@@@ -635,9 -636,9 +635,9 @@@ static bool iwl_mvm_reorder(struct iwl_
  
        baid_data = rcu_dereference(mvm->baid_map[baid]);
        if (!baid_data) {
-               WARN(!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN),
-                    "Received baid %d, but no data exists for this BAID\n",
-                    baid);
+               IWL_DEBUG_RX(mvm,
+                            "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
+                             baid, reorder);
                return false;
        }
  
@@@ -758,7 -759,9 +758,9 @@@ static void iwl_mvm_agg_rx_received(str
  
        data = rcu_dereference(mvm->baid_map[baid]);
        if (!data) {
-               WARN_ON(!(reorder_data & IWL_RX_MPDU_REORDER_BA_OLD_SN));
+               IWL_DEBUG_RX(mvm,
+                            "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
+                             baid, reorder_data);
                goto out;
        }
  
@@@ -851,7 -854,7 +853,7 @@@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm 
  
        rcu_read_lock();
  
 -      if (le16_to_cpu(desc->status) & IWL_RX_MPDU_STATUS_SRC_STA_FOUND) {
 +      if (desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
                u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK;
  
                if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
                        rssi = le32_to_cpu(rssi_trig->rssi);
  
                        trig_check =
 -                              iwl_fw_dbg_trigger_check_stop(mvm, mvmsta->vif,
 +                              iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
 +                                                            ieee80211_vif_to_wdev(mvmsta->vif),
                                                              trig);
                        if (trig_check && rx_status->signal < rssi)
 -                              iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL);
 +                              iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
 +                                                      NULL);
                }
  
                if (ieee80211_is_data(hdr->frame_control))
index 922cd5379841f0ca7486e9f0f765c0f28437b7e7,027ee5e72172c85f9eaa98f1fd1a26489ab74820..b14830be0c22b03fa53a9132591f2b048d3afbf8
@@@ -121,7 -121,8 +121,8 @@@ int iwl_mvm_sta_send_to_fw(struct iwl_m
                .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
                .add_modify = update ? 1 : 0,
                .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
-                                                STA_FLG_MIMO_EN_MSK),
+                                                STA_FLG_MIMO_EN_MSK |
+                                                STA_FLG_RTS_MIMO_PROT),
                .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
        };
        int ret;
@@@ -290,12 -291,66 +291,12 @@@ static void iwl_mvm_rx_agg_session_expi
                goto unlock;
  
        mvm_sta = iwl_mvm_sta_from_mac80211(sta);
-       ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
-                                         sta->addr, ba_data->tid);
+       ieee80211_rx_ba_timer_expired(mvm_sta->vif,
+                                     sta->addr, ba_data->tid);
  unlock:
        rcu_read_unlock();
  }
  
 -static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
 -                               struct ieee80211_sta *sta)
 -{
 -      unsigned long used_hw_queues;
 -      struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 -      unsigned int wdg_timeout =
 -              iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
 -      u32 ac;
 -
 -      lockdep_assert_held(&mvm->mutex);
 -
 -      used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);
 -
 -      /* Find available queues, and allocate them to the ACs */
 -      for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
 -              u8 queue = find_first_zero_bit(&used_hw_queues,
 -                                             mvm->first_agg_queue);
 -
 -              if (queue >= mvm->first_agg_queue) {
 -                      IWL_ERR(mvm, "Failed to allocate STA queue\n");
 -                      return -EBUSY;
 -              }
 -
 -              __set_bit(queue, &used_hw_queues);
 -              mvmsta->hw_queue[ac] = queue;
 -      }
 -
 -      /* Found a place for all queues - enable them */
 -      for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
 -              iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
 -                                    mvmsta->hw_queue[ac],
 -                                    iwl_mvm_ac_to_tx_fifo[ac], 0,
 -                                    wdg_timeout);
 -              mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
 -      }
 -
 -      return 0;
 -}
 -
 -static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
 -                                  struct ieee80211_sta *sta)
 -{
 -      struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 -      unsigned long sta_msk;
 -      int i;
 -
 -      lockdep_assert_held(&mvm->mutex);
 -
 -      /* disable the TDLS STA-specific queues */
 -      sta_msk = mvmsta->tfd_queue_msk;
 -      for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
 -              iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
 -}
 -
  /* Disable aggregations for a bitmap of TIDs for a given station */
  static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
                                        unsigned long disable_agg_tids,
@@@ -703,7 -758,7 +704,7 @@@ static int iwl_mvm_sta_alloc_queue(stru
  {
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_trans_txq_scd_cfg cfg = {
 -              .fifo = iwl_mvm_ac_to_tx_fifo[ac],
 +              .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
                .sta_id = mvmsta->sta_id,
                .tid = tid,
                .frame_limit = IWL_FRAME_LIMIT,
@@@ -1261,7 -1316,7 +1262,7 @@@ static void iwl_mvm_realloc_queues_afte
                        u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
  
                        cfg.tid = i;
 -                      cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
 +                      cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
                        cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
                                         txq_id ==
                                         IWL_MVM_DQA_BSS_CLIENT_QUEUE);
                        mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
                }
        }
 -
 -      atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0);
  }
  
  int iwl_mvm_add_sta(struct iwl_mvm *mvm,
  
        spin_lock_init(&mvm_sta->lock);
  
 -      /* In DQA mode, if this is a HW restart, re-alloc existing queues */
 -      if (iwl_mvm_is_dqa_supported(mvm) &&
 -          test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
 +      /* if this is a HW restart re-alloc existing queues */
 +      if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
                iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
                goto update_fw;
        }
        mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
  
        /* HW restart, don't assume the memory has been zeroed */
 -      atomic_set(&mvm->pending_frames[sta_id], 0);
        mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
        mvm_sta->tfd_queue_msk = 0;
  
 -      /*
 -       * Allocate new queues for a TDLS station, unless we're in DQA mode,
 -       * and then they'll be allocated dynamically
 -       */
 -      if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) {
 -              ret = iwl_mvm_tdls_sta_init(mvm, sta);
 -              if (ret)
 -                      return ret;
 -      } else if (!iwl_mvm_is_dqa_supported(mvm)) {
 -              for (i = 0; i < IEEE80211_NUM_ACS; i++)
 -                      if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
 -                              mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
 -      }
 -
        /* for HW restart - reset everything but the sequence number */
        for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
                u16 seq = mvm_sta->tid_data[i].seq_number;
                memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
                mvm_sta->tid_data[i].seq_number = seq;
  
 -              if (!iwl_mvm_is_dqa_supported(mvm))
 -                      continue;
 -
                /*
                 * Mark all queues for this STA as unallocated and defer TX
                 * frames until the queue is allocated
                mvm_sta->dup_data = dup_data;
        }
  
 -      if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
 +      if (!iwl_mvm_has_new_tx_api(mvm)) {
                ret = iwl_mvm_reserve_sta_stream(mvm, sta,
                                                 ieee80211_vif_type_p2p(vif));
                if (ret)
@@@ -1386,6 -1462,8 +1387,6 @@@ update_fw
        return 0;
  
  err:
 -      if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
 -              iwl_mvm_tdls_sta_deinit(mvm, sta);
        return ret;
  }
  
@@@ -1458,6 -1536,79 +1459,6 @@@ static int iwl_mvm_rm_sta_common(struc
        return 0;
  }
  
 -void iwl_mvm_sta_drained_wk(struct work_struct *wk)
 -{
 -      struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
 -      u8 sta_id;
 -
 -      /*
 -       * The mutex is needed because of the SYNC cmd, but not only: if the
 -       * work would run concurrently with iwl_mvm_rm_sta, it would run before
 -       * iwl_mvm_rm_sta sets the station as busy, and exit. Then
 -       * iwl_mvm_rm_sta would set the station as busy, and nobody will clean
 -       * that later.
 -       */
 -      mutex_lock(&mvm->mutex);
 -
 -      for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
 -              int ret;
 -              struct ieee80211_sta *sta =
 -                      rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
 -                                                lockdep_is_held(&mvm->mutex));
 -
 -              /*
 -               * This station is in use or RCU-removed; the latter happens in
 -               * managed mode, where mac80211 removes the station before we
 -               * can remove it from firmware (we can only do that after the
 -               * MAC is marked unassociated), and possibly while the deauth
 -               * frame to disconnect from the AP is still queued. Then, the
 -               * station pointer is -ENOENT when the last skb is reclaimed.
 -               */
 -              if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
 -                      continue;
 -
 -              if (PTR_ERR(sta) == -EINVAL) {
 -                      IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
 -                              sta_id);
 -                      continue;
 -              }
 -
 -              if (!sta) {
 -                      IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
 -                              sta_id);
 -                      continue;
 -              }
 -
 -              WARN_ON(PTR_ERR(sta) != -EBUSY);
 -              /* This station was removed and we waited until it got drained,
 -               * we can now proceed and remove it.
 -               */
 -              ret = iwl_mvm_rm_sta_common(mvm, sta_id);
 -              if (ret) {
 -                      IWL_ERR(mvm,
 -                              "Couldn't remove sta %d after it was drained\n",
 -                              sta_id);
 -                      continue;
 -              }
 -              RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
 -              clear_bit(sta_id, mvm->sta_drained);
 -
 -              if (mvm->tfd_drained[sta_id]) {
 -                      unsigned long i, msk = mvm->tfd_drained[sta_id];
 -
 -                      for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
 -                              iwl_mvm_disable_txq(mvm, i, i,
 -                                                  IWL_MAX_TID_COUNT, 0);
 -
 -                      mvm->tfd_drained[sta_id] = 0;
 -                      IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
 -                                     sta_id, msk);
 -              }
 -      }
 -
 -      mutex_unlock(&mvm->mutex);
 -}
 -
  static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
                                       struct ieee80211_vif *vif,
                                       struct iwl_mvm_sta *mvm_sta)
  int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
                                  struct iwl_mvm_sta *mvm_sta)
  {
 -      int i, ret;
 +      int i;
  
        for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
                u16 txq_id;
 +              int ret;
  
                spin_lock_bh(&mvm_sta->lock);
                txq_id = mvm_sta->tid_data[i].txq_id;
  
                ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
                if (ret)
 -                      break;
 +                      return ret;
        }
  
 -      return ret;
 +      return 0;
  }
  
  int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
        if (iwl_mvm_has_new_rx_api(mvm))
                kfree(mvm_sta->dup_data);
  
 -      if ((vif->type == NL80211_IFTYPE_STATION &&
 -           mvmvif->ap_sta_id == sta_id) ||
 -          iwl_mvm_is_dqa_supported(mvm)){
 -              ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
 -              if (ret)
 -                      return ret;
 -              /* flush its queues here since we are freeing mvm_sta */
 -              ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
 -              if (ret)
 -                      return ret;
 -              if (iwl_mvm_has_new_tx_api(mvm)) {
 -                      ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
 -              } else {
 -                      u32 q_mask = mvm_sta->tfd_queue_msk;
 +      ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
 +      if (ret)
 +              return ret;
  
 -                      ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
 -                                                           q_mask);
 -              }
 -              if (ret)
 -                      return ret;
 -              ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
 -
 -              /* If DQA is supported - the queues can be disabled now */
 -              if (iwl_mvm_is_dqa_supported(mvm)) {
 -                      iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
 -                      /*
 -                       * If pending_frames is set at this point - it must be
 -                       * driver internal logic error, since queues are empty
 -                       * and removed successuly.
 -                       * warn on it but set it to 0 anyway to avoid station
 -                       * not being removed later in the function
 -                       */
 -                      WARN_ON(atomic_xchg(&mvm->pending_frames[sta_id], 0));
 -              }
 +      /* flush its queues here since we are freeing mvm_sta */
 +      ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
 +      if (ret)
 +              return ret;
 +      if (iwl_mvm_has_new_tx_api(mvm)) {
 +              ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
 +      } else {
 +              u32 q_mask = mvm_sta->tfd_queue_msk;
  
 -              /* If there is a TXQ still marked as reserved - free it */
 -              if (iwl_mvm_is_dqa_supported(mvm) &&
 -                  mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
 -                      u8 reserved_txq = mvm_sta->reserved_queue;
 -                      enum iwl_mvm_queue_status *status;
 -
 -                      /*
 -                       * If no traffic has gone through the reserved TXQ - it
 -                       * is still marked as IWL_MVM_QUEUE_RESERVED, and
 -                       * should be manually marked as free again
 -                       */
 -                      spin_lock_bh(&mvm->queue_info_lock);
 -                      status = &mvm->queue_info[reserved_txq].status;
 -                      if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
 -                               (*status != IWL_MVM_QUEUE_FREE),
 -                               "sta_id %d reserved txq %d status %d",
 -                               sta_id, reserved_txq, *status)) {
 -                              spin_unlock_bh(&mvm->queue_info_lock);
 -                              return -EINVAL;
 -                      }
 +              ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
 +                                                   q_mask);
 +      }
 +      if (ret)
 +              return ret;
 +
 +      ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
 +
 +      iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
 +
 +      /* If there is a TXQ still marked as reserved - free it */
 +      if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
 +              u8 reserved_txq = mvm_sta->reserved_queue;
 +              enum iwl_mvm_queue_status *status;
  
 -                      *status = IWL_MVM_QUEUE_FREE;
 +              /*
 +               * If no traffic has gone through the reserved TXQ - it
 +               * is still marked as IWL_MVM_QUEUE_RESERVED, and
 +               * should be manually marked as free again
 +               */
 +              spin_lock_bh(&mvm->queue_info_lock);
 +              status = &mvm->queue_info[reserved_txq].status;
 +              if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
 +                       (*status != IWL_MVM_QUEUE_FREE),
 +                       "sta_id %d reserved txq %d status %d",
 +                       sta_id, reserved_txq, *status)) {
                        spin_unlock_bh(&mvm->queue_info_lock);
 +                      return -EINVAL;
                }
  
 -              if (vif->type == NL80211_IFTYPE_STATION &&
 -                  mvmvif->ap_sta_id == sta_id) {
 -                      /* if associated - we can't remove the AP STA now */
 -                      if (vif->bss_conf.assoc)
 -                              return ret;
 +              *status = IWL_MVM_QUEUE_FREE;
 +              spin_unlock_bh(&mvm->queue_info_lock);
 +      }
 +
 +      if (vif->type == NL80211_IFTYPE_STATION &&
 +          mvmvif->ap_sta_id == sta_id) {
 +              /* if associated - we can't remove the AP STA now */
 +              if (vif->bss_conf.assoc)
 +                      return ret;
  
 -                      /* unassoc - go ahead - remove the AP STA now */
 -                      mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
 +              /* unassoc - go ahead - remove the AP STA now */
 +              mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
  
 -                      /* clear d0i3_ap_sta_id if no longer relevant */
 -                      if (mvm->d0i3_ap_sta_id == sta_id)
 -                              mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
 -              }
 +              /* clear d0i3_ap_sta_id if no longer relevant */
 +              if (mvm->d0i3_ap_sta_id == sta_id)
 +                      mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
        }
  
        /*
         * calls the drain worker.
         */
        spin_lock_bh(&mvm_sta->lock);
 +      spin_unlock_bh(&mvm_sta->lock);
  
 -      /*
 -       * There are frames pending on the AC queues for this station.
 -       * We need to wait until all the frames are drained...
 -       */
 -      if (atomic_read(&mvm->pending_frames[sta_id])) {
 -              rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id],
 -                                 ERR_PTR(-EBUSY));
 -              spin_unlock_bh(&mvm_sta->lock);
 -
 -              /* disable TDLS sta queues on drain complete */
 -              if (sta->tdls) {
 -                      mvm->tfd_drained[sta_id] = mvm_sta->tfd_queue_msk;
 -                      IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n", sta_id);
 -              }
 -
 -              ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
 -      } else {
 -              spin_unlock_bh(&mvm_sta->lock);
 -
 -              if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
 -                      iwl_mvm_tdls_sta_deinit(mvm, sta);
 -
 -              ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
 -              RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
 -      }
 +      ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
 +      RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
  
        return ret;
  }
@@@ -1693,7 -1879,7 +1694,7 @@@ static void iwl_mvm_enable_aux_queue(st
                                                    IWL_MAX_TID_COUNT,
                                                    wdg_timeout);
                mvm->aux_queue = queue;
 -      } else if (iwl_mvm_is_dqa_supported(mvm)) {
 +      } else {
                struct iwl_trans_txq_scd_cfg cfg = {
                        .fifo = IWL_MVM_TX_FIFO_MCAST,
                        .sta_id = mvm->aux_sta.sta_id,
  
                iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
                                   wdg_timeout);
 -      } else {
 -              iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
 -                                    IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
        }
  }
  
@@@ -1803,7 -1992,7 +1804,7 @@@ int iwl_mvm_send_add_bcast_sta(struct i
  
        lockdep_assert_held(&mvm->mutex);
  
 -      if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
 +      if (!iwl_mvm_has_new_tx_api(mvm)) {
                if (vif->type == NL80211_IFTYPE_AP ||
                    vif->type == NL80211_IFTYPE_ADHOC)
                        queue = mvm->probe_queue;
@@@ -1890,7 -2079,8 +1891,7 @@@ int iwl_mvm_send_rm_bcast_sta(struct iw
  
        lockdep_assert_held(&mvm->mutex);
  
 -      if (iwl_mvm_is_dqa_supported(mvm))
 -              iwl_mvm_free_bcast_sta_queues(mvm, vif);
 +      iwl_mvm_free_bcast_sta_queues(mvm, vif);
  
        ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
        if (ret)
  int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
  {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 -      u32 qmask = 0;
  
        lockdep_assert_held(&mvm->mutex);
  
 -      if (!iwl_mvm_is_dqa_supported(mvm)) {
 -              qmask = iwl_mvm_mac_get_queues_mask(vif);
 -
 -              /*
 -               * The firmware defines the TFD queue mask to only be relevant
 -               * for *unicast* queues, so the multicast (CAB) queue shouldn't
 -               * be included. This only happens in NL80211_IFTYPE_AP vif type,
 -               * so the next line will only have an effect there.
 -               */
 -              qmask &= ~BIT(vif->cab_queue);
 -      }
 -
 -      return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
 +      return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
                                        ieee80211_vif_type_p2p(vif),
                                        IWL_STA_GENERAL_PURPOSE);
  }
   * @mvm: the mvm component
   * @vif: the interface to which the broadcast station is added
   * @bsta: the broadcast station to add. */
 -int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 +int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
  {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
@@@ -1947,7 -2150,7 +1948,7 @@@ void iwl_mvm_dealloc_bcast_sta(struct i
   * Send the FW a request to remove the station from its internal data
   * structures, and in addition remove it from the local data structure.
   */
 -int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 +int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
  {
        int ret;
  
@@@ -1986,6 -2189,9 +1987,6 @@@ int iwl_mvm_add_mcast_sta(struct iwl_mv
  
        lockdep_assert_held(&mvm->mutex);
  
 -      if (!iwl_mvm_is_dqa_supported(mvm))
 -              return 0;
 -
        if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
                    vif->type != NL80211_IFTYPE_ADHOC))
                return -ENOTSUPP;
@@@ -2050,6 -2256,9 +2051,6 @@@ int iwl_mvm_rm_mcast_sta(struct iwl_mv
  
        lockdep_assert_held(&mvm->mutex);
  
 -      if (!iwl_mvm_is_dqa_supported(mvm))
 -              return 0;
 -
        iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
  
        iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
@@@ -2299,6 -2508,8 +2300,6 @@@ int iwl_mvm_sta_tx_agg(struct iwl_mvm *
                mvm_sta->tid_disable_agg &= ~BIT(tid);
        } else {
                /* In DQA-mode the queue isn't removed on agg termination */
 -              if (!iwl_mvm_is_dqa_supported(mvm))
 -                      mvm_sta->tfd_queue_msk &= ~BIT(queue);
                mvm_sta->tid_disable_agg |= BIT(tid);
        }
  
@@@ -2401,17 -2612,19 +2402,17 @@@ int iwl_mvm_sta_tx_agg_start(struct iwl
                        ret = -ENXIO;
                        goto release_locks;
                }
 -      } else if (iwl_mvm_is_dqa_supported(mvm) &&
 -                 unlikely(mvm->queue_info[txq_id].status ==
 +      } else if (unlikely(mvm->queue_info[txq_id].status ==
                            IWL_MVM_QUEUE_SHARED)) {
                ret = -ENXIO;
                IWL_DEBUG_TX_QUEUES(mvm,
                                    "Can't start tid %d agg on shared queue!\n",
                                    tid);
                goto release_locks;
 -      } else if (!iwl_mvm_is_dqa_supported(mvm) ||
 -          mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
 +      } else if (mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
                txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
 -                                               mvm->first_agg_queue,
 -                                               mvm->last_agg_queue);
 +                                               IWL_MVM_DQA_MIN_DATA_QUEUE,
 +                                               IWL_MVM_DQA_MAX_DATA_QUEUE);
                if (txq_id < 0) {
                        ret = txq_id;
                        IWL_ERR(mvm, "Failed to allocate agg queue\n");
@@@ -2529,34 -2742,37 +2530,34 @@@ int iwl_mvm_sta_tx_agg_oper(struct iwl_
        queue_status = mvm->queue_info[queue].status;
        spin_unlock_bh(&mvm->queue_info_lock);
  
 -      /* In DQA mode, the existing queue might need to be reconfigured */
 -      if (iwl_mvm_is_dqa_supported(mvm)) {
 -              /* Maybe there is no need to even alloc a queue... */
 -              if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
 -                      alloc_queue = false;
 +      /* Maybe there is no need to even alloc a queue... */
 +      if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
 +              alloc_queue = false;
  
 +      /*
 +       * Only reconfig the SCD for the queue if the window size has
 +       * changed from current (become smaller)
 +       */
 +      if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
                /*
 -               * Only reconfig the SCD for the queue if the window size has
 -               * changed from current (become smaller)
 +               * If reconfiguring an existing queue, it first must be
 +               * drained
                 */
 -              if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
 -                      /*
 -                       * If reconfiguring an existing queue, it first must be
 -                       * drained
 -                       */
 -                      ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
 -                                                           BIT(queue));
 -                      if (ret) {
 -                              IWL_ERR(mvm,
 -                                      "Error draining queue before reconfig\n");
 -                              return ret;
 -                      }
 +              ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
 +                                                   BIT(queue));
 +              if (ret) {
 +                      IWL_ERR(mvm,
 +                              "Error draining queue before reconfig\n");
 +                      return ret;
 +              }
  
 -                      ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
 -                                                 mvmsta->sta_id, tid,
 -                                                 buf_size, ssn);
 -                      if (ret) {
 -                              IWL_ERR(mvm,
 -                                      "Error reconfiguring TXQ #%d\n", queue);
 -                              return ret;
 -                      }
 +              ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
 +                                         mvmsta->sta_id, tid,
 +                                         buf_size, ssn);
 +              if (ret) {
 +                      IWL_ERR(mvm,
 +                              "Error reconfiguring TXQ #%d\n", queue);
 +                      return ret;
                }
        }
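
The hunk above only reconfigures the scheduler when the aggregation window shrinks, and insists the queue be drained first so in-flight frames are not reclaimed against stale state. A toy sketch of that shrink-only, drain-first rule (hypothetical types and names):

#include <stdbool.h>
#include <stdio.h>

struct txq {
	int depth;		/* frames still queued */
	int window;		/* aggregation window size */
};

static bool txq_empty(const struct txq *q)
{
	return q->depth == 0;
}

/* A queue must be drained before its scheduler entry is rewritten,
 * otherwise in-flight frames would be reclaimed against the old
 * window. Returns 0 on success, -1 if the queue is not yet empty. */
static int reconfig_queue(struct txq *q, int new_window)
{
	if (new_window >= q->window)
		return 0;	/* only shrinking requires a reconfig */
	if (!txq_empty(q))
		return -1;	/* caller would wait for the drain here */
	q->window = new_window;
	return 0;
}

int main(void)
{
	struct txq q = { .depth = 0, .window = 64 };

	printf("reconfig: %d, window now %d\n",
	       reconfig_queue(&q, 16), q.window);
	return 0;
}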
  
@@@ -2652,6 -2868,18 +2653,6 @@@ int iwl_mvm_sta_tx_agg_stop(struct iwl_
                                    "ssn = %d, next_recl = %d\n",
                                    tid_data->ssn, tid_data->next_reclaimed);
  
 -              /*
 -               * There are still packets for this RA / TID in the HW.
 -               * Not relevant for DQA mode, since there is no need to disable
 -               * the queue.
 -               */
 -              if (!iwl_mvm_is_dqa_supported(mvm) &&
 -                  tid_data->ssn != tid_data->next_reclaimed) {
 -                      tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
 -                      err = 0;
 -                      break;
 -              }
 -
                tid_data->ssn = 0xffff;
                tid_data->state = IWL_AGG_OFF;
                spin_unlock_bh(&mvmsta->lock);
                ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
  
                iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
 -
 -              if (!iwl_mvm_is_dqa_supported(mvm)) {
 -                      int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
 -
 -                      iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
 -              }
                return 0;
        case IWL_AGG_STARTING:
        case IWL_EMPTYING_HW_QUEUE_ADDBA:
@@@ -2728,6 -2962,13 +2729,6 @@@ int iwl_mvm_sta_tx_agg_flush(struct iwl
                iwl_mvm_drain_sta(mvm, mvmsta, false);
  
                iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
 -
 -              if (!iwl_mvm_is_dqa_supported(mvm)) {
 -                      int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
 -
 -                      iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
 -                                          tid, 0);
 -              }
        }
  
        return 0;
@@@ -3346,6 -3587,15 +3347,6 @@@ void iwl_mvm_sta_modify_sleep_tx_count(
                        u16 n_queued;
  
                        tid_data = &mvmsta->tid_data[tid];
 -                      if (WARN(!iwl_mvm_is_dqa_supported(mvm) &&
 -                               tid_data->state != IWL_AGG_ON &&
 -                               tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
 -                               "TID %d state is %d\n",
 -                               tid, tid_data->state)) {
 -                              spin_unlock_bh(&mvmsta->lock);
 -                              ieee80211_sta_eosp(sta);
 -                              return;
 -                      }
  
                        n_queued = iwl_mvm_tid_queued(mvm, tid_data);
                        if (n_queued > remaining) {
@@@ -3439,8 -3689,13 +3440,8 @@@ void iwl_mvm_sta_modify_disable_tx_ap(s
  
        mvm_sta->disable_tx = disable;
  
 -      /*
 -       * Tell mac80211 to start/stop queuing tx for this station,
 -       * but don't stop queuing if there are still pending frames
 -       * for this station.
 -       */
 -      if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
 -              ieee80211_sta_block_awake(mvm->hw, sta, disable);
 +      /* Tell mac80211 to start/stop queuing tx for this station */
 +      ieee80211_sta_block_awake(mvm->hw, sta, disable);
  
        iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
  
index 6d7d1a66af815ff68880f5857e3feffa39d899a1,5fcc9dd6be56de52fa0a063969cf58011b8eb3d8..321e47874ceb1394a06a9b7f5017b3d27e0958aa
@@@ -74,6 -74,7 +74,6 @@@
  #include "iwl-eeprom-parse.h"
  #include "mvm.h"
  #include "sta.h"
 -#include "fw-dbg.h"
  
  static void
  iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
        trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
        ba_trig = (void *)trig->data;
  
 -      if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
 +      if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
                return;
  
        if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
                return;
  
 -      iwl_mvm_fw_dbg_collect_trig(mvm, trig,
 -                                  "BAR sent to %pM, tid %d, ssn %d",
 -                                  addr, tid, ssn);
 +      iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
 +                              "BAR sent to %pM, tid %d, ssn %d",
 +                              addr, tid, ssn);
  }
  
  #define OPT_HDR(type, skb, off) \
@@@ -184,8 -185,14 +184,14 @@@ static u16 iwl_mvm_tx_csum(struct iwl_m
        else
                udp_hdr(skb)->check = 0;
  
-       /* mac header len should include IV, size is in words */
-       if (info->control.hw_key)
+       /*
+        * mac header len should include IV, size is in words unless
+        * the IV is added by the firmware like in WEP.
+        * In new Tx API, the IV is always added by the firmware.
+        */
+       if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key &&
+           info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
+           info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP104)
                mh_len += info->control.hw_key->iv_len;
        mh_len /= 2;
        offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
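
The checksum-offload path above folds the MAC header length, optionally including a driver-inserted IV, into the offload descriptor in 16-bit words. A sketch of that packing, with a made-up shift constant standing in for TX_CMD_OFFLD_MH_SIZE:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field layout: MAC header size, in 16-bit words,
 * packed into the upper bits of an offload descriptor word. */
#define OFFLD_MH_SIZE_SHIFT	8

static uint16_t pack_offload(unsigned int mac_hdr_bytes,
			     unsigned int iv_bytes, int fw_adds_iv)
{
	unsigned int mh_len = mac_hdr_bytes;
	uint16_t offload_assist = 0;

	/* The header length must cover the IV only when the driver,
	 * not the firmware, inserts it. */
	if (!fw_adds_iv)
		mh_len += iv_bytes;
	mh_len /= 2;		/* hardware counts 16-bit words */
	offload_assist |= mh_len << OFFLD_MH_SIZE_SHIFT;
	return offload_assist;
}

int main(void)
{
	/* 26-byte QoS header plus 8-byte CCMP IV, driver-inserted IV */
	printf("0x%04x\n", pack_offload(26, 8, 0));
	return 0;
}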
@@@ -552,6 -559,9 +558,6 @@@ static int iwl_mvm_get_ctrl_vif_queue(s
  {
        struct iwl_mvm_vif *mvmvif;
  
 -      if (!iwl_mvm_is_dqa_supported(mvm))
 -              return info->hw_queue;
 -
        mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif);
  
        switch (info->control.vif->type) {
@@@ -650,7 -660,8 +656,7 @@@ int iwl_mvm_tx_skb_non_sta(struct iwl_m
  
                        if (ap_sta_id != IWL_MVM_INVALID_STA)
                                sta_id = ap_sta_id;
 -              } else if (iwl_mvm_is_dqa_supported(mvm) &&
 -                         info.control.vif->type == NL80211_IFTYPE_MONITOR) {
 +              } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
                        queue = mvm->aux_queue;
                }
        }
                return -1;
        }
  
 -      /*
 -       * Increase the pending frames counter, so that later when a reply comes
 -       * in and the counter is decreased - we don't start getting negative
 -       * values.
 -       * Note that we don't need to make sure it isn't agg'd, since we're
 -       * TXing non-sta
 -       * For DQA mode - we shouldn't increase it though
 -       */
 -      if (!iwl_mvm_is_dqa_supported(mvm))
 -              atomic_inc(&mvm->pending_frames[sta_id]);
 -
        return 0;
  }
  
@@@ -736,7 -758,7 +742,7 @@@ static int iwl_mvm_tx_tso(struct iwl_mv
        max_amsdu_len = sta->max_amsdu_len;
  
        /* the Tx FIFO to which this A-MSDU will be routed */
 -      txf = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
 +      txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, tid_to_mac80211_ac[tid]);
  
        /*
         * Don't send an AMSDU that will be longer than the TXF.
         * fifo to be able to send bursts.
         */
        max_amsdu_len = min_t(unsigned int, max_amsdu_len,
 -                            mvm->smem_cfg.lmac[0].txfifo_size[txf] - 256);
 +                            mvm->fwrt.smem_cfg.lmac[0].txfifo_size[txf] -
 +                            256);
  
        if (unlikely(dbg_max_amsdu_len))
                max_amsdu_len = min_t(unsigned int, max_amsdu_len,
@@@ -979,13 -1000,22 +985,13 @@@ static int iwl_mvm_tx_mpdu(struct iwl_m
                }
        }
  
 -      if (iwl_mvm_is_dqa_supported(mvm) || is_ampdu)
 -              txq_id = mvmsta->tid_data[tid].txq_id;
 -
 -      if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) {
 -              /* default to TID 0 for non-QoS packets */
 -              u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid;
 -
 -              txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]];
 -      }
 +      txq_id = mvmsta->tid_data[tid].txq_id;
  
        WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
  
        /* Check if TXQ needs to be allocated or re-activated */
        if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE ||
 -                   !mvmsta->tid_data[tid].is_tid_active) &&
 -          iwl_mvm_is_dqa_supported(mvm)) {
 +                   !mvmsta->tid_data[tid].is_tid_active)) {
                /* If TXQ needs to be allocated... */
                if (txq_id == IWL_MVM_INVALID_QUEUE) {
                        iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
                                    txq_id);
        }
  
 -      if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
 +      if (!iwl_mvm_has_new_tx_api(mvm)) {
                /* Keep track of the time of the last frame for this RA/TID */
                mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
  
  
        spin_unlock(&mvmsta->lock);
  
 -      /* Increase pending frames count if this isn't AMPDU or DQA queue */
 -      if (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu)
 -              atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
 -
        return 0;
  
  drop_unlock_sta:
@@@ -1114,7 -1148,8 +1120,7 @@@ static void iwl_mvm_check_ratid_empty(s
        lockdep_assert_held(&mvmsta->lock);
  
        if ((tid_data->state == IWL_AGG_ON ||
 -           tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA ||
 -           iwl_mvm_is_dqa_supported(mvm)) &&
 +           tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
            iwl_mvm_tid_queued(mvm, tid_data) == 0) {
                /*
                 * Now that this aggregation or DQA queue is empty tell
                IWL_DEBUG_TX_QUEUES(mvm,
                                    "Can continue DELBA flow ssn = next_recl = %d\n",
                                    tid_data->next_reclaimed);
 -              if (!iwl_mvm_is_dqa_supported(mvm)) {
 -                      u8 mac80211_ac = tid_to_mac80211_ac[tid];
 -
 -                      iwl_mvm_disable_txq(mvm, tid_data->txq_id,
 -                                          vif->hw_queue[mac80211_ac], tid,
 -                                          CMD_ASYNC);
 -              }
                tid_data->state = IWL_AGG_OFF;
                ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
                break;
@@@ -1259,7 -1301,7 +1265,7 @@@ static void iwl_mvm_tx_status_check_tri
        trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS);
        status_trig = (void *)trig->data;
  
 -      if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
 +      if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
                return;
  
        for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
                if (status_trig->statuses[i].status != (status & TX_STATUS_MSK))
                        continue;
  
 -              iwl_mvm_fw_dbg_collect_trig(mvm, trig,
 -                                          "Tx status %d was received",
 -                                          status & TX_STATUS_MSK);
 +              iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
 +                                      "Tx status %d was received",
 +                                      status & TX_STATUS_MSK);
                break;
        }
  }
@@@ -1345,10 -1387,10 +1351,10 @@@ static void iwl_mvm_rx_tx_cmd_single(st
                        info->flags |= IEEE80211_TX_STAT_ACK;
                        break;
                case TX_STATUS_FAIL_DEST_PS:
 -                      /* In DQA, the FW should have stopped the queue and not
 +                      /* the FW should have stopped the queue and not
                         * return this status
                         */
 -                      WARN_ON(iwl_mvm_is_dqa_supported(mvm));
 +                      WARN_ON(1);
                        info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
                        break;
                default:
                ieee80211_tx_status(mvm->hw, skb);
        }
  
 -      if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue) {
 -              /* If this is an aggregation queue, we use the ssn since:
 -               * ssn = wifi seq_num % 256.
 -               * The seq_ctl is the sequence control of the packet to which
 -               * this Tx response relates. But if there is a hole in the
 -               * bitmap of the BA we received, this Tx response may allow to
 -               * reclaim the hole and all the subsequent packets that were
 -               * already acked. In that case, seq_ctl != ssn, and the next
 -               * packet to be reclaimed will be ssn and not seq_ctl. In that
 -               * case, several packets will be reclaimed even if
 -               * frame_count = 1.
 -               *
 -               * The ssn is the index (% 256) of the latest packet that has
 -               * treated (acked / dropped) + 1.
 -               */
 -              next_reclaimed = ssn;
 -      } else {
 -              /* The next packet to be reclaimed is the one after this one */
 -              next_reclaimed = IEEE80211_SEQ_TO_SN(seq_ctl + 0x10);
 -      }
 +      /* This is an aggregation queue or might become one, so we use
 +       * the ssn since: ssn = wifi seq_num % 256.
 +       * The seq_ctl is the sequence control of the packet to which
 +       * this Tx response relates. But if there is a hole in the
 +       * bitmap of the BA we received, this Tx response may allow to
 +       * reclaim the hole and all the subsequent packets that were
 +       * already acked. In that case, seq_ctl != ssn, and the next
 +       * packet to be reclaimed will be ssn and not seq_ctl. In that
 +       * case, several packets will be reclaimed even if
 +       * frame_count = 1.
 +       *
 +       * The ssn is the index (% 256) of the latest packet that has
 +       * been treated (acked / dropped) + 1.
 +       */
 +      next_reclaimed = ssn;
  
        IWL_DEBUG_TX_REPLY(mvm,
                           "TXQ %d status %s (0x%08x)\n",
                mvmsta = NULL;
        }
  
 -      /*
 -       * If the txq is not an AMPDU queue, there is no chance we freed
 -       * several skbs. Check that out...
 -       */
 -      if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue)
 -              goto out;
 -
 -      /* We can't free more than one frame at once on a shared queue */
 -      WARN_ON(skb_freed > 1);
 -
 -      /* If we have still frames for this STA nothing to do here */
 -      if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
 -              goto out;
 -
 -      if (mvmsta && mvmsta->vif->type == NL80211_IFTYPE_AP) {
 -
 -              /*
 -               * If there are no pending frames for this STA and
 -               * the tx to this station is not disabled, notify
 -               * mac80211 that this station can now wake up in its
 -               * STA table.
 -               * If mvmsta is not NULL, sta is valid.
 -               */
 -
 -              spin_lock_bh(&mvmsta->lock);
 -
 -              if (!mvmsta->disable_tx)
 -                      ieee80211_sta_block_awake(mvm->hw, sta, false);
 -
 -              spin_unlock_bh(&mvmsta->lock);
 -      }
 -
 -      if (PTR_ERR(sta) == -EBUSY || PTR_ERR(sta) == -ENOENT) {
 -              /*
 -               * We are draining and this was the last packet - pre_rcu_remove
 -               * has been called already. We might be after the
 -               * synchronize_net already.
 -               * Don't rely on iwl_mvm_rm_sta to see the empty Tx queues.
 -               */
 -              set_bit(sta_id, mvm->sta_drained);
 -              schedule_work(&mvm->sta_drained_wk);
 -      }
 -
  out:
        rcu_read_unlock();
  }
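
A minimal, self-contained sketch of the modulo-256 relation described in the
reclaim comment above (an editorial illustration, not part of this commit;
SEQ_TO_SN mirrors IEEE80211_SEQ_TO_SN and the numbers are made up):

    #include <stdio.h>

    /* the 12-bit 802.11 sequence number sits in bits 4..15 of the
     * sequence control field */
    #define SEQ_TO_SN(seq_ctl)  (((seq_ctl) & 0xfff0) >> 4)

    int main(void)
    {
            unsigned int seq_ctl = 0x0150;  /* frame with seq_num 21 */
            unsigned int ssn = 25;          /* BA window moved past a hole */

            /* once the hole is reclaimed, the next frame to reclaim is
             * ssn, not SEQ_TO_SN(seq_ctl) + 1, so several frames may be
             * freed even though frame_count == 1 */
            printf("sn=%u next_reclaimed=%u\n",
                   SEQ_TO_SN(seq_ctl), ssn % 256);
            return 0;
    }
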
@@@ -1564,8 -1654,9 +1570,8 @@@ static void iwl_mvm_rx_tx_cmd_agg(struc
        struct iwl_mvm_sta *mvmsta;
        int queue = SEQ_TO_QUEUE(sequence);
  
 -      if (WARN_ON_ONCE(queue < mvm->first_agg_queue &&
 -                       (!iwl_mvm_is_dqa_supported(mvm) ||
 -                        (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE))))
 +      if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE &&
 +                       (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)))
                return;
  
        if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))
@@@ -1730,6 -1821,8 +1736,8 @@@ void iwl_mvm_rx_ba_notif(struct iwl_mv
        struct iwl_mvm_tid_data *tid_data;
        struct iwl_mvm_sta *mvmsta;
  
+       ba_info.flags = IEEE80211_TX_STAT_AMPDU;
        if (iwl_mvm_has_new_tx_api(mvm)) {
                struct iwl_mvm_compressed_ba_notif *ba_res =
                        (void *)pkt->data;
diff --combined drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 87712aeac31fd7d6aff0124154f3dea34fc2c725,84f4ba01e14fa2e84878dc75fda5d050e3500880..2126b9adbb08360b78d5c44312bf22ec78a9eb21
@@@ -510,9 -510,17 +510,17 @@@ static const struct pci_device_id iwl_h
  
  /* 9000 Series */
        {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0230, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0234, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)},
@@@ -805,11 -825,11 +825,11 @@@ static int iwl_pci_resume(struct devic
        /*
         * Enable rfkill interrupt (in order to keep track of the rfkill
         * status). Must be locked to avoid processing a possible rfkill
 -       * interrupt while in iwl_trans_check_hw_rf_kill().
 +       * interrupt while in iwl_pcie_check_hw_rf_kill().
         */
        mutex_lock(&trans_pcie->mutex);
        iwl_enable_rfkill_int(trans);
 -      iwl_trans_check_hw_rf_kill(trans);
 +      iwl_pcie_check_hw_rf_kill(trans);
        mutex_unlock(&trans_pcie->mutex);
  
        return 0;
diff --combined include/linux/net.h
index b5c15b31709b794a924587da591977139f0e56fe,ebeb48c920054d705924453f6ed6c6aca0233af4..d97d80d7fdf8a9c97714d1349b5534ef5509e902
@@@ -37,7 -37,7 +37,7 @@@ struct net
  
  /* Historically, SOCKWQ_ASYNC_NOSPACE & SOCKWQ_ASYNC_WAITDATA were located
   * in sock->flags, but moved into sk->sk_wq->flags to be RCU protected.
-  * Eventually all flags will be in sk->sk_wq_flags.
+  * Eventually all flags will be in sk->sk_wq->flags.
   */
  #define SOCKWQ_ASYNC_NOSPACE  0
  #define SOCKWQ_ASYNC_WAITDATA 1
@@@ -190,16 -190,8 +190,16 @@@ struct proto_ops 
                                       struct pipe_inode_info *pipe, size_t len, unsigned int flags);
        int             (*set_peek_off)(struct sock *sk, int val);
        int             (*peek_len)(struct socket *sock);
 +
 +      /* The following functions are called internally by the kernel with
 +       * sock lock already held.
 +       */
        int             (*read_sock)(struct sock *sk, read_descriptor_t *desc,
                                     sk_read_actor_t recv_actor);
 +      int             (*sendpage_locked)(struct sock *sk, struct page *page,
 +                                         int offset, size_t size, int flags);
 +      int             (*sendmsg_locked)(struct sock *sk, struct msghdr *msg,
 +                                        size_t size);
  };
  
  #define DECLARE_SOCKADDR(type, dst, src)      \
@@@ -287,8 -279,6 +287,8 @@@ do {                                                                       
  
  int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
                   size_t num, size_t len);
 +int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg,
 +                        struct kvec *vec, size_t num, size_t len);
  int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
                   size_t num, size_t len, int flags);
  
@@@ -307,8 -297,6 +307,8 @@@ int kernel_setsockopt(struct socket *so
                      unsigned int optlen);
  int kernel_sendpage(struct socket *sock, struct page *page, int offset,
                    size_t size, int flags);
 +int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
 +                         size_t size, int flags);
  int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
  int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how);
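
The new *_locked proto_ops hooks and the kernel_sendmsg_locked() /
kernel_sendpage_locked() wrappers are for callers that already hold the
socket lock. A kernel-context sketch of the calling convention
(xmit_locked() is a hypothetical helper, assuming the prototypes declared
above):

    /* caller must already hold lock_sock(sk); the _locked variant
     * does not take the lock again */
    static int xmit_locked(struct sock *sk, void *buf, size_t len)
    {
            struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
            struct kvec iov = { .iov_base = buf, .iov_len = len };

            return kernel_sendmsg_locked(sk, &msg, &iov, 1, len);
    }
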
  
diff --combined include/net/udp.h
index 20dcdca4e85c65f2453303623d665d114661732d,586de4b811b5678c72a045a3ede9852164b9b3e2..4e5f23fec35e6d4eb165872cc9ebec7dc31bc6d3
@@@ -287,7 -287,7 +287,7 @@@ int udp_lib_setsockopt(struct sock *sk
  struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
                             __be32 daddr, __be16 dport, int dif);
  struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
 -                             __be32 daddr, __be16 dport, int dif,
 +                             __be32 daddr, __be16 dport, int dif, int sdif,
                               struct udp_table *tbl, struct sk_buff *skb);
  struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
                                 __be16 sport, __be16 dport);
@@@ -298,7 -298,7 +298,7 @@@ struct sock *udp6_lib_lookup(struct ne
  struct sock *__udp6_lib_lookup(struct net *net,
                               const struct in6_addr *saddr, __be16 sport,
                               const struct in6_addr *daddr, __be16 dport,
 -                             int dif, struct udp_table *tbl,
 +                             int dif, int sdif, struct udp_table *tbl,
                               struct sk_buff *skb);
  struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
                                 __be16 sport, __be16 dport);
@@@ -366,12 -366,13 +366,13 @@@ static inline bool udp_skb_is_linear(st
  static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
                                  struct iov_iter *to)
  {
-       int n, copy = len - off;
+       int n;
  
-       n = copy_to_iter(skb->data + off, copy, to);
-       if (n == copy)
+       n = copy_to_iter(skb->data + off, len, to);
+       if (n == len)
                return 0;
  
+       iov_iter_revert(to, n);
        return -EFAULT;
  }
  
diff --combined net/core/filter.c
index 5afe3ac191ecd4f5f7de2106f22f7901746eb354,6280a602604c2e0e05c57d0c2104b5f4791f6928..e0688a855c473d4da604b5c960315011869ee8ba
@@@ -55,7 -55,6 +55,7 @@@
  #include <net/sock_reuseport.h>
  #include <net/busy_poll.h>
  #include <net/tcp.h>
 +#include <linux/bpf_trace.h>
  
  /**
   *    sk_filter_trim_cap - run a packet through a socket filter
@@@ -514,27 -513,14 +514,27 @@@ do_pass
                                break;
                        }
  
 -                      /* Convert JEQ into JNE when 'jump_true' is next insn. */
 -                      if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
 -                              insn->code = BPF_JMP | BPF_JNE | bpf_src;
 +                      /* Convert some jumps when 'jump_true' is next insn. */
 +                      if (fp->jt == 0) {
 +                              switch (BPF_OP(fp->code)) {
 +                              case BPF_JEQ:
 +                                      insn->code = BPF_JMP | BPF_JNE | bpf_src;
 +                                      break;
 +                              case BPF_JGT:
 +                                      insn->code = BPF_JMP | BPF_JLE | bpf_src;
 +                                      break;
 +                              case BPF_JGE:
 +                                      insn->code = BPF_JMP | BPF_JLT | bpf_src;
 +                                      break;
 +                              default:
 +                                      goto jmp_rest;
 +                              }
 +
                                target = i + fp->jf + 1;
                                BPF_EMIT_JMP;
                                break;
                        }
 -
 +jmp_rest:
                        /* Other jumps are mapped into two insns: Jxx and JA. */
                        target = i + fp->jt + 1;
                        insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
@@@ -1792,8 -1778,6 +1792,8 @@@ static const struct bpf_func_proto bpf_
  struct redirect_info {
        u32 ifindex;
        u32 flags;
 +      struct bpf_map *map;
 +      struct bpf_map *map_to_flush;
  };
  
  static DEFINE_PER_CPU(struct redirect_info, redirect_info);
@@@ -1807,7 -1791,6 +1807,7 @@@ BPF_CALL_2(bpf_redirect, u32, ifindex, 
  
        ri->ifindex = ifindex;
        ri->flags = flags;
 +      ri->map = NULL;
  
        return TC_ACT_REDIRECT;
  }
@@@ -1835,29 -1818,6 +1835,29 @@@ static const struct bpf_func_proto bpf_
        .arg2_type      = ARG_ANYTHING,
  };
  
 +BPF_CALL_3(bpf_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags)
 +{
 +      struct redirect_info *ri = this_cpu_ptr(&redirect_info);
 +
 +      if (unlikely(flags))
 +              return XDP_ABORTED;
 +
 +      ri->ifindex = ifindex;
 +      ri->flags = flags;
 +      ri->map = map;
 +
 +      return XDP_REDIRECT;
 +}
 +
 +static const struct bpf_func_proto bpf_redirect_map_proto = {
 +      .func           = bpf_redirect_map,
 +      .gpl_only       = false,
 +      .ret_type       = RET_INTEGER,
 +      .arg1_type      = ARG_CONST_MAP_PTR,
 +      .arg2_type      = ARG_ANYTHING,
 +      .arg3_type      = ARG_ANYTHING,
 +};
 +
  BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
  {
        return task_get_classid(skb);
@@@ -2064,8 -2024,8 +2064,8 @@@ static int bpf_skb_proto_4_to_6(struct 
                return ret;
  
        if (skb_is_gso(skb)) {
 -              /* SKB_GSO_UDP stays as is. SKB_GSO_TCPV4 needs to
 -               * be changed into SKB_GSO_TCPV6.
 +              /* SKB_GSO_TCPV4 needs to be changed into
 +               * SKB_GSO_TCPV6.
                 */
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
                        skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV4;
@@@ -2100,8 -2060,8 +2100,8 @@@ static int bpf_skb_proto_6_to_4(struct 
                return ret;
  
        if (skb_is_gso(skb)) {
 -              /* SKB_GSO_UDP stays as is. SKB_GSO_TCPV6 needs to
 -               * be changed into SKB_GSO_TCPV4.
 +              /* SKB_GSO_TCPV6 needs to be changed into
 +               * SKB_GSO_TCPV4.
                 */
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
                        skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV6;
@@@ -2452,142 -2412,6 +2452,142 @@@ static const struct bpf_func_proto bpf_
        .arg2_type      = ARG_ANYTHING,
  };
  
 +static int __bpf_tx_xdp(struct net_device *dev,
 +                      struct bpf_map *map,
 +                      struct xdp_buff *xdp,
 +                      u32 index)
 +{
 +      int err;
 +
 +      if (!dev->netdev_ops->ndo_xdp_xmit) {
 +              bpf_warn_invalid_xdp_redirect(dev->ifindex);
 +              return -EOPNOTSUPP;
 +      }
 +
 +      err = dev->netdev_ops->ndo_xdp_xmit(dev, xdp);
 +      if (err)
 +              return err;
 +
 +      if (map)
 +              __dev_map_insert_ctx(map, index);
 +      else
 +              dev->netdev_ops->ndo_xdp_flush(dev);
 +
 +      return err;
 +}
 +
 +void xdp_do_flush_map(void)
 +{
 +      struct redirect_info *ri = this_cpu_ptr(&redirect_info);
 +      struct bpf_map *map = ri->map_to_flush;
 +
 +      ri->map = NULL;
 +      ri->map_to_flush = NULL;
 +
 +      if (map)
 +              __dev_map_flush(map);
 +}
 +EXPORT_SYMBOL_GPL(xdp_do_flush_map);
 +
 +int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
 +                      struct bpf_prog *xdp_prog)
 +{
 +      struct redirect_info *ri = this_cpu_ptr(&redirect_info);
 +      struct bpf_map *map = ri->map;
 +      u32 index = ri->ifindex;
 +      struct net_device *fwd;
 +      int err = -EINVAL;
 +
 +      ri->ifindex = 0;
 +      ri->map = NULL;
 +
 +      fwd = __dev_map_lookup_elem(map, index);
 +      if (!fwd)
 +              goto out;
 +
 +      if (ri->map_to_flush && (ri->map_to_flush != map))
 +              xdp_do_flush_map();
 +
 +      err = __bpf_tx_xdp(fwd, map, xdp, index);
 +      if (likely(!err))
 +              ri->map_to_flush = map;
 +
 +out:
 +      trace_xdp_redirect(dev, fwd, xdp_prog, XDP_REDIRECT);
 +      return err;
 +}
 +
 +int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
 +                  struct bpf_prog *xdp_prog)
 +{
 +      struct redirect_info *ri = this_cpu_ptr(&redirect_info);
 +      struct net_device *fwd;
 +      u32 index = ri->ifindex;
 +
 +      if (ri->map)
 +              return xdp_do_redirect_map(dev, xdp, xdp_prog);
 +
 +      fwd = dev_get_by_index_rcu(dev_net(dev), index);
 +      ri->ifindex = 0;
 +      ri->map = NULL;
 +      if (unlikely(!fwd)) {
 +              bpf_warn_invalid_xdp_redirect(index);
 +              return -EINVAL;
 +      }
 +
 +      trace_xdp_redirect(dev, fwd, xdp_prog, XDP_REDIRECT);
 +
 +      return __bpf_tx_xdp(fwd, NULL, xdp, 0);
 +}
 +EXPORT_SYMBOL_GPL(xdp_do_redirect);
 +
 +int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb)
 +{
 +      struct redirect_info *ri = this_cpu_ptr(&redirect_info);
 +      unsigned int len;
 +      u32 index = ri->ifindex;
 +
 +      dev = dev_get_by_index_rcu(dev_net(dev), index);
 +      ri->ifindex = 0;
 +      if (unlikely(!dev)) {
 +              bpf_warn_invalid_xdp_redirect(index);
 +              goto err;
 +      }
 +
 +      if (unlikely(!(dev->flags & IFF_UP)))
 +              goto err;
 +
 +      len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
 +      if (skb->len > len)
 +              goto err;
 +
 +      skb->dev = dev;
 +      return 0;
 +err:
 +      return -EINVAL;
 +}
 +EXPORT_SYMBOL_GPL(xdp_do_generic_redirect);
 +
 +BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
 +{
 +      struct redirect_info *ri = this_cpu_ptr(&redirect_info);
 +
 +      if (unlikely(flags))
 +              return XDP_ABORTED;
 +
 +      ri->ifindex = ifindex;
 +      ri->flags = flags;
 +      return XDP_REDIRECT;
 +}
 +
 +static const struct bpf_func_proto bpf_xdp_redirect_proto = {
 +      .func           = bpf_xdp_redirect,
 +      .gpl_only       = false,
 +      .ret_type       = RET_INTEGER,
 +      .arg1_type      = ARG_ANYTHING,
 +      .arg2_type      = ARG_ANYTHING,
 +};
 +
  bool bpf_helper_changes_pkt_data(void *func)
  {
        if (func == bpf_skb_vlan_push ||
@@@ -3187,10 -3011,6 +3187,10 @@@ xdp_func_proto(enum bpf_func_id func_id
                return &bpf_get_smp_processor_id_proto;
        case BPF_FUNC_xdp_adjust_head:
                return &bpf_xdp_adjust_head_proto;
 +      case BPF_FUNC_redirect:
 +              return &bpf_xdp_redirect_proto;
 +      case BPF_FUNC_redirect_map:
 +              return &bpf_redirect_map_proto;
        default:
                return bpf_base_func_proto(func_id);
        }
@@@ -3490,11 -3310,6 +3490,11 @@@ void bpf_warn_invalid_xdp_action(u32 ac
  }
  EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
  
 +void bpf_warn_invalid_xdp_redirect(u32 ifindex)
 +{
 +      WARN_ONCE(1, "Illegal XDP redirect to unsupported device ifindex(%i)\n", ifindex);
 +}
 +
  static bool __is_valid_sock_ops_access(int off, int size)
  {
        if (off < 0 || off >= sizeof(struct bpf_sock_ops))
@@@ -3690,6 -3505,7 +3690,7 @@@ static u32 bpf_convert_ctx_access(enum 
                                              bpf_target_off(struct sk_buff, tc_index, 2,
                                                             target_size));
  #else
+               *target_size = 2;
                if (type == BPF_WRITE)
                        *insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg);
                else
                *insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1);
                *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
  #else
+               *target_size = 4;
                *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
  #endif
                break;
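
For context, a minimal XDP program exercising the new bpf_redirect_map()
helper (an editorial sketch, not from this commit; it assumes the
sample-style bpf_helpers.h and a user-space loader that populates the
hypothetical "tx_port" DEVMAP with egress ifindexes):

    #include <linux/bpf.h>
    #include "bpf_helpers.h"

    struct bpf_map_def SEC("maps") tx_port = {
            .type        = BPF_MAP_TYPE_DEVMAP,
            .key_size    = sizeof(int),
            .value_size  = sizeof(int),
            .max_entries = 64,
    };

    SEC("xdp_redirect_map")
    int xdp_prog(struct xdp_md *ctx)
    {
            /* non-zero flags make the helper return XDP_ABORTED, as in
             * bpf_redirect_map() above; key 0 selects the egress device */
            return bpf_redirect_map(&tx_port, 0, 0);
    }
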
diff --combined net/ipv4/fib_semantics.c
index 632b454ce77c09df066e54abdbe67251b9103b72,ec3a9ce281a6ffb86b62e21f7284fd7c801668f0..d521caf57385fa05f76036708057b95052330cb1
@@@ -44,7 -44,6 +44,7 @@@
  #include <net/netlink.h>
  #include <net/nexthop.h>
  #include <net/lwtunnel.h>
 +#include <net/fib_notifier.h>
  
  #include "fib_lookup.h"
  
@@@ -1084,15 -1083,17 +1084,17 @@@ struct fib_info *fib_create_info(struc
        fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
        if (!fi)
                goto failure;
-       fib_info_cnt++;
        if (cfg->fc_mx) {
                fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
-               if (!fi->fib_metrics)
-                       goto failure;
+               if (unlikely(!fi->fib_metrics)) {
+                       kfree(fi);
+                       return ERR_PTR(err);
+               }
                atomic_set(&fi->fib_metrics->refcnt, 1);
-       } else
+       } else {
                fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;
+       }
+       fib_info_cnt++;
        fi->fib_net = net;
        fi->fib_protocol = cfg->fc_protocol;
        fi->fib_scope = cfg->fc_scope;
@@@ -1343,8 -1344,6 +1345,8 @@@ int fib_dump_info(struct sk_buff *skb, 
                            IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev))
                                rtm->rtm_flags |= RTNH_F_DEAD;
                }
 +              if (fi->fib_nh->nh_flags & RTNH_F_OFFLOAD)
 +                      rtm->rtm_flags |= RTNH_F_OFFLOAD;
  #ifdef CONFIG_IP_ROUTE_CLASSID
                if (fi->fib_nh[0].nh_tclassid &&
                    nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
@@@ -1452,14 -1451,14 +1454,14 @@@ static int call_fib_nh_notifiers(struc
                if (IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
                    fib_nh->nh_flags & RTNH_F_LINKDOWN)
                        break;
 -              return call_fib_notifiers(dev_net(fib_nh->nh_dev), event_type,
 -                                        &info.info);
 +              return call_fib4_notifiers(dev_net(fib_nh->nh_dev), event_type,
 +                                         &info.info);
        case FIB_EVENT_NH_DEL:
                if ((in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
                     fib_nh->nh_flags & RTNH_F_LINKDOWN) ||
                    (fib_nh->nh_flags & RTNH_F_DEAD))
 -                      return call_fib_notifiers(dev_net(fib_nh->nh_dev),
 -                                                event_type, &info.info);
 +                      return call_fib4_notifiers(dev_net(fib_nh->nh_dev),
 +                                                 event_type, &info.info);
        default:
                break;
        }
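
The fib_create_info() hunk above fixes error-path ordering: fib_info_cnt is
bumped only once every allocation has succeeded, and a half-initialized fi
is freed directly instead of going through the generic failure label. A
generic kernel-context sketch of the rule (all names illustrative):

    static unsigned int thing_cnt;      /* analogous to fib_info_cnt */

    struct thing {
            void *metrics;
    };

    static struct thing *thing_create(void)
    {
            struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

            if (!t)
                    return ERR_PTR(-ENOBUFS);

            t->metrics = kzalloc(64, GFP_KERNEL);
            if (unlikely(!t->metrics)) {
                    kfree(t);           /* nothing else to unwind yet */
                    return ERR_PTR(-ENOBUFS);
            }

            thing_cnt++;                /* counted only on full success */
            return t;
    }
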
diff --combined net/ipv4/route.c
index 618bbe1405fc57dfe8d9632f94a97fc9dd9f2a08,7effa62beed3fa9065ab7d6365b1d79143bfae79..d400c05431063fc7bdd15b83ab540acc86decb3d
@@@ -2236,7 -2236,7 +2236,7 @@@ add
        if (!rth)
                return ERR_PTR(-ENOBUFS);
  
 -      rth->rt_iif     = orig_oif ? : 0;
 +      rth->rt_iif = orig_oif;
        if (res->table)
                rth->rt_table_id = res->table->tb_id;
  
@@@ -2439,12 -2439,6 +2439,12 @@@ struct rtable *ip_route_output_key_hash
                /* L3 master device is the loopback for that domain */
                dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
                        net->loopback_dev;
 +
 +              /* make sure orig_oif points to the fib result device even
 +               * though packet rx/tx happens over loopback or l3mdev
 +               */
 +              orig_oif = FIB_RES_OIF(*res);
 +
                fl4->flowi4_oif = dev_out->ifindex;
                flags |= RTCF_LOCAL;
                goto make_route;
@@@ -2756,12 -2750,13 +2756,13 @@@ static int inet_rtm_getroute(struct sk_
                err = 0;
                if (IS_ERR(rt))
                        err = PTR_ERR(rt);
+               else
+                       skb_dst_set(skb, &rt->dst);
        }
  
        if (err)
                goto errout_free;
  
-       skb_dst_set(skb, &rt->dst);
        if (rtm->rtm_flags & RTM_F_NOTIFY)
                rt->rt_flags |= RTCF_NOTIFY;
  
@@@ -3073,8 -3068,7 +3074,8 @@@ int __init ip_rt_init(void
        xfrm_init();
        xfrm4_init();
  #endif
 -      rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);
 +      rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL,
 +                    RTNL_FLAG_DOIT_UNLOCKED);
  
  #ifdef CONFIG_SYSCTL
        register_pernet_subsys(&sysctl_route_ops);
diff --combined net/ipv4/tcp_ipv4.c
index c8784ab378521ccb24f1359359ba4b9177a2a8e0,e9252c7df8091a8e0d2fc9d7e5722e9fd605a857..5af8b809dfbc0b64cb09c4b547ca32afb88072c1
@@@ -85,6 -85,8 +85,6 @@@
  #include <crypto/hash.h>
  #include <linux/scatterlist.h>
  
 -int sysctl_tcp_low_latency __read_mostly;
 -
  #ifdef CONFIG_TCP_MD5SIG
  static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
                               __be32 daddr, __be32 saddr, const struct tcphdr *th);
@@@ -383,7 -385,7 +383,7 @@@ void tcp_v4_err(struct sk_buff *icmp_sk
  
        sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
                                       th->dest, iph->saddr, ntohs(th->source),
 -                                     inet_iif(icmp_skb));
 +                                     inet_iif(icmp_skb), 0);
        if (!sk) {
                __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
                return;
@@@ -659,8 -661,7 +659,8 @@@ static void tcp_v4_send_reset(const str
                sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
                                             ip_hdr(skb)->saddr,
                                             th->source, ip_hdr(skb)->daddr,
 -                                           ntohs(th->source), inet_iif(skb));
 +                                           ntohs(th->source), inet_iif(skb),
 +                                           tcp_v4_sdif(skb));
                /* don't send rst if it can't find key */
                if (!sk1)
                        goto out;
@@@ -1268,7 -1269,7 +1268,7 @@@ static void tcp_v4_init_req(struct requ
  
        sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
        sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
 -      ireq->opt = tcp_v4_save_options(skb);
 +      ireq->opt = tcp_v4_save_options(sock_net(sk_listener), skb);
  }
  
  static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
@@@ -1457,7 -1458,7 +1457,7 @@@ int tcp_v4_do_rcv(struct sock *sk, stru
                                sk->sk_rx_dst = NULL;
                        }
                }
 -              tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
 +              tcp_rcv_established(sk, skb, tcp_hdr(skb));
                return 0;
        }
  
@@@ -1524,7 -1525,7 +1524,7 @@@ void tcp_v4_early_demux(struct sk_buff 
        sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
                                       iph->saddr, th->source,
                                       iph->daddr, ntohs(th->dest),
 -                                     skb->skb_iif);
 +                                     skb->skb_iif, inet_sdif(skb));
        if (sk) {
                skb->sk = sk;
                skb->destructor = sock_edemux;
        }
  }
  
 -/* Packet is added to VJ-style prequeue for processing in process
 - * context, if a reader task is waiting. Apparently, this exciting
 - * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 - * failed somewhere. Latency? Burstiness? Well, at least now we will
 - * see, why it failed. 8)8)                             --ANK
 - *
 - */
 -bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 -{
 -      struct tcp_sock *tp = tcp_sk(sk);
 -
 -      if (sysctl_tcp_low_latency || !tp->ucopy.task)
 -              return false;
 -
 -      if (skb->len <= tcp_hdrlen(skb) &&
 -          skb_queue_len(&tp->ucopy.prequeue) == 0)
 -              return false;
 -
 -      /* Before escaping RCU protected region, we need to take care of skb
 -       * dst. Prequeue is only enabled for established sockets.
 -       * For such sockets, we might need the skb dst only to set sk->sk_rx_dst
 -       * Instead of doing full sk_rx_dst validity here, let's perform
 -       * an optimistic check.
 -       */
 -      if (likely(sk->sk_rx_dst))
 -              skb_dst_drop(skb);
 -      else
 -              skb_dst_force_safe(skb);
 -
 -      __skb_queue_tail(&tp->ucopy.prequeue, skb);
 -      tp->ucopy.memory += skb->truesize;
 -      if (skb_queue_len(&tp->ucopy.prequeue) >= 32 ||
 -          tp->ucopy.memory + atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) {
 -              struct sk_buff *skb1;
 -
 -              BUG_ON(sock_owned_by_user(sk));
 -              __NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED,
 -                              skb_queue_len(&tp->ucopy.prequeue));
 -
 -              while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
 -                      sk_backlog_rcv(sk, skb1);
 -
 -              tp->ucopy.memory = 0;
 -      } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
 -              wake_up_interruptible_sync_poll(sk_sleep(sk),
 -                                         POLLIN | POLLRDNORM | POLLRDBAND);
 -              if (!inet_csk_ack_scheduled(sk))
 -                      inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
 -                                                (3 * tcp_rto_min(sk)) / 4,
 -                                                TCP_RTO_MAX);
 -      }
 -      return true;
 -}
 -EXPORT_SYMBOL(tcp_prequeue);
 -
  bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
  {
        u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
@@@ -1589,7 -1645,6 +1589,7 @@@ EXPORT_SYMBOL(tcp_filter)
  int tcp_v4_rcv(struct sk_buff *skb)
  {
        struct net *net = dev_net(skb->dev);
 +      int sdif = inet_sdif(skb);
        const struct iphdr *iph;
        const struct tcphdr *th;
        bool refcounted;
  
  lookup:
        sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
 -                             th->dest, &refcounted);
 +                             th->dest, sdif, &refcounted);
        if (!sk)
                goto no_tcp_socket;
  
@@@ -1667,6 -1722,8 +1667,8 @@@ process
                 */
                sock_hold(sk);
                refcounted = true;
+               if (tcp_filter(sk, skb))
+                       goto discard_and_relse;
                nsk = tcp_check_req(sk, skb, req, false);
                if (!nsk) {
                        reqsk_put(req);
                }
                if (nsk == sk) {
                        reqsk_put(req);
-               } else if (tcp_filter(sk, skb)) {
-                       goto discard_and_relse;
                } else if (tcp_child_process(sk, nsk, skb)) {
                        tcp_v4_send_reset(nsk, skb);
                        goto discard_and_relse;
        tcp_segs_in(tcp_sk(sk), skb);
        ret = 0;
        if (!sock_owned_by_user(sk)) {
 -              if (!tcp_prequeue(sk, skb))
 -                      ret = tcp_v4_do_rcv(sk, skb);
 +              ret = tcp_v4_do_rcv(sk, skb);
        } else if (tcp_add_backlog(sk, skb)) {
                goto discard_and_relse;
        }
@@@ -1768,8 -1824,7 +1768,8 @@@ do_time_wait
                                                        __tcp_hdrlen(th),
                                                        iph->saddr, th->source,
                                                        iph->daddr, th->dest,
 -                                                      inet_iif(skb));
 +                                                      inet_iif(skb),
 +                                                      sdif);
                if (sk2) {
                        inet_twsk_deschedule_put(inet_twsk(sk));
                        sk = sk2;
@@@ -1881,6 -1936,9 +1881,6 @@@ void tcp_v4_destroy_sock(struct sock *s
        }
  #endif
  
 -      /* Clean prequeue, it must be empty really */
 -      __skb_queue_purge(&tp->ucopy.prequeue);
 -
        /* Clean up a referenced TCP bind bucket. */
        if (inet_csk(sk)->icsk_bind_hash)
                inet_put_port(sk);
diff --combined net/ipv6/route.c
index 11ff19ba7efd074ccd0ba51432723186b2497eb1,94d6a13d47f0e9ec5ff4cbc50b90d4cd9ca3f38a..dc021ed6dd378219f181805d9ece531fe19ccae1
@@@ -417,14 -417,11 +417,11 @@@ static void ip6_dst_ifdown(struct dst_e
        struct net_device *loopback_dev =
                dev_net(dev)->loopback_dev;
  
-       if (dev != loopback_dev) {
-               if (idev && idev->dev == dev) {
-                       struct inet6_dev *loopback_idev =
-                               in6_dev_get(loopback_dev);
-                       if (loopback_idev) {
-                               rt->rt6i_idev = loopback_idev;
-                               in6_dev_put(idev);
-                       }
+       if (idev && idev->dev != loopback_dev) {
+               struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);
+               if (loopback_idev) {
+                       rt->rt6i_idev = loopback_idev;
+                       in6_dev_put(idev);
                }
        }
  }
@@@ -3330,9 -3327,6 +3327,9 @@@ static int rt6_nexthop_info(struct sk_b
                        goto nla_put_failure;
        }
  
 +      if (rt->rt6i_nh_flags & RTNH_F_OFFLOAD)
 +              *flags |= RTNH_F_OFFLOAD;
 +
        /* not needed for multipath encoding b/c it has a rtnexthop struct */
        if (!skip_oif && rt->dst.dev &&
            nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
@@@ -3611,11 -3605,8 +3608,11 @@@ static int inet6_rtm_getroute(struct sk
                struct net_device *dev;
                int flags = 0;
  
 -              dev = __dev_get_by_index(net, iif);
 +              rcu_read_lock();
 +
 +              dev = dev_get_by_index_rcu(net, iif);
                if (!dev) {
 +                      rcu_read_unlock();
                        err = -ENODEV;
                        goto errout;
                }
  
                if (!fibmatch)
                        dst = ip6_route_input_lookup(net, dev, &fl6, flags);
 +
 +              rcu_read_unlock();
        } else {
                fl6.flowi6_oif = oif;
  
@@@ -3732,10 -3721,10 +3729,10 @@@ static int ip6_route_dev_notify(struct 
                /* NETDEV_UNREGISTER could be fired for multiple times by
                 * netdev_wait_allrefs(). Make sure we only call this once.
                 */
-               in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev);
+               in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
  #ifdef CONFIG_IPV6_MULTIPLE_TABLES
-               in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev);
-               in6_dev_put(net->ipv6.ip6_blk_hole_entry->rt6i_idev);
+               in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
+               in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
  #endif
        }
  
@@@ -3929,7 -3918,6 +3926,7 @@@ static int __net_init ip6_route_net_ini
                         ip6_template_metrics, true);
  
  #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 +      net->ipv6.fib6_has_custom_rules = false;
        net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
                                               sizeof(*net->ipv6.ip6_prohibit_entry),
                                               GFP_KERNEL);
@@@ -4105,10 -4093,9 +4102,10 @@@ int __init ip6_route_init(void
                goto fib6_rules_init;
  
        ret = -ENOBUFS;
 -      if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
 -          __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
 -          __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
 +      if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, 0) ||
 +          __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, 0) ||
 +          __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL,
 +                          RTNL_FLAG_DOIT_UNLOCKED))
                goto out_register_late_subsys;
  
        ret = register_netdevice_notifier(&ip6_route_dev_notifier);
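
The ip6_route_dev_notify() hunk above relies on in6_dev_put_clear() both
dropping the reference and clearing the pointer, which is what makes a
repeated NETDEV_UNREGISTER safe. A sketch of the helper's expected shape
(assuming the addrconf.h definition added this cycle):

    static inline void in6_dev_put_clear(struct inet6_dev **pidev)
    {
            struct inet6_dev *idev = *pidev;

            if (idev) {
                    in6_dev_put(idev);
                    *pidev = NULL;
            }
    }
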
diff --combined net/ipv6/tcp_ipv6.c
index f776ec4ecf6d76857c8ab05d30ad440c3584eff3,206210125fd71d129a9ed2ead51a536749ab62b8..d79a1af3252ee698412fbcb339febaa38b4347c6
@@@ -350,7 -350,7 +350,7 @@@ static void tcp_v6_err(struct sk_buff *
        sk = __inet6_lookup_established(net, &tcp_hashinfo,
                                        &hdr->daddr, th->dest,
                                        &hdr->saddr, ntohs(th->source),
 -                                      skb->dev->ifindex);
 +                                      skb->dev->ifindex, inet6_sdif(skb));
  
        if (!sk) {
                __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
@@@ -918,8 -918,7 +918,8 @@@ static void tcp_v6_send_reset(const str
                                           &tcp_hashinfo, NULL, 0,
                                           &ipv6h->saddr,
                                           th->source, &ipv6h->daddr,
 -                                         ntohs(th->source), tcp_v6_iif(skb));
 +                                         ntohs(th->source), tcp_v6_iif(skb),
 +                                         tcp_v6_sdif(skb));
                if (!sk1)
                        goto out;
  
@@@ -1297,7 -1296,7 +1297,7 @@@ static int tcp_v6_do_rcv(struct sock *s
                        }
                }
  
 -              tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
 +              tcp_rcv_established(sk, skb, tcp_hdr(skb));
                if (opt_skb)
                        goto ipv6_pktoptions;
                return 0;
@@@ -1398,7 -1397,6 +1398,7 @@@ static void tcp_v6_fill_cb(struct sk_bu
  
  static int tcp_v6_rcv(struct sk_buff *skb)
  {
 +      int sdif = inet6_sdif(skb);
        const struct tcphdr *th;
        const struct ipv6hdr *hdr;
        bool refcounted;
  
  lookup:
        sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
 -                              th->source, th->dest, inet6_iif(skb),
 +                              th->source, th->dest, inet6_iif(skb), sdif,
                                &refcounted);
        if (!sk)
                goto no_tcp_socket;
@@@ -1458,6 -1456,8 +1458,8 @@@ process
                }
                sock_hold(sk);
                refcounted = true;
+               if (tcp_filter(sk, skb))
+                       goto discard_and_relse;
                nsk = tcp_check_req(sk, skb, req, false);
                if (!nsk) {
                        reqsk_put(req);
                if (nsk == sk) {
                        reqsk_put(req);
                        tcp_v6_restore_cb(skb);
-               } else if (tcp_filter(sk, skb)) {
-                       goto discard_and_relse;
                } else if (tcp_child_process(sk, nsk, skb)) {
                        tcp_v6_send_reset(nsk, skb);
                        goto discard_and_relse;
        tcp_segs_in(tcp_sk(sk), skb);
        ret = 0;
        if (!sock_owned_by_user(sk)) {
 -              if (!tcp_prequeue(sk, skb))
 -                      ret = tcp_v6_do_rcv(sk, skb);
 +              ret = tcp_v6_do_rcv(sk, skb);
        } else if (tcp_add_backlog(sk, skb)) {
                goto discard_and_relse;
        }
@@@ -1565,8 -1564,7 +1565,8 @@@ do_time_wait
                                            skb, __tcp_hdrlen(th),
                                            &ipv6_hdr(skb)->saddr, th->source,
                                            &ipv6_hdr(skb)->daddr,
 -                                          ntohs(th->dest), tcp_v6_iif(skb));
 +                                          ntohs(th->dest), tcp_v6_iif(skb),
 +                                          sdif);
                if (sk2) {
                        struct inet_timewait_sock *tw = inet_twsk(sk);
                        inet_twsk_deschedule_put(tw);
@@@ -1613,7 -1611,7 +1613,7 @@@ static void tcp_v6_early_demux(struct s
        sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
                                        &hdr->saddr, th->source,
                                        &hdr->daddr, ntohs(th->dest),
 -                                      inet6_iif(skb));
 +                                      inet6_iif(skb), inet6_sdif(skb));
        if (sk) {
                skb->sk = sk;
                skb->destructor = sock_edemux;
diff --combined net/key/af_key.c
index 10d7133e4fe9b484476d1b6196fb0bbed3d88bf1,98f4d8211b9a9d9bc26e7d9979c5c2bf27c1b344..a00d607e7224d422e6f3ae3b139bee9706852d7b
@@@ -228,7 -228,7 +228,7 @@@ static int pfkey_broadcast_one(struct s
  #define BROADCAST_ONE         1
  #define BROADCAST_REGISTERED  2
  #define BROADCAST_PROMISC_ONLY        4
- static int pfkey_broadcast(struct sk_buff *skb,
+ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
                           int broadcast_flags, struct sock *one_sk,
                           struct net *net)
  {
        rcu_read_unlock();
  
        if (one_sk != NULL)
-               err = pfkey_broadcast_one(skb, &skb2, GFP_KERNEL, one_sk);
+               err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
  
        kfree_skb(skb2);
        kfree_skb(skb);
@@@ -311,7 -311,7 +311,7 @@@ static int pfkey_do_dump(struct pfkey_s
                hdr = (struct sadb_msg *) pfk->dump.skb->data;
                hdr->sadb_msg_seq = 0;
                hdr->sadb_msg_errno = rc;
-               pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
+               pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
                                &pfk->sk, sock_net(&pfk->sk));
                pfk->dump.skb = NULL;
        }
@@@ -355,7 -355,7 +355,7 @@@ static int pfkey_error(const struct sad
        hdr->sadb_msg_len = (sizeof(struct sadb_msg) /
                             sizeof(uint64_t));
  
-       pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
+       pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk));
  
        return 0;
  }
@@@ -1389,7 -1389,7 +1389,7 @@@ static int pfkey_getspi(struct sock *sk
  
        xfrm_state_put(x);
  
-       pfkey_broadcast(resp_skb, BROADCAST_ONE, sk, net);
+       pfkey_broadcast(resp_skb, GFP_KERNEL, BROADCAST_ONE, sk, net);
  
        return 0;
  }
@@@ -1476,7 -1476,7 +1476,7 @@@ static int key_notify_sa(struct xfrm_st
        hdr->sadb_msg_seq = c->seq;
        hdr->sadb_msg_pid = c->portid;
  
-       pfkey_broadcast(skb, BROADCAST_ALL, NULL, xs_net(x));
+       pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x));
  
        return 0;
  }
@@@ -1589,7 -1589,7 +1589,7 @@@ static int pfkey_get(struct sock *sk, s
        out_hdr->sadb_msg_reserved = 0;
        out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
        out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
-       pfkey_broadcast(out_skb, BROADCAST_ONE, sk, sock_net(sk));
+       pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
  
        return 0;
  }
@@@ -1694,8 -1694,8 +1694,8 @@@ static int pfkey_register(struct sock *
                return -ENOBUFS;
        }
  
-       pfkey_broadcast(supp_skb, BROADCAST_REGISTERED, sk, sock_net(sk));
+       pfkey_broadcast(supp_skb, GFP_KERNEL, BROADCAST_REGISTERED, sk,
+                       sock_net(sk));
        return 0;
  }
  
@@@ -1712,7 -1712,8 +1712,8 @@@ static int unicast_flush_resp(struct so
        hdr->sadb_msg_errno = (uint8_t) 0;
        hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
  
-       return pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
+       return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk,
+                              sock_net(sk));
  }
  
  static int key_notify_sa_flush(const struct km_event *c)
        hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
        hdr->sadb_msg_reserved = 0;
  
-       pfkey_broadcast(skb, BROADCAST_ALL, NULL, c->net);
+       pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
  
        return 0;
  }
@@@ -1790,7 -1791,7 +1791,7 @@@ static int dump_sa(struct xfrm_state *x
        out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
  
        if (pfk->dump.skb)
-               pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
+               pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
                                &pfk->sk, sock_net(&pfk->sk));
        pfk->dump.skb = out_skb;
  
@@@ -1878,7 -1879,7 +1879,7 @@@ static int pfkey_promisc(struct sock *s
                new_hdr->sadb_msg_errno = 0;
        }
  
-       pfkey_broadcast(skb, BROADCAST_ALL, NULL, sock_net(sk));
+       pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk));
        return 0;
  }
  
@@@ -2206,7 -2207,7 +2207,7 @@@ static int key_notify_policy(struct xfr
        out_hdr->sadb_msg_errno = 0;
        out_hdr->sadb_msg_seq = c->seq;
        out_hdr->sadb_msg_pid = c->portid;
-       pfkey_broadcast(out_skb, BROADCAST_ALL, NULL, xp_net(xp));
+       pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp));
        return 0;
  
  }
@@@ -2398,6 -2399,8 +2399,6 @@@ static int pfkey_spddelete(struct sock 
  
  out:
        xfrm_pol_put(xp);
 -      if (err == 0)
 -              xfrm_garbage_collect(net);
        return err;
  }
  
@@@ -2424,7 -2427,7 +2425,7 @@@ static int key_pol_get_resp(struct soc
        out_hdr->sadb_msg_errno = 0;
        out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
        out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
-       pfkey_broadcast(out_skb, BROADCAST_ONE, sk, xp_net(xp));
+       pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, xp_net(xp));
        err = 0;
  
  out:
@@@ -2648,6 -2651,8 +2649,6 @@@ static int pfkey_spdget(struct sock *sk
  
  out:
        xfrm_pol_put(xp);
 -      if (delete && err == 0)
 -              xfrm_garbage_collect(net);
        return err;
  }
  
@@@ -2678,7 -2683,7 +2679,7 @@@ static int dump_sp(struct xfrm_policy *
        out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
  
        if (pfk->dump.skb)
-               pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
+               pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
                                &pfk->sk, sock_net(&pfk->sk));
        pfk->dump.skb = out_skb;
  
@@@ -2735,7 -2740,7 +2736,7 @@@ static int key_notify_policy_flush(cons
        hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
        hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
        hdr->sadb_msg_reserved = 0;
-       pfkey_broadcast(skb_out, BROADCAST_ALL, NULL, c->net);
+       pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
        return 0;
  
  }
@@@ -2747,6 -2752,8 +2748,6 @@@ static int pfkey_spdflush(struct sock *
        int err, err2;
  
        err = xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, true);
 -      if (!err)
 -              xfrm_garbage_collect(net);
        err2 = unicast_flush_resp(sk, hdr);
        if (err || err2) {
                if (err == -ESRCH) /* empty table - old silent behavior */
@@@ -2797,7 -2804,7 +2798,7 @@@ static int pfkey_process(struct sock *s
        void *ext_hdrs[SADB_EXT_MAX];
        int err;
  
-       pfkey_broadcast(skb_clone(skb, GFP_KERNEL),
+       pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
                        BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
  
        memset(ext_hdrs, 0, sizeof(ext_hdrs));
@@@ -3018,7 -3025,8 +3019,8 @@@ static int key_notify_sa_expire(struct 
        out_hdr->sadb_msg_seq = 0;
        out_hdr->sadb_msg_pid = 0;
  
-       pfkey_broadcast(out_skb, BROADCAST_REGISTERED, NULL, xs_net(x));
+       pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
+                       xs_net(x));
        return 0;
  }
  
@@@ -3206,7 -3214,8 +3208,8 @@@ static int pfkey_send_acquire(struct xf
                       xfrm_ctx->ctx_len);
        }
  
-       return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
+       return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
+                              xs_net(x));
  }
  
  static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
@@@ -3402,7 -3411,8 +3405,8 @@@ static int pfkey_send_new_mapping(struc
        n_port->sadb_x_nat_t_port_port = sport;
        n_port->sadb_x_nat_t_port_reserved = 0;
  
-       return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
+       return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
+                              xs_net(x));
  }
  
  #ifdef CONFIG_NET_KEY_MIGRATE
@@@ -3593,7 -3603,7 +3597,7 @@@ static int pfkey_send_migrate(const str
        }
  
        /* broadcast migrate message to sockets */
-       pfkey_broadcast(skb, BROADCAST_ALL, NULL, &init_net);
+       pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, &init_net);
  
        return 0;
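
The af_key.c changes thread a gfp_t through pfkey_broadcast() so that
atomic-context callers (notifier and dump paths) can pass GFP_ATOMIC rather
than relying on the old hard-coded GFP_KERNEL. A condensed sketch of the
resulting calling convention (notify_example() is illustrative):

    static void notify_example(struct sk_buff *skb, struct net *net,
                               bool in_atomic)
    {
            gfp_t allocation = in_atomic ? GFP_ATOMIC : GFP_KERNEL;

            pfkey_broadcast(skb, allocation, BROADCAST_ALL, NULL, net);
    }
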
  
diff --combined net/sched/sch_api.c
index 816c8092e601aa026971906143102b44169e0a81,a3fa144b864871088209386fd573bded1886432f..361377fbd78098c64853576da065731a7487b5bd
@@@ -286,9 -286,6 +286,6 @@@ static struct Qdisc *qdisc_match_from_r
  void qdisc_hash_add(struct Qdisc *q, bool invisible)
  {
        if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
-               struct Qdisc *root = qdisc_dev(q)->qdisc;
-               WARN_ON_ONCE(root == &noop_qdisc);
                ASSERT_RTNL();
                hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
                if (invisible)
@@@ -1952,14 -1949,14 +1949,14 @@@ static int __init pktsched_init(void
        register_qdisc(&mq_qdisc_ops);
        register_qdisc(&noqueue_qdisc_ops);
  
 -      rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
 -      rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
 +      rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0);
 +      rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
 -                    NULL);
 -      rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL);
 -      rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL);
 +                    0);
 +      rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0);
 +      rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
 -                    NULL);
 +                    0);
  
        return 0;
  }
diff --combined net/sched/sch_atm.c
index 0af4b1c6f6740022ce195f799e0f8eb9be5228ff,c403c87aff7a44bccfdd5f07e2e00ea0698a5c90..2732950766a9e9daa6e00286f98ed814b78014ca
@@@ -41,7 -41,6 +41,7 @@@
  #define VCC2FLOW(vcc) ((struct atm_flow_data *) ((vcc)->user_back))
  
  struct atm_flow_data {
 +      struct Qdisc_class_common common;
        struct Qdisc            *q;     /* FIFO, TBF, etc. */
        struct tcf_proto __rcu  *filter_list;
        struct tcf_block        *block;
@@@ -50,6 -49,7 +50,6 @@@
                                           struct sk_buff *skb); /* chaining */
        struct atm_qdisc_data   *parent;        /* parent qdisc */
        struct socket           *sock;          /* for closing */
 -      u32                     classid;        /* x:y type ID */
        int                     ref;            /* reference count */
        struct gnet_stats_basic_packed  bstats;
        struct gnet_stats_queue qstats;
@@@ -75,7 -75,7 +75,7 @@@ static inline struct atm_flow_data *loo
        struct atm_flow_data *flow;
  
        list_for_each_entry(flow, &p->flows, list) {
 -              if (flow->classid == classid)
 +              if (flow->common.classid == classid)
                        return flow;
        }
        return NULL;
@@@ -293,7 -293,7 +293,7 @@@ static int atm_tc_change(struct Qdisc *
        flow->old_pop = flow->vcc->pop;
        flow->parent = p;
        flow->vcc->pop = sch_atm_pop;
 -      flow->classid = classid;
 +      flow->common.classid = classid;
        flow->ref = 1;
        flow->excess = excess;
        list_add(&flow->list, &p->link.list);
@@@ -549,7 -549,7 +549,7 @@@ static int atm_tc_init(struct Qdisc *sc
  
        p->link.vcc = NULL;
        p->link.sock = NULL;
 -      p->link.classid = sch->handle;
 +      p->link.common.classid = sch->handle;
        p->link.ref = 1;
        tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch);
        return 0;
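
Embedding struct Qdisc_class_common at the start of atm_flow_data makes the
private classid field redundant; our reading (not stated in the diff) is
that it also prepares the qdisc for the generic class-hash helpers. A
sketch of a lookup through the common member, assuming such a hash were
maintained:

    static struct atm_flow_data *lookup_flow_fast(struct Qdisc_class_hash *h,
                                                  u32 classid)
    {
            struct Qdisc_class_common *c = qdisc_class_find(h, classid);

            return c ? container_of(c, struct atm_flow_data, common) : NULL;
    }
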
@@@ -572,8 -572,10 +572,10 @@@ static void atm_tc_destroy(struct Qdis
        struct atm_flow_data *flow, *tmp;
  
        pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p);
-       list_for_each_entry(flow, &p->flows, list)
+       list_for_each_entry(flow, &p->flows, list) {
                tcf_block_put(flow->block);
+               flow->block = NULL;
+       }
  
        list_for_each_entry_safe(flow, tmp, &p->flows, list) {
                if (flow->ref > 1)
@@@ -594,7 -596,7 +596,7 @@@ static int atm_tc_dump_class(struct Qdi
                sch, p, flow, skb, tcm);
        if (list_empty(&flow->list))
                return -EINVAL;
 -      tcm->tcm_handle = flow->classid;
 +      tcm->tcm_handle = flow->common.classid;
        tcm->tcm_info = flow->q->handle;
  
        nest = nla_nest_start(skb, TCA_OPTIONS);
                        goto nla_put_failure;
        }
        if (flow->excess) {
 -              if (nla_put_u32(skb, TCA_ATM_EXCESS, flow->classid))
 +              if (nla_put_u32(skb, TCA_ATM_EXCESS, flow->common.classid))
                        goto nla_put_failure;
        } else {
                if (nla_put_u32(skb, TCA_ATM_EXCESS, 0))