Merge git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git
author Kalle Valo <kvalo@codeaurora.org>
Thu, 24 Aug 2017 11:29:12 +0000 (14:29 +0300)
committer Kalle Valo <kvalo@codeaurora.org>
Thu, 24 Aug 2017 11:47:42 +0000 (14:47 +0300)
Stephen Rothwell reported quite a few conflicts in iwlwifi between
wireless-drivers and wireless-drivers-next. To avoid any problems later in
other trees, merge w-d to w-d-next to fix those conflicts early.

drivers/net/wireless/intel/iwlwifi/fw/file.h
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c

index a1cd2f41b0267d7e293ca4b9b582089e02026487,c73a6438ce8fbcd12a8e12e359c5f7d25e66a76f..887f6d8fc8a7c65915334af1d3ee9b002ea115ce
@@@ -246,8 -246,6 +246,8 @@@ typedef unsigned int __bitwise iwl_ucod
   * @IWL_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignment.
   * @IWL_UCODE_TLV_API_NAN2_VER2: This ucode supports NAN API version 2
   * @IWL_UCODE_TLV_API_NEW_RX_STATS: should new RX STATISTICS API be used
 + * @IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL: the coex notification is enlarged to
 + *    include information about ACL time sharing.
   *
   * @NUM_IWL_UCODE_TLV_API: number of bits used
   */
@@@ -262,9 -260,7 +262,9 @@@ enum iwl_ucode_tlv_api 
        IWL_UCODE_TLV_API_STA_TYPE              = (__force iwl_ucode_tlv_api_t)30,
        IWL_UCODE_TLV_API_NAN2_VER2             = (__force iwl_ucode_tlv_api_t)31,
        /* API Set 1 */
 +      IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE   = (__force iwl_ucode_tlv_api_t)34,
        IWL_UCODE_TLV_API_NEW_RX_STATS          = (__force iwl_ucode_tlv_api_t)35,
 +      IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL     = (__force iwl_ucode_tlv_api_t)37,
  
        NUM_IWL_UCODE_TLV_API
  #ifdef __CHECKER__
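
Elsewhere in the driver these per-bit API flags are consumed through the
existing fw_has_api() helper (fw_has_capa() is the analogous accessor for
the capability bits below). A minimal sketch of such a consumer; the
wrapping function is invented for illustration:

    /* Sketch only: the wrapper function is hypothetical; fw_has_api()
     * is the driver's existing bitmap accessor. */
    static bool iwl_example_uses_new_rx_stats(const struct iwl_fw *fw)
    {
            return fw_has_api(&fw->ucode_capa,
                              IWL_UCODE_TLV_API_NEW_RX_STATS);
    }
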
@@@ -332,6 -328,7 +332,7 @@@ typedef unsigned int __bitwise iwl_ucod
   * @IWL_UCODE_TLV_CAPA_TX_POWER_ACK: reduced TX power API has larger
   *    command size (command version 4) that supports toggling ACK TX
   *    power reduction.
+  * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload
   *
   * @NUM_IWL_UCODE_TLV_CAPA: number of bits used
   */
@@@ -377,6 -374,7 +378,7 @@@ enum iwl_ucode_tlv_capa 
        IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG        = (__force iwl_ucode_tlv_capa_t)80,
        IWL_UCODE_TLV_CAPA_LQM_SUPPORT                  = (__force iwl_ucode_tlv_capa_t)81,
        IWL_UCODE_TLV_CAPA_TX_POWER_ACK                 = (__force iwl_ucode_tlv_capa_t)84,
+       IWL_UCODE_TLV_CAPA_MLME_OFFLOAD                 = (__force iwl_ucode_tlv_capa_t)96,
  
        NUM_IWL_UCODE_TLV_CAPA
  #ifdef __CHECKER__
index b82a3d0f64b0e86a33fe86970b1eacae98c61a93,d19c74827fbb6094147ab48fc266cd6dbbb15e28..3e057b539d5b76dede3bf18de696003e041ace7a
@@@ -276,10 -276,10 +276,10 @@@ struct iwl_pwr_tx_backoff 
   * @fw_name_pre: Firmware filename prefix. The api version and extension
   *    (.ucode) will be added to filename before loading from disk. The
   *    filename is constructed as fw_name_pre<api>.ucode.
-  * @fw_name_pre_next_step: same as @fw_name_pre, only for next step
+  * @fw_name_pre_b_or_c_step: same as @fw_name_pre, only for b or c steps
   *    (if supported)
-  * @fw_name_pre_rf_next_step: same as @fw_name_pre_next_step, only for rf next
-  *    step. Supported only in integrated solutions.
+  * @fw_name_pre_rf_next_step: same as @fw_name_pre_b_or_c_step, only for rf
+  *    next step. Supported only in integrated solutions.
   * @ucode_api_max: Highest version of uCode API supported by driver.
   * @ucode_api_min: Lowest version of uCode API supported by driver.
   * @max_inst_size: The maximal length of the fw inst section
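
As the comment above describes, the runtime firmware filename is the
configured prefix plus the API version plus ".ucode". A hedged sketch of
that construction (the buffer and the example values are illustrative,
not the driver's actual symbols):

    char fw_name[64];     /* illustrative buffer */

    /* e.g. prefix "iwlwifi-8000C-" + api 34 -> "iwlwifi-8000C-34.ucode" */
    snprintf(fw_name, sizeof(fw_name), "%s%d.ucode",
             cfg->fw_name_pre, cfg->ucode_api_max);
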
@@@ -330,7 -330,7 +330,7 @@@ struct iwl_cfg 
        /* params specific to an individual device within a device family */
        const char *name;
        const char *fw_name_pre;
-       const char *fw_name_pre_next_step;
+       const char *fw_name_pre_b_or_c_step;
        const char *fw_name_pre_rf_next_step;
        /* params not likely to change within a device family */
        const struct iwl_base_params *base_params;
@@@ -463,9 -463,6 +463,9 @@@ extern const struct iwl_cfg iwla000_2ac
  extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb;
  extern const struct iwl_cfg iwla000_2ac_cfg_jf;
  extern const struct iwl_cfg iwla000_2ax_cfg_hr;
 +extern const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_f0;
 +extern const struct iwl_cfg iwla000_2ax_cfg_qnj_jf_b0;
 +extern const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_a0;
  #endif /* CONFIG_IWLMVM */
  
  #endif /* __IWL_CONFIG_H__ */
index bd3902f888e0b8f81526946ae418103bbf754681,4e0f86fe0a6f0874a96d41ae257c9621a2f035ee..99676d6c4713ca679106982f48c10038e7310da6
@@@ -216,8 -216,9 +216,9 @@@ static int iwl_request_firmware(struct 
        const char *fw_pre_name;
  
        if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
-           CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP)
-               fw_pre_name = cfg->fw_name_pre_next_step;
+           (CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP ||
+            CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_C_STEP))
+               fw_pre_name = cfg->fw_name_pre_b_or_c_step;
        else if (drv->trans->cfg->integrated &&
                 CSR_HW_RFID_STEP(drv->trans->hw_rf_id) == SILICON_B_STEP &&
                 cfg->fw_name_pre_rf_next_step)
@@@ -478,8 -479,8 +479,8 @@@ static int iwl_set_default_calib(struc
        return 0;
  }
  
 -static int iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
 -                                 struct iwl_ucode_capabilities *capa)
 +static void iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
 +                                  struct iwl_ucode_capabilities *capa)
  {
        const struct iwl_ucode_api *ucode_api = (void *)data;
        u32 api_index = le32_to_cpu(ucode_api->api_index);
        int i;
  
        if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_API, 32)) {
 -              IWL_ERR(drv,
 -                      "api flags index %d larger than supported by driver\n",
 -                      api_index);
 -              /* don't return an error so we can load FW that has more bits */
 -              return 0;
 +              IWL_WARN(drv,
 +                       "api flags index %d larger than supported by driver\n",
 +                       api_index);
 +              return;
        }
  
        for (i = 0; i < 32; i++) {
                if (api_flags & BIT(i))
                        __set_bit(i + 32 * api_index, capa->_api);
        }
 -
 -      return 0;
  }
  
 -static int iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data,
 -                                    struct iwl_ucode_capabilities *capa)
 +static void iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data,
 +                                     struct iwl_ucode_capabilities *capa)
  {
        const struct iwl_ucode_capa *ucode_capa = (void *)data;
        u32 api_index = le32_to_cpu(ucode_capa->api_index);
        int i;
  
        if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_CAPA, 32)) {
 -              IWL_ERR(drv,
 -                      "capa flags index %d larger than supported by driver\n",
 -                      api_index);
 -              /* don't return an error so we can load FW that has more bits */
 -              return 0;
 +              IWL_WARN(drv,
 +                       "capa flags index %d larger than supported by driver\n",
 +                       api_index);
 +              return;
        }
  
        for (i = 0; i < 32; i++) {
                if (api_flags & BIT(i))
                        __set_bit(i + 32 * api_index, capa->_capa);
        }
 -
 -      return 0;
  }
  
  static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
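
For reference, each IWL_UCODE_TLV_API_CHANGES_SET payload pairs a 32-bit
group index with a 32-bit flag word, and iwl_set_ucode_api_flags() folds
them into an absolute bit position. A worked instance with invented
values:

    u32 api_index = 1;        /* second 32-bit group (invented value) */
    u32 api_flags = BIT(5);   /* flag bit 5 within that group */

    /* __set_bit(5 + 32 * 1, capa->_api) marks absolute API bit 37,
     * i.e. IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL in the enum above. */
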
@@@ -759,12 -766,14 +760,12 @@@ static int iwl_parse_tlv_firmware(struc
                case IWL_UCODE_TLV_API_CHANGES_SET:
                        if (tlv_len != sizeof(struct iwl_ucode_api))
                                goto invalid_tlv_len;
 -                      if (iwl_set_ucode_api_flags(drv, tlv_data, capa))
 -                              goto tlv_error;
 +                      iwl_set_ucode_api_flags(drv, tlv_data, capa);
                        break;
                case IWL_UCODE_TLV_ENABLED_CAPABILITIES:
                        if (tlv_len != sizeof(struct iwl_ucode_capa))
                                goto invalid_tlv_len;
 -                      if (iwl_set_ucode_capabilities(drv, tlv_data, capa))
 -                              goto tlv_error;
 +                      iwl_set_ucode_capabilities(drv, tlv_data, capa);
                        break;
                case IWL_UCODE_TLV_INIT_EVTLOG_PTR:
                        if (tlv_len != sizeof(u32))
index aa382f7199882c6a3a292dd83e22f05e967bfa8e,3ee6767392b61151efc774610223e2f6216d22f3..b46796944cf2d7d2acca8a92787c5101e58f4ff7
@@@ -79,7 -79,6 +79,7 @@@
  /* NVM offsets (in words) definitions */
  enum wkp_nvm_offsets {
        /* NVM HW-Section offset (in words) definitions */
 +      SUBSYSTEM_ID = 0x0A,
        HW_ADDR = 0x15,
  
        /* NVM SW-Section offset (in words) definitions */
@@@ -184,26 -183,22 +184,26 @@@ static struct ieee80211_rate iwl_cfg802
   * @NVM_CHANNEL_INDOOR_ONLY: only indoor use is allowed
   * @NVM_CHANNEL_GO_CONCURRENT: GO operation is allowed when connected to BSS
   *    on same channel on 2.4 or same UNII band on 5.2
 - * @NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
 - * @NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
 - * @NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
 - * @NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
 + * @NVM_CHANNEL_UNIFORM: uniform spreading required
 + * @NVM_CHANNEL_20MHZ: 20 MHz channel okay
 + * @NVM_CHANNEL_40MHZ: 40 MHz channel okay
 + * @NVM_CHANNEL_80MHZ: 80 MHz channel okay
 + * @NVM_CHANNEL_160MHZ: 160 MHz channel okay
 + * @NVM_CHANNEL_DC_HIGH: DC HIGH required/allowed (?)
   */
  enum iwl_nvm_channel_flags {
 -      NVM_CHANNEL_VALID = BIT(0),
 -      NVM_CHANNEL_IBSS = BIT(1),
 -      NVM_CHANNEL_ACTIVE = BIT(3),
 -      NVM_CHANNEL_RADAR = BIT(4),
 -      NVM_CHANNEL_INDOOR_ONLY = BIT(5),
 -      NVM_CHANNEL_GO_CONCURRENT = BIT(6),
 -      NVM_CHANNEL_WIDE = BIT(8),
 -      NVM_CHANNEL_40MHZ = BIT(9),
 -      NVM_CHANNEL_80MHZ = BIT(10),
 -      NVM_CHANNEL_160MHZ = BIT(11),
 +      NVM_CHANNEL_VALID               = BIT(0),
 +      NVM_CHANNEL_IBSS                = BIT(1),
 +      NVM_CHANNEL_ACTIVE              = BIT(3),
 +      NVM_CHANNEL_RADAR               = BIT(4),
 +      NVM_CHANNEL_INDOOR_ONLY         = BIT(5),
 +      NVM_CHANNEL_GO_CONCURRENT       = BIT(6),
 +      NVM_CHANNEL_UNIFORM             = BIT(7),
 +      NVM_CHANNEL_20MHZ               = BIT(8),
 +      NVM_CHANNEL_40MHZ               = BIT(9),
 +      NVM_CHANNEL_80MHZ               = BIT(10),
 +      NVM_CHANNEL_160MHZ              = BIT(11),
 +      NVM_CHANNEL_DC_HIGH             = BIT(12),
  };
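
As a worked decode against the flags above (the word is invented for the
example): ch_flags = 0x0f01 sets bit 0 plus bits 8-11, i.e. a valid
channel with 20/40/80/160 MHz all permitted:

    u16 ch_flags = 0x0f01;                        /* invented example */
    bool valid  = ch_flags & NVM_CHANNEL_VALID;   /* bit 0  -> true */
    bool vht160 = ch_flags & NVM_CHANNEL_160MHZ;  /* bit 11 -> true; this is
                                                   * the test that sets
                                                   * vht160_supported below */
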
  
  #define CHECK_AND_PRINT_I(x)  \
@@@ -259,12 -254,13 +259,12 @@@ static u32 iwl_get_channel_flags(u8 ch_
  static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
                                struct iwl_nvm_data *data,
                                const __le16 * const nvm_ch_flags,
 -                              bool lar_supported)
 +                              bool lar_supported, bool no_wide_in_5ghz)
  {
        int ch_idx;
        int n_channels = 0;
        struct ieee80211_channel *channel;
        u16 ch_flags;
 -      bool is_5ghz;
        int num_of_ch, num_2ghz_channels;
        const u8 *nvm_chan;
  
        }
  
        for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
 +              bool is_5ghz = (ch_idx >= num_2ghz_channels);
 +
                ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
  
 -              if (ch_idx >= num_2ghz_channels &&
 -                  !data->sku_cap_band_52GHz_enable)
 +              if (is_5ghz && !data->sku_cap_band_52GHz_enable)
                        continue;
  
 +              /* workaround to disable wide channels in 5GHz */
 +              if (no_wide_in_5ghz && is_5ghz) {
 +                      ch_flags &= ~(NVM_CHANNEL_40MHZ |
 +                                   NVM_CHANNEL_80MHZ |
 +                                   NVM_CHANNEL_160MHZ);
 +              }
 +
                if (ch_flags & NVM_CHANNEL_160MHZ)
                        data->vht160_supported = true;
  
                n_channels++;
  
                channel->hw_value = nvm_chan[ch_idx];
 -              channel->band = (ch_idx < num_2ghz_channels) ?
 -                              NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
 +              channel->band = is_5ghz ?
 +                              NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
                channel->center_freq =
                        ieee80211_channel_to_frequency(
                                channel->hw_value, channel->band);
                 * is not used in mvm, and is used for backwards compatibility
                 */
                channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
 -              is_5ghz = channel->band == NL80211_BAND_5GHZ;
  
                /* don't put limitations in case we're using LAR */
                if (!lar_supported)
                        channel->flags = 0;
  
                IWL_DEBUG_EEPROM(dev,
 -                               "Ch. %d [%sGHz] flags 0x%x %s%s%s%s%s%s%s%s%s%s(%ddBm): Ad-Hoc %ssupported\n",
 +                               "Ch. %d [%sGHz] flags 0x%x %s%s%s%s%s%s%s%s%s%s%s%s(%ddBm): Ad-Hoc %ssupported\n",
                                 channel->hw_value,
                                 is_5ghz ? "5.2" : "2.4",
                                 ch_flags,
                                 CHECK_AND_PRINT_I(RADAR),
                                 CHECK_AND_PRINT_I(INDOOR_ONLY),
                                 CHECK_AND_PRINT_I(GO_CONCURRENT),
 -                               CHECK_AND_PRINT_I(WIDE),
 +                               CHECK_AND_PRINT_I(UNIFORM),
 +                               CHECK_AND_PRINT_I(20MHZ),
                                 CHECK_AND_PRINT_I(40MHZ),
                                 CHECK_AND_PRINT_I(80MHZ),
                                 CHECK_AND_PRINT_I(160MHZ),
 +                               CHECK_AND_PRINT_I(DC_HIGH),
                                 channel->max_power,
                                 ((ch_flags & NVM_CHANNEL_IBSS) &&
                                  !(ch_flags & NVM_CHANNEL_RADAR))
@@@ -445,15 -432,14 +445,15 @@@ static void iwl_init_vht_hw_capab(cons
  
  void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
                     struct iwl_nvm_data *data, const __le16 *nvm_ch_flags,
 -                   u8 tx_chains, u8 rx_chains, bool lar_supported)
 +                   u8 tx_chains, u8 rx_chains, bool lar_supported,
 +                   bool no_wide_in_5ghz)
  {
        int n_channels;
        int n_used = 0;
        struct ieee80211_supported_band *sband;
  
        n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags,
 -                                        lar_supported);
 +                                        lar_supported, no_wide_in_5ghz);
        sband = &data->bands[NL80211_BAND_2GHZ];
        sband->band = NL80211_BAND_2GHZ;
        sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
@@@ -582,7 -568,7 +582,7 @@@ static void iwl_set_hw_address_family_8
                                           const struct iwl_cfg *cfg,
                                           struct iwl_nvm_data *data,
                                           const __le16 *mac_override,
 -                                         const __le16 *nvm_hw)
 +                                         const __be16 *nvm_hw)
  {
        const u8 *hw_addr;
  
  
  static int iwl_set_hw_address(struct iwl_trans *trans,
                              const struct iwl_cfg *cfg,
 -                            struct iwl_nvm_data *data, const __le16 *nvm_hw,
 +                            struct iwl_nvm_data *data, const __be16 *nvm_hw,
                              const __le16 *mac_override)
  {
        if (cfg->mac_addr_from_csr) {
        return 0;
  }
  
 +static bool
 +iwl_nvm_no_wide_in_5ghz(struct device *dev, const struct iwl_cfg *cfg,
 +                      const __be16 *nvm_hw)
 +{
 +      /*
 +       * Workaround a bug in Indonesia SKUs where the regulatory in
 +       * some 7000-family OTPs erroneously allow wide channels in
 +       * 5GHz.  To check for Indonesia, we take the SKU value from
 +       * bits 1-4 in the subsystem ID and check if it is either 5 or
 +       * 9.  In those cases, we need to force-disable wide channels
 +       * in 5GHz otherwise the FW will throw a sysassert when we try
 +       * to use them.
 +       */
 +      if (cfg->device_family == IWL_DEVICE_FAMILY_7000) {
 +              /*
 +               * Unlike the other sections in the NVM, the hw
 +               * section uses big-endian.
 +               */
 +              u16 subsystem_id = be16_to_cpup(nvm_hw + SUBSYSTEM_ID);
 +              u8 sku = (subsystem_id & 0x1e) >> 1;
 +
 +              if (sku == 5 || sku == 9) {
 +                      IWL_DEBUG_EEPROM(dev,
 +                                       "disabling wide channels in 5GHz (0x%0x %d)\n",
 +                                       subsystem_id, sku);
 +                      return true;
 +              }
 +      }
 +
 +      return false;
 +}
 +
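
A worked instance of the SKU extraction above (subsystem ID invented for
the example): 0x002a has bits 1-4 equal to 0b0101 = 5, so such a
7000-family OTP would have its wide 5GHz channels force-disabled:

    u16 subsystem_id = 0x002a;            /* invented example value */
    u8 sku = (subsystem_id & 0x1e) >> 1;  /* (0x2a & 0x1e) >> 1 == 5 */

    /* sku == 5 (or 9) -> iwl_nvm_no_wide_in_5ghz() returns true */
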
  struct iwl_nvm_data *
  iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 -                 const __le16 *nvm_hw, const __le16 *nvm_sw,
 +                 const __be16 *nvm_hw, const __le16 *nvm_sw,
                   const __le16 *nvm_calib, const __le16 *regulatory,
                   const __le16 *mac_override, const __le16 *phy_sku,
                   u8 tx_chains, u8 rx_chains, bool lar_fw_supported)
        struct device *dev = trans->dev;
        struct iwl_nvm_data *data;
        bool lar_enabled;
 +      bool no_wide_in_5ghz = iwl_nvm_no_wide_in_5ghz(dev, cfg, nvm_hw);
        u32 sku, radio_cfg;
        u16 lar_config;
        const __le16 *ch_section;
        }
  
        iwl_init_sbands(dev, cfg, data, ch_section, tx_chains, rx_chains,
 -                      lar_fw_supported && lar_enabled);
 +                      lar_fw_supported && lar_enabled, no_wide_in_5ghz);
        data->calib_version = 255;
  
        return data;
@@@ -832,7 -785,8 +832,8 @@@ iwl_parse_nvm_mcc_info(struct device *d
                       int num_of_ch, __le32 *channels, u16 fw_mcc)
  {
        int ch_idx;
-       u16 ch_flags, prev_ch_flags = 0;
+       u16 ch_flags;
+       u32 reg_rule_flags, prev_reg_rule_flags = 0;
        const u8 *nvm_chan = cfg->ext_nvm ?
                             iwl_ext_nvm_channels : iwl_nvm_channels;
        struct ieee80211_regdomain *regd;
                        continue;
                }
  
+               reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
+                                                            ch_flags, cfg);
                /* we can't continue the same rule */
-               if (ch_idx == 0 || prev_ch_flags != ch_flags ||
+               if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||
                    center_freq - prev_center_freq > 20) {
                        valid_rules++;
                        new_rule = true;
                rule->power_rule.max_eirp =
                        DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER);
  
-               rule->flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
-                                                         ch_flags, cfg);
+               rule->flags = reg_rule_flags;
  
                /* rely on auto-calculation to merge BW of contiguous chans */
                rule->flags |= NL80211_RRF_AUTO_BW;
                rule->freq_range.max_bandwidth_khz = 0;
  
-               prev_ch_flags = ch_flags;
                prev_center_freq = center_freq;
+               prev_reg_rule_flags = reg_rule_flags;
  
                IWL_DEBUG_DEV(dev, IWL_DL_LAR,
-                             "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s%s%s%s(0x%02x): %s\n",
 -                            "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x) reg_flags 0x%x: %s\n",
++                            "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s%s%s%s(0x%02x) reg_flags 0x%x: %s\n",
                              center_freq,
                              band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
                              CHECK_AND_PRINT_I(VALID),
 +                            CHECK_AND_PRINT_I(IBSS),
                              CHECK_AND_PRINT_I(ACTIVE),
                              CHECK_AND_PRINT_I(RADAR),
 -                            CHECK_AND_PRINT_I(WIDE),
 +                            CHECK_AND_PRINT_I(INDOOR_ONLY),
 +                            CHECK_AND_PRINT_I(GO_CONCURRENT),
 +                            CHECK_AND_PRINT_I(UNIFORM),
 +                            CHECK_AND_PRINT_I(20MHZ),
                              CHECK_AND_PRINT_I(40MHZ),
                              CHECK_AND_PRINT_I(80MHZ),
                              CHECK_AND_PRINT_I(160MHZ),
 -                            CHECK_AND_PRINT_I(INDOOR_ONLY),
 -                            CHECK_AND_PRINT_I(GO_CONCURRENT),
 +                            CHECK_AND_PRINT_I(DC_HIGH),
-                             ch_flags,
+                             ch_flags, reg_rule_flags,
                              ((ch_flags & NVM_CHANNEL_ACTIVE) &&
                               !(ch_flags & NVM_CHANNEL_RADAR))
                                         ? "Ad-Hoc" : "");
index 3d65ab49a8a65060082293cae1d221f59247a77e,82863e9273eb66ce9058a5974d2f35988290e1dc..83485493a79aaea1a6c8d130cadb6be10371847c
@@@ -78,7 -78,7 +78,7 @@@
  #include "iwl-eeprom-parse.h"
  
  #include "mvm.h"
 -#include "fw-dbg.h"
 +#include "fw/dbg.h"
  #include "iwl-phy-db.h"
  
  #define MVM_UCODE_ALIVE_TIMEOUT       HZ
@@@ -144,6 -144,134 +144,6 @@@ static int iwl_mvm_send_dqa_cmd(struct 
        return ret;
  }
  
 -void iwl_free_fw_paging(struct iwl_mvm *mvm)
 -{
 -      int i;
 -
 -      if (!mvm->fw_paging_db[0].fw_paging_block)
 -              return;
 -
 -      for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
 -              struct iwl_fw_paging *paging = &mvm->fw_paging_db[i];
 -
 -              if (!paging->fw_paging_block) {
 -                      IWL_DEBUG_FW(mvm,
 -                                   "Paging: block %d already freed, continue to next page\n",
 -                                   i);
 -
 -                      continue;
 -              }
 -              dma_unmap_page(mvm->trans->dev, paging->fw_paging_phys,
 -                             paging->fw_paging_size, DMA_BIDIRECTIONAL);
 -
 -              __free_pages(paging->fw_paging_block,
 -                           get_order(paging->fw_paging_size));
 -              paging->fw_paging_block = NULL;
 -      }
 -      kfree(mvm->trans->paging_download_buf);
 -      mvm->trans->paging_download_buf = NULL;
 -      mvm->trans->paging_db = NULL;
 -
 -      memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
 -}
 -
 -static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
 -{
 -      int sec_idx, idx;
 -      u32 offset = 0;
 -
 -      /*
 -       * find where is the paging image start point:
 -       * if CPU2 exist and it's in paging format, then the image looks like:
 -       * CPU1 sections (2 or more)
 -       * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between CPU1 to CPU2
 -       * CPU2 sections (not paged)
 -       * PAGING_SEPARATOR_SECTION delimiter - separate between CPU2
 -       * non paged to CPU2 paging sec
 -       * CPU2 paging CSS
 -       * CPU2 paging image (including instruction and data)
 -       */
 -      for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) {
 -              if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
 -                      sec_idx++;
 -                      break;
 -              }
 -      }
 -
 -      /*
 -       * If paging is enabled there should be at least 2 more sections left
 -       * (one for CSS and one for Paging data)
 -       */
 -      if (sec_idx >= image->num_sec - 1) {
 -              IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n");
 -              iwl_free_fw_paging(mvm);
 -              return -EINVAL;
 -      }
 -
 -      /* copy the CSS block to the dram */
 -      IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
 -                   sec_idx);
 -
 -      memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
 -             image->sec[sec_idx].data,
 -             mvm->fw_paging_db[0].fw_paging_size);
 -      dma_sync_single_for_device(mvm->trans->dev,
 -                                 mvm->fw_paging_db[0].fw_paging_phys,
 -                                 mvm->fw_paging_db[0].fw_paging_size,
 -                                 DMA_BIDIRECTIONAL);
 -
 -      IWL_DEBUG_FW(mvm,
 -                   "Paging: copied %d CSS bytes to first block\n",
 -                   mvm->fw_paging_db[0].fw_paging_size);
 -
 -      sec_idx++;
 -
 -      /*
 -       * copy the paging blocks to the dram
 -       * loop index start from 1 since that CSS block already copied to dram
 -       * and CSS index is 0.
 -       * loop stop at num_of_paging_blk since that last block is not full.
 -       */
 -      for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
 -              struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];
 -
 -              memcpy(page_address(block->fw_paging_block),
 -                     image->sec[sec_idx].data + offset,
 -                     block->fw_paging_size);
 -              dma_sync_single_for_device(mvm->trans->dev,
 -                                         block->fw_paging_phys,
 -                                         block->fw_paging_size,
 -                                         DMA_BIDIRECTIONAL);
 -
 -
 -              IWL_DEBUG_FW(mvm,
 -                           "Paging: copied %d paging bytes to block %d\n",
 -                           mvm->fw_paging_db[idx].fw_paging_size,
 -                           idx);
 -
 -              offset += mvm->fw_paging_db[idx].fw_paging_size;
 -      }
 -
 -      /* copy the last paging block */
 -      if (mvm->num_of_pages_in_last_blk > 0) {
 -              struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];
 -
 -              memcpy(page_address(block->fw_paging_block),
 -                     image->sec[sec_idx].data + offset,
 -                     FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);
 -              dma_sync_single_for_device(mvm->trans->dev,
 -                                         block->fw_paging_phys,
 -                                         block->fw_paging_size,
 -                                         DMA_BIDIRECTIONAL);
 -
 -              IWL_DEBUG_FW(mvm,
 -                           "Paging: copied %d pages in the last block %d\n",
 -                           mvm->num_of_pages_in_last_blk, idx);
 -      }
 -
 -      return 0;
 -}
 -
  void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
                                   struct iwl_rx_cmd_buffer *rxb)
  {
                               le32_to_cpu(dump_data[i]));
  }
  
 -static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
 -                                 const struct fw_img *image)
 -{
 -      struct page *block;
 -      dma_addr_t phys = 0;
 -      int blk_idx, order, num_of_pages, size, dma_enabled;
 -
 -      if (mvm->fw_paging_db[0].fw_paging_block)
 -              return 0;
 -
 -      dma_enabled = is_device_dma_capable(mvm->trans->dev);
 -
 -      /* ensure BLOCK_2_EXP_SIZE is power of 2 of PAGING_BLOCK_SIZE */
 -      BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);
 -
 -      num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
 -      mvm->num_of_paging_blk =
 -              DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP);
 -      mvm->num_of_pages_in_last_blk =
 -              num_of_pages -
 -              NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);
 -
 -      IWL_DEBUG_FW(mvm,
 -                   "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
 -                   mvm->num_of_paging_blk,
 -                   mvm->num_of_pages_in_last_blk);
 -
 -      /*
 -       * Allocate CSS and paging blocks in dram.
 -       */
 -      for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
 -              /* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */
 -              size = blk_idx ? PAGING_BLOCK_SIZE : FW_PAGING_SIZE;
 -              order = get_order(size);
 -              block = alloc_pages(GFP_KERNEL, order);
 -              if (!block) {
 -                      /* free all the previous pages since we failed */
 -                      iwl_free_fw_paging(mvm);
 -                      return -ENOMEM;
 -              }
 -
 -              mvm->fw_paging_db[blk_idx].fw_paging_block = block;
 -              mvm->fw_paging_db[blk_idx].fw_paging_size = size;
 -
 -              if (dma_enabled) {
 -                      phys = dma_map_page(mvm->trans->dev, block, 0,
 -                                          PAGE_SIZE << order,
 -                                          DMA_BIDIRECTIONAL);
 -                      if (dma_mapping_error(mvm->trans->dev, phys)) {
 -                              /*
 -                               * free the previous pages and the current one
 -                               * since we failed to map_page.
 -                               */
 -                              iwl_free_fw_paging(mvm);
 -                              return -ENOMEM;
 -                      }
 -                      mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
 -              } else {
 -                      mvm->fw_paging_db[blk_idx].fw_paging_phys =
 -                              PAGING_ADDR_SIG |
 -                              blk_idx << BLOCK_2_EXP_SIZE;
 -              }
 -
 -              if (!blk_idx)
 -                      IWL_DEBUG_FW(mvm,
 -                                   "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
 -                                   order);
 -              else
 -                      IWL_DEBUG_FW(mvm,
 -                                   "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
 -                                   order);
 -      }
 -
 -      return 0;
 -}
 -
 -static int iwl_save_fw_paging(struct iwl_mvm *mvm,
 -                            const struct fw_img *fw)
 -{
 -      int ret;
 -
 -      ret = iwl_alloc_fw_paging_mem(mvm, fw);
 -      if (ret)
 -              return ret;
 -
 -      return iwl_fill_paging_mem(mvm, fw);
 -}
 -
 -/* send paging cmd to FW in case CPU2 has paging image */
 -static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
 -{
 -      struct iwl_fw_paging_cmd paging_cmd = {
 -              .flags = cpu_to_le32(PAGING_CMD_IS_SECURED |
 -                                   PAGING_CMD_IS_ENABLED |
 -                                   (mvm->num_of_pages_in_last_blk <<
 -                                    PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
 -              .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
 -              .block_num = cpu_to_le32(mvm->num_of_paging_blk),
 -      };
 -      int blk_idx;
 -
 -      /* loop for for all paging blocks + CSS block */
 -      for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
 -              dma_addr_t addr = mvm->fw_paging_db[blk_idx].fw_paging_phys;
 -              __le32 phy_addr;
 -
 -              addr = addr >> PAGE_2_EXP_SIZE;
 -              phy_addr = cpu_to_le32(addr);
 -              paging_cmd.device_phy_addr[blk_idx] = phy_addr;
 -      }
 -
 -      return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
 -                                                  IWL_ALWAYS_LONG_GROUP, 0),
 -                                  0, sizeof(paging_cmd), &paging_cmd);
 -}
 -
 -/*
 - * Send paging item cmd to FW in case CPU2 has paging image
 - */
 -static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
 -{
 -      int ret;
 -      struct iwl_fw_get_item_cmd fw_get_item_cmd = {
 -              .item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
 -      };
 -
 -      struct iwl_fw_get_item_resp *item_resp;
 -      struct iwl_host_cmd cmd = {
 -              .id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
 -              .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
 -              .data = { &fw_get_item_cmd, },
 -      };
 -
 -      cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);
 -
 -      ret = iwl_mvm_send_cmd(mvm, &cmd);
 -      if (ret) {
 -              IWL_ERR(mvm,
 -                      "Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
 -                      ret);
 -              return ret;
 -      }
 -
 -      item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
 -      if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
 -              IWL_ERR(mvm,
 -                      "Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
 -                      le32_to_cpu(item_resp->item_id));
 -              ret = -EIO;
 -              goto exit;
 -      }
 -
 -      /* Add an extra page for headers */
 -      mvm->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE +
 -                                                FW_PAGING_SIZE,
 -                                                GFP_KERNEL);
 -      if (!mvm->trans->paging_download_buf) {
 -              ret = -ENOMEM;
 -              goto exit;
 -      }
 -      mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
 -      mvm->trans->paging_db = mvm->fw_paging_db;
 -      IWL_DEBUG_FW(mvm,
 -                   "Paging: got paging request address (paging_req_addr 0x%08x)\n",
 -                   mvm->trans->paging_req_addr);
 -
 -exit:
 -      iwl_free_resp(&cmd);
 -
 -      return ret;
 -}
 -
  static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
                         struct iwl_rx_packet *pkt, void *data)
  {
@@@ -244,6 -544,48 +244,6 @@@ static bool iwl_wait_phy_db_entry(struc
        return false;
  }
  
 -static int iwl_mvm_init_paging(struct iwl_mvm *mvm)
 -{
 -      const struct fw_img *fw = &mvm->fw->img[mvm->cur_ucode];
 -      int ret;
 -
 -      /*
 -       * Configure and operate fw paging mechanism.
 -       * The driver configures the paging flow only once.
 -       * The CPU2 paging image is included in the IWL_UCODE_INIT image.
 -       */
 -      if (!fw->paging_mem_size)
 -              return 0;
 -
 -      /*
 -       * When dma is not enabled, the driver needs to copy / write
 -       * the downloaded / uploaded page to / from the smem.
 -       * This gets the location of the place were the pages are
 -       * stored.
 -       */
 -      if (!is_device_dma_capable(mvm->trans->dev)) {
 -              ret = iwl_trans_get_paging_item(mvm);
 -              if (ret) {
 -                      IWL_ERR(mvm, "failed to get FW paging item\n");
 -                      return ret;
 -              }
 -      }
 -
 -      ret = iwl_save_fw_paging(mvm, fw);
 -      if (ret) {
 -              IWL_ERR(mvm, "failed to save the FW paging image\n");
 -              return ret;
 -      }
 -
 -      ret = iwl_send_paging_cmd(mvm, fw);
 -      if (ret) {
 -              IWL_ERR(mvm, "failed to send the paging cmd\n");
 -              iwl_free_fw_paging(mvm);
 -              return ret;
 -      }
 -
 -      return 0;
 -}
  static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
                                         enum iwl_ucode_type ucode_type)
  {
        struct iwl_mvm_alive_data alive_data;
        const struct fw_img *fw;
        int ret, i;
 -      enum iwl_ucode_type old_type = mvm->cur_ucode;
 +      enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
        static const u16 alive_cmd[] = { MVM_ALIVE };
        struct iwl_sf_region st_fwrd_space;
  
                fw = iwl_get_ucode_image(mvm->fw, ucode_type);
        if (WARN_ON(!fw))
                return -EINVAL;
 -      mvm->cur_ucode = ucode_type;
 +      iwl_fw_set_current_image(&mvm->fwrt, ucode_type);
        clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
  
        iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
  
        ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
        if (ret) {
 -              mvm->cur_ucode = old_type;
 +              iwl_fw_set_current_image(&mvm->fwrt, old_type);
                iwl_remove_notification(&mvm->notif_wait, &alive_wait);
                return ret;
        }
                                "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
                                iwl_read_prph(trans, SB_CPU_1_STATUS),
                                iwl_read_prph(trans, SB_CPU_2_STATUS));
 -              mvm->cur_ucode = old_type;
 +              iwl_fw_set_current_image(&mvm->fwrt, old_type);
                return ret;
        }
  
        if (!alive_data.valid) {
                IWL_ERR(mvm, "Loaded ucode is not valid!\n");
 -              mvm->cur_ucode = old_type;
 +              iwl_fw_set_current_image(&mvm->fwrt, old_type);
                return -EIO;
        }
  
         */
  
        memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
 -      if (iwl_mvm_is_dqa_supported(mvm))
 -              mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
 -      else
 -              mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;
 +      mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
  
        for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
                atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
@@@ -388,7 -733,7 +388,7 @@@ static int iwl_run_unified_mvm_ucode(st
        }
  
        if (IWL_MVM_PARSE_NVM && read_nvm) {
 -              ret = iwl_nvm_init(mvm, true);
 +              ret = iwl_nvm_init(mvm);
                if (ret) {
                        IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
                        goto error;
  
        /* Read the NVM only at driver load time, no need to do this twice */
        if (!IWL_MVM_PARSE_NVM && read_nvm) {
 -              ret = iwl_mvm_nvm_get_from_fw(mvm);
 -              if (ret) {
 +              mvm->nvm_data = iwl_fw_get_nvm(&mvm->fwrt);
 +              if (IS_ERR(mvm->nvm_data)) {
 +                      ret = PTR_ERR(mvm->nvm_data);
 +                      mvm->nvm_data = NULL;
                        IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
                        return ret;
                }
@@@ -431,7 -774,7 +431,7 @@@ error
  static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
  {
        struct iwl_phy_cfg_cmd phy_cfg_cmd;
 -      enum iwl_ucode_type ucode_type = mvm->cur_ucode;
 +      enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;
  
        /* Set parameters */
        phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
@@@ -456,7 -799,7 +456,7 @@@ int iwl_run_init_mvm_ucode(struct iwl_m
        };
        int ret;
  
 -      if (iwl_mvm_has_new_tx_api(mvm))
 +      if (iwl_mvm_has_unified_ucode(mvm))
                return iwl_run_unified_mvm_ucode(mvm, true);
  
        lockdep_assert_held(&mvm->mutex);
        ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
        if (ret) {
                IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
 -              goto error;
 +              goto remove_notif;
        }
  
        if (mvm->cfg->device_family < IWL_DEVICE_FAMILY_8000) {
                ret = iwl_mvm_send_bt_init_conf(mvm);
                if (ret)
 -                      goto error;
 +                      goto remove_notif;
        }
  
        /* Read the NVM only at driver load time, no need to do this twice */
        if (read_nvm) {
 -              /* Read nvm */
 -              ret = iwl_nvm_init(mvm, true);
 +              ret = iwl_nvm_init(mvm);
                if (ret) {
                        IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
 -                      goto error;
 +                      goto remove_notif;
                }
        }
  
        if (mvm->nvm_file_name)
                iwl_mvm_load_nvm_to_nic(mvm);
  
 -      ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
 -      WARN_ON(ret);
 +      WARN_ON(iwl_nvm_check_version(mvm->nvm_data, mvm->trans));
  
        /*
         * abort after reading the nvm in case RF Kill is on, we will complete
        if (iwl_mvm_is_radio_hw_killed(mvm)) {
                IWL_DEBUG_RF_KILL(mvm,
                                  "jump over all phy activities due to RF kill\n");
 -              iwl_remove_notification(&mvm->notif_wait, &calib_wait);
 -              ret = 1;
 -              goto out;
 +              goto remove_notif;
        }
  
        mvm->calibrating = true;
        /* Send TX valid antennas before triggering calibrations */
        ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
        if (ret)
 -              goto error;
 +              goto remove_notif;
  
 -      /*
 -       * Send phy configurations command to init uCode
 -       * to start the 16.0 uCode init image internal calibrations.
 -       */
        ret = iwl_send_phy_cfg_cmd(mvm);
        if (ret) {
                IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
                        ret);
 -              goto error;
 +              goto remove_notif;
        }
  
        /*
         * just wait for the calibration complete notification.
         */
        ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
 -                      MVM_UCODE_CALIB_TIMEOUT);
 +                                  MVM_UCODE_CALIB_TIMEOUT);
 +      if (!ret)
 +              goto out;
  
 -      if (ret && iwl_mvm_is_radio_hw_killed(mvm)) {
 +      if (iwl_mvm_is_radio_hw_killed(mvm)) {
                IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
 -              ret = 1;
 +              ret = 0;
 +      } else {
 +              IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
 +                      ret);
        }
 +
        goto out;
  
 -error:
 +remove_notif:
        iwl_remove_notification(&mvm->notif_wait, &calib_wait);
  out:
        mvm->calibrating = false;
        return ret;
  }
  
 -static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
 -                                        struct iwl_rx_packet *pkt)
 -{
 -      struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
 -      int i, lmac;
 -      int lmac_num = le32_to_cpu(mem_cfg->lmac_num);
 -
 -      if (WARN_ON(lmac_num > ARRAY_SIZE(mem_cfg->lmac_smem)))
 -              return;
 -
 -      mvm->smem_cfg.num_lmacs = lmac_num;
 -      mvm->smem_cfg.num_txfifo_entries =
 -              ARRAY_SIZE(mem_cfg->lmac_smem[0].txfifo_size);
 -      mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo2_size);
 -
 -      for (lmac = 0; lmac < lmac_num; lmac++) {
 -              struct iwl_shared_mem_lmac_cfg *lmac_cfg =
 -                      &mem_cfg->lmac_smem[lmac];
 -
 -              for (i = 0; i < ARRAY_SIZE(lmac_cfg->txfifo_size); i++)
 -                      mvm->smem_cfg.lmac[lmac].txfifo_size[i] =
 -                              le32_to_cpu(lmac_cfg->txfifo_size[i]);
 -              mvm->smem_cfg.lmac[lmac].rxfifo1_size =
 -                      le32_to_cpu(lmac_cfg->rxfifo1_size);
 -      }
 -}
 -
 -static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm,
 -                                   struct iwl_rx_packet *pkt)
 -{
 -      struct iwl_shared_mem_cfg_v2 *mem_cfg = (void *)pkt->data;
 -      int i;
 -
 -      mvm->smem_cfg.num_lmacs = 1;
 -
 -      mvm->smem_cfg.num_txfifo_entries = ARRAY_SIZE(mem_cfg->txfifo_size);
 -      for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
 -              mvm->smem_cfg.lmac[0].txfifo_size[i] =
 -                      le32_to_cpu(mem_cfg->txfifo_size[i]);
 -
 -      mvm->smem_cfg.lmac[0].rxfifo1_size =
 -              le32_to_cpu(mem_cfg->rxfifo_size[0]);
 -      mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo_size[1]);
 -
 -      /* new API has more data, from rxfifo_addr field and on */
 -      if (fw_has_capa(&mvm->fw->ucode_capa,
 -                      IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
 -              BUILD_BUG_ON(sizeof(mvm->smem_cfg.internal_txfifo_size) !=
 -                           sizeof(mem_cfg->internal_txfifo_size));
 -
 -              for (i = 0;
 -                   i < ARRAY_SIZE(mvm->smem_cfg.internal_txfifo_size);
 -                   i++)
 -                      mvm->smem_cfg.internal_txfifo_size[i] =
 -                              le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
 -      }
 -}
 -
 -static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
 -{
 -      struct iwl_host_cmd cmd = {
 -              .flags = CMD_WANT_SKB,
 -              .data = { NULL, },
 -              .len = { 0, },
 -      };
 -      struct iwl_rx_packet *pkt;
 -
 -      lockdep_assert_held(&mvm->mutex);
 -
 -      if (fw_has_capa(&mvm->fw->ucode_capa,
 -                      IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
 -              cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
 -      else
 -              cmd.id = SHARED_MEM_CFG;
 -
 -      if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
 -              return;
 -
 -      pkt = cmd.resp_pkt;
 -      if (iwl_mvm_has_new_tx_api(mvm))
 -              iwl_mvm_parse_shared_mem_a000(mvm, pkt);
 -      else
 -              iwl_mvm_parse_shared_mem(mvm, pkt);
 -
 -      IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
 -
 -      iwl_free_resp(&cmd);
 -}
 -
  static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
  {
        struct iwl_ltr_config_cmd cmd = {
@@@ -614,8 -1048,8 +614,8 @@@ static union acpi_object *iwl_mvm_sar_f
                                                    union acpi_object *data,
                                                    int data_size)
  {
 +      union acpi_object *wifi_pkg = NULL;
        int i;
 -      union acpi_object *wifi_pkg;
  
        /*
         * We need at least two packages, one for the revision and one
@@@ -841,8 -1275,10 +841,10 @@@ static int iwl_mvm_sar_get_wgds_table(s
  
                        entry = &wifi_pkg->package.elements[idx++];
                        if ((entry->type != ACPI_TYPE_INTEGER) ||
-                           (entry->integer.value > U8_MAX))
-                               return -EINVAL;
+                           (entry->integer.value > U8_MAX)) {
+                               ret = -EINVAL;
+                               goto out_free;
+                       }
  
                        mvm->geo_profiles[i].values[j] = entry->integer.value;
                }
@@@ -995,17 -1431,6 +997,17 @@@ static int iwl_mvm_sar_geo_init(struct 
  {
        return 0;
  }
 +
 +int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a,
 +                             int prof_b)
 +{
 +      return -ENOENT;
 +}
 +
 +int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
 +{
 +      return -ENOENT;
 +}
  #endif /* CONFIG_ACPI */
  
  static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
@@@ -1042,7 -1467,7 +1044,7 @@@ static int iwl_mvm_load_rt_fw(struct iw
  {
        int ret;
  
 -      if (iwl_mvm_has_new_tx_api(mvm))
 +      if (iwl_mvm_has_unified_ucode(mvm))
                return iwl_run_unified_mvm_ucode(mvm, false);
  
        ret = iwl_run_init_mvm_ucode(mvm, false);
  
        if (ret) {
                IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
 -              /* this can't happen */
 -              if (WARN_ON(ret > 0))
 -                      ret = -ERFKILL;
                return ret;
        }
  
        if (ret)
                return ret;
  
 -      return iwl_mvm_init_paging(mvm);
 +      return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img);
  }
  
  int iwl_mvm_up(struct iwl_mvm *mvm)
                goto error;
        }
  
 -      iwl_mvm_get_shared_mem_conf(mvm);
 +      iwl_get_shared_mem_conf(&mvm->fwrt);
  
        ret = iwl_mvm_sf_update(mvm, NULL, false);
        if (ret)
                IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
  
 -      mvm->fw_dbg_conf = FW_DBG_INVALID;
 +      mvm->fwrt.dump.conf = FW_DBG_INVALID;
        /* if we have a destination, assume EARLY START */
        if (mvm->fw->dbg_dest_tlv)
 -              mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE;
 -      iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE);
 +              mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE;
 +      iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE);
  
        ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
        if (ret)
                goto error;
  
 -      /* Send phy db control command and then phy db calibration*/
 -      if (!iwl_mvm_has_new_tx_api(mvm)) {
 +      if (!iwl_mvm_has_unified_ucode(mvm)) {
 +              /* Send phy db control command and then phy db calibration */
                ret = iwl_send_phy_db_data(mvm->phy_db);
                if (ret)
                        goto error;
  
        /* Init RSS configuration */
        /* TODO - remove a000 disablement when we have RXQ config API */
 -      if (iwl_mvm_has_new_rx_api(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
 +      if (iwl_mvm_has_new_rx_api(mvm) &&
 +          mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_A000) {
                ret = iwl_send_rss_cfg_cmd(mvm);
                if (ret) {
                        IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
        /* reset quota debouncing buffer - 0xff will yield invalid data */
        memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));
  
 -      /* Enable DQA-mode if required */
 -      if (iwl_mvm_is_dqa_supported(mvm)) {
 -              ret = iwl_mvm_send_dqa_cmd(mvm);
 -              if (ret)
 -                      goto error;
 -      } else {
 -              IWL_DEBUG_FW(mvm, "Working in non-DQA mode\n");
 -      }
 +      ret = iwl_mvm_send_dqa_cmd(mvm);
 +      if (ret)
 +              goto error;
  
        /* Add auxiliary station for scanning */
        ret = iwl_mvm_add_aux_sta(mvm);
        }
  
        /* TODO: read the budget from BIOS / Platform NVM */
 -      if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0) {
 +
 +      /*
 +       * In case there is no budget from BIOS / Platform NVM, the default
 +       * budget should be 2000mW (cooling state 0).
 +       */
 +      if (iwl_mvm_is_ctdp_supported(mvm)) {
                ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
                                           mvm->cooling_dev.cur_state);
                if (ret)
        if (ret)
                goto error;
  
 +      iwl_mvm_leds_sync(mvm);
 +
        IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
        return 0;
   error:
index cfabe302c9c7aa34bd5039fb791e5755327763e3,ce901be5fba87e3674f06b030cfae8759e063c3b..d7feb1ab4dc9e0586e37f6bdff5edd815e943148
@@@ -87,6 -87,7 +87,6 @@@
  #include "fw/error-dump.h"
  #include "iwl-prph.h"
  #include "iwl-nvm-parse.h"
 -#include "fw-dbg.h"
  
  static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
        {
@@@ -445,18 -446,8 +445,18 @@@ int iwl_mvm_mac_setup_register(struct i
        ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
        if (iwl_mvm_has_new_rx_api(mvm))
                ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
 -      if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_STA_PM_NOTIF))
 +
 +      if (fw_has_capa(&mvm->fw->ucode_capa,
 +                      IWL_UCODE_TLV_CAPA_STA_PM_NOTIF)) {
                ieee80211_hw_set(hw, AP_LINK_PS);
 +      } else if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) {
 +              /*
 +               * we absolutely need this for the new TX API since that comes
 +               * with many more queues than the current code can deal with
 +               * for station powersave
 +               */
 +              return -EINVAL;
 +      }
  
        if (mvm->trans->num_rx_queues > 1)
                ieee80211_hw_set(hw, USES_RSS);
        if (mvm->trans->max_skb_frags)
                hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
  
 -      if (!iwl_mvm_is_dqa_supported(mvm))
 -              hw->queues = mvm->first_agg_queue;
 -      else
 -              hw->queues = IEEE80211_MAX_QUEUES;
 +      hw->queues = IEEE80211_MAX_QUEUES;
        hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
        hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
                                    IEEE80211_RADIOTAP_MCS_HAVE_STBC;
@@@ -805,7 -799,7 +805,7 @@@ static void iwl_mvm_mac_tx(struct ieee8
                goto drop;
        }
  
 -      if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
 +      if (info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
            !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
            !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
                goto drop;
        /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */
        if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER &&
                     ieee80211_is_mgmt(hdr->frame_control) &&
 -                   !ieee80211_is_deauth(hdr->frame_control) &&
 -                   !ieee80211_is_disassoc(hdr->frame_control) &&
 -                   !ieee80211_is_action(hdr->frame_control)))
 +                   !ieee80211_is_bufferable_mmpdu(hdr->frame_control)))
                sta = NULL;
  
        if (sta) {
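
For reference, ieee80211_is_bufferable_mmpdu() collapses exactly the
three frame-type checks the old condition spelled out; a sketch of the
mac80211 helper's semantics in this kernel era (see
include/linux/ieee80211.h for the authoritative definition):

    /* bufferable management frames per IEEE 802.11-2012:
     * action, disassociation and deauthentication frames */
    static inline bool ieee80211_is_bufferable_mmpdu(__le16 fc)
    {
            return ieee80211_is_mgmt(fc) &&
                   (ieee80211_is_action(fc) ||
                    ieee80211_is_disassoc(fc) ||
                    ieee80211_is_deauth(fc));
    }
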
@@@ -849,11 -845,11 +849,11 @@@ static inline bool iwl_enable_tx_ampdu(
        return true;
  }
  
 -#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \
 -      do {                                                    \
 -              if (!(le16_to_cpu(_tid_bm) & BIT(_tid)))        \
 -                      break;                                  \
 -              iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt); \
 +#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...)         \
 +      do {                                                            \
 +              if (!(le16_to_cpu(_tid_bm) & BIT(_tid)))                \
 +                      break;                                          \
 +              iwl_fw_dbg_collect_trig(&(_mvm)->fwrt, _trig, _fmt);    \
        } while (0)
  
  static void
@@@ -870,8 -866,7 +870,8 @@@ iwl_mvm_ampdu_check_trigger(struct iwl_
        trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
        ba_trig = (void *)trig->data;
  
 -      if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
 +      if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
 +                                         ieee80211_vif_to_wdev(vif), trig))
                return;
  
        switch (action) {
@@@ -1034,8 -1029,8 +1034,8 @@@ static void iwl_mvm_restart_cleanup(str
         * on D3->D0 transition
         */
        if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
 -              mvm->fw_dump_desc = &iwl_mvm_dump_desc_assert;
 -              iwl_mvm_fw_error_dump(mvm);
 +              mvm->fwrt.dump.desc = &iwl_dump_desc_assert;
 +              iwl_fw_error_dump(&mvm->fwrt);
        }
  
        /* cleanup all stale references (scan, roc), but keep the
  
        iwl_mvm_reset_phy_ctxts(mvm);
        memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
 -      memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
        memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
 -      memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
        memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
        memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
  
  
        mvm->vif_count = 0;
        mvm->rx_ba_sessions = 0;
 -      mvm->fw_dbg_conf = FW_DBG_INVALID;
 +      mvm->fwrt.dump.conf = FW_DBG_INVALID;
  
        /* keep statistics ticking */
        iwl_mvm_accu_radio_stats(mvm);
@@@ -1258,16 -1255,16 +1258,16 @@@ static void iwl_mvm_mac_stop(struct iee
         * Lock and clear the firmware running bit here already, so that
         * new commands coming in elsewhere, e.g. from debugfs, will not
         * be able to proceed. This is important here because one of those
 -       * debugfs files causes the fw_dump_wk to be triggered, and if we
 +       * debugfs files causes the firmware dump to be triggered, and if we
         * don't stop debugfs accesses before canceling that it could be
         * retriggered after we flush it but before we've cleared the bit.
         */
        clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
  
 -      cancel_delayed_work_sync(&mvm->fw_dump_wk);
 +      iwl_fw_cancel_dump(&mvm->fwrt);
        cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
        cancel_delayed_work_sync(&mvm->scan_timeout_dwork);
 -      iwl_mvm_free_fw_dump_desc(mvm);
 +      iwl_fw_free_dump_desc(&mvm->fwrt);
  
        mutex_lock(&mvm->mutex);
        __iwl_mvm_mac_stop(mvm);
@@@ -1373,15 -1370,17 +1373,15 @@@ static int iwl_mvm_mac_add_interface(st
                        goto out_release;
                }
  
 -              if (iwl_mvm_is_dqa_supported(mvm)) {
 -                      /*
 -                       * Only queue for this station is the mcast queue,
 -                       * which shouldn't be in TFD mask anyway
 -                       */
 -                      ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta,
 -                                                     0, vif->type,
 -                                                     IWL_STA_MULTICAST);
 -                      if (ret)
 -                              goto out_release;
 -              }
 +              /*
 +               * The only queue for this station is the mcast queue,
 +               * which shouldn't be in TFD mask anyway
 +               */
 +              ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta,
 +                                             0, vif->type,
 +                                             IWL_STA_MULTICAST);
 +              if (ret)
 +                      goto out_release;
  
                iwl_mvm_vif_dbgfs_register(mvm, vif);
                goto out_unlock;
                if (ret)
                        goto out_unref_phy;
  
 -              ret = iwl_mvm_add_bcast_sta(mvm, vif);
 +              ret = iwl_mvm_add_p2p_bcast_sta(mvm, vif);
                if (ret)
                        goto out_unbind;
  
   out_release:
        if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
                mvm->vif_count--;
 -
 -      iwl_mvm_mac_ctxt_release(mvm, vif);
   out_unlock:
        mutex_unlock(&mvm->mutex);
  
  static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
                                        struct ieee80211_vif *vif)
  {
 -      u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif);
 -
 -      if (tfd_msk && !iwl_mvm_is_dqa_supported(mvm)) {
 -              /*
 -               * mac80211 first removes all the stations of the vif and
 -               * then removes the vif. When it removes a station it also
 -               * flushes the AMPDU session. So by now, all the AMPDU sessions
 -               * of all the stations of this vif are closed, and the queues
 -               * of these AMPDU sessions are properly closed.
 -               * We still need to take care of the shared queues of the vif.
 -               * Flush them here.
 -               * For DQA mode there is no need - broadcast and multicast queues
 -               * are flushed separately.
 -               */
 -              mutex_lock(&mvm->mutex);
 -              iwl_mvm_flush_tx_path(mvm, tfd_msk, 0);
 -              mutex_unlock(&mvm->mutex);
 -
 -              /*
 -               * There are transports that buffer a few frames in the host.
 -               * For these, the flush above isn't enough since while we were
 -               * flushing, the transport might have sent more frames to the
 -               * device. To solve this, wait here until the transport is
 -               * empty. Technically, this could have replaced the flush
 -               * above, but flush is much faster than draining. So flush
 -               * first, and drain to make sure we have no frames in the
 -               * transport anymore.
 -               * If a station still had frames on the shared queues, it is
 -               * already marked as draining, so to complete the draining, we
 -               * just need to wait until the transport is empty.
 -               */
 -              iwl_trans_wait_tx_queues_empty(mvm->trans, tfd_msk);
 -      }
 -
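
The removed comment distinguishes flushing (fast, drops queued frames) from draining (slower, waits for the transport to empty). The pre-DQA pattern it implemented, reduced to a two-line sketch using the calls visible above:

iwl_mvm_flush_tx_path(mvm, tfd_msk, 0);               /* fast: drop what's queued */
iwl_trans_wait_tx_queues_empty(mvm->trans, tfd_msk);  /* then wait out stragglers */
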
        if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
                /*
                 * Flush the ROC worker which will flush the OFFCHANNEL queue.
                 * queue are sent in ROC session.
                 */
                flush_work(&mvm->roc_done_wk);
 -      } else {
 -              /*
 -               * By now, all the AC queues are empty. The AGG queues are
 -               * empty too. We already got all the Tx responses for all the
 -               * packets in the queues. The drain work can have been
 -               * triggered. Flush it.
 -               */
 -              flush_work(&mvm->sta_drained_wk);
        }
  }
  
@@@ -1513,7 -1556,7 +1513,7 @@@ static void iwl_mvm_mac_remove_interfac
  
        if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
                mvm->p2p_device_vif = NULL;
 -              iwl_mvm_rm_bcast_sta(mvm, vif);
 +              iwl_mvm_rm_p2p_bcast_sta(mvm, vif);
                iwl_mvm_binding_remove_vif(mvm, vif);
                iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
                mvmvif->phy_ctxt = NULL;
        iwl_mvm_mac_ctxt_remove(mvm, vif);
  
  out_release:
 -      iwl_mvm_mac_ctxt_release(mvm, vif);
        mutex_unlock(&mvm->mutex);
  }
  
@@@ -2023,7 -2067,8 +2023,7 @@@ static void iwl_mvm_bss_info_changed_st
                 * We received a beacon from the associated AP so
                 * remove the session protection.
                 */
 -              iwl_mvm_remove_time_event(mvm, mvmvif,
 -                                        &mvmvif->time_event_data);
 +              iwl_mvm_stop_session_protection(mvm, vif);
  
                iwl_mvm_sf_update(mvm, vif, false);
                WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
@@@ -2360,18 -2405,15 +2360,18 @@@ static void __iwl_mvm_mac_sta_notify(st
        unsigned long txqs = 0, tids = 0;
        int tid;
  
 +      /*
 +       * If we have TVQM then we get queue numbers that are too high - luckily
 +       * we really shouldn't get here with that because such hardware
 +       * should have firmware supporting buffer station offload.
 +       */
 +      if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
 +              return;
 +
        spin_lock_bh(&mvmsta->lock);
        for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
                struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
  
 -              if (!iwl_mvm_is_dqa_supported(mvm) &&
 -                  tid_data->state != IWL_AGG_ON &&
 -                  tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
 -                      continue;
 -
                if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE)
                        continue;
  
  
        switch (cmd) {
        case STA_NOTIFY_SLEEP:
 -              if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
 -                      ieee80211_sta_block_awake(hw, sta, true);
 -
                for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
                        ieee80211_sta_set_buffered(sta, tid, true);
  
@@@ -2527,8 -2572,7 +2527,8 @@@ iwl_mvm_tdls_check_trigger(struct iwl_m
  
        trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TDLS);
        tdls_trig = (void *)trig->data;
 -      if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
 +      if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
 +                                         ieee80211_vif_to_wdev(vif), trig))
                return;
  
        if (!(tdls_trig->action_bitmap & BIT(action)))
            memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0)
                return;
  
 -      iwl_mvm_fw_dbg_collect_trig(mvm, trig,
 -                                  "TDLS event occurred, peer %pM, action %d",
 -                                  peer_addr, action);
 +      iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
 +                              "TDLS event occurred, peer %pM, action %d",
 +                              peer_addr, action);
  }
  
  static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
        spin_lock_bh(&mvm_sta->lock);
        for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
                tid_data = &mvm_sta->tid_data[i];
-               while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames)))
+               while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) {
+                       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+                       /*
+                        * The first deferred frame should've stopped the MAC
+                        * queues, so we should never get a second deferred
+                        * frame for the RA/TID.
+                        */
+                       iwl_mvm_start_mac_queues(mvm, info->hw_queue);
                        ieee80211_free_txskb(mvm->hw, skb);
+               }
        }
        spin_unlock_bh(&mvm_sta->lock);
  }
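
The comment spells out the invariant this relies on: the first frame deferred for a RA/TID stops the MAC queue, so whoever empties deferred_tx_frames must wake that queue again or mac80211 stays blocked. A sketch of the pairing; the stop-side call is an assumption based on the helper names, and whether these helpers take a raw queue index or a BIT() mask is not visible in this excerpt:

__skb_queue_tail(&tid_data->deferred_tx_frames, skb); /* first frame deferred */
iwl_mvm_stop_mac_queues(mvm, info->hw_queue);         /* block the MAC queue */
/* ... later, on purge or queue allocation ... */
iwl_mvm_start_mac_queues(mvm, info->hw_queue);        /* unblock, as above */
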
@@@ -2577,6 -2631,9 +2587,6 @@@ static int iwl_mvm_mac_sta_state(struc
        if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
                return -EINVAL;
  
 -      /* if a STA is being removed, reuse its ID */
 -      flush_work(&mvm->sta_drained_wk);
 -
        /*
         * If we are in a STA removal flow and in DQA mode:
         *
         * make sure the worker is no longer handling frames for this STA.
         */
        if (old_state == IEEE80211_STA_NONE &&
 -          new_state == IEEE80211_STA_NOTEXIST &&
 -          iwl_mvm_is_dqa_supported(mvm)) {
 +          new_state == IEEE80211_STA_NOTEXIST) {
                iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
                flush_work(&mvm->add_stream_wk);
  
@@@ -3834,9 -3892,7 +3844,9 @@@ static int iwl_mvm_pre_channel_switch(s
        IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
                           chsw->chandef.center_freq1);
  
 -      iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH);
 +      iwl_fw_dbg_trigger_simple_stop(&mvm->fwrt,
 +                                     ieee80211_vif_to_wdev(vif),
 +                                     FW_DBG_TRIGGER_CHANNEL_SWITCH);
  
        switch (vif->type) {
        case NL80211_IFTYPE_AP:
  
                /* Schedule the time event to a bit before beacon 1,
                 * to make sure we're in the new channel when the
 -               * GO/AP arrives.
 +              * GO/AP arrives. If count <= 1, immediately schedule the
 +              * TE (this might result in some packet loss or connection
 +              * loss).
                 */
 -              apply_time = chsw->device_timestamp +
 -                      ((vif->bss_conf.beacon_int * (chsw->count - 1) -
 -                        IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);
 +              if (chsw->count <= 1)
 +                      apply_time = 0;
 +              else
 +                      apply_time = chsw->device_timestamp +
 +                              ((vif->bss_conf.beacon_int * (chsw->count - 1) -
 +                                IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);
  
                if (chsw->block_tx)
                        iwl_mvm_csa_client_absent(mvm, vif);
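
The apply_time arithmetic converts TUs to the microsecond units of the device timestamp (1 TU = 1024 usec). Illustrative numbers only: with beacon_int = 100 TU and count = 5, the switch happens at beacon 5, so the time event lands 400 TU after the timestamped beacon, minus the client switch-time margin:

u32 apply_time = chsw->device_timestamp +
	((100 * (5 - 1) - IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);
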
@@@ -3978,7 -4029,8 +3988,7 @@@ static void iwl_mvm_mac_flush(struct ie
                return;
  
        /* Make sure we're done with the deferred traffic before flushing */
 -      if (iwl_mvm_is_dqa_supported(mvm))
 -              flush_work(&mvm->add_stream_wk);
 +      flush_work(&mvm->add_stream_wk);
  
        mutex_lock(&mvm->mutex);
        mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@@ -4115,11 -4167,11 +4125,11 @@@ static void iwl_mvm_event_mlme_callback
                                        struct ieee80211_vif *vif,
                                        const struct ieee80211_event *event)
  {
 -#define CHECK_MLME_TRIGGER(_cnt, _fmt...)                     \
 -      do {                                                    \
 -              if ((trig_mlme->_cnt) && --(trig_mlme->_cnt))   \
 -                      break;                                  \
 -              iwl_mvm_fw_dbg_collect_trig(mvm, trig, _fmt);   \
 +#define CHECK_MLME_TRIGGER(_cnt, _fmt...)                             \
 +      do {                                                            \
 +              if ((trig_mlme->_cnt) && --(trig_mlme->_cnt))           \
 +                      break;                                          \
 +              iwl_fw_dbg_collect_trig(&(mvm)->fwrt, trig, _fmt);      \
        } while (0)
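
The counter test implements countdown semantics: a non-zero counter is decremented and only collects once it reaches zero, while a counter that is already zero collects on every event. With a hypothetical initial value of 3 in stop_connection_loss, the first two connection-loss events only decrement it and the third one triggers the dump:

CHECK_MLME_TRIGGER(stop_connection_loss, "CONNECTION_LOSS");
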
  
        struct iwl_fw_dbg_trigger_tlv *trig;
  
        trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
        trig_mlme = (void *)trig->data;
 -      if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
 +      if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
 +                                         ieee80211_vif_to_wdev(vif), trig))
                return;
  
        if (event->u.mlme.data == ASSOC_EVENT) {
@@@ -4172,17 -4223,16 +4182,17 @@@ static void iwl_mvm_event_bar_rx_callba
  
        trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
        ba_trig = (void *)trig->data;
 -      if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
 +      if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
 +                                         ieee80211_vif_to_wdev(vif), trig))
                return;
  
        if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
                return;
  
 -      iwl_mvm_fw_dbg_collect_trig(mvm, trig,
 -                                  "BAR received from %pM, tid %d, ssn %d",
 -                                  event->u.ba.sta->addr, event->u.ba.tid,
 -                                  event->u.ba.ssn);
 +      iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
 +                              "BAR received from %pM, tid %d, ssn %d",
 +                              event->u.ba.sta->addr, event->u.ba.tid,
 +                              event->u.ba.ssn);
  }
  
  static void
@@@ -4198,16 -4248,15 +4208,16 @@@ iwl_mvm_event_frame_timeout_callback(st
  
        trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
        ba_trig = (void *)trig->data;
 -      if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
 +      if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
 +                                         ieee80211_vif_to_wdev(vif), trig))
                return;
  
        if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid)))
                return;
  
 -      iwl_mvm_fw_dbg_collect_trig(mvm, trig,
 -                                  "Frame from %pM timed out, tid %d",
 -                                  event->u.ba.sta->addr, event->u.ba.tid);
 +      iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
 +                              "Frame from %pM timed out, tid %d",
 +                              event->u.ba.sta->addr, event->u.ba.tid);
  }
  
  static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
@@@ -4241,8 -4290,7 +4251,8 @@@ void iwl_mvm_sync_rx_queues_internal(st
        lockdep_assert_held(&mvm->mutex);
  
        /* TODO - remove a000 disablement when we have RXQ config API */
 -      if (!iwl_mvm_has_new_rx_api(mvm) || iwl_mvm_has_new_tx_api(mvm))
 +      if (!iwl_mvm_has_new_rx_api(mvm) ||
 +          mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_A000)
                return;
  
        notif->cookie = mvm->queue_sync_cookie;
index 44c873082a31f47effc988a2a7ce4b35d7f9349d,8999a1199d60d27bdec88d6c638feb85403c3ced..ba7bd049d3d4e0238c0dd81e598ce097972d762e
@@@ -622,9 -622,7 +622,9 @@@ static int rs_tl_turn_on_agg_for_tid(st
  
        IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n",
                     sta->addr, tid);
 -      ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
 +
 +      /* start BA session until the peer sends del BA */
 +      ret = ieee80211_start_tx_ba_session(sta, tid, 0);
        if (ret == -EAGAIN) {
                /*
                 * driver and mac80211 are out of sync
        return ret;
  }
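
The third argument of ieee80211_start_tx_ba_session() is the session timeout in TUs carried in the ADDBA request; 0 disables the inactivity timer. A side-by-side sketch of the two policies:

ret = ieee80211_start_tx_ba_session(sta, tid, 5000); /* old: auto-teardown after ~5 s idle */
ret = ieee80211_start_tx_ba_session(sta, tid, 0);    /* new: open until the peer sends DELBA */
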
  
 -static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, u8 tid,
 -                            struct iwl_lq_sta *lq_data,
 +static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
 +                            u8 tid, struct iwl_lq_sta *lq_sta,
                              struct ieee80211_sta *sta)
  {
 -      if (tid < IWL_MAX_TID_COUNT)
 -              rs_tl_turn_on_agg_for_tid(mvm, lq_data, tid, sta);
 -      else
 +      struct iwl_mvm_tid_data *tid_data;
 +
 +      /*
 +       * In AP mode, tid can be equal to IWL_MAX_TID_COUNT
 +       * when the frame is not QoS
 +       */
 +      if (WARN_ON_ONCE(tid > IWL_MAX_TID_COUNT)) {
                IWL_ERR(mvm, "tid exceeds max TID count: %d/%d\n",
                        tid, IWL_MAX_TID_COUNT);
 +              return;
 +      } else if (tid == IWL_MAX_TID_COUNT) {
 +              return;
 +      }
 +
 +      tid_data = &mvmsta->tid_data[tid];
 +      if ((tid_data->state == IWL_AGG_OFF) &&
 +          (lq_sta->tx_agg_tid_en & BIT(tid)) &&
 +          (tid_data->tx_count_last >= IWL_MVM_RS_AGG_START_THRESHOLD)) {
 +              IWL_DEBUG_RATE(mvm, "try to aggregate tid %d\n", tid);
 +              rs_tl_turn_on_agg_for_tid(mvm, lq_sta, tid, sta);
 +      }
  }
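
The gate above, restated as a predicate (illustrative only, the function name is hypothetical): aggregation is attempted when the TID is idle, allowed by the link-quality mask, and has shown enough recent traffic.

static bool rs_should_try_agg(struct iwl_mvm_tid_data *tid_data,
			      struct iwl_lq_sta *lq_sta, u8 tid)
{
	return tid_data->state == IWL_AGG_OFF &&
	       (lq_sta->tx_agg_tid_en & BIT(tid)) &&
	       tid_data->tx_count_last >= IWL_MVM_RS_AGG_START_THRESHOLD;
}
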
  
  static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
@@@ -771,38 -753,8 +771,38 @@@ static int rs_collect_tpc_data(struct i
                                    window);
  }
  
 +static void rs_update_tid_tpt_stats(struct iwl_mvm *mvm,
 +                                  struct iwl_mvm_sta *mvmsta,
 +                                  u8 tid, int successes)
 +{
 +      struct iwl_mvm_tid_data *tid_data;
 +
 +      if (tid >= IWL_MAX_TID_COUNT)
 +              return;
 +
 +      tid_data = &mvmsta->tid_data[tid];
 +
 +      /*
 +       * Measure if there are enough successful transmits per second.
 +       * These statistics are used only to decide if we can start a
 +       * BA session, so they should be updated only when A-MPDU is
 +       * off.
 +       */
 +      if (tid_data->state != IWL_AGG_OFF)
 +              return;
 +
 +      if (time_is_before_jiffies(tid_data->tpt_meas_start + HZ) ||
 +          (tid_data->tx_count >= IWL_MVM_RS_AGG_START_THRESHOLD)) {
 +              tid_data->tx_count_last = tid_data->tx_count;
 +              tid_data->tx_count = 0;
 +              tid_data->tpt_meas_start = jiffies;
 +      } else {
 +              tid_data->tx_count += successes;
 +      }
 +}
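
A worked trace of the measurement window (illustrative numbers; the window length is one second of jiffies):

/*
 *   t = 0      window opens: tx_count = 0, tpt_meas_start = jiffies
 *   t < 1 s    +12 successes accumulate -> tx_count = 12
 *   t >= 1 s   next update rolls the window: tx_count_last = 12,
 *              tx_count = 0, tpt_meas_start = jiffies
 *
 * tx_count_last is what rs_tl_turn_on_agg() compares against
 * IWL_MVM_RS_AGG_START_THRESHOLD before trying to open a BA session.
 */
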
 +
  static int rs_collect_tlc_data(struct iwl_mvm *mvm,
 -                             struct iwl_lq_sta *lq_sta,
 +                             struct iwl_mvm_sta *mvmsta, u8 tid,
                               struct iwl_scale_tbl_info *tbl,
                               int scale_index, int attempts, int successes)
  {
                return -EINVAL;
  
        if (tbl->column != RS_COLUMN_INVALID) {
 -              struct lq_sta_pers *pers = &lq_sta->pers;
 +              struct lq_sta_pers *pers = &mvmsta->lq_sta.pers;
  
                pers->tx_stats[tbl->column][scale_index].total += attempts;
                pers->tx_stats[tbl->column][scale_index].success += successes;
        }
  
 +      rs_update_tid_tpt_stats(mvm, mvmsta, tid, successes);
 +
        /* Select window for current tx bit rate */
        window = &(tbl->win[scale_index]);
        return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
@@@ -1261,7 -1211,12 +1261,7 @@@ void iwl_mvm_rs_tx_status(struct iwl_mv
        if (time_after(jiffies,
                       (unsigned long)(lq_sta->last_tx +
                                       (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) {
 -              int t;
 -
                IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
 -              for (t = 0; t < IWL_MAX_TID_COUNT; t++)
 -                      ieee80211_stop_tx_ba_session(sta, t);
 -
                iwl_mvm_rs_rate_init(mvm, sta, info->band, false);
                return;
        }
         * first index into rate scale table.
         */
        if (info->flags & IEEE80211_TX_STAT_AMPDU) {
-               rs_collect_tpc_data(mvm, lq_sta, curr_tbl, lq_rate.index,
+               rs_collect_tpc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index,
                                    info->status.ampdu_len,
                                    info->status.ampdu_ack_len,
                                    reduced_txp);
                if (info->status.ampdu_ack_len == 0)
                        info->status.ampdu_len = 1;
  
-               rs_collect_tlc_data(mvm, mvmsta, tid, curr_tbl,
-                                   lq_rate.index,
 -              rs_collect_tlc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index,
++              rs_collect_tlc_data(mvm, mvmsta, tid, curr_tbl, tx_resp_rate.index,
                                    info->status.ampdu_len,
                                    info->status.ampdu_ack_len);
  
                                continue;
  
                        rs_collect_tpc_data(mvm, lq_sta, tmp_tbl,
-                                           lq_rate.index, 1,
+                                           tx_resp_rate.index, 1,
                                            i < retries ? 0 : legacy_success,
                                            reduced_txp);
 -                      rs_collect_tlc_data(mvm, lq_sta, tmp_tbl,
 +                      rs_collect_tlc_data(mvm, mvmsta, tid, tmp_tbl,
-                                           lq_rate.index, 1,
+                                           tx_resp_rate.index, 1,
                                            i < retries ? 0 : legacy_success);
                }
  
@@@ -1719,14 -1673,14 +1718,14 @@@ static void rs_set_amsdu_len(struct iwl
                             struct iwl_scale_tbl_info *tbl,
                             enum rs_action scale_action)
  {
 -      struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta);
 +      struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
  
        if ((!is_vht(&tbl->rate) && !is_ht(&tbl->rate)) ||
            tbl->rate.index < IWL_RATE_MCS_5_INDEX ||
            scale_action == RS_ACTION_DOWNSCALE)
 -              sta_priv->tlc_amsdu = false;
 +              mvmsta->tlc_amsdu = false;
        else
 -              sta_priv->tlc_amsdu = true;
 +              mvmsta->tlc_amsdu = true;
  }
  
  /*
@@@ -2274,10 -2228,11 +2273,10 @@@ static void rs_rate_scale_perform(struc
        u16 high_low;
        s32 sr;
        u8 prev_agg = lq_sta->is_agg;
 -      struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta);
 -      struct iwl_mvm_tid_data *tid_data;
 +      struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct rs_rate *rate;
  
 -      lq_sta->is_agg = !!sta_priv->agg_tids;
 +      lq_sta->is_agg = !!mvmsta->agg_tids;
  
        /*
         * Select rate-scale / modulation-mode table to work with in
@@@ -2525,12 -2480,44 +2524,12 @@@ lq_update
                }
        }
  
 +      if (!ndp)
 +              rs_tl_turn_on_agg(mvm, mvmsta, tid, lq_sta, sta);
 +
        if (done_search && lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_ENDED) {
 -              /* If the "active" (non-search) mode was legacy,
 -               * and we've tried switching antennas,
 -               * but we haven't been able to try HT modes (not available),
 -               * stay with best antenna legacy modulation for a while
 -               * before next round of mode comparisons. */
                tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
 -              if (is_legacy(&tbl1->rate)) {
 -                      IWL_DEBUG_RATE(mvm, "LQ: STAY in legacy table\n");
 -
 -                      if (tid != IWL_MAX_TID_COUNT) {
 -                              tid_data = &sta_priv->tid_data[tid];
 -                              if (tid_data->state != IWL_AGG_OFF) {
 -                                      IWL_DEBUG_RATE(mvm,
 -                                                     "Stop aggregation on tid %d\n",
 -                                                     tid);
 -                                      ieee80211_stop_tx_ba_session(sta, tid);
 -                              }
 -                      }
 -                      rs_set_stay_in_table(mvm, 1, lq_sta);
 -              } else {
 -              /* If we're in an HT mode, and all 3 mode switch actions
 -               * have been tried and compared, stay in this best modulation
 -               * mode for a while before next round of mode comparisons. */
 -                      if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
 -                          (lq_sta->tx_agg_tid_en & (1 << tid)) &&
 -                          (tid != IWL_MAX_TID_COUNT)) {
 -                              tid_data = &sta_priv->tid_data[tid];
 -                              if (tid_data->state == IWL_AGG_OFF && !ndp) {
 -                                      IWL_DEBUG_RATE(mvm,
 -                                                     "try to aggregate tid %d\n",
 -                                                     tid);
 -                                      rs_tl_turn_on_agg(mvm, tid,
 -                                                        lq_sta, sta);
 -                              }
 -                      }
 -                      rs_set_stay_in_table(mvm, 0, lq_sta);
 -              }
 +              rs_set_stay_in_table(mvm, is_legacy(&tbl1->rate), lq_sta);
        }
  }
  
@@@ -2913,10 -2900,10 +2912,10 @@@ static void rs_get_rate(void *mvm_r, st
  static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
                          gfp_t gfp)
  {
 -      struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta);
 +      struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_rate;
        struct iwl_mvm *mvm  = IWL_OP_MODE_GET_MVM(op_mode);
 -      struct iwl_lq_sta *lq_sta = &sta_priv->lq_sta;
 +      struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
  
        IWL_DEBUG_RATE(mvm, "create station rate scale window\n");
  
        memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
        lq_sta->pers.last_rssi = S8_MIN;
  
 -      return &sta_priv->lq_sta;
 +      return &mvmsta->lq_sta;
  }
  
  static int rs_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
@@@ -3122,8 -3109,8 +3121,8 @@@ void iwl_mvm_rs_rate_init(struct iwl_mv
        struct ieee80211_hw *hw = mvm->hw;
        struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
        struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
 -      struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta);
 -      struct iwl_lq_sta *lq_sta = &sta_priv->lq_sta;
 +      struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 +      struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
        struct ieee80211_supported_band *sband;
        unsigned long supp; /* must be unsigned long for for_each_set_bit */
  
  
        sband = hw->wiphy->bands[band];
  
 -      lq_sta->lq.sta_id = sta_priv->sta_id;
 -      sta_priv->tlc_amsdu = false;
 +      lq_sta->lq.sta_id = mvmsta->sta_id;
 +      mvmsta->tlc_amsdu = false;
  
        for (j = 0; j < LQ_SIZE; j++)
                rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]);
  
        IWL_DEBUG_RATE(mvm,
                       "LQ: *** rate scale station global init for station %d ***\n",
 -                     sta_priv->sta_id);
 +                     mvmsta->sta_id);
        /* TODO: what is a good starting rate for STA? About middle? Maybe not
         * the lowest or the highest rate.. Could consider using RSSI from
         * previous packets? Need to have IEEE 802.1X auth succeed immediately
index 4fbf102b3a985071c7c9af43379ac4a5c1d91901,71c8b800ffa99874bd4120e59e2aa7f51212d2da..67ffd9774712b26c4a25d921a043959e12cfd113
@@@ -63,6 -63,7 +63,6 @@@
  #include "iwl-trans.h"
  #include "mvm.h"
  #include "fw-api.h"
 -#include "fw-dbg.h"
  
  static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
                                   int queue, struct ieee80211_sta *sta)
@@@ -635,9 -636,9 +635,9 @@@ static bool iwl_mvm_reorder(struct iwl_
  
        baid_data = rcu_dereference(mvm->baid_map[baid]);
        if (!baid_data) {
-               WARN(!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN),
-                    "Received baid %d, but no data exists for this BAID - reorder data 0x%x\n",
-                    baid, reorder);
+               IWL_DEBUG_RX(mvm,
+                            "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
+                             baid, reorder);
                return false;
        }
  
@@@ -758,9 -759,9 +758,9 @@@ static void iwl_mvm_agg_rx_received(str
  
        data = rcu_dereference(mvm->baid_map[baid]);
        if (!data) {
-               WARN(!(reorder_data & IWL_RX_MPDU_REORDER_BA_OLD_SN),
-                    "OLD_SN isn't set, but no data exists for baid %d - reorder data 0x%x\n",
-                    baid, reorder_data);
+               IWL_DEBUG_RX(mvm,
+                            "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
+                             baid, reorder_data);
                goto out;
        }
  
@@@ -853,7 -854,7 +853,7 @@@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm 
  
        rcu_read_lock();
  
 -      if (le16_to_cpu(desc->status) & IWL_RX_MPDU_STATUS_SRC_STA_FOUND) {
 +      if (desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
                u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK;
  
                if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
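
The rewritten test keeps desc->status in its __le16 form and converts the constant instead; both forms test the same bit on any byte order, but the new one lets sparse check the endianness annotation and folds cpu_to_le16() at compile time. A minimal sketch of the equivalence:

__le16 status = desc->status;
bool a = le16_to_cpu(status) & IWL_RX_MPDU_STATUS_SRC_STA_FOUND;
bool b = status & cpu_to_le16(IWL_RX_MPDU_STATUS_SRC_STA_FOUND);
/* a == b always; only the second form is sparse-clean */
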
                        rssi = le32_to_cpu(rssi_trig->rssi);
  
                        trig_check =
 -                              iwl_fw_dbg_trigger_check_stop(mvm, mvmsta->vif,
 +                              iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
 +                                                            ieee80211_vif_to_wdev(mvmsta->vif),
                                                              trig);
                        if (trig_check && rx_status->signal < rssi)
 -                              iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL);
 +                              iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
 +                                                      NULL);
                }
  
                if (ieee80211_is_data(hdr->frame_control))
index f88202c38d4c77d14ad55273f642d4f8b557ebe2,027ee5e72172c85f9eaa98f1fd1a26489ab74820..411a2055dc451d2ce18421bd4c520b9068bba942
@@@ -121,7 -121,8 +121,8 @@@ int iwl_mvm_sta_send_to_fw(struct iwl_m
                .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
                .add_modify = update ? 1 : 0,
                .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
-                                                STA_FLG_MIMO_EN_MSK),
+                                                STA_FLG_MIMO_EN_MSK |
+                                                STA_FLG_RTS_MIMO_PROT),
                .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
        };
        int ret;
@@@ -290,12 -291,66 +291,12 @@@ static void iwl_mvm_rx_agg_session_expi
                goto unlock;
  
        mvm_sta = iwl_mvm_sta_from_mac80211(sta);
-       ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
-                                         sta->addr, ba_data->tid);
+       ieee80211_rx_ba_timer_expired(mvm_sta->vif,
+                                     sta->addr, ba_data->tid);
  unlock:
        rcu_read_unlock();
  }
  
 -static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
 -                               struct ieee80211_sta *sta)
 -{
 -      unsigned long used_hw_queues;
 -      struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 -      unsigned int wdg_timeout =
 -              iwl_mvm_get_wd_timeout(mvm, NULL, true, false);
 -      u32 ac;
 -
 -      lockdep_assert_held(&mvm->mutex);
 -
 -      used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL);
 -
 -      /* Find available queues, and allocate them to the ACs */
 -      for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
 -              u8 queue = find_first_zero_bit(&used_hw_queues,
 -                                             mvm->first_agg_queue);
 -
 -              if (queue >= mvm->first_agg_queue) {
 -                      IWL_ERR(mvm, "Failed to allocate STA queue\n");
 -                      return -EBUSY;
 -              }
 -
 -              __set_bit(queue, &used_hw_queues);
 -              mvmsta->hw_queue[ac] = queue;
 -      }
 -
 -      /* Found a place for all queues - enable them */
 -      for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
 -              iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
 -                                    mvmsta->hw_queue[ac],
 -                                    iwl_mvm_ac_to_tx_fifo[ac], 0,
 -                                    wdg_timeout);
 -              mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
 -      }
 -
 -      return 0;
 -}
 -
 -static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
 -                                  struct ieee80211_sta *sta)
 -{
 -      struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 -      unsigned long sta_msk;
 -      int i;
 -
 -      lockdep_assert_held(&mvm->mutex);
 -
 -      /* disable the TDLS STA-specific queues */
 -      sta_msk = mvmsta->tfd_queue_msk;
 -      for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
 -              iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
 -}
 -
  /* Disable aggregations for a bitmap of TIDs for a given station */
  static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
                                        unsigned long disable_agg_tids,
@@@ -703,7 -758,7 +704,7 @@@ static int iwl_mvm_sta_alloc_queue(stru
  {
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_trans_txq_scd_cfg cfg = {
 -              .fifo = iwl_mvm_ac_to_tx_fifo[ac],
 +              .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
                .sta_id = mvmsta->sta_id,
                .tid = tid,
                .frame_limit = IWL_FRAME_LIMIT,
@@@ -1261,7 -1316,7 +1262,7 @@@ static void iwl_mvm_realloc_queues_afte
                        u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
  
                        cfg.tid = i;
 -                      cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
 +                      cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
                        cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
                                         txq_id ==
                                         IWL_MVM_DQA_BSS_CLIENT_QUEUE);
                        mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
                }
        }
 +}
  
 -      atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0);
 +static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
 +                                    struct iwl_mvm_int_sta *sta,
 +                                    const u8 *addr,
 +                                    u16 mac_id, u16 color)
 +{
 +      struct iwl_mvm_add_sta_cmd cmd;
 +      int ret;
 +      u32 status;
 +
 +      lockdep_assert_held(&mvm->mutex);
 +
 +      memset(&cmd, 0, sizeof(cmd));
 +      cmd.sta_id = sta->sta_id;
 +      cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
 +                                                           color));
 +      if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
 +              cmd.station_type = sta->type;
 +
 +      if (!iwl_mvm_has_new_tx_api(mvm))
 +              cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
 +      cmd.tid_disable_tx = cpu_to_le16(0xffff);
 +
 +      if (addr)
 +              memcpy(cmd.addr, addr, ETH_ALEN);
 +
 +      ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
 +                                        iwl_mvm_add_sta_cmd_size(mvm),
 +                                        &cmd, &status);
 +      if (ret)
 +              return ret;
 +
 +      switch (status & IWL_ADD_STA_STATUS_MASK) {
 +      case ADD_STA_SUCCESS:
 +              IWL_DEBUG_INFO(mvm, "Internal station added.\n");
 +              return 0;
 +      default:
 +              ret = -EIO;
 +              IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
 +                      status);
 +              break;
 +      }
 +      return ret;
  }
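
The helper is used further down in the HW-restart path: an empty internal station is added first because allocating a queue requires a valid station in the firmware. Usage fragment, with names as in that path:

struct iwl_mvm_int_sta tmp_sta = {
	.sta_id = sta_id,
	.type = mvm_sta->sta_type,
};
ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
				 mvmvif->id, mvmvif->color);
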
  
  int iwl_mvm_add_sta(struct iwl_mvm *mvm,
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_rxq_dup_data *dup_data;
        int i, ret, sta_id;
 +      bool sta_update = false;
 +      unsigned int sta_flags = 0;
  
        lockdep_assert_held(&mvm->mutex);
  
  
        spin_lock_init(&mvm_sta->lock);
  
 -      /* In DQA mode, if this is a HW restart, re-alloc existing queues */
 -      if (iwl_mvm_is_dqa_supported(mvm) &&
 -          test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
 +      /* if this is a HW restart re-alloc existing queues */
 +      if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
 +              struct iwl_mvm_int_sta tmp_sta = {
 +                      .sta_id = sta_id,
 +                      .type = mvm_sta->sta_type,
 +              };
 +
 +              /*
 +               * First add an empty station since allocating
 +               * a queue requires a valid station
 +               */
 +              ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
 +                                               mvmvif->id, mvmvif->color);
 +              if (ret)
 +                      goto err;
 +
                iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
 +              sta_update = true;
 +              sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
                goto update_fw;
        }
  
        mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
  
        /* HW restart, don't assume the memory has been zeroed */
 -      atomic_set(&mvm->pending_frames[sta_id], 0);
        mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
        mvm_sta->tfd_queue_msk = 0;
  
 -      /*
 -       * Allocate new queues for a TDLS station, unless we're in DQA mode,
 -       * and then they'll be allocated dynamically
 -       */
 -      if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) {
 -              ret = iwl_mvm_tdls_sta_init(mvm, sta);
 -              if (ret)
 -                      return ret;
 -      } else if (!iwl_mvm_is_dqa_supported(mvm)) {
 -              for (i = 0; i < IEEE80211_NUM_ACS; i++)
 -                      if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
 -                              mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
 -      }
 -
        /* for HW restart - reset everything but the sequence number */
        for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
                u16 seq = mvm_sta->tid_data[i].seq_number;
                memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
                mvm_sta->tid_data[i].seq_number = seq;
  
 -              if (!iwl_mvm_is_dqa_supported(mvm))
 -                      continue;
 -
                /*
                 * Mark all queues for this STA as unallocated and defer TX
                 * frames until the queue is allocated
                mvm_sta->dup_data = dup_data;
        }
  
 -      if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
 +      if (!iwl_mvm_has_new_tx_api(mvm)) {
                ret = iwl_mvm_reserve_sta_stream(mvm, sta,
                                                 ieee80211_vif_type_p2p(vif));
                if (ret)
        }
  
  update_fw:
 -      ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
 +      ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
        if (ret)
                goto err;
  
        return 0;
  
  err:
 -      if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
 -              iwl_mvm_tdls_sta_deinit(mvm, sta);
        return ret;
  }
  
@@@ -1520,6 -1536,79 +1521,6 @@@ static int iwl_mvm_rm_sta_common(struc
        return 0;
  }
  
 -void iwl_mvm_sta_drained_wk(struct work_struct *wk)
 -{
 -      struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk);
 -      u8 sta_id;
 -
 -      /*
 -       * The mutex is needed because of the SYNC cmd, but not only: if the
 -       * work would run concurrently with iwl_mvm_rm_sta, it would run before
 -       * iwl_mvm_rm_sta sets the station as busy, and exit. Then
 -       * iwl_mvm_rm_sta would set the station as busy, and nobody will clean
 -       * that later.
 -       */
 -      mutex_lock(&mvm->mutex);
 -
 -      for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) {
 -              int ret;
 -              struct ieee80211_sta *sta =
 -                      rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
 -                                                lockdep_is_held(&mvm->mutex));
 -
 -              /*
 -               * This station is in use or RCU-removed; the latter happens in
 -               * managed mode, where mac80211 removes the station before we
 -               * can remove it from firmware (we can only do that after the
 -               * MAC is marked unassociated), and possibly while the deauth
 -               * frame to disconnect from the AP is still queued. Then, the
 -               * station pointer is -ENOENT when the last skb is reclaimed.
 -               */
 -              if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT)
 -                      continue;
 -
 -              if (PTR_ERR(sta) == -EINVAL) {
 -                      IWL_ERR(mvm, "Drained sta %d, but it is internal?\n",
 -                              sta_id);
 -                      continue;
 -              }
 -
 -              if (!sta) {
 -                      IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n",
 -                              sta_id);
 -                      continue;
 -              }
 -
 -              WARN_ON(PTR_ERR(sta) != -EBUSY);
 -              /* This station was removed and we waited until it got drained,
 -               * we can now proceed and remove it.
 -               */
 -              ret = iwl_mvm_rm_sta_common(mvm, sta_id);
 -              if (ret) {
 -                      IWL_ERR(mvm,
 -                              "Couldn't remove sta %d after it was drained\n",
 -                              sta_id);
 -                      continue;
 -              }
 -              RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
 -              clear_bit(sta_id, mvm->sta_drained);
 -
 -              if (mvm->tfd_drained[sta_id]) {
 -                      unsigned long i, msk = mvm->tfd_drained[sta_id];
 -
 -                      for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
 -                              iwl_mvm_disable_txq(mvm, i, i,
 -                                                  IWL_MAX_TID_COUNT, 0);
 -
 -                      mvm->tfd_drained[sta_id] = 0;
 -                      IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
 -                                     sta_id, msk);
 -              }
 -      }
 -
 -      mutex_unlock(&mvm->mutex);
 -}
 -
  static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
                                       struct ieee80211_vif *vif,
                                       struct iwl_mvm_sta *mvm_sta)
  int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
                                  struct iwl_mvm_sta *mvm_sta)
  {
 -      int i, ret;
 +      int i;
  
        for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
                u16 txq_id;
 +              int ret;
  
                spin_lock_bh(&mvm_sta->lock);
                txq_id = mvm_sta->tid_data[i].txq_id;
  
                ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
                if (ret)
 -                      break;
 +                      return ret;
        }
  
 -      return ret;
 +      return 0;
  }
  
  int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
        if (iwl_mvm_has_new_rx_api(mvm))
                kfree(mvm_sta->dup_data);
  
 -      if ((vif->type == NL80211_IFTYPE_STATION &&
 -           mvmvif->ap_sta_id == sta_id) ||
 -          iwl_mvm_is_dqa_supported(mvm)){
 -              ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
 -              if (ret)
 -                      return ret;
 -              /* flush its queues here since we are freeing mvm_sta */
 -              ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
 -              if (ret)
 -                      return ret;
 -              if (iwl_mvm_has_new_tx_api(mvm)) {
 -                      ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
 -              } else {
 -                      u32 q_mask = mvm_sta->tfd_queue_msk;
 +      ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
 +      if (ret)
 +              return ret;
  
 -                      ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
 -                                                           q_mask);
 -              }
 -              if (ret)
 -                      return ret;
 -              ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
 -
 -              /* If DQA is supported - the queues can be disabled now */
 -              if (iwl_mvm_is_dqa_supported(mvm)) {
 -                      iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
 -                      /*
 -                       * If pending_frames is set at this point - it must be
 -                       * driver internal logic error, since queues are empty
 -                       * and removed successfully.
 -                       * warn on it but set it to 0 anyway to avoid station
 -                       * not being removed later in the function
 -                       */
 -                      WARN_ON(atomic_xchg(&mvm->pending_frames[sta_id], 0));
 -              }
 +      /* flush its queues here since we are freeing mvm_sta */
 +      ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
 +      if (ret)
 +              return ret;
 +      if (iwl_mvm_has_new_tx_api(mvm)) {
 +              ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
 +      } else {
 +              u32 q_mask = mvm_sta->tfd_queue_msk;
  
 -              /* If there is a TXQ still marked as reserved - free it */
 -              if (iwl_mvm_is_dqa_supported(mvm) &&
 -                  mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
 -                      u8 reserved_txq = mvm_sta->reserved_queue;
 -                      enum iwl_mvm_queue_status *status;
 -
 -                      /*
 -                       * If no traffic has gone through the reserved TXQ - it
 -                       * is still marked as IWL_MVM_QUEUE_RESERVED, and
 -                       * should be manually marked as free again
 -                       */
 -                      spin_lock_bh(&mvm->queue_info_lock);
 -                      status = &mvm->queue_info[reserved_txq].status;
 -                      if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
 -                               (*status != IWL_MVM_QUEUE_FREE),
 -                               "sta_id %d reserved txq %d status %d",
 -                               sta_id, reserved_txq, *status)) {
 -                              spin_unlock_bh(&mvm->queue_info_lock);
 -                              return -EINVAL;
 -                      }
 +              ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
 +                                                   q_mask);
 +      }
 +      if (ret)
 +              return ret;
  
 -                      *status = IWL_MVM_QUEUE_FREE;
 +      ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
 +
 +      iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
 +
 +      /* If there is a TXQ still marked as reserved - free it */
 +      if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
 +              u8 reserved_txq = mvm_sta->reserved_queue;
 +              enum iwl_mvm_queue_status *status;
 +
 +              /*
 +               * If no traffic has gone through the reserved TXQ - it
 +               * is still marked as IWL_MVM_QUEUE_RESERVED, and
 +               * should be manually marked as free again
 +               */
 +              spin_lock_bh(&mvm->queue_info_lock);
 +              status = &mvm->queue_info[reserved_txq].status;
 +              if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
 +                       (*status != IWL_MVM_QUEUE_FREE),
 +                       "sta_id %d reserved txq %d status %d",
 +                       sta_id, reserved_txq, *status)) {
                        spin_unlock_bh(&mvm->queue_info_lock);
 +                      return -EINVAL;
                }
  
 -              if (vif->type == NL80211_IFTYPE_STATION &&
 -                  mvmvif->ap_sta_id == sta_id) {
 -                      /* if associated - we can't remove the AP STA now */
 -                      if (vif->bss_conf.assoc)
 -                              return ret;
 +              *status = IWL_MVM_QUEUE_FREE;
 +              spin_unlock_bh(&mvm->queue_info_lock);
 +      }
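
The status handling above follows a small lifecycle (simplified sketch; only the transitions relevant to this function are shown):

/*
 *   FREE --reserve--> RESERVED --first frame--> READY
 *     ^                   |
 *     +------ rm_sta -----+   (no traffic ever used the queue)
 *
 * The WARN catches any other state, which would mean the
 * bookkeeping in mvm->queue_info went out of sync.
 */
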
  
 -                      /* unassoc - go ahead - remove the AP STA now */
 -                      mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
 +      if (vif->type == NL80211_IFTYPE_STATION &&
 +          mvmvif->ap_sta_id == sta_id) {
 +              /* if associated - we can't remove the AP STA now */
 +              if (vif->bss_conf.assoc)
 +                      return ret;
  
 -                      /* clear d0i3_ap_sta_id if no longer relevant */
 -                      if (mvm->d0i3_ap_sta_id == sta_id)
 -                              mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
 -              }
 +              /* unassoc - go ahead - remove the AP STA now */
 +              mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
 +
 +              /* clear d0i3_ap_sta_id if no longer relevant */
 +              if (mvm->d0i3_ap_sta_id == sta_id)
 +                      mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
        }
  
        /*
         * calls the drain worker.
         */
        spin_lock_bh(&mvm_sta->lock);
 +      spin_unlock_bh(&mvm_sta->lock);
  
 -      /*
 -       * There are frames pending on the AC queues for this station.
 -       * We need to wait until all the frames are drained...
 -       */
 -      if (atomic_read(&mvm->pending_frames[sta_id])) {
 -              rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id],
 -                                 ERR_PTR(-EBUSY));
 -              spin_unlock_bh(&mvm_sta->lock);
 -
 -              /* disable TDLS sta queues on drain complete */
 -              if (sta->tdls) {
 -                      mvm->tfd_drained[sta_id] = mvm_sta->tfd_queue_msk;
 -                      IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n", sta_id);
 -              }
 -
 -              ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
 -      } else {
 -              spin_unlock_bh(&mvm_sta->lock);
 -
 -              if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
 -                      iwl_mvm_tdls_sta_deinit(mvm, sta);
 -
 -              ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
 -              RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
 -      }
 +      ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
 +      RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
  
        return ret;
  }
@@@ -1699,6 -1823,50 +1700,6 @@@ void iwl_mvm_dealloc_int_sta(struct iwl
        sta->sta_id = IWL_MVM_INVALID_STA;
  }
  
 -static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
 -                                    struct iwl_mvm_int_sta *sta,
 -                                    const u8 *addr,
 -                                    u16 mac_id, u16 color)
 -{
 -      struct iwl_mvm_add_sta_cmd cmd;
 -      int ret;
 -      u32 status;
 -
 -      lockdep_assert_held(&mvm->mutex);
 -
 -      memset(&cmd, 0, sizeof(cmd));
 -      cmd.sta_id = sta->sta_id;
 -      cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
 -                                                           color));
 -      if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
 -              cmd.station_type = sta->type;
 -
 -      if (!iwl_mvm_has_new_tx_api(mvm))
 -              cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
 -      cmd.tid_disable_tx = cpu_to_le16(0xffff);
 -
 -      if (addr)
 -              memcpy(cmd.addr, addr, ETH_ALEN);
 -
 -      ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
 -                                        iwl_mvm_add_sta_cmd_size(mvm),
 -                                        &cmd, &status);
 -      if (ret)
 -              return ret;
 -
 -      switch (status & IWL_ADD_STA_STATUS_MASK) {
 -      case ADD_STA_SUCCESS:
 -              IWL_DEBUG_INFO(mvm, "Internal station added.\n");
 -              return 0;
 -      default:
 -              ret = -EIO;
 -              IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
 -                      status);
 -              break;
 -      }
 -      return ret;
 -}
 -
  static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm)
  {
        unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
                                                    IWL_MAX_TID_COUNT,
                                                    wdg_timeout);
                mvm->aux_queue = queue;
 -      } else if (iwl_mvm_is_dqa_supported(mvm)) {
 +      } else {
                struct iwl_trans_txq_scd_cfg cfg = {
                        .fifo = IWL_MVM_TX_FIFO_MCAST,
                        .sta_id = mvm->aux_sta.sta_id,
  
                iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
                                   wdg_timeout);
 -      } else {
 -              iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
 -                                    IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
        }
  }
  
@@@ -1821,7 -1992,7 +1822,7 @@@ int iwl_mvm_send_add_bcast_sta(struct i
  
        lockdep_assert_held(&mvm->mutex);
  
 -      if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
 +      if (!iwl_mvm_has_new_tx_api(mvm)) {
                if (vif->type == NL80211_IFTYPE_AP ||
                    vif->type == NL80211_IFTYPE_ADHOC)
                        queue = mvm->probe_queue;
@@@ -1908,7 -2079,8 +1909,7 @@@ int iwl_mvm_send_rm_bcast_sta(struct iw
  
        lockdep_assert_held(&mvm->mutex);
  
 -      if (iwl_mvm_is_dqa_supported(mvm))
 -              iwl_mvm_free_bcast_sta_queues(mvm, vif);
 +      iwl_mvm_free_bcast_sta_queues(mvm, vif);
  
        ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
        if (ret)
  int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
  {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 -      u32 qmask = 0;
  
        lockdep_assert_held(&mvm->mutex);
  
 -      if (!iwl_mvm_is_dqa_supported(mvm)) {
 -              qmask = iwl_mvm_mac_get_queues_mask(vif);
 -
 -              /*
 -               * The firmware defines the TFD queue mask to only be relevant
 -               * for *unicast* queues, so the multicast (CAB) queue shouldn't
 -               * be included. This only happens in NL80211_IFTYPE_AP vif type,
 -               * so the next line will only have an effect there.
 -               */
 -              qmask &= ~BIT(vif->cab_queue);
 -      }
 -
 -      return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
 +      return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
                                        ieee80211_vif_type_p2p(vif),
                                        IWL_STA_GENERAL_PURPOSE);
  }
   * @mvm: the mvm component
   * @vif: the interface to which the broadcast station is added
   * @bsta: the broadcast station to add. */
 -int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 +int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
  {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
@@@ -1965,7 -2150,7 +1966,7 @@@ void iwl_mvm_dealloc_bcast_sta(struct i
   * Send the FW a request to remove the station from its internal data
   * structures, and in addition remove it from the local data structure.
   */
 -int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 +int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
  {
        int ret;
  
@@@ -2004,6 -2189,9 +2005,6 @@@ int iwl_mvm_add_mcast_sta(struct iwl_mv
  
        lockdep_assert_held(&mvm->mutex);
  
 -      if (!iwl_mvm_is_dqa_supported(mvm))
 -              return 0;
 -
        if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
                    vif->type != NL80211_IFTYPE_ADHOC))
                return -ENOTSUPP;
@@@ -2068,6 -2256,9 +2069,6 @@@ int iwl_mvm_rm_mcast_sta(struct iwl_mv
  
        lockdep_assert_held(&mvm->mutex);
  
 -      if (!iwl_mvm_is_dqa_supported(mvm))
 -              return 0;
 -
        iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
  
        iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
@@@ -2317,6 -2508,8 +2318,6 @@@ int iwl_mvm_sta_tx_agg(struct iwl_mvm *
                mvm_sta->tid_disable_agg &= ~BIT(tid);
        } else {
                /* In DQA-mode the queue isn't removed on agg termination */
 -              if (!iwl_mvm_is_dqa_supported(mvm))
 -                      mvm_sta->tfd_queue_msk &= ~BIT(queue);
                mvm_sta->tid_disable_agg |= BIT(tid);
        }
  
@@@ -2419,17 -2612,19 +2420,17 @@@ int iwl_mvm_sta_tx_agg_start(struct iwl
                        ret = -ENXIO;
                        goto release_locks;
                }
 -      } else if (iwl_mvm_is_dqa_supported(mvm) &&
 -                 unlikely(mvm->queue_info[txq_id].status ==
 +      } else if (unlikely(mvm->queue_info[txq_id].status ==
                            IWL_MVM_QUEUE_SHARED)) {
                ret = -ENXIO;
                IWL_DEBUG_TX_QUEUES(mvm,
                                    "Can't start tid %d agg on shared queue!\n",
                                    tid);
                goto release_locks;
 -      } else if (!iwl_mvm_is_dqa_supported(mvm) ||
 -          mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
 +      } else if (mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
                txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
 -                                               mvm->first_agg_queue,
 -                                               mvm->last_agg_queue);
 +                                               IWL_MVM_DQA_MIN_DATA_QUEUE,
 +                                               IWL_MVM_DQA_MAX_DATA_QUEUE);
                if (txq_id < 0) {
                        ret = txq_id;
                        IWL_ERR(mvm, "Failed to allocate agg queue\n");
@@@ -2547,34 -2742,37 +2548,34 @@@ int iwl_mvm_sta_tx_agg_oper(struct iwl_
        queue_status = mvm->queue_info[queue].status;
        spin_unlock_bh(&mvm->queue_info_lock);
  
 -      /* In DQA mode, the existing queue might need to be reconfigured */
 -      if (iwl_mvm_is_dqa_supported(mvm)) {
 -              /* Maybe there is no need to even alloc a queue... */
 -              if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
 -                      alloc_queue = false;
 +      /* Maybe there is no need to even alloc a queue... */
 +      if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
 +              alloc_queue = false;
  
 +      /*
 +       * Only reconfig the SCD for the queue if the window size has
 +       * changed from the current one (i.e. become smaller)
 +       */
 +      if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
                /*
 -               * Only reconfig the SCD for the queue if the window size has
 -               * changed from current (become smaller)
 +               * If reconfiguring an existing queue, it first must be
 +               * drained
                 */
 -              if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
 -                      /*
 -                       * If reconfiguring an existing queue, it first must be
 -                       * drained
 -                       */
 -                      ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
 -                                                           BIT(queue));
 -                      if (ret) {
 -                              IWL_ERR(mvm,
 -                                      "Error draining queue before reconfig\n");
 -                              return ret;
 -                      }
 +              ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
 +                                                   BIT(queue));
 +              if (ret) {
 +                      IWL_ERR(mvm,
 +                              "Error draining queue before reconfig\n");
 +                      return ret;
 +              }
  
 -                      ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
 -                                                 mvmsta->sta_id, tid,
 -                                                 buf_size, ssn);
 -                      if (ret) {
 -                              IWL_ERR(mvm,
 -                                      "Error reconfiguring TXQ #%d\n", queue);
 -                              return ret;
 -                      }
 +              ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
 +                                         mvmsta->sta_id, tid,
 +                                         buf_size, ssn);
 +              if (ret) {
 +                      IWL_ERR(mvm,
 +                              "Error reconfiguring TXQ #%d\n", queue);
 +                      return ret;
                }
        }
  
@@@ -2670,6 -2868,18 +2671,6 @@@ int iwl_mvm_sta_tx_agg_stop(struct iwl_
                                    "ssn = %d, next_recl = %d\n",
                                    tid_data->ssn, tid_data->next_reclaimed);
  
 -              /*
 -               * There are still packets for this RA / TID in the HW.
 -               * Not relevant for DQA mode, since there is no need to disable
 -               * the queue.
 -               */
 -              if (!iwl_mvm_is_dqa_supported(mvm) &&
 -                  tid_data->ssn != tid_data->next_reclaimed) {
 -                      tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
 -                      err = 0;
 -                      break;
 -              }
 -
                tid_data->ssn = 0xffff;
                tid_data->state = IWL_AGG_OFF;
                spin_unlock_bh(&mvmsta->lock);
                ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
  
                iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
 -
 -              if (!iwl_mvm_is_dqa_supported(mvm)) {
 -                      int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
 -
 -                      iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
 -              }
                return 0;
        case IWL_AGG_STARTING:
        case IWL_EMPTYING_HW_QUEUE_ADDBA:
@@@ -2746,6 -2962,13 +2747,6 @@@ int iwl_mvm_sta_tx_agg_flush(struct iwl
                iwl_mvm_drain_sta(mvm, mvmsta, false);
  
                iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
 -
 -              if (!iwl_mvm_is_dqa_supported(mvm)) {
 -                      int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
 -
 -                      iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
 -                                          tid, 0);
 -              }
        }
  
        return 0;
@@@ -3364,6 -3587,15 +3365,6 @@@ void iwl_mvm_sta_modify_sleep_tx_count(
                        u16 n_queued;
  
                        tid_data = &mvmsta->tid_data[tid];
 -                      if (WARN(!iwl_mvm_is_dqa_supported(mvm) &&
 -                               tid_data->state != IWL_AGG_ON &&
 -                               tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
 -                               "TID %d state is %d\n",
 -                               tid, tid_data->state)) {
 -                              spin_unlock_bh(&mvmsta->lock);
 -                              ieee80211_sta_eosp(sta);
 -                              return;
 -                      }
  
                        n_queued = iwl_mvm_tid_queued(mvm, tid_data);
                        if (n_queued > remaining) {
@@@ -3457,8 -3689,13 +3458,8 @@@ void iwl_mvm_sta_modify_disable_tx_ap(s
  
        mvm_sta->disable_tx = disable;
  
 -      /*
 -       * Tell mac80211 to start/stop queuing tx for this station,
 -       * but don't stop queuing if there are still pending frames
 -       * for this station.
 -       */
 -      if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
 -              ieee80211_sta_block_awake(mvm->hw, sta, disable);
 +      /* Tell mac80211 to start/stop queuing tx for this station */
 +      ieee80211_sta_block_awake(mvm->hw, sta, disable);
  
        iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
  
index 52f9e8a761249adc87ab3783768f4ce861ac2785,5fcc9dd6be56de52fa0a063969cf58011b8eb3d8..172b5e63d3fbebaaa63e7d6968c212b1f32f1a22
@@@ -74,6 -74,7 +74,6 @@@
  #include "iwl-eeprom-parse.h"
  #include "mvm.h"
  #include "sta.h"
 -#include "fw-dbg.h"
  
  static void
  iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
        trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
        ba_trig = (void *)trig->data;
  
 -      if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
 +      if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
                return;
  
        if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
                return;
  
 -      iwl_mvm_fw_dbg_collect_trig(mvm, trig,
 -                                  "BAR sent to %pM, tid %d, ssn %d",
 -                                  addr, tid, ssn);
 +      iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
 +                              "BAR sent to %pM, tid %d, ssn %d",
 +                              addr, tid, ssn);
  }
  
  #define OPT_HDR(type, skb, off) \
@@@ -184,8 -185,14 +184,14 @@@ static u16 iwl_mvm_tx_csum(struct iwl_m
        else
                udp_hdr(skb)->check = 0;
  
-       /* mac header len should include IV, size is in words */
-       if (info->control.hw_key)
+       /*
+        * mac header len should include the IV, size is in words, unless
+        * the IV is added by the firmware, as in WEP.
+        * In the new Tx API, the IV is always added by the firmware.
+        */
+       if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key &&
+           info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
+           info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP104)
                mh_len += info->control.hw_key->iv_len;
        mh_len /= 2;
        offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
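
The reworked condition above decides whether the IV contributes to the MAC-header length handed to the checksum-offload engine: it is counted only when the driver inserts the IV itself, i.e. not for WEP and not on the new Tx API, where the firmware adds it. A standalone sketch of that computation follows; the cipher enum, key struct, and the TX_CMD_OFFLD_MH_SIZE shift are simplified stand-ins, not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

#define TX_CMD_OFFLD_MH_SIZE	0	/* assumed bit offset of the field */

enum cipher { CIPHER_CCMP, CIPHER_WEP40, CIPHER_WEP104 };

struct hw_key {
	enum cipher cipher;
	unsigned int iv_len;
};

static unsigned int offload_mh_len(bool new_tx_api,
				   const struct hw_key *key,
				   unsigned int mh_len /* bytes */)
{
	/* Count the IV only when the driver inserts it: not on the new
	 * Tx API and not for WEP, where the firmware adds the IV. */
	if (!new_tx_api && key &&
	    key->cipher != CIPHER_WEP40 && key->cipher != CIPHER_WEP104)
		mh_len += key->iv_len;

	return (mh_len / 2) << TX_CMD_OFFLD_MH_SIZE;	/* size in words */
}

int main(void)
{
	struct hw_key ccmp = { CIPHER_CCMP, 8 };

	printf("%u\n", offload_mh_len(false, &ccmp, 24));	/* (24+8)/2 = 16 */
	return 0;
}
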
@@@ -552,6 -559,9 +558,6 @@@ static int iwl_mvm_get_ctrl_vif_queue(s
  {
        struct iwl_mvm_vif *mvmvif;
  
 -      if (!iwl_mvm_is_dqa_supported(mvm))
 -              return info->hw_queue;
 -
        mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif);
  
        switch (info->control.vif->type) {
@@@ -650,7 -660,8 +656,7 @@@ int iwl_mvm_tx_skb_non_sta(struct iwl_m
  
                        if (ap_sta_id != IWL_MVM_INVALID_STA)
                                sta_id = ap_sta_id;
 -              } else if (iwl_mvm_is_dqa_supported(mvm) &&
 -                         info.control.vif->type == NL80211_IFTYPE_MONITOR) {
 +              } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
                        queue = mvm->aux_queue;
                }
        }
                return -1;
        }
  
 -      /*
 -       * Increase the pending frames counter, so that later when a reply comes
 -       * in and the counter is decreased - we don't start getting negative
 -       * values.
 -       * Note that we don't need to make sure it isn't agg'd, since we're
 -       * TXing non-sta
 -       * For DQA mode - we shouldn't increase it though
 -       */
 -      if (!iwl_mvm_is_dqa_supported(mvm))
 -              atomic_inc(&mvm->pending_frames[sta_id]);
 -
        return 0;
  }
  
@@@ -736,7 -758,7 +742,7 @@@ static int iwl_mvm_tx_tso(struct iwl_mv
        max_amsdu_len = sta->max_amsdu_len;
  
        /* the Tx FIFO to which this A-MSDU will be routed */
 -      txf = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
 +      txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, tid_to_mac80211_ac[tid]);
  
        /*
         * Don't send an AMSDU that will be longer than the TXF.
         * fifo to be able to send bursts.
         */
        max_amsdu_len = min_t(unsigned int, max_amsdu_len,
 -                            mvm->smem_cfg.lmac[0].txfifo_size[txf] - 256);
 +                            mvm->fwrt.smem_cfg.lmac[0].txfifo_size[txf] -
 +                            256);
  
        if (unlikely(dbg_max_amsdu_len))
                max_amsdu_len = min_t(unsigned int, max_amsdu_len,
@@@ -979,13 -1000,22 +985,13 @@@ static int iwl_mvm_tx_mpdu(struct iwl_m
                }
        }
  
 -      if (iwl_mvm_is_dqa_supported(mvm) || is_ampdu)
 -              txq_id = mvmsta->tid_data[tid].txq_id;
 -
 -      if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) {
 -              /* default to TID 0 for non-QoS packets */
 -              u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid;
 -
 -              txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]];
 -      }
 +      txq_id = mvmsta->tid_data[tid].txq_id;
  
        WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
  
        /* Check if TXQ needs to be allocated or re-activated */
        if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE ||
 -                   !mvmsta->tid_data[tid].is_tid_active) &&
 -          iwl_mvm_is_dqa_supported(mvm)) {
 +                   !mvmsta->tid_data[tid].is_tid_active)) {
                /* If TXQ needs to be allocated... */
                if (txq_id == IWL_MVM_INVALID_QUEUE) {
                        iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
                                    txq_id);
        }
  
 -      if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
 +      if (!iwl_mvm_has_new_tx_api(mvm)) {
                /* Keep track of the time of the last frame for this RA/TID */
                mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
  
  
        spin_unlock(&mvmsta->lock);
  
 -      /* Increase pending frames count if this isn't AMPDU or DQA queue */
 -      if (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu)
 -              atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
 -
        return 0;
  
  drop_unlock_sta:
@@@ -1114,7 -1148,8 +1120,7 @@@ static void iwl_mvm_check_ratid_empty(s
        lockdep_assert_held(&mvmsta->lock);
  
        if ((tid_data->state == IWL_AGG_ON ||
 -           tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA ||
 -           iwl_mvm_is_dqa_supported(mvm)) &&
 +           tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
            iwl_mvm_tid_queued(mvm, tid_data) == 0) {
                /*
                 * Now that this aggregation or DQA queue is empty tell
                IWL_DEBUG_TX_QUEUES(mvm,
                                    "Can continue DELBA flow ssn = next_recl = %d\n",
                                    tid_data->next_reclaimed);
 -              if (!iwl_mvm_is_dqa_supported(mvm)) {
 -                      u8 mac80211_ac = tid_to_mac80211_ac[tid];
 -
 -                      iwl_mvm_disable_txq(mvm, tid_data->txq_id,
 -                                          vif->hw_queue[mac80211_ac], tid,
 -                                          CMD_ASYNC);
 -              }
                tid_data->state = IWL_AGG_OFF;
                ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
                break;
@@@ -1259,7 -1301,7 +1265,7 @@@ static void iwl_mvm_tx_status_check_tri
        trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS);
        status_trig = (void *)trig->data;
  
 -      if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
 +      if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
                return;
  
        for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
                if (status_trig->statuses[i].status != (status & TX_STATUS_MSK))
                        continue;
  
 -              iwl_mvm_fw_dbg_collect_trig(mvm, trig,
 -                                          "Tx status %d was received",
 -                                          status & TX_STATUS_MSK);
 +              iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
 +                                      "Tx status %d was received",
 +                                      status & TX_STATUS_MSK);
                break;
        }
  }
@@@ -1331,7 -1373,6 +1337,7 @@@ static void iwl_mvm_rx_tx_cmd_single(st
        while (!skb_queue_empty(&skbs)) {
                struct sk_buff *skb = __skb_dequeue(&skbs);
                struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 +              bool flushed = false;
  
                skb_freed++;
  
                case TX_STATUS_DIRECT_DONE:
                        info->flags |= IEEE80211_TX_STAT_ACK;
                        break;
 +              case TX_STATUS_FAIL_FIFO_FLUSHED:
 +              case TX_STATUS_FAIL_DRAIN_FLOW:
 +                      flushed = true;
 +                      break;
                case TX_STATUS_FAIL_DEST_PS:
 -                      /* In DQA, the FW should have stopped the queue and not
 +                      /* the FW should have stopped the queue and not
                        * returned this status
                         */
 -                      WARN_ON(iwl_mvm_is_dqa_supported(mvm));
 +                      WARN_ON(1);
                        info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
                        break;
                default:
                /* Single frame failure in an AMPDU queue => send BAR */
                if (info->flags & IEEE80211_TX_CTL_AMPDU &&
                    !(info->flags & IEEE80211_TX_STAT_ACK) &&
 -                  !(info->flags & IEEE80211_TX_STAT_TX_FILTERED))
 +                  !(info->flags & IEEE80211_TX_STAT_TX_FILTERED) && !flushed)
                        info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
                info->flags &= ~IEEE80211_TX_CTL_AMPDU;
  
                ieee80211_tx_status(mvm->hw, skb);
        }
  
 -      if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue) {
 -              /* If this is an aggregation queue, we use the ssn since:
 -               * ssn = wifi seq_num % 256.
 -               * The seq_ctl is the sequence control of the packet to which
 -               * this Tx response relates. But if there is a hole in the
 -               * bitmap of the BA we received, this Tx response may allow to
 -               * reclaim the hole and all the subsequent packets that were
 -               * already acked. In that case, seq_ctl != ssn, and the next
 -               * packet to be reclaimed will be ssn and not seq_ctl. In that
 -               * case, several packets will be reclaimed even if
 -               * frame_count = 1.
 -               *
 -               * The ssn is the index (% 256) of the latest packet that has
 -               * treated (acked / dropped) + 1.
 -               */
 -              next_reclaimed = ssn;
 -      } else {
 -              /* The next packet to be reclaimed is the one after this one */
 -              next_reclaimed = IEEE80211_SEQ_TO_SN(seq_ctl + 0x10);
 -      }
 +      /* This is an aggregation queue or might become one, so we use
 +       * the ssn since: ssn = wifi seq_num % 256.
 +       * The seq_ctl is the sequence control of the packet to which
 +       * this Tx response relates. But if there is a hole in the
 +       * bitmap of the BA we received, this Tx response may allow us to
 +       * reclaim the hole and all the subsequent packets that were
 +       * already acked. In that case, seq_ctl != ssn, and the next
 +       * packet to be reclaimed will be ssn and not seq_ctl. In that
 +       * case, several packets will be reclaimed even if
 +       * frame_count = 1.
 +       *
 +       * The ssn is the index (% 256) of the latest packet that has been
 +       * treated (acked / dropped) + 1.
 +       */
 +      next_reclaimed = ssn;
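
The comment block above contrasts two ways of deriving the next packet to reclaim: the DQA/aggregation path kept by this change trusts the scheduler's ssn, which can jump past a reclaimed BA hole, while the deleted legacy path simply took the frame after the reported seq_ctl. A small sketch of both computations; IEEE80211_SEQ_TO_SN is restated locally from its mac80211 definition (the sequence number lives in bits 4..15 of the sequence control field).

#include <stdio.h>

#define IEEE80211_SCTL_SEQ		0xFFF0
#define IEEE80211_SEQ_TO_SN(seq)	(((seq) & IEEE80211_SCTL_SEQ) >> 4)

int main(void)
{
	unsigned int ssn = 42;		/* scheduler's next-packet index */
	unsigned int seq_ctl = 41 << 4;	/* seq_ctl of the reported frame */

	/* Kept path: trust the ssn; it may jump past several frames when
	 * a hole in the BA bitmap is reclaimed. */
	unsigned int next_dqa = ssn % 256;

	/* Deleted legacy path: simply the frame after the reported one. */
	unsigned int next_legacy = IEEE80211_SEQ_TO_SN(seq_ctl + 0x10);

	printf("dqa=%u legacy=%u\n", next_dqa, next_legacy);	/* 42 42 */
	return 0;
}
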
  
        IWL_DEBUG_TX_REPLY(mvm,
                           "TXQ %d status %s (0x%08x)\n",
                mvmsta = NULL;
        }
  
 -      /*
 -       * If the txq is not an AMPDU queue, there is no chance we freed
 -       * several skbs. Check that out...
 -       */
 -      if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue)
 -              goto out;
 -
 -      /* We can't free more than one frame at once on a shared queue */
 -      WARN_ON(skb_freed > 1);
 -
 -      /* If we have still frames for this STA nothing to do here */
 -      if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
 -              goto out;
 -
 -      if (mvmsta && mvmsta->vif->type == NL80211_IFTYPE_AP) {
 -
 -              /*
 -               * If there are no pending frames for this STA and
 -               * the tx to this station is not disabled, notify
 -               * mac80211 that this station can now wake up in its
 -               * STA table.
 -               * If mvmsta is not NULL, sta is valid.
 -               */
 -
 -              spin_lock_bh(&mvmsta->lock);
 -
 -              if (!mvmsta->disable_tx)
 -                      ieee80211_sta_block_awake(mvm->hw, sta, false);
 -
 -              spin_unlock_bh(&mvmsta->lock);
 -      }
 -
 -      if (PTR_ERR(sta) == -EBUSY || PTR_ERR(sta) == -ENOENT) {
 -              /*
 -               * We are draining and this was the last packet - pre_rcu_remove
 -               * has been called already. We might be after the
 -               * synchronize_net already.
 -               * Don't rely on iwl_mvm_rm_sta to see the empty Tx queues.
 -               */
 -              set_bit(sta_id, mvm->sta_drained);
 -              schedule_work(&mvm->sta_drained_wk);
 -      }
 -
  out:
        rcu_read_unlock();
  }
@@@ -1520,7 -1605,7 +1526,7 @@@ static const char *iwl_get_agg_tx_statu
        AGG_TX_STATE_(BT_PRIO);
        AGG_TX_STATE_(FEW_BYTES);
        AGG_TX_STATE_(ABORT);
 -      AGG_TX_STATE_(LAST_SENT_TTL);
 +      AGG_TX_STATE_(TX_ON_AIR_DROP);
        AGG_TX_STATE_(LAST_SENT_TRY_CNT);
        AGG_TX_STATE_(LAST_SENT_BT_KILL);
        AGG_TX_STATE_(SCD_QUERY);
@@@ -1569,8 -1654,9 +1575,8 @@@ static void iwl_mvm_rx_tx_cmd_agg(struc
        struct iwl_mvm_sta *mvmsta;
        int queue = SEQ_TO_QUEUE(sequence);
  
 -      if (WARN_ON_ONCE(queue < mvm->first_agg_queue &&
 -                       (!iwl_mvm_is_dqa_supported(mvm) ||
 -                        (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE))))
 +      if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE &&
 +                       (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)))
                return;
  
        if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))
@@@ -1735,6 -1821,8 +1741,8 @@@ void iwl_mvm_rx_ba_notif(struct iwl_mv
        struct iwl_mvm_tid_data *tid_data;
        struct iwl_mvm_sta *mvmsta;
  
+       ba_info.flags = IEEE80211_TX_STAT_AMPDU;
        if (iwl_mvm_has_new_tx_api(mvm)) {
                struct iwl_mvm_compressed_ba_notif *ba_res =
                        (void *)pkt->data;
index 5398a0917f062ea3c6efdc3f8b99bf1302d8e1e5,84f4ba01e14fa2e84878dc75fda5d050e3500880..858765fed8f8567d555435329ffa7c33c3d778e0
@@@ -430,7 -430,6 +430,7 @@@ static const struct pci_device_id iwl_h
        {IWL_PCI_DEVICE(0x095B, 0x520A, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9000, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9400, iwl7265_2ac_cfg)},
 +      {IWL_PCI_DEVICE(0x095A, 0x9E10, iwl7265_2ac_cfg)},
  
  /* 8000 Series */
        {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
  
  /* 9000 Series */
        {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0230, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0234, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)},
@@@ -691,23 -710,12 +711,23 @@@ static int iwl_pci_probe(struct pci_de
                iwl_trans->cfg = cfg_7265d;
        }
  
 -      if (iwl_trans->cfg->rf_id && cfg == &iwla000_2ac_cfg_hr_cdb) {
 -              if (iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_JF)
 -                      cfg = &iwla000_2ac_cfg_jf;
 -              else if (iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_HR)
 -                      cfg = &iwla000_2ac_cfg_hr;
 -
 +      if (iwl_trans->cfg->rf_id && cfg == &iwla000_2ac_cfg_hr_cdb &&
 +          iwl_trans->hw_rev != CSR_HW_REV_TYPE_HR_CDB) {
 +              u32 rf_id_chp = CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id);
 +              u32 jf_chp_id = CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF);
 +              u32 hr_chp_id = CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR);
 +
 +              if (rf_id_chp == jf_chp_id) {
 +                      if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ)
 +                              cfg = &iwla000_2ax_cfg_qnj_jf_b0;
 +                      else
 +                              cfg = &iwla000_2ac_cfg_jf;
 +              } else if (rf_id_chp == hr_chp_id) {
 +                      if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ)
 +                              cfg = &iwla000_2ax_cfg_qnj_hr_a0;
 +                      else
 +                              cfg = &iwla000_2ac_cfg_hr;
 +              }
                iwl_trans->cfg = cfg;
        }
  #endif
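
The new probe logic above compares only the chip-ID field of the RF ID, then selects a JF or HR config, with a QNJ variant when the hardware revision says so. Below is a sketch of that dispatch; the CHIP_ID() mask and every numeric value here are illustrative stand-ins, not the kernel's CSR definitions.

#include <stdio.h>

#define CHIP_ID(rf_id)	(((rf_id) >> 12) & 0xFFFU)	/* assumed layout */
#define RF_ID_JF	0x105110U			/* illustrative */
#define RF_ID_HR	0x10509CU			/* illustrative */
#define HW_REV_QNJ	0x360U				/* illustrative */

static const char *pick_cfg(unsigned int rf_id, unsigned int hw_rev)
{
	if (CHIP_ID(rf_id) == CHIP_ID(RF_ID_JF))
		return hw_rev == HW_REV_QNJ ? "a000_2ax_qnj_jf_b0"
					    : "a000_2ac_jf";
	if (CHIP_ID(rf_id) == CHIP_ID(RF_ID_HR))
		return hw_rev == HW_REV_QNJ ? "a000_2ax_qnj_hr_a0"
					    : "a000_2ac_hr";
	return "a000_2ac_hr_cdb";	/* keep the CDB default */
}

int main(void)
{
	printf("%s\n", pick_cfg(RF_ID_JF, HW_REV_QNJ));
	return 0;
}
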
@@@ -817,11 -825,11 +837,11 @@@ static int iwl_pci_resume(struct devic
        /*
         * Enable rfkill interrupt (in order to keep track of the rfkill
         * status). Must be locked to avoid processing a possible rfkill
 -       * interrupt while in iwl_trans_check_hw_rf_kill().
 +       * interrupt while in iwl_pcie_check_hw_rf_kill().
         */
        mutex_lock(&trans_pcie->mutex);
        iwl_enable_rfkill_int(trans);
 -      iwl_trans_check_hw_rf_kill(trans);
 +      iwl_pcie_check_hw_rf_kill(trans);
        mutex_unlock(&trans_pcie->mutex);
  
        return 0;