asedeno.scripts.mit.edu Git - linux.git/commitdiff
iwlwifi: pcie: allow to pretend to have Tx CSUM for debug
author Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Wed, 21 Oct 2015 06:00:07 +0000 (09:00 +0300)
committer Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Sun, 20 Dec 2015 12:48:22 +0000 (14:48 +0200)
Allow configuring the driver to pretend to have TX CSUM
offload support. This will be useful for testing the TSO flows
that will come in further patches.
This configuration is disabled by default.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
drivers/net/wireless/intel/iwlwifi/iwl-trans.h
drivers/net/wireless/intel/iwlwifi/mvm/constants.h
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c

index 777fd6c50ba4e6f55a2d658d6f6769af21e34808..6534537a2daf2aaf178c71cef693967c03460dfe 100644 (file)
@@ -478,6 +478,7 @@ struct iwl_hcmd_arr {
  *     in DWORD (as opposed to bytes)
  * @scd_set_active: should the transport configure the SCD for HCMD queue
  * @wide_cmd_header: firmware supports wide host command header
+ * @sw_csum_tx: transport should compute the TCP checksum
  * @command_groups: array of command groups, each member is an array of the
  *     commands in the group; for debugging only
  * @command_groups_size: number of command groups, to avoid illegal access
@@ -497,6 +498,7 @@ struct iwl_trans_config {
        bool bc_table_dword;
        bool scd_set_active;
        bool wide_cmd_header;
+       bool sw_csum_tx;
        const struct iwl_hcmd_arr *command_groups;
        int command_groups_size;
  
index 1f8f61604416e4f46b758574ff56262a653c2ad2..b00c03fcd4473cc670cd1489c3504c767de62c65 100644 (file)
 #define IWL_MVM_RS_RSSI_BASED_INIT_RATE         0
 #define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK       1
 #define IWL_MVM_TOF_IS_RESPONDER               0
+#define IWL_MVM_SW_TX_CSUM_OFFLOAD             0
 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE    1
 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE      2
 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW   1
index 0fb10fd57b3b7dd7d2be69484a6698a66e2dc3dc..0227b29d43c8fca2db0452791fc17c85c89c44d4 100644 (file)
@@ -667,6 +667,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        if (!iwl_mvm_is_csum_supported(mvm))
                hw->netdev_features &= ~NETIF_F_RXCSUM;
 
+       if (IWL_MVM_SW_TX_CSUM_OFFLOAD)
+               hw->netdev_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
        ret = ieee80211_register_hw(mvm->hw);
        if (ret)
                iwl_mvm_leds_exit(mvm);
index ffcebea1e88f6d33d9af94d9a467647ceb084ecb..50b8c01fadbd5f05fb278f886e87da15a569ba39 100644 (file)
@@ -541,6 +541,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        trans_cfg.scd_set_active = true;
 
        trans_cfg.sdio_adma_addr = fw->sdio_adma_addr;
+       trans_cfg.sw_csum_tx = IWL_MVM_SW_TX_CSUM_OFFLOAD;
 
        /* Set a short watchdog for the command queue */
        trans_cfg.cmd_q_wdg_timeout =
index 92a35d9641785e1158d91b20e1543eab5b922d89..3d47dd7576ee3e3af227de1619071e943bd35b99 100644 (file)
@@ -307,6 +307,8 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
  * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
  * @scd_set_active: should the transport configure the SCD for HCMD queue
  * @wide_cmd_header: true when ucode supports wide command header format
+ * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
+ *     frame.
  * @rx_page_order: page order for receive buffer size
  * @reg_lock: protect hw register access
  * @mutex: to protect stop_device / start_fw / start_hw
@@ -361,6 +363,7 @@ struct iwl_trans_pcie {
        bool bc_table_dword;
        bool scd_set_active;
        bool wide_cmd_header;
+       bool sw_csum_tx;
        u32 rx_page_order;
 
        /*protect hw register */
index a5bf24ed2a19b75ce5f00737ee629c5763294224..97e22fbda5165c8f8083c5d08cea5506cc6d6397 100644 (file)
@@ -1442,6 +1442,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
        trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header;
        trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
        trans_pcie->scd_set_active = trans_cfg->scd_set_active;
+       trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx;
 
        trans->command_groups = trans_cfg->command_groups;
        trans->command_groups_size = trans_cfg->command_groups_size;
index 2952915e273f3823298ced2294f4ee73332cdf37..6c460a567ff94252858554e6c8d24bf912a77545 100644 (file)
@@ -1823,6 +1823,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                      "TX on unused queue %d\n", txq_id))
                return -EINVAL;
 
+       if (unlikely(trans_pcie->sw_csum_tx &&
+                    skb->ip_summed == CHECKSUM_PARTIAL)) {
+               int offs = skb_checksum_start_offset(skb);
+               int csum_offs = offs + skb->csum_offset;
+               __wsum csum;
+
+               if (skb_ensure_writable(skb, csum_offs + sizeof(__sum16)))
+                       return -1;
+
+               csum = skb_checksum(skb, offs, skb->len - offs, 0);
+               *(__sum16 *)(skb->data + csum_offs) = csum_fold(csum);
+       }
+
        if (skb_is_nonlinear(skb) &&
            skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS &&
            __skb_linearize(skb))