asedeno.scripts.mit.edu Git - linux.git/commitdiff
nfp: tls: set skb decrypted flag
author: Dirk van der Merwe <dirk.vandermerwe@netronome.com>
Tue, 11 Jun 2019 04:40:03 +0000 (21:40 -0700)
committer: David S. Miller <davem@davemloft.net>
Tue, 11 Jun 2019 19:22:26 +0000 (12:22 -0700)
Firmware indicates when a packet has been decrypted by reusing the
currently unused BPF flag.  Transfer this information into the skb
and provide a statistic of all decrypted segments.

Signed-off-by: Dirk van der Merwe <dirk.vandermerwe@netronome.com>
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/netronome/nfp/nfp_net.h
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c

index 661fa5941b919c6989d8db34a05e1fdf78a9175c..7bfc819d1e8505d558b4de0bacbe119ff6844fe5 100644 (file)
@@ -240,7 +240,7 @@ struct nfp_net_tx_ring {
 #define PCIE_DESC_RX_I_TCP_CSUM_OK     cpu_to_le16(BIT(11))
 #define PCIE_DESC_RX_I_UDP_CSUM                cpu_to_le16(BIT(10))
 #define PCIE_DESC_RX_I_UDP_CSUM_OK     cpu_to_le16(BIT(9))
-#define PCIE_DESC_RX_BPF               cpu_to_le16(BIT(8))
+#define PCIE_DESC_RX_DECRYPTED         cpu_to_le16(BIT(8))
 #define PCIE_DESC_RX_EOP               cpu_to_le16(BIT(7))
 #define PCIE_DESC_RX_IP4_CSUM          cpu_to_le16(BIT(6))
 #define PCIE_DESC_RX_IP4_CSUM_OK       cpu_to_le16(BIT(5))
@@ -367,6 +367,7 @@ struct nfp_net_rx_ring {
  * @hw_csum_rx_inner_ok: Counter of packets where the inner HW checksum was OK
  * @hw_csum_rx_complete: Counter of packets with CHECKSUM_COMPLETE reported
  * @hw_csum_rx_error:   Counter of packets with bad checksums
+ * @hw_tls_rx:     Number of packets with TLS decrypted by hardware
  * @tx_sync:       Seqlock for atomic updates of TX stats
  * @tx_pkts:       Number of Transmitted packets
  * @tx_bytes:      Number of Transmitted bytes
@@ -415,6 +416,7 @@ struct nfp_net_r_vector {
        u64 hw_csum_rx_ok;
        u64 hw_csum_rx_inner_ok;
        u64 hw_csum_rx_complete;
+       u64 hw_tls_rx;
 
        u64 hw_csum_rx_error;
        u64 rx_replace_buf_alloc_fail;
index e221847d9a3e16d2143909aa7181c2cd920d12a6..349678425aedfb05748e60c0df41373b6524792f 100644 (file)
@@ -1951,6 +1951,15 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
 
                nfp_net_rx_csum(dp, r_vec, rxd, &meta, skb);
 
+#ifdef CONFIG_TLS_DEVICE
+               if (rxd->rxd.flags & PCIE_DESC_RX_DECRYPTED) {
+                       skb->decrypted = true;
+                       u64_stats_update_begin(&r_vec->rx_sync);
+                       r_vec->hw_tls_rx++;
+                       u64_stats_update_end(&r_vec->rx_sync);
+               }
+#endif
+
                if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                               le16_to_cpu(rxd->rxd.vlan));
index 3a8e1af7042d93be4c5b71b67ddc20872e179c9d..d9cbe84ac6ade4105af2e9c93a2f847f944cf8f6 100644 (file)
@@ -150,7 +150,7 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = {
 
 #define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
 #define NN_ET_SWITCH_STATS_LEN 9
-#define NN_RVEC_GATHER_STATS   12
+#define NN_RVEC_GATHER_STATS   13
 #define NN_RVEC_PER_Q_STATS    3
 #define NN_CTRL_PATH_STATS     1
 
@@ -444,6 +444,7 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
        data = nfp_pr_et(data, "hw_rx_csum_complete");
        data = nfp_pr_et(data, "hw_rx_csum_err");
        data = nfp_pr_et(data, "rx_replace_buf_alloc_fail");
+       data = nfp_pr_et(data, "rx_tls_decrypted");
        data = nfp_pr_et(data, "hw_tx_csum");
        data = nfp_pr_et(data, "hw_tx_inner_csum");
        data = nfp_pr_et(data, "tx_gather");
@@ -475,19 +476,20 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
                        tmp[2] = nn->r_vecs[i].hw_csum_rx_complete;
                        tmp[3] = nn->r_vecs[i].hw_csum_rx_error;
                        tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
+                       tmp[5] = nn->r_vecs[i].hw_tls_rx;
                } while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));
 
                do {
                        start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
                        data[1] = nn->r_vecs[i].tx_pkts;
                        data[2] = nn->r_vecs[i].tx_busy;
-                       tmp[5] = nn->r_vecs[i].hw_csum_tx;
-                       tmp[6] = nn->r_vecs[i].hw_csum_tx_inner;
-                       tmp[7] = nn->r_vecs[i].tx_gather;
-                       tmp[8] = nn->r_vecs[i].tx_lso;
-                       tmp[9] = nn->r_vecs[i].hw_tls_tx;
-                       tmp[10] = nn->r_vecs[i].tls_tx_fallback;
-                       tmp[11] = nn->r_vecs[i].tls_tx_no_fallback;
+                       tmp[6] = nn->r_vecs[i].hw_csum_tx;
+                       tmp[7] = nn->r_vecs[i].hw_csum_tx_inner;
+                       tmp[8] = nn->r_vecs[i].tx_gather;
+                       tmp[9] = nn->r_vecs[i].tx_lso;
+                       tmp[10] = nn->r_vecs[i].hw_tls_tx;
+                       tmp[11] = nn->r_vecs[i].tls_tx_fallback;
+                       tmp[12] = nn->r_vecs[i].tls_tx_no_fallback;
                } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
 
                data += NN_RVEC_PER_Q_STATS;