Merge branch 'Convert-skb_frag_t-to-bio_vec'
author David S. Miller <davem@davemloft.net>
Tue, 23 Jul 2019 03:47:56 +0000 (20:47 -0700)
committer David S. Miller <davem@davemloft.net>
Tue, 23 Jul 2019 03:47:56 +0000 (20:47 -0700)
Matthew Wilcox says:

====================
Convert skb_frag_t to bio_vec

skb_frag_t and bio_vec are fundamentally the same (page, offset,
length) tuple.  This patch series unifies the two, leaving the
skb_frag_t typedef in place.  The immediate advantage is that we
already have iov_iter support for bvecs and don't need to add
support for iterating skbuffs.  It also enables a long-term plan to
use bvecs more broadly within the kernel and should let
network-storage drivers do less work converting between skbuffs and
bio_vecs.

It will consume more memory on 32-bit kernels.  If that proves
problematic, we can look at ways of addressing it.

v3: Rebase on latest Linus with net-next merged.
  - Reorder the uncontroversial 'Use skb accessors' patches first so you
    can apply just those two if you want to hold off on the full
    conversion.
  - Convert all the users of 'struct skb_frag_struct' to skb_frag_t.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
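
For readers skimming the hunks below, here is a minimal sketch of the
accessor pattern the series converts drivers to.  The function, "dev",
"skb" and error value are placeholders, not taken from any driver in
this diff; only helpers that already appear in the hunks
(skb_frag_size(), skb_frag_dma_map()) are used, and the required
declarations live in linux/skbuff.h and linux/dma-mapping.h:

	/* Walk an skb's paged fragments through the skb_frag_t accessors
	 * instead of touching frag->size, frag->page.p or
	 * frag->page_offset directly.
	 */
	static int example_map_frags(struct device *dev, struct sk_buff *skb)
	{
		unsigned int i;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			unsigned int len = skb_frag_size(frag);
			dma_addr_t dma;

			/* Replaces open-coded dma_map_page(dev, frag->page.p,
			 * frag->page_offset, frag->size, DMA_TO_DEVICE).
			 */
			dma = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
			if (dma_mapping_error(dev, dma))
				return -ENOMEM;
			/* ... hand (dma, len) to the hardware descriptor ring ... */
		}
		return 0;
	}
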
76 files changed:
drivers/crypto/chelsio/chtls/chtls_io.c
drivers/hsi/clients/ssi_protocol.c
drivers/infiniband/hw/hfi1/vnic_sdma.c
drivers/net/ethernet/3com/3c59x.c
drivers/net/ethernet/agere/et131x.c
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
drivers/net/ethernet/atheros/atlx/atl1.c
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/calxeda/xgmac.c
drivers/net/ethernet/cavium/liquidio/lio_main.c
drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
drivers/net/ethernet/chelsio/cxgb3/sge.c
drivers/net/ethernet/cortina/gemini.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/enetc/enetc.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/huawei/hinic/hinic_tx.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/fm10k/fm10k_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/iavf/iavf_txrx.c
drivers/net/ethernet/intel/iavf/iavf_txrx.h
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igbvf/netdev.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/intel/ixgb/ixgb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/jme.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/microchip/lan743x_main.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/qualcomm/emac/emac-mac.c
drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
drivers/net/ethernet/tehuti/tehuti.c
drivers/net/usb/usbnet.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/wireless/ath/wil6210/debugfs.c
drivers/net/wireless/ath/wil6210/txrx.c
drivers/net/wireless/ath/wil6210/txrx_edma.c
drivers/net/xen-netback/netback.c
drivers/s390/net/qeth_core_main.c
drivers/scsi/fcoe/fcoe_transport.c
drivers/staging/octeon/ethernet-tx.c
drivers/staging/unisys/visornic/visornic_main.c
drivers/target/iscsi/cxgbit/cxgbit_target.c
include/linux/bvec.h
include/linux/skbuff.h
net/core/skbuff.c
net/core/tso.c
net/ipv4/tcp.c
net/kcm/kcmsock.c
net/tls/tls_device.c

index 551bca6fef242904d39400a6d958ac1f829c270a..c70cb5f272cf05a8c7ab962976e9bc324ea57c91 100644 (file)
@@ -1134,7 +1134,9 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
                        }
                        /* Update the skb. */
                        if (merge) {
-                               skb_shinfo(skb)->frags[i - 1].size += copy;
+                               skb_frag_size_add(
+                                               &skb_shinfo(skb)->frags[i - 1],
+                                               copy);
                        } else {
                                skb_fill_page_desc(skb, i, page, off, copy);
                                if (off + copy < pg_size) {
@@ -1247,7 +1249,7 @@ int chtls_sendpage(struct sock *sk, struct page *page,
 
                i = skb_shinfo(skb)->nr_frags;
                if (skb_can_coalesce(skb, i, page, offset)) {
-                       skb_shinfo(skb)->frags[i - 1].size += copy;
+                       skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
                } else if (i < MAX_SKB_FRAGS) {
                        get_page(page);
                        skb_fill_page_desc(skb, i, page, offset, copy);
index 9aeed98b87a1d19624eb6b8d31e1acd88769f9b1..c9e3f928b93de07d23f67c973237d40f75f70f0f 100644 (file)
@@ -181,7 +181,8 @@ static void ssip_skb_to_msg(struct sk_buff *skb, struct hsi_msg *msg)
                sg = sg_next(sg);
                BUG_ON(!sg);
                frag = &skb_shinfo(skb)->frags[i];
-               sg_set_page(sg, frag->page.p, frag->size, frag->page_offset);
+               sg_set_page(sg, skb_frag_page(frag), skb_frag_size(frag),
+                               frag->page_offset);
        }
 }
 
index af1b1ffcb38e8da865eac0ed902049e0d3ecf0bc..05a140504a999440f0c4c9203f9fe02ae8ec76c7 100644 (file)
@@ -102,7 +102,7 @@ static noinline int build_vnic_ulp_payload(struct sdma_engine *sde,
                goto bail_txadd;
 
        for (i = 0; i < skb_shinfo(tx->skb)->nr_frags; i++) {
-               struct skb_frag_struct *frag = &skb_shinfo(tx->skb)->frags[i];
+               skb_frag_t *frag = &skb_shinfo(tx->skb)->frags[i];
 
                /* combine physically continuous fragments later? */
                ret = sdma_txadd_page(sde->dd,
index 147051404194dea8749ec9f206138a8cbc436c4b..7be91e896f2d032d0a5e238566507d00b7eaec1b 100644 (file)
@@ -2175,7 +2175,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
                        dma_addr = skb_frag_dma_map(vp->gendev, frag,
                                                    0,
-                                                   frag->size,
+                                                   skb_frag_size(frag),
                                                    DMA_TO_DEVICE);
                        if (dma_mapping_error(vp->gendev, dma_addr)) {
                                for(i = i-1; i >= 0; i--)
index ea34bcb868b57fba300df2d9d393aaa2ad82a926..e43d922f043e69565ef949a2ef24287601d9717a 100644 (file)
@@ -2426,7 +2426,7 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
        u32 thiscopy, remainder;
        struct sk_buff *skb = tcb->skb;
        u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
-       struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
+       skb_frag_t *frags = &skb_shinfo(skb)->frags[0];
        struct phy_device *phydev = adapter->netdev->phydev;
        dma_addr_t dma_addr;
        struct tx_ring *tx_ring = &adapter->tx_ring;
@@ -2488,11 +2488,11 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
                                frag++;
                        }
                } else {
-                       desc[frag].len_vlan = frags[i - 1].size;
+                       desc[frag].len_vlan = skb_frag_size(&frags[i - 1]);
                        dma_addr = skb_frag_dma_map(&adapter->pdev->dev,
                                                    &frags[i - 1],
                                                    0,
-                                                   frags[i - 1].size,
+                                                   desc[frag].len_vlan,
                                                    DMA_TO_DEVICE);
                        desc[frag].addr_lo = lower_32_bits(dma_addr);
                        desc[frag].addr_hi = upper_32_bits(dma_addr);
index 533094233659b6e218d87a1eb422a303eec98b65..230726d7b74f6343e7d5262e1a02ba455fc0b713 100644 (file)
@@ -526,7 +526,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
        struct xgbe_ring *ring = channel->tx_ring;
        struct xgbe_ring_data *rdata;
        struct xgbe_packet_data *packet;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        dma_addr_t skb_dma;
        unsigned int start_index, cur_index;
        unsigned int offset, tso, vlan, datalen, len;
index 3dd0cecddba80d6538dba78a0bbaf30332d9881f..98f8f203315441e429d1471bd0a8058b02a9f429 100644 (file)
@@ -1833,7 +1833,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
                             struct xgbe_ring *ring, struct sk_buff *skb,
                             struct xgbe_packet_data *packet)
 {
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        unsigned int context_desc;
        unsigned int len;
        unsigned int i;
index 10b1c053e70a9203b3f83207864d3e8cce1d2dfc..949bff4d2921a8e239caef43a2c7e7d115b8dd2a 100644 (file)
@@ -340,7 +340,8 @@ static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo)
                                nr_frags = skb_shinfo(skb)->nr_frags;
 
                                for (i = 0; i < 2 && i < nr_frags; i++)
-                                       len += skb_shinfo(skb)->frags[i].size;
+                                       len += skb_frag_size(
+                                               &skb_shinfo(skb)->frags[i]);
 
                                /* HW requires header must reside in 3 buffer */
                                if (unlikely(hdr_len > len)) {
index e3538ba7d0e72f27fce22cc628c431a8e847a1cc..a3ec738da336df46154619cae8caca0344eca0ca 100644 (file)
@@ -1465,9 +1465,7 @@ static int alx_map_tx_skb(struct alx_tx_queue *txq, struct sk_buff *skb)
        tpd->len = cpu_to_le16(maplen);
 
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
-               struct skb_frag_struct *frag;
-
-               frag = &skb_shinfo(skb)->frags[f];
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
 
                if (++txq->write_idx == txq->count)
                        txq->write_idx = 0;
index be7f9cebb675ea3991fef4a4d7c85995b109774d..179ad62a2bd21ed7a91f1c4659aae2aa72673cbb 100644 (file)
@@ -2150,9 +2150,7 @@ static int atl1c_tx_map(struct atl1c_adapter *adapter,
        }
 
        for (f = 0; f < nr_frags; f++) {
-               struct skb_frag_struct *frag;
-
-               frag = &skb_shinfo(skb)->frags[f];
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
 
                use_tpd = atl1c_get_tpd(adapter, type);
                memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
index 7f14e010bfeb25eaa30aa5da475b688b520fd2b7..4f7b65825c159b41e6b3d10535d5fbe9df8a395e 100644 (file)
@@ -1770,11 +1770,10 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter,
        }
 
        for (f = 0; f < nr_frags; f++) {
-               const struct skb_frag_struct *frag;
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
                u16 i;
                u16 seg_num;
 
-               frag = &skb_shinfo(skb)->frags[f];
                buf_len = skb_frag_size(frag);
 
                seg_num = (buf_len + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
index b5c6dc914720d067fc6bd121cb05fe406d405b54..5f420c17bf3e3d342dfba7ff5f6d816836514925 100644 (file)
@@ -2256,10 +2256,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
        }
 
        for (f = 0; f < nr_frags; f++) {
-               const struct skb_frag_struct *frag;
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
                u16 i, nseg;
 
-               frag = &skb_shinfo(skb)->frags[f];
                buf_len = skb_frag_size(frag);
 
                nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) /
index 4632dd5dbad1f4fbeaad6722807428e0640e91e3..148734b166f04b1cfec014bbbbd8c1616df85752 100644 (file)
@@ -172,7 +172,7 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
        flags = 0;
 
        for (i = 0; i < nr_frags; i++) {
-               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                int len = skb_frag_size(frag);
 
                index = (index + 1) % BGMAC_TX_RING_SLOTS;
index 7134d2c3eb1c4ccdcdd9cdd97497adedddefa159..74dd28b821058af399d7355c93f669d15d6181ff 100644 (file)
@@ -888,7 +888,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
 {
        unsigned int payload = offset_and_len >> 16;
        unsigned int len = offset_and_len & 0xffff;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        struct page *page = data;
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
index 7767ae6fa1fd91a7f339018126b8f21d298d78c2..e338272931d14be95613a4c0ea97b610dd800439 100644 (file)
@@ -3032,7 +3032,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
        head_unmap->nvecs++;
 
        for (i = 0, vect_id = 0; i < vectors - 1; i++) {
-               const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                u32             size = skb_frag_size(frag);
 
                if (unlikely(size == 0)) {
index 99f49d059414624f2a359b54c91eebfb2178f50a..f96a42af101437a8a46b8847915267145180a2b1 100644 (file)
@@ -1104,7 +1104,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
        for (i = 0; i < nfrags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
-               len = frag->size;
+               len = skb_frag_size(frag);
 
                paddr = skb_frag_dma_map(priv->device, frag, 0, len,
                                         DMA_TO_DEVICE);
index eab805579f9646b42a30e71757e3f6d6432ba7e2..7f3b2e3b0868ef7a330bb9f26e2b10d180ccac01 100644 (file)
@@ -1492,11 +1492,11 @@ static void free_netsgbuf(void *buf)
 
        i = 1;
        while (frags--) {
-               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
 
                pci_unmap_page((lio->oct_dev)->pci_dev,
                               g->sg[(i >> 2)].ptr[(i & 3)],
-                              frag->size, DMA_TO_DEVICE);
+                              skb_frag_size(frag), DMA_TO_DEVICE);
                i++;
        }
 
@@ -1535,11 +1535,11 @@ static void free_netsgbuf_with_resp(void *buf)
 
        i = 1;
        while (frags--) {
-               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
 
                pci_unmap_page((lio->oct_dev)->pci_dev,
                               g->sg[(i >> 2)].ptr[(i & 3)],
-                              frag->size, DMA_TO_DEVICE);
+                              skb_frag_size(frag), DMA_TO_DEVICE);
                i++;
        }
 
@@ -2424,7 +2424,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 
        } else {
                int i, frags;
-               struct skb_frag_struct *frag;
+               skb_frag_t *frag;
                struct octnic_gather *g;
 
                spin_lock(&lio->glist_lock[q_idx]);
@@ -2462,11 +2462,9 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
                        frag = &skb_shinfo(skb)->frags[i - 1];
 
                        g->sg[(i >> 2)].ptr[(i & 3)] =
-                               dma_map_page(&oct->pci_dev->dev,
-                                            frag->page.p,
-                                            frag->page_offset,
-                                            frag->size,
-                                            DMA_TO_DEVICE);
+                               skb_frag_dma_map(&oct->pci_dev->dev,
+                                                frag, 0, skb_frag_size(frag),
+                                                DMA_TO_DEVICE);
 
                        if (dma_mapping_error(&oct->pci_dev->dev,
                                              g->sg[i >> 2].ptr[i & 3])) {
@@ -2478,7 +2476,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
                                        frag = &skb_shinfo(skb)->frags[j - 1];
                                        dma_unmap_page(&oct->pci_dev->dev,
                                                       g->sg[j >> 2].ptr[j & 3],
-                                                      frag->size,
+                                                      skb_frag_size(frag),
                                                       DMA_TO_DEVICE);
                                }
                                dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
@@ -2486,7 +2484,8 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
                                return NETDEV_TX_BUSY;
                        }
 
-                       add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
+                       add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
+                                   (i & 3));
                        i++;
                }
 
index db0b90555acbcb368626757b803474710d135082..370d76822ee078f58b177dd1e9879565085c76a5 100644 (file)
@@ -837,11 +837,11 @@ static void free_netsgbuf(void *buf)
 
        i = 1;
        while (frags--) {
-               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
 
                pci_unmap_page((lio->oct_dev)->pci_dev,
                               g->sg[(i >> 2)].ptr[(i & 3)],
-                              frag->size, DMA_TO_DEVICE);
+                              skb_frag_size(frag), DMA_TO_DEVICE);
                i++;
        }
 
@@ -881,11 +881,11 @@ static void free_netsgbuf_with_resp(void *buf)
 
        i = 1;
        while (frags--) {
-               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
 
                pci_unmap_page((lio->oct_dev)->pci_dev,
                               g->sg[(i >> 2)].ptr[(i & 3)],
-                              frag->size, DMA_TO_DEVICE);
+                              skb_frag_size(frag), DMA_TO_DEVICE);
                i++;
        }
 
@@ -1497,7 +1497,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
                ndata.reqtype = REQTYPE_NORESP_NET;
 
        } else {
-               struct skb_frag_struct *frag;
+               skb_frag_t *frag;
                struct octnic_gather *g;
                int i, frags;
 
@@ -1535,11 +1535,9 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
                        frag = &skb_shinfo(skb)->frags[i - 1];
 
                        g->sg[(i >> 2)].ptr[(i & 3)] =
-                               dma_map_page(&oct->pci_dev->dev,
-                                            frag->page.p,
-                                            frag->page_offset,
-                                            frag->size,
-                                            DMA_TO_DEVICE);
+                               skb_frag_dma_map(&oct->pci_dev->dev,
+                                                frag, 0, skb_frag_size(frag),
+                                                DMA_TO_DEVICE);
                        if (dma_mapping_error(&oct->pci_dev->dev,
                                              g->sg[i >> 2].ptr[i & 3])) {
                                dma_unmap_single(&oct->pci_dev->dev,
@@ -1550,7 +1548,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
                                        frag = &skb_shinfo(skb)->frags[j - 1];
                                        dma_unmap_page(&oct->pci_dev->dev,
                                                       g->sg[j >> 2].ptr[j & 3],
-                                                      frag->size,
+                                                      skb_frag_size(frag),
                                                       DMA_TO_DEVICE);
                                }
                                dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
@@ -1558,7 +1556,8 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
                                return NETDEV_TX_BUSY;
                        }
 
-                       add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
+                       add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
+                                   (i & 3));
                        i++;
                }
 
index 192bc92da881d44db113a6d8005c70b9bd7867dc..c0266a87794c2121a6b203b72d636670ae2d4618 100644 (file)
@@ -1588,9 +1588,7 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
                goto doorbell;
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               const struct skb_frag_struct *frag;
-
-               frag = &skb_shinfo(skb)->frags[i];
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
                qentry = nicvf_get_nxt_sqentry(sq, qentry);
                size = skb_frag_size(frag);
index 89db739b781910e8b482b1b24b7a99e0dd322df7..310a232e00f0b987da5eb28d02c16f102092c80d 100644 (file)
@@ -2132,7 +2132,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
        struct port_info *pi = netdev_priv(qs->netdev);
        struct sk_buff *skb = NULL;
        struct cpl_rx_pkt *cpl;
-       struct skb_frag_struct *rx_frag;
+       skb_frag_t *rx_frag;
        int nr_frags;
        int offset = 0;
 
index 9003eb6716cd8621840bbbc75e0ba0540dab7e0a..46dd6b4886b6d528e7fed8255eab569b781021d1 100644 (file)
@@ -1182,9 +1182,8 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
                        buflen = skb_headlen(skb);
                } else {
                        skb_frag = skb_si->frags + frag;
-                       buffer = page_address(skb_frag_page(skb_frag)) +
-                                skb_frag->page_offset;
-                       buflen = skb_frag->size;
+                       buffer = skb_frag_address(skb_frag);
+                       buflen = skb_frag_size(skb_frag);
                }
 
                if (frag == last_frag) {
index 2edb86ec9fe9ec5945c0de54706eb79dd187d967..e00a94a038790f68f0a824e1f43cd7068faddb5a 100644 (file)
@@ -1014,7 +1014,7 @@ static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
        }
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                len = skb_frag_size(frag);
 
                busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
index 223709443ea4e6b31f4f79f5eded6722975b8ae2..b6ff8930740928515d2113cb15c823aafad8dd11 100644 (file)
@@ -110,7 +110,7 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
                              int active_offloads)
 {
        struct enetc_tx_swbd *tx_swbd;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        int len = skb_headlen(skb);
        union enetc_tx_bd temp_bd;
        union enetc_tx_bd *txbd;
index e5610a4da5390d705b82bc6ca71231b2c4beb088..c01d3ec3e9af34a2ca8aa980bc24eec2044d0377 100644 (file)
@@ -365,7 +365,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
                status = fec16_to_cpu(bdp->cbd_sc);
                status &= ~BD_ENET_TX_STATS;
                status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
-               frag_len = skb_shinfo(skb)->frags[frag].size;
+               frag_len = skb_frag_size(&skb_shinfo(skb)->frags[frag]);
 
                /* Handle the last BD specially */
                if (frag == nr_frags - 1) {
@@ -387,7 +387,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
                        ebdp->cbd_esc = cpu_to_fec32(estatus);
                }
 
-               bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
+               bufaddr = skb_frag_address(this_frag);
 
                index = fec_enet_get_bd_index(bdp, &txq->bd);
                if (((unsigned long) bufaddr) & fep->tx_align ||
index 349970557c5227e1f4f102119e849f9c00176450..95a6b0926170e98166e06eca40a274392e2350c3 100644 (file)
@@ -719,7 +719,7 @@ static int hix5hd2_fill_sg_desc(struct hix5hd2_priv *priv,
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-               int len = frag->size;
+               int len = skb_frag_size(frag);
 
                addr = skb_frag_dma_map(priv->dev, frag, 0, len, DMA_TO_DEVICE);
                ret = dma_mapping_error(priv->dev, addr);
index 2235dd55fab2d16da36bbeba8b4aa2097cbf800f..1545536ef76914d0634dfc0c9863d680b3bfcb5b 100644 (file)
@@ -245,7 +245,7 @@ static int hns_nic_maybe_stop_tso(
        int frag_num;
        struct sk_buff *skb = *out_skb;
        struct sk_buff *new_skb = NULL;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
 
        size = skb_headlen(skb);
        buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
@@ -309,7 +309,7 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
        struct hnae_ring *ring = ring_data->ring;
        struct device *dev = ring_to_dev(ring);
        struct netdev_queue *dev_queue;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        int buf_num;
        int seg_num;
        dma_addr_t dma;
index 310afa70883131b83b9e68983b002689d972e2e5..69f7ef810654c0c408f589e7dd8e8f72ea204b7d 100644 (file)
@@ -1033,7 +1033,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
        struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
        struct hns3_desc *desc = &ring->desc[ring->next_to_use];
        struct device *dev = ring_to_dev(ring);
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        unsigned int frag_buf_num;
        int k, sizeoflast;
        dma_addr_t dma;
@@ -1086,7 +1086,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 
                dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
        } else {
-               frag = (struct skb_frag_struct *)priv;
+               frag = (skb_frag_t *)priv;
                dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
        }
 
@@ -1159,7 +1159,7 @@ static int hns3_nic_bd_num(struct sk_buff *skb)
        bd_num = hns3_tx_bd_count(size);
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                int frag_bd_num;
 
                size = skb_frag_size(frag);
@@ -1290,7 +1290,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
                &tx_ring_data(priv, skb->queue_mapping);
        struct hns3_enet_ring *ring = ring_data->ring;
        struct netdev_queue *dev_queue;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        int next_to_use_head;
        int buf_num;
        int seg_num;
index 9c78251f9c39fb600cf5d771013e8759df1d9a56..0e13d1c7e474602b85a9bdd73190b3e7a2c9145c 100644 (file)
@@ -136,7 +136,7 @@ static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
        struct hinic_hwdev *hwdev = nic_dev->hwdev;
        struct hinic_hwif *hwif = hwdev->hwif;
        struct pci_dev *pdev = hwif->pdev;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        dma_addr_t dma_addr;
        int i, j;
 
index 395dde44448359a06a5669fb725eb6f7f35cc05e..9e43c9ace9c27a52cf5acd9c40895901f9ec8584 100644 (file)
@@ -1549,7 +1549,7 @@ emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
                                       ctrl);
        /* skb fragments */
        for (i = 0; i < nr_frags; ++i) {
-               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                len = skb_frag_size(frag);
 
                if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
index f703fa58458ef55981dd1cb4d660f2625f65f26c..6b6ba1c3823521ffd5e211afddbbc96d65a27469 100644 (file)
@@ -2889,9 +2889,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
        }
 
        for (f = 0; f < nr_frags; f++) {
-               const struct skb_frag_struct *frag;
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
 
-               frag = &skb_shinfo(skb)->frags[f];
                len = skb_frag_size(frag);
                offset = 0;
 
index e4baa13b3cda95c123c2a8eaeda396f0de3583e7..a0c001d6d9d298c4d5bb5174ffa4b2e24f27782d 100644 (file)
@@ -5579,9 +5579,8 @@ static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
        }
 
        for (f = 0; f < nr_frags; f++) {
-               const struct skb_frag_struct *frag;
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
 
-               frag = &skb_shinfo(skb)->frags[f];
                len = skb_frag_size(frag);
                offset = 0;
 
index 90270b4a16829f12e9651a3d5542f74016643af1..9ffff7886085466621c6f29f5ed9e2fd1dfd0ccd 100644 (file)
@@ -946,7 +946,7 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring,
        struct sk_buff *skb = first->skb;
        struct fm10k_tx_buffer *tx_buffer;
        struct fm10k_tx_desc *tx_desc;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        unsigned char *data;
        dma_addr_t dma;
        unsigned int data_len, size;
@@ -1074,7 +1074,8 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
         * otherwise try next time
         */
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
-               count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+               count += TXD_USE_COUNT(skb_frag_size(
+                                               &skb_shinfo(skb)->frags[f]));
 
        if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
                tx_ring->tx_stats.tx_busy++;
index 2a2fe3ec79269f5fd05a7348176c8677be543ee2..f162252f01b5040eb65995c256b3500a5553d293 100644 (file)
@@ -3262,7 +3262,7 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  **/
 bool __i40e_chk_linearize(struct sk_buff *skb)
 {
-       const struct skb_frag_struct *frag, *stale;
+       const skb_frag_t *frag, *stale;
        int nr_frags, sum;
 
        /* no need to check if number of frags is less than 7 */
@@ -3349,7 +3349,7 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 {
        unsigned int data_len = skb->data_len;
        unsigned int size = skb_headlen(skb);
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        struct i40e_tx_buffer *tx_bi;
        struct i40e_tx_desc *tx_desc;
        u16 i = tx_ring->next_to_use;
index 100e92d2982f2d7e7fc8683272948a974db2f467..36d37f31a287e2ae5309b7c9b545e552037395f1 100644 (file)
@@ -521,7 +521,7 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
  **/
 static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
 {
-       const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+       const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        int count = 0, size = skb_headlen(skb);
 
index 0cca1b589b562420d4643d2a43af03a3c16a2c6c..fae7cd1c618a50d5dff8b8c657d7ed211aa9ac7c 100644 (file)
@@ -2161,7 +2161,7 @@ static void iavf_create_tx_ctx(struct iavf_ring *tx_ring,
  **/
 bool __iavf_chk_linearize(struct sk_buff *skb)
 {
-       const struct skb_frag_struct *frag, *stale;
+       const skb_frag_t *frag, *stale;
        int nr_frags, sum;
 
        /* no need to check if number of frags is less than 7 */
@@ -2269,7 +2269,7 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
 {
        unsigned int data_len = skb->data_len;
        unsigned int size = skb_headlen(skb);
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        struct iavf_tx_buffer *tx_bi;
        struct iavf_tx_desc *tx_desc;
        u16 i = tx_ring->next_to_use;
index 71e7d090f8db06c9ebe08f2c68a4215a840026c8..dd3348f9da9dc3a7e06d76ed4baf76bca101bd72 100644 (file)
@@ -462,7 +462,7 @@ bool __iavf_chk_linearize(struct sk_buff *skb);
  **/
 static inline int iavf_xmit_descriptor_count(struct sk_buff *skb)
 {
-       const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+       const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        int count = 0, size = skb_headlen(skb);
 
index 3c83230434b6d435a2a7555ddf7a781565267aa2..dd7392f293bf53aa37badb5be6954ab2b76c1016 100644 (file)
@@ -1521,7 +1521,7 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
 {
        u64 td_offset, td_tag, td_cmd;
        u16 i = tx_ring->next_to_use;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        unsigned int data_len, size;
        struct ice_tx_desc *tx_desc;
        struct ice_tx_buf *tx_buf;
@@ -1923,7 +1923,7 @@ static unsigned int ice_txd_use_count(unsigned int size)
  */
 static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
 {
-       const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+       const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        unsigned int count = 0, size = skb_headlen(skb);
 
@@ -1954,7 +1954,7 @@ static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
  */
 static bool __ice_chk_linearize(struct sk_buff *skb)
 {
-       const struct skb_frag_struct *frag, *stale;
+       const skb_frag_t *frag, *stale;
        int nr_frags, sum;
 
        /* no need to check if number of frags is less than 7 */
index b4df3e319467ef1cede74868d51ee24f70c0f255..749645d7f9b77b59390bb1a9d14a0d0793f30466 100644 (file)
@@ -5918,7 +5918,7 @@ static int igb_tx_map(struct igb_ring *tx_ring,
        struct sk_buff *skb = first->skb;
        struct igb_tx_buffer *tx_buffer;
        union e1000_adv_tx_desc *tx_desc;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        dma_addr_t dma;
        unsigned int data_len, size;
        u32 tx_flags = first->tx_flags;
@@ -6074,7 +6074,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
         * otherwise try next time
         */
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
-               count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+               count += TXD_USE_COUNT(skb_frag_size(
+                                               &skb_shinfo(skb)->frags[f]));
 
        if (igb_maybe_stop_tx(tx_ring, count + 3)) {
                /* this is a hard error */
index 34cd30d7162f9ab1a14523767ca92122ec7eb61e..0f2b68f4bb0fea5874e3073f7fa9ce8a0b40a6ea 100644 (file)
@@ -2174,7 +2174,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
                goto dma_error;
 
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
-               const struct skb_frag_struct *frag;
+               const skb_frag_t *frag;
 
                count++;
                i++;
index aa9323e55406e323367dc1777fbd51ea6221678a..9ffe71424ecefd2e319c857f0ac1e288cc39963e 100644 (file)
@@ -861,7 +861,7 @@ static int igc_tx_map(struct igc_ring *tx_ring,
        struct igc_tx_buffer *tx_buffer;
        union igc_adv_tx_desc *tx_desc;
        u32 tx_flags = first->tx_flags;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        u16 i = tx_ring->next_to_use;
        unsigned int data_len, size;
        dma_addr_t dma;
@@ -1015,7 +1015,8 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
         * otherwise try next time
         */
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
-               count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+               count += TXD_USE_COUNT(skb_frag_size(
+                                               &skb_shinfo(skb)->frags[f]));
 
        if (igc_maybe_stop_tx(tx_ring, count + 3)) {
                /* this is a hard error */
index e5ac2d3fd816da4dcca82b8b52b0eb7482314347..0940a0da16f2863dee68502865d25e21f8b8c50d 100644 (file)
@@ -1331,9 +1331,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
        }
 
        for (f = 0; f < nr_frags; f++) {
-               const struct skb_frag_struct *frag;
-
-               frag = &skb_shinfo(skb)->frags[f];
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
                len = skb_frag_size(frag);
                offset = 0;
 
index cbaf712d6529bf0e1d20289c8d0d4a0466b79623..e12d23d1fa64a86aff4ccff885b975ce1b391903 100644 (file)
@@ -1785,7 +1785,7 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
 static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
                            struct sk_buff *skb)
 {
-       struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+       skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
        unsigned char *va;
        unsigned int pull_len;
 
@@ -1840,7 +1840,7 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
                                              skb_headlen(skb),
                                              DMA_FROM_DEVICE);
        } else {
-               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
 
                dma_sync_single_range_for_cpu(rx_ring->dev,
                                              IXGBE_CB(skb)->dma,
@@ -8186,7 +8186,7 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
        struct sk_buff *skb = first->skb;
        struct ixgbe_tx_buffer *tx_buffer;
        union ixgbe_adv_tx_desc *tx_desc;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        dma_addr_t dma;
        unsigned int data_len, size;
        u32 tx_flags = first->tx_flags;
@@ -8605,7 +8605,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
         * otherwise try next time
         */
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
-               count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+               count += TXD_USE_COUNT(skb_frag_size(
+                                               &skb_shinfo(skb)->frags[f]));
 
        if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
                tx_ring->tx_stats.tx_busy++;
index d2b41f9f87f80513a793eafe5e65899464488a9c..bdfccaf38edddd7d4428329a6cb29c9f13d960f3 100644 (file)
@@ -3949,7 +3949,7 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
        struct sk_buff *skb = first->skb;
        struct ixgbevf_tx_buffer *tx_buffer;
        union ixgbe_adv_tx_desc *tx_desc;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        dma_addr_t dma;
        unsigned int data_len, size;
        u32 tx_flags = first->tx_flags;
index 0b668357db4dd24de638dd8a995790f482d9b95f..ff6393fd64ac4dde6d741c221de5c9adddd22c9f 100644 (file)
@@ -2030,12 +2030,12 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
        bool hidma = jme->dev->features & NETIF_F_HIGHDMA;
        int i, nr_frags = skb_shinfo(skb)->nr_frags;
        int mask = jme->tx_ring_mask;
-       const struct skb_frag_struct *frag;
        u32 len;
        int ret = 0;
 
        for (i = 0 ; i < nr_frags ; ++i) {
-               frag = &skb_shinfo(skb)->frags[i];
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
                ctxdesc = txdesc + ((idx + i + 2) & (mask));
                ctxbi = txbi + ((idx + i + 2) & (mask));
 
@@ -2046,7 +2046,6 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
                        jme_drop_tx_map(jme, idx, i);
                        goto out;
                }
-
        }
 
        len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
index 895bfed26a8a58b487280d85650f99cdb7fa5067..15cc678f5e5b86a7e8625f4f80cea153e73814a1 100644 (file)
@@ -2350,10 +2350,10 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
 
        for (i = 0; i < nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-               void *addr = page_address(frag->page.p) + frag->page_offset;
+               void *addr = skb_frag_address(frag);
 
                tx_desc = mvneta_txq_next_desc_get(txq);
-               tx_desc->data_size = frag->size;
+               tx_desc->data_size = skb_frag_size(frag);
 
                tx_desc->buf_phys_addr =
                        dma_map_single(pp->dev->dev.parent, addr,
index c51f1d5b550b1131d18fc7240e5503d461c43f4f..937e4b928b9460b328f7eb5aacb1d2ef396372b8 100644 (file)
@@ -2911,14 +2911,15 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-               void *addr = page_address(frag->page.p) + frag->page_offset;
+               void *addr = skb_frag_address(frag);
 
                tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
                mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
-               mvpp2_txdesc_size_set(port, tx_desc, frag->size);
+               mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag));
 
                buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
-                                             frag->size, DMA_TO_DEVICE);
+                                             skb_frag_size(frag),
+                                             DMA_TO_DEVICE);
                if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
                        mvpp2_txq_desc_put(txq);
                        goto cleanup;
index c39d7f4ab1d4c036df56ed0e0ed5a2422f2c77d3..00991df44ed6fb0fbb948b64ca6aeb179c536a92 100644 (file)
@@ -696,7 +696,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
        txd = itxd;
        nr_frags = skb_shinfo(skb)->nr_frags;
        for (i = 0; i < nr_frags; i++) {
-               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                unsigned int offset = 0;
                int frag_size = skb_frag_size(frag);
 
@@ -781,7 +781,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 static inline int mtk_cal_txd_req(struct sk_buff *skb)
 {
        int i, nfrags;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
 
        nfrags = 1;
        if (skb_is_gso(skb)) {
index 36a92b19e613d8dba0a5f57461e7de3d09635310..4d5ca302c067126b8627cb4809485b45c10e2460 100644 (file)
@@ -772,9 +772,7 @@ static bool mlx4_en_build_dma_wqe(struct mlx4_en_priv *priv,
 
        /* Map fragments if any */
        for (i_frag = shinfo->nr_frags - 1; i_frag >= 0; i_frag--) {
-               const struct skb_frag_struct *frag;
-
-               frag = &shinfo->frags[i_frag];
+               const skb_frag_t *frag = &shinfo->frags[i_frag];
                byte_count = skb_frag_size(frag);
                dma = skb_frag_dma_map(ddev, frag,
                                       0, byte_count,
index 600e92cb629a273bdfa72b77cbae33df07831c5c..acf25cc38fa19426f2dd41c382aa1ec64c452fd6 100644 (file)
@@ -210,7 +210,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
        }
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                int fsz = skb_frag_size(frag);
 
                dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
index 13e6bf13ac4de023026d4f3e69538efcaee66582..15a8be6bad27f275302076fc33776f9bc2bdcf23 100644 (file)
@@ -1434,7 +1434,7 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
 }
 
 static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
-                                        const struct skb_frag_struct *fragment,
+                                        const skb_frag_t *fragment,
                                         unsigned int frame_length)
 {
        /* called only from within lan743x_tx_xmit_frame
@@ -1607,9 +1607,8 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
                goto finish;
 
        for (j = 0; j < nr_frags; j++) {
-               const struct skb_frag_struct *frag;
+               const skb_frag_t *frag = &(skb_shinfo(skb)->frags[j]);
 
-               frag = &(skb_shinfo(skb)->frags[j]);
                if (lan743x_tx_frame_add_fragment(tx, frag, frame_length)) {
                        /* upon error no need to call
                         *      lan743x_tx_frame_end
index d8b7fba96d58ed109a4962e0128909f9ea283e50..9ead6ecb7586dfcf780da518d26fa4162ba52103 100644 (file)
@@ -1286,7 +1286,7 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
 {
        u8 *va;
        struct vlan_ethhdr *veh;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        __wsum vsum;
 
        va = addr;
@@ -1318,7 +1318,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
 {
        struct myri10ge_priv *mgp = ss->mgp;
        struct sk_buff *skb;
-       struct skb_frag_struct *rx_frags;
+       skb_frag_t *rx_frags;
        struct myri10ge_rx_buf *rx;
        int i, idx, remainder, bytes;
        struct pci_dev *pdev = mgp->pdev;
@@ -1351,7 +1351,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
                return 0;
        }
        rx_frags = skb_shinfo(skb)->frags;
-       /* Fill skb_frag_struct(s) with data from our receive */
+       /* Fill skb_frag_t(s) with data from our receive */
        for (i = 0, remainder = len; remainder > 0; i++) {
                myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
                skb_fill_page_desc(skb, i, rx->info[idx].page,
@@ -1365,7 +1365,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
 
        /* remove padding */
        rx_frags[0].page_offset += MXGEFW_PAD;
-       rx_frags[0].size -= MXGEFW_PAD;
+       skb_frag_size_sub(&rx_frags[0], MXGEFW_PAD);
        len -= MXGEFW_PAD;
 
        skb->len = len;
@@ -2628,7 +2628,7 @@ static netdev_tx_t myri10ge_xmit(struct sk_buff *skb,
        struct myri10ge_slice_state *ss;
        struct mcp_kreq_ether_send *req;
        struct myri10ge_tx_buf *tx;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        struct netdev_queue *netdev_queue;
        dma_addr_t bus;
        u32 low;
index 9903805717da4ebfb7f6a222eb4d9aafd8755753..6f97b554f7da7ab49782a8f5cd4b77b7aa48d56f 100644 (file)
@@ -975,7 +975,7 @@ static int nfp_net_prep_tx_meta(struct sk_buff *skb, u64 tls_handle)
 static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
 {
        struct nfp_net *nn = netdev_priv(netdev);
-       const struct skb_frag_struct *frag;
+       const skb_frag_t *frag;
        int f, nr_frags, wr_idx, md_bytes;
        struct nfp_net_tx_ring *tx_ring;
        struct nfp_net_r_vector *r_vec;
@@ -1155,7 +1155,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
        todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
 
        while (todo--) {
-               const struct skb_frag_struct *frag;
+               const skb_frag_t *frag;
                struct nfp_net_tx_buf *tx_buf;
                struct sk_buff *skb;
                int fidx, nr_frags;
@@ -1270,7 +1270,7 @@ static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
 static void
 nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
 {
-       const struct skb_frag_struct *frag;
+       const skb_frag_t *frag;
        struct netdev_queue *nd_q;
 
        while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
index 58e2eaf770148d142480f77c704e7359f28a8ee2..c692a41e45480686fcdfc32ead4a21c854555d1b 100644 (file)
@@ -1980,7 +1980,7 @@ netxen_map_tx_skb(struct pci_dev *pdev,
                struct sk_buff *skb, struct netxen_cmd_buffer *pbuf)
 {
        struct netxen_skb_frag *nf;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        int i, nr_frags;
        dma_addr_t map;
 
@@ -2043,7 +2043,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        struct pci_dev *pdev;
        int i, k;
        int delta = 0;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
 
        u32 producer;
        int frag_count;
index 14f26bf3b388bdce2913241d5790ec52617c249d..ac61f614de37926423f03602d74b7a0ae6d741ad 100644 (file)
@@ -581,7 +581,7 @@ static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
                             struct qlcnic_cmd_buffer *pbuf)
 {
        struct qlcnic_skb_frag *nf;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        int i, nr_frags;
        dma_addr_t map;
 
index 707665b62eb71190ddcd960f96c3e63385df238d..bebe38d74d6682790f009208bb4106027798054c 100644 (file)
@@ -1385,15 +1385,13 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt,
        }
 
        for (i = 0; i < nr_frags; i++) {
-               struct skb_frag_struct *frag;
-
-               frag = &skb_shinfo(skb)->frags[i];
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
                tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
-               tpbuf->length = frag->size;
-               tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
-                                              frag->page.p, frag->page_offset,
-                                              tpbuf->length, DMA_TO_DEVICE);
+               tpbuf->length = skb_frag_size(frag);
+               tpbuf->dma_addr = skb_frag_dma_map(adpt->netdev->dev.parent,
+                                                  frag, 0, tpbuf->length,
+                                                  DMA_TO_DEVICE);
                ret = dma_mapping_error(adpt->netdev->dev.parent,
                                        tpbuf->dma_addr);
                if (ret)
index 031cf9c3435a61f453b3aa8bc3af8a2057a389c2..8c4195a9a2cc625e0a616e39d4fabc3909f040e7 100644 (file)
@@ -503,7 +503,7 @@ static int xlgmac_map_tx_skb(struct xlgmac_channel *channel,
        struct xlgmac_desc_data *desc_data;
        unsigned int offset, datalen, len;
        struct xlgmac_pkt_info *pkt_info;
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        unsigned int tso, vlan;
        dma_addr_t skb_dma;
        unsigned int i;
index 1f8e9601592a679025cc1451b9c686c38ac3176f..a1f5a1e6104018cf9d54e6ca9a76a15b51c94c8b 100644 (file)
@@ -116,7 +116,7 @@ static void xlgmac_prep_tx_pkt(struct xlgmac_pdata *pdata,
                               struct sk_buff *skb,
                               struct xlgmac_pkt_info *pkt_info)
 {
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        unsigned int context_desc;
        unsigned int len;
        unsigned int i;
index 5d6960fe3309290fb9fb772b28a05916d8481193..0f8a924fc60c3e1d4df94641d602fc041906c4ec 100644 (file)
@@ -1501,7 +1501,7 @@ bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
        bdx_tx_db_inc_wptr(db);
 
        for (i = 0; i < nr_frags; i++) {
-               const struct skb_frag_struct *frag;
+               const skb_frag_t *frag;
 
                frag = &skb_shinfo(skb)->frags[i];
                db->wptr->len = skb_frag_size(frag);
index 72514c46b4786ae26ba6d16a2c6d32b9a2cfb078..ace7ffaf391307baa46e1cb81ceb1ecd2be2f25d 100644 (file)
@@ -1324,10 +1324,10 @@ static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
        total_len += skb_headlen(skb);
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               struct skb_frag_struct *f = &skb_shinfo(skb)->frags[i];
+               skb_frag_t *f = &skb_shinfo(skb)->frags[i];
 
                total_len += skb_frag_size(f);
-               sg_set_page(&urb->sg[i + s], f->page.p, f->size,
+               sg_set_page(&urb->sg[i + s], skb_frag_page(f), skb_frag_size(f),
                                f->page_offset);
        }
        urb->transfer_buffer_length = total_len;
index 2a1918f25e47bcdc976e85d7587f04003b86da71..03feaeae89cd3a92d5b940db58d898bed5c20bf2 100644 (file)
@@ -657,8 +657,7 @@ static void
 vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
                    struct vmxnet3_rx_buf_info *rbi)
 {
-       struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
-               skb_shinfo(skb)->nr_frags;
+       skb_frag_t *frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
 
        BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
 
@@ -755,7 +754,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
        }
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                u32 buf_size;
 
                buf_offset = 0;
@@ -956,7 +955,7 @@ static int txd_estimate(const struct sk_buff *skb)
        int i;
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
                count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
        }
index 74834131cf7c2ab2bacfdb80a1daf4a1cad79a3c..fd3b2b3d1b5c6d5ca802d36e74a7da4645ab6993 100644 (file)
@@ -1052,8 +1052,7 @@ static void wil_seq_print_skb(struct seq_file *s, struct sk_buff *skb)
        if (nr_frags) {
                seq_printf(s, "    nr_frags = %d\n", nr_frags);
                for (i = 0; i < nr_frags; i++) {
-                       const struct skb_frag_struct *frag =
-                                       &skb_shinfo(skb)->frags[i];
+                       const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
                        len = skb_frag_size(frag);
                        p = skb_frag_address_safe(frag);
index eae00aafaa88e9c6f901558307fb368d0425e939..8b01ef8269da3f96d6d80788136c32f079ce03b1 100644 (file)
@@ -1657,7 +1657,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
                                     len);
                } else {
                        frag = &skb_shinfo(skb)->frags[f];
-                       len = frag->size;
+                       len = skb_frag_size(frag);
                        wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
                }
 
@@ -1678,8 +1678,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
 
                        if (!headlen) {
                                pa = skb_frag_dma_map(dev, frag,
-                                                     frag->size - len, lenmss,
-                                                     DMA_TO_DEVICE);
+                                                     skb_frag_size(frag) - len,
+                                                     lenmss, DMA_TO_DEVICE);
                                vring->ctx[i].mapped_as = wil_mapped_as_page;
                        } else {
                                pa = dma_map_single(dev,
@@ -1900,8 +1900,7 @@ static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
 
        /* middle segments */
        for (; f < nr_frags; f++) {
-               const struct skb_frag_struct *frag =
-                               &skb_shinfo(skb)->frags[f];
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
                int len = skb_frag_size(frag);
 
                *_d = *d;
index dc040cd4ab064ed4915ff7b733484824c6d35cae..71b7ad4b645469aafcc834b86a0b744e8e524178 100644 (file)
@@ -1471,7 +1471,7 @@ static int __wil_tx_ring_tso_edma(struct wil6210_priv *wil,
        /* Rest of the descriptors are from the SKB fragments */
        for (f = 0; f < nr_frags; f++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
-               int len = frag->size;
+               int len = skb_frag_size(frag);
 
                wil_dbg_txrx(wil, "TSO: frag[%d]: len %u, descs_used %d\n", f,
                             len, descs_used);
index 1d9940d4e8c7d0b627d428589325c10012971af2..a96c5c2a2c5af5eb05004c731b55f20b7575fb40 100644 (file)
@@ -1055,7 +1055,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
                        int j;
                        skb->truesize += skb->data_len;
                        for (j = 0; j < i; j++)
-                               put_page(frags[j].page.p);
+                               put_page(skb_frag_page(&frags[j]));
                        return -ENOMEM;
                }
 
@@ -1067,7 +1067,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
                        BUG();
 
                offset += len;
-               frags[i].page.p = page;
+               __skb_frag_set_page(&frags[i], page);
                frags[i].page_offset = 0;
                skb_frag_size_set(&frags[i], len);
        }
index 4d0caeebc80210d3a130a2ec19574e0e1824b1bb..5aa0f1268bca54845610d7eaf3222b3e9868d604 100644 (file)
@@ -3515,7 +3515,7 @@ static int qeth_get_elements_for_frags(struct sk_buff *skb)
        int cnt, elements = 0;
 
        for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
-               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[cnt];
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
 
                elements += qeth_get_elements_for_range(
                        (addr_t)skb_frag_address(frag),
index ba4603d76284134fde962343b89dd6d1edf6cc96..d0550384cc38df4a6d64aad87d41fc7cde33b39a 100644 (file)
@@ -308,7 +308,7 @@ EXPORT_SYMBOL_GPL(fcoe_get_wwn);
 u32 fcoe_fc_crc(struct fc_frame *fp)
 {
        struct sk_buff *skb = fp_skb(fp);
-       struct skb_frag_struct *frag;
+       skb_frag_t *frag;
        unsigned char *data;
        unsigned long off, len, clen;
        u32 crc;
index 20f513fbaa85dd3f2aa475cdcbb5df3530e6e53a..cc12c78f73f1c8d0472caba7c31146a51dbc9b94 100644 (file)
@@ -280,11 +280,10 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
                hw_buffer.s.size = skb_headlen(skb);
                CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-                       struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;
+                       skb_frag_t *fs = skb_shinfo(skb)->frags + i;
 
                        hw_buffer.s.addr =
-                               XKPHYS_TO_PHYS((u64)(page_address(fs->page.p) +
-                                              fs->page_offset));
+                               XKPHYS_TO_PHYS((u64)skb_frag_address(fs));
-                       hw_buffer.s.size = fs->size;
+                       hw_buffer.s.size = skb_frag_size(fs);
                        CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
                }
index 9d4f1dab0968dd04d5ed75daf42f240dfe00c71c..b889b04a6e25e2538cda4fe647b8e2513b6ded54 100644 (file)
@@ -285,8 +285,8 @@ static int visor_copy_fragsinfo_from_skb(struct sk_buff *skb,
                        count = add_physinfo_entries(page_to_pfn(
                                  skb_frag_page(&skb_shinfo(skb)->frags[frag])),
                                  skb_shinfo(skb)->frags[frag].page_offset,
-                                 skb_shinfo(skb)->frags[frag].size, count,
-                                 frags_max, frags);
+                                 skb_frag_size(&skb_shinfo(skb)->frags[frag]),
+                                 count, frags_max, frags);
                        /* add_physinfo_entries only returns
                         * zero if the frags array is out of room
                         * That should never happen because we
index 24309d937d8cb775e6155b0fe3c39ac0b72b379a..93212b9fd31004f9aab3b902aa8cd0b5eb620a41 100644 (file)
@@ -899,9 +899,9 @@ cxgbit_handle_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
                skb_frag_t *dfrag = &ssi->frags[pdu_cb->dfrag_idx];
 
                sg_init_table(&ccmd->sg, 1);
-               sg_set_page(&ccmd->sg, dfrag->page.p, skb_frag_size(dfrag),
-                           dfrag->page_offset);
-               get_page(dfrag->page.p);
+               sg_set_page(&ccmd->sg, skb_frag_page(dfrag),
+                               skb_frag_size(dfrag), dfrag->page_offset);
+               get_page(skb_frag_page(dfrag));
 
                cmd->se_cmd.t_data_sg = &ccmd->sg;
                cmd->se_cmd.t_data_nents = 1;
@@ -1403,7 +1403,8 @@ static void cxgbit_lro_skb_dump(struct sk_buff *skb)
                        pdu_cb->ddigest, pdu_cb->frags);
        for (i = 0; i < ssi->nr_frags; i++)
                pr_info("skb 0x%p, frag %d, off %u, sz %u.\n",
-                       skb, i, ssi->frags[i].page_offset, ssi->frags[i].size);
+                       skb, i, ssi->frags[i].page_offset,
+                       skb_frag_size(&ssi->frags[i]));
 }
 
 static void cxgbit_lro_hskb_reset(struct cxgbit_sock *csk)
@@ -1447,7 +1448,7 @@ cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx)
                hpdu_cb->frags++;
                hpdu_cb->hfrag_idx = hfrag_idx;
 
-               len = hssi->frags[hfrag_idx].size;
+               len = skb_frag_size(&hssi->frags[hfrag_idx]);
                hskb->len += len;
                hskb->data_len += len;
                hskb->truesize += len;
@@ -1467,7 +1468,7 @@ cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx)
 
                        get_page(skb_frag_page(&hssi->frags[dfrag_idx]));
 
-                       len += hssi->frags[dfrag_idx].size;
+                       len += skb_frag_size(&hssi->frags[dfrag_idx]);
 
                        hssi->nr_frags++;
                        hpdu_cb->frags++;
index a032f01e928c5e71ce805c3aa51b563b82090b87..7f2b2ea9399c7f431ec6726c44a8b5ad74104fc4 100644 (file)
 struct bio_vec {
        struct page     *bv_page;
        unsigned int    bv_len;
-       unsigned int    bv_offset;
+       union {
+               __u32           page_offset;
+               unsigned int    bv_offset;
+       };
 };
 
 struct bvec_iter {
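
The anonymous union above is the transitional trick that lets skb code keep writing page_offset while bio_vec code keeps writing bv_offset: both names refer to the same storage. A minimal userspace sketch of that aliasing, using a simplified fake_bio_vec rather than the kernel header:

#include <assert.h>
#include <stdint.h>

/* Simplified stand-in for the transitional struct bio_vec; __u32 is uint32_t here. */
struct fake_bio_vec {
	void		*bv_page;	/* struct page * in the kernel */
	unsigned int	 bv_len;
	union {
		uint32_t	page_offset;	/* legacy skb_frag_t name */
		unsigned int	bv_offset;	/* bio_vec name */
	};
};

int main(void)
{
	struct fake_bio_vec v = { .bv_len = 4096 };

	v.page_offset = 128;		/* write through the old name ... */
	assert(v.bv_offset == 128);	/* ... read through the new one */
	return 0;
}
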
index d8af86d995d6fe0b7fe8afc3a21d7d8faac2d7ce..718742b1c50502f1891378621d3c622ff388c008 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/compiler.h>
 #include <linux/time.h>
 #include <linux/bug.h>
+#include <linux/bvec.h>
 #include <linux/cache.h>
 #include <linux/rbtree.h>
 #include <linux/socket.h>
@@ -308,20 +309,7 @@ extern int sysctl_max_skb_frags;
  */
 #define GSO_BY_FRAGS   0xFFFF
 
-typedef struct skb_frag_struct skb_frag_t;
-
-struct skb_frag_struct {
-       struct {
-               struct page *p;
-       } page;
-#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
-       __u32 page_offset;
-       __u32 size;
-#else
-       __u16 page_offset;
-       __u16 size;
-#endif
-};
+typedef struct bio_vec skb_frag_t;
 
 /**
  * skb_frag_size - Returns the size of a skb fragment
@@ -329,7 +317,7 @@ struct skb_frag_struct {
  */
 static inline unsigned int skb_frag_size(const skb_frag_t *frag)
 {
-       return frag->size;
+       return frag->bv_len;
 }
 
 /**
@@ -339,7 +327,7 @@ static inline unsigned int skb_frag_size(const skb_frag_t *frag)
  */
 static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
 {
-       frag->size = size;
+       frag->bv_len = size;
 }
 
 /**
@@ -349,7 +337,7 @@ static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
  */
 static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
 {
-       frag->size += delta;
+       frag->bv_len += delta;
 }
 
 /**
@@ -359,7 +347,7 @@ static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
  */
 static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
 {
-       frag->size -= delta;
+       frag->bv_len -= delta;
 }
 
 /**
@@ -379,7 +367,7 @@ static inline bool skb_frag_must_loop(struct page *p)
  *     skb_frag_foreach_page - loop over pages in a fragment
  *
  *     @f:             skb frag to operate on
- *     @f_off:         offset from start of f->page.p
+ *     @f_off:         offset from start of f->bv_page
  *     @f_len:         length from f_off to loop over
  *     @p:             (temp var) current page
  *     @p_off:         (temp var) offset from start of current page,
@@ -2089,7 +2077,7 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
         * that not all callers have unique ownership of the page but rely
         * on page_is_pfmemalloc doing the right thing(tm).
         */
-       frag->page.p              = page;
+       frag->bv_page             = page;
        frag->page_offset         = off;
        skb_frag_size_set(frag, size);
 
@@ -2877,7 +2865,7 @@ static inline void skb_propagate_pfmemalloc(struct page *page,
  */
 static inline struct page *skb_frag_page(const skb_frag_t *frag)
 {
-       return frag->page.p;
+       return frag->bv_page;
 }
 
 /**
@@ -2963,7 +2951,7 @@ static inline void *skb_frag_address_safe(const skb_frag_t *frag)
  */
 static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
 {
-       frag->page.p = page;
+       frag->bv_page = page;
 }
 
 /**
@@ -3166,7 +3154,7 @@ static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
        if (skb_zcopy(skb))
                return false;
        if (i) {
-               const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
 
                return page == skb_frag_page(frag) &&
                       off == frag->page_offset + skb_frag_size(frag);
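
With the skbuff.h changes above, fragment page and size are reached only through helpers, so the layout can keep evolving underneath (the page_offset to bv_offset rename came later) without touching callers again; only the offset is still written directly at this point in the series. A hedged sketch of that fill-through-helpers pattern, with fake_* stand-ins rather than the kernel API:

#include <stdio.h>

/* Illustrative stand-ins for skb_frag_t and its helpers; not the kernel API. */
typedef struct { const void *bv_page; unsigned int bv_len; unsigned int bv_offset; } fake_frag_t;

static void fake_frag_set_page(fake_frag_t *f, const void *page) { f->bv_page = page; }
static void fake_frag_size_set(fake_frag_t *f, unsigned int size) { f->bv_len = size; }
static unsigned int fake_frag_size(const fake_frag_t *f) { return f->bv_len; }

static void fake_fill_page_desc(fake_frag_t *f, const void *page,
				unsigned int off, unsigned int size)
{
	/* Same shape as __skb_fill_page_desc(): page and size go through helpers,
	 * the offset is still assigned directly. */
	fake_frag_set_page(f, page);
	f->bv_offset = off;
	fake_frag_size_set(f, size);
}

int main(void)
{
	static const char page[4096];
	fake_frag_t frag;

	fake_fill_page_desc(&frag, page, 128, 1000);
	printf("size=%u off=%u\n", fake_frag_size(&frag), frag.bv_offset);
	return 0;
}
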
index 0338820ee0ec67ce5b399b629984a0d05033e8c5..0b788df5a75b8abe33dfe425be9a20cb8ad04c11 100644 (file)
@@ -2485,19 +2485,19 @@ int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
        for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
                skb_frag_t *frag  = &skb_shinfo(skb)->frags[fragidx];
 
-               if (offset < frag->size)
+               if (offset < skb_frag_size(frag))
                        break;
 
-               offset -= frag->size;
+               offset -= skb_frag_size(frag);
        }
 
        for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
                skb_frag_t *frag  = &skb_shinfo(skb)->frags[fragidx];
 
-               slen = min_t(size_t, len, frag->size - offset);
+               slen = min_t(size_t, len, skb_frag_size(frag) - offset);
 
                while (slen) {
-                       ret = kernel_sendpage_locked(sk, frag->page.p,
+                       ret = kernel_sendpage_locked(sk, skb_frag_page(frag),
                                                     frag->page_offset + offset,
                                                     slen, MSG_DONTWAIT);
                        if (ret <= 0)
@@ -2975,11 +2975,15 @@ skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
        skb_zerocopy_clone(to, from, GFP_ATOMIC);
 
        for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
+               int size;
+
                if (!len)
                        break;
                skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
-               skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
-               len -= skb_shinfo(to)->frags[j].size;
+               size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]),
+                                       len);
+               skb_frag_size_set(&skb_shinfo(to)->frags[j], size);
+               len -= size;
                skb_frag_ref(to, j);
                j++;
        }
@@ -3293,7 +3297,7 @@ static int skb_prepare_for_shift(struct sk_buff *skb)
 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
 {
        int from, to, merge, todo;
-       struct skb_frag_struct *fragfrom, *fragto;
+       skb_frag_t *fragfrom, *fragto;
 
        BUG_ON(shiftlen > skb->len);
 
@@ -3360,7 +3364,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
 
                } else {
                        __skb_frag_ref(fragfrom);
-                       fragto->page = fragfrom->page;
+                       fragto->bv_page = fragfrom->bv_page;
                        fragto->page_offset = fragfrom->page_offset;
                        skb_frag_size_set(fragto, todo);
 
@@ -3625,10 +3629,10 @@ static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
        struct page *page;
 
        page = virt_to_head_page(frag_skb->head);
-       head_frag.page.p = page;
+       __skb_frag_set_page(&head_frag, page);
        head_frag.page_offset = frag_skb->data -
                (unsigned char *)page_address(page);
-       head_frag.size = skb_headlen(frag_skb);
+       skb_frag_size_set(&head_frag, skb_headlen(frag_skb));
        return head_frag;
 }
 
@@ -4021,7 +4025,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
 
                pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
 
-               frag->page.p      = page;
+               __skb_frag_set_page(frag, page);
                frag->page_offset = first_offset;
                skb_frag_size_set(frag, first_size);
 
index 43f4eba6193397cce5d0e87e3de4e37e294b02c7..d4d5c077ad7293aa71c3a64f67629e1079060227 100644 (file)
@@ -55,8 +55,8 @@ void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size)
                skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];
 
                /* Move to next segment */
-               tso->size = frag->size;
-               tso->data = page_address(frag->page.p) + frag->page_offset;
+               tso->size = skb_frag_size(frag);
+               tso->data = skb_frag_address(frag);
                tso->next_frag_idx++;
        }
 }
@@ -79,8 +79,8 @@ void tso_start(struct sk_buff *skb, struct tso_t *tso)
                skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];
 
                /* Move to next segment */
-               tso->size = frag->size;
-               tso->data = page_address(frag->page.p) + frag->page_offset;
+               tso->size = skb_frag_size(frag);
+               tso->data = skb_frag_address(frag);
                tso->next_frag_idx++;
        }
 }
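
The tso.c change is behavior-preserving because skb_frag_address() computes exactly the pointer the open-coded expression did: the page's mapped address plus the fragment offset. A small userspace sketch of that equivalence, with a fake_frag struct and a char-array "page" standing in for the kernel types:

#include <assert.h>

/* Illustrative stand-ins; page_address()/struct page are replaced by a plain buffer. */
struct fake_frag { char *page; unsigned int offset; unsigned int len; };

static void *fake_frag_address(const struct fake_frag *f)
{
	return f->page + f->offset;	/* page_address(page) + page_offset in the kernel */
}

int main(void)
{
	static char page[4096];
	struct fake_frag f = { .page = page, .offset = 64, .len = 1000 };

	/* The helper yields the same pointer as the open-coded form it replaces. */
	assert(fake_frag_address(&f) == (void *)(page + 64));
	return 0;
}
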
index 776905899ac06bcbaa7ece1f580303478e736d56..f62f0e7e3cdd370233faaef6d81c6415ade0f5b6 100644 (file)
@@ -1776,19 +1776,21 @@ static int tcp_zerocopy_receive(struct sock *sk,
                                break;
                        frags = skb_shinfo(skb)->frags;
                        while (offset) {
-                               if (frags->size > offset)
+                               if (skb_frag_size(frags) > offset)
                                        goto out;
-                               offset -= frags->size;
+                               offset -= skb_frag_size(frags);
                                frags++;
                        }
                }
-               if (frags->size != PAGE_SIZE || frags->page_offset) {
+               if (skb_frag_size(frags) != PAGE_SIZE || frags->page_offset) {
                        int remaining = zc->recv_skip_hint;
+                       int size = skb_frag_size(frags);
 
-                       while (remaining && (frags->size != PAGE_SIZE ||
+                       while (remaining && (size != PAGE_SIZE ||
                                             frags->page_offset)) {
-                               remaining -= frags->size;
+                               remaining -= size;
                                frags++;
+                               size = skb_frag_size(frags);
                        }
                        zc->recv_skip_hint -= remaining;
                        break;
@@ -3781,7 +3783,7 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
                return 1;
 
        for (i = 0; i < shi->nr_frags; ++i) {
-               const struct skb_frag_struct *f = &shi->frags[i];
+               const skb_frag_t *f = &shi->frags[i];
                unsigned int offset = f->page_offset;
                struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
 
index 5dbc0c48f8cb61c819fc1ced07890bef1016962e..05f63c4300e973379789c6be10186198a152c2e5 100644 (file)
@@ -635,15 +635,15 @@ static int kcm_write_msgs(struct kcm_sock *kcm)
                        frag_offset = 0;
 do_frag:
                        frag = &skb_shinfo(skb)->frags[fragidx];
-                       if (WARN_ON(!frag->size)) {
+                       if (WARN_ON(!skb_frag_size(frag))) {
                                ret = -EINVAL;
                                goto out;
                        }
 
                        ret = kernel_sendpage(psock->sk->sk_socket,
-                                             frag->page.p,
+                                             skb_frag_page(frag),
                                              frag->page_offset + frag_offset,
-                                             frag->size - frag_offset,
+                                             skb_frag_size(frag) - frag_offset,
                                              MSG_DONTWAIT);
                        if (ret <= 0) {
                                if (ret == -EAGAIN) {
@@ -678,7 +678,7 @@ static int kcm_write_msgs(struct kcm_sock *kcm)
                        sent += ret;
                        frag_offset += ret;
                        KCM_STATS_ADD(psock->stats.tx_bytes, ret);
-                       if (frag_offset < frag->size) {
+                       if (frag_offset < skb_frag_size(frag)) {
                                /* Not finished with this frag */
                                goto do_frag;
                        }
index 7c0b2b778703f7080c28d4919084390d4da05e7c..4ec8a06fa5d1fc94b0ee6910c4c258758f8ef0be 100644 (file)
@@ -243,14 +243,14 @@ static void tls_append_frag(struct tls_record_info *record,
        skb_frag_t *frag;
 
        frag = &record->frags[record->num_frags - 1];
-       if (frag->page.p == pfrag->page &&
-           frag->page_offset + frag->size == pfrag->offset) {
-               frag->size += size;
+       if (skb_frag_page(frag) == pfrag->page &&
+           frag->page_offset + skb_frag_size(frag) == pfrag->offset) {
+               skb_frag_size_add(frag, size);
        } else {
                ++frag;
-               frag->page.p = pfrag->page;
+               __skb_frag_set_page(frag, pfrag->page);
                frag->page_offset = pfrag->offset;
-               frag->size = size;
+               skb_frag_size_set(frag, size);
                ++record->num_frags;
                get_page(pfrag->page);
        }
@@ -301,8 +301,8 @@ static int tls_push_record(struct sock *sk,
                frag = &record->frags[i];
                sg_unmark_end(&offload_ctx->sg_tx_data[i]);
                sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
-                           frag->size, frag->page_offset);
-               sk_mem_charge(sk, frag->size);
+                           skb_frag_size(frag), frag->page_offset);
+               sk_mem_charge(sk, skb_frag_size(frag));
                get_page(skb_frag_page(frag));
        }
        sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);
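
The tls_append_frag() hunk above shows the grow-or-append pattern the accessors now express: if the new chunk is physically contiguous with the last fragment, grow it with skb_frag_size_add(); otherwise start a new fragment with __skb_frag_set_page() and skb_frag_size_set(). A minimal sketch of that pattern, with all names illustrative rather than the kernel API:

#include <stdio.h>

struct frag { unsigned int page_id; unsigned int offset; unsigned int len; };

static unsigned int nfrags;
static struct frag frags[8];

static void append(unsigned int page_id, unsigned int offset, unsigned int len)
{
	struct frag *last = &frags[nfrags ? nfrags - 1 : 0];

	if (nfrags && last->page_id == page_id &&
	    last->offset + last->len == offset) {
		last->len += len;		/* like skb_frag_size_add() */
	} else {
		struct frag *f = &frags[nfrags++];

		f->page_id = page_id;		/* like __skb_frag_set_page() */
		f->offset = offset;
		f->len = len;			/* like skb_frag_size_set() */
	}
}

int main(void)
{
	append(1, 0, 100);
	append(1, 100, 50);	/* contiguous: coalesced into frag 0 */
	append(2, 0, 200);	/* new page: second fragment */
	printf("nfrags=%u first_len=%u\n", nfrags, frags[0].len);	/* nfrags=2 first_len=150 */
	return 0;
}
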