1 /* Copyright 2008 - 2016 Freescale Semiconductor Inc.
3 * Redistribution and use in source and binary forms, with or without
4 * modification, are permitted provided that the following conditions are met:
5 * * Redistributions of source code must retain the above copyright
6 * notice, this list of conditions and the following disclaimer.
7 * * Redistributions in binary form must reproduce the above copyright
8 * notice, this list of conditions and the following disclaimer in the
9 * documentation and/or other materials provided with the distribution.
10 * * Neither the name of Freescale Semiconductor nor the
11 * names of its contributors may be used to endorse or promote products
12 * derived from this software without specific prior written permission.
14 * ALTERNATIVELY, this software may be distributed under the terms of the
15 * GNU General Public License ("GPL") as published by the Free Software
16 * Foundation, either version 2 of that License or (at your option) any
19 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
20 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
23 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33 #include <linux/init.h>
34 #include <linux/module.h>
35 #include <linux/of_platform.h>
36 #include <linux/of_mdio.h>
37 #include <linux/of_net.h>
39 #include <linux/if_arp.h>
40 #include <linux/if_vlan.h>
41 #include <linux/icmp.h>
43 #include <linux/ipv6.h>
44 #include <linux/udp.h>
45 #include <linux/tcp.h>
46 #include <linux/net.h>
47 #include <linux/skbuff.h>
48 #include <linux/etherdevice.h>
49 #include <linux/if_ether.h>
50 #include <linux/highmem.h>
51 #include <linux/percpu.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/sort.h>
54 #include <soc/fsl/bman.h>
55 #include <soc/fsl/qman.h>
58 #include "fman_port.h"
62 /* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa files
63 * using these trace events only need to #include the dpaa_eth_trace.h header
65 #define CREATE_TRACE_POINTS
66 #include "dpaa_eth_trace.h"
68 static int debug = -1;
69 module_param(debug, int, 0444);
70 MODULE_PARM_DESC(debug, "Module/Driver verbosity level (0=none,...,16=all)");
72 static u16 tx_timeout = 1000;
73 module_param(tx_timeout, ushort, 0444);
74 MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
76 #define FM_FD_STAT_RX_ERRORS \
77 (FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL | \
78 FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \
79 FM_FD_ERR_EXTRACTION | FM_FD_ERR_NO_SCHEME | \
80 FM_FD_ERR_PRS_TIMEOUT | FM_FD_ERR_PRS_ILL_INSTRUCT | \
81 FM_FD_ERR_PRS_HDR_ERR)
83 #define FM_FD_STAT_TX_ERRORS \
84 (FM_FD_ERR_UNSUPPORTED_FORMAT | \
85 FM_FD_ERR_LENGTH | FM_FD_ERR_DMA)
87 #define DPAA_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
88 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
91 #define DPAA_INGRESS_CS_THRESHOLD 0x10000000
92 /* Ingress congestion threshold on FMan ports
93 * The size in bytes of the ingress tail-drop threshold on these ports;
94 * traffic piling up above this value will be rejected by QMan and discarded
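 * (for scale: 0x10000000 bytes is 256 MiB of ingress data queued before
 * QMan starts tail-dropping)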
98 /* Size in bytes of the FQ taildrop threshold */
99 #define DPAA_FQ_TD 0x200000
101 #define DPAA_CS_THRESHOLD_1G 0x06000000
102 /* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000
103 * The size in bytes of the egress Congestion State notification threshold on
104 * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a
105 * tight loop (e.g. by sending UDP datagrams at "while(1) speed"),
106 * and the larger the frame size, the more acute the problem.
107 * So we have to find a balance between these factors:
108 * - avoiding the device staying congested for a prolonged time (risking
109 * the netdev watchdog firing - see also the tx_timeout module param);
110 * - affecting performance of protocols such as TCP, which otherwise
111 * behave well under the congestion notification mechanism;
112 * - preventing the Tx cores from tightly-looping (as if the congestion
113 * threshold was too low to be effective);
114 * - running out of memory if the CS threshold is set too high.
117 #define DPAA_CS_THRESHOLD_10G 0x10000000
118 /* The size in bytes of the egress Congestion State notification threshold on
119 * 10G ports, range 0x1000 .. 0x10000000
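 * (for scale: 0x06000000 is 96 MiB and 0x10000000 is 256 MiB of
 * not-yet-transmitted egress data before the CGR signals congestion)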
122 /* Largest value that the FQD's OAL field can hold */
123 #define FSL_QMAN_MAX_OAL 127
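/* (The 127-byte cap suggests a 7-bit OAL field in the FQD; dpaa_fq_init()
 * below clamps its per-frame overhead estimate to this value through
 * qm_fqd_set_oal().)
 */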
125 /* Default alignment for start of data in an Rx FD */
126 #define DPAA_FD_DATA_ALIGNMENT 16
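/* FMan DMA wants the start of Rx frame data 16-byte aligned; this value is
 * programmed into the port buffer prefix (see the data_align assignments in
 * dpaa_eth_init_rx_port()/dpaa_eth_init_tx_port() below).
 */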
128 /* Values for the L3R field of the FM Parse Results
130 /* L3 Type field: First IP Present IPv4 */
131 #define FM_L3_PARSE_RESULT_IPV4 0x8000
132 /* L3 Type field: First IP Present IPv6 */
133 #define FM_L3_PARSE_RESULT_IPV6 0x4000
134 /* Values for the L4R field of the FM Parse Results */
135 /* L4 Type field: UDP */
136 #define FM_L4_PARSE_RESULT_UDP 0x40
137 /* L4 Type field: TCP */
138 #define FM_L4_PARSE_RESULT_TCP 0x20
140 #define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
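/* This limit caps both the number of fragments dpaa_start_xmit() will hand
 * to FMan in a scatter/gather FD and the number of S/G entries walked by
 * sg_fd_to_skb() on the Rx path.
 */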
141 #define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
143 #define FSL_DPAA_BPID_INV 0xff
144 #define FSL_DPAA_ETH_MAX_BUF_COUNT 128
145 #define FSL_DPAA_ETH_REFILL_THRESHOLD 80
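/* Per-CPU buffer accounting: each CPU's pool count is seeded and then kept
 * topped up to FSL_DPAA_ETH_MAX_BUF_COUNT; whenever it drops below
 * FSL_DPAA_ETH_REFILL_THRESHOLD on the Rx path, buffers are added eight at
 * a time (see dpaa_eth_refill_bpool() and dpaa_bp_add_8_bufs()).
 */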
147 #define DPAA_TX_PRIV_DATA_SIZE 16
148 #define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
149 #define DPAA_TIME_STAMP_SIZE 8
150 #define DPAA_HASH_RESULTS_SIZE 8
151 #define DPAA_RX_PRIV_DATA_SIZE (u16)(DPAA_TX_PRIV_DATA_SIZE + \
152 dpaa_rx_extra_headroom)
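/* The prefix sizes above (private data, parse results, timestamp, hash
 * results) describe what FMan lays out in front of the frame data; together
 * they feed into the tx_headroom/rx_headroom the driver reserves per frame.
 */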
154 #define DPAA_ETH_RX_QUEUES 128
156 #define DPAA_ENQUEUE_RETRIES 100000
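/* Upper bound on how many times dpaa_xmit() re-attempts qman_enqueue()
 * before giving up and counting the frame as a Tx error.
 */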
158 enum port_type {RX, TX};
161 struct dpaa_fq *tx_defq;
162 struct dpaa_fq *tx_errq;
163 struct dpaa_fq *rx_defq;
164 struct dpaa_fq *rx_errq;
167 /* All the dpa bps in use at any moment */
168 static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];
170 /* The raw buffer size must be cacheline aligned */
171 #define DPAA_BP_RAW_SIZE 4096
172 /* When using more than one buffer pool, the raw sizes are as follows:
175 * 3 bp: 1KB, 2KB, 4KB
176 * 4 bp: 1KB, 2KB, 4KB, 8KB
178 static inline size_t bpool_buffer_raw_size(u8 index, u8 cnt)
180 size_t res = DPAA_BP_RAW_SIZE / 4;
183 for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++)
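/* Worked example, assuming each loop iteration doubles res (as the size
 * table above implies): with DPAA_BP_RAW_SIZE = 4096 and cnt = 3 pools,
 * res starts at 1024 and indices 0, 1, 2 end up with raw sizes of
 * 1KB, 2KB and 4KB respectively.
 */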
188 /* FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
189 * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
190 * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
191 * half-page-aligned buffers, so we reserve some more space for start-of-buffer
194 #define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD((raw_size) - SMP_CACHE_BYTES)
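/* For example, a 4096-byte raw buffer on a system with 64-byte cache lines
 * yields 4096 - 64 - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) usable
 * bytes, since SKB_WITH_OVERHEAD() subtracts the aligned skb_shared_info
 * footprint (the exact figure is architecture dependent).
 */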
196 static int dpaa_max_frm;
198 static int dpaa_rx_extra_headroom;
200 #define dpaa_get_max_mtu() \
201 (dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN))
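/* For example, with a 1522-byte maximum frame size this evaluates to
 * 1522 - (18 + 4) = 1500, the standard Ethernet MTU (VLAN_ETH_HLEN is 18
 * and ETH_FCS_LEN is 4); dpaa_max_frm itself comes from the FMan
 * configuration at module init.
 */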
203 static int dpaa_netdev_init(struct net_device *net_dev,
204 const struct net_device_ops *dpaa_ops,
207 struct dpaa_priv *priv = netdev_priv(net_dev);
208 struct device *dev = net_dev->dev.parent;
209 struct dpaa_percpu_priv *percpu_priv;
213 /* Although we access another CPU's private data here
214 * we do it only at initialization time, so it is safe
216 for_each_possible_cpu(i) {
217 percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
218 percpu_priv->net_dev = net_dev;
221 net_dev->netdev_ops = dpaa_ops;
222 mac_addr = priv->mac_dev->addr;
224 net_dev->mem_start = priv->mac_dev->res->start;
225 net_dev->mem_end = priv->mac_dev->res->end;
227 net_dev->min_mtu = ETH_MIN_MTU;
228 net_dev->max_mtu = dpaa_get_max_mtu();
230 net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
233 net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
234 /* The kernel enables GSO automatically if we declare NETIF_F_SG.
235 * For conformity, we'll still declare GSO explicitly.
237 net_dev->features |= NETIF_F_GSO;
239 net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
240 /* we do not want shared skbs on TX */
241 net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
243 net_dev->features |= net_dev->hw_features;
244 net_dev->vlan_features = net_dev->features;
246 memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
247 memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
249 net_dev->ethtool_ops = &dpaa_ethtool_ops;
251 net_dev->needed_headroom = priv->tx_headroom;
252 net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
254 /* start without the RUNNING flag, phylib controls it later */
255 netif_carrier_off(net_dev);
257 err = register_netdev(net_dev);
259 dev_err(dev, "register_netdev() = %d\n", err);
266 static int dpaa_stop(struct net_device *net_dev)
268 struct mac_device *mac_dev;
269 struct dpaa_priv *priv;
272 priv = netdev_priv(net_dev);
273 mac_dev = priv->mac_dev;
275 netif_tx_stop_all_queues(net_dev);
276 /* Allow the Fman (Tx) port to process in-flight frames before we
277 * try switching it off.
279 usleep_range(5000, 10000);
281 err = mac_dev->stop(mac_dev);
283 netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n",
286 for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
287 error = fman_port_disable(mac_dev->port[i]);
293 phy_disconnect(net_dev->phydev);
294 net_dev->phydev = NULL;
299 static void dpaa_tx_timeout(struct net_device *net_dev)
301 struct dpaa_percpu_priv *percpu_priv;
302 const struct dpaa_priv *priv;
304 priv = netdev_priv(net_dev);
305 percpu_priv = this_cpu_ptr(priv->percpu_priv);
307 netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n",
308 jiffies_to_msecs(jiffies - dev_trans_start(net_dev)));
310 percpu_priv->stats.tx_errors++;
313 /* Calculates the statistics for the given device by adding the statistics
314 * collected by each CPU.
316 static struct rtnl_link_stats64 *dpaa_get_stats64(struct net_device *net_dev,
317 struct rtnl_link_stats64 *s)
319 int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
320 struct dpaa_priv *priv = netdev_priv(net_dev);
321 struct dpaa_percpu_priv *percpu_priv;
322 u64 *netstats = (u64 *)s;
326 for_each_possible_cpu(i) {
327 percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
329 cpustats = (u64 *)&percpu_priv->stats;
331 /* add stats from all CPUs */
332 for (j = 0; j < numstats; j++)
333 netstats[j] += cpustats[j];
339 static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
341 struct platform_device *of_dev;
342 struct dpaa_eth_data *eth_data;
343 struct device *dpaa_dev, *dev;
344 struct device_node *mac_node;
345 struct mac_device *mac_dev;
347 dpaa_dev = &pdev->dev;
348 eth_data = dpaa_dev->platform_data;
350 return ERR_PTR(-ENODEV);
352 mac_node = eth_data->mac_node;
354 of_dev = of_find_device_by_node(mac_node);
356 dev_err(dpaa_dev, "of_find_device_by_node(%s) failed\n",
357 mac_node->full_name);
358 of_node_put(mac_node);
359 return ERR_PTR(-EINVAL);
361 of_node_put(mac_node);
365 mac_dev = dev_get_drvdata(dev);
367 dev_err(dpaa_dev, "dev_get_drvdata(%s) failed\n",
369 return ERR_PTR(-EINVAL);
375 static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
377 const struct dpaa_priv *priv;
378 struct mac_device *mac_dev;
379 struct sockaddr old_addr;
382 priv = netdev_priv(net_dev);
384 memcpy(old_addr.sa_data, net_dev->dev_addr, ETH_ALEN);
386 err = eth_mac_addr(net_dev, addr);
388 netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err);
392 mac_dev = priv->mac_dev;
394 err = mac_dev->change_addr(mac_dev->fman_mac,
395 (enet_addr_t *)net_dev->dev_addr);
397 netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n",
399 /* reverting to previous address */
400 eth_mac_addr(net_dev, &old_addr);
408 static void dpaa_set_rx_mode(struct net_device *net_dev)
410 const struct dpaa_priv *priv;
413 priv = netdev_priv(net_dev);
415 if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
416 priv->mac_dev->promisc = !priv->mac_dev->promisc;
417 err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac,
418 priv->mac_dev->promisc);
420 netif_err(priv, drv, net_dev,
421 "mac_dev->set_promisc() = %d\n",
425 err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
427 netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
431 static struct dpaa_bp *dpaa_bpid2pool(int bpid)
433 if (WARN_ON(bpid < 0 || bpid >= BM_MAX_NUM_OF_POOLS))
436 return dpaa_bp_array[bpid];
439 /* checks if this bpool is already allocated */
440 static bool dpaa_bpid2pool_use(int bpid)
442 if (dpaa_bpid2pool(bpid)) {
443 atomic_inc(&dpaa_bp_array[bpid]->refs);
450 /* called only once per bpid by dpaa_bp_alloc_pool() */
451 static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp)
453 dpaa_bp_array[bpid] = dpaa_bp;
454 atomic_set(&dpaa_bp->refs, 1);
457 static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp)
461 if (dpaa_bp->size == 0 || dpaa_bp->config_count == 0) {
462 pr_err("%s: Buffer pool is not properly initialized! Missing size or initial number of buffers\n",
467 /* If the pool is already specified, we only create one per bpid */
468 if (dpaa_bp->bpid != FSL_DPAA_BPID_INV &&
469 dpaa_bpid2pool_use(dpaa_bp->bpid))
472 if (dpaa_bp->bpid == FSL_DPAA_BPID_INV) {
473 dpaa_bp->pool = bman_new_pool();
474 if (!dpaa_bp->pool) {
475 pr_err("%s: bman_new_pool() failed\n",
480 dpaa_bp->bpid = (u8)bman_get_bpid(dpaa_bp->pool);
483 if (dpaa_bp->seed_cb) {
484 err = dpaa_bp->seed_cb(dpaa_bp);
486 goto pool_seed_failed;
489 dpaa_bpid2pool_map(dpaa_bp->bpid, dpaa_bp);
494 pr_err("%s: pool seeding failed\n", __func__);
495 bman_free_pool(dpaa_bp->pool);
500 /* remove and free all the buffers from the given buffer pool */
501 static void dpaa_bp_drain(struct dpaa_bp *bp)
507 struct bm_buffer bmb[8];
510 ret = bman_acquire(bp->pool, bmb, num);
513 /* we have fewer than 8 buffers left;
514 * drain them one by one
520 /* Pool is fully drained */
526 for (i = 0; i < num; i++)
527 bp->free_buf_cb(bp, &bmb[i]);
531 static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
533 struct dpaa_bp *bp = dpaa_bpid2pool(dpaa_bp->bpid);
535 /* the mapping between bpid and dpaa_bp is done very late in the
536 * allocation procedure; if something failed before the mapping, the bp
537 * was not configured, therefore we don't need the instructions below
542 if (!atomic_dec_and_test(&bp->refs))
548 dpaa_bp_array[bp->bpid] = NULL;
549 bman_free_pool(bp->pool);
552 static void dpaa_bps_free(struct dpaa_priv *priv)
556 for (i = 0; i < DPAA_BPS_NUM; i++)
557 dpaa_bp_free(priv->dpaa_bps[i]);
560 /* Use multiple WQs for FQ assignment:
561 * - Tx Confirmation queues go to WQ1.
562 * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance
563 * to be scheduled, in case there are many more FQs in WQ3).
564 * - Rx Default and Tx queues go to WQ3 (no differentiation between
565 * Rx and Tx traffic).
566 * This ensures that Tx-confirmed buffers are released in a timely manner. In particular,
567 * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
568 * are greatly outnumbered by other FQs in the system, while
569 * dequeue scheduling is round-robin.
571 static inline void dpaa_assign_wq(struct dpaa_fq *fq)
573 switch (fq->fq_type) {
574 case FQ_TYPE_TX_CONFIRM:
575 case FQ_TYPE_TX_CONF_MQ:
576 fq->wq = 1;
577 break;
578 case FQ_TYPE_RX_ERROR:
579 case FQ_TYPE_TX_ERROR:
580 fq->wq = 2;
581 break;
582 case FQ_TYPE_RX_DEFAULT:
583 case FQ_TYPE_TX:
584 fq->wq = 3;
585 break;
586 default:
587 WARN(1, "Invalid FQ type %d for FQID %d!\n",
588 fq->fq_type, fq->fqid);
592 static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
593 u32 start, u32 count,
594 struct list_head *list,
595 enum dpaa_fq_type fq_type)
597 struct dpaa_fq *dpaa_fq;
600 dpaa_fq = devm_kzalloc(dev, sizeof(*dpaa_fq) * count,
605 for (i = 0; i < count; i++) {
606 dpaa_fq[i].fq_type = fq_type;
607 dpaa_fq[i].fqid = start ? start + i : 0;
608 list_add_tail(&dpaa_fq[i].list, list);
611 for (i = 0; i < count; i++)
612 dpaa_assign_wq(dpaa_fq + i);
617 static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
618 struct fm_port_fqs *port_fqs)
620 struct dpaa_fq *dpaa_fq;
622 dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_ERROR);
624 goto fq_alloc_failed;
626 port_fqs->rx_errq = &dpaa_fq[0];
628 dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_DEFAULT);
630 goto fq_alloc_failed;
632 port_fqs->rx_defq = &dpaa_fq[0];
634 if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ))
635 goto fq_alloc_failed;
637 dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR);
639 goto fq_alloc_failed;
641 port_fqs->tx_errq = &dpaa_fq[0];
643 dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_CONFIRM);
645 goto fq_alloc_failed;
647 port_fqs->tx_defq = &dpaa_fq[0];
649 if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX))
650 goto fq_alloc_failed;
655 dev_err(dev, "dpaa_fq_alloc() failed\n");
659 static u32 rx_pool_channel;
660 static DEFINE_SPINLOCK(rx_pool_channel_init);
662 static int dpaa_get_channel(void)
664 spin_lock(&rx_pool_channel_init);
665 if (!rx_pool_channel) {
669 ret = qman_alloc_pool(&pool);
672 rx_pool_channel = pool;
674 spin_unlock(&rx_pool_channel_init);
675 if (!rx_pool_channel)
677 return rx_pool_channel;
680 static void dpaa_release_channel(void)
682 qman_release_pool(rx_pool_channel);
685 static void dpaa_eth_add_channel(u16 channel)
687 u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
688 const cpumask_t *cpus = qman_affine_cpus();
689 struct qman_portal *portal;
692 for_each_cpu(cpu, cpus) {
693 portal = qman_get_affine_portal(cpu);
694 qman_p_static_dequeue_add(portal, pool);
698 /* Congestion group state change notification callback.
699 * Stops the device's egress queues while they are congested and
700 * wakes them upon exiting congested state.
701 * Also updates some CGR-related stats.
703 static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
706 struct dpaa_priv *priv = (struct dpaa_priv *)container_of(cgr,
707 struct dpaa_priv, cgr_data.cgr);
710 priv->cgr_data.congestion_start_jiffies = jiffies;
711 netif_tx_stop_all_queues(priv->net_dev);
712 priv->cgr_data.cgr_congested_count++;
714 priv->cgr_data.congested_jiffies +=
715 (jiffies - priv->cgr_data.congestion_start_jiffies);
716 netif_tx_wake_all_queues(priv->net_dev);
720 static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
722 struct qm_mcc_initcgr initcgr;
726 err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
728 if (netif_msg_drv(priv))
729 pr_err("%s: Error %d allocating CGR ID\n",
733 priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
735 /* Enable Congestion State Change Notifications and CS taildrop */
736 memset(&initcgr, 0, sizeof(initcgr));
737 initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
738 initcgr.cgr.cscn_en = QM_CGR_EN;
740 /* Set different thresholds based on the MAC speed.
741 * This may turn out to be suboptimal if the MAC is reconfigured at a speed
742 * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
743 * In such cases, we ought to reconfigure the threshold, too.
745 if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
746 cs_th = DPAA_CS_THRESHOLD_10G;
748 cs_th = DPAA_CS_THRESHOLD_1G;
749 qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
751 initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
752 initcgr.cgr.cstd_en = QM_CGR_EN;
754 err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
757 if (netif_msg_drv(priv))
758 pr_err("%s: Error %d creating CGR with ID %d\n",
759 __func__, err, priv->cgr_data.cgr.cgrid);
760 qman_release_cgrid(priv->cgr_data.cgr.cgrid);
763 if (netif_msg_drv(priv))
764 pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
765 priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
766 priv->cgr_data.cgr.chan);
772 static inline void dpaa_setup_ingress(const struct dpaa_priv *priv,
774 const struct qman_fq *template)
776 fq->fq_base = *template;
777 fq->net_dev = priv->net_dev;
779 fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
780 fq->channel = priv->channel;
783 static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
785 struct fman_port *port,
786 const struct qman_fq *template)
788 fq->fq_base = *template;
789 fq->net_dev = priv->net_dev;
792 fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
793 fq->channel = (u16)fman_port_get_qman_channel_id(port);
795 fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
799 static void dpaa_fq_setup(struct dpaa_priv *priv,
800 const struct dpaa_fq_cbs *fq_cbs,
801 struct fman_port *tx_port)
803 int egress_cnt = 0, conf_cnt = 0, num_portals = 0, cpu;
804 const cpumask_t *affine_cpus = qman_affine_cpus();
805 u16 portals[NR_CPUS];
808 for_each_cpu(cpu, affine_cpus)
809 portals[num_portals++] = qman_affine_channel(cpu);
810 if (num_portals == 0)
811 dev_err(priv->net_dev->dev.parent,
812 "No Qman software (affine) channels found");
814 /* Initialize each FQ in the list */
815 list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
816 switch (fq->fq_type) {
817 case FQ_TYPE_RX_DEFAULT:
818 dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
820 case FQ_TYPE_RX_ERROR:
821 dpaa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
824 dpaa_setup_egress(priv, fq, tx_port,
825 &fq_cbs->egress_ern);
826 /* If we have more Tx queues than the number of cores,
827 * just ignore the extra ones.
829 if (egress_cnt < DPAA_ETH_TXQ_NUM)
830 priv->egress_fqs[egress_cnt++] = &fq->fq_base;
832 case FQ_TYPE_TX_CONF_MQ:
833 priv->conf_fqs[conf_cnt++] = &fq->fq_base;
835 case FQ_TYPE_TX_CONFIRM:
836 dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
838 case FQ_TYPE_TX_ERROR:
839 dpaa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
842 dev_warn(priv->net_dev->dev.parent,
843 "Unknown FQ type detected!\n");
848 /* Make sure all CPUs receive a corresponding Tx queue. */
849 while (egress_cnt < DPAA_ETH_TXQ_NUM) {
850 list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
851 if (fq->fq_type != FQ_TYPE_TX)
853 priv->egress_fqs[egress_cnt++] = &fq->fq_base;
854 if (egress_cnt == DPAA_ETH_TXQ_NUM)
860 static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
861 struct qman_fq *tx_fq)
865 for (i = 0; i < DPAA_ETH_TXQ_NUM; i++)
866 if (priv->egress_fqs[i] == tx_fq)
872 static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
874 const struct dpaa_priv *priv;
875 struct qman_fq *confq = NULL;
876 struct qm_mcc_initfq initfq;
882 priv = netdev_priv(dpaa_fq->net_dev);
883 dev = dpaa_fq->net_dev->dev.parent;
885 if (dpaa_fq->fqid == 0)
886 dpaa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
888 dpaa_fq->init = !(dpaa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
890 err = qman_create_fq(dpaa_fq->fqid, dpaa_fq->flags, &dpaa_fq->fq_base);
892 dev_err(dev, "qman_create_fq() failed\n");
895 fq = &dpaa_fq->fq_base;
898 memset(&initfq, 0, sizeof(initfq));
900 initfq.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL);
901 /* Note: we may get to keep an empty FQ in cache */
902 initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE);
904 /* Try to reduce the number of portal interrupts for
905 * Tx Confirmation FQs.
907 if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
908 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
911 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ);
913 qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq);
915 /* Put all egress queues in a congestion group of their own.
916 * Sensu stricto, the Tx confirmation queues are Rx FQs,
917 * rather than Tx - but they nonetheless account for the
918 * memory footprint on behalf of egress traffic. We therefore
919 * place them in the netdev's CGR, along with the Tx FQs.
921 if (dpaa_fq->fq_type == FQ_TYPE_TX ||
922 dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
923 dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
924 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
925 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
926 initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid;
927 /* Set a fixed overhead accounting, in an attempt to
928 * reduce the impact of fixed-size skb shells and the
929 * driver's needed headroom on system memory. This is
930 * especially the case when the egress traffic is
931 * composed of small datagrams.
932 * Unfortunately, QMan's OAL value is capped to an
933 * insufficient value, but even that is better than
934 * no overhead accounting at all.
936 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
937 qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
938 qm_fqd_set_oal(&initfq.fqd,
939 min(sizeof(struct sk_buff) +
941 (size_t)FSL_QMAN_MAX_OAL));
945 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_TDTHRESH);
946 qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1);
947 initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_TDE);
950 if (dpaa_fq->fq_type == FQ_TYPE_TX) {
951 queue_id = dpaa_tx_fq_to_id(priv, &dpaa_fq->fq_base);
953 confq = priv->conf_fqs[queue_id];
956 cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
957 /* ContextA: OVOM=1(use contextA2 bits instead of ICAD)
958 * A2V=1 (contextA A2 field is valid)
959 * A0V=1 (contextA A0 field is valid)
960 * B0V=1 (contextB field is valid)
961 * ContextA A2: EBD=1 (deallocate buffers inside FMan)
962 * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
964 qm_fqd_context_a_set64(&initfq.fqd,
965 0x1e00000080000000ULL);
969 /* Put all the ingress queues in our "ingress CGR". */
970 if (priv->use_ingress_cgr &&
971 (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
972 dpaa_fq->fq_type == FQ_TYPE_RX_ERROR)) {
973 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
974 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
975 initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid;
976 /* Set a fixed overhead accounting, just like for the
979 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
980 qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
981 qm_fqd_set_oal(&initfq.fqd,
982 min(sizeof(struct sk_buff) +
984 (size_t)FSL_QMAN_MAX_OAL));
987 /* Initialization common to all ingress queues */
988 if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
989 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
990 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
991 initfq.fqd.context_a.stashing.exclusive =
992 QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
993 QM_STASHING_EXCL_ANNOTATION;
994 qm_fqd_set_stashing(&initfq.fqd, 1, 2,
995 DIV_ROUND_UP(sizeof(struct qman_fq),
999 err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
1001 dev_err(dev, "qman_init_fq(%u) = %d\n",
1002 qman_fq_fqid(fq), err);
1003 qman_destroy_fq(fq);
1008 dpaa_fq->fqid = qman_fq_fqid(fq);
1013 static int dpaa_fq_free_entry(struct device *dev, struct qman_fq *fq)
1015 const struct dpaa_priv *priv;
1016 struct dpaa_fq *dpaa_fq;
1021 dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
1022 priv = netdev_priv(dpaa_fq->net_dev);
1024 if (dpaa_fq->init) {
1025 err = qman_retire_fq(fq, NULL);
1026 if (err < 0 && netif_msg_drv(priv))
1027 dev_err(dev, "qman_retire_fq(%u) = %d\n",
1028 qman_fq_fqid(fq), err);
1030 error = qman_oos_fq(fq);
1031 if (error < 0 && netif_msg_drv(priv)) {
1032 dev_err(dev, "qman_oos_fq(%u) = %d\n",
1033 qman_fq_fqid(fq), error);
1039 qman_destroy_fq(fq);
1040 list_del(&dpaa_fq->list);
1045 static int dpaa_fq_free(struct device *dev, struct list_head *list)
1047 struct dpaa_fq *dpaa_fq, *tmp;
1051 list_for_each_entry_safe(dpaa_fq, tmp, list, list) {
1052 error = dpaa_fq_free_entry(dev, (struct qman_fq *)dpaa_fq);
1053 if (error < 0 && err >= 0)
1060 static void dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
1061 struct dpaa_fq *defq,
1062 struct dpaa_buffer_layout *buf_layout)
1064 struct fman_buffer_prefix_content buf_prefix_content;
1065 struct fman_port_params params;
1068 memset(¶ms, 0, sizeof(params));
1069 memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
1071 buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
1072 buf_prefix_content.pass_prs_result = true;
1073 buf_prefix_content.pass_hash_result = true;
1074 buf_prefix_content.pass_time_stamp = false;
1075 buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
1077 params.specific_params.non_rx_params.err_fqid = errq->fqid;
1078 params.specific_params.non_rx_params.dflt_fqid = defq->fqid;
1080 err = fman_port_config(port, ¶ms);
1082 pr_err("%s: fman_port_config failed\n", __func__);
1084 err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
1086 pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
1089 err = fman_port_init(port);
1091 pr_err("%s: fman_port_init failed\n", __func__);
1094 static void dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
1095 size_t count, struct dpaa_fq *errq,
1096 struct dpaa_fq *defq,
1097 struct dpaa_buffer_layout *buf_layout)
1099 struct fman_buffer_prefix_content buf_prefix_content;
1100 struct fman_port_rx_params *rx_p;
1101 struct fman_port_params params;
1104 memset(¶ms, 0, sizeof(params));
1105 memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
1107 buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
1108 buf_prefix_content.pass_prs_result = true;
1109 buf_prefix_content.pass_hash_result = true;
1110 buf_prefix_content.pass_time_stamp = false;
1111 buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
1113 rx_p = ¶ms.specific_params.rx_params;
1114 rx_p->err_fqid = errq->fqid;
1115 rx_p->dflt_fqid = defq->fqid;
1117 count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
1118 rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
1119 for (i = 0; i < count; i++) {
1120 rx_p->ext_buf_pools.ext_buf_pool[i].id = bps[i]->bpid;
1121 rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bps[i]->size;
1124 err = fman_port_config(port, ¶ms);
1126 pr_err("%s: fman_port_config failed\n", __func__);
1128 err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
1130 pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
1133 err = fman_port_init(port);
1135 pr_err("%s: fman_port_init failed\n", __func__);
1138 static void dpaa_eth_init_ports(struct mac_device *mac_dev,
1139 struct dpaa_bp **bps, size_t count,
1140 struct fm_port_fqs *port_fqs,
1141 struct dpaa_buffer_layout *buf_layout,
1144 struct fman_port *rxport = mac_dev->port[RX];
1145 struct fman_port *txport = mac_dev->port[TX];
1147 dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
1148 port_fqs->tx_defq, &buf_layout[TX]);
1149 dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
1150 port_fqs->rx_defq, &buf_layout[RX]);
1153 static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
1154 struct bm_buffer *bmb, int cnt)
1158 err = bman_release(dpaa_bp->pool, bmb, cnt);
1159 /* Should never occur, address anyway to avoid leaking the buffers */
1160 if (unlikely(WARN_ON(err)) && dpaa_bp->free_buf_cb)
1161 while (cnt-- > 0)
1162 dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]);
1167 static void dpaa_release_sgt_members(struct qm_sg_entry *sgt)
1169 struct bm_buffer bmb[DPAA_BUFF_RELEASE_MAX];
1170 struct dpaa_bp *dpaa_bp;
1173 memset(bmb, 0, sizeof(bmb));
1176 dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
1182 WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
1184 bm_buffer_set64(&bmb[j], qm_sg_entry_get64(&sgt[i]));
1187 } while (j < ARRAY_SIZE(bmb) &&
1188 !qm_sg_entry_is_final(&sgt[i - 1]) &&
1189 sgt[i - 1].bpid == sgt[i].bpid);
1191 dpaa_bman_release(dpaa_bp, bmb, j);
1192 } while (!qm_sg_entry_is_final(&sgt[i - 1]));
1195 static void dpaa_fd_release(const struct net_device *net_dev,
1196 const struct qm_fd *fd)
1198 struct qm_sg_entry *sgt;
1199 struct dpaa_bp *dpaa_bp;
1200 struct bm_buffer bmb;
1205 bm_buffer_set64(&bmb, qm_fd_addr(fd));
1207 dpaa_bp = dpaa_bpid2pool(fd->bpid);
1211 if (qm_fd_get_format(fd) == qm_fd_sg) {
1212 vaddr = phys_to_virt(qm_fd_addr(fd));
1213 sgt = vaddr + qm_fd_get_offset(fd);
1215 dma_unmap_single(dpaa_bp->dev, qm_fd_addr(fd), dpaa_bp->size,
1218 dpaa_release_sgt_members(sgt);
1220 addr = dma_map_single(dpaa_bp->dev, vaddr, dpaa_bp->size,
1222 if (dma_mapping_error(dpaa_bp->dev, addr)) {
1223 dev_err(dpaa_bp->dev, "DMA mapping failed");
1226 bm_buffer_set64(&bmb, addr);
1229 dpaa_bman_release(dpaa_bp, &bmb, 1);
1232 static void count_ern(struct dpaa_percpu_priv *percpu_priv,
1233 const union qm_mr_entry *msg)
1235 switch (msg->ern.rc & QM_MR_RC_MASK) {
1236 case QM_MR_RC_CGR_TAILDROP:
1237 percpu_priv->ern_cnt.cg_tdrop++;
1240 percpu_priv->ern_cnt.wred++;
1242 case QM_MR_RC_ERROR:
1243 percpu_priv->ern_cnt.err_cond++;
1245 case QM_MR_RC_ORPWINDOW_EARLY:
1246 percpu_priv->ern_cnt.early_window++;
1248 case QM_MR_RC_ORPWINDOW_LATE:
1249 percpu_priv->ern_cnt.late_window++;
1251 case QM_MR_RC_FQ_TAILDROP:
1252 percpu_priv->ern_cnt.fq_tdrop++;
1254 case QM_MR_RC_ORPWINDOW_RETIRED:
1255 percpu_priv->ern_cnt.fq_retired++;
1257 case QM_MR_RC_ORP_ZERO:
1258 percpu_priv->ern_cnt.orp_zero++;
1263 /* Turn on HW checksum computation for this outgoing frame.
1264 * If the current protocol is not something we support in this regard
1265 * (or if the stack has already computed the SW checksum), we do nothing.
1267 * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
1270 * Note that this function may modify the fd->cmd field and the skb data buffer
1271 * (the Parse Results area).
1273 static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
1274 struct sk_buff *skb,
1276 char *parse_results)
1278 struct fman_prs_result *parse_result;
1279 u16 ethertype = ntohs(skb->protocol);
1280 struct ipv6hdr *ipv6h = NULL;
1285 if (skb->ip_summed != CHECKSUM_PARTIAL)
1288 /* Note: L3 csum seems to be already computed in sw, but we can't choose
1289 * L4 alone from the FM configuration anyway.
1292 /* Fill in some fields of the Parse Results array, so the FMan
1293 * can find them as if they came from the FMan Parser.
1295 parse_result = (struct fman_prs_result *)parse_results;
1297 /* If we're dealing with VLAN, get the real Ethernet type */
1298 if (ethertype == ETH_P_8021Q) {
1299 /* We can't always assume the MAC header is set correctly
1300 * by the stack, so reset to beginning of skb->data
1302 skb_reset_mac_header(skb);
1303 ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
1306 /* Fill in the relevant L3 parse result fields
1307 * and read the L4 protocol type
1309 switch (ethertype) {
1311 parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
1314 l4_proto = iph->protocol;
1317 parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
1318 ipv6h = ipv6_hdr(skb);
1320 l4_proto = ipv6h->nexthdr;
1323 /* We shouldn't even be here */
1324 if (net_ratelimit())
1325 netif_alert(priv, tx_err, priv->net_dev,
1326 "Can't compute HW csum for L3 proto 0x%x\n",
1327 ntohs(skb->protocol));
1332 /* Fill in the relevant L4 parse result fields */
1335 parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
1338 parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
1341 if (net_ratelimit())
1342 netif_alert(priv, tx_err, priv->net_dev,
1343 "Can't compute HW csum for L4 proto 0x%x\n",
1349 /* At index 0 is IPOffset_1 as defined in the Parse Results */
1350 parse_result->ip_off[0] = (u8)skb_network_offset(skb);
1351 parse_result->l4_off = (u8)skb_transport_offset(skb);
1353 /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
1354 fd->cmd |= cpu_to_be32(FM_FD_CMD_RPD | FM_FD_CMD_DTC);
1356 /* On P1023 and similar platforms, fd->cmd interpretation could be
1357 * disabled by setting the CONTEXT_A bit ICMD; currently this bit is not
1358 * set, so we do not need to check for it. In the future, if/when
1359 * using context_a, we will need to check this bit
1366 static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
1368 struct device *dev = dpaa_bp->dev;
1369 struct bm_buffer bmb[8];
1374 for (i = 0; i < 8; i++) {
1375 new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
1376 if (unlikely(!new_buf)) {
1377 dev_err(dev, "netdev_alloc_frag() failed, size %zu\n",
1379 goto release_previous_buffs;
1381 new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);
1383 addr = dma_map_single(dev, new_buf,
1384 dpaa_bp->size, DMA_FROM_DEVICE);
1385 if (unlikely(dma_mapping_error(dev, addr))) {
1386 dev_err(dpaa_bp->dev, "DMA map failed");
1387 goto release_previous_buffs;
1391 bm_buffer_set64(&bmb[i], addr);
1395 return dpaa_bman_release(dpaa_bp, bmb, i);
1397 release_previous_buffs:
1398 WARN_ONCE(1, "dpaa_eth: failed to add buffers on Rx\n");
1400 bm_buffer_set64(&bmb[i], 0);
1401 /* Avoid releasing a completely null buffer; bman_release() requires
1402 * at least one buffer.
1410 static int dpaa_bp_seed(struct dpaa_bp *dpaa_bp)
1414 /* Give each CPU an allotment of "config_count" buffers */
1415 for_each_possible_cpu(i) {
1416 int *count_ptr = per_cpu_ptr(dpaa_bp->percpu_count, i);
1419 /* Although we access another CPU's counters here
1420 * we do it only at boot time, so it is safe
1422 for (j = 0; j < dpaa_bp->config_count; j += 8)
1423 *count_ptr += dpaa_bp_add_8_bufs(dpaa_bp);
1428 /* Add buffers/(pages) for Rx processing whenever bpool count falls below
1431 static int dpaa_eth_refill_bpool(struct dpaa_bp *dpaa_bp, int *countptr)
1433 int count = *countptr;
1436 if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) {
1438 new_bufs = dpaa_bp_add_8_bufs(dpaa_bp);
1439 if (unlikely(!new_bufs)) {
1440 /* Avoid looping forever if we've temporarily
1441 * run out of memory. We'll try again at the
1447 } while (count < FSL_DPAA_ETH_MAX_BUF_COUNT);
1450 if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT))
1457 static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
1459 struct dpaa_bp *dpaa_bp;
1463 for (i = 0; i < DPAA_BPS_NUM; i++) {
1464 dpaa_bp = priv->dpaa_bps[i];
1467 countptr = this_cpu_ptr(dpaa_bp->percpu_count);
1468 res = dpaa_eth_refill_bpool(dpaa_bp, countptr);
1475 /* Cleanup function for outgoing frame descriptors that were built on Tx path,
1476 * either contiguous frames or scatter/gather ones.
1477 * Skb freeing is not handled here.
1479 * This function may be called on error paths in the Tx function, so guard
1480 * against cases when not all fd relevant fields were filled in.
1482 * Return the skb backpointer, since for S/G frames the buffer containing it
1485 static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
1486 const struct qm_fd *fd)
1488 const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
1489 struct device *dev = priv->net_dev->dev.parent;
1490 dma_addr_t addr = qm_fd_addr(fd);
1491 const struct qm_sg_entry *sgt;
1492 struct sk_buff **skbh, *skb;
1495 skbh = (struct sk_buff **)phys_to_virt(addr);
1498 if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
1499 nr_frags = skb_shinfo(skb)->nr_frags;
1500 dma_unmap_single(dev, addr, qm_fd_get_offset(fd) +
1501 sizeof(struct qm_sg_entry) * (1 + nr_frags),
1504 /* The sgt buffer has been allocated with netdev_alloc_frag(),
1507 sgt = phys_to_virt(addr + qm_fd_get_offset(fd));
1509 /* sgt[0] is from lowmem, was dma_map_single()-ed */
1510 dma_unmap_single(dev, qm_sg_addr(&sgt[0]),
1511 qm_sg_entry_get_len(&sgt[0]), dma_dir);
1513 /* remaining pages were mapped with skb_frag_dma_map() */
1514 for (i = 1; i < nr_frags; i++) {
1515 WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
1517 dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
1518 qm_sg_entry_get_len(&sgt[i]), dma_dir);
1521 /* Free the page frag that we allocated on Tx */
1522 skb_free_frag(phys_to_virt(addr));
1524 dma_unmap_single(dev, addr,
1525 skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
1531 /* Build a linear skb around the received buffer.
1532 * We are guaranteed there is enough room at the end of the data buffer to
1533 * accommodate the shared info area of the skb.
1535 static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
1536 const struct qm_fd *fd)
1538 ssize_t fd_off = qm_fd_get_offset(fd);
1539 dma_addr_t addr = qm_fd_addr(fd);
1540 struct dpaa_bp *dpaa_bp;
1541 struct sk_buff *skb;
1544 vaddr = phys_to_virt(addr);
1545 WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
1547 dpaa_bp = dpaa_bpid2pool(fd->bpid);
1551 skb = build_skb(vaddr, dpaa_bp->size +
1552 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
1553 if (unlikely(!skb)) {
1554 WARN_ONCE(1, "Build skb failure on Rx\n");
1557 WARN_ON(fd_off != priv->rx_headroom);
1558 skb_reserve(skb, fd_off);
1559 skb_put(skb, qm_fd_get_length(fd));
1561 skb->ip_summed = CHECKSUM_NONE;
1566 skb_free_frag(vaddr);
1570 /* Build an skb with the data of the first S/G entry in the linear portion and
1571 * the rest of the frame as skb fragments.
1573 * The page fragment holding the S/G Table is recycled here.
1575 static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
1576 const struct qm_fd *fd)
1578 ssize_t fd_off = qm_fd_get_offset(fd);
1579 dma_addr_t addr = qm_fd_addr(fd);
1580 const struct qm_sg_entry *sgt;
1581 struct page *page, *head_page;
1582 struct dpaa_bp *dpaa_bp;
1583 void *vaddr, *sg_vaddr;
1584 int frag_off, frag_len;
1585 struct sk_buff *skb;
1592 vaddr = phys_to_virt(addr);
1593 WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
1595 /* Iterate through the SGT entries and add data buffers to the skb */
1596 sgt = vaddr + fd_off;
1597 for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) {
1598 /* Extension bit is not supported */
1599 WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
1601 sg_addr = qm_sg_addr(&sgt[i]);
1602 sg_vaddr = phys_to_virt(sg_addr);
1603 WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
1606 /* We may use multiple Rx pools */
1607 dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
1611 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
1612 dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size,
1615 sz = dpaa_bp->size +
1616 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1617 skb = build_skb(sg_vaddr, sz);
1618 if (WARN_ON(unlikely(!skb)))
1621 skb->ip_summed = CHECKSUM_NONE;
1623 /* Make sure forwarded skbs will have enough space
1624 * on Tx, if extra headers are added.
1626 WARN_ON(fd_off != priv->rx_headroom);
1627 skb_reserve(skb, fd_off);
1628 skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
1630 /* Not the first S/G entry; all data from buffer will
1631 * be added in an skb fragment; fragment index is offset
1632 * by one since first S/G entry was incorporated in the
1633 * linear part of the skb.
1635 * Caution: 'page' may be a tail page.
1637 page = virt_to_page(sg_vaddr);
1638 head_page = virt_to_head_page(sg_vaddr);
1640 /* Compute offset in (possibly tail) page */
1641 page_offset = ((unsigned long)sg_vaddr &
1643 (page_address(page) - page_address(head_page));
1644 /* page_offset only refers to the beginning of sgt[i];
1645 * but the buffer itself may have an internal offset.
1647 frag_off = qm_sg_entry_get_off(&sgt[i]) + page_offset;
1648 frag_len = qm_sg_entry_get_len(&sgt[i]);
1649 /* skb_add_rx_frag() does no checking on the page; if
1650 * we pass it a tail page, we'll end up with
1651 * bad page accounting and eventually with segfaults.
1653 skb_add_rx_frag(skb, i - 1, head_page, frag_off,
1654 frag_len, dpaa_bp->size);
1656 /* Update the pool count for the current {cpu x bpool} */
1659 if (qm_sg_entry_is_final(&sgt[i]))
1662 WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
1664 /* free the SG table buffer */
1665 skb_free_frag(vaddr);
1670 /* compensate sw bpool counter changes */
1671 for (i--; i > 0; i--) {
1672 dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
1674 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
1678 /* free all the SG entries */
1679 for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
1680 sg_addr = qm_sg_addr(&sgt[i]);
1681 sg_vaddr = phys_to_virt(sg_addr);
1682 skb_free_frag(sg_vaddr);
1683 dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
1685 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
1689 if (qm_sg_entry_is_final(&sgt[i]))
1692 /* free the SGT fragment */
1693 skb_free_frag(vaddr);
1698 static int skb_to_contig_fd(struct dpaa_priv *priv,
1699 struct sk_buff *skb, struct qm_fd *fd,
1702 struct net_device *net_dev = priv->net_dev;
1703 struct device *dev = net_dev->dev.parent;
1704 enum dma_data_direction dma_dir;
1705 unsigned char *buffer_start;
1706 struct sk_buff **skbh;
1710 /* We are guaranteed to have at least tx_headroom bytes
1711 * available, so just use that for offset.
1713 fd->bpid = FSL_DPAA_BPID_INV;
1714 buffer_start = skb->data - priv->tx_headroom;
1715 dma_dir = DMA_TO_DEVICE;
1717 skbh = (struct sk_buff **)buffer_start;
1720 /* Enable L3/L4 hardware checksum computation.
1722 * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
1723 * need to write into the skb.
1725 err = dpaa_enable_tx_csum(priv, skb, fd,
1726 ((char *)skbh) + DPAA_TX_PRIV_DATA_SIZE);
1727 if (unlikely(err < 0)) {
1728 if (net_ratelimit())
1729 netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
1734 /* Fill in the rest of the FD fields */
1735 qm_fd_set_contig(fd, priv->tx_headroom, skb->len);
1736 fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
1738 /* Map the entire buffer size that may be seen by FMan, but no more */
1739 addr = dma_map_single(dev, skbh,
1740 skb_tail_pointer(skb) - buffer_start, dma_dir);
1741 if (unlikely(dma_mapping_error(dev, addr))) {
1742 if (net_ratelimit())
1743 netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
1746 qm_fd_addr_set64(fd, addr);
1751 static int skb_to_sg_fd(struct dpaa_priv *priv,
1752 struct sk_buff *skb, struct qm_fd *fd)
1754 const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
1755 const int nr_frags = skb_shinfo(skb)->nr_frags;
1756 struct net_device *net_dev = priv->net_dev;
1757 struct device *dev = net_dev->dev.parent;
1758 struct qm_sg_entry *sgt;
1759 struct sk_buff **skbh;
1767 /* get a page frag to store the SGTable */
1768 sz = SKB_DATA_ALIGN(priv->tx_headroom +
1769 sizeof(struct qm_sg_entry) * (1 + nr_frags));
1770 sgt_buf = netdev_alloc_frag(sz);
1771 if (unlikely(!sgt_buf)) {
1772 netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
1777 /* Enable L3/L4 hardware checksum computation.
1779 * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
1780 * need to write into the skb.
1782 err = dpaa_enable_tx_csum(priv, skb, fd,
1783 sgt_buf + DPAA_TX_PRIV_DATA_SIZE);
1784 if (unlikely(err < 0)) {
1785 if (net_ratelimit())
1786 netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
1791 sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
1792 qm_sg_entry_set_len(&sgt[0], skb_headlen(skb));
1793 sgt[0].bpid = FSL_DPAA_BPID_INV;
1795 addr = dma_map_single(dev, skb->data,
1796 skb_headlen(skb), dma_dir);
1797 if (unlikely(dma_mapping_error(dev, addr))) {
1798 dev_err(dev, "DMA mapping failed");
1800 goto sg0_map_failed;
1802 qm_sg_entry_set64(&sgt[0], addr);
1804 /* populate the rest of SGT entries */
1805 frag = &skb_shinfo(skb)->frags[0];
1806 frag_len = frag->size;
1807 for (i = 1; i <= nr_frags; i++, frag++) {
1808 WARN_ON(!skb_frag_page(frag));
1809 addr = skb_frag_dma_map(dev, frag, 0,
1811 if (unlikely(dma_mapping_error(dev, addr))) {
1812 dev_err(dev, "DMA mapping failed");
1817 qm_sg_entry_set_len(&sgt[i], frag_len);
1818 sgt[i].bpid = FSL_DPAA_BPID_INV;
1821 /* keep the offset in the address */
1822 qm_sg_entry_set64(&sgt[i], addr);
1823 frag_len = frag->size;
1825 qm_sg_entry_set_f(&sgt[i - 1], frag_len);
1827 qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
1829 /* DMA map the SGT page */
1830 buffer_start = (void *)sgt - priv->tx_headroom;
1831 skbh = (struct sk_buff **)buffer_start;
1834 addr = dma_map_single(dev, buffer_start, priv->tx_headroom +
1835 sizeof(struct qm_sg_entry) * (1 + nr_frags),
1837 if (unlikely(dma_mapping_error(dev, addr))) {
1838 dev_err(dev, "DMA mapping failed");
1840 goto sgt_map_failed;
1843 fd->bpid = FSL_DPAA_BPID_INV;
1844 fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
1845 qm_fd_addr_set64(fd, addr);
1851 for (j = 0; j < i; j++)
1852 dma_unmap_page(dev, qm_sg_addr(&sgt[j]),
1853 qm_sg_entry_get_len(&sgt[j]), dma_dir);
1856 skb_free_frag(sgt_buf);
1861 static inline int dpaa_xmit(struct dpaa_priv *priv,
1862 struct rtnl_link_stats64 *percpu_stats,
1866 struct qman_fq *egress_fq;
1869 egress_fq = priv->egress_fqs[queue];
1870 if (fd->bpid == FSL_DPAA_BPID_INV)
1871 fd->cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue]));
1873 /* Trace this Tx fd */
1874 trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd);
1876 for (i = 0; i < DPAA_ENQUEUE_RETRIES; i++) {
1877 err = qman_enqueue(egress_fq, fd);
1882 if (unlikely(err < 0)) {
1883 percpu_stats->tx_errors++;
1884 percpu_stats->tx_fifo_errors++;
1888 percpu_stats->tx_packets++;
1889 percpu_stats->tx_bytes += qm_fd_get_length(fd);
1894 static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
1896 const int queue_mapping = skb_get_queue_mapping(skb);
1897 bool nonlinear = skb_is_nonlinear(skb);
1898 struct rtnl_link_stats64 *percpu_stats;
1899 struct dpaa_percpu_priv *percpu_priv;
1900 struct dpaa_priv *priv;
1905 priv = netdev_priv(net_dev);
1906 percpu_priv = this_cpu_ptr(priv->percpu_priv);
1907 percpu_stats = &percpu_priv->stats;
1909 qm_fd_clear_fd(&fd);
1912 /* We're going to store the skb backpointer at the beginning
1913 * of the data buffer, so we need a privately owned skb.
1915 * We've made sure the skb is not shared in dev->priv_flags,
1916 * but we still need to verify that the skb head is not cloned
1918 if (skb_cow_head(skb, priv->tx_headroom))
1921 WARN_ON(skb_is_nonlinear(skb));
1924 /* MAX_SKB_FRAGS is equal to or larger than DPAA_SGT_MAX_ENTRIES;
1925 * make sure we don't feed FMan with more fragments than it supports.
1928 likely(skb_shinfo(skb)->nr_frags < DPAA_SGT_MAX_ENTRIES)) {
1929 /* Just create a S/G fd based on the skb */
1930 err = skb_to_sg_fd(priv, skb, &fd);
1931 percpu_priv->tx_frag_skbuffs++;
1933 /* If the egress skb contains more fragments than we support,
1934 * we have no choice but to linearize it ourselves.
1936 if (unlikely(nonlinear) && __skb_linearize(skb))
1939 /* Finally, create a contig FD from this skb */
1940 err = skb_to_contig_fd(priv, skb, &fd, &offset);
1942 if (unlikely(err < 0))
1943 goto skb_to_fd_failed;
1945 if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
1946 return NETDEV_TX_OK;
1948 dpaa_cleanup_tx_fd(priv, &fd);
1951 percpu_stats->tx_errors++;
1953 return NETDEV_TX_OK;
1956 static void dpaa_rx_error(struct net_device *net_dev,
1957 const struct dpaa_priv *priv,
1958 struct dpaa_percpu_priv *percpu_priv,
1959 const struct qm_fd *fd,
1962 if (net_ratelimit())
1963 netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n",
1964 be32_to_cpu(fd->status) & FM_FD_STAT_RX_ERRORS);
1966 percpu_priv->stats.rx_errors++;
1968 if (be32_to_cpu(fd->status) & FM_FD_ERR_DMA)
1969 percpu_priv->rx_errors.dme++;
1970 if (be32_to_cpu(fd->status) & FM_FD_ERR_PHYSICAL)
1971 percpu_priv->rx_errors.fpe++;
1972 if (be32_to_cpu(fd->status) & FM_FD_ERR_SIZE)
1973 percpu_priv->rx_errors.fse++;
1974 if (be32_to_cpu(fd->status) & FM_FD_ERR_PRS_HDR_ERR)
1975 percpu_priv->rx_errors.phe++;
1977 dpaa_fd_release(net_dev, fd);
1980 static void dpaa_tx_error(struct net_device *net_dev,
1981 const struct dpaa_priv *priv,
1982 struct dpaa_percpu_priv *percpu_priv,
1983 const struct qm_fd *fd,
1986 struct sk_buff *skb;
1988 if (net_ratelimit())
1989 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
1990 be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS);
1992 percpu_priv->stats.tx_errors++;
1994 skb = dpaa_cleanup_tx_fd(priv, fd);
1998 static int dpaa_eth_poll(struct napi_struct *napi, int budget)
2000 struct dpaa_napi_portal *np =
2001 container_of(napi, struct dpaa_napi_portal, napi);
2003 int cleaned = qman_p_poll_dqrr(np->p, budget);
2005 if (cleaned < budget) {
2006 napi_complete(napi);
2007 qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
2009 } else if (np->down) {
2010 qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
2016 static void dpaa_tx_conf(struct net_device *net_dev,
2017 const struct dpaa_priv *priv,
2018 struct dpaa_percpu_priv *percpu_priv,
2019 const struct qm_fd *fd,
2022 struct sk_buff *skb;
2024 if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS)) {
2025 if (net_ratelimit())
2026 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2027 be32_to_cpu(fd->status) &
2028 FM_FD_STAT_TX_ERRORS);
2030 percpu_priv->stats.tx_errors++;
2033 percpu_priv->tx_confirm++;
2035 skb = dpaa_cleanup_tx_fd(priv, fd);
2040 static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv,
2041 struct qman_portal *portal)
2043 if (unlikely(in_irq() || !in_serving_softirq())) {
2044 /* Disable QMan IRQ and invoke NAPI */
2045 qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
2047 percpu_priv->np.p = portal;
2048 napi_schedule(&percpu_priv->np.napi);
2049 percpu_priv->in_interrupt++;
2055 static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
2057 const struct qm_dqrr_entry *dq)
2059 struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
2060 struct dpaa_percpu_priv *percpu_priv;
2061 struct net_device *net_dev;
2062 struct dpaa_bp *dpaa_bp;
2063 struct dpaa_priv *priv;
2065 net_dev = dpaa_fq->net_dev;
2066 priv = netdev_priv(net_dev);
2067 dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
2069 return qman_cb_dqrr_consume;
2071 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2073 if (dpaa_eth_napi_schedule(percpu_priv, portal))
2074 return qman_cb_dqrr_stop;
2076 if (dpaa_eth_refill_bpools(priv))
2077 /* Unable to refill the buffer pool due to insufficient
2078 * system memory. Just release the frame back into the pool,
2079 * otherwise we'll soon end up with an empty buffer pool.
2081 dpaa_fd_release(net_dev, &dq->fd);
2083 dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
2085 return qman_cb_dqrr_consume;
2088 static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
2090 const struct qm_dqrr_entry *dq)
2092 struct rtnl_link_stats64 *percpu_stats;
2093 struct dpaa_percpu_priv *percpu_priv;
2094 const struct qm_fd *fd = &dq->fd;
2095 dma_addr_t addr = qm_fd_addr(fd);
2096 enum qm_fd_format fd_format;
2097 struct net_device *net_dev;
2098 u32 fd_status;
2099 struct dpaa_bp *dpaa_bp;
2100 struct dpaa_priv *priv;
2101 unsigned int skb_len;
2102 struct sk_buff *skb;
2105 fd_status = be32_to_cpu(fd->status);
2106 fd_format = qm_fd_get_format(fd);
2107 net_dev = ((struct dpaa_fq *)fq)->net_dev;
2108 priv = netdev_priv(net_dev);
2109 dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
2111 return qman_cb_dqrr_consume;
2113 /* Trace the Rx fd */
2114 trace_dpaa_rx_fd(net_dev, fq, &dq->fd);
2116 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2117 percpu_stats = &percpu_priv->stats;
2119 if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
2120 return qman_cb_dqrr_stop;
2122 /* Make sure we didn't run out of buffers */
2123 if (unlikely(dpaa_eth_refill_bpools(priv))) {
2124 /* Unable to refill the buffer pool due to insufficient
2125 * system memory. Just release the frame back into the pool,
2126 * otherwise we'll soon end up with an empty buffer pool.
2128 dpaa_fd_release(net_dev, &dq->fd);
2129 return qman_cb_dqrr_consume;
2132 if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS)) {
2133 if (net_ratelimit())
2134 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2135 fd_status & FM_FD_STAT_RX_ERRORS);
2137 percpu_stats->rx_errors++;
2138 dpaa_fd_release(net_dev, fd);
2139 return qman_cb_dqrr_consume;
2142 dpaa_bp = dpaa_bpid2pool(fd->bpid);
2144 return qman_cb_dqrr_consume;
2146 dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);
2148 /* prefetch the first 64 bytes of the frame or the SGT start */
2149 prefetch(phys_to_virt(addr) + qm_fd_get_offset(fd));
2151 fd_format = qm_fd_get_format(fd);
2152 /* The only FD types that we may receive are contig and S/G */
2153 WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg));
2155 /* Account for either the contig buffer or the SGT buffer (depending on
2156 * which case we were in) having been removed from the pool.
2158 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
2161 if (likely(fd_format == qm_fd_contig))
2162 skb = contig_fd_to_skb(priv, fd);
2164 skb = sg_fd_to_skb(priv, fd);
2166 return qman_cb_dqrr_consume;
2168 skb->protocol = eth_type_trans(skb, net_dev);
2172 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
2173 return qman_cb_dqrr_consume;
2175 percpu_stats->rx_packets++;
2176 percpu_stats->rx_bytes += skb_len;
2178 return qman_cb_dqrr_consume;
2181 static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
2183 const struct qm_dqrr_entry *dq)
2185 struct dpaa_percpu_priv *percpu_priv;
2186 struct net_device *net_dev;
2187 struct dpaa_priv *priv;
2189 net_dev = ((struct dpaa_fq *)fq)->net_dev;
2190 priv = netdev_priv(net_dev);
2192 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2194 if (dpaa_eth_napi_schedule(percpu_priv, portal))
2195 return qman_cb_dqrr_stop;
2197 dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
2199 return qman_cb_dqrr_consume;
static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal,
					       struct qman_fq *fq,
					       const struct qm_dqrr_entry *dq)
{
	struct dpaa_percpu_priv *percpu_priv;
	struct net_device *net_dev;
	struct dpaa_priv *priv;

	net_dev = ((struct dpaa_fq *)fq)->net_dev;
	priv = netdev_priv(net_dev);

	/* Trace the Tx confirmation fd */
	trace_dpaa_tx_conf_fd(net_dev, fq, &dq->fd);

	percpu_priv = this_cpu_ptr(priv->percpu_priv);

	if (dpaa_eth_napi_schedule(percpu_priv, portal))
		return qman_cb_dqrr_stop;

	dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);

	return qman_cb_dqrr_consume;
}
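/* ERN (enqueue rejection notification) handler for the egress FQs: QMan
 * refused to enqueue the frame (e.g. because of congestion), so count it as
 * dropped and free the associated skb.
 */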
static void egress_ern(struct qman_portal *portal,
		       struct qman_fq *fq,
		       const union qm_mr_entry *msg)
{
	const struct qm_fd *fd = &msg->ern.fd;
	struct dpaa_percpu_priv *percpu_priv;
	const struct dpaa_priv *priv;
	struct net_device *net_dev;
	struct sk_buff *skb;

	net_dev = ((struct dpaa_fq *)fq)->net_dev;
	priv = netdev_priv(net_dev);
	percpu_priv = this_cpu_ptr(priv->percpu_priv);

	percpu_priv->stats.tx_dropped++;
	percpu_priv->stats.tx_fifo_errors++;
	count_ern(percpu_priv, msg);

	skb = dpaa_cleanup_tx_fd(priv, fd);
	dev_kfree_skb_any(skb);
}
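/* Default callbacks wired to each class of frame queue; dpaa_fq_setup()
 * installs them on the FQs it configures for the interface.
 */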
static const struct dpaa_fq_cbs dpaa_fq_cbs = {
	.rx_defq = { .cb = { .dqrr = rx_default_dqrr } },
	.tx_defq = { .cb = { .dqrr = conf_dflt_dqrr } },
	.rx_errq = { .cb = { .dqrr = rx_error_dqrr } },
	.tx_errq = { .cb = { .dqrr = conf_error_dqrr } },
	.egress_ern = { .cb = { .ern = egress_ern } }
};
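/* Bring the per-CPU NAPI contexts up or down together with the interface;
 * the np.down flag records the state for the rest of the driver.
 */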
static void dpaa_eth_napi_enable(struct dpaa_priv *priv)
{
	struct dpaa_percpu_priv *percpu_priv;
	int i;

	for_each_possible_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);

		percpu_priv->np.down = 0;
		napi_enable(&percpu_priv->np.napi);
	}
}
static void dpaa_eth_napi_disable(struct dpaa_priv *priv)
{
	struct dpaa_percpu_priv *percpu_priv;
	int i;

	for_each_possible_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);

		percpu_priv->np.down = 1;
		napi_disable(&percpu_priv->np.napi);
	}
}
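/* .ndo_open: enable NAPI, connect the PHY, enable the FMan ports and the MAC,
 * then let the stack start queueing frames. Failures unwind in reverse order.
 */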
static int dpaa_open(struct net_device *net_dev)
{
	struct mac_device *mac_dev;
	struct dpaa_priv *priv;
	int err, i;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;
	dpaa_eth_napi_enable(priv);

	net_dev->phydev = mac_dev->init_phy(net_dev, priv->mac_dev);
	if (!net_dev->phydev) {
		netif_err(priv, ifup, net_dev, "init_phy() failed\n");
		err = -ENODEV;
		goto phy_init_failed;
	}

	for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
		err = fman_port_enable(mac_dev->port[i]);
		if (err)
			goto mac_start_failed;
	}

	err = priv->mac_dev->start(mac_dev);
	if (err < 0) {
		netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err);
		goto mac_start_failed;
	}

	netif_tx_start_all_queues(net_dev);

	return 0;

mac_start_failed:
	for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
		fman_port_disable(mac_dev->port[i]);

phy_init_failed:
	dpaa_eth_napi_disable(priv);

	return err;
}
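/* .ndo_stop: dpaa_stop() (defined earlier in this file) quiesces the MAC and
 * ports; all that is left to do here is turn NAPI off.
 */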
static int dpaa_eth_stop(struct net_device *net_dev)
{
	struct dpaa_priv *priv;
	int err;

	err = dpaa_stop(net_dev);

	priv = netdev_priv(net_dev);
	dpaa_eth_napi_disable(priv);

	return err;
}
static const struct net_device_ops dpaa_ops = {
	.ndo_open = dpaa_open,
	.ndo_start_xmit = dpaa_start_xmit,
	.ndo_stop = dpaa_eth_stop,
	.ndo_tx_timeout = dpaa_tx_timeout,
	.ndo_get_stats64 = dpaa_get_stats64,
	.ndo_set_mac_address = dpaa_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = dpaa_set_rx_mode,
};
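/* One NAPI instance is registered per possible CPU, matching the per-CPU
 * portals that deliver this interface's traffic.
 */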
static int dpaa_napi_add(struct net_device *net_dev)
{
	struct dpaa_priv *priv = netdev_priv(net_dev);
	struct dpaa_percpu_priv *percpu_priv;
	int cpu;

	for_each_possible_cpu(cpu) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);

		netif_napi_add(net_dev, &percpu_priv->np.napi,
			       dpaa_eth_poll, NAPI_POLL_WEIGHT);
	}

	return 0;
}
static void dpaa_napi_del(struct net_device *net_dev)
{
	struct dpaa_priv *priv = netdev_priv(net_dev);
	struct dpaa_percpu_priv *percpu_priv;
	int cpu;

	for_each_possible_cpu(cpu) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);

		netif_napi_del(&percpu_priv->np.napi);
	}
}
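/* free_buf_cb for the Rx pools: buffers are page-fragment backed, so undo the
 * DMA mapping first, then release the fragment itself.
 */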
static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
				   struct bm_buffer *bmb)
{
	dma_addr_t addr = bm_buf_addr(bmb);

	dma_unmap_single(bp->dev, addr, bp->size, DMA_FROM_DEVICE);

	skb_free_frag(phys_to_virt(addr));
}
/* Alloc the dpaa_bp struct and configure default values */
static struct dpaa_bp *dpaa_bp_alloc(struct device *dev)
{
	struct dpaa_bp *dpaa_bp;

	dpaa_bp = devm_kzalloc(dev, sizeof(*dpaa_bp), GFP_KERNEL);
	if (!dpaa_bp)
		return ERR_PTR(-ENOMEM);

	dpaa_bp->bpid = FSL_DPAA_BPID_INV;
	dpaa_bp->percpu_count = devm_alloc_percpu(dev, *dpaa_bp->percpu_count);
	if (!dpaa_bp->percpu_count)
		return ERR_PTR(-ENOMEM);

	dpaa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT;

	dpaa_bp->seed_cb = dpaa_bp_seed;
	dpaa_bp->free_buf_cb = dpaa_bp_free_pf;

	return dpaa_bp;
}
/* Place all ingress FQs (Rx Default, Rx Error) in a dedicated CGR.
 * We won't be sending congestion notifications to FMan; for now, we just use
 * this CGR to generate enqueue rejections to FMan in order to drop the frames
 * before they reach our ingress queues and eat up memory.
 */
static int dpaa_ingress_cgr_init(struct dpaa_priv *priv)
{
	struct qm_mcc_initcgr initcgr;
	u32 cs_th;
	int err;

	err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid);
	if (err < 0) {
		if (netif_msg_drv(priv))
			pr_err("Error %d allocating CGR ID\n", err);
		goto out_error;
	}

	/* Enable CS TD, but disable Congestion State Change Notifications. */
	memset(&initcgr, 0, sizeof(initcgr));
	initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
	initcgr.cgr.cscn_en = QM_CGR_EN;
	cs_th = DPAA_INGRESS_CS_THRESHOLD;
	qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
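	/* qm_cgr_cs_thres_set64() encodes the byte count as the
	 * mantissa/exponent pair QMan expects; the final argument (1)
	 * requests round-up, so the effective threshold is never below
	 * the requested cs_th.
	 */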
	initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
	initcgr.cgr.cstd_en = QM_CGR_EN;

	/* This CGR will be associated with the SWP affined to the current CPU.
	 * However, we'll place all our ingress FQs in it.
	 */
	err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT,
			      &initcgr);
	if (err < 0) {
		if (netif_msg_drv(priv))
			pr_err("Error %d creating ingress CGR with ID %d\n",
			       err, priv->ingress_cgr.cgrid);
		qman_release_cgrid(priv->ingress_cgr.cgrid);
		goto out_error;
	}
	if (netif_msg_drv(priv))
		pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
			 priv->ingress_cgr.cgrid, priv->mac_dev->addr);

	priv->use_ingress_cgr = true;

out_error:
	return err;
}
static const struct of_device_id dpaa_match[];

static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
{
	u16 headroom;

	/* The frame headroom must accommodate:
	 * - the driver private data area
	 * - parse results, hash results, timestamp if selected
	 * If either hash results or time stamp are selected, both will
	 * be copied to/from the frame headroom, as TS is located between PR
	 * and HR in the IC and IC copy size has a granularity of 16 bytes
	 * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM)
	 *
	 * Also make sure the headroom is a multiple of data_align bytes
	 */
	headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE +
		DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE);

	return DPAA_FD_DATA_ALIGNMENT ? ALIGN(headroom,
					      DPAA_FD_DATA_ALIGNMENT) :
					headroom;
}
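/* Worked example for dpaa_get_headroom(), with illustrative sizes (the real
 * values come from the DPAA_*_SIZE constants and the buffer layout): with
 * priv_data_size = 16, parse results = 32, timestamp = 8 and hash results = 8
 * bytes, headroom starts at 64 and ALIGN() rounds it up to the next multiple
 * of DPAA_FD_DATA_ALIGNMENT; for a 16-byte alignment it stays at 64.
 */

/* Probe path overview: allocate the netdev early, locate the MAC, choose an
 * initial MTU, configure DMA masks, create the Rx buffer pools, allocate and
 * initialize the frame queues and congestion groups, then register NAPI and
 * the netdev itself. On failure, execution unwinds through the labels at the
 * bottom of the function.
 */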
static int dpaa_eth_probe(struct platform_device *pdev)
{
	struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL};
	struct dpaa_percpu_priv *percpu_priv;
	struct net_device *net_dev = NULL;
	struct dpaa_fq *dpaa_fq, *tmp;
	struct dpaa_priv *priv = NULL;
	struct fm_port_fqs port_fqs;
	struct mac_device *mac_dev;
	int err = 0, i, channel;
	struct device *dev;

	dev = &pdev->dev;

	/* Allocate this early, so we can store relevant information in
	 * the private area
	 */
	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM);
	if (!net_dev) {
		dev_err(dev, "alloc_etherdev_mq() failed\n");
		err = -ENOMEM;
		goto alloc_etherdev_mq_failed;
	}
	/* Do this here, so we can be verbose early */
	SET_NETDEV_DEV(net_dev, dev);
	dev_set_drvdata(dev, net_dev);

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;

	priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT);

	mac_dev = dpaa_mac_dev_get(pdev);
	if (IS_ERR(mac_dev)) {
		dev_err(dev, "dpaa_mac_dev_get() failed\n");
		err = PTR_ERR(mac_dev);
		goto mac_probe_failed;
	}

	/* If fsl_fm_max_frm is set to a higher value than the all-common 1500,
	 * we choose conservatively and let the user explicitly set a higher
	 * MTU via ifconfig. Otherwise, the user may end up with different MTUs
	 * in the same LAN.
	 * If on the other hand fsl_fm_max_frm has been chosen below 1500,
	 * start with the maximum allowed.
	 */
	net_dev->mtu = min(dpaa_get_max_mtu(), ETH_DATA_LEN);

	netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n",
		   net_dev->mtu);

	priv->buf_layout[RX].priv_data_size = DPAA_RX_PRIV_DATA_SIZE; /* Rx */
	priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */

	/* device used for DMA mapping */
	arch_setup_dma_ops(dev, 0, 0, NULL, false);
	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
	if (err) {
		dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
		goto dev_mask_failed;
	}
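	/* bp init: create the Rx buffer pools, each with its own raw buffer
	 * size and backing BMan pool.
	 */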
	for (i = 0; i < DPAA_BPS_NUM; i++) {
		dpaa_bps[i] = dpaa_bp_alloc(dev);
		if (IS_ERR(dpaa_bps[i]))
			return PTR_ERR(dpaa_bps[i]);
		/* the raw size of the buffers used for reception */
		dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM);
		/* avoid runtime computations by keeping the usable size here */
		dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size);
		dpaa_bps[i]->dev = dev;

		err = dpaa_bp_alloc_pool(dpaa_bps[i]);
		if (err < 0) {
			dpaa_bps_free(priv);
			priv->dpaa_bps[i] = NULL;
			goto bp_create_failed;
		}
		priv->dpaa_bps[i] = dpaa_bps[i];
	}
	INIT_LIST_HEAD(&priv->dpaa_fq_list);

	memset(&port_fqs, 0, sizeof(port_fqs));

	err = dpaa_alloc_all_fqs(dev, &priv->dpaa_fq_list, &port_fqs);
	if (err < 0) {
		dev_err(dev, "dpaa_alloc_all_fqs() failed\n");
		goto fq_probe_failed;
	}

	priv->mac_dev = mac_dev;

	channel = dpaa_get_channel();
	if (channel < 0) {
		dev_err(dev, "dpaa_get_channel() failed\n");
		err = channel;
		goto get_channel_failed;
	}

	priv->channel = (u16)channel;

	/* Start a thread that will walk the CPUs with affine portals
	 * and add this pool channel to each's dequeue mask.
	 */
	dpaa_eth_add_channel(priv->channel);

	dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
	/* Create a congestion group for this netdev, with
	 * dynamically-allocated CGR ID.
	 * Must be executed after probing the MAC, but before
	 * assigning the egress FQs to the CGRs.
	 */
	err = dpaa_eth_cgr_init(priv);
	if (err < 0) {
		dev_err(dev, "Error initializing CGR\n");
		goto tx_cgr_init_failed;
	}

	err = dpaa_ingress_cgr_init(priv);
	if (err < 0) {
		dev_err(dev, "Error initializing ingress CGR\n");
		goto rx_cgr_init_failed;
	}

	/* Add the FQs to the interface, and make them active */
	list_for_each_entry_safe(dpaa_fq, tmp, &priv->dpaa_fq_list, list) {
		err = dpaa_fq_init(dpaa_fq, false);
		if (err < 0)
			goto fq_alloc_failed;
	}

	priv->tx_headroom = dpaa_get_headroom(&priv->buf_layout[TX]);
	priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);

	/* All real interfaces need their ports initialized */
	dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
			    &priv->buf_layout[0], dev);
	priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
	if (!priv->percpu_priv) {
		dev_err(dev, "devm_alloc_percpu() failed\n");
		err = -ENOMEM;
		goto alloc_percpu_failed;
	}
	for_each_possible_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
		memset(percpu_priv, 0, sizeof(*percpu_priv));
	}

	/* Initialize NAPI */
	err = dpaa_napi_add(net_dev);
	if (err < 0)
		goto napi_add_failed;

	err = dpaa_netdev_init(net_dev, &dpaa_ops, tx_timeout);
	if (err < 0)
		goto netdev_init_failed;

	dpaa_eth_sysfs_init(&net_dev->dev);

	netif_info(priv, probe, net_dev, "Probed interface %s\n",
		   net_dev->name);

	return 0;
netdev_init_failed:
napi_add_failed:
	dpaa_napi_del(net_dev);
alloc_percpu_failed:
	dpaa_fq_free(dev, &priv->dpaa_fq_list);
fq_alloc_failed:
	qman_delete_cgr_safe(&priv->ingress_cgr);
	qman_release_cgrid(priv->ingress_cgr.cgrid);
rx_cgr_init_failed:
	qman_delete_cgr_safe(&priv->cgr_data.cgr);
	qman_release_cgrid(priv->cgr_data.cgr.cgrid);
tx_cgr_init_failed:
get_channel_failed:
	dpaa_bps_free(priv);
bp_create_failed:
fq_probe_failed:
dev_mask_failed:
mac_probe_failed:
	dev_set_drvdata(dev, NULL);
	free_netdev(net_dev);
alloc_etherdev_mq_failed:
	for (i = 0; i < DPAA_BPS_NUM && dpaa_bps[i]; i++) {
		if (atomic_read(&dpaa_bps[i]->refs) == 0)
			devm_kfree(dev, dpaa_bps[i]);
	}

	return err;
}
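/* Remove path: tear down in roughly the reverse order of probe: unregister
 * the netdev, free the frame queues and both congestion groups, remove the
 * NAPI instances, release the buffer pools, then free the netdev.
 */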
static int dpaa_remove(struct platform_device *pdev)
{
	struct net_device *net_dev;
	struct dpaa_priv *priv;
	struct device *dev;
	int err;

	dev = &pdev->dev;
	net_dev = dev_get_drvdata(dev);

	priv = netdev_priv(net_dev);

	dpaa_eth_sysfs_remove(dev);

	dev_set_drvdata(dev, NULL);
	unregister_netdev(net_dev);

	err = dpaa_fq_free(dev, &priv->dpaa_fq_list);

	qman_delete_cgr_safe(&priv->ingress_cgr);
	qman_release_cgrid(priv->ingress_cgr.cgrid);
	qman_delete_cgr_safe(&priv->cgr_data.cgr);
	qman_release_cgrid(priv->cgr_data.cgr.cgrid);

	dpaa_napi_del(net_dev);

	dpaa_bps_free(priv);

	free_netdev(net_dev);

	return err;
}
static struct platform_device_id dpaa_devtype[] = {
	{
		.name = "dpaa-ethernet",
		.driver_data = 0,
	}, {
	}
};
MODULE_DEVICE_TABLE(platform, dpaa_devtype);
static struct platform_driver dpaa_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
	},
	.id_table = dpaa_devtype,
	.probe = dpaa_eth_probe,
	.remove = dpaa_remove
};
static int __init dpaa_load(void)
{
	int err;

	pr_debug("FSL DPAA Ethernet driver\n");

	/* initialize dpaa_eth mirror values */
	dpaa_rx_extra_headroom = fman_get_rx_extra_headroom();
	dpaa_max_frm = fman_get_max_frm();

	err = platform_driver_register(&dpaa_driver);
	if (err < 0)
		pr_err("Error, platform_driver_register() = %d\n", err);

	return err;
}
module_init(dpaa_load);
static void __exit dpaa_unload(void)
{
	platform_driver_unregister(&dpaa_driver);

	/* Only one channel is used and needs to be released after all
	 * interfaces are removed
	 */
	dpaa_release_channel();
}
module_exit(dpaa_unload);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FSL DPAA Ethernet driver");