/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gpio.h>
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"

#define RES_ENET_CSR	0
#define RES_RING_CSR	1
#define RES_RING_CMD	2

static const struct of_device_id xgene_enet_of_match[];
static const struct acpi_device_id xgene_enet_acpi_match[];

static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	int i;

	if (!buf_pool)
		return;

	for (i = 0; i < buf_pool->slots; i++) {
		raw_desc = &buf_pool->raw_desc16[i];

		/* Hardware expects descriptor in little endian format */
		raw_desc->m0 = cpu_to_le64(i |
				SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
				SET_VAL(STASH, 3));
	}
}

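/* Buffer sizes travel to/from the hardware in the BUFDATALEN field. As
 * inferred from the decode below: 0x7800 marks an empty buffer; with
 * bit 14 clear the length sits in bits 13:0 (0 standing for 16KB); with
 * bit 14 set it sits in bits 11:0 (0 standing for 4KB or 2KB, depending
 * on bits 13:12).
 */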
static u16 xgene_enet_get_data_len(u64 bufdatalen)
{
	u16 hw_len, mask;

	hw_len = GET_VAL(BUFDATALEN, bufdatalen);

	if (unlikely(hw_len == 0x7800)) {
		return 0;
	} else if (!(hw_len & BIT(14))) {
		mask = GENMASK(13, 0);
		return (hw_len & mask) ? (hw_len & mask) : SIZE_16K;
	} else if (!(hw_len & GENMASK(13, 12))) {
		mask = GENMASK(11, 0);
		return (hw_len & mask) ? (hw_len & mask) : SIZE_4K;
	} else {
		mask = GENMASK(11, 0);
		return (hw_len & mask) ? (hw_len & mask) : SIZE_2K;
	}
}

static u16 xgene_enet_set_data_len(u32 size)
{
	u16 hw_len;

	hw_len = (size == SIZE_4K) ? BIT(14) : 0;

	return hw_len;
}

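/* Post @nbuf page buffers to the jumbo-frame page pool. Each 16-byte
 * descriptor carries the DMA address and encoded length of one page; the
 * tail index wraps using the power-of-two mask (slots - 1).
 */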
static int xgene_enet_refill_pagepool(struct xgene_enet_desc_ring *buf_pool,
				      u32 nbuf)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;
	dma_addr_t dma_addr;
	struct device *dev;
	struct page *page;
	u32 slots, tail;
	u16 hw_len;
	int i;

	if (unlikely(!buf_pool))
		return 0;

	ndev = buf_pool->ndev;
	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);
	slots = buf_pool->slots - 1;
	tail = buf_pool->tail;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		page = dev_alloc_page();
		if (unlikely(!page))
			return -ENOMEM;

		dma_addr = dma_map_page(dev, page, 0,
					PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev, dma_addr))) {
			put_page(page);
			return -ENOMEM;
		}

		hw_len = xgene_enet_set_data_len(PAGE_SIZE);
		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, hw_len) |
					   SET_BIT(COHERENT));

		buf_pool->frag_page[tail] = page;
		tail = (tail + 1) & slots;
	}

	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
	buf_pool->tail = tail;

	return 0;
}

static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
				     u32 nbuf)
{
	struct sk_buff *skb;
	struct xgene_enet_raw_desc16 *raw_desc;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;
	struct device *dev;
	dma_addr_t dma_addr;
	u32 tail = buf_pool->tail;
	u32 slots = buf_pool->slots - 1;
	u16 bufdatalen, len;
	int i;

	ndev = buf_pool->ndev;
	dev = ndev_to_dev(buf_pool->ndev);
	pdata = netdev_priv(ndev);

	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
	len = XGENE_ENET_STD_MTU;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		skb = netdev_alloc_skb_ip_align(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		buf_pool->rx_skb[tail] = skb;

		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, bufdatalen) |
					   SET_BIT(COHERENT));
		tail = (tail + 1) & slots;
	}

	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
	buf_pool->tail = tail;

	return 0;
}

static u8 xgene_enet_hdr_len(const void *data)
{
	const struct ethhdr *eth = data;

	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}

static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct device *dev = ndev_to_dev(buf_pool->ndev);
	struct xgene_enet_raw_desc16 *raw_desc;
	dma_addr_t dma_addr;
	int i;

	/* Free up the buffers held by hardware */
	for (i = 0; i < buf_pool->slots; i++) {
		if (buf_pool->rx_skb[i]) {
			dev_kfree_skb_any(buf_pool->rx_skb[i]);

			raw_desc = &buf_pool->raw_desc16[i];
			dma_addr = GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1));
			dma_unmap_single(dev, dma_addr, XGENE_ENET_MAX_MTU,
					 DMA_FROM_DEVICE);
		}
	}
}

static void xgene_enet_delete_pagepool(struct xgene_enet_desc_ring *buf_pool)
{
	struct device *dev = ndev_to_dev(buf_pool->ndev);
	dma_addr_t dma_addr;
	struct page *page;
	int i;

	/* Free up the buffers held by hardware */
	for (i = 0; i < buf_pool->slots; i++) {
		page = buf_pool->frag_page[i];
		if (page) {
			dma_addr = buf_pool->frag_dma_addr[i];
			dma_unmap_page(dev, dma_addr, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			put_page(page);
		}
	}
}

static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
	struct xgene_enet_desc_ring *rx_ring = data;

	if (napi_schedule_prep(&rx_ring->napi)) {
		disable_irq_nosync(irq);
		__napi_schedule(&rx_ring->napi);
	}

	return IRQ_HANDLED;
}

static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
				    struct xgene_enet_raw_desc *raw_desc)
{
	struct xgene_enet_pdata *pdata = netdev_priv(cp_ring->ndev);
	struct sk_buff *skb;
	struct device *dev;
	skb_frag_t *frag;
	dma_addr_t *frag_dma_addr;
	u16 skb_index;
	u8 mss_index;
	u8 status;
	int i;

	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = cp_ring->cp_skb[skb_index];
	frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];

	dev = ndev_to_dev(cp_ring->ndev);
	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 skb_headlen(skb),
			 DMA_TO_DEVICE);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
			       DMA_TO_DEVICE);
	}

	if (GET_BIT(ET, le64_to_cpu(raw_desc->m3))) {
		mss_index = GET_VAL(MSS, le64_to_cpu(raw_desc->m3));
		spin_lock(&pdata->mss_lock);
		pdata->mss_refcnt[mss_index]--;
		spin_unlock(&pdata->mss_lock);
	}

	/* Checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		cp_ring->tx_dropped++;
		cp_ring->tx_errors++;
	}

	if (likely(skb))
		dev_kfree_skb_any(skb);
	else
		netdev_err(cp_ring->ndev, "completion skb is NULL\n");

	return 0;
}

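/* The MAC exposes only NUM_MSS_REG MSS registers, so TSO flows have to
 * share them: a register already holding the requested MSS is reused and
 * its refcount bumped, otherwise the first register with refcount zero is
 * overwritten. Returns the register index, or -EBUSY when every register
 * is pinned by in-flight packets.
 */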
static int xgene_enet_setup_mss(struct net_device *ndev, u32 mss)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int mss_index = -EBUSY;
	int i;

	spin_lock(&pdata->mss_lock);

	/* Reuse the slot if MSS matches */
	for (i = 0; mss_index < 0 && i < NUM_MSS_REG; i++) {
		if (pdata->mss[i] == mss) {
			pdata->mss_refcnt[i]++;
			mss_index = i;
		}
	}

	/* Overwrite the slot with ref_count = 0 */
	for (i = 0; mss_index < 0 && i < NUM_MSS_REG; i++) {
		if (!pdata->mss_refcnt[i]) {
			pdata->mss_refcnt[i]++;
			pdata->mac_ops->set_mss(pdata, mss, i);
			pdata->mss[i] = mss;
			mss_index = i;
		}
	}

	spin_unlock(&pdata->mss_lock);

	return mss_index;
}

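/* Build the offload portion of the TX work message in @hopinfo: header
 * lengths for the parser, checksum-enable bits and, for TSO, the MSS
 * register index reserved above.
 */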
static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo)
{
	struct net_device *ndev = skb->dev;
	struct iphdr *iph;
	u8 l3hlen = 0, l4hlen = 0;
	u8 ethhdr, proto = 0, csum_enable = 0;
	u32 hdr_len, mss = 0;
	u32 i, len, nr_frags;
	int mss_index;

	ethhdr = xgene_enet_hdr_len(skb->data);

	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
		goto out;

	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
		goto out;

	iph = ip_hdr(skb);
	if (unlikely(ip_is_fragment(iph)))
		goto out;

	if (likely(iph->protocol == IPPROTO_TCP)) {
		l4hlen = tcp_hdrlen(skb) >> 2;
		csum_enable = 1;
		proto = TSO_IPPROTO_TCP;
		if (ndev->features & NETIF_F_TSO) {
			hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
			mss = skb_shinfo(skb)->gso_size;

			if (skb_is_nonlinear(skb)) {
				len = skb_headlen(skb);
				nr_frags = skb_shinfo(skb)->nr_frags;

				for (i = 0; i < 2 && i < nr_frags; i++)
					len += skb_shinfo(skb)->frags[i].size;

				/* HW requires the header to reside within
				 * the first 3 buffers
				 */
				if (unlikely(hdr_len > len)) {
					if (skb_linearize(skb))
						return 0;
				}
			}

			if (!mss || ((skb->len - hdr_len) <= mss))
				goto out;

			mss_index = xgene_enet_setup_mss(ndev, mss);
			if (unlikely(mss_index < 0))
				return -EBUSY;

			*hopinfo |= SET_BIT(ET) | SET_VAL(MSS, mss_index);
		}
	} else if (iph->protocol == IPPROTO_UDP) {
		l4hlen = UDP_HDR_SIZE;
		csum_enable = 1;
	}
out:
	l3hlen = ip_hdrlen(skb) >> 2;
	*hopinfo |= SET_VAL(TCPHDR, l4hlen) |
		    SET_VAL(IPHDR, l3hlen) |
		    SET_VAL(ETHHDR, ethhdr) |
		    SET_VAL(EC, csum_enable) |
		    SET_VAL(IS, proto) |
		    SET_BIT(IC) |
		    SET_BIT(TYPE_ETH_WORK_MESSAGE);

	return 0;
}

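/* A 16KB buffer is encoded as length 0 in the descriptor */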
static u16 xgene_enet_encode_len(u16 len)
{
	return (len == BUFLEN_16K) ? 0 : len;
}

static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
{
	desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
				    SET_VAL(BUFDATALEN, len));
}

static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
{
	__le64 *exp_bufs;

	exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
	memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
	ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);

	return exp_bufs;
}

static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
{
	return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
}

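/* TX descriptor layout, as used below: the first ring slot describes the
 * linear part of the skb. A non-linear skb also takes the next slot as an
 * expanded descriptor (exp_desc) holding up to four fragment buffers;
 * further buffers (including >16KB fragments, which are split) spill into
 * a link-list (LL) buffer array referenced from exp_desc[2].
 */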
static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
				    struct sk_buff *skb)
{
	struct device *dev = ndev_to_dev(tx_ring->ndev);
	struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	__le64 *exp_desc = NULL, *exp_bufs = NULL;
	dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
	skb_frag_t *frag;
	u16 tail = tx_ring->tail;
	u64 hopinfo = 0;
	u32 len, hw_len;
	u8 ll = 0, nv = 0, idx = 0;
	bool split = false;
	u32 size, offset, ell_bytes = 0;
	u32 i, fidx, nr_frags, count = 1;
	int ret;

	raw_desc = &tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

	ret = xgene_enet_work_msg(skb, &hopinfo);
	if (ret)
		return ret;

	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
				   hopinfo);

	len = skb_headlen(skb);
	hw_len = xgene_enet_encode_len(len);

	dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		netdev_err(tx_ring->ndev, "DMA mapping error\n");
		return -EINVAL;
	}

	/* Hardware expects descriptor in little endian format */
	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
				   SET_VAL(BUFDATALEN, hw_len) |
				   SET_BIT(COHERENT));

	if (!skb_is_nonlinear(skb))
		goto out;

	/* scatter gather */
	nv = 1;
	exp_desc = (void *)&tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = nr_frags; i < 4 ; i++)
		exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);

	frag_dma_addr = xgene_get_frag_dma_array(tx_ring);

	for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
		if (!split) {
			frag = &skb_shinfo(skb)->frags[fidx];
			size = skb_frag_size(frag);
			offset = 0;

			pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
						     DMA_TO_DEVICE);
			if (dma_mapping_error(dev, pbuf_addr))
				return -EINVAL;

			frag_dma_addr[fidx] = pbuf_addr;
			fidx++;

			if (size > BUFLEN_16K)
				split = true;
		}

		if (size > BUFLEN_16K) {
			len = BUFLEN_16K;
			size -= BUFLEN_16K;
		} else {
			len = size;
			split = false;
		}

		dma_addr = pbuf_addr + offset;
		hw_len = xgene_enet_encode_len(len);

		switch (i) {
		case 0:
		case 1:
		case 2:
			xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
			break;
		case 3:
			if (split || (fidx != nr_frags)) {
				exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
				xgene_set_addr_len(exp_bufs, idx, dma_addr,
						   hw_len);
				idx++;
				ell_bytes += len;
			} else {
				xgene_set_addr_len(exp_desc, i, dma_addr,
						   hw_len);
			}
			break;
		default:
			xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
			idx++;
			ell_bytes += len;
			break;
		}

		if (split)
			offset += BUFLEN_16K;
	}
	count++;

	if (idx) {
		ll = 1;
		dma_addr = dma_map_single(dev, exp_bufs,
					  sizeof(u64) * MAX_EXP_BUFFS,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}
		i = ell_bytes >> LL_BYTES_LSB_LEN;
		exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					  SET_VAL(LL_BYTES_MSB, i) |
					  SET_VAL(LL_LEN, idx));
		raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
	}

out:
	raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
				   SET_VAL(USERINFO, tx_ring->tail));
	tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
	pdata->tx_level[tx_ring->cp_ring->index] += count;
	tx_ring->tail = tail;

	return count;
}

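/* tx_level and txc_level are free-running counts of queued and completed
 * descriptors; the correction below keeps their difference valid when
 * tx_level has wrapped around past txc_level.
 */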
static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *tx_ring;
	int index = skb->queue_mapping;
	u32 tx_level = pdata->tx_level[index];
	int count;

	tx_ring = pdata->tx_ring[index];
	if (tx_level < pdata->txc_level[index])
		tx_level += ((typeof(pdata->tx_level[index]))~0U);

	if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) {
		netif_stop_subqueue(ndev, index);
		return NETDEV_TX_BUSY;
	}

	if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
		return NETDEV_TX_OK;

	count = xgene_enet_setup_tx_desc(tx_ring, skb);
	if (count == NETDEV_TX_BUSY)
		return NETDEV_TX_BUSY;

	if (count <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	skb_tx_timestamp(skb);

	tx_ring->tx_packets++;
	tx_ring->tx_bytes += skb->len;

	pdata->ring_ops->wr_cmd(tx_ring, count);
	return NETDEV_TX_OK;
}

static void xgene_enet_rx_csum(struct sk_buff *skb)
{
	struct net_device *ndev = skb->dev;
	struct iphdr *iph = ip_hdr(skb);

	if (!(ndev->features & NETIF_F_RXCSUM))
		return;

	if (skb->protocol != htons(ETH_P_IP))
		return;

	if (ip_is_fragment(iph))
		return;

	if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)
		return;

	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static void xgene_enet_free_pagepool(struct xgene_enet_desc_ring *buf_pool,
				     struct xgene_enet_raw_desc *raw_desc,
				     struct xgene_enet_raw_desc *exp_desc)
{
	__le64 *desc = (void *)exp_desc;
	dma_addr_t dma_addr;
	struct device *dev;
	struct page *page;
	u16 slots, head;
	u32 frag_size;
	int i;

	if (!buf_pool || !raw_desc || !exp_desc ||
	    (!GET_VAL(NV, le64_to_cpu(raw_desc->m0))))
		return;

	dev = ndev_to_dev(buf_pool->ndev);
	slots = buf_pool->slots - 1;
	head = buf_pool->head;

	for (i = 0; i < 4; i++) {
		frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
		if (!frag_size)
			break;

		dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

		page = buf_pool->frag_page[head];
		put_page(page);

		buf_pool->frag_page[head] = NULL;
		head = (head + 1) & slots;
	}
	buf_pool->head = head;
}

/* Errata 10GE_10 and ENET_15 - Fix duplicated HW statistic counters */
static bool xgene_enet_errata_10GE_10(struct sk_buff *skb, u32 len, u8 status)
{
	if (status == INGRESS_CRC &&
	    len >= (ETHER_STD_PACKET + 1) &&
	    len <= (ETHER_STD_PACKET + 4) &&
	    skb->protocol == htons(ETH_P_8021Q))
		return true;

	return false;
}

/* Errata 10GE_8 and ENET_11 - allow packets with length <= 64B */
static bool xgene_enet_errata_10GE_8(struct sk_buff *skb, u32 len, u8 status)
{
	if (status == INGRESS_PKT_LEN && len == ETHER_MIN_PACKET) {
		if (ntohs(eth_hdr(skb)->h_proto) < 46)
			return true;
	}

	return false;
}

static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
			       struct xgene_enet_raw_desc *raw_desc,
			       struct xgene_enet_raw_desc *exp_desc)
{
	struct xgene_enet_desc_ring *buf_pool, *page_pool;
	u32 datalen, frag_size, skb_index;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	struct device *dev;
	struct page *page;
	u16 slots, head;
	int i, ret = 0;
	__le64 *desc;
	u8 status;
	bool nv;

	ndev = rx_ring->ndev;
	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(rx_ring->ndev);
	buf_pool = rx_ring->buf_pool;
	page_pool = rx_ring->page_pool;

	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 XGENE_ENET_STD_MTU, DMA_FROM_DEVICE);
	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = buf_pool->rx_skb[skb_index];
	buf_pool->rx_skb[skb_index] = NULL;

	datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1));
	skb_put(skb, datalen);
	prefetch(skb->data - NET_IP_ALIGN);
	skb->protocol = eth_type_trans(skb, ndev);

	/* checking for error */
	status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
		  GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status)) {
		if (xgene_enet_errata_10GE_8(skb, datalen, status)) {
			pdata->false_rflr++;
		} else if (xgene_enet_errata_10GE_10(skb, datalen, status)) {
			pdata->vlan_rjbr++;
		} else {
			dev_kfree_skb_any(skb);
			xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc);
			xgene_enet_parse_error(rx_ring, status);
			rx_ring->rx_dropped++;
			goto out;
		}
	}

	nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
	if (!nv) {
		/* strip off CRC as HW isn't doing this */
		datalen -= 4;
		goto skip_jumbo;
	}

	slots = page_pool->slots - 1;
	head = page_pool->head;
	desc = (void *)exp_desc;

	for (i = 0; i < 4; i++) {
		frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
		if (!frag_size)
			break;

		dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

		page = page_pool->frag_page[head];
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0,
				frag_size, PAGE_SIZE);

		datalen += frag_size;

		page_pool->frag_page[head] = NULL;
		head = (head + 1) & slots;
	}

	page_pool->head = head;
	rx_ring->npagepool -= skb_shinfo(skb)->nr_frags;

skip_jumbo:
	skb_checksum_none_assert(skb);
	xgene_enet_rx_csum(skb);

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += datalen;
	napi_gro_receive(&rx_ring->napi, skb);

out:
	if (rx_ring->npagepool <= 0) {
		ret = xgene_enet_refill_pagepool(page_pool, NUM_NXTBUFPOOL);
		rx_ring->npagepool = NUM_NXTBUFPOOL;
		if (ret)
			return ret;
	}

	if (--rx_ring->nbufpool == 0) {
		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
		rx_ring->nbufpool = NUM_BUFPOOL;
	}

	return ret;
}

static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
{
	return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
}

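/* Drain up to @budget messages from a ring that may carry both RX frames
 * and TX completions. An NV-flagged message spans two slots, the second
 * being the expanded descriptor.
 */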
static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
				   int budget)
{
	struct net_device *ndev = ring->ndev;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_raw_desc *raw_desc, *exp_desc;
	u16 head = ring->head;
	u16 slots = ring->slots - 1;
	int ret, desc_count, count = 0, processed = 0;
	bool is_completion;

	do {
		raw_desc = &ring->raw_desc[head];
		desc_count = 0;
		is_completion = false;
		exp_desc = NULL;
		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
			break;

		/* read fpqnum field after dataaddr field */
		dma_rmb();
		if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
			head = (head + 1) & slots;
			exp_desc = &ring->raw_desc[head];

			if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
				head = (head - 1) & slots;
				break;
			}
			dma_rmb();
			count++;
			desc_count++;
		}
		if (is_rx_desc(raw_desc)) {
			ret = xgene_enet_rx_frame(ring, raw_desc, exp_desc);
		} else {
			ret = xgene_enet_tx_completion(ring, raw_desc);
			is_completion = true;
		}
		xgene_enet_mark_desc_slot_empty(raw_desc);
		if (exp_desc)
			xgene_enet_mark_desc_slot_empty(exp_desc);

		head = (head + 1) & slots;
		count++;
		desc_count++;
		processed++;
		if (is_completion)
			pdata->txc_level[ring->index] += desc_count;

		if (ret)
			break;
	} while (--budget);

	if (likely(count)) {
		pdata->ring_ops->wr_cmd(ring, -count);
		ring->head = head;

		if (__netif_subqueue_stopped(ndev, ring->index))
			netif_start_subqueue(ndev, ring->index);
	}

	return processed;
}

static int xgene_enet_napi(struct napi_struct *napi, const int budget)
{
	struct xgene_enet_desc_ring *ring;
	int processed;

	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
	processed = xgene_enet_process_ring(ring, budget);

	if (processed != budget) {
		napi_complete_done(napi, processed);
		enable_irq(ring->irq);
	}

	return processed;
}

static void xgene_enet_timeout(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct netdev_queue *txq;
	int i;

	pdata->mac_ops->reset(pdata);

	for (i = 0; i < pdata->txq_cnt; i++) {
		txq = netdev_get_tx_queue(ndev, i);
		txq->trans_start = jiffies;
		netif_tx_start_queue(txq);
	}
}

static void xgene_enet_set_irq_name(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (!pdata->cq_cnt) {
			snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
				 ndev->name);
		} else {
			snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-%d",
				 ndev->name, i);
		}
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-txc-%d",
			 ndev->name, i);
	}
}

static int xgene_enet_register_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *ring;
	int ret = 0, i;

	xgene_enet_set_irq_name(ndev);
	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       0, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
		}
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       0, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
		}
	}

	return ret;
}

static void xgene_enet_free_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata;
	struct xgene_enet_desc_ring *ring;
	struct device *dev;
	int i;

	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		devm_free_irq(dev, ring->irq, ring);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		devm_free_irq(dev, ring->irq, ring);
	}
}

static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		napi_enable(napi);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		napi_enable(napi);
	}
}

static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		napi_disable(napi);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		napi_disable(napi);
	}
}

static int xgene_enet_open(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int ret;

	ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt);
	if (ret)
		return ret;

	ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt);
	if (ret)
		return ret;

	xgene_enet_napi_enable(pdata);
	ret = xgene_enet_register_irq(ndev);
	if (ret)
		return ret;

	if (ndev->phydev) {
		phy_start(ndev->phydev);
	} else {
		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
		netif_carrier_off(ndev);
	}

	mac_ops->tx_enable(pdata);
	mac_ops->rx_enable(pdata);
	netif_tx_start_all_queues(ndev);

	return ret;
}

static int xgene_enet_close(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int i;

	netif_tx_stop_all_queues(ndev);
	mac_ops->tx_disable(pdata);
	mac_ops->rx_disable(pdata);

	if (ndev->phydev)
		phy_stop(ndev->phydev);
	else
		cancel_delayed_work_sync(&pdata->link_work);

	xgene_enet_free_irq(ndev);
	xgene_enet_napi_disable(pdata);
	for (i = 0; i < pdata->rxq_cnt; i++)
		xgene_enet_process_ring(pdata->rx_ring[i], -1);

	return 0;
}

static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ring->ndev);
	dev = ndev_to_dev(ring->ndev);

	pdata->ring_ops->clear(ring);
	dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}

static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_desc_ring *buf_pool, *page_pool;
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			xgene_enet_delete_ring(ring);
			pdata->port_ops->clear(pdata, ring);
			if (pdata->cq_cnt)
				xgene_enet_delete_ring(ring->cp_ring);
			pdata->tx_ring[i] = NULL;
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			page_pool = ring->page_pool;
			if (page_pool) {
				xgene_enet_delete_pagepool(page_pool);
				xgene_enet_delete_ring(page_pool);
				pdata->port_ops->clear(pdata, page_pool);
			}

			buf_pool = ring->buf_pool;
			xgene_enet_delete_bufpool(buf_pool);
			xgene_enet_delete_ring(buf_pool);
			pdata->port_ops->clear(pdata, buf_pool);

			xgene_enet_delete_ring(ring);
			pdata->rx_ring[i] = NULL;
		}
	}
}

static int xgene_enet_get_ring_size(struct device *dev,
				    enum xgene_enet_ring_cfgsize cfgsize)
{
	int size = -EINVAL;

	switch (cfgsize) {
	case RING_CFGSIZE_512B:
		size = 0x200;
		break;
	case RING_CFGSIZE_2KB:
		size = 0x800;
		break;
	case RING_CFGSIZE_16KB:
		size = 0x4000;
		break;
	case RING_CFGSIZE_64KB:
		size = 0x10000;
		break;
	case RING_CFGSIZE_512KB:
		size = 0x80000;
		break;
	default:
		dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
		break;
	}

	return size;
}

static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	if (!ring)
		return;

	dev = ndev_to_dev(ring->ndev);
	pdata = netdev_priv(ring->ndev);

	if (ring->desc_addr) {
		pdata->ring_ops->clear(ring);
		dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
	}
	devm_kfree(dev, ring);
}

static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_desc_ring *page_pool;
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;
	void *p;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			if (ring->cp_ring && ring->cp_ring->cp_skb)
				devm_kfree(dev, ring->cp_ring->cp_skb);

			if (ring->cp_ring && pdata->cq_cnt)
				xgene_enet_free_desc_ring(ring->cp_ring);

			xgene_enet_free_desc_ring(ring);
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			if (ring->buf_pool) {
				if (ring->buf_pool->rx_skb)
					devm_kfree(dev, ring->buf_pool->rx_skb);

				xgene_enet_free_desc_ring(ring->buf_pool);
			}

			page_pool = ring->page_pool;
			if (page_pool) {
				p = page_pool->frag_page;
				if (p)
					devm_kfree(dev, p);

				p = page_pool->frag_dma_addr;
				if (p)
					devm_kfree(dev, p);
			}

			xgene_enet_free_desc_ring(ring);
		}
	}
}

static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_desc_ring *ring)
{
	if ((pdata->enet_id == XGENE_ENET2) &&
	    (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
		return true;
	}

	return false;
}

static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
					      struct xgene_enet_desc_ring *ring)
{
	u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;

	return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
}

static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
			struct net_device *ndev, u32 ring_num,
			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *ring;
	void *irq_mbox_addr;
	int size;

	size = xgene_enet_get_ring_size(dev, cfgsize);
	if (size < 0)
		return NULL;

	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
			    GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;
	ring->num = ring_num;
	ring->cfgsize = cfgsize;
	ring->id = ring_id;

	ring->desc_addr = dmam_alloc_coherent(dev, size, &ring->dma,
					      GFP_KERNEL | __GFP_ZERO);
	if (!ring->desc_addr) {
		devm_kfree(dev, ring);
		return NULL;
	}
	ring->size = size;

	if (is_irq_mbox_required(pdata, ring)) {
		irq_mbox_addr = dmam_alloc_coherent(dev, INTR_MBOX_SIZE,
						    &ring->irq_mbox_dma,
						    GFP_KERNEL | __GFP_ZERO);
		if (!irq_mbox_addr) {
			dmam_free_coherent(dev, size, ring->desc_addr,
					   ring->dma);
			devm_kfree(dev, ring);
			return NULL;
		}
		ring->irq_mbox_addr = irq_mbox_addr;
	}

	ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
	ring = pdata->ring_ops->setup(ring);
	netdev_dbg(ndev, "ring info: num=%d  size=%d  id=%d  slots=%d\n",
		   ring->num, ring->size, ring->id, ring->slots);

	return ring;
}

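/* A ring id packs the owner into the bits above the 6-bit buffer number,
 * e.g. (RING_OWNER_CPU << 6) | bufnum.
 */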
static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
{
	return (owner << 6) | (bufnum & GENMASK(5, 0));
}

static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
{
	enum xgene_ring_owner owner;

	if (p->enet_id == XGENE_ENET1) {
		switch (p->phy_mode) {
		case PHY_INTERFACE_MODE_SGMII:
			owner = RING_OWNER_ETH0;
			break;
		default:
			owner = (!p->port_id) ? RING_OWNER_ETH0 :
						RING_OWNER_ETH1;
			break;
		}
	} else {
		owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
	}

	return owner;
}

static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	u32 cpu_bufnum;
	int ret;

	ret = device_property_read_u32(dev, "channel", &cpu_bufnum);

	return (!ret) ? cpu_bufnum : pdata->cpu_bufnum;
}

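/* Ring topology created below: each RX queue gets an RX descriptor ring,
 * an skb buffer pool and, when the queue count permits, a page pool for
 * jumbo frames; each TX queue gets a TX ring plus either a dedicated
 * completion ring or, with cq_cnt == 0, the matching RX ring doubling as
 * its completion ring.
 */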
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *page_pool = NULL;
	struct xgene_enet_desc_ring *buf_pool = NULL;
	struct device *dev = ndev_to_dev(ndev);
	u8 eth_bufnum = pdata->eth_bufnum;
	u8 bp_bufnum = pdata->bp_bufnum;
	u16 ring_num = pdata->ring_num;
	enum xgene_ring_owner owner;
	dma_addr_t dma_exp_bufs;
	u16 ring_id, slots;
	__le64 *exp_bufs;
	int i, ret, size;
	u8 cpu_bufnum;

	cpu_bufnum = xgene_start_cpu_bufnum(pdata);

	for (i = 0; i < pdata->rxq_cnt; i++) {
		/* allocate rx descriptor ring */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
		rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!rx_ring) {
			ret = -ENOMEM;
			goto err;
		}

		/* allocate buffer pool for receiving packets */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
		buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
						       RING_CFGSIZE_16KB,
						       ring_id);
		if (!buf_pool) {
			ret = -ENOMEM;
			goto err;
		}

		rx_ring->nbufpool = NUM_BUFPOOL;
		rx_ring->npagepool = NUM_NXTBUFPOOL;
		rx_ring->irq = pdata->irqs[i];
		buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
						sizeof(struct sk_buff *),
						GFP_KERNEL);
		if (!buf_pool->rx_skb) {
			ret = -ENOMEM;
			goto err;
		}

		buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
		rx_ring->buf_pool = buf_pool;
		pdata->rx_ring[i] = rx_ring;

		if ((pdata->enet_id == XGENE_ENET1 && pdata->rxq_cnt > 4) ||
		    (pdata->enet_id == XGENE_ENET2 && pdata->rxq_cnt > 16)) {
			continue;
		}

		/* allocate next buffer pool for jumbo packets */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
		page_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
							RING_CFGSIZE_16KB,
							ring_id);
		if (!page_pool) {
			ret = -ENOMEM;
			goto err;
		}

		slots = page_pool->slots;
		page_pool->frag_page = devm_kcalloc(dev, slots,
						    sizeof(struct page *),
						    GFP_KERNEL);
		if (!page_pool->frag_page) {
			ret = -ENOMEM;
			goto err;
		}

		page_pool->frag_dma_addr = devm_kcalloc(dev, slots,
							sizeof(dma_addr_t),
							GFP_KERNEL);
		if (!page_pool->frag_dma_addr) {
			ret = -ENOMEM;
			goto err;
		}

		page_pool->dst_ring_num = xgene_enet_dst_ring_num(page_pool);
		rx_ring->page_pool = page_pool;
	}

	for (i = 0; i < pdata->txq_cnt; i++) {
		/* allocate tx descriptor ring */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
		tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!tx_ring) {
			ret = -ENOMEM;
			goto err;
		}

		size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
		exp_bufs = dmam_alloc_coherent(dev, size, &dma_exp_bufs,
					       GFP_KERNEL | __GFP_ZERO);
		if (!exp_bufs) {
			ret = -ENOMEM;
			goto err;
		}
		tx_ring->exp_bufs = exp_bufs;

		pdata->tx_ring[i] = tx_ring;

		if (!pdata->cq_cnt) {
			cp_ring = pdata->rx_ring[i];
		} else {
			/* allocate tx completion descriptor ring */
			ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU,
							 cpu_bufnum++);
			cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
							      RING_CFGSIZE_16KB,
							      ring_id);
			if (!cp_ring) {
				ret = -ENOMEM;
				goto err;
			}

			cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
			cp_ring->index = i;
		}

		cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
					       sizeof(struct sk_buff *),
					       GFP_KERNEL);
		if (!cp_ring->cp_skb) {
			ret = -ENOMEM;
			goto err;
		}

		size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
		cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
						      size, GFP_KERNEL);
		if (!cp_ring->frag_dma_addr) {
			devm_kfree(dev, cp_ring->cp_skb);
			ret = -ENOMEM;
			goto err;
		}

		tx_ring->cp_ring = cp_ring;
		tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
	}

	if (pdata->ring_ops->coalesce)
		pdata->ring_ops->coalesce(pdata->tx_ring[0]);
	pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;

	return 0;

err:
	xgene_enet_free_desc_rings(pdata);
	return ret;
}

static void xgene_enet_get_stats64(struct net_device *ndev,
				   struct rtnl_link_stats64 *stats)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			stats->tx_packets += ring->tx_packets;
			stats->tx_bytes += ring->tx_bytes;
			stats->tx_dropped += ring->tx_dropped;
			stats->tx_errors += ring->tx_errors;
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			stats->rx_packets += ring->rx_packets;
			stats->rx_bytes += ring->rx_bytes;
			stats->rx_dropped += ring->rx_dropped;
			stats->rx_errors += ring->rx_errors +
				ring->rx_length_errors +
				ring->rx_crc_errors +
				ring->rx_frame_errors +
				ring->rx_fifo_errors;
			stats->rx_length_errors += ring->rx_length_errors;
			stats->rx_crc_errors += ring->rx_crc_errors;
			stats->rx_frame_errors += ring->rx_frame_errors;
			stats->rx_fifo_errors += ring->rx_fifo_errors;
		}
	}
}

static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;
	pdata->mac_ops->set_mac_addr(pdata);

	return ret;
}

static int xgene_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int frame_size;

	if (!netif_running(ndev))
		return 0;

	frame_size = (new_mtu > ETH_DATA_LEN) ? (new_mtu + 18) : 0x600;

	xgene_enet_close(ndev);
	ndev->mtu = new_mtu;
	pdata->mac_ops->set_framesize(pdata, frame_size);
	xgene_enet_open(ndev);

	return 0;
}

static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xgene_enet_open,
	.ndo_stop = xgene_enet_close,
	.ndo_start_xmit = xgene_enet_start_xmit,
	.ndo_tx_timeout = xgene_enet_timeout,
	.ndo_get_stats64 = xgene_enet_get_stats64,
	.ndo_change_mtu = xgene_change_mtu,
	.ndo_set_mac_address = xgene_enet_set_mac_address,
};

#ifdef CONFIG_ACPI
static void xgene_get_port_id_acpi(struct device *dev,
				   struct xgene_enet_pdata *pdata)
{
	acpi_status status;
	u64 temp;

	status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
	if (ACPI_FAILURE(status)) {
		pdata->port_id = 0;
	} else {
		pdata->port_id = temp;
	}
}
#endif

static void xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pdata)
{
	u32 id = 0;

	of_property_read_u32(dev->of_node, "port-id", &id);

	pdata->port_id = id & BIT(0);
}

static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	int delay, ret;

	ret = of_property_read_u32(dev->of_node, "tx-delay", &delay);
	if (ret) {
		pdata->tx_delay = 4;
		return 0;
	}

	if (delay < 0 || delay > 7) {
		dev_err(dev, "Invalid tx-delay specified\n");
		return -EINVAL;
	}

	pdata->tx_delay = delay;

	return 0;
}

static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	int delay, ret;

	ret = of_property_read_u32(dev->of_node, "rx-delay", &delay);
	if (ret) {
		pdata->rx_delay = 2;
		return 0;
	}

	if (delay < 0 || delay > 7) {
		dev_err(dev, "Invalid rx-delay specified\n");
		return -EINVAL;
	}

	pdata->rx_delay = delay;

	return 0;
}

static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev = pdata->pdev;
	struct device *dev = &pdev->dev;
	int i, ret, max_irqs;

	if (phy_interface_mode_is_rgmii(pdata->phy_mode))
		max_irqs = 1;
	else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
		max_irqs = 2;
	else
		max_irqs = XGENE_MAX_ENET_IRQ;

	for (i = 0; i < max_irqs; i++) {
		ret = platform_get_irq(pdev, i);
		if (ret <= 0) {
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				max_irqs = i;
				pdata->rxq_cnt = max_irqs / 2;
				pdata->txq_cnt = max_irqs / 2;
				pdata->cq_cnt = max_irqs / 2;
				break;
			}
			dev_err(dev, "Unable to get ENET IRQ\n");
			ret = ret ? : -ENXIO;
			return ret;
		}
		pdata->irqs[i] = ret;
	}

	return 0;
}

static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
{
	int ret;

	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)
		return 0;

	if (!IS_ENABLED(CONFIG_MDIO_XGENE))
		return 0;

	ret = xgene_enet_phy_connect(pdata->ndev);
	if (!ret)
		pdata->mdio_driver = true;

	return 0;
}

static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	pdata->sfp_gpio_en = false;
	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII ||
	    (!device_property_present(dev, "sfp-gpios") &&
	     !device_property_present(dev, "rxlos-gpios")))
		return;

	pdata->sfp_gpio_en = true;
	pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN);
	if (IS_ERR(pdata->sfp_rdy))
		pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN);
}

static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev;
	struct net_device *ndev;
	struct device *dev;
	struct resource *res;
	void __iomem *base_addr;
	u32 offset;
	int ret = 0;

	pdev = pdata->pdev;
	dev = &pdev->dev;
	ndev = pdata->ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
	if (!res) {
		dev_err(dev, "Resource enet_csr not defined\n");
		return -ENODEV;
	}
	pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
	if (!pdata->base_addr) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
	if (!res) {
		dev_err(dev, "Resource ring_csr not defined\n");
		return -ENODEV;
	}
	pdata->ring_csr_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_csr_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
	if (!res) {
		dev_err(dev, "Resource ring_cmd not defined\n");
		return -ENODEV;
	}
	pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_cmd_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring command region\n");
		return -ENOMEM;
	}

	if (dev->of_node)
		xgene_get_port_id_dt(dev, pdata);
#ifdef CONFIG_ACPI
	else
		xgene_get_port_id_acpi(dev, pdata);
#endif

	if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
		eth_hw_addr_random(ndev);

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	pdata->phy_mode = device_get_phy_mode(dev);
	if (pdata->phy_mode < 0) {
		dev_err(dev, "Unable to get phy-connection-type\n");
		return pdata->phy_mode;
	}
	if (!phy_interface_mode_is_rgmii(pdata->phy_mode) &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
		dev_err(dev, "Incorrect phy-connection-type specified\n");
		return -ENODEV;
	}

	ret = xgene_get_tx_delay(pdata);
	if (ret)
		return ret;

	ret = xgene_get_rx_delay(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_get_irqs(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_check_phy_handle(pdata);
	if (ret)
		return ret;

	xgene_enet_gpiod_get(pdata);

	pdata->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pdata->clk)) {
		/* Abort if the clock is defined but couldn't be retrieved.
		 * Always abort if the clock is missing on DT system as
		 * the driver can't cope with this case.
		 */
		if (PTR_ERR(pdata->clk) != -ENOENT || dev->of_node)
			return PTR_ERR(pdata->clk);
		/* Firmware may have set up the clock already. */
		dev_info(dev, "clocks have been setup already\n");
	}

	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
		base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
	else
		base_addr = pdata->base_addr;
	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
	pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
	if (phy_interface_mode_is_rgmii(pdata->phy_mode) ||
	    pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
		pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
		pdata->mcx_stats_addr =
			pdata->base_addr + BLOCK_ETH_STATS_OFFSET;
		offset = (pdata->enet_id == XGENE_ENET1) ?
			  BLOCK_ETH_MAC_CSR_OFFSET :
			  X2_BLOCK_ETH_MAC_CSR_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + offset;
	} else {
		pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
		pdata->mcx_stats_addr = base_addr + BLOCK_AXG_STATS_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
		pdata->pcs_addr = base_addr + BLOCK_PCS_OFFSET;
	}
	pdata->rx_buff_cnt = NUM_PKT_BUF;

	return 0;
}

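/* Bring-up order: reset the port, create and pre-fill the descriptor
 * rings, then either initialize the preclassifier tree (XGMII) or program
 * CLE bypass so that all traffic lands in the first RX ring.
 */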
static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_cle *enet_cle = &pdata->cle;
	struct xgene_enet_desc_ring *page_pool;
	struct net_device *ndev = pdata->ndev;
	struct xgene_enet_desc_ring *buf_pool;
	u16 dst_ring_num, ring_id;
	int i, ret;
	u32 count;

	ret = pdata->port_ops->reset(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_create_desc_rings(ndev);
	if (ret) {
		netdev_err(ndev, "Error in ring configuration\n");
		return ret;
	}

	/* setup buffer pool */
	for (i = 0; i < pdata->rxq_cnt; i++) {
		buf_pool = pdata->rx_ring[i]->buf_pool;
		xgene_enet_init_bufpool(buf_pool);
		page_pool = pdata->rx_ring[i]->page_pool;
		xgene_enet_init_bufpool(page_pool);

		count = pdata->rx_buff_cnt;
		ret = xgene_enet_refill_bufpool(buf_pool, count);
		if (ret)
			goto err;

		ret = xgene_enet_refill_pagepool(page_pool, count);
		if (ret)
			goto err;
	}

	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
	buf_pool = pdata->rx_ring[0]->buf_pool;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		/* Initialize and Enable PreClassifier Tree */
		enet_cle->max_nodes = 512;
		enet_cle->max_dbptrs = 1024;
		enet_cle->parsers = 3;
		enet_cle->active_parser = PARSER_ALL;
		enet_cle->ptree.start_node = 0;
		enet_cle->ptree.start_dbptr = 0;
		enet_cle->jump_bytes = 8;
		ret = pdata->cle_ops->cle_init(pdata);
		if (ret) {
			netdev_err(ndev, "Preclass Tree init error\n");
			goto err;
		}
	} else {
		dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
		buf_pool = pdata->rx_ring[0]->buf_pool;
		page_pool = pdata->rx_ring[0]->page_pool;
		ring_id = (page_pool) ? page_pool->id : 0;
		pdata->port_ops->cle_bypass(pdata, dst_ring_num,
					    buf_pool->id, ring_id);
	}

	ndev->max_mtu = XGENE_ENET_MAX_MTU;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->mac_ops->init(pdata);

	return ret;

err:
	xgene_enet_delete_desc_rings(pdata);
	return ret;
}

static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
{
	switch (pdata->phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		pdata->mac_ops = &xgene_gmac_ops;
		pdata->port_ops = &xgene_gport_ops;
		pdata->rm = RM3;
		pdata->rxq_cnt = 1;
		pdata->txq_cnt = 1;
		pdata->cq_cnt = 0;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		pdata->mac_ops = &xgene_sgmac_ops;
		pdata->port_ops = &xgene_sgport_ops;
		pdata->rm = RM1;
		pdata->rxq_cnt = 1;
		pdata->txq_cnt = 1;
		pdata->cq_cnt = 1;
		break;
	default:
		pdata->mac_ops = &xgene_xgmac_ops;
		pdata->port_ops = &xgene_xgport_ops;
		pdata->cle_ops = &xgene_cle3in_ops;
		pdata->rm = RM0;
		if (!pdata->rxq_cnt) {
			pdata->rxq_cnt = XGENE_NUM_RX_RING;
			pdata->txq_cnt = XGENE_NUM_TX_RING;
			pdata->cq_cnt = XGENE_NUM_TXC_RING;
		}
		break;
	}

	if (pdata->enet_id == XGENE_ENET1) {
		switch (pdata->port_id) {
		case 0:
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
				pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
				pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
				pdata->ring_num = START_RING_NUM_0;
			} else {
				pdata->cpu_bufnum = START_CPU_BUFNUM_0;
				pdata->eth_bufnum = START_ETH_BUFNUM_0;
				pdata->bp_bufnum = START_BP_BUFNUM_0;
				pdata->ring_num = START_RING_NUM_0;
			}
			break;
		case 1:
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
				pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
				pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
				pdata->ring_num = XG_START_RING_NUM_1;
			} else {
				pdata->cpu_bufnum = START_CPU_BUFNUM_1;
				pdata->eth_bufnum = START_ETH_BUFNUM_1;
				pdata->bp_bufnum = START_BP_BUFNUM_1;
				pdata->ring_num = START_RING_NUM_1;
			}
			break;
		default:
			break;
		}
		pdata->ring_ops = &xgene_ring1_ops;
	} else {
		switch (pdata->port_id) {
		case 0:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
			pdata->ring_num = X2_START_RING_NUM_0;
			break;
		case 1:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
			pdata->ring_num = X2_START_RING_NUM_1;
			break;
		default:
			break;
		}
		pdata->ring_ops = &xgene_ring2_ops;
	}
}

static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
			       NAPI_POLL_WEIGHT);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
			       NAPI_POLL_WEIGHT);
	}
}

#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_enet_acpi_match[] = {
	{ "APMC0D05", XGENE_ENET1},
	{ "APMC0D30", XGENE_ENET1},
	{ "APMC0D31", XGENE_ENET1},
	{ "APMC0D3F", XGENE_ENET1},
	{ "APMC0D26", XGENE_ENET2},
	{ "APMC0D25", XGENE_ENET2},
	{ }
};
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#endif

static const struct of_device_id xgene_enet_of_match[] = {
	{.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
	{.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
	{},
};

MODULE_DEVICE_TABLE(of, xgene_enet_of_match);

static int xgene_enet_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev = &pdev->dev;
	void (*link_state)(struct work_struct *);
	const struct of_device_id *of_id;
	int ret;

	ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
				  XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);

	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;
	xgene_enet_set_ethtool_ops(ndev);
	ndev->features |= NETIF_F_IP_CSUM |
			  NETIF_F_GSO |
			  NETIF_F_GRO |
			  NETIF_F_SG;

	of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
	if (of_id) {
		pdata->enet_id = (enum xgene_enet_id)of_id->data;
	}
#ifdef CONFIG_ACPI
	else {
		const struct acpi_device_id *acpi_id;

		acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
		if (acpi_id)
			pdata->enet_id = (enum xgene_enet_id) acpi_id->driver_data;
	}
#endif
	if (!pdata->enet_id) {
		ret = -ENODEV;
		goto err;
	}

	ret = xgene_enet_get_resources(pdata);
	if (ret)
		goto err;

	xgene_enet_setup_ops(pdata);
	spin_lock_init(&pdata->mac_lock);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		ndev->features |= NETIF_F_TSO | NETIF_F_RXCSUM;
		spin_lock_init(&pdata->mss_lock);
	}
	ndev->hw_features = ndev->features;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err;
	}

	ret = xgene_enet_init_hw(pdata);
	if (ret)
		goto err;

	link_state = pdata->mac_ops->link_state;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		INIT_DELAYED_WORK(&pdata->link_work, link_state);
	} else if (!pdata->mdio_driver) {
		if (phy_interface_mode_is_rgmii(pdata->phy_mode))
			ret = xgene_enet_mdio_config(pdata);
		else
			INIT_DELAYED_WORK(&pdata->link_work, link_state);

		if (ret)
			goto err1;
	}

	spin_lock_init(&pdata->stats_lock);
	ret = xgene_extd_stats_init(pdata);
	if (ret)
		goto err2;

	xgene_enet_napi_add(pdata);
	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err2;
	}

	return 0;

err2:
	/*
	 * If necessary, free_netdev() will call netif_napi_del() and undo
	 * the effects of xgene_enet_napi_add()'s calls to netif_napi_add().
	 */
	if (pdata->mdio_driver)
		xgene_enet_phy_disconnect(pdata);
	else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
		xgene_enet_mdio_remove(pdata);
err1:
	xgene_enet_delete_desc_rings(pdata);
err:
	free_netdev(ndev);
	return ret;
}

static int xgene_enet_remove(struct platform_device *pdev)
{
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	ndev = pdata->ndev;

	rtnl_lock();
	if (netif_running(ndev))
		dev_close(ndev);
	rtnl_unlock();

	if (pdata->mdio_driver)
		xgene_enet_phy_disconnect(pdata);
	else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
		xgene_enet_mdio_remove(pdata);

	unregister_netdev(ndev);
	xgene_enet_delete_desc_rings(pdata);
	pdata->port_ops->shutdown(pdata);
	free_netdev(ndev);

	return 0;
}

static void xgene_enet_shutdown(struct platform_device *pdev)
{
	struct xgene_enet_pdata *pdata;

	pdata = platform_get_drvdata(pdev);
	if (!pdata)
		return;

	if (!pdata->ndev)
		return;

	xgene_enet_remove(pdev);
}

static struct platform_driver xgene_enet_driver = {
	.driver = {
		   .name = "xgene-enet",
		   .of_match_table = of_match_ptr(xgene_enet_of_match),
		   .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
	},
	.probe = xgene_enet_probe,
	.remove = xgene_enet_remove,
	.shutdown = xgene_enet_shutdown,
};

module_platform_driver(xgene_enet_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");