/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/gpio.h>
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"

#define RES_ENET_CSR	0
#define RES_RING_CSR	1
#define RES_RING_CMD	2

static const struct of_device_id xgene_enet_of_match[];
static const struct acpi_device_id xgene_enet_acpi_match[];
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	int i;

	if (!buf_pool)
		return;

	for (i = 0; i < buf_pool->slots; i++) {
		raw_desc = &buf_pool->raw_desc16[i];

		/* Hardware expects descriptor in little endian format */
		raw_desc->m0 = cpu_to_le64(i |
					   SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
					   SET_VAL(STASH, 3));
	}
}
static u16 xgene_enet_get_data_len(u64 bufdatalen)
{
	u16 hw_len, mask;

	hw_len = GET_VAL(BUFDATALEN, bufdatalen);

	if (unlikely(hw_len == 0x7800)) {
		return 0;
	} else if (!(hw_len & BIT(14))) {
		mask = GENMASK(13, 0);
		return (hw_len & mask) ? (hw_len & mask) : SIZE_16K;
	} else if (!(hw_len & GENMASK(13, 12))) {
		mask = GENMASK(11, 0);
		return (hw_len & mask) ? (hw_len & mask) : SIZE_4K;
	} else {
		mask = GENMASK(11, 0);
		return (hw_len & mask) ? (hw_len & mask) : SIZE_2K;
	}
}
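/*
 * Example of the decoding above (a sketch, assuming SIZE_16K/4K/2K are
 * 16384/4096/2048): the hardware reports the residual byte count in the
 * low bits of BUFDATALEN, with an all-zero residue meaning "whole buffer
 * used".  So for a 2KB buffer an encoded 0x7000 (BIT(14) set, bits 13:12
 * non-zero, low 12 bits zero) decodes to SIZE_2K, while 0x7040 decodes
 * to 64 bytes.  This mirrors xgene_enet_set_data_len() below, which
 * encodes a 4KB page as BIT(14).
 */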
static u16 xgene_enet_set_data_len(u32 size)
{
	u16 hw_len;

	hw_len = (size == SIZE_4K) ? BIT(14) : 0;

	return hw_len;
}
static int xgene_enet_refill_pagepool(struct xgene_enet_desc_ring *buf_pool,
				      u32 nbuf)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;
	dma_addr_t dma_addr;
	struct device *dev;
	struct page *page;
	u32 slots, tail;
	u16 hw_len;
	int i;

	if (unlikely(!buf_pool))
		return -EINVAL;

	ndev = buf_pool->ndev;
	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);
	slots = buf_pool->slots - 1;
	tail = buf_pool->tail;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		page = dev_alloc_page();
		if (unlikely(!page))
			return -ENOMEM;

		dma_addr = dma_map_page(dev, page, 0,
					PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev, dma_addr))) {
			put_page(page);
			return -ENOMEM;
		}

		hw_len = xgene_enet_set_data_len(PAGE_SIZE);
		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, hw_len) |
					   SET_BIT(COHERENT));

		buf_pool->frag_page[tail] = page;
		tail = (tail + 1) & slots;
	}

	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
	buf_pool->tail = tail;

	return 0;
}
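/*
 * Note: the "tail = (tail + 1) & slots" update above (and the matching
 * head updates in the rx and free paths) relies on the ring slot count
 * being a power of two, so that "slots - 1" acts as an all-ones wrap
 * mask.  For example, with 256 slots the mask is 0xff and tail 255
 * wraps back to 0.
 */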
static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
				     u32 nbuf)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;
	struct sk_buff *skb;
	struct device *dev;
	dma_addr_t dma_addr;
	u32 tail = buf_pool->tail;
	u32 slots = buf_pool->slots - 1;
	u16 bufdatalen, len;
	int i;

	ndev = buf_pool->ndev;
	dev = ndev_to_dev(buf_pool->ndev);
	pdata = netdev_priv(ndev);

	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
	len = XGENE_ENET_STD_MTU;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		skb = netdev_alloc_skb_ip_align(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		buf_pool->rx_skb[tail] = skb;

		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, bufdatalen) |
					   SET_BIT(COHERENT));
		tail = (tail + 1) & slots;
	}

	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
	buf_pool->tail = tail;

	return 0;
}
static u8 xgene_enet_hdr_len(const void *data)
{
	const struct ethhdr *eth = data;

	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}
static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct device *dev = ndev_to_dev(buf_pool->ndev);
	struct xgene_enet_raw_desc16 *raw_desc;
	dma_addr_t dma_addr;
	int i;

	/* Free up the buffers held by hardware */
	for (i = 0; i < buf_pool->slots; i++) {
		if (buf_pool->rx_skb[i]) {
			dev_kfree_skb_any(buf_pool->rx_skb[i]);

			raw_desc = &buf_pool->raw_desc16[i];
			dma_addr = GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1));
			dma_unmap_single(dev, dma_addr, XGENE_ENET_MAX_MTU,
					 DMA_FROM_DEVICE);
		}
	}
}
static void xgene_enet_delete_pagepool(struct xgene_enet_desc_ring *buf_pool)
{
	struct device *dev = ndev_to_dev(buf_pool->ndev);
	dma_addr_t dma_addr;
	struct page *page;
	int i;

	/* Free up the buffers held by hardware */
	for (i = 0; i < buf_pool->slots; i++) {
		page = buf_pool->frag_page[i];
		if (page) {
			dma_addr = buf_pool->frag_dma_addr[i];
			dma_unmap_page(dev, dma_addr, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			put_page(page);
		}
	}
}
static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
	struct xgene_enet_desc_ring *rx_ring = data;

	if (napi_schedule_prep(&rx_ring->napi)) {
		disable_irq_nosync(irq);
		__napi_schedule(&rx_ring->napi);
	}

	return IRQ_HANDLED;
}
static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
				    struct xgene_enet_raw_desc *raw_desc)
{
	struct xgene_enet_pdata *pdata = netdev_priv(cp_ring->ndev);
	struct sk_buff *skb;
	struct device *dev;
	skb_frag_t *frag;
	dma_addr_t *frag_dma_addr;
	u16 skb_index;
	u8 mss_index;
	u8 status;
	int i;

	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = cp_ring->cp_skb[skb_index];
	frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];

	dev = ndev_to_dev(cp_ring->ndev);
	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 skb_headlen(skb),
			 DMA_TO_DEVICE);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
			       DMA_TO_DEVICE);
	}

	if (GET_BIT(ET, le64_to_cpu(raw_desc->m3))) {
		mss_index = GET_VAL(MSS, le64_to_cpu(raw_desc->m3));
		spin_lock(&pdata->mss_lock);
		pdata->mss_refcnt[mss_index]--;
		spin_unlock(&pdata->mss_lock);
	}

	/* Checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
				       status);
	}

	if (likely(skb)) {
		dev_kfree_skb_any(skb);
	} else {
		netdev_err(cp_ring->ndev, "completion skb is NULL\n");
	}

	return 0;
}
static int xgene_enet_setup_mss(struct net_device *ndev, u32 mss)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	bool mss_index_found = false;
	int mss_index;
	int i;

	spin_lock(&pdata->mss_lock);

	/* Reuse the slot if MSS matches */
	for (i = 0; !mss_index_found && i < NUM_MSS_REG; i++) {
		if (pdata->mss[i] == mss) {
			pdata->mss_refcnt[i]++;
			mss_index = i;
			mss_index_found = true;
		}
	}

	/* Overwrite the slot with ref_count = 0 */
	for (i = 0; !mss_index_found && i < NUM_MSS_REG; i++) {
		if (!pdata->mss_refcnt[i]) {
			pdata->mss_refcnt[i]++;
			pdata->mac_ops->set_mss(pdata, mss, i);
			pdata->mss[i] = mss;
			mss_index = i;
			mss_index_found = true;
		}
	}

	/* No slots with ref_count = 0 available, return busy */
	if (!mss_index_found)
		mss_index = -EBUSY;

	spin_unlock(&pdata->mss_lock);

	return mss_index;
}
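/*
 * The MAC exposes only NUM_MSS_REG TSO MSS registers, so the slots above
 * are refcounted: a transmit whose MSS matches a live slot reuses it, a
 * free (refcount 0) slot is reprogrammed via set_mss(), and -EBUSY is
 * propagated back to xgene_enet_work_msg() when every slot is pinned by
 * in-flight frames.  The matching decrement happens in
 * xgene_enet_tx_completion() once the frame is done.
 */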
static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo)
{
	struct net_device *ndev = skb->dev;
	struct iphdr *iph;
	u8 l3hlen = 0, l4hlen = 0;
	u8 ethhdr, proto = 0, csum_enable = 0;
	u32 hdr_len, mss = 0;
	u32 i, len, nr_frags;
	int mss_index;

	ethhdr = xgene_enet_hdr_len(skb->data);

	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
		goto out;

	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
		goto out;

	iph = ip_hdr(skb);
	if (unlikely(ip_is_fragment(iph)))
		goto out;

	if (likely(iph->protocol == IPPROTO_TCP)) {
		l4hlen = tcp_hdrlen(skb) >> 2;
		csum_enable = 1;
		proto = TSO_IPPROTO_TCP;
		if (ndev->features & NETIF_F_TSO) {
			hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
			mss = skb_shinfo(skb)->gso_size;

			if (skb_is_nonlinear(skb)) {
				len = skb_headlen(skb);
				nr_frags = skb_shinfo(skb)->nr_frags;

				for (i = 0; i < 2 && i < nr_frags; i++)
					len += skb_shinfo(skb)->frags[i].size;

				/* HW requires header must reside in 3 buffer */
				if (unlikely(hdr_len > len)) {
					if (skb_linearize(skb))
						return 0;
				}
			}

			if (!mss || ((skb->len - hdr_len) <= mss))
				goto out;

			mss_index = xgene_enet_setup_mss(ndev, mss);
			if (unlikely(mss_index < 0))
				return -EBUSY;

			*hopinfo |= SET_BIT(ET) | SET_VAL(MSS, mss_index);
		}
	} else if (iph->protocol == IPPROTO_UDP) {
		l4hlen = UDP_HDR_SIZE;
		csum_enable = 1;
	}
out:
	l3hlen = ip_hdrlen(skb) >> 2;
	*hopinfo |= SET_VAL(TCPHDR, l4hlen) |
		    SET_VAL(IPHDR, l3hlen) |
		    SET_VAL(ETHHDR, ethhdr) |
		    SET_VAL(EC, csum_enable) |
		    SET_VAL(IS, proto) |
		    SET_BIT(IC) |
		    SET_BIT(TYPE_ETH_WORK_MESSAGE);

	return 0;
}
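/*
 * The linearize check above enforces the hardware rule that the full
 * L2..L4 header of a TSO frame must fit within the first three buffers:
 * the skb head plus at most the first two fragments are counted, and
 * the skb is flattened if the header would still spill over.
 */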
static u16 xgene_enet_encode_len(u16 len)
{
	return (len == BUFLEN_16K) ? 0 : len;
}

static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
{
	desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
				    SET_VAL(BUFDATALEN, len));
}
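/*
 * The "idx ^ 1" above (and in the rx/free paths that read desc[i ^ 1])
 * swaps adjacent 64-bit words: the ring appears to store each 16-byte
 * descriptor half with its two little-endian 64-bit words exchanged, so
 * logical buffer 0 lives in word 1 and vice versa.
 */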
static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
{
	__le64 *exp_bufs;

	exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
	memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
	ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);

	return exp_bufs;
}

static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
{
	return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
}
static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
				    struct sk_buff *skb)
{
	struct device *dev = ndev_to_dev(tx_ring->ndev);
	struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	__le64 *exp_desc = NULL, *exp_bufs = NULL;
	dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
	skb_frag_t *frag;
	u16 tail = tx_ring->tail;
	u64 hopinfo = 0;
	u32 len, hw_len;
	u8 ll = 0, nv = 0, idx = 0;
	bool split = false;
	u32 size, offset, ell_bytes = 0;
	u32 i, fidx, nr_frags, count = 1;
	int ret;

	raw_desc = &tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

	ret = xgene_enet_work_msg(skb, &hopinfo);
	if (ret)
		return ret;

	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
				   hopinfo);

	len = skb_headlen(skb);
	hw_len = xgene_enet_encode_len(len);

	dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		netdev_err(tx_ring->ndev, "DMA mapping error\n");
		return -EINVAL;
	}

	/* Hardware expects descriptor in little endian format */
	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
				   SET_VAL(BUFDATALEN, hw_len) |
				   SET_BIT(COHERENT));

	if (!skb_is_nonlinear(skb))
		goto out;

	/* scatter gather */
	nv = 1;
	exp_desc = (void *)&tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = nr_frags; i < 4 ; i++)
		exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);

	frag_dma_addr = xgene_get_frag_dma_array(tx_ring);

	for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
		if (!split) {
			frag = &skb_shinfo(skb)->frags[fidx];
			size = skb_frag_size(frag);
			offset = 0;

			pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
						     DMA_TO_DEVICE);
			if (dma_mapping_error(dev, pbuf_addr))
				return -EINVAL;

			frag_dma_addr[fidx] = pbuf_addr;
			fidx++;

			if (size > BUFLEN_16K)
				split = true;
		}

		if (size > BUFLEN_16K) {
			len = BUFLEN_16K;
			size -= BUFLEN_16K;
		} else {
			len = size;
			split = false;
		}

		dma_addr = pbuf_addr + offset;
		hw_len = xgene_enet_encode_len(len);

		switch (i) {
		case 0:
		case 1:
		case 2:
			xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
			break;
		case 3:
			if (split || (fidx != nr_frags)) {
				exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
				xgene_set_addr_len(exp_bufs, idx, dma_addr,
						   hw_len);
				idx++;
				ell_bytes += len;
			} else {
				xgene_set_addr_len(exp_desc, i, dma_addr,
						   hw_len);
			}
			break;
		default:
			xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
			idx++;
			ell_bytes += len;
			break;
		}

		if (split)
			offset += BUFLEN_16K;
	}
	count++;

	if (idx) {
		ll = 1;
		dma_addr = dma_map_single(dev, exp_bufs,
					  sizeof(u64) * MAX_EXP_BUFFS,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}
		i = ell_bytes >> LL_BYTES_LSB_LEN;
		exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					  SET_VAL(LL_BYTES_MSB, i) |
					  SET_VAL(LL_LEN, idx));
		raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
	}

out:
	raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
				   SET_VAL(USERINFO, tx_ring->tail));
	tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
	pdata->tx_level[tx_ring->cp_ring->index] += count;
	tx_ring->tail = tail;

	return count;
}
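/*
 * Buffer placement summary for the scatter/gather path above: the skb
 * head goes in the primary descriptor, the first three fragment buffers
 * go in the expanded descriptor (NV set), and any further buffers spill
 * into a separate exp_bufs list that the expanded descriptor points at
 * with LL set.  Fragments larger than 16KB are split into 16KB pieces,
 * since a 16KB length is encoded as 0 by xgene_enet_encode_len().
 */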
static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *tx_ring;
	int index = skb->queue_mapping;
	u32 tx_level = pdata->tx_level[index];
	int count;

	tx_ring = pdata->tx_ring[index];
	if (tx_level < pdata->txc_level[index])
		tx_level += ((typeof(pdata->tx_level[index]))~0U);

	if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) {
		netif_stop_subqueue(ndev, index);
		return NETDEV_TX_BUSY;
	}

	if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
		return NETDEV_TX_OK;

	count = xgene_enet_setup_tx_desc(tx_ring, skb);
	if (count == NETDEV_TX_BUSY)
		return NETDEV_TX_BUSY;

	if (count <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	skb_tx_timestamp(skb);

	tx_ring->tx_packets++;
	tx_ring->tx_bytes += skb->len;

	pdata->ring_ops->wr_cmd(tx_ring, count);
	return NETDEV_TX_OK;
}
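/*
 * tx_level/txc_level are free-running counters of descriptors queued
 * vs. completed; the adjustment above is meant to keep the backlog
 * (tx_level - txc_level) sane when tx_level has wrapped around u32,
 * before comparing it against the tx_qcnt_hi stop threshold that
 * gates netif_stop_subqueue().
 */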
static void xgene_enet_skip_csum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	if (!ip_is_fragment(iph) ||
	    (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}
static void xgene_enet_free_pagepool(struct xgene_enet_desc_ring *buf_pool,
				     struct xgene_enet_raw_desc *raw_desc,
				     struct xgene_enet_raw_desc *exp_desc)
{
	__le64 *desc = (void *)exp_desc;
	dma_addr_t dma_addr;
	struct device *dev;
	struct page *page;
	u16 slots, head;
	u32 frag_size;
	int i;

	if (!buf_pool || !raw_desc || !exp_desc ||
	    (!GET_VAL(NV, le64_to_cpu(raw_desc->m0))))
		return;

	dev = ndev_to_dev(buf_pool->ndev);
	slots = buf_pool->slots - 1;
	head = buf_pool->head;

	for (i = 0; i < 4; i++) {
		frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
		if (!frag_size)
			break;

		dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

		page = buf_pool->frag_page[head];
		put_page(page);

		buf_pool->frag_page[head] = NULL;
		head = (head + 1) & slots;
	}
	buf_pool->head = head;
}
static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
			       struct xgene_enet_raw_desc *raw_desc,
			       struct xgene_enet_raw_desc *exp_desc)
{
	struct xgene_enet_desc_ring *buf_pool, *page_pool;
	u32 datalen, frag_size, skb_index;
	struct net_device *ndev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	struct device *dev;
	struct page *page;
	u16 slots, head;
	int i, ret = 0;
	__le64 *desc;
	u8 status;
	bool nv;

	ndev = rx_ring->ndev;
	dev = ndev_to_dev(rx_ring->ndev);
	buf_pool = rx_ring->buf_pool;
	page_pool = rx_ring->page_pool;

	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 XGENE_ENET_STD_MTU, DMA_FROM_DEVICE);
	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = buf_pool->rx_skb[skb_index];
	buf_pool->rx_skb[skb_index] = NULL;

	/* checking for error */
	status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
		  GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		dev_kfree_skb_any(skb);
		xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc);
		xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
				       status);
		ret = -EIO;
		goto out;
	}

	/* strip off CRC as HW isn't doing this */
	datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1));

	nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
	if (!nv)
		datalen -= 4;

	skb_put(skb, datalen);
	prefetch(skb->data - NET_IP_ALIGN);

	if (!nv)
		goto skip_jumbo;

	slots = page_pool->slots - 1;
	head = page_pool->head;
	desc = (void *)exp_desc;

	for (i = 0; i < 4; i++) {
		frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
		if (!frag_size)
			break;

		dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

		page = page_pool->frag_page[head];
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0,
				frag_size, PAGE_SIZE);

		datalen += frag_size;

		page_pool->frag_page[head] = NULL;
		head = (head + 1) & slots;
	}

	page_pool->head = head;
	rx_ring->npagepool -= skb_shinfo(skb)->nr_frags;

skip_jumbo:
	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, ndev);
	if (likely((ndev->features & NETIF_F_IP_CSUM) &&
		   skb->protocol == htons(ETH_P_IP))) {
		xgene_enet_skip_csum(skb);
	}

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += datalen;
	napi_gro_receive(&rx_ring->napi, skb);

out:
	if (rx_ring->npagepool <= 0) {
		ret = xgene_enet_refill_pagepool(page_pool, NUM_NXTBUFPOOL);
		rx_ring->npagepool = NUM_NXTBUFPOOL;
		if (ret)
			return ret;
	}

	if (--rx_ring->nbufpool == 0) {
		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
		rx_ring->nbufpool = NUM_BUFPOOL;
	}

	return ret;
}
static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
{
	return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
}
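/*
 * Rx and tx-completion messages can land on the same ring; only rx
 * messages carry a free-pool queue number, so a non-zero FPQNUM field
 * is what distinguishes them in xgene_enet_process_ring() below.
 */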
static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
				   int budget)
{
	struct net_device *ndev = ring->ndev;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_raw_desc *raw_desc, *exp_desc;
	u16 head = ring->head;
	u16 slots = ring->slots - 1;
	int ret, desc_count, count = 0, processed = 0;
	bool is_completion;

	do {
		raw_desc = &ring->raw_desc[head];
		desc_count = 0;
		is_completion = false;
		exp_desc = NULL;
		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
			break;

		/* read fpqnum field after dataaddr field */
		dma_rmb();
		if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
			head = (head + 1) & slots;
			exp_desc = &ring->raw_desc[head];

			if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
				head = (head - 1) & slots;
				break;
			}
			dma_rmb();
			count++;
			desc_count++;
		}
		if (is_rx_desc(raw_desc)) {
			ret = xgene_enet_rx_frame(ring, raw_desc, exp_desc);
		} else {
			ret = xgene_enet_tx_completion(ring, raw_desc);
			is_completion = true;
		}
		xgene_enet_mark_desc_slot_empty(raw_desc);
		if (exp_desc)
			xgene_enet_mark_desc_slot_empty(exp_desc);

		head = (head + 1) & slots;
		count++;
		desc_count++;
		processed++;
		if (is_completion)
			pdata->txc_level[ring->index] += desc_count;

		if (ret)
			break;
	} while (--budget);

	if (likely(count)) {
		pdata->ring_ops->wr_cmd(ring, -count);
		ring->head = head;

		if (__netif_subqueue_stopped(ndev, ring->index))
			netif_start_subqueue(ndev, ring->index);
	}

	return processed;
}
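/*
 * wr_cmd() with a negative count appears to decrement the ring's
 * pending-message counter through the increment/decrement command
 * register (cf. INC_DEC_CMD_ADDR), returning the consumed slots to the
 * hardware, just as positive counts publish newly queued descriptors.
 */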
static int xgene_enet_napi(struct napi_struct *napi, const int budget)
{
	struct xgene_enet_desc_ring *ring;
	int processed;

	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
	processed = xgene_enet_process_ring(ring, budget);

	if (processed != budget) {
		napi_complete_done(napi, processed);
		enable_irq(ring->irq);
	}

	return processed;
}
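/*
 * The interrupt for this ring was turned off with disable_irq_nosync()
 * in xgene_enet_rx_irq(); it stays off while NAPI polls and is
 * re-enabled above only once a poll consumes less than the full budget.
 */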
static void xgene_enet_timeout(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct netdev_queue *txq;
	int i;

	pdata->mac_ops->reset(pdata);

	for (i = 0; i < pdata->txq_cnt; i++) {
		txq = netdev_get_tx_queue(ndev, i);
		txq->trans_start = jiffies;
		netif_tx_start_queue(txq);
	}
}
static void xgene_enet_set_irq_name(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (!pdata->cq_cnt) {
			snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
				 ndev->name);
		} else {
			snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-%d",
				 ndev->name, i);
		}
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-txc-%d",
			 ndev->name, i);
	}
}
static int xgene_enet_register_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *ring;
	int ret = 0, i;

	xgene_enet_set_irq_name(ndev);
	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       0, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
			return ret;
		}
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       0, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
			return ret;
		}
	}

	return 0;
}
static void xgene_enet_free_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata;
	struct xgene_enet_desc_ring *ring;
	struct device *dev;
	int i;

	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		devm_free_irq(dev, ring->irq, ring);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		devm_free_irq(dev, ring->irq, ring);
	}
}
static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		napi_enable(napi);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		napi_enable(napi);
	}
}

static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		napi_disable(napi);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		napi_disable(napi);
	}
}
static int xgene_enet_open(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int ret;

	ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt);
	if (ret)
		return ret;

	ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt);
	if (ret)
		return ret;

	xgene_enet_napi_enable(pdata);
	ret = xgene_enet_register_irq(ndev);
	if (ret)
		return ret;

	if (ndev->phydev) {
		phy_start(ndev->phydev);
	} else {
		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
		netif_carrier_off(ndev);
	}

	mac_ops->tx_enable(pdata);
	mac_ops->rx_enable(pdata);
	netif_tx_start_all_queues(ndev);

	return ret;
}

static int xgene_enet_close(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int i;

	netif_tx_stop_all_queues(ndev);
	mac_ops->tx_disable(pdata);
	mac_ops->rx_disable(pdata);

	if (ndev->phydev)
		phy_stop(ndev->phydev);
	else
		cancel_delayed_work_sync(&pdata->link_work);

	xgene_enet_free_irq(ndev);
	xgene_enet_napi_disable(pdata);
	for (i = 0; i < pdata->rxq_cnt; i++)
		xgene_enet_process_ring(pdata->rx_ring[i], -1);

	return 0;
}
static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ring->ndev);
	dev = ndev_to_dev(ring->ndev);

	pdata->ring_ops->clear(ring);
	dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}
static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_desc_ring *buf_pool, *page_pool;
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			xgene_enet_delete_ring(ring);
			pdata->port_ops->clear(pdata, ring);
			if (pdata->cq_cnt)
				xgene_enet_delete_ring(ring->cp_ring);
			pdata->tx_ring[i] = NULL;
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			page_pool = ring->page_pool;
			if (page_pool) {
				xgene_enet_delete_pagepool(page_pool);
				xgene_enet_delete_ring(page_pool);
				pdata->port_ops->clear(pdata, page_pool);
			}

			buf_pool = ring->buf_pool;
			xgene_enet_delete_bufpool(buf_pool);
			xgene_enet_delete_ring(buf_pool);
			pdata->port_ops->clear(pdata, buf_pool);

			xgene_enet_delete_ring(ring);
			pdata->rx_ring[i] = NULL;
		}
	}
}
static int xgene_enet_get_ring_size(struct device *dev,
				    enum xgene_enet_ring_cfgsize cfgsize)
{
	int size = -EINVAL;

	switch (cfgsize) {
	case RING_CFGSIZE_512B:
		size = 0x200;
		break;
	case RING_CFGSIZE_2KB:
		size = 0x800;
		break;
	case RING_CFGSIZE_16KB:
		size = 0x4000;
		break;
	case RING_CFGSIZE_64KB:
		size = 0x10000;
		break;
	case RING_CFGSIZE_512KB:
		size = 0x80000;
		break;
	default:
		dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
		break;
	}

	return size;
}
static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	if (!ring)
		return;

	dev = ndev_to_dev(ring->ndev);
	pdata = netdev_priv(ring->ndev);

	if (ring->desc_addr) {
		pdata->ring_ops->clear(ring);
		dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
	}
	devm_kfree(dev, ring);
}
static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_desc_ring *page_pool;
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;
	void *p;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			if (ring->cp_ring && ring->cp_ring->cp_skb)
				devm_kfree(dev, ring->cp_ring->cp_skb);

			if (ring->cp_ring && pdata->cq_cnt)
				xgene_enet_free_desc_ring(ring->cp_ring);

			xgene_enet_free_desc_ring(ring);
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			if (ring->buf_pool) {
				if (ring->buf_pool->rx_skb)
					devm_kfree(dev, ring->buf_pool->rx_skb);

				xgene_enet_free_desc_ring(ring->buf_pool);
			}

			page_pool = ring->page_pool;
			if (page_pool) {
				p = page_pool->frag_page;
				if (p)
					devm_kfree(dev, p);

				p = page_pool->frag_dma_addr;
				if (p)
					devm_kfree(dev, p);

				xgene_enet_free_desc_ring(page_pool);
			}

			xgene_enet_free_desc_ring(ring);
		}
	}
}
static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_desc_ring *ring)
{
	if ((pdata->enet_id == XGENE_ENET2) &&
	    (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
		return true;
	}

	return false;
}

static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
					      struct xgene_enet_desc_ring *ring)
{
	u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;

	return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
}
static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
			struct net_device *ndev, u32 ring_num,
			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *ring;
	void *irq_mbox_addr;
	int size;

	size = xgene_enet_get_ring_size(dev, cfgsize);
	if (size < 0)
		return NULL;

	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
			    GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;
	ring->num = ring_num;
	ring->cfgsize = cfgsize;
	ring->id = ring_id;

	ring->desc_addr = dmam_alloc_coherent(dev, size, &ring->dma,
					      GFP_KERNEL | __GFP_ZERO);
	if (!ring->desc_addr) {
		devm_kfree(dev, ring);
		return NULL;
	}
	ring->size = size;

	if (is_irq_mbox_required(pdata, ring)) {
		irq_mbox_addr = dmam_alloc_coherent(dev, INTR_MBOX_SIZE,
						    &ring->irq_mbox_dma,
						    GFP_KERNEL | __GFP_ZERO);
		if (!irq_mbox_addr) {
			dmam_free_coherent(dev, size, ring->desc_addr,
					   ring->dma);
			devm_kfree(dev, ring);
			return NULL;
		}
		ring->irq_mbox_addr = irq_mbox_addr;
	}

	ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
	ring = pdata->ring_ops->setup(ring);
	netdev_dbg(ndev, "ring info: num=%d  size=%d  id=%d  slots=%d\n",
		   ring->num, ring->size, ring->id, ring->slots);

	return ring;
}
static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
{
	return (owner << 6) | (bufnum & GENMASK(5, 0));
}
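/*
 * Ring ids pack the owner into the bits above bit 6 and a 6-bit buffer
 * number into bits 5:0.  For example, owner RING_OWNER_CPU with bufnum 3
 * yields (RING_OWNER_CPU << 6) | 3; xgene_enet_ring_owner() recovers the
 * owner from such an id.
 */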
static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
{
	enum xgene_ring_owner owner;

	if (p->enet_id == XGENE_ENET1) {
		switch (p->phy_mode) {
		case PHY_INTERFACE_MODE_SGMII:
			owner = RING_OWNER_ETH0;
			break;
		default:
			owner = (!p->port_id) ? RING_OWNER_ETH0 :
						RING_OWNER_ETH1;
			break;
		}
	} else {
		owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
	}

	return owner;
}
static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	u32 cpu_bufnum;
	int ret;

	ret = device_property_read_u32(dev, "channel", &cpu_bufnum);

	return (!ret) ? cpu_bufnum : pdata->cpu_bufnum;
}
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *page_pool = NULL;
	struct xgene_enet_desc_ring *buf_pool = NULL;
	struct device *dev = ndev_to_dev(ndev);
	u8 eth_bufnum = pdata->eth_bufnum;
	u8 bp_bufnum = pdata->bp_bufnum;
	u16 ring_num = pdata->ring_num;
	enum xgene_ring_owner owner;
	dma_addr_t dma_exp_bufs;
	u16 ring_id, slots;
	__le64 *exp_bufs;
	u8 cpu_bufnum;
	int i, ret, size;

	cpu_bufnum = xgene_start_cpu_bufnum(pdata);

	for (i = 0; i < pdata->rxq_cnt; i++) {
		/* allocate rx descriptor ring */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
		rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!rx_ring) {
			ret = -ENOMEM;
			goto err;
		}

		/* allocate buffer pool for receiving packets */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
		buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
						       RING_CFGSIZE_16KB,
						       ring_id);
		if (!buf_pool) {
			ret = -ENOMEM;
			goto err;
		}

		rx_ring->nbufpool = NUM_BUFPOOL;
		rx_ring->npagepool = NUM_NXTBUFPOOL;
		rx_ring->irq = pdata->irqs[i];
		buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
						sizeof(struct sk_buff *),
						GFP_KERNEL);
		if (!buf_pool->rx_skb) {
			ret = -ENOMEM;
			goto err;
		}

		buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
		rx_ring->buf_pool = buf_pool;
		pdata->rx_ring[i] = rx_ring;

		if ((pdata->enet_id == XGENE_ENET1 && pdata->rxq_cnt > 4) ||
		    (pdata->enet_id == XGENE_ENET2 && pdata->rxq_cnt > 16)) {
			break;
		}

		/* allocate next buffer pool for jumbo packets */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
		page_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
							RING_CFGSIZE_16KB,
							ring_id);
		if (!page_pool) {
			ret = -ENOMEM;
			goto err;
		}

		slots = page_pool->slots;
		page_pool->frag_page = devm_kcalloc(dev, slots,
						    sizeof(struct page *),
						    GFP_KERNEL);
		if (!page_pool->frag_page) {
			ret = -ENOMEM;
			goto err;
		}

		page_pool->frag_dma_addr = devm_kcalloc(dev, slots,
							sizeof(dma_addr_t),
							GFP_KERNEL);
		if (!page_pool->frag_dma_addr) {
			ret = -ENOMEM;
			goto err;
		}

		page_pool->dst_ring_num = xgene_enet_dst_ring_num(page_pool);
		rx_ring->page_pool = page_pool;
	}

	for (i = 0; i < pdata->txq_cnt; i++) {
		/* allocate tx descriptor ring */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
		tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!tx_ring) {
			ret = -ENOMEM;
			goto err;
		}

		size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
		exp_bufs = dmam_alloc_coherent(dev, size, &dma_exp_bufs,
					       GFP_KERNEL | __GFP_ZERO);
		if (!exp_bufs) {
			ret = -ENOMEM;
			goto err;
		}
		tx_ring->exp_bufs = exp_bufs;

		pdata->tx_ring[i] = tx_ring;

		if (!pdata->cq_cnt) {
			cp_ring = pdata->rx_ring[i];
		} else {
			/* allocate tx completion descriptor ring */
			ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU,
							 cpu_bufnum++);
			cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
							      RING_CFGSIZE_16KB,
							      ring_id);
			if (!cp_ring) {
				ret = -ENOMEM;
				goto err;
			}

			cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
			cp_ring->index = i;
		}

		cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
					       sizeof(struct sk_buff *),
					       GFP_KERNEL);
		if (!cp_ring->cp_skb) {
			ret = -ENOMEM;
			goto err;
		}

		size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
		cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
						      size, GFP_KERNEL);
		if (!cp_ring->frag_dma_addr) {
			devm_kfree(dev, cp_ring->cp_skb);
			ret = -ENOMEM;
			goto err;
		}

		tx_ring->cp_ring = cp_ring;
		tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
	}

	if (pdata->ring_ops->coalesce)
		pdata->ring_ops->coalesce(pdata->tx_ring[0]);
	pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;

	return 0;

err:
	xgene_enet_free_desc_rings(pdata);
	return ret;
}
static void xgene_enet_get_stats64(
			struct net_device *ndev,
			struct rtnl_link_stats64 *storage)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct rtnl_link_stats64 *stats = &pdata->stats;
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			stats->tx_packets += ring->tx_packets;
			stats->tx_bytes += ring->tx_bytes;
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			stats->rx_packets += ring->rx_packets;
			stats->rx_bytes += ring->rx_bytes;
			stats->rx_errors += ring->rx_length_errors +
				ring->rx_crc_errors +
				ring->rx_frame_errors +
				ring->rx_fifo_errors;
			stats->rx_dropped += ring->rx_dropped;
		}
	}
	memcpy(storage, stats, sizeof(struct rtnl_link_stats64));
}
static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;
	pdata->mac_ops->set_mac_addr(pdata);

	return ret;
}
static int xgene_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int frame_size;

	if (!netif_running(ndev)) {
		ndev->mtu = new_mtu;
		return 0;
	}

	frame_size = (new_mtu > ETH_DATA_LEN) ? (new_mtu + 18) : 0x600;

	xgene_enet_close(ndev);
	ndev->mtu = new_mtu;
	pdata->mac_ops->set_framesize(pdata, frame_size);
	xgene_enet_open(ndev);

	return 0;
}
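/*
 * For jumbo MTUs the MAC frame size is the MTU plus 18 bytes of
 * Ethernet header and FCS (ETH_HLEN + ETH_FCS_LEN = 14 + 4); for a
 * standard MTU the default hardware frame size of 0x600 (1536) bytes
 * is kept.
 */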
static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xgene_enet_open,
	.ndo_stop = xgene_enet_close,
	.ndo_start_xmit = xgene_enet_start_xmit,
	.ndo_tx_timeout = xgene_enet_timeout,
	.ndo_get_stats64 = xgene_enet_get_stats64,
	.ndo_change_mtu = xgene_change_mtu,
	.ndo_set_mac_address = xgene_enet_set_mac_address,
};
#ifdef CONFIG_ACPI
static void xgene_get_port_id_acpi(struct device *dev,
				   struct xgene_enet_pdata *pdata)
{
	acpi_status status;
	u64 temp;

	status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
	if (ACPI_FAILURE(status)) {
		pdata->port_id = 0;
	} else {
		pdata->port_id = temp;
	}
}
#endif

static void xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pdata)
{
	u32 id = 0;

	of_property_read_u32(dev->of_node, "port-id", &id);

	pdata->port_id = id & BIT(0);
}
static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	u32 delay;
	int ret;

	ret = of_property_read_u32(dev->of_node, "tx-delay", &delay);
	if (ret) {
		pdata->tx_delay = 4;
		return 0;
	}

	if (delay > 7) {
		dev_err(dev, "Invalid tx-delay specified\n");
		return -EINVAL;
	}

	pdata->tx_delay = delay;

	return 0;
}

static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	u32 delay;
	int ret;

	ret = of_property_read_u32(dev->of_node, "rx-delay", &delay);
	if (ret) {
		pdata->rx_delay = 2;
		return 0;
	}

	if (delay > 7) {
		dev_err(dev, "Invalid rx-delay specified\n");
		return -EINVAL;
	}

	pdata->rx_delay = delay;

	return 0;
}
static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev = pdata->pdev;
	struct device *dev = &pdev->dev;
	int i, ret, max_irqs;

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		max_irqs = 1;
	else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
		max_irqs = 2;
	else
		max_irqs = XGENE_MAX_ENET_IRQ;

	for (i = 0; i < max_irqs; i++) {
		ret = platform_get_irq(pdev, i);
		if (ret <= 0) {
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				max_irqs = i;
				pdata->rxq_cnt = max_irqs / 2;
				pdata->txq_cnt = max_irqs / 2;
				pdata->cq_cnt = max_irqs / 2;
				break;
			}
			dev_err(dev, "Unable to get ENET IRQ\n");
			ret = ret ? : -ENXIO;
			return ret;
		}
		pdata->irqs[i] = ret;
	}

	return 0;
}
static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
{
	int ret;

	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)
		return 0;

	if (!IS_ENABLED(CONFIG_MDIO_XGENE))
		return 0;

	ret = xgene_enet_phy_connect(pdata->ndev);
	if (!ret)
		pdata->mdio_driver = true;

	return 0;
}
static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	pdata->sfp_gpio_en = false;
	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII ||
	    (!device_property_present(dev, "sfp-gpios") &&
	     !device_property_present(dev, "rxlos-gpios")))
		return;

	pdata->sfp_gpio_en = true;
	pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN);
	if (IS_ERR(pdata->sfp_rdy))
		pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN);
}
static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev;
	struct net_device *ndev;
	struct device *dev;
	struct resource *res;
	void __iomem *base_addr;
	u32 offset;
	int ret = 0;

	pdev = pdata->pdev;
	dev = &pdev->dev;
	ndev = pdata->ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
	if (!res) {
		dev_err(dev, "Resource enet_csr not defined\n");
		return -ENODEV;
	}
	pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
	if (!pdata->base_addr) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
	if (!res) {
		dev_err(dev, "Resource ring_csr not defined\n");
		return -ENODEV;
	}
	pdata->ring_csr_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_csr_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
	if (!res) {
		dev_err(dev, "Resource ring_cmd not defined\n");
		return -ENODEV;
	}
	pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_cmd_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring command region\n");
		return -ENOMEM;
	}

	if (dev->of_node)
		xgene_get_port_id_dt(dev, pdata);
#ifdef CONFIG_ACPI
	else
		xgene_get_port_id_acpi(dev, pdata);
#endif

	if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
		eth_hw_addr_random(ndev);

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	pdata->phy_mode = device_get_phy_mode(dev);
	if (pdata->phy_mode < 0) {
		dev_err(dev, "Unable to get phy-connection-type\n");
		return pdata->phy_mode;
	}
	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
		dev_err(dev, "Incorrect phy-connection-type specified\n");
		return -ENODEV;
	}

	ret = xgene_get_tx_delay(pdata);
	if (ret)
		return ret;

	ret = xgene_get_rx_delay(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_get_irqs(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_check_phy_handle(pdata);
	if (ret)
		return ret;

	xgene_enet_gpiod_get(pdata);

	pdata->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pdata->clk)) {
		/* Firmware may have set up the clock already. */
		dev_info(dev, "clocks have been setup already\n");
	}

	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
		base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
	else
		base_addr = pdata->base_addr;
	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
	pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
	    pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
		pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
		offset = (pdata->enet_id == XGENE_ENET1) ?
			  BLOCK_ETH_MAC_CSR_OFFSET :
			  X2_BLOCK_ETH_MAC_CSR_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + offset;
	} else {
		pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
		pdata->pcs_addr = base_addr + BLOCK_PCS_OFFSET;
	}
	pdata->rx_buff_cnt = NUM_PKT_BUF;

	return 0;
}
static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_cle *enet_cle = &pdata->cle;
	struct xgene_enet_desc_ring *page_pool;
	struct net_device *ndev = pdata->ndev;
	struct xgene_enet_desc_ring *buf_pool;
	u16 dst_ring_num, ring_id;
	int i, ret;
	u32 count;

	ret = pdata->port_ops->reset(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_create_desc_rings(ndev);
	if (ret) {
		netdev_err(ndev, "Error in ring configuration\n");
		return ret;
	}

	/* setup buffer pool */
	for (i = 0; i < pdata->rxq_cnt; i++) {
		buf_pool = pdata->rx_ring[i]->buf_pool;
		xgene_enet_init_bufpool(buf_pool);
		page_pool = pdata->rx_ring[i]->page_pool;
		xgene_enet_init_bufpool(page_pool);

		count = pdata->rx_buff_cnt;
		ret = xgene_enet_refill_bufpool(buf_pool, count);
		if (ret)
			goto err;

		ret = xgene_enet_refill_pagepool(page_pool, count);
		if (ret)
			goto err;
	}

	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
	buf_pool = pdata->rx_ring[0]->buf_pool;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		/* Initialize and Enable PreClassifier Tree */
		enet_cle->max_nodes = 512;
		enet_cle->max_dbptrs = 1024;
		enet_cle->parsers = 3;
		enet_cle->active_parser = PARSER_ALL;
		enet_cle->ptree.start_node = 0;
		enet_cle->ptree.start_dbptr = 0;
		enet_cle->jump_bytes = 8;
		ret = pdata->cle_ops->cle_init(pdata);
		if (ret) {
			netdev_err(ndev, "Preclass Tree init error\n");
			goto err;
		}
	} else {
		dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
		buf_pool = pdata->rx_ring[0]->buf_pool;
		page_pool = pdata->rx_ring[0]->page_pool;
		ring_id = (page_pool) ? page_pool->id : 0;
		pdata->port_ops->cle_bypass(pdata, dst_ring_num,
					    buf_pool->id, ring_id);
	}

	ndev->max_mtu = XGENE_ENET_MAX_MTU;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->mac_ops->init(pdata);

	return ret;

err:
	xgene_enet_delete_desc_rings(pdata);
	return ret;
}
static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
{
	switch (pdata->phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
		pdata->mac_ops = &xgene_gmac_ops;
		pdata->port_ops = &xgene_gport_ops;
		pdata->rxq_cnt = 1;
		pdata->txq_cnt = 1;
		pdata->cq_cnt = 0;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		pdata->mac_ops = &xgene_sgmac_ops;
		pdata->port_ops = &xgene_sgport_ops;
		pdata->rxq_cnt = 1;
		pdata->txq_cnt = 1;
		pdata->cq_cnt = 1;
		break;
	default:
		pdata->mac_ops = &xgene_xgmac_ops;
		pdata->port_ops = &xgene_xgport_ops;
		pdata->cle_ops = &xgene_cle3in_ops;
		if (!pdata->rxq_cnt) {
			pdata->rxq_cnt = XGENE_NUM_RX_RING;
			pdata->txq_cnt = XGENE_NUM_TX_RING;
			pdata->cq_cnt = XGENE_NUM_TXC_RING;
		}
		break;
	}

	if (pdata->enet_id == XGENE_ENET1) {
		switch (pdata->port_id) {
		case 0:
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
				pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
				pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
				pdata->ring_num = START_RING_NUM_0;
			} else {
				pdata->cpu_bufnum = START_CPU_BUFNUM_0;
				pdata->eth_bufnum = START_ETH_BUFNUM_0;
				pdata->bp_bufnum = START_BP_BUFNUM_0;
				pdata->ring_num = START_RING_NUM_0;
			}
			break;
		case 1:
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
				pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
				pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
				pdata->ring_num = XG_START_RING_NUM_1;
			} else {
				pdata->cpu_bufnum = START_CPU_BUFNUM_1;
				pdata->eth_bufnum = START_ETH_BUFNUM_1;
				pdata->bp_bufnum = START_BP_BUFNUM_1;
				pdata->ring_num = START_RING_NUM_1;
			}
			break;
		default:
			break;
		}
		pdata->ring_ops = &xgene_ring1_ops;
	} else {
		switch (pdata->port_id) {
		case 0:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
			pdata->ring_num = X2_START_RING_NUM_0;
			break;
		case 1:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
			pdata->ring_num = X2_START_RING_NUM_1;
			break;
		default:
			break;
		}
		pdata->ring_ops = &xgene_ring2_ops;
	}
}
static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
			       NAPI_POLL_WEIGHT);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
			       NAPI_POLL_WEIGHT);
	}
}
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_enet_acpi_match[] = {
	{ "APMC0D05", XGENE_ENET1},
	{ "APMC0D30", XGENE_ENET1},
	{ "APMC0D31", XGENE_ENET1},
	{ "APMC0D3F", XGENE_ENET1},
	{ "APMC0D26", XGENE_ENET2},
	{ "APMC0D25", XGENE_ENET2},
	{ }
};
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#endif

static const struct of_device_id xgene_enet_of_match[] = {
	{.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
	{.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
	{},
};

MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
static int xgene_enet_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev = &pdev->dev;
	void (*link_state)(struct work_struct *);
	const struct of_device_id *of_id;
	int ret;

	ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
				  XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);

	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;
	xgene_enet_set_ethtool_ops(ndev);
	ndev->features |= NETIF_F_IP_CSUM |
			  NETIF_F_GSO |
			  NETIF_F_GRO |
			  NETIF_F_SG;

	of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
	if (of_id) {
		pdata->enet_id = (enum xgene_enet_id)of_id->data;
	}
#ifdef CONFIG_ACPI
	else {
		const struct acpi_device_id *acpi_id;

		acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
		if (acpi_id)
			pdata->enet_id = (enum xgene_enet_id) acpi_id->driver_data;
	}
#endif
	if (!pdata->enet_id) {
		ret = -ENODEV;
		goto err;
	}

	ret = xgene_enet_get_resources(pdata);
	if (ret)
		goto err;

	xgene_enet_setup_ops(pdata);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		ndev->features |= NETIF_F_TSO;
		spin_lock_init(&pdata->mss_lock);
	}
	ndev->hw_features = ndev->features;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err;
	}

	ret = xgene_enet_init_hw(pdata);
	if (ret)
		goto err;

	link_state = pdata->mac_ops->link_state;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		INIT_DELAYED_WORK(&pdata->link_work, link_state);
	} else if (!pdata->mdio_driver) {
		if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
			ret = xgene_enet_mdio_config(pdata);
		else
			INIT_DELAYED_WORK(&pdata->link_work, link_state);

		if (ret)
			goto err1;
	}

	xgene_enet_napi_add(pdata);
	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err2;
	}

	return 0;

err2:
	/*
	 * If necessary, free_netdev() will call netif_napi_del() and undo
	 * the effects of xgene_enet_napi_add()'s calls to netif_napi_add().
	 */

	if (pdata->mdio_driver)
		xgene_enet_phy_disconnect(pdata);
	else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		xgene_enet_mdio_remove(pdata);
err1:
	xgene_enet_delete_desc_rings(pdata);
err:
	free_netdev(ndev);
	return ret;
}
static int xgene_enet_remove(struct platform_device *pdev)
{
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	ndev = pdata->ndev;

	rtnl_lock();
	if (netif_running(ndev))
		dev_close(ndev);
	rtnl_unlock();

	if (pdata->mdio_driver)
		xgene_enet_phy_disconnect(pdata);
	else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		xgene_enet_mdio_remove(pdata);

	unregister_netdev(ndev);
	pdata->port_ops->shutdown(pdata);
	xgene_enet_delete_desc_rings(pdata);
	free_netdev(ndev);

	return 0;
}
static void xgene_enet_shutdown(struct platform_device *pdev)
{
	struct xgene_enet_pdata *pdata;

	pdata = platform_get_drvdata(pdev);
	if (!pdata)
		return;

	if (!pdata->ndev)
		return;

	xgene_enet_remove(pdev);
}
static struct platform_driver xgene_enet_driver = {
	.driver = {
		   .name = "xgene-enet",
		   .of_match_table = of_match_ptr(xgene_enet_of_match),
		   .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
	},
	.probe = xgene_enet_probe,
	.remove = xgene_enet_remove,
	.shutdown = xgene_enet_shutdown,
};

module_platform_driver(xgene_enet_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");