/* Freescale QUICC Engine HDLC Device Driver
 *
 * Copyright 2016 Freescale Semiconductor Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/hdlc.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <soc/fsl/qe/qe_tdm.h>
#include <uapi/linux/if_arp.h>

#include "fsl_ucc_hdlc.h"

#define DRV_DESC "Freescale QE UCC HDLC Driver"
#define DRV_NAME "ucc_hdlc"

#define TDM_PPPOHT_SLIC_MAXIN

static struct ucc_tdm_info utdm_primary_info = {
	.uf_info = {
		/* ... */
		.mode = UCC_FAST_PROTOCOL_MODE_HDLC,
		.ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
		.tenc = UCC_FAST_TX_ENCODING_NRZ,
		.renc = UCC_FAST_RX_ENCODING_NRZ,
		.tcrc = UCC_FAST_16_BIT_CRC,
		.synl = UCC_FAST_SYNC_LEN_NOT_USED,
	},
	.si_info = {
#ifdef TDM_PPPOHT_SLIC_MAXIN
		/* ... */
#endif
		/* ... */
	},
};

static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM];
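
/* uhdlc_init() performs the one-time channel bring-up: it initializes
 * the ucc_fast engine, carves out BD rings and their DMA buffers, and
 * programs the HDLC parameter RAM, unwinding through the error labels
 * at the bottom on failure.
 */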
static int uhdlc_init(struct ucc_hdlc_private *priv)
{
	struct ucc_tdm_info *ut_info;
	struct ucc_fast_info *uf_info;
	u32 cecr_subblock;
	u16 bd_status;
	int ret, i;
	void *bd_buffer;
	dma_addr_t bd_dma_addr;
	u32 riptr;
	u32 tiptr;
	u32 gumr;

	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;

	/* This sets the HPM bit in the CMXUCR register, which configures an
	 * open-drain-connected HDLC bus
	 */
	if (priv->hdlc_bus)
		uf_info->brkpt_support = 1;

	uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
				UCC_HDLC_UCCE_TXB) << 16);

	ret = ucc_fast_init(uf_info, &priv->uccf);
	if (ret) {
		dev_err(priv->dev, "Failed to init uccf.");
		return ret;
	}

	priv->uf_regs = priv->uccf->uf_regs;
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	if (priv->loopback) {
		dev_info(priv->dev, "Loopback Mode\n");
		/* use the same clock when working in loopback */
		qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);

		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
			 UCC_FAST_GUMR_TCI);
		gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode (needs fixing) */
	iowrite32be(0, &priv->uf_regs->upsmr);

	/* hdlc_bus mode */
	if (priv->hdlc_bus) {
		u32 upsmr;

		dev_info(priv->dev, "HDLC bus Mode\n");
		upsmr = ioread32be(&priv->uf_regs->upsmr);

		/* bus mode and retransmit enable, with collision window
		 * set to 8 bytes
		 */
		upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS |
			 UCC_HDLC_UPSMR_CW8;
		iowrite32be(upsmr, &priv->uf_regs->upsmr);

		/* explicitly disable CDS & CTSP */
		gumr = ioread32be(&priv->uf_regs->gumr);
		gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP);
		/* set automatic sync to explicitly ignore CD signal */
		gumr |= UCC_FAST_GUMR_SYNL_AUTO;
		iowrite32be(gumr, &priv->uf_regs->gumr);
	}

	priv->rx_ring_size = RX_BD_RING_LEN;
	priv->tx_ring_size = TX_BD_RING_LEN;
	/* Alloc Rx BD */
	priv->rx_bd_base = dma_alloc_coherent(priv->dev,
					      RX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_rx_bd, GFP_KERNEL);
	if (!priv->rx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n");
		ret = -ENOMEM;
		goto free_uccf;
	}

	/* Alloc Tx BD */
	priv->tx_bd_base = dma_alloc_coherent(priv->dev,
					      TX_BD_RING_LEN * sizeof(struct qe_bd),
					      &priv->dma_tx_bd, GFP_KERNEL);
	if (!priv->tx_bd_base) {
		dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n");
		ret = -ENOMEM;
		goto free_rx_bd;
	}

	/* Alloc parameter ram for ucc hdlc */
	priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
					       ALIGNMENT_OF_UCC_HDLC_PRAM);
	if (IS_ERR_VALUE(priv->ucc_pram_offset)) {
		dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n");
		ret = -ENOMEM;
		goto free_tx_bd;
	}

	priv->rx_skbuff = kcalloc(priv->rx_ring_size,
				  sizeof(*priv->rx_skbuff),
				  GFP_KERNEL);
	if (!priv->rx_skbuff) {
		ret = -ENOMEM;
		goto free_ucc_pram;
	}

	priv->tx_skbuff = kcalloc(priv->tx_ring_size,
				  sizeof(*priv->tx_skbuff),
				  GFP_KERNEL);
	if (!priv->tx_skbuff) {
		ret = -ENOMEM;
		goto free_rx_skbuff;
	}

	priv->skb_curtx = 0;
	priv->skb_dirtytx = 0;
	priv->curtx_bd = priv->tx_bd_base;
	priv->dirty_tx = priv->tx_bd_base;
	priv->currx_bd = priv->rx_bd_base;
	priv->currx_bdnum = 0;

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
				qe_muram_addr(priv->ucc_pram_offset);

	/* Zero out parameter ram */
	memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));

	/* Alloc riptr, tiptr */
	riptr = qe_muram_alloc(32, 32);
	if (IS_ERR_VALUE(riptr)) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_tx_skbuff;
	}

	tiptr = qe_muram_alloc(32, 32);
	if (IS_ERR_VALUE(tiptr)) {
		dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
		ret = -ENOMEM;
		goto free_riptr;
	}

	/* Set RIPTR, TIPTR */
	iowrite16be(riptr, &priv->ucc_pram->riptr);
	iowrite16be(tiptr, &priv->ucc_pram->tiptr);

	iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);

	/* Set RBASE, TBASE */
	iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
	iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);

	/* Set RSTATE, TSTATE */
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
	iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);

	/* Set C_MASK, C_PRES for 16bit CRC */
	iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
	iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);
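
	/* The remaining parameter-RAM fields bound frame handling: mflr caps
	 * the frame length, rfthr/rfcnt set the receive-frame threshold, and
	 * hmask/haddr1-4 program address recognition (hmask comes from the
	 * "fsl,hmask" DT property when present, see ucc_hdlc_probe()).
	 */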
	iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
	iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
	iowrite16be(priv->hmask, &priv->ucc_pram->hmask);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
	iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);

	/* Get BD buffer */
	bd_buffer = dma_zalloc_coherent(priv->dev,
					(RX_BD_RING_LEN + TX_BD_RING_LEN) *
					MAX_RX_BUF_LENGTH,
					&bd_dma_addr, GFP_KERNEL);
	if (!bd_buffer) {
		dev_err(priv->dev, "Could not allocate buffer descriptors\n");
		ret = -ENOMEM;
		goto free_tiptr;
	}

	priv->rx_buffer = bd_buffer;
	priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;

	priv->dma_rx_addr = bd_dma_addr;
	priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
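
	/* A single coherent allocation backs both rings: the first
	 * RX_BD_RING_LEN * MAX_RX_BUF_LENGTH bytes serve as Rx buffers and
	 * the remainder as Tx buffers; each BD below points into its slot
	 * at a fixed offset.
	 */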
	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}
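
	/* Only the last BD in each ring carries the wrap bit (R_W_S/T_W_S),
	 * which is what sends the hardware, and the software cursors kept in
	 * priv, back to the ring base.
	 */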
	return 0;

free_tiptr:
	qe_muram_free(tiptr);
free_riptr:
	qe_muram_free(riptr);
free_tx_skbuff:
	kfree(priv->tx_skbuff);
free_rx_skbuff:
	kfree(priv->rx_skbuff);
free_ucc_pram:
	qe_muram_free(priv->ucc_pram_offset);
free_tx_bd:
	dma_free_coherent(priv->dev,
			  TX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->tx_bd_base, priv->dma_tx_bd);
free_rx_bd:
	dma_free_coherent(priv->dev,
			  RX_BD_RING_LEN * sizeof(struct qe_bd),
			  priv->rx_bd_base, priv->dma_rx_bd);
free_uccf:
	ucc_fast_free(priv->uccf);

	return ret;
}

static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
	struct qe_bd __iomem *bd;
	u16 bd_status;
	unsigned long flags;
	u16 *proto_head;

	switch (dev->type) {
	case ARPHRD_RAWHDLC:
		if (skb_headroom(skb) < HDLC_HEAD_LEN) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Not enough space for hdlc head\n");
			return -ENOMEM;
		}
		skb_push(skb, HDLC_HEAD_LEN);

		proto_head = (u16 *)skb->data;
		*proto_head = htons(DEFAULT_HDLC_HEAD);

		dev->stats.tx_bytes += skb->len;
		break;
	case ARPHRD_PPP:
		proto_head = (u16 *)skb->data;
		if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			netdev_err(dev, "Wrong ppp header\n");
			return -ENOMEM;
		}
		dev->stats.tx_bytes += skb->len;
		break;
	case ARPHRD_ETHER:
		dev->stats.tx_bytes += skb->len;
		break;
	default:
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return -ENOMEM;
	}
	spin_lock_irqsave(&priv->lock, flags);

	/* Start from the next BD that should be filled */
	bd = priv->curtx_bd;
	bd_status = ioread16be(&bd->status);
	/* Save the skb pointer so we can free it later */
	priv->tx_skbuff[priv->skb_curtx] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	priv->skb_curtx =
	    (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

	/* copy skb data to tx buffer for sdma processing */
	memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
	       skb->data, skb->len);

	/* set bd status and length */
	bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;

	iowrite16be(skb->len, &bd->length);
	iowrite16be(bd_status, &bd->status);

	/* Move to next BD in the ring */
	if (!(bd_status & T_W_S))
		bd += 1;
	else
		bd = priv->tx_bd_base;

	if (bd == priv->dirty_tx) {
		if (!netif_queue_stopped(dev))
			netif_stop_queue(dev);
	}

	priv->curtx_bd = bd;

	spin_unlock_irqrestore(&priv->lock, flags);

	return NETDEV_TX_OK;
}

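/* The queue is stopped above when the next free BD catches up with
 * dirty_tx, the oldest BD not yet reclaimed; hdlc_tx_done() below wakes
 * the queue again as completions free slots.
 */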
static int hdlc_tx_done(struct ucc_hdlc_private *priv)
{
	/* Start from the next BD that should be filled */
	struct net_device *dev = priv->ndev;
	struct qe_bd *bd;		/* BD pointer */
	u16 bd_status;

	bd = priv->dirty_tx;
	bd_status = ioread16be(&bd->status);

	/* Normal processing. */
	while ((bd_status & T_R_S) == 0) {
		struct sk_buff *skb;

		/* BD contains already transmitted buffer.   */
		/* Handle the transmitted buffer and release */
		/* the BD to be used with the current frame  */

		skb = priv->tx_skbuff[priv->skb_dirtytx];
		if (!skb)
			break;
		dev->stats.tx_packets++;
		memset(priv->tx_buffer +
		       (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
		       0, skb->len);
		dev_kfree_skb_irq(skb);

		priv->tx_skbuff[priv->skb_dirtytx] = NULL;
		priv->skb_dirtytx =
		    (priv->skb_dirtytx +
		     1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);

		/* Advance the confirmation BD pointer */
		if (!(bd_status & T_W_S))
			bd += 1;
		else
			bd = priv->tx_bd_base;
		bd_status = ioread16be(&bd->status);
	}
	priv->dirty_tx = bd;

	return 0;
}

static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
{
	struct net_device *dev = priv->ndev;
	struct sk_buff *skb = NULL;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct qe_bd *bd;
	u16 bd_status;
	u16 length, howmany = 0;
	u8 *bdbuffer;

	bd = priv->currx_bd;
	bd_status = ioread16be(&bd->status);

	/* while there are received buffers and BD is full (~R_E) */
	while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
		if (bd_status & R_OV_S)
			dev->stats.rx_over_errors++;
		if (bd_status & R_CR_S) {
			dev->stats.rx_crc_errors++;
			dev->stats.rx_dropped++;
			goto recycle;
		}
		bdbuffer = priv->rx_buffer +
			(priv->currx_bdnum * MAX_RX_BUF_LENGTH);
		length = ioread16be(&bd->length);

		switch (dev->type) {
		case ARPHRD_RAWHDLC:
			bdbuffer += HDLC_HEAD_LEN;
			length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}
			skb_put(skb, length);
			skb->len = length;
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;
		case ARPHRD_PPP:
		case ARPHRD_ETHER:
			length -= HDLC_CRC_SIZE;

			skb = dev_alloc_skb(length);
			if (!skb) {
				dev->stats.rx_dropped++;
				return -ENOMEM;
			}
			skb_put(skb, length);
			skb->len = length;
			skb->dev = dev;
			memcpy(skb->data, bdbuffer, length);
			break;
		}

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		howmany++;
		if (hdlc->proto)
			skb->protocol = hdlc_type_trans(skb, dev);
		netif_receive_skb(skb);

recycle:
		iowrite16be(bd_status | R_E_S | R_I_S, &bd->status);

		/* update to point at the next bd */
		if (bd_status & R_W_S) {
			priv->currx_bdnum = 0;
			bd = priv->rx_bd_base;
		} else {
			if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
				priv->currx_bdnum += 1;
			else
				priv->currx_bdnum = RX_BD_RING_LEN - 1;

			bd += 1;
		}

		bd_status = ioread16be(&bd->status);
	}

	priv->currx_bd = bd;
	return howmany;
}

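/* NAPI poll: reclaim Tx completions under the lock, then receive up to
 * `budget` frames; the Rx/Tx event interrupts masked off in the IRQ
 * handler are re-enabled only once the work done stays below budget.
 */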
static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct ucc_hdlc_private *priv = container_of(napi,
						     struct ucc_hdlc_private,
						     napi);
	int howmany = 0;

	/* Tx event processing */
	spin_lock(&priv->lock);
	hdlc_tx_done(priv);
	spin_unlock(&priv->lock);

	howmany += hdlc_rx_done(priv, budget - howmany);

	if (howmany < budget) {
		napi_complete_done(napi, howmany);
		qe_setbits32(priv->uccf->p_uccm,
			     (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
	}

	return howmany;
}

static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
{
	struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
	struct net_device *dev = priv->ndev;
	struct ucc_fast_private *uccf;
	struct ucc_tdm_info *ut_info;
	u32 ucce;
	u32 uccm;

	ut_info = priv->ut_info;
	uccf = priv->uccf;

	ucce = ioread32be(uccf->p_ucce);
	uccm = ioread32be(uccf->p_uccm);
	ucce &= uccm;
	iowrite32be(ucce, uccf->p_ucce);

	if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
		if (napi_schedule_prep(&priv->napi)) {
			uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
				  << 16);
			iowrite32be(uccm, uccf->p_uccm);
			__napi_schedule(&priv->napi);
		}
	}

	/* Errors and other events */
	if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
		dev->stats.rx_errors++;
	if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
		dev->stats.tx_errors++;

	return IRQ_HANDLED;
}

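/* SIOCWANDEV with IF_GET_IFACE reports the E1 interface settings (clock
 * type) back to user space; everything else is delegated to the generic
 * HDLC layer via hdlc_ioctl().
 */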
static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(te1_settings);
	te1_settings line;
	struct ucc_hdlc_private *priv = netdev_priv(dev);

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch (ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_E1;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		memset(&line, 0, sizeof(line));
		line.clock_type = priv->clocking;

		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
			return -EFAULT;
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}

static int uhdlc_open(struct net_device *dev)
{
	u32 cecr_subblock;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ucc_hdlc_private *priv = hdlc->priv;
	struct ucc_tdm *utdm = priv->utdm;

	if (priv->hdlc_busy != 1) {
		if (request_irq(priv->ut_info->uf_info.irq,
				ucc_hdlc_irq_handler, 0, "hdlc", priv))
			return -ENODEV;

		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);

		priv->hdlc_busy = 1;
		netif_device_attach(priv->ndev);
		napi_enable(&priv->napi);
		netif_start_queue(dev);
		hdlc_open(dev);
	}

	return 0;
}

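/* Release everything uhdlc_init() set up. Each block is guarded (or the
 * pointer NULLed afterwards) so the function is safe to call on a
 * partially initialized device.
 */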
static void uhdlc_memclean(struct ucc_hdlc_private *priv)
{
	qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
	qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));

	if (priv->rx_bd_base) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->rx_bd_base, priv->dma_rx_bd);

		priv->rx_bd_base = NULL;
		priv->dma_rx_bd = 0;
	}

	if (priv->tx_bd_base) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * sizeof(struct qe_bd),
				  priv->tx_bd_base, priv->dma_tx_bd);

		priv->tx_bd_base = NULL;
		priv->dma_tx_bd = 0;
	}

	if (priv->ucc_pram) {
		qe_muram_free(priv->ucc_pram_offset);
		priv->ucc_pram = NULL;
		priv->ucc_pram_offset = 0;
	}

	kfree(priv->rx_skbuff);
	priv->rx_skbuff = NULL;

	kfree(priv->tx_skbuff);
	priv->tx_skbuff = NULL;

	if (priv->uf_regs) {
		iounmap(priv->uf_regs);
		priv->uf_regs = NULL;
	}

	if (priv->uccf) {
		ucc_fast_free(priv->uccf);
		priv->uccf = NULL;
	}

	if (priv->rx_buffer) {
		dma_free_coherent(priv->dev,
				  RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->rx_buffer, priv->dma_rx_addr);
		priv->rx_buffer = NULL;
		priv->dma_rx_addr = 0;
	}

	if (priv->tx_buffer) {
		dma_free_coherent(priv->dev,
				  TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
				  priv->tx_buffer, priv->dma_tx_addr);
		priv->tx_buffer = NULL;
		priv->dma_tx_addr = 0;
	}
}

static int uhdlc_close(struct net_device *dev)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
	struct ucc_tdm *utdm = priv->utdm;
	u32 cecr_subblock;

	napi_disable(&priv->napi);
	cecr_subblock = ucc_fast_get_qe_cr_subblock(
				priv->ut_info->uf_info.ucc_num);

	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
	qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	if (priv->tsa)
		utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	free_irq(priv->ut_info->uf_info.irq, priv);
	netif_stop_queue(dev);
	priv->hdlc_busy = 0;

	return 0;
}

static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
			   unsigned short parity)
{
	struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;

	if (encoding != ENCODING_NRZ &&
	    encoding != ENCODING_NRZI)
		return -EINVAL;

	if (parity != PARITY_NONE &&
	    parity != PARITY_CRC32_PR1_CCITT &&
	    parity != PARITY_CRC16_PR0_CCITT &&
	    parity != PARITY_CRC16_PR1_CCITT)
		return -EINVAL;

	priv->encoding = encoding;
	priv->parity = parity;

	return 0;
}

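/* Suspend/resume helpers: snapshot the QE MUX clock routing (SI clock
 * and sync routing plus the four CMXUCR registers) on suspend and
 * replay it on resume, since this state is not otherwise preserved
 * across a deep sleep.
 */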
#ifdef CONFIG_PM
static void store_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
	priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);

	priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);

	memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
}

static void resume_clk_config(struct ucc_hdlc_private *priv)
{
	struct qe_mux *qe_mux_reg = &qe_immr->qmx;

	memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));

	iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
	iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);

	iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
}

static int uhdlc_suspend(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	netif_device_detach(priv->ndev);
	napi_disable(&priv->napi);

	ut_info = priv->ut_info;
	uf_regs = priv->uf_regs;

	/* backup gumr guemr */
	priv->gumr = ioread32be(&uf_regs->gumr);
	priv->guemr = ioread8(&uf_regs->guemr);

	priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
				     GFP_KERNEL);
	if (!priv->ucc_pram_bak)
		return -ENOMEM;

	/* backup HDLC parameter */
	memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
		      sizeof(struct ucc_hdlc_param));

	/* store the clk configuration */
	store_clk_config(priv);

	/* save power */
	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	return 0;
}

static int uhdlc_resume(struct device *dev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
	struct ucc_tdm *utdm;
	struct ucc_tdm_info *ut_info;
	struct ucc_fast __iomem *uf_regs;
	struct ucc_fast_private *uccf;
	struct ucc_fast_info *uf_info;
	int ret, i;
	u32 cecr_subblock;
	u16 bd_status;

	if (!priv)
		return -EINVAL;

	if (!netif_running(priv->ndev))
		return 0;

	utdm = priv->utdm;
	ut_info = priv->ut_info;
	uf_info = &ut_info->uf_info;
	uf_regs = priv->uf_regs;
	uccf = priv->uccf;

	/* restore gumr guemr */
	iowrite8(priv->guemr, &uf_regs->guemr);
	iowrite32be(priv->gumr, &uf_regs->gumr);

	/* Set Virtual Fifo registers */
	iowrite16be(uf_info->urfs, &uf_regs->urfs);
	iowrite16be(uf_info->urfet, &uf_regs->urfet);
	iowrite16be(uf_info->urfset, &uf_regs->urfset);
	iowrite16be(uf_info->utfs, &uf_regs->utfs);
	iowrite16be(uf_info->utfet, &uf_regs->utfet);
	iowrite16be(uf_info->utftt, &uf_regs->utftt);
	/* utfb, urfb are offsets from MURAM base */
	iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
	iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);

	/* Rx Tx and sync clock routing */
	resume_clk_config(priv);

	iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
	iowrite32be(0xffffffff, &uf_regs->ucce);

	ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

	if (priv->tsa)
		ucc_tdm_init(priv->utdm, priv->ut_info);

	/* Write to QE CECR, UCCx channel to Stop Transmission */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
			   (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

	/* Set UPSMR normal mode */
	iowrite32be(0, &uf_regs->upsmr);

	/* init parameter base */
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
			   QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);

	priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
				qe_muram_addr(priv->ucc_pram_offset);

	/* restore ucc parameter */
	memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
		    sizeof(struct ucc_hdlc_param));
	kfree(priv->ucc_pram_bak);

	/* rebuild BD entry */
	for (i = 0; i < RX_BD_RING_LEN; i++) {
		if (i < (RX_BD_RING_LEN - 1))
			bd_status = R_E_S | R_I_S;
		else
			bd_status = R_E_S | R_I_S | R_W_S;

		iowrite16be(bd_status, &priv->rx_bd_base[i].status);
		iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->rx_bd_base[i].buf);
	}

	for (i = 0; i < TX_BD_RING_LEN; i++) {
		if (i < (TX_BD_RING_LEN - 1))
			bd_status = T_I_S | T_TC_S;
		else
			bd_status = T_I_S | T_TC_S | T_W_S;

		iowrite16be(bd_status, &priv->tx_bd_base[i].status);
		iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
			    &priv->tx_bd_base[i].buf);
	}

	/* if hdlc is busy enable TX and RX */
	if (priv->hdlc_busy == 1) {
		cecr_subblock = ucc_fast_get_qe_cr_subblock(
					priv->ut_info->uf_info.ucc_num);

		qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
			     (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);

		ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);

		/* Enable the TDM port */
		if (priv->tsa)
			utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
	}

	napi_enable(&priv->napi);
	netif_device_attach(priv->ndev);

	return 0;
}

static const struct dev_pm_ops uhdlc_pm_ops = {
	.suspend = uhdlc_suspend,
	.resume = uhdlc_resume,
	.freeze = uhdlc_suspend,
	.thaw = uhdlc_resume,
};

#define HDLC_PM_OPS (&uhdlc_pm_ops)

#else

#define HDLC_PM_OPS NULL

#endif

static const struct net_device_ops uhdlc_ops = {
	.ndo_open	= uhdlc_open,
	.ndo_stop	= uhdlc_close,
	.ndo_start_xmit	= hdlc_start_xmit,
	.ndo_do_ioctl	= uhdlc_ioctl,
};
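
/* Probe: resolve the UCC number and Rx/Tx clock routing from the device
 * tree, initialize the hardware through uhdlc_init(), and register the
 * channel with the generic HDLC layer as a network device.
 */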
static int ucc_hdlc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ucc_hdlc_private *uhdlc_priv = NULL;
	struct ucc_tdm_info *ut_info;
	struct ucc_tdm *utdm = NULL;
	struct resource res;
	struct net_device *dev;
	hdlc_device *hdlc;
	int ucc_num;
	const char *sprop;
	int ret;
	u32 val;

	ret = of_property_read_u32_index(np, "cell-index", 0, &val);
	if (ret) {
		dev_err(&pdev->dev, "Invalid ucc property\n");
		return -ENODEV;
	}

	ucc_num = val - 1;
	if (ucc_num > (UCC_MAX_NUM - 1) || ucc_num < 0) {
		dev_err(&pdev->dev, "Invalid UCC num\n");
		return -EINVAL;
	}

	memcpy(&utdm_info[ucc_num], &utdm_primary_info,
	       sizeof(utdm_primary_info));

	ut_info = &utdm_info[ucc_num];
	ut_info->uf_info.ucc_num = ucc_num;

	sprop = of_get_property(np, "rx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.rx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.rx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
		return -EINVAL;
	}

	sprop = of_get_property(np, "tx-clock-name", NULL);
	if (sprop) {
		ut_info->uf_info.tx_clock = qe_clock_source(sprop);
		if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
		    (ut_info->uf_info.tx_clock > QE_CLK24)) {
			dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
			return -EINVAL;
		}
	} else {
		dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return -EINVAL;

	ut_info->uf_info.regs = res.start;
	ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);

	uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
	if (!uhdlc_priv)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, uhdlc_priv);
	uhdlc_priv->dev = &pdev->dev;
	uhdlc_priv->ut_info = ut_info;

	if (of_get_property(np, "fsl,tdm-interface", NULL))
		uhdlc_priv->tsa = 1;

	if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
		uhdlc_priv->loopback = 1;

	if (of_get_property(np, "fsl,hdlc-bus", NULL))
		uhdlc_priv->hdlc_bus = 1;

	if (uhdlc_priv->tsa == 1) {
		utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
		if (!utdm) {
			ret = -ENOMEM;
			dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
			goto free_uhdlc_priv;
		}
		uhdlc_priv->utdm = utdm;
		ret = ucc_of_parse_tdm(np, utdm, ut_info);
		if (ret)
			goto free_utdm;
	}

	if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask))
		uhdlc_priv->hmask = DEFAULT_ADDR_MASK;

	ret = uhdlc_init(uhdlc_priv);
	if (ret) {
		dev_err(&pdev->dev, "Failed to init uhdlc\n");
		goto free_utdm;
	}

	dev = alloc_hdlcdev(uhdlc_priv);
	if (!dev) {
		ret = -ENOMEM;
		pr_err("ucc_hdlc: unable to allocate memory\n");
		goto undo_uhdlc_init;
	}

	uhdlc_priv->ndev = dev;
	hdlc = dev_to_hdlc(dev);
	dev->tx_queue_len = 16;
	dev->netdev_ops = &uhdlc_ops;
	hdlc->attach = ucc_hdlc_attach;
	hdlc->xmit = ucc_hdlc_tx;
	netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
	if (register_hdlc_device(dev)) {
		ret = -ENOBUFS;
		pr_err("ucc_hdlc: unable to register hdlc device\n");
		goto free_dev;
	}

	return 0;

free_dev:
	free_netdev(dev);
undo_uhdlc_init:
free_utdm:
	if (uhdlc_priv->tsa)
		kfree(utdm);
free_uhdlc_priv:
	kfree(uhdlc_priv);

	return ret;
}

static int ucc_hdlc_remove(struct platform_device *pdev)
{
	struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);

	uhdlc_memclean(priv);

	if (priv->utdm->si_regs) {
		iounmap(priv->utdm->si_regs);
		priv->utdm->si_regs = NULL;
	}

	if (priv->utdm->siram) {
		iounmap(priv->utdm->siram);
		priv->utdm->siram = NULL;
	}

	dev_info(&pdev->dev, "UCC based hdlc module removed\n");

	return 0;
}

static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
	{
	.compatible = "fsl,ucc-hdlc",
	},
	{},
};

MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);

static struct platform_driver ucc_hdlc_driver = {
	.probe	= ucc_hdlc_probe,
	.remove	= ucc_hdlc_remove,
	.driver	= {
		.name		= DRV_NAME,
		.pm		= HDLC_PM_OPS,
		.of_match_table	= fsl_ucc_hdlc_of_match,
	},
};

module_platform_driver(ucc_hdlc_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRV_DESC);