/*
 * amd5536.c -- AMD 5536 UDC high/full speed USB device controller
 *
 * Copyright (C) 2005-2007 AMD (http://www.amd.com)
 * Author: Thomas Dahlmann
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This file does the core driver implementation for the UDC that is based
 * on the Synopsys device controller IP (different from the HS OTG IP) and is
 * either connected through the PCI bus or integrated into SoC platforms.
 */

#define UDC_MOD_DESCRIPTION		"Synopsys USB Device Controller"
#define UDC_DRIVER_VERSION_STRING	"01.00.0206"
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
#include <linux/dmapool.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include "amd5536udc.h"
static void udc_tasklet_disconnect(unsigned long);
static void udc_setup_endpoints(struct udc *dev);
static void udc_soft_reset(struct udc *dev);
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);

static const char mod_desc[] = UDC_MOD_DESCRIPTION;
static const char name[] = "udc";

/* structure to hold endpoint function pointers */
static const struct usb_ep_ops udc_ep_ops;

/* received setup data */
static union udc_setup_data setup_data;

/* pointer to device object */
static struct udc *udc;

/* irq spin lock for soft reset */
static DEFINE_SPINLOCK(udc_irq_spinlock);
static DEFINE_SPINLOCK(udc_stall_spinlock);
/*
 * slave mode: pending bytes in rx fifo after nyet,
 * used if EPIN irq came but no request was available
 */
static unsigned int udc_rxfifo_pending;

/* count soft resets after suspend to avoid loop */
static int soft_reset_occured;
static int soft_reset_after_usbreset_occured;

static struct timer_list udc_timer;
static int stop_timer;
/*
 * set_rde -- Is used to control enabling of RX DMA. Problem is
 * that UDC has only one bit (RDE) to enable/disable RX DMA for
 * all OUT endpoints. So we have to handle race conditions like
 * when OUT data reaches the fifo but no request was queued yet.
 * This cannot be solved by leaving the RX DMA disabled until a
 * request gets queued because there may be other OUT packets
 * in the FIFO (important for not blocking control traffic).
 * The value of set_rde controls the corresponding timer.
 *
 * set_rde -1 == not used, means it is allowed to be set to 0 or 1
 * set_rde  0 == do not touch RDE, do not start the RDE timer
 * set_rde  1 == timer function will look whether FIFO has data
 * set_rde  2 == set by timer function to enable RX DMA on next call
 */
static int set_rde = -1;
static DECLARE_COMPLETION(on_exit);
static struct timer_list udc_pollstall_timer;
static int stop_pollstall_timer;
static DECLARE_COMPLETION(on_pollstall_exit);

/* tasklet for usb disconnect */
static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect,
		(unsigned long) &udc);
/* endpoint names used for print */
static const char ep0_string[] = "ep0in";
static const struct {
	const char *name;
	const struct usb_ep_caps caps;
} ep_info[] = {
#define EP_INFO(_name, _caps) \
	{ \
		.name = _name, \
		.caps = _caps, \
	}

	EP_INFO(ep0_string,
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep1in-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep2in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep3in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep4in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep5in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep6in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep7in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep8in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep9in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep10in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep11in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep12in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep13in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep14in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep15in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep0out",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep1out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep2out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep3out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep4out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep5out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep6out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep7out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep8out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep9out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep10out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep11out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep12out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep13out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep14out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep15out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),

#undef EP_INFO
};
/* DMA usage flag */
static bool use_dma = 1;
/* packet per buffer dma */
static bool use_dma_ppb = 1;
/* with per descriptor update */
static bool use_dma_ppb_du;
/* buffer fill mode */
static int use_dma_bufferfill_mode;
/* full speed only mode */
static bool use_fullspeed;
/* tx buffer size for high speed */
static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;
/*---------------------------------------------------------------------------*/
/* Prints UDC device registers and endpoint irq registers */
static void print_regs(struct udc *dev)
{
	DBG(dev, "------- Device registers -------\n");
	DBG(dev, "dev config = %08x\n", readl(&dev->regs->cfg));
	DBG(dev, "dev control = %08x\n", readl(&dev->regs->ctl));
	DBG(dev, "dev status = %08x\n", readl(&dev->regs->sts));
	DBG(dev, "dev int's = %08x\n", readl(&dev->regs->irqsts));
	DBG(dev, "dev intmask = %08x\n", readl(&dev->regs->irqmsk));
	DBG(dev, "dev ep int's = %08x\n", readl(&dev->regs->ep_irqsts));
	DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk));
	DBG(dev, "USE DMA = %d\n", use_dma);
	if (use_dma && use_dma_ppb && !use_dma_ppb_du) {
		DBG(dev, "DMA mode = PPBNDU (packet per buffer "
			"WITHOUT desc. update)\n");
		dev_info(dev->dev, "DMA mode (%s)\n", "PPBNDU");
	} else if (use_dma && use_dma_ppb && use_dma_ppb_du) {
		DBG(dev, "DMA mode = PPBDU (packet per buffer "
			"WITH desc. update)\n");
		dev_info(dev->dev, "DMA mode (%s)\n", "PPBDU");
	}
	if (use_dma && use_dma_bufferfill_mode) {
		DBG(dev, "DMA mode = BF (buffer fill mode)\n");
		dev_info(dev->dev, "DMA mode (%s)\n", "BF");
	}
	if (!use_dma)
		dev_info(dev->dev, "FIFO mode\n");
	DBG(dev, "-------------------------------------------------------\n");
}
/* Masks unused interrupts */
int udc_mask_unused_interrupts(struct udc *dev)
{
	u32 tmp;

	/* mask all dev interrupts */
	tmp = AMD_BIT(UDC_DEVINT_SVC) |
		AMD_BIT(UDC_DEVINT_ENUM) |
		AMD_BIT(UDC_DEVINT_US) |
		AMD_BIT(UDC_DEVINT_UR) |
		AMD_BIT(UDC_DEVINT_ES) |
		AMD_BIT(UDC_DEVINT_SI) |
		AMD_BIT(UDC_DEVINT_SOF) |
		AMD_BIT(UDC_DEVINT_SC);
	writel(tmp, &dev->regs->irqmsk);

	/* mask all ep interrupts */
	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk);

	return 0;
}
EXPORT_SYMBOL_GPL(udc_mask_unused_interrupts);
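/*
 * Illustration (not part of the driver): AMD_BIT(), AMD_UNMASK_BIT(),
 * AMD_ADDBITS() and AMD_GETBITS() are bitfield helpers from amd5536udc.h.
 * Hand-rolled equivalents for a field described by an explicit mask and
 * shift would look roughly like this (names are hypothetical):
 */
static inline u32 example_getbits(u32 val, u32 mask, unsigned int shift)
{
	return (val & mask) >> shift;		/* extract a register field */
}

static inline u32 example_addbits(u32 val, u32 bits, u32 mask,
				  unsigned int shift)
{
	/* clear the field, then merge in the new value */
	return (val & ~mask) | ((bits << shift) & mask);
}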
/* Enables endpoint 0 interrupts */
static int udc_enable_ep0_interrupts(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "udc_enable_ep0_interrupts()\n");

	tmp = readl(&dev->regs->ep_irqmsk);
	/* enable ep0 irq's */
	tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0)
		& AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0);
	writel(tmp, &dev->regs->ep_irqmsk);

	return 0;
}

/* Enables device interrupts for SET_INTF and SET_CONFIG */
int udc_enable_dev_setup_interrupts(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "enable device interrupts for setup data\n");

	tmp = readl(&dev->regs->irqmsk);

	/* enable SET_INTERFACE, SET_CONFIG and other needed irq's */
	tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI)
		& AMD_UNMASK_BIT(UDC_DEVINT_SC)
		& AMD_UNMASK_BIT(UDC_DEVINT_UR)
		& AMD_UNMASK_BIT(UDC_DEVINT_SVC)
		& AMD_UNMASK_BIT(UDC_DEVINT_ENUM);
	writel(tmp, &dev->regs->irqmsk);

	return 0;
}
EXPORT_SYMBOL_GPL(udc_enable_dev_setup_interrupts);
/* Calculates fifo start of endpoint based on preceding endpoints */
static int udc_set_txfifo_addr(struct udc_ep *ep)
{
	struct udc *dev;
	unsigned int i;
	u32 tmp;

	if (!ep || !(ep->in))
		return -EINVAL;

	dev = ep->dev;
	ep->txfifo = dev->txfifo;

	/* traverse ep's */
	for (i = 0; i < ep->num; i++) {
		if (dev->ep[i].regs) {
			/* read fifo size */
			tmp = readl(&dev->ep[i].regs->bufin_framenum);
			tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE);
			ep->txfifo += tmp;
		}
	}
	return 0;
}
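/*
 * Worked example (illustration): the buffer size field counts 32-bit FIFO
 * entries, so a double-buffered 512-byte bulk IN endpoint occupies
 * 2 * 512 / 4 = 256 entries; if ep1 and ep2 each did so, ep3's TX FIFO
 * starts 512 entries into the shared FIFO.
 */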
/* CNAK pending field: bit0 = ep0in, bit16 = ep0out */
static u32 cnak_pending;

static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num)
{
	if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) {
		DBG(ep->dev, "NAK could not be cleared for ep%d\n", num);
		cnak_pending |= 1 << (num);
		ep->naking = 1;
	} else
		cnak_pending = cnak_pending & (~(1 << (num)));
}
/* Enables endpoint, is called by gadget driver */
static int
udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
{
	struct udc_ep *ep;
	struct udc *dev;
	u32 tmp;
	unsigned long iflags;
	u8 udc_csr_epix;
	unsigned maxpacket;

	if (!usbep
			|| usbep->name == ep0_string
			|| !desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	dev = ep->dev;

	DBG(dev, "udc_ep_enable() ep %d\n", ep->num);

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&dev->lock, iflags);
	ep->ep.desc = desc;

	ep->halted = 0;

	/* set traffic type */
	tmp = readl(&dev->ep[ep->num].regs->ctl);
	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET);
	writel(tmp, &dev->ep[ep->num].regs->ctl);

	/* set max packet size */
	maxpacket = usb_endpoint_maxp(desc);
	tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_EP_MAX_PKT_SIZE);
	ep->ep.maxpacket = maxpacket;
	writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);

	/* IN ep */
	if (ep->in) {

		/* ep ix in UDC CSR register space */
		udc_csr_epix = ep->num;

		/* set buffer size (tx fifo entries) */
		tmp = readl(&dev->ep[ep->num].regs->bufin_framenum);
		/* double buffering: fifo size = 2 x max packet size */
		tmp = AMD_ADDBITS(tmp,
				maxpacket * UDC_EPIN_BUFF_SIZE_MULT
					/ UDC_DWORD_BYTES,
				UDC_EPIN_BUFF_SIZE);
		writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);

		/* calc. tx fifo base addr */
		udc_set_txfifo_addr(ep);

		/* flush fifo */
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_F);
		writel(tmp, &ep->regs->ctl);

	/* OUT ep */
	} else {
		/* ep ix in UDC CSR register space */
		udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;

		/* set max packet size UDC CSR */
		tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
		tmp = AMD_ADDBITS(tmp, maxpacket,
					UDC_CSR_NE_MAX_PKT);
		writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);

		if (use_dma && !ep->in) {
			/* alloc and init BNA dummy request */
			ep->bna_dummy_req = udc_alloc_bna_dummy(ep);
			ep->bna_occurred = 0;
		}

		if (ep->num != UDC_EP0OUT_IX)
			dev->data_ep_enabled = 1;
	}

	/* set ep values */
	tmp = readl(&dev->csr->ne[udc_csr_epix]);
	/* max packet */
	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_CSR_NE_MAX_PKT);
	/* ep number */
	tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM);
	/* ep direction */
	tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR);
	/* ep type */
	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE);
	/* ep config */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG);
	/* ep interface */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF);
	/* ep alt intf */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT);
	/* write reg */
	writel(tmp, &dev->csr->ne[udc_csr_epix]);

	/* enable ep irq */
	tmp = readl(&dev->regs->ep_irqmsk);
	tmp &= AMD_UNMASK_BIT(ep->num);
	writel(tmp, &dev->regs->ep_irqmsk);

	/*
	 * clear NAK by writing CNAK
	 * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written
	 */
	if (!use_dma || ep->in) {
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_CNAK);
		writel(tmp, &ep->regs->ctl);
		ep->naking = 0;
		UDC_QUEUE_CNAK(ep, ep->num);
	}
	tmp = desc->bEndpointAddress;
	DBG(dev, "%s enabled\n", usbep->name);

	spin_unlock_irqrestore(&dev->lock, iflags);
	return 0;
}
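/*
 * Usage sketch (illustration): the gadget driver picks a descriptor and
 * enables the endpoint through the usb_ep API, which lands in
 * udc_ep_enable() above:
 *
 *	ep->desc = &bulk_in_desc;	// hypothetical descriptor
 *	usb_ep_enable(ep);
 */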
/* Resets endpoint */
static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
{
	u32 tmp;

	VDBG(ep->dev, "ep-%d reset\n", ep->num);
	ep->ep.desc = NULL;
	ep->ep.ops = &udc_ep_ops;
	INIT_LIST_HEAD(&ep->queue);

	usb_ep_set_maxpacket_limit(&ep->ep, (u16) ~0);
	/* set NAK */
	tmp = readl(&ep->regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_SNAK);
	writel(tmp, &ep->regs->ctl);
	ep->naking = 1;

	/* disable interrupt */
	tmp = readl(&regs->ep_irqmsk);
	tmp |= AMD_BIT(ep->num);
	writel(tmp, &regs->ep_irqmsk);

	if (ep->in) {
		/* unset P and IN bit of potential former DMA */
		tmp = readl(&ep->regs->ctl);
		tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P);
		writel(tmp, &ep->regs->ctl);

		tmp = readl(&ep->regs->sts);
		tmp |= AMD_BIT(UDC_EPSTS_IN);
		writel(tmp, &ep->regs->sts);

		/* flush the fifo */
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_F);
		writel(tmp, &ep->regs->ctl);
	}
	/* reset desc pointer */
	writel(0, &ep->regs->desptr);
}
/* Disables endpoint, is called by gadget driver */
static int udc_ep_disable(struct usb_ep *usbep)
{
	struct udc_ep *ep = NULL;
	unsigned long iflags;

	if (!usbep)
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	if (usbep->name == ep0_string || !ep->ep.desc)
		return -EINVAL;

	DBG(ep->dev, "Disable ep-%d\n", ep->num);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
	empty_req_queue(ep);
	ep_init(ep->dev->regs, ep);
	spin_unlock_irqrestore(&ep->dev->lock, iflags);

	return 0;
}
/* Allocates request packet, called by gadget driver */
static struct usb_request *
udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
{
	struct udc_request *req;
	struct udc_data_dma *dma_desc;
	struct udc_ep *ep;

	if (!usbep)
		return NULL;

	ep = container_of(usbep, struct udc_ep, ep);

	VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
	req = kzalloc(sizeof(struct udc_request), gfp);
	if (!req)
		return NULL;

	req->req.dma = DMA_DONT_USE;
	INIT_LIST_HEAD(&req->queue);

	if (ep->dma) {
		/* ep0 in requests are allocated from data pool here */
		dma_desc = dma_pool_alloc(ep->dev->data_requests, gfp,
						&req->td_phys);
		if (!dma_desc) {
			kfree(req);
			return NULL;
		}

		VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
				"td_phys = %lx\n",
				req, dma_desc,
				(unsigned long)req->td_phys);
		/* prevent from using desc. - set HOST BUSY */
		dma_desc->status = AMD_ADDBITS(dma_desc->status,
						UDC_DMA_STP_STS_BS_HOST_BUSY,
						UDC_DMA_STP_STS_BS);
		dma_desc->bufptr = cpu_to_le32(DMA_DONT_USE);
		req->td_data = dma_desc;
		req->td_data_last = NULL;
		req->chain_len = 1;
	}

	return &req->req;
}
/* frees pci pool descriptors of a DMA chain */
static void udc_free_dma_chain(struct udc *dev, struct udc_request *req)
{
	struct udc_data_dma *td = req->td_data;
	unsigned int i;

	dma_addr_t addr_next = 0x00;
	dma_addr_t addr = (dma_addr_t)td->next;

	DBG(dev, "free chain req = %p\n", req);

	/* do not free first desc., will be done by free for request */
	for (i = 1; i < req->chain_len; i++) {
		td = phys_to_virt(addr);
		addr_next = (dma_addr_t)td->next;
		dma_pool_free(dev->data_requests, td, addr);
		addr = addr_next;
	}
}
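/*
 * Note (added): walking the chain with phys_to_virt() relies on the
 * dma_pool memory being covered by the kernel's linear mapping, which
 * holds for the coherent allocations this driver makes on its x86
 * platforms; it is not a portable pattern for arbitrary DMA addresses.
 */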
/* Frees request packet, called by gadget driver */
static void
udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep *ep;
	struct udc_request *req;

	if (!usbep || !usbreq)
		return;

	ep = container_of(usbep, struct udc_ep, ep);
	req = container_of(usbreq, struct udc_request, req);
	VDBG(ep->dev, "free_req req=%p\n", req);
	BUG_ON(!list_empty(&req->queue));
	if (req->td_data) {
		VDBG(ep->dev, "req->td_data=%p\n", req->td_data);

		/* free dma chain if created */
		if (req->chain_len > 1)
			udc_free_dma_chain(ep->dev, req);

		dma_pool_free(ep->dev->data_requests, req->td_data,
				req->td_phys);
	}
	kfree(req);
}
/* Init BNA dummy descriptor for HOST BUSY and pointing to itself */
static void udc_init_bna_dummy(struct udc_request *req)
{
	if (req) {
		/* set last bit */
		req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* set next pointer to itself */
		req->td_data->next = req->td_phys;
		/* set HOST BUSY */
		req->td_data->status
			= AMD_ADDBITS(req->td_data->status,
					UDC_DMA_STP_STS_BS_DMA_DONE,
					UDC_DMA_STP_STS_BS);
#ifdef UDC_VERBOSE
		pr_debug("bna desc = %p, sts = %08x\n",
			req->td_data, req->td_data->status);
#endif
	}
}

/* Allocate BNA dummy descriptor */
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
{
	struct udc_request *req = NULL;
	struct usb_request *_req = NULL;

	/* alloc the dummy request */
	_req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
	if (_req) {
		req = container_of(_req, struct udc_request, req);
		ep->bna_dummy_req = req;
		udc_init_bna_dummy(req);
	}
	return req;
}
/* Write data to TX fifo for IN packets */
static void
udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
{
	u8 *req_buf;
	u32 *buf;
	int i, j;
	unsigned bytes = 0;
	unsigned remaining = 0;

	if (!req || !ep)
		return;

	req_buf = req->buf + req->actual;
	prefetch(req_buf);
	remaining = req->length - req->actual;

	buf = (u32 *) req_buf;

	bytes = ep->ep.maxpacket;
	if (bytes > remaining)
		bytes = remaining;

	/* dwords first */
	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
		writel(*(buf + i), ep->txfifo);

	/* remaining bytes must be written by byte access */
	for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
		writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
			ep->txfifo);
	}

	/* dummy write confirm */
	writel(0, &ep->regs->confirm);
}
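/*
 * Worked example (illustration): a 13-byte packet leaves 13 / 4 = 3 full
 * dwords for the writel() loop and 13 % 4 = 1 trailing byte for the
 * writeb() loop above.
 */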
/* Read dwords from RX fifo for OUT transfers */
static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
{
	int i;

	VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);

	for (i = 0; i < dwords; i++)
		*(buf + i) = readl(dev->rxfifo);
	return 0;
}

/* Read bytes from RX fifo for OUT transfers */
static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
{
	u32 tmp;
	int i, j;

	VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);

	/* dwords first */
	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
		*((u32 *)(buf + (i<<2))) = readl(dev->rxfifo);

	/* remaining bytes must be read by byte access */
	if (bytes % UDC_DWORD_BYTES) {
		tmp = readl(dev->rxfifo);
		for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
			*(buf + (i<<2) + j) = (u8)(tmp & UDC_BYTE_MASK);
			tmp = tmp >> UDC_BITS_PER_BYTE;
		}
	}
	return 0;
}
/* Read data from RX fifo for OUT transfers */
static int
udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
{
	u8 *buf;
	unsigned buf_space;
	unsigned bytes = 0;
	unsigned finished = 0;

	/* received number of bytes */
	bytes = readl(&ep->regs->sts);
	bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE);

	buf_space = req->req.length - req->req.actual;
	buf = req->req.buf + req->req.actual;
	if (bytes > buf_space) {
		if ((buf_space % ep->ep.maxpacket) != 0) {
			DBG(ep->dev,
				"%s: rx %d bytes, rx-buf space = %d bytes\n",
				ep->ep.name, bytes, buf_space);
			req->req.status = -EOVERFLOW;
		}
		bytes = buf_space;
	}
	req->req.actual += bytes;

	/* last packet ? */
	if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
		|| ((req->req.actual == req->req.length) && !req->req.zero))
		finished = 1;

	/* read rx fifo bytes */
	VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
	udc_rxfifo_read_bytes(ep->dev, buf, bytes);

	return finished;
}
/* Creates or re-inits a DMA chain */
static int udc_create_dma_chain(
	struct udc_ep *ep,
	struct udc_request *req,
	unsigned long buf_len, gfp_t gfp_flags
)
{
	unsigned long bytes = req->req.length;
	unsigned int i;
	dma_addr_t dma_addr;
	struct udc_data_dma *td = NULL;
	struct udc_data_dma *last = NULL;
	unsigned long txbytes;
	unsigned create_new_chain = 0;
	unsigned len;

	VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
			bytes, buf_len);
	dma_addr = DMA_DONT_USE;

	/* unset L bit in first desc for OUT */
	if (!ep->in)
		req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);

	/* alloc only new desc's if not already available */
	len = req->req.length / ep->ep.maxpacket;
	if (req->req.length % ep->ep.maxpacket)
		len++;
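	/*
	 * Worked example (illustration): a 3000-byte request on a 512-byte
	 * bulk endpoint needs DIV_ROUND_UP(3000, 512) = 6 descriptors.
	 */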
	if (len > req->chain_len) {
		/* shorter chain already allocated before */
		if (req->chain_len > 1)
			udc_free_dma_chain(ep->dev, req);
		req->chain_len = len;
		create_new_chain = 1;
	}

	td = req->td_data;
	/* gen. required number of descriptors and buffers */
	for (i = buf_len; i < bytes; i += buf_len) {
		/* create or determine next desc. */
		if (create_new_chain) {
			td = dma_pool_alloc(ep->dev->data_requests,
					gfp_flags, &dma_addr);
			if (!td)
				return -ENOMEM;

			td->status = 0;
		} else if (i == buf_len) {
			/* first td */
			td = (struct udc_data_dma *)phys_to_virt(
						req->td_data->next);
			td->status = 0;
		} else {
			td = (struct udc_data_dma *)phys_to_virt(last->next);
			td->status = 0;
		}

		if (td)
			td->bufptr = req->req.dma + i; /* assign buffer */
		else
			break;

		/* short packet ? */
		if ((bytes - i) >= buf_len) {
			txbytes = buf_len;
		} else {
			/* short packet */
			txbytes = bytes - i;
		}

		/* link td and assign tx bytes */
		if (i == buf_len) {
			if (create_new_chain)
				req->td_data->next = dma_addr;
			/*
			 * else
			 *	req->td_data->next = virt_to_phys(td);
			 */
			/* write tx bytes */
			if (ep->in) {
				/* first desc */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
							ep->ep.maxpacket,
							UDC_DMA_IN_STS_TXBYTES);
				/* second desc */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		} else {
			if (create_new_chain)
				last->next = dma_addr;
			/*
			 * else
			 *	last->next = virt_to_phys(td);
			 */
			if (ep->in) {
				/* write tx bytes */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		}
		last = td;
	}
	/* set last bit */
	if (td) {
		td->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* last desc. points to itself */
		req->td_data_last = td;
	}

	return 0;
}
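/*
 * Note (added): the resulting layout is one descriptor from the request
 * allocation (req->td_data) followed by chain_len - 1 descriptors from the
 * dma_pool, linked through ->next, with the L (last) bit set only on the
 * final descriptor.
 */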
/* create/re-init a DMA descriptor or a DMA descriptor chain */
static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
{
	int retval = 0;
	u32 tmp;

	VDBG(ep->dev, "prep_dma\n");
	VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
			ep->num, req->td_data);

	/* set buffer pointer */
	req->td_data->bufptr = req->req.dma;

	/* set last bit */
	req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);

	/* build/re-init dma chain if maxpkt scatter mode, not for EP0 */
	if (use_dma_ppb) {

		retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
		if (retval != 0) {
			if (retval == -ENOMEM)
				DBG(ep->dev, "Out of DMA memory\n");
			return retval;
		}
		if (ep->in) {
			if (req->req.length == ep->ep.maxpacket) {
				/* write tx bytes */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						ep->ep.maxpacket,
						UDC_DMA_IN_STS_TXBYTES);
			}
		}
	}

	if (ep->in) {
		VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d "
				"maxpacket=%d ep%d\n",
				use_dma_ppb, req->req.length,
				ep->ep.maxpacket, ep->num);
		/*
		 * if bytes < max packet then tx bytes must
		 * be written in packet per buffer mode
		 */
		if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
				|| ep->num == UDC_EP0OUT_IX
				|| ep->num == UDC_EP0IN_IX) {
			/* write tx bytes */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						req->req.length,
						UDC_DMA_IN_STS_TXBYTES);
			/* reset frame num */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						0,
						UDC_DMA_IN_STS_FRAMENUM);
		}
		/* set HOST BUSY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_BUSY,
				UDC_DMA_STP_STS_BS);
	} else {
		VDBG(ep->dev, "OUT set host ready\n");
		/* set HOST READY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_READY,
				UDC_DMA_STP_STS_BS);

		/* clear NAK by writing CNAK */
		if (ep->naking) {
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->naking = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}

	return retval;
}
/* Completes request packet ... caller MUST hold lock */
static void
complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
__releases(ep->dev->lock)
__acquires(ep->dev->lock)
{
	struct udc *dev;
	unsigned halted;

	VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);

	dev = ep->dev;
	/* unmap DMA */
	if (ep->dma)
		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->in);

	halted = ep->halted;
	ep->halted = 1;

	/* set new status if pending */
	if (req->req.status == -EINPROGRESS)
		req->req.status = sts;

	/* remove from ep queue */
	list_del_init(&req->queue);

	VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
		&req->req, req->req.length, ep->ep.name, sts);

	spin_unlock(&dev->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->halted = halted;
}
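/*
 * Note (added): the lock is dropped around usb_gadget_giveback_request()
 * because the gadget driver's completion callback may call back into this
 * driver (e.g. usb_ep_queue()), which takes dev->lock again; ep->halted is
 * raised across the callback so queue processing treats the endpoint as
 * inactive meanwhile.
 */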
/* Iterates to the end of a DMA chain and returns last descriptor */
static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
{
	struct udc_data_dma *td;

	td = req->td_data;
	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L)))
		td = phys_to_virt(td->next);

	return td;
}

/* Iterates to the end of a DMA chain and counts bytes received */
static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
{
	struct udc_data_dma *td;
	u32 count;

	td = req->td_data;
	/* received number of bytes */
	count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);

	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
		td = phys_to_virt(td->next);
		/* received number of bytes */
		if (td) {
			count += AMD_GETBITS(td->status,
				UDC_DMA_OUT_STS_RXBYTES);
		}
	}

	return count;
}
/* Enabling RX DMA */
static void udc_set_rde(struct udc *dev)
{
	u32 tmp;

	VDBG(dev, "udc_set_rde()\n");
	/* stop RDE timer */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* set RDE */
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_RDE);
	writel(tmp, &dev->regs->ctl);
}
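/*
 * Note (added): mod_timer(&udc_timer, jiffies - 1) re-arms the timer with an
 * already expired deadline, so its callback runs at the next timer tick; the
 * driver uses this to force the RDE timer through its normal exit path
 * instead of deleting it and duplicating the cleanup.
 */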
/* Queues a request packet, called by gadget driver */
static int
udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
{
	int retval = 0;
	u8 open_rxfifo = 0;
	unsigned long iflags;
	struct udc_ep *ep;
	struct udc_request *req;
	struct udc *dev;
	u32 tmp;

	/* check the inputs */
	req = container_of(usbreq, struct udc_request, req);

	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
			|| !list_empty(&req->queue))
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;

	VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
	dev = ep->dev;

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* map dma (usually done before) */
	if (ep->dma) {
		VDBG(dev, "DMA map req %p\n", req);
		retval = usb_gadget_map_request(&udc->gadget, usbreq, ep->in);
		if (retval)
			return retval;
	}

	VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
			usbep->name, usbreq, usbreq->length,
			req->td_data, usbreq->buf);

	spin_lock_irqsave(&dev->lock, iflags);
	usbreq->actual = 0;
	usbreq->status = -EINPROGRESS;
	req->dma_done = 0;

	/* on empty queue just do first transfer */
	if (list_empty(&ep->queue)) {
		/* zlp */
		if (usbreq->length == 0) {
			/* IN zlp's are handled by hardware */
			complete_req(ep, req, 0);
			VDBG(dev, "%s: zlp\n", ep->ep.name);
			/*
			 * if set_config or set_intf is waiting for ack by zlp
			 * then set CSR_DONE
			 */
			if (dev->set_cfg_not_acked) {
				tmp = readl(&dev->regs->ctl);
				tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
				writel(tmp, &dev->regs->ctl);
				dev->set_cfg_not_acked = 0;
			}
			/* setup command is ACK'ed now by zlp */
			if (dev->waiting_zlp_ack_ep0in) {
				/* clear NAK by writing CNAK in EP0_IN */
				tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
				dev->ep[UDC_EP0IN_IX].naking = 0;
				UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
							UDC_EP0IN_IX);
				dev->waiting_zlp_ack_ep0in = 0;
			}
			goto finished;
		}
		if (ep->dma) {
			retval = prep_dma(ep, req, GFP_ATOMIC);
			if (retval != 0)
				goto finished;
			/* write desc pointer to enable DMA */
			if (ep->in) {
				/* set HOST READY */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						UDC_DMA_IN_STS_BS_HOST_READY,
						UDC_DMA_IN_STS_BS);
			}

			/* disable rx dma while descriptor update */
			if (!ep->in) {
				/* stop RDE timer */
				if (timer_pending(&udc_timer)) {
					set_rde = 0;
					mod_timer(&udc_timer, jiffies - 1);
				}
				/* clear RDE */
				tmp = readl(&dev->regs->ctl);
				tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
				writel(tmp, &dev->regs->ctl);
				open_rxfifo = 1;

				/*
				 * if BNA occurred then let BNA dummy desc.
				 * point to current desc.
				 */
				if (ep->bna_occurred) {
					VDBG(dev, "copy to BNA dummy desc.\n");
					memcpy(ep->bna_dummy_req->td_data,
						req->td_data,
						sizeof(struct udc_data_dma));
				}
			}
			/* write desc pointer */
			writel(req->td_phys, &ep->regs->desptr);

			/* clear NAK by writing CNAK */
			if (ep->naking) {
				tmp = readl(&ep->regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &ep->regs->ctl);
				ep->naking = 0;
				UDC_QUEUE_CNAK(ep, ep->num);
			}

			if (ep->in) {
				/* enable ep irq */
				tmp = readl(&dev->regs->ep_irqmsk);
				tmp &= AMD_UNMASK_BIT(ep->num);
				writel(tmp, &dev->regs->ep_irqmsk);
			}
		} else if (ep->in) {
			/* enable ep irq */
			tmp = readl(&dev->regs->ep_irqmsk);
			tmp &= AMD_UNMASK_BIT(ep->num);
			writel(tmp, &dev->regs->ep_irqmsk);
		}

	} else if (ep->dma) {
		/*
		 * prep_dma is not used for OUT eps here; that is not
		 * possible in PPB modes, because the chain was already
		 * created
		 */
		if (ep->in) {
			retval = prep_dma(ep, req, GFP_ATOMIC);
			if (retval != 0)
				goto finished;
		}
	}
	VDBG(dev, "list_add\n");
	/* add request to ep queue */
	if (req) {
		list_add_tail(&req->queue, &ep->queue);

		/* open rxfifo if out data queued */
		if (open_rxfifo) {
			/* enable DMA */
			req->dma_going = 1;
			udc_set_rde(dev);
			if (ep->num != UDC_EP0OUT_IX)
				dev->data_ep_queued = 1;
		}
		/* stop OUT naking */
		if (!ep->in) {
			if (!use_dma && udc_rxfifo_pending) {
				DBG(dev, "udc_queue(): pending bytes in "
					"rxfifo after nyet\n");
				/*
				 * read pending bytes after nyet:
				 * referring to isr
				 */
				if (udc_rxfifo_read(ep, req)) {
					/* finish */
					complete_req(ep, req, 0);
				}
				udc_rxfifo_pending = 0;
			}
		}
	}

finished:
	spin_unlock_irqrestore(&dev->lock, iflags);
	return retval;
}
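/*
 * Usage sketch (illustration): a gadget driver submits transfers through the
 * usb_ep API, which lands in udc_queue() above:
 *
 *	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;	// hypothetical completion callback
 *	status = usb_ep_queue(ep, req, GFP_ATOMIC);
 */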
/* Empty request queue of an endpoint; caller holds spinlock */
void empty_req_queue(struct udc_ep *ep)
{
	struct udc_request *req;

	ep->halted = 1;
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
			struct udc_request,
			queue);
		complete_req(ep, req, -ESHUTDOWN);
	}
}
EXPORT_SYMBOL_GPL(empty_req_queue);
/* Dequeues a request packet, called by gadget driver */
static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep *ep;
	struct udc_request *req;
	unsigned halted;
	unsigned long iflags;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!usbep || !usbreq || (!ep->ep.desc && (ep->num != 0
				&& ep->num != UDC_EP0OUT_IX)))
		return 0;

	req = container_of(usbreq, struct udc_request, req);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	halted = ep->halted;
	ep->halted = 1;
	/* request in processing or next one */
	if (ep->queue.next == &req->queue) {
		if (ep->dma && req->dma_going) {
			if (ep->in)
				ep->cancel_transfer = 1;
			else {
				u32 tmp;
				u32 dma_sts;
				/* stop potential receive DMA */
				tmp = readl(&udc->regs->ctl);
				writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
							&udc->regs->ctl);
				/*
				 * Cancel transfer later in ISR
				 * if descriptor was touched.
				 */
				dma_sts = AMD_GETBITS(req->td_data->status,
							UDC_DMA_OUT_STS_BS);
				if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
					ep->cancel_transfer = 1;
				else {
					udc_init_bna_dummy(ep->req);
					writel(ep->bna_dummy_req->td_phys,
						&ep->regs->desptr);
				}
				writel(tmp, &udc->regs->ctl);
			}
		}
	}
	complete_req(ep, req, -ECONNRESET);
	ep->halted = halted;

	spin_unlock_irqrestore(&ep->dev->lock, iflags);
	return 0;
}
/* Halt or clear halt of endpoint */
static int
udc_set_halt(struct usb_ep *usbep, int halt)
{
	struct udc_ep *ep;
	u32 tmp;
	unsigned long iflags;
	int retval = 0;

	if (!usbep)
		return -EINVAL;

	pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc_stall_spinlock, iflags);
	/* halt or clear halt */
	if (halt) {
		if (ep->num == 0)
			ep->dev->stall_ep0in = 1;
		else {
			/*
			 * set STALL
			 * rxfifo empty is not taken into account
			 */
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 1;

			/* setup poll timer */
			if (!timer_pending(&udc_pollstall_timer)) {
				udc_pollstall_timer.expires = jiffies +
					HZ * UDC_POLLSTALL_TIMER_USECONDS
					/ 1000000;
				if (!stop_pollstall_timer) {
					DBG(ep->dev, "start polltimer\n");
					add_timer(&udc_pollstall_timer);
				}
			}
		}
	} else {
		/* ep is halted by set_halt() before */
		if (ep->halted) {
			tmp = readl(&ep->regs->ctl);
			/* clear stall bit */
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			/* clear NAK by writing CNAK */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}
	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
	return retval;
}
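/*
 * Usage sketch (illustration): gadget drivers reach this through the usb_ep
 * API, e.g. to answer an unsupported request on a bulk endpoint:
 *
 *	usb_ep_set_halt(ep);	// sets the S bit and starts the poll timer
 *	usb_ep_clear_halt(ep);	// clears S and un-NAKs the endpoint
 */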
/* gadget interface */
static const struct usb_ep_ops udc_ep_ops = {
	.enable		= udc_ep_enable,
	.disable	= udc_ep_disable,

	.alloc_request	= udc_alloc_request,
	.free_request	= udc_free_request,

	.queue		= udc_queue,
	.dequeue	= udc_dequeue,

	.set_halt	= udc_set_halt,
	/* fifo ops not implemented */
};

/*-------------------------------------------------------------------------*/
/* Get frame counter (not implemented) */
static int udc_get_frame(struct usb_gadget *gadget)
{
	return -EOPNOTSUPP;
}

/* Initiates a remote wakeup */
static int udc_remote_wakeup(struct udc *dev)
{
	unsigned long flags;
	u32 tmp;

	DBG(dev, "UDC initiates remote wakeup\n");

	spin_lock_irqsave(&dev->lock, flags);

	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_RES);
	writel(tmp, &dev->regs->ctl);
	tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
	writel(tmp, &dev->regs->ctl);

	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}
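/*
 * Note (added): setting and then immediately clearing the RES bit pulses
 * resume signalling onto the bus to wake a suspended host; presumably the
 * controller itself stretches the pulse to a valid resume duration.
 */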
/* Remote wakeup gadget interface */
static int udc_wakeup(struct usb_gadget *gadget)
{
	struct udc *dev;

	if (!gadget)
		return -EINVAL;
	dev = container_of(gadget, struct udc, gadget);
	udc_remote_wakeup(dev);

	return 0;
}

static int amd5536_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
static int amd5536_udc_stop(struct usb_gadget *g);

static const struct usb_gadget_ops udc_ops = {
	.wakeup		= udc_wakeup,
	.get_frame	= udc_get_frame,
	.udc_start	= amd5536_udc_start,
	.udc_stop	= amd5536_udc_stop,
};
/* Sets up endpoint parameters, adds endpoints to linked list */
static void make_ep_lists(struct udc *dev)
{
	/* make gadget ep lists */
	INIT_LIST_HEAD(&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
			&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
			&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
			&dev->gadget.ep_list);

	/* fifo config */
	dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
	if (dev->gadget.speed == USB_SPEED_FULL)
		dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
	dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
}
/* Inits UDC context */
void udc_basic_init(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "udc_basic_init()\n");

	dev->gadget.speed = USB_SPEED_UNKNOWN;

	/* stop RDE timer */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* stop poll stall timer */
	if (timer_pending(&udc_pollstall_timer))
		mod_timer(&udc_pollstall_timer, jiffies - 1);
	/* disable DMA */
	tmp = readl(&dev->regs->ctl);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE);
	writel(tmp, &dev->regs->ctl);

	/* enable dynamic CSR programming */
	tmp = readl(&dev->regs->cfg);
	tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG);
	/* set self powered */
	tmp |= AMD_BIT(UDC_DEVCFG_SP);
	/* set remote wakeupable */
	tmp |= AMD_BIT(UDC_DEVCFG_RWKP);
	writel(tmp, &dev->regs->cfg);

	make_ep_lists(dev);

	dev->data_ep_enabled = 0;
	dev->data_ep_queued = 0;
}
EXPORT_SYMBOL_GPL(udc_basic_init);
/* init registers at driver load time */
static int startup_registers(struct udc *dev)
{
	u32 tmp;

	/* init controller by soft reset */
	udc_soft_reset(dev);

	/* mask not needed interrupts */
	udc_mask_unused_interrupts(dev);

	/* put into initial config */
	udc_basic_init(dev);
	/* link up all endpoints */
	udc_setup_endpoints(dev);

	/* program speed */
	tmp = readl(&dev->regs->cfg);
	if (use_fullspeed)
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
	else
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
	writel(tmp, &dev->regs->cfg);

	return 0;
}
/* Sets initial endpoint parameters */
static void udc_setup_endpoints(struct udc *dev)
{
	struct udc_ep *ep;
	u32 tmp;
	u32 reg;

	DBG(dev, "udc_setup_endpoints()\n");

	/* read enum speed */
	tmp = readl(&dev->regs->sts);
	tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
	if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH)
		dev->gadget.speed = USB_SPEED_HIGH;
	else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL)
		dev->gadget.speed = USB_SPEED_FULL;

	/* set basic ep parameters */
	for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
		ep = &dev->ep[tmp];
		ep->dev = dev;
		ep->ep.name = ep_info[tmp].name;
		ep->ep.caps = ep_info[tmp].caps;
		ep->num = tmp;
		/* txfifo size is calculated at enable time */
		ep->txfifo = dev->txfifo;

		/* fifo size */
		if (tmp < UDC_EPIN_NUM) {
			ep->fifo_depth = UDC_TXFIFO_SIZE;
			ep->in = 1;
		} else {
			ep->fifo_depth = UDC_RXFIFO_SIZE;
			ep->in = 0;
		}
		ep->regs = &dev->ep_regs[tmp];
		/*
		 * ep will be reset only if ep was not enabled before to avoid
		 * disabling ep interrupts when ENUM interrupt occurs but ep is
		 * not enabled by gadget driver
		 */
		if (!ep->ep.desc)
			ep_init(dev->regs, ep);

		if (use_dma) {
			/*
			 * ep->dma is not really used, just to indicate that
			 * DMA is active: remove this
			 * dma regs = dev control regs
			 */
			ep->dma = &dev->regs->ctl;

			/* nak OUT endpoints until enable - not for ep0 */
			if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX
						&& tmp > UDC_EPIN_NUM) {
				/* set NAK */
				reg = readl(&dev->ep[tmp].regs->ctl);
				reg |= AMD_BIT(UDC_EPCTL_SNAK);
				writel(reg, &dev->ep[tmp].regs->ctl);
				dev->ep[tmp].naking = 1;
			}
		}
	}
	/* EP0 max packet */
	if (dev->gadget.speed == USB_SPEED_FULL) {
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
					   UDC_FS_EP0IN_MAX_PKT_SIZE);
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
					   UDC_FS_EP0OUT_MAX_PKT_SIZE);
	} else if (dev->gadget.speed == USB_SPEED_HIGH) {
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
					   UDC_EP0IN_MAX_PKT_SIZE);
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
					   UDC_EP0OUT_MAX_PKT_SIZE);
	}

	/*
	 * with suspend bug workaround, ep0 params for gadget driver
	 * are set at gadget driver bind() call
	 */
	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
	dev->ep[UDC_EP0IN_IX].halted = 0;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);

	/* init cfg/alt/int */
	dev->cur_config = 0;
	dev->cur_intf = 0;
	dev->cur_alt = 0;
}
/* Bringup after Connect event, initial bringup to be ready for ep0 events */
static void usb_connect(struct udc *dev)
{
	/* Return if already connected */
	if (dev->connected)
		return;

	dev_info(dev->dev, "USB Connect\n");

	dev->connected = 1;

	/* put into initial config */
	udc_basic_init(dev);

	/* enable device setup interrupts */
	udc_enable_dev_setup_interrupts(dev);
}

/*
 * Calls the gadget with a disconnect event, resets the UDC and performs the
 * initial bringup to be ready for ep0 events
 */
static void usb_disconnect(struct udc *dev)
{
	/* Return if already disconnected */
	if (!dev->connected)
		return;

	dev_info(dev->dev, "USB Disconnect\n");

	dev->connected = 0;

	/* mask interrupts */
	udc_mask_unused_interrupts(dev);

	/*
	 * REVISIT there doesn't seem to be a point to having this
	 * talk to a tasklet ... do it directly, we already hold
	 * the spinlock needed to process the disconnect.
	 */
	tasklet_schedule(&disconnect_tasklet);
}
/* Tasklet for disconnect to be outside of interrupt context */
static void udc_tasklet_disconnect(unsigned long par)
{
	struct udc *dev = (struct udc *)(*((struct udc **) par));
	u32 tmp;

	DBG(dev, "Tasklet disconnect\n");
	spin_lock_irq(&dev->lock);

	if (dev->driver) {
		spin_unlock(&dev->lock);
		dev->driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);

		/* empty queues */
		for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
			empty_req_queue(&dev->ep[tmp]);
	}

	/* disable ep0 */
	ep_init(dev->regs,
			&dev->ep[UDC_EP0IN_IX]);

	if (!soft_reset_occured) {
		/* init controller by soft reset */
		udc_soft_reset(dev);
		soft_reset_occured++;
	}

	/* re-enable dev interrupts */
	udc_enable_dev_setup_interrupts(dev);
	/* back to full speed ? */
	if (use_fullspeed) {
		tmp = readl(&dev->regs->cfg);
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
		writel(tmp, &dev->regs->cfg);
	}

	spin_unlock_irq(&dev->lock);
}
/* Reset the UDC core */
static void udc_soft_reset(struct udc *dev)
{
	unsigned long flags;

	DBG(dev, "Soft reset\n");
	/*
	 * clear possibly pending interrupts, because their status
	 * is lost after soft reset;
	 * ep int. status reset
	 */
	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts);
	/* device int. status reset */
	writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts);
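	/*
	 * Note (added): both irqsts registers appear to be
	 * write-1-to-clear, so writing the all-ones disable masks
	 * acknowledges every pending interrupt in one store.
	 */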
	/*
	 * Don't do this for Broadcom UDC since this is a reserved
	 * bit.
	 */
	if (dev->chiprev != UDC_BCM_REV) {
		spin_lock_irqsave(&udc_irq_spinlock, flags);
		writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
		readl(&dev->regs->cfg);
		spin_unlock_irqrestore(&udc_irq_spinlock, flags);
	}
}
/* RDE timer callback to set RDE bit */
static void udc_timer_function(unsigned long v)
{
	u32 tmp;

	spin_lock_irq(&udc_irq_spinlock);

	if (set_rde > 0) {
		/*
		 * open the fifo if fifo was filled on last timer call
		 * conditionally
		 */
		if (set_rde > 1) {
			/* set RDE to receive setup data */
			tmp = readl(&udc->regs->ctl);
			tmp |= AMD_BIT(UDC_DEVCTL_RDE);
			writel(tmp, &udc->regs->ctl);
			set_rde = -1;
		} else if (readl(&udc->regs->sts)
				& AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
			/*
			 * if fifo empty setup polling, do not just
			 * open the fifo
			 */
			udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV;
			if (!stop_timer)
				add_timer(&udc_timer);
		} else {
			/*
			 * fifo contains data now, setup timer for opening
			 * the fifo when timer expires to be able to receive
			 * setup packets; when data packets get queued by
			 * the gadget layer the timer will be forced to expire
			 * with set_rde=0 (RDE is set in udc_queue())
			 */
			set_rde++;
			/* debug: lhadmot_timer_start = 221070 */
			udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS;
			if (!stop_timer)
				add_timer(&udc_timer);
		}

	} else
		set_rde = -1; /* RDE was set by udc_queue() */
	spin_unlock_irq(&udc_irq_spinlock);

	if (stop_timer)
		complete(&on_exit);
}
/* Handle halt state, used in stall poll timer */
static void udc_handle_halt_state(struct udc_ep *ep)
{
	u32 tmp;

	/* re-set STALL as long as the endpoint is halted */
	if (ep->halted == 1) {
		tmp = readl(&ep->regs->ctl);
		/* STALL cleared ? */
		if (!(tmp & AMD_BIT(UDC_EPCTL_S))) {
			/*
			 * FIXME: MSC spec requires that stall remains
			 * even on receiving of CLEAR_FEATURE HALT. So
			 * we would set STALL again here to be compliant.
			 * But with current mass storage drivers this does
			 * not work (would produce endless host retries).
			 * So we clear halt on CLEAR_FEATURE.
			 *
			DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl); */
			ep->halted = 0;

			/* clear NAK by writing CNAK */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->naking = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}
}
/* Stall timer callback to poll S bit and set it again after */
static void udc_pollstall_timer_function(unsigned long v)
{
	struct udc_ep *ep;
	int halted = 0;

	spin_lock_irq(&udc_stall_spinlock);
	/*
	 * only one IN and one OUT endpoint are handled
	 * IN poll stall
	 */
	ep = &udc->ep[UDC_EPIN_IX];
	udc_handle_halt_state(ep);
	if (ep->halted)
		halted = 1;
	/* OUT poll stall */
	ep = &udc->ep[UDC_EPOUT_IX];
	udc_handle_halt_state(ep);
	if (ep->halted)
		halted = 1;

	/* setup timer again when still halted */
	if (!stop_pollstall_timer && halted) {
		udc_pollstall_timer.expires = jiffies +
			HZ * UDC_POLLSTALL_TIMER_USECONDS
			/ 1000000;
		add_timer(&udc_pollstall_timer);
	}
	spin_unlock_irq(&udc_stall_spinlock);

	if (stop_pollstall_timer)
		complete(&on_pollstall_exit);
}
/* Inits endpoint 0 so that SETUP packets are processed */
static void activate_control_endpoints(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "activate_control_endpoints\n");

	/* flush fifo */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_F);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);

	/* set ep0 directions */
	dev->ep[UDC_EP0IN_IX].in = 1;
	dev->ep[UDC_EP0OUT_IX].in = 0;

	/* set buffer size (tx fifo entries) of EP0_IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE,
					UDC_EPIN_BUFF_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE,
					UDC_EPIN_BUFF_SIZE);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);

	/* set max packet size of EP0_IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);

	/* set max packet size of EP0_OUT */
	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);

	/* set max packet size of EP0 in UDC CSR */
	tmp = readl(&dev->csr->ne[0]);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
					UDC_CSR_NE_MAX_PKT);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
					UDC_CSR_NE_MAX_PKT);
	writel(tmp, &dev->csr->ne[0]);

	if (use_dma) {
		dev->ep[UDC_EP0OUT_IX].td->status |=
			AMD_BIT(UDC_DMA_OUT_STS_L);
		/* write dma desc address */
		writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
			&dev->ep[UDC_EP0OUT_IX].regs->subptr);
		writel(dev->ep[UDC_EP0OUT_IX].td_phys,
			&dev->ep[UDC_EP0OUT_IX].regs->desptr);
		/* stop RDE timer */
		if (timer_pending(&udc_timer)) {
			set_rde = 0;
			mod_timer(&udc_timer, jiffies - 1);
		}
		/* stop pollstall timer */
		if (timer_pending(&udc_pollstall_timer))
			mod_timer(&udc_pollstall_timer, jiffies - 1);
		/* enable DMA */
		tmp = readl(&dev->regs->ctl);
		tmp |= AMD_BIT(UDC_DEVCTL_MODE)
				| AMD_BIT(UDC_DEVCTL_RDE)
				| AMD_BIT(UDC_DEVCTL_TDE);
		if (use_dma_bufferfill_mode)
			tmp |= AMD_BIT(UDC_DEVCTL_BF);
		else if (use_dma_ppb_du)
			tmp |= AMD_BIT(UDC_DEVCTL_DU);
		writel(tmp, &dev->regs->ctl);
	}

	/* clear NAK by writing CNAK for EP0IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
	dev->ep[UDC_EP0IN_IX].naking = 0;
	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);

	/* clear NAK by writing CNAK for EP0OUT */
	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
	dev->ep[UDC_EP0OUT_IX].naking = 0;
	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
}
/* Make endpoint 0 ready for control traffic */
static int setup_ep0(struct udc *dev)
{
	activate_control_endpoints(dev);
	/* enable ep0 interrupts */
	udc_enable_ep0_interrupts(dev);
	/* enable device setup interrupts */
	udc_enable_dev_setup_interrupts(dev);

	return 0;
}
/* Called by gadget driver to register itself */
static int amd5536_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct udc *dev = to_amd5536_udc(g);
	u32 tmp;

	driver->driver.bus = NULL;
	dev->driver = driver;

	/* Some gadget drivers use both ep0 directions.
	 * NOTE: to gadget driver, ep0 is just one endpoint...
	 */
	dev->ep[UDC_EP0OUT_IX].ep.driver_data =
		dev->ep[UDC_EP0IN_IX].ep.driver_data;

	/* get ready for ep0 traffic */
	setup_ep0(dev);

	/* clear SD */
	tmp = readl(&dev->regs->ctl);
	tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD);
	writel(tmp, &dev->regs->ctl);

	return 0;
}
/* shutdown requests and disconnect from gadget */
static void
shutdown(struct udc *dev, struct usb_gadget_driver *driver)
__releases(dev->lock)
__acquires(dev->lock)
{
	int tmp;

	/* empty queues and init hardware */
	udc_basic_init(dev);

	for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
		empty_req_queue(&dev->ep[tmp]);

	udc_setup_endpoints(dev);
}
/* Called by gadget driver to unregister itself */
static int amd5536_udc_stop(struct usb_gadget *g)
{
	struct udc *dev = to_amd5536_udc(g);
	unsigned long flags;
	u32 tmp;

	spin_lock_irqsave(&dev->lock, flags);
	udc_mask_unused_interrupts(dev);
	shutdown(dev, NULL);
	spin_unlock_irqrestore(&dev->lock, flags);

	dev->driver = NULL;

	/* set SD */
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_SD);
	writel(tmp, &dev->regs->ctl);

	return 0;
}
/* Clear pending NAK bits */
static void udc_process_cnak_queue(struct udc *dev)
{
	u32 tmp;
	u32 reg;

	/* check epin's */
	DBG(dev, "CNAK pending queue processing\n");
	for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) {
		if (cnak_pending & (1 << tmp)) {
			DBG(dev, "CNAK pending for ep%d\n", tmp);
			/* clear NAK by writing CNAK */
			reg = readl(&dev->ep[tmp].regs->ctl);
			reg |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(reg, &dev->ep[tmp].regs->ctl);
			dev->ep[tmp].naking = 0;
			UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
		}
	}
	/* ... and ep0out */
	if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
		DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
		/* clear NAK by writing CNAK */
		reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
		reg |= AMD_BIT(UDC_EPCTL_CNAK);
		writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
		dev->ep[UDC_EP0OUT_IX].naking = 0;
		UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
				dev->ep[UDC_EP0OUT_IX].num);
	}
}
/* Enabling RX DMA after setup packet */
static void udc_ep0_set_rde(struct udc *dev)
{
	if (use_dma) {
		/*
		 * only enable RX DMA when no data endpoint is enabled
		 * or data is already queued
		 */
		if (!dev->data_ep_enabled || dev->data_ep_queued) {
			udc_set_rde(dev);
		} else {
			/*
			 * setup timer for enabling RDE (to not enable
			 * RXFIFO DMA for data endpoints too early)
			 */
			if (set_rde != 0 && !timer_pending(&udc_timer)) {
				udc_timer.expires =
					jiffies + HZ/UDC_RDE_TIMER_DIV;
				set_rde = 1;
				if (!stop_timer)
					add_timer(&udc_timer);
			}
		}
	}
}
/* Interrupt handler for data OUT traffic */
static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	struct udc_ep *ep;
	struct udc_request *req;
	unsigned int count;
	struct udc_data_dma *td = NULL;
	unsigned dma_done;

	VDBG(dev, "ep%d irq\n", ep_ix);
	ep = &dev->ep[ep_ix];

	tmp = readl(&ep->regs->sts);
	if (use_dma) {
		/* BNA event ? */
		if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
			DBG(dev, "BNA ep%dout occurred - DESPTR = %x\n",
					ep->num, readl(&ep->regs->desptr));
			/* clear BNA */
			writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
			if (!ep->cancel_transfer)
				ep->bna_occurred = 1;
			else
				ep->cancel_transfer = 0;
			ret_val = IRQ_HANDLED;
			goto finished;
		}
	}
	/* HE event ? */
	if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
		dev_err(dev->dev, "HE ep%dout occurred\n", ep->num);

		/* clear HE */
		writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
		ret_val = IRQ_HANDLED;
		goto finished;
	}

	if (!list_empty(&ep->queue)) {
		/* next request */
		req = list_entry(ep->queue.next,
			struct udc_request, queue);
	} else {
		req = NULL;
		udc_rxfifo_pending = 1;
	}
	VDBG(dev, "req = %p\n", req);
	/* fifo mode */
	if (!use_dma) {
		/* read fifo */
		if (req && udc_rxfifo_read(ep, req)) {
			ret_val = IRQ_HANDLED;

			/* finish */
			complete_req(ep, req, 0);
			/* next request */
			if (!list_empty(&ep->queue) && !ep->halted) {
				req = list_entry(ep->queue.next,
					struct udc_request, queue);
			} else
				req = NULL;
		}

	/* DMA */
	} else if (!ep->cancel_transfer && req) {
		ret_val = IRQ_HANDLED;

		/* check for DMA done */
		if (!use_dma_ppb) {
			dma_done = AMD_GETBITS(req->td_data->status,
						UDC_DMA_OUT_STS_BS);
		/* packet per buffer mode - rx bytes */
		} else {
			/*
			 * if BNA occurred then recover desc. from
			 * BNA dummy desc.
			 */
			if (ep->bna_occurred) {
				VDBG(dev, "Recover desc. from BNA dummy\n");
				memcpy(req->td_data, ep->bna_dummy_req->td_data,
						sizeof(struct udc_data_dma));
				ep->bna_occurred = 0;
				udc_init_bna_dummy(ep->req);
			}
			td = udc_get_last_dma_desc(req);
			dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
		}
		if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
			/* buffer fill mode - rx bytes */
			if (!use_dma_ppb) {
				/* received number of bytes */
				count = AMD_GETBITS(req->td_data->status,
						UDC_DMA_OUT_STS_RXBYTES);
				VDBG(dev, "rx bytes=%u\n", count);
			/* packet per buffer mode - rx bytes */
			} else {
				VDBG(dev, "req->td_data=%p\n", req->td_data);
				VDBG(dev, "last desc = %p\n", td);
				/* received number of bytes */
				if (use_dma_ppb_du) {
					/* every desc. counts bytes */
					count = udc_get_ppbdu_rxbytes(req);
				} else {
					/* last desc. counts bytes */
					count = AMD_GETBITS(td->status,
						UDC_DMA_OUT_STS_RXBYTES);
					if (!count && req->req.length
						== UDC_DMA_MAXPACKET) {
						/*
						 * on 64k packets the RXBYTES
						 * field is zero
						 */
						count = UDC_DMA_MAXPACKET;
					}
				}
				VDBG(dev, "last desc rx bytes=%u\n", count);
			}

			tmp = req->req.length - req->req.actual;
			if (count > tmp) {
				if ((tmp % ep->ep.maxpacket) != 0) {
					DBG(dev, "%s: rx %db, space=%db\n",
						ep->ep.name, count, tmp);
					req->req.status = -EOVERFLOW;
				}
				count = tmp;
			}
			req->req.actual += count;
			req->dma_going = 0;
			/* complete request */
			complete_req(ep, req, 0);

			/* next request */
			if (!list_empty(&ep->queue) && !ep->halted) {
				req = list_entry(ep->queue.next,
					struct udc_request,
					queue);
				/*
				 * DMA may be already started by udc_queue()
				 * called by gadget drivers completion
				 * routine. This happens when queue
				 * holds one request only.
				 */
				if (req->dma_going == 0) {
					/* next dma */
					if (prep_dma(ep, req, GFP_ATOMIC) != 0)
						goto finished;
					/* write desc pointer */
					writel(req->td_phys,
						&ep->regs->desptr);
					req->dma_going = 1;
					/* enable DMA */
					udc_set_rde(dev);
				}
			} else {
				/*
				 * implant BNA dummy descriptor to allow
				 * RXFIFO opening by RDE
				 */
				if (ep->bna_dummy_req) {
					/* write desc pointer */
					writel(ep->bna_dummy_req->td_phys,
						&ep->regs->desptr);
					ep->bna_occurred = 0;
				}

				/*
				 * schedule timer for setting RDE if queue
				 * remains empty, to allow ep0 packets to
				 * pass through
				 */
				if (set_rde != 0
						&& !timer_pending(&udc_timer)) {
					udc_timer.expires =
						jiffies
						+ HZ*UDC_RDE_TIMER_SECONDS;
					set_rde = 1;
					if (!stop_timer)
						add_timer(&udc_timer);
				}
				if (ep->num != UDC_EP0OUT_IX)
					dev->data_ep_queued = 0;
			}

		} else {
			/*
			 * RX DMA must be reenabled for each desc in PPBDU mode
			 * and must be enabled for PPBNDU mode in case of BNA
			 */
			udc_set_rde(dev);
		}

	} else if (ep->cancel_transfer) {
		ret_val = IRQ_HANDLED;
		ep->cancel_transfer = 0;
	}

	/* check pending CNAKS */
	if (cnak_pending) {
		/* CNAK processing when rxfifo empty only */
		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
			udc_process_cnak_queue(dev);
	}

	/* clear OUT bits in ep status */
	writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);
finished:
	return ret_val;
}
/* Interrupt handler for data IN traffic */
static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	u32 epsts;
	struct udc_ep *ep;
	struct udc_request *req;
	struct udc_data_dma *td;
	unsigned len;

	ep = &dev->ep[ep_ix];

	epsts = readl(&ep->regs->sts);
	if (use_dma) {
		/* BNA ? */
		if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
			dev_err(dev->dev,
				"BNA ep%din occurred - DESPTR = %08lx\n",
				ep->num,
				(unsigned long) readl(&ep->regs->desptr));

			/* clear BNA */
			writel(epsts, &ep->regs->sts);
			ret_val = IRQ_HANDLED;
			goto finished;
		}
	}
	/* HE event ? */
	if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
		dev_err(dev->dev,
			"HE ep%din occurred - DESPTR = %08lx\n",
			ep->num, (unsigned long) readl(&ep->regs->desptr));

		/* clear HE */
		writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
		ret_val = IRQ_HANDLED;
		goto finished;
	}

	/* DMA completion */
	if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
		VDBG(dev, "TDC set- completion\n");
		ret_val = IRQ_HANDLED;
		if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
			req = list_entry(ep->queue.next,
					struct udc_request, queue);
			/*
			 * length bytes transferred
			 * check dma done of last desc. in PPBDU mode
			 */
			if (use_dma_ppb_du) {
				td = udc_get_last_dma_desc(req);
				if (td)
					req->req.actual = req->req.length;
			} else {
				/* assume all bytes transferred */
				req->req.actual = req->req.length;
			}

			if (req->req.actual == req->req.length) {
				/* complete req */
				complete_req(ep, req, 0);
				req->dma_going = 0;
				/* further request available ? */
				if (list_empty(&ep->queue)) {
					/* disable interrupt */
					tmp = readl(&dev->regs->ep_irqmsk);
					tmp |= AMD_BIT(ep->num);
					writel(tmp, &dev->regs->ep_irqmsk);
				}
			}
		}
		ep->cancel_transfer = 0;
	}
	/*
	 * status reg has IN bit set and TDC not set (if TDC was handled,
	 * IN must not be handled (UDC defect) ?)
	 */
	if ((epsts & AMD_BIT(UDC_EPSTS_IN))
			&& !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
		ret_val = IRQ_HANDLED;
		if (!list_empty(&ep->queue)) {
			/* next request */
			req = list_entry(ep->queue.next,
					struct udc_request, queue);
			/* FIFO mode */
			if (!use_dma) {
				/* write fifo */
				udc_txfifo_write(ep, &req->req);
				len = req->req.length - req->req.actual;
				if (len > ep->ep.maxpacket)
					len = ep->ep.maxpacket;
				req->req.actual += len;
				if (req->req.actual == req->req.length
					|| (len != ep->ep.maxpacket)) {
					/* complete req */
					complete_req(ep, req, 0);
				}
			/* DMA */
			} else if (req && !req->dma_going) {
				VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
					req, req->td_data);
				if (req->td_data) {
					req->dma_going = 1;
					/*
					 * unset L bit of first desc.
					 * for chain
					 */
					if (use_dma_ppb && req->req.length >
							ep->ep.maxpacket) {
						req->td_data->status &=
							AMD_CLEAR_BIT(
							UDC_DMA_IN_STS_L);
					}

					/* write desc pointer */
					writel(req->td_phys, &ep->regs->desptr);

					/* set HOST READY */
					req->td_data->status =
						AMD_ADDBITS(
						req->td_data->status,
						UDC_DMA_IN_STS_BS_HOST_READY,
						UDC_DMA_IN_STS_BS);

					/* set poll demand bit */
					tmp = readl(&ep->regs->ctl);
					tmp |= AMD_BIT(UDC_EPCTL_P);
					writel(tmp, &ep->regs->ctl);
				}
			}

		} else if (!use_dma && ep->in) {
			/* disable interrupt */
			tmp = readl(&dev->regs->ep_irqmsk);
			tmp |= AMD_BIT(ep->num);
			writel(tmp, &dev->regs->ep_irqmsk);
		}
	}
	/* clear status bits */
	writel(epsts, &ep->regs->sts);

finished:
	return ret_val;
}
2454 /* Interrupt handler for Control OUT traffic */
2455 static irqreturn_t udc_control_out_isr(struct udc *dev)
2456 __releases(dev->lock)
2457 __acquires(dev->lock)
2459 irqreturn_t ret_val = IRQ_NONE;
2461 int setup_supported;
2465 struct udc_ep *ep_tmp;
2467 ep = &dev->ep[UDC_EP0OUT_IX];
2470 writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);
2472 tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
2473 /* check BNA and clear if set */
2474 if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
2475 VDBG(dev, "ep0: BNA set\n");
2476 writel(AMD_BIT(UDC_EPSTS_BNA),
2477 &dev->ep[UDC_EP0OUT_IX].regs->sts);
2478 ep->bna_occurred = 1;
2479 ret_val = IRQ_HANDLED;
2480 }
2483 /* type of data: SETUP or DATA 0 bytes */
2484 tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
2485 VDBG(dev, "data_typ = %x\n", tmp);
2488 if (tmp == UDC_EPSTS_OUT_SETUP) {
2489 ret_val = IRQ_HANDLED;
2491 ep->dev->stall_ep0in = 0;
2492 dev->waiting_zlp_ack_ep0in = 0;
2494 /* set NAK for EP0_IN */
2495 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2496 tmp |= AMD_BIT(UDC_EPCTL_SNAK);
2497 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2498 dev->ep[UDC_EP0IN_IX].naking = 1;
2499 /* get setup data */
2500 if (use_dma) {
2502 /* clear OUT bits in ep status */
2503 writel(UDC_EPSTS_OUT_CLEAR,
2504 &dev->ep[UDC_EP0OUT_IX].regs->sts);
2506 setup_data.data[0] =
2507 dev->ep[UDC_EP0OUT_IX].td_stp->data12;
2508 setup_data.data[1] =
2509 dev->ep[UDC_EP0OUT_IX].td_stp->data34;
2510 /* set HOST READY */
2511 dev->ep[UDC_EP0OUT_IX].td_stp->status =
2512 UDC_DMA_STP_STS_BS_HOST_READY;
2513 } else {
2514 /* read fifo */
2515 udc_rxfifo_read_dwords(dev, setup_data.data, 2);
2516 }
2518 /* determine direction of control data */
2519 if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
2520 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
2521 /* enable RDE */
2522 udc_ep0_set_rde(dev);
2523 set = 0;
2524 } else {
2525 dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
2526 /*
2527 * implant BNA dummy descriptor to allow RXFIFO opening
2528 * by RDE
2529 */
2530 if (ep->bna_dummy_req) {
2531 /* write desc pointer */
2532 writel(ep->bna_dummy_req->td_phys,
2533 &dev->ep[UDC_EP0OUT_IX].regs->desptr);
2534 ep->bna_occurred = 0;
2535 }
2537 set = 1;
2538 dev->ep[UDC_EP0OUT_IX].naking = 1;
2539 /*
2540 * setup timer for enabling RDE (to not enable
2541 * RXFIFO DMA for data too early)
2542 */
2543 set_rde = 1;
2544 if (!timer_pending(&udc_timer)) {
2545 udc_timer.expires = jiffies +
2546 HZ/UDC_RDE_TIMER_DIV;
2547 if (!stop_timer)
2548 add_timer(&udc_timer);
2549 }
2550 }
2552 /*
2553 * mass storage reset must be processed here because
2554 * the next packet may be a CLEAR_FEATURE HALT which would not
2555 * clear the stall bit when no STALL handshake was received
2556 * before (autostall can cause this)
2557 */
2558 if (setup_data.data[0] == UDC_MSCRES_DWORD0
2559 && setup_data.data[1] == UDC_MSCRES_DWORD1) {
2560 DBG(dev, "MSC Reset\n");
2561 /*
2562 * clear stall bits
2563 * only one IN and one OUT endpoint are handled
2564 */
2565 ep_tmp = &udc->ep[UDC_EPIN_IX];
2566 udc_set_halt(&ep_tmp->ep, 0);
2567 ep_tmp = &udc->ep[UDC_EPOUT_IX];
2568 udc_set_halt(&ep_tmp->ep, 0);
2569 }
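/*
 * Note (assumption): setup_data is a union of u32 data[2] and a struct
 * usb_ctrlrequest, so on this little-endian platform data[0] carries
 * bmRequestType/bRequest/wValue and data[1] carries wIndex/wLength.
 * The UDC_MSCRES_DWORD* constants compared above are assumed to encode
 * a Bulk-Only Mass Storage Reset (bmRequestType 0x21, bRequest 0xFF,
 * wValue 0, wLength 0) in exactly that packing.
 */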
2571 /* call gadget with setup data received */
2572 spin_unlock(&dev->lock);
2573 setup_supported = dev->driver->setup(&dev->gadget,
2574 &setup_data.request);
2575 spin_lock(&dev->lock);
2577 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2578 /* ep0 in returns data (not zlp) on IN phase */
2579 if (setup_supported >= 0 && setup_supported <
2580 UDC_EP0IN_MAXPACKET) {
2581 /* clear NAK by writing CNAK in EP0_IN */
2582 tmp |= AMD_BIT(UDC_EPCTL_CNAK);
2583 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2584 dev->ep[UDC_EP0IN_IX].naking = 0;
2585 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
2587 /* if unsupported request then stall */
2588 } else if (setup_supported < 0) {
2589 tmp |= AMD_BIT(UDC_EPCTL_S);
2590 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2591 } else {
2592 dev->waiting_zlp_ack_ep0in = 1;
2593 }
2595 /* clear NAK by writing CNAK in EP0_OUT */
2596 if (set) {
2597 tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
2598 tmp |= AMD_BIT(UDC_EPCTL_CNAK);
2599 writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
2600 dev->ep[UDC_EP0OUT_IX].naking = 0;
2601 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
2602 }
2604 if (!use_dma) {
2605 /* clear OUT bits in ep status */
2606 writel(UDC_EPSTS_OUT_CLEAR,
2607 &dev->ep[UDC_EP0OUT_IX].regs->sts);
2608 }
2610 /* data packet 0 bytes */
2611 } else if (tmp == UDC_EPSTS_OUT_DATA) {
2612 /* clear OUT bits in ep status */
2613 writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);
2615 /* get setup data: only 0 packet */
2616 if (use_dma) {
2617 /* no req if 0 packet, just reactivate */
2618 if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
2619 VDBG(dev, "ZLP\n");
2621 /* set HOST READY */
2622 dev->ep[UDC_EP0OUT_IX].td->status =
2623 AMD_ADDBITS(
2624 dev->ep[UDC_EP0OUT_IX].td->status,
2625 UDC_DMA_OUT_STS_BS_HOST_READY,
2626 UDC_DMA_OUT_STS_BS);
2627 /* enable RDE */
2628 udc_ep0_set_rde(dev);
2629 ret_val = IRQ_HANDLED;
2631 } else {
2632 /* control write */
2633 ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
2634 /* re-program desc. pointer for possible ZLPs */
2635 writel(dev->ep[UDC_EP0OUT_IX].td_phys,
2636 &dev->ep[UDC_EP0OUT_IX].regs->desptr);
2637 /* enable RDE */
2638 udc_ep0_set_rde(dev);
2639 }
2640 } else {
2642 /* received number bytes */
2643 count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
2644 count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE);
2645 /* out data for fifo mode not working */
2648 /* 0 packet or real data ? */
2649 if (count != 0) {
2650 ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
2651 } else {
2652 /* dummy read confirm */
2653 readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
2654 ret_val = IRQ_HANDLED;
2655 }
2656 }
2657 }
2659 /* check pending CNAKS */
2660 if (cnak_pending) {
2661 /* CNAK processing only when rxfifo is empty */
2662 if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
2663 udc_process_cnak_queue(dev);
2664 }
2667 return ret_val;
2668 }
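/*
 * Hedged example, not part of this driver: a minimal gadget setup()
 * callback as invoked through dev->driver->setup() above. A negative
 * return value takes the "setup_supported < 0" stall path; a small
 * non-negative return takes the CNAK path. The function name is
 * illustrative only.
 */
static int example_setup(struct usb_gadget *gadget,
			 const struct usb_ctrlrequest *ctrl)
{
	/* reject anything this gadget does not implement */
	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR)
		return -EOPNOTSUPP;	/* UDC stalls ep0in */

	return 0;			/* UDC clears NAK on ep0 */
}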
2670 /* Interrupt handler for Control IN traffic */
2671 static irqreturn_t udc_control_in_isr(struct udc *dev)
2672 {
2673 irqreturn_t ret_val = IRQ_NONE;
2674 u32 tmp;
2675 struct udc_ep *ep;
2676 struct udc_request *req;
2677 unsigned len;
2679 ep = &dev->ep[UDC_EP0IN_IX];
2682 writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts);
2684 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
2685 /* DMA completion */
2686 if (tmp & AMD_BIT(UDC_EPSTS_TDC)) {
2687 VDBG(dev, "isr: TDC clear\n");
2688 ret_val = IRQ_HANDLED;
2690 /* clear TDC bit */
2691 writel(AMD_BIT(UDC_EPSTS_TDC),
2692 &dev->ep[UDC_EP0IN_IX].regs->sts);
2694 /* status reg has IN bit set ? */
2695 } else if (tmp & AMD_BIT(UDC_EPSTS_IN)) {
2696 ret_val = IRQ_HANDLED;
2699 /* clear IN bit */
2700 writel(AMD_BIT(UDC_EPSTS_IN),
2701 &dev->ep[UDC_EP0IN_IX].regs->sts);
2703 if (dev->stall_ep0in) {
2704 DBG(dev, "stall ep0in\n");
2705 /* set STALL on ep0in */
2706 tmp = readl(&ep->regs->ctl);
2707 tmp |= AMD_BIT(UDC_EPCTL_S);
2708 writel(tmp, &ep->regs->ctl);
2709 } else {
2710 if (!list_empty(&ep->queue)) {
2712 req = list_entry(ep->queue.next,
2713 struct udc_request, queue);
2715 if (use_dma) {
2716 /* write desc pointer */
2717 writel(req->td_phys, &ep->regs->desptr);
2718 /* set HOST READY */
2719 req->td_data->status =
2720 AMD_ADDBITS(
2721 req->td_data->status,
2722 UDC_DMA_STP_STS_BS_HOST_READY,
2723 UDC_DMA_STP_STS_BS);
2725 /* set poll demand bit */
2726 tmp =
2727 readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2728 tmp |= AMD_BIT(UDC_EPCTL_P);
2729 writel(tmp,
2730 &dev->ep[UDC_EP0IN_IX].regs->ctl);
2732 /* all bytes will be transferred */
2733 req->req.actual = req->req.length;
2735 /* complete req */
2736 complete_req(ep, req, 0);
2738 } else {
2739 /* write fifo */
2740 udc_txfifo_write(ep, &req->req);
2742 /* length bytes transferred */
2743 len = req->req.length - req->req.actual;
2744 if (len > ep->ep.maxpacket)
2745 len = ep->ep.maxpacket;
2747 req->req.actual += len;
2748 if (req->req.actual == req->req.length
2749 || (len != ep->ep.maxpacket)) {
2750 /* complete req */
2751 complete_req(ep, req, 0);
2752 }
2753 }
2755 }
2756 }
2757 ep->halted = 0;
2758 dev->stall_ep0in = 0;
2759 if (!use_dma) {
2760 /* clear IN bit */
2761 writel(AMD_BIT(UDC_EPSTS_IN),
2762 &dev->ep[UDC_EP0IN_IX].regs->sts);
2763 }
2764 }
2766 return ret_val;
2767 }
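/*
 * Hedged sketch of what udc_txfifo_write() is assumed to do (the real
 * implementation lives earlier in this file): copy the next packet of
 * a request into the IN FIFO window as 32-bit MMIO writes. All names
 * here are illustrative.
 */
static void example_txfifo_write(void __iomem *fifo, struct usb_request *req,
				 unsigned int maxpacket)
{
	u8 *buf = (u8 *)req->buf + req->actual;
	unsigned int len = min_t(unsigned int,
				 req->length - req->actual, maxpacket);
	unsigned int i;
	u32 word;

	/* one dword per write; the final write may carry padding bytes */
	for (i = 0; i < len; i += 4) {
		word = 0;
		memcpy(&word, buf + i, min_t(unsigned int, len - i, 4));
		writel(word, fifo);
	}
}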
2770 /* Interrupt handler for global device events */
2771 static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq)
2772 __releases(dev->lock)
2773 __acquires(dev->lock)
2774 {
2775 irqreturn_t ret_val = IRQ_NONE;
2776 u32 tmp;
2777 u32 cfg;
2778 struct udc_ep *ep;
2779 u16 i;
2780 u8 udc_csr_epix;
2782 /* SET_CONFIG irq ? */
2783 if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) {
2784 ret_val = IRQ_HANDLED;
2786 /* read config value */
2787 tmp = readl(&dev->regs->sts);
2788 cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG);
2789 DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg);
2790 dev->cur_config = cfg;
2791 dev->set_cfg_not_acked = 1;
2793 /* make usb request for gadget driver */
2794 memset(&setup_data, 0, sizeof(union udc_setup_data));
2795 setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
2796 setup_data.request.wValue = cpu_to_le16(dev->cur_config);
2798 /* program the NE registers */
2799 for (i = 0; i < UDC_EP_NUM; i++) {
2800 ep = &dev->ep[i];
2801 if (ep->in) {
2803 /* ep ix in UDC CSR register space */
2804 udc_csr_epix = ep->num;
2807 /* OUT ep */
2808 } else {
2809 /* ep ix in UDC CSR register space */
2810 udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
2811 }
2813 tmp = readl(&dev->csr->ne[udc_csr_epix]);
2814 /* ep cfg */
2815 tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
2816 UDC_CSR_NE_CFG);
2817 /* write reg */
2818 writel(tmp, &dev->csr->ne[udc_csr_epix]);
2820 /* clear stall bits */
2821 ep->halted = 0;
2822 tmp = readl(&ep->regs->ctl);
2823 tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
2824 writel(tmp, &ep->regs->ctl);
2825 }
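/*
 * Note (assumption): each UDC CSR ne[] register aggregates an
 * endpoint's number, direction, type, config, interface and alt
 * fields; the AMD_ADDBITS() update in the loop above rewrites only
 * the UDC_CSR_NE_CFG field and leaves the other fields intact.
 */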
2826 /* call gadget zero with setup data received */
2827 spin_unlock(&dev->lock);
2828 tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
2829 spin_lock(&dev->lock);
2831 } /* SET_INTERFACE ? */
2832 if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) {
2833 ret_val = IRQ_HANDLED;
2835 dev->set_cfg_not_acked = 1;
2836 /* read interface and alt setting values */
2837 tmp = readl(&dev->regs->sts);
2838 dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT);
2839 dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF);
2841 /* make usb request for gadget driver */
2842 memset(&setup_data, 0, sizeof(union udc_setup_data));
2843 setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
2844 setup_data.request.bRequestType = USB_RECIP_INTERFACE;
2845 setup_data.request.wValue = cpu_to_le16(dev->cur_alt);
2846 setup_data.request.wIndex = cpu_to_le16(dev->cur_intf);
2848 DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
2849 dev->cur_alt, dev->cur_intf);
2851 /* program the NE registers */
2852 for (i = 0; i < UDC_EP_NUM; i++) {
2853 ep = &dev->ep[i];
2854 if (ep->in) {
2856 /* ep ix in UDC CSR register space */
2857 udc_csr_epix = ep->num;
2860 /* OUT ep */
2861 } else {
2862 /* ep ix in UDC CSR register space */
2863 udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
2864 }
2867 /* set ep values */
2868 tmp = readl(&dev->csr->ne[udc_csr_epix]);
2869 /* ep interface */
2870 tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
2871 UDC_CSR_NE_INTF);
2872 /* tmp = AMD_ADDBITS(tmp, 2, UDC_CSR_NE_INTF); */
2873 /* ep alt */
2874 tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
2875 UDC_CSR_NE_ALT);
2876 /* write reg */
2877 writel(tmp, &dev->csr->ne[udc_csr_epix]);
2879 /* clear stall bits */
2880 ep->halted = 0;
2881 tmp = readl(&ep->regs->ctl);
2882 tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
2883 writel(tmp, &ep->regs->ctl);
2884 }
2886 /* call gadget zero with setup data received */
2887 spin_unlock(&dev->lock);
2888 tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
2889 spin_lock(&dev->lock);
2891 } /* USB reset */
2892 if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
2893 DBG(dev, "USB Reset interrupt\n");
2894 ret_val = IRQ_HANDLED;
2896 /* allow soft reset when suspend occurs */
2897 soft_reset_occured = 0;
2899 dev->waiting_zlp_ack_ep0in = 0;
2900 dev->set_cfg_not_acked = 0;
2902 /* mask not needed interrupts */
2903 udc_mask_unused_interrupts(dev);
2905 /* call gadget to resume and reset configs etc. */
2906 spin_unlock(&dev->lock);
2907 if (dev->sys_suspended && dev->driver->resume) {
2908 dev->driver->resume(&dev->gadget);
2909 dev->sys_suspended = 0;
2910 }
2911 usb_gadget_udc_reset(&dev->gadget, dev->driver);
2912 spin_lock(&dev->lock);
2914 /* disable ep0 to empty req queue */
2915 empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
2916 ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
2918 /* soft reset when rxfifo not empty */
2919 tmp = readl(&dev->regs->sts);
2920 if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
2921 && !soft_reset_after_usbreset_occured) {
2922 udc_soft_reset(dev);
2923 soft_reset_after_usbreset_occured++;
2924 }
2926 /*
2927 * DMA reset to kill potential old DMA hw hang,
2928 * POLL bit is already reset by ep_init() through
2929 * disconnect()
2930 */
2931 DBG(dev, "DMA machine reset\n");
2932 tmp = readl(&dev->regs->cfg);
2933 writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
2934 writel(tmp, &dev->regs->cfg);
2936 /* put into initial config */
2937 udc_basic_init(dev);
2939 /* enable device setup interrupts */
2940 udc_enable_dev_setup_interrupts(dev);
2942 /* enable suspend interrupt */
2943 tmp = readl(&dev->regs->irqmsk);
2944 tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
2945 writel(tmp, &dev->regs->irqmsk);
2947 }
2948 if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
2949 DBG(dev, "USB Suspend interrupt\n");
2950 ret_val = IRQ_HANDLED;
2951 if (dev->driver->suspend) {
2952 spin_unlock(&dev->lock);
2953 dev->sys_suspended = 1;
2954 dev->driver->suspend(&dev->gadget);
2955 spin_lock(&dev->lock);
2956 }
2957 }
2958 if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
2959 DBG(dev, "ENUM interrupt\n");
2960 ret_val = IRQ_HANDLED;
2961 soft_reset_after_usbreset_occured = 0;
2963 /* disable ep0 to empty req queue */
2964 empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
2965 ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
2967 /* link up all endpoints */
2968 udc_setup_endpoints(dev);
2969 dev_info(dev->dev, "Connect: %s\n",
2970 usb_speed_string(dev->gadget.speed));
2972 /* init ep 0 */
2973 activate_control_endpoints(dev);
2975 /* enable ep0 interrupts */
2976 udc_enable_ep0_interrupts(dev);
2977 }
2978 /* session valid change interrupt */
2979 if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
2980 DBG(dev, "USB SVC interrupt\n");
2981 ret_val = IRQ_HANDLED;
2983 /* check that session is not valid to detect disconnect */
2984 tmp = readl(&dev->regs->sts);
2985 if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
2986 /* disable suspend interrupt */
2987 tmp = readl(&dev->regs->irqmsk);
2988 tmp |= AMD_BIT(UDC_DEVINT_US);
2989 writel(tmp, &dev->regs->irqmsk);
2990 DBG(dev, "USB Disconnect (session valid low)\n");
2991 /* cleanup on disconnect */
2992 usb_disconnect(udc);
2993 }
2994 }
2996 return ret_val;
2997 }
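/*
 * Illustrative helper (assumption, not part of the driver): the DMA
 * machine reset in the USB-reset path above pulses UDC_DEVCFG_DMARST
 * by setting the bit and then restoring the saved cfg value.
 */
static inline void example_dma_machine_reset(struct udc *dev)
{
	u32 cfg = readl(&dev->regs->cfg);

	writel(cfg | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
	writel(cfg, &dev->regs->cfg);	/* clear the reset bit again */
}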
3000 /* Interrupt Service Routine, see Linux Kernel Doc for parameters */
3001 irqreturn_t udc_irq(int irq, void *pdev)
3002 {
3003 struct udc *dev = pdev;
3004 u32 reg;
3005 u16 i;
3006 u32 ep_irq;
3007 irqreturn_t ret_val = IRQ_NONE;
3009 spin_lock(&dev->lock);
3011 /* check for ep irq */
3012 reg = readl(&dev->regs->ep_irqsts);
3013 if (reg) {
3014 if (reg & AMD_BIT(UDC_EPINT_OUT_EP0))
3015 ret_val |= udc_control_out_isr(dev);
3016 if (reg & AMD_BIT(UDC_EPINT_IN_EP0))
3017 ret_val |= udc_control_in_isr(dev);
3023 for (i = 1; i < UDC_EP_NUM; i++) {
3024 ep_irq = 1 << i;
3025 if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0)
3026 continue;
3028 /* clear irq status */
3029 writel(ep_irq, &dev->regs->ep_irqsts);
3031 /* irq for out ep ? */
3032 if (i > UDC_EPIN_NUM)
3033 ret_val |= udc_data_out_isr(dev, i);
3034 else
3035 ret_val |= udc_data_in_isr(dev, i);
3036 }
3037 }
3041 /* check for dev irq */
3042 reg = readl(&dev->regs->irqsts);
3043 if (reg) {
3044 /* clear irq */
3045 writel(reg, &dev->regs->irqsts);
3046 ret_val |= udc_dev_isr(dev, reg);
3047 }
3050 spin_unlock(&dev->lock);
3051 return ret_val;
3052 }
3053 EXPORT_SYMBOL_GPL(udc_irq);
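/*
 * Usage sketch (hedged): bus glue is expected to register udc_irq()
 * with request_irq(); since the handler returns IRQ_NONE when no UDC
 * status bit is set, the line can be shared. example_hook_irq() is an
 * illustrative name, not a driver function.
 */
static int example_hook_irq(struct pci_dev *pdev, struct udc *dev)
{
	/* dev is passed back to udc_irq() as its void *pdev argument */
	return request_irq(pdev->irq, udc_irq, IRQF_SHARED,
			   "amd5536udc", dev);
}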
3055 /* Tears down device */
3056 void gadget_release(struct device *pdev)
3057 {
3058 struct amd5536udc *dev = dev_get_drvdata(pdev);
3059 kfree(dev);
3060 }
3061 EXPORT_SYMBOL_GPL(gadget_release);
3063 /* Cleanup on device remove */
3064 void udc_remove(struct udc *dev)
3065 {
3066 /* remove timer */
3067 stop_timer++;
3068 if (timer_pending(&udc_timer))
3069 wait_for_completion(&on_exit);
3071 del_timer_sync(&udc_timer);
3072 /* remove pollstall timer */
3073 stop_pollstall_timer++;
3074 if (timer_pending(&udc_pollstall_timer))
3075 wait_for_completion(&on_pollstall_exit);
3076 if (udc_pollstall_timer.data)
3077 del_timer_sync(&udc_pollstall_timer);
3078 udc = NULL;
3079 }
3080 EXPORT_SYMBOL_GPL(udc_remove);
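/*
 * Note (hedged): udc_remove() bumps stop_timer/stop_pollstall_timer
 * before waiting so that a timer callback already in flight sees the
 * stop flag, signals on_exit/on_pollstall_exit instead of re-arming,
 * and only then is del_timer_sync() allowed to run.
 */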
3082 /* free all the dma pools */
3083 void free_dma_pools(struct udc *dev)
3084 {
3085 dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td,
3086 dev->ep[UDC_EP0OUT_IX].td_phys);
3087 dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td_stp,
3088 dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3089 dma_pool_destroy(dev->stp_requests);
3090 dma_pool_destroy(dev->data_requests);
3091 }
3092 EXPORT_SYMBOL_GPL(free_dma_pools);
3094 /* create dma pools on init */
3095 int init_dma_pools(struct udc *dev)
3096 {
3097 struct udc_stp_dma *td_stp;
3098 struct udc_data_dma *td_data;
3099 int retval;
3101 /* consistent DMA mode setting ? */
3102 if (use_dma_ppb) {
3103 use_dma_bufferfill_mode = 0;
3104 } else {
3105 use_dma_ppb_du = 0;
3106 use_dma_bufferfill_mode = 1;
3107 }
3110 dev->data_requests = dma_pool_create("data_requests", dev->dev,
3111 sizeof(struct udc_data_dma), 0, 0);
3112 if (!dev->data_requests) {
3113 DBG(dev, "can't get request data pool\n");
3114 return -ENOMEM;
3115 }
3117 /* EP0 in dma regs = dev control regs */
3118 dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;
3120 /* dma desc for setup data */
3121 dev->stp_requests = dma_pool_create("setup requests", dev->dev,
3122 sizeof(struct udc_stp_dma), 0, 0);
3123 if (!dev->stp_requests) {
3124 DBG(dev, "can't get stp request pool\n");
3125 retval = -ENOMEM;
3126 goto err_create_dma_pool;
3127 }
3129 td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
3130 &dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3131 if (!td_stp) {
3132 retval = -ENOMEM;
3133 goto err_alloc_dma;
3134 }
3135 dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;
3137 /* data: 0 packets !? */
3138 td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
3139 &dev->ep[UDC_EP0OUT_IX].td_phys);
3140 if (!td_data) {
3141 retval = -ENOMEM;
3142 goto err_alloc_phys;
3143 }
3144 dev->ep[UDC_EP0OUT_IX].td = td_data;
3145 return 0;
3147 err_alloc_phys:
3148 dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td_stp,
3149 dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3150 err_alloc_dma:
3151 dma_pool_destroy(dev->stp_requests);
3152 dev->stp_requests = NULL;
3153 err_create_dma_pool:
3154 dma_pool_destroy(dev->data_requests);
3155 dev->data_requests = NULL;
3156 return retval;
3157 }
3158 EXPORT_SYMBOL_GPL(init_dma_pools);
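/*
 * Hedged sketch of the dma_pool lifecycle used by init_dma_pools()/
 * free_dma_pools() above: create a pool of fixed-size coherent
 * descriptors, allocate one with its bus address, then release
 * everything in reverse order. example_pool_roundtrip() is
 * illustrative only.
 */
static int example_pool_roundtrip(struct device *dev)
{
	struct dma_pool *pool;
	struct udc_data_dma *td;
	dma_addr_t td_phys;

	pool = dma_pool_create("example", dev,
			       sizeof(struct udc_data_dma), 0, 0);
	if (!pool)
		return -ENOMEM;

	td = dma_pool_alloc(pool, GFP_KERNEL, &td_phys);
	if (!td) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	dma_pool_free(pool, td, td_phys);
	dma_pool_destroy(pool);
	return 0;
}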
3161 int udc_probe(struct udc *dev)
3162 {
3163 char tmp[128];
3164 u32 reg;
3165 int retval;
3167 /* mark timer as not initialized */
3168 udc_timer.data = 0;
3169 udc_pollstall_timer.data = 0;
3171 /* device struct setup */
3172 dev->gadget.ops = &udc_ops;
3174 dev_set_name(&dev->gadget.dev, "gadget");
3175 dev->gadget.name = name;
3176 dev->gadget.max_speed = USB_SPEED_HIGH;
3178 /* init registers, interrupts, ... */
3179 startup_registers(dev);
3181 dev_info(dev->dev, "%s\n", mod_desc);
3183 snprintf(tmp, sizeof(tmp), "%d", dev->irq);
3185 /* Print this device info for AMD chips only */
3186 if (dev->chiprev == UDC_HSA0_REV ||
3187 dev->chiprev == UDC_HSB1_REV) {
3188 dev_info(dev->dev, "irq %s, pci mem %08lx, chip rev %02x(Geode5536 %s)\n",
3189 tmp, dev->phys_addr, dev->chiprev,
3190 (dev->chiprev == UDC_HSA0_REV) ?
3191 "A0" : "B1");
3192 strcpy(tmp, UDC_DRIVER_VERSION_STRING);
3193 if (dev->chiprev == UDC_HSA0_REV) {
3194 dev_err(dev->dev, "chip revision is A0; too old\n");
3195 retval = -ENODEV;
3196 goto finished;
3197 }
3198 dev_info(dev->dev,
3199 "driver version: %s(for Geode5536 B1)\n", tmp);
3200 }
3204 retval = usb_add_gadget_udc_release(udc->dev, &dev->gadget,
3205 gadget_release);
3206 if (retval)
3207 goto finished;
3209 /* timer init */
3210 init_timer(&udc_timer);
3211 udc_timer.function = udc_timer_function;
3212 udc_timer.data = 1;
3213 /* timer pollstall init */
3214 init_timer(&udc_pollstall_timer);
3215 udc_pollstall_timer.function = udc_pollstall_timer_function;
3216 udc_pollstall_timer.data = 1;
3218 /* set SD */
3219 reg = readl(&dev->regs->ctl);
3220 reg |= AMD_BIT(UDC_DEVCTL_SD);
3221 writel(reg, &dev->regs->ctl);
3223 /* print dev register info */
3224 print_regs(dev);
3226 return 0;
3228 finished:
3229 return retval;
3230 }
3231 EXPORT_SYMBOL_GPL(udc_probe);
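/*
 * Note (hedged): udc_probe() leaves the soft-disconnect (SD) bit set,
 * so the device stays off the bus until a gadget driver binds; pulling
 * the device up again is assumed to amount to clearing SD, roughly:
 */
static inline void example_soft_connect(struct udc *dev)
{
	u32 reg = readl(&dev->regs->ctl);

	writel(reg & AMD_UNMASK_BIT(UDC_DEVCTL_SD), &dev->regs->ctl);
}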
3233 MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
3234 MODULE_AUTHOR("Thomas Dahlmann");
3235 MODULE_LICENSE("GPL");