]> asedeno.scripts.mit.edu Git - linux.git/blob - drivers/usb/cdns3/gadget.c
Merge branch 'pci/virtualization'
[linux.git] / drivers / usb / cdns3 / gadget.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Cadence USBSS DRD Driver - gadget side.
4  *
5  * Copyright (C) 2018-2019 Cadence Design Systems.
6  * Copyright (C) 2017-2018 NXP
7  *
8  * Authors: Pawel Jez <pjez@cadence.com>,
9  *          Pawel Laszczak <pawell@cadence.com>
10  *          Peter Chen <peter.chen@nxp.com>
11  */
12
13 /*
14  * Work around 1:
 * In some situations the controller may get a stale data address in a TRB
 * through the following sequence:
 * 1. Controller reads a TRB containing the data address
 * 2. Software updates TRBs, including the data address and Cycle bit
 * 3. Controller reads the TRB containing the Cycle bit
 * 4. DMA runs with the stale data address
21  *
 * To fix this problem, the driver needs to make the first TRB in a TD invalid.
 * After preparing all TRBs, the driver must check the DMA position; if the
 * DMA points to the first just-added TRB and the doorbell is set, the driver
 * must defer making this TRB valid. The TRB will be made valid while adding
 * the next TRB, but only if the DMA is stopped or a TRBERR interrupt occurs.
28  *
29  * Issue has been fixed in DEV_VER_V3 version of controller.
30  *
 * Work around 2:
 * The controller uses shared on-chip buffers for all incoming packets on OUT
 * endpoints, including ep0out. It is a FIFO buffer, so packets must be handled
 * by DMA in the correct order. If the first packet in the buffer is not
 * handled, the following packets directed to other endpoints and functions
 * will be blocked.
 * Additionally, packets directed to one endpoint can occupy the entire
 * on-chip buffer. In this case transfers to other endpoints will also be
 * blocked.
39  *
40  * To resolve this issue after raising the descriptor missing interrupt
41  * driver prepares internal usb_request object and use it to arm DMA transfer.
42  *
 * The problematic situation was observed when an endpoint had been enabled
 * but no usb_request was queued. The driver tries to detect such endpoints
 * and uses this workaround only for them.
 *
 * The driver uses a limited number of buffers. This number can be set by the
 * macro CDNS3_WA2_NUM_BUFFERS.
 *
 * Such a blocking situation was observed with the ACM gadget: the host sends
 * an OUT data packet, but the ACM function is not prepared for it. This
 * causes the buffer placed in on-chip memory to block transfers to other
 * endpoints.
54  *
55  * Issue has been fixed in DEV_VER_V2 version of controller.
56  *
57  */
58
59 #include <linux/dma-mapping.h>
60 #include <linux/usb/gadget.h>
61 #include <linux/module.h>
62 #include <linux/iopoll.h>
63
64 #include "core.h"
65 #include "gadget-export.h"
66 #include "gadget.h"
67 #include "trace.h"
68 #include "drd.h"
69
70 static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
71                                    struct usb_request *request,
72                                    gfp_t gfp_flags);
73
74 /**
75  * cdns3_set_register_bit - set bit in given register.
76  * @ptr: address of device controller register to be read and changed
77  * @mask: bits requested to set
78  */
79 void cdns3_set_register_bit(void __iomem *ptr, u32 mask)
80 {
81         mask = readl(ptr) | mask;
82         writel(mask, ptr);
83 }
84
85 /**
86  * cdns3_ep_addr_to_index - Macro converts endpoint address to
87  * index of endpoint object in cdns3_device.eps[] container
88  * @ep_addr: endpoint address for which endpoint object is required
89  *
90  */
91 u8 cdns3_ep_addr_to_index(u8 ep_addr)
92 {
93         return (((ep_addr & 0x7F)) + ((ep_addr & USB_DIR_IN) ? 16 : 0));
94 }
95
96 static int cdns3_get_dma_pos(struct cdns3_device *priv_dev,
97                              struct cdns3_endpoint *priv_ep)
98 {
99         int dma_index;
100
101         dma_index = readl(&priv_dev->regs->ep_traddr) - priv_ep->trb_pool_dma;
102
103         return dma_index / TRB_SIZE;
104 }
105
106 /**
107  * cdns3_next_request - returns next request from list
108  * @list: list containing requests
109  *
110  * Returns request or NULL if no requests in list
111  */
112 struct usb_request *cdns3_next_request(struct list_head *list)
113 {
114         return list_first_entry_or_null(list, struct usb_request, list);
115 }
116
117 /**
118  * cdns3_next_align_buf - returns next buffer from list
119  * @list: list containing buffers
120  *
121  * Returns buffer or NULL if no buffers in list
122  */
123 struct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list)
124 {
125         return list_first_entry_or_null(list, struct cdns3_aligned_buf, list);
126 }
127
128 /**
129  * cdns3_next_priv_request - returns next request from list
130  * @list: list containing requests
131  *
132  * Returns request or NULL if no requests in list
133  */
134 struct cdns3_request *cdns3_next_priv_request(struct list_head *list)
135 {
136         return list_first_entry_or_null(list, struct cdns3_request, list);
137 }
138
139 /**
140  * select_ep - selects endpoint
141  * @priv_dev:  extended gadget object
142  * @ep: endpoint address
143  */
144 void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep)
145 {
146         if (priv_dev->selected_ep == ep)
147                 return;
148
149         priv_dev->selected_ep = ep;
150         writel(ep, &priv_dev->regs->ep_sel);
151 }
152
153 dma_addr_t cdns3_trb_virt_to_dma(struct cdns3_endpoint *priv_ep,
154                                  struct cdns3_trb *trb)
155 {
156         u32 offset = (char *)trb - (char *)priv_ep->trb_pool;
157
158         return priv_ep->trb_pool_dma + offset;
159 }
160
161 int cdns3_ring_size(struct cdns3_endpoint *priv_ep)
162 {
163         switch (priv_ep->type) {
164         case USB_ENDPOINT_XFER_ISOC:
165                 return TRB_ISO_RING_SIZE;
166         case USB_ENDPOINT_XFER_CONTROL:
167                 return TRB_CTRL_RING_SIZE;
168         default:
169                 return TRB_RING_SIZE;
170         }
171 }
172
/**
 * cdns3_allocate_trb_pool - Allocates TRB's pool for selected endpoint
 * @priv_ep:  endpoint object
 *
 * The pool is DMA-coherent memory sized by cdns3_ring_size() for the
 * endpoint's transfer type. An existing pool is reused and cleared instead
 * of being reallocated. For endpoints other than ep0 (num != 0) the last
 * entry is initialized as a Link TRB pointing back to the start of the
 * pool, forming a ring.
 *
 * Function will return 0 on success or -ENOMEM on allocation error
 */
int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	int ring_size = cdns3_ring_size(priv_ep);
	struct cdns3_trb *link_trb;

	if (!priv_ep->trb_pool) {
		/* GFP_ATOMIC: may run in atomic context; GFP_DMA32 keeps the
		 * pool below 4 GiB for the controller's DMA.
		 */
		priv_ep->trb_pool = dma_alloc_coherent(priv_dev->sysdev,
						       ring_size,
						       &priv_ep->trb_pool_dma,
						       GFP_DMA32 | GFP_ATOMIC);
		if (!priv_ep->trb_pool)
			return -ENOMEM;
	} else {
		/* pool already allocated - just wipe stale TRBs */
		memset(priv_ep->trb_pool, 0, ring_size);
	}

	/* endpoint 0 (num == 0) does not get a Link TRB */
	if (!priv_ep->num)
		return 0;

	priv_ep->num_trbs = ring_size / TRB_SIZE;
	/* Initialize the last TRB as Link TRB. */
	link_trb = (priv_ep->trb_pool + (priv_ep->num_trbs - 1));
	link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma);
	link_trb->control = TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE;

	return 0;
}
207
208 static void cdns3_free_trb_pool(struct cdns3_endpoint *priv_ep)
209 {
210         struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
211
212         if (priv_ep->trb_pool) {
213                 dma_free_coherent(priv_dev->sysdev,
214                                   cdns3_ring_size(priv_ep),
215                                   priv_ep->trb_pool, priv_ep->trb_pool_dma);
216                 priv_ep->trb_pool = NULL;
217         }
218 }
219
/**
 * cdns3_ep_stall_flush - Stalls and flushes selected endpoint
 * @priv_ep: endpoint object
 *
 * Issues a single ep_cmd write combining DFLUSH (flush endpoint data),
 * ERDY and SSTALL (stall handshake), then busy-waits up to 1 ms for the
 * controller to clear DFLUSH before updating the software stall state.
 *
 * Endpoint must be selected before call to this function
 */
static void cdns3_ep_stall_flush(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	int val;

	trace_cdns3_halt(priv_ep, 1, 1);

	writel(EP_CMD_DFLUSH | EP_CMD_ERDY | EP_CMD_SSTALL,
	       &priv_dev->regs->ep_cmd);

	/* wait for DFLUSH cleared (poll every 1 us, 1000 us timeout) */
	readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
				  !(val & EP_CMD_DFLUSH), 1, 1000);
	/* mark stalled and clear any deferred stall request */
	priv_ep->flags |= EP_STALLED;
	priv_ep->flags &= ~EP_STALL_PENDING;
}
242
/**
 * cdns3_hw_reset_eps_config - reset endpoints configuration kept by controller.
 * @priv_dev: extended gadget object
 *
 * Writes USB_CONF_CFGRST to drop the hardware endpoint configuration and
 * then clears the software bookkeeping that mirrors it (L1 permission,
 * configured flag, on-chip buffer accounting, pending-setup state).
 */
void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev)
{
	writel(USB_CONF_CFGRST, &priv_dev->regs->usb_conf);

	/* disallow L1 transitions until the device is configured again */
	cdns3_allow_enable_l1(priv_dev, 0);
	priv_dev->hw_configured_flag = 0;
	priv_dev->onchip_used_size = 0;
	priv_dev->out_mem_is_allocated = 0;
	priv_dev->wait_for_setup = 0;
}
257
258 /**
259  * cdns3_ep_inc_trb - increment a trb index.
260  * @index: Pointer to the TRB index to increment.
261  * @cs: Cycle state
262  * @trb_in_seg: number of TRBs in segment
263  *
264  * The index should never point to the link TRB. After incrementing,
265  * if it is point to the link TRB, wrap around to the beginning and revert
266  * cycle state bit The
267  * link TRB is always at the last TRB entry.
268  */
269 static void cdns3_ep_inc_trb(int *index, u8 *cs, int trb_in_seg)
270 {
271         (*index)++;
272         if (*index == (trb_in_seg - 1)) {
273                 *index = 0;
274                 *cs ^=  1;
275         }
276 }
277
278 /**
279  * cdns3_ep_inc_enq - increment endpoint's enqueue pointer
280  * @priv_ep: The endpoint whose enqueue pointer we're incrementing
281  */
282 static void cdns3_ep_inc_enq(struct cdns3_endpoint *priv_ep)
283 {
284         priv_ep->free_trbs--;
285         cdns3_ep_inc_trb(&priv_ep->enqueue, &priv_ep->pcs, priv_ep->num_trbs);
286 }
287
288 /**
289  * cdns3_ep_inc_deq - increment endpoint's dequeue pointer
290  * @priv_ep: The endpoint whose dequeue pointer we're incrementing
291  */
292 static void cdns3_ep_inc_deq(struct cdns3_endpoint *priv_ep)
293 {
294         priv_ep->free_trbs++;
295         cdns3_ep_inc_trb(&priv_ep->dequeue, &priv_ep->ccs, priv_ep->num_trbs);
296 }
297
298 void cdns3_move_deq_to_next_trb(struct cdns3_request *priv_req)
299 {
300         struct cdns3_endpoint *priv_ep = priv_req->priv_ep;
301         int current_trb = priv_req->start_trb;
302
303         while (current_trb != priv_req->end_trb) {
304                 cdns3_ep_inc_deq(priv_ep);
305                 current_trb = priv_ep->dequeue;
306         }
307
308         cdns3_ep_inc_deq(priv_ep);
309 }
310
311 /**
312  * cdns3_allow_enable_l1 - enable/disable permits to transition to L1.
313  * @priv_dev: Extended gadget object
314  * @enable: Enable/disable permit to transition to L1.
315  *
316  * If bit USB_CONF_L1EN is set and device receive Extended Token packet,
317  * then controller answer with ACK handshake.
318  * If bit USB_CONF_L1DS is set and device receive Extended Token packet,
319  * then controller answer with NYET handshake.
320  */
321 void cdns3_allow_enable_l1(struct cdns3_device *priv_dev, int enable)
322 {
323         if (enable)
324                 writel(USB_CONF_L1EN, &priv_dev->regs->usb_conf);
325         else
326                 writel(USB_CONF_L1DS, &priv_dev->regs->usb_conf);
327 }
328
329 enum usb_device_speed cdns3_get_speed(struct cdns3_device *priv_dev)
330 {
331         u32 reg;
332
333         reg = readl(&priv_dev->regs->usb_sts);
334
335         if (DEV_SUPERSPEED(reg))
336                 return USB_SPEED_SUPER;
337         else if (DEV_HIGHSPEED(reg))
338                 return USB_SPEED_HIGH;
339         else if (DEV_FULLSPEED(reg))
340                 return USB_SPEED_FULL;
341         else if (DEV_LOWSPEED(reg))
342                 return USB_SPEED_LOW;
343         return USB_SPEED_UNKNOWN;
344 }
345
346 /**
347  * cdns3_start_all_request - add to ring all request not started
348  * @priv_dev: Extended gadget object
349  * @priv_ep: The endpoint for whom request will be started.
350  *
351  * Returns return ENOMEM if transfer ring i not enough TRBs to start
352  *         all requests.
353  */
354 static int cdns3_start_all_request(struct cdns3_device *priv_dev,
355                                    struct cdns3_endpoint *priv_ep)
356 {
357         struct usb_request *request;
358         int ret = 0;
359
360         while (!list_empty(&priv_ep->deferred_req_list)) {
361                 request = cdns3_next_request(&priv_ep->deferred_req_list);
362
363                 ret = cdns3_ep_run_transfer(priv_ep, request);
364                 if (ret)
365                         return ret;
366
367                 list_del(&request->list);
368                 list_add_tail(&request->list,
369                               &priv_ep->pending_req_list);
370         }
371
372         priv_ep->flags &= ~EP_RING_FULL;
373         return ret;
374 }
375
/*
 * WA2: Set flag for all non-ISOC OUT endpoints. If this flag is set,
 * the driver tries to detect whether the endpoint needs an additional
 * internal buffer to unblock the on-chip FIFO. The flag is cleared if
 * the DMA is armed before the first DESCMISS interrupt.
 *
 * Note: the endpoint parameter was previously named 'ep_priv' while the
 * body referenced 'priv_ep', silently capturing the caller's local
 * variable; the parameter is now named and used consistently, and all
 * arguments are parenthesized. @priv_dev is unused but kept for
 * call-site compatibility.
 */
#define cdns3_wa2_enable_detection(priv_dev, priv_ep, reg) do { \
	if (!(priv_ep)->dir && (priv_ep)->type != USB_ENDPOINT_XFER_ISOC) { \
		(priv_ep)->flags |= EP_QUIRK_EXTRA_BUF_DET; \
		(reg) |= EP_STS_EN_DESCMISEN; \
	} } while (0)
387
/**
 * cdns3_wa2_descmiss_copy_data - copy data from internal requests to
 * request queued by class driver.
 * @priv_ep: extended endpoint object
 * @request: request object
 *
 * Appends the data received into the driver's internal DESCMISS buffers
 * to @request->buf, consuming (unlinking and freeing) each internal
 * request. Internal requests carrying REQUEST_INTERNAL_CH are chunks of
 * one logical transfer, so copying continues until the final, unchained
 * chunk has been consumed or a still-pending request is reached.
 */
static void cdns3_wa2_descmiss_copy_data(struct cdns3_endpoint *priv_ep,
					 struct usb_request *request)
{
	struct usb_request *descmiss_req;
	struct cdns3_request *descmiss_priv_req;

	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
		int chunk_end;
		int length;

		descmiss_priv_req =
			cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
		descmiss_req = &descmiss_priv_req->request;

		/* driver can't touch pending request */
		if (descmiss_priv_req->flags & REQUEST_PENDING)
			break;

		chunk_end = descmiss_priv_req->flags & REQUEST_INTERNAL_CH;
		length = request->actual + descmiss_req->actual;

		request->status = descmiss_req->status;

		if (length <= request->length) {
			memcpy(&((u8 *)request->buf)[request->actual],
			       descmiss_req->buf,
			       descmiss_req->actual);
			request->actual = length;
		} else {
			/* It should never occur: the class driver's buffer is
			 * too small for the accumulated data.
			 */
			request->status = -ENOMEM;
		}

		/* unlink the internal request before freeing it */
		list_del_init(&descmiss_priv_req->list);

		kfree(descmiss_req->buf);
		cdns3_gadget_ep_free_request(&priv_ep->endpoint, descmiss_req);
		--priv_ep->wa2_counter;

		/* stop after the final (unchained) chunk */
		if (!chunk_end)
			break;
	}
}
437
/*
 * WA2 hook on request completion. If the finished request is one of the
 * driver's internal DESCMISS requests, its data is handed over to the
 * first class-driver request still waiting on the deferred list.
 *
 * Returns the request that should be given back to the class driver, or
 * NULL when the completion was consumed internally (no waiting request,
 * or more chunks of the transfer are still expected).
 */
struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev,
					      struct cdns3_endpoint *priv_ep,
					      struct cdns3_request *priv_req)
{
	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN &&
	    priv_req->flags & REQUEST_INTERNAL) {
		struct usb_request *req;

		req = cdns3_next_request(&priv_ep->deferred_req_list);

		priv_ep->descmis_req = NULL;

		if (!req)
			return NULL;

		cdns3_wa2_descmiss_copy_data(priv_ep, req);
		/* short of a forced end-of-transfer, wait until the class
		 * request has been filled completely
		 */
		if (!(priv_ep->flags & EP_QUIRK_END_TRANSFER) &&
		    req->length != req->actual) {
			/* wait for next part of transfer */
			return NULL;
		}

		if (req->status == -EINPROGRESS)
			req->status = 0;

		list_del_init(&req->list);
		/* re-arm whatever is still on the deferred list */
		cdns3_start_all_request(priv_dev, priv_ep);
		return req;
	}

	return &priv_req->request;
}
470
/*
 * WA2 hook invoked for every queued request.
 *
 * Return: 0 to queue the request normally, 1 when the request must be
 * deferred behind in-flight DESCMISS transfers, or positive EINPROGRESS
 * when the request was completed here directly from internally buffered
 * data.
 */
int cdns3_wa2_gadget_ep_queue(struct cdns3_device *priv_dev,
			      struct cdns3_endpoint *priv_ep,
			      struct cdns3_request *priv_req)
{
	int deferred = 0;

	/*
	 * If a transfer was queued before DESCMISS appeared, then the driver
	 * can disable handling of the DESCMISS interrupt. Driver assumes that
	 * it can disable special treatment for this endpoint.
	 */
	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
		u32 reg;

		cdns3_select_ep(priv_dev, priv_ep->num | priv_ep->dir);
		priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
		reg = readl(&priv_dev->regs->ep_sts_en);
		reg &= ~EP_STS_EN_DESCMISEN;
		trace_cdns3_wa2(priv_ep, "workaround disabled\n");
		writel(reg, &priv_dev->regs->ep_sts_en);
	}

	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
		u8 pending_empty = list_empty(&priv_ep->pending_req_list);
		u8 descmiss_empty = list_empty(&priv_ep->wa2_descmiss_req_list);

		/*
		 *  DESCMISS transfer has been finished, so data will be
		 *  directly copied from internal allocated usb_request
		 *  objects.
		 */
		if (pending_empty && !descmiss_empty &&
		    !(priv_req->flags & REQUEST_INTERNAL)) {
			cdns3_wa2_descmiss_copy_data(priv_ep,
						     &priv_req->request);

			trace_cdns3_wa2(priv_ep, "get internal stored data");

			list_add_tail(&priv_req->request.list,
				      &priv_ep->pending_req_list);
			cdns3_gadget_giveback(priv_ep, priv_req,
					      priv_req->request.status);

			/*
			 * Intentionally driver returns positive value as
			 * correct value. It informs that transfer has
			 * been finished.
			 */
			return EINPROGRESS;
		}

		/*
		 * Driver will wait for completion DESCMISS transfer,
		 * before starts new, not DESCMISS transfer.
		 */
		if (!pending_empty && !descmiss_empty) {
			trace_cdns3_wa2(priv_ep, "wait for pending transfer\n");
			deferred = 1;
		}

		/* internal DESCMISS requests are tracked on their own list */
		if (priv_req->flags & REQUEST_INTERNAL)
			list_add_tail(&priv_req->list,
				      &priv_ep->wa2_descmiss_req_list);
	}

	return deferred;
}
538
539 static void cdns3_wa2_remove_old_request(struct cdns3_endpoint *priv_ep)
540 {
541         struct cdns3_request *priv_req;
542
543         while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
544                 u8 chain;
545
546                 priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
547                 chain = !!(priv_req->flags & REQUEST_INTERNAL_CH);
548
549                 trace_cdns3_wa2(priv_ep, "removes eldest request");
550
551                 kfree(priv_req->request.buf);
552                 cdns3_gadget_ep_free_request(&priv_ep->endpoint,
553                                              &priv_req->request);
554                 list_del_init(&priv_req->list);
555                 --priv_ep->wa2_counter;
556
557                 if (!chain)
558                         break;
559         }
560 }
561
/**
 * cdns3_wa2_descmissing_packet - handles descriptor missing event.
 * @priv_ep: extended endpoint object
 *
 * This function is used only for WA2. For more information see Work around 2
 * description.
 *
 * Allocates an internal usb_request with a CDNS3_DESCMIS_BUF_SIZE buffer
 * and queues it so the DMA can drain the shared on-chip FIFO even though
 * the class driver queued nothing.
 */
static void cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_request *priv_req;
	struct usb_request *request;

	/* first DESCMISS on this endpoint: detection phase ends, WA2 engages */
	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
		priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
		priv_ep->flags |= EP_QUIRK_EXTRA_BUF_EN;
	}

	trace_cdns3_wa2(priv_ep, "Description Missing detected\n");

	/* cap the number of internal buffers; drop the eldest when full */
	if (priv_ep->wa2_counter >= CDNS3_WA2_NUM_BUFFERS)
		cdns3_wa2_remove_old_request(priv_ep);

	request = cdns3_gadget_ep_alloc_request(&priv_ep->endpoint,
						GFP_ATOMIC);
	if (!request)
		goto err;

	priv_req = to_cdns3_request(request);
	priv_req->flags |= REQUEST_INTERNAL;

	/* If this field is still assigned, the transfer related to that
	 * request has not finished yet. In this case the driver simply
	 * allocates the next request and marks the previous one with
	 * REQUEST_INTERNAL_CH to indicate that the current request is a
	 * continuation of it.
	 */
	if (priv_ep->descmis_req)
		priv_ep->descmis_req->flags |= REQUEST_INTERNAL_CH;

	priv_req->request.buf = kzalloc(CDNS3_DESCMIS_BUF_SIZE,
					GFP_ATOMIC);
	/*
	 * NOTE(review): wa2_counter is incremented even when the buffer
	 * allocation above fails (the error path below does not decrement
	 * it) - confirm the counter cannot drift.
	 */
	priv_ep->wa2_counter++;

	if (!priv_req->request.buf) {
		cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
		goto err;
	}

	priv_req->request.length = CDNS3_DESCMIS_BUF_SIZE;
	priv_ep->descmis_req = priv_req;

	/* arm the DMA with the internal request */
	__cdns3_gadget_ep_queue(&priv_ep->endpoint,
				&priv_ep->descmis_req->request,
				GFP_ATOMIC);

	return;

err:
	dev_err(priv_ep->cdns3_dev->dev,
		"Failed: No sufficient memory for DESCMIS\n");
}
623
/**
 * cdns3_gadget_giveback - call struct usb_request's ->complete callback
 * @priv_ep: The endpoint to whom the request belongs to
 * @priv_req: The request we're giving back
 * @status: completion code for the request
 *
 * Must be called with controller's lock held and interrupts disabled. This
 * function will unmap @req and call its ->complete() callback to notify upper
 * layers that it has completed.
 */
void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
			   struct cdns3_request *priv_req,
			   int status)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct usb_request *request = &priv_req->request;

	list_del_init(&request->list);

	/* don't overwrite a status that was already set elsewhere */
	if (request->status == -EINPROGRESS)
		request->status = status;

	usb_gadget_unmap_request_by_dev(priv_dev->sysdev, request,
					priv_ep->dir);

	/*
	 * OUT data that was received into a bounce buffer (see
	 * cdns3_prepare_aligned_request_buf()) is copied back into the
	 * caller's unaligned buffer on success.
	 */
	if ((priv_req->flags & REQUEST_UNALIGNED) &&
	    priv_ep->dir == USB_DIR_OUT && !request->status)
		memcpy(request->buf, priv_req->aligned_buf->buf,
		       request->length);

	priv_req->flags &= ~(REQUEST_PENDING | REQUEST_UNALIGNED);
	trace_cdns3_gadget_giveback(priv_req);

	/* on controllers older than DEV_VER_V2 the completion may belong to
	 * an internal WA2 request; NULL means it was consumed internally
	 */
	if (priv_dev->dev_ver < DEV_VER_V2) {
		request = cdns3_wa2_gadget_giveback(priv_dev, priv_ep,
						    priv_req);
		if (!request)
			return;
	}

	if (request->complete) {
		/* drop the lock across the callback - ->complete() may
		 * re-enter the driver (e.g. to requeue)
		 */
		spin_unlock(&priv_dev->lock);
		usb_gadget_giveback_request(&priv_ep->endpoint,
					    request);
		spin_lock(&priv_dev->lock);
	}

	/* internally allocated zero-length-packet requests are freed here */
	if (request->buf == priv_dev->zlp_buf)
		cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
}
674
675 void cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep)
676 {
677         /* Work around for stale data address in TRB*/
678         if (priv_ep->wa1_set) {
679                 trace_cdns3_wa1(priv_ep, "restore cycle bit");
680
681                 priv_ep->wa1_set = 0;
682                 priv_ep->wa1_trb_index = 0xFFFF;
683                 if (priv_ep->wa1_cycle_bit) {
684                         priv_ep->wa1_trb->control =
685                                 priv_ep->wa1_trb->control | 0x1;
686                 } else {
687                         priv_ep->wa1_trb->control =
688                                 priv_ep->wa1_trb->control & ~0x1;
689                 }
690         }
691 }
692
/*
 * Deferred release of bounce buffers that are no longer in use. Runs from
 * the aligned_buf_wq work item queued by
 * cdns3_prepare_aligned_request_buf().
 */
static void cdns3_free_aligned_request_buf(struct work_struct *work)
{
	struct cdns3_device *priv_dev = container_of(work, struct cdns3_device,
					aligned_buf_wq);
	struct cdns3_aligned_buf *buf, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&priv_dev->lock, flags);

	list_for_each_entry_safe(buf, tmp, &priv_dev->aligned_buf_list, list) {
		if (!buf->in_use) {
			list_del(&buf->list);

			/*
			 * Re-enable interrupts to free DMA capable memory.
			 * Driver can't free this memory with disabled
			 * interrupts.
			 */
			spin_unlock_irqrestore(&priv_dev->lock, flags);
			dma_free_coherent(priv_dev->sysdev, buf->size,
					  buf->buf, buf->dma);
			kfree(buf);
			/*
			 * NOTE(review): the lock is dropped mid-iteration,
			 * so 'tmp' (the cached next entry) could in principle
			 * be unlinked/freed concurrently - confirm all other
			 * mutators of aligned_buf_list hold the same lock and
			 * never free entries outside this work item.
			 */
			spin_lock_irqsave(&priv_dev->lock, flags);
		}
	}

	spin_unlock_irqrestore(&priv_dev->lock, flags);
}
721
/*
 * Substitutes a DMA-coherent bounce buffer when the request's buffer is
 * not 8-byte aligned. For IN transfers the payload is staged into the
 * bounce buffer here; for OUT transfers the data is copied back in
 * cdns3_gadget_giveback(). Marks the request REQUEST_UNALIGNED.
 *
 * Returns 0 on success (or when no bounce buffer is needed), -ENOMEM on
 * allocation failure.
 */
static int cdns3_prepare_aligned_request_buf(struct cdns3_request *priv_req)
{
	struct cdns3_endpoint *priv_ep = priv_req->priv_ep;
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_aligned_buf *buf;

	/* check if buffer is aligned to 8. */
	if (!((uintptr_t)priv_req->request.buf & 0x7))
		return 0;

	buf = priv_req->aligned_buf;

	/* allocate a new bounce buffer if none exists or the old one is
	 * too small for this request
	 */
	if (!buf || priv_req->request.length > buf->size) {
		buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
		if (!buf)
			return -ENOMEM;

		buf->size = priv_req->request.length;

		buf->buf = dma_alloc_coherent(priv_dev->sysdev,
					      buf->size,
					      &buf->dma,
					      GFP_ATOMIC);
		if (!buf->buf) {
			kfree(buf);
			return -ENOMEM;
		}

		if (priv_req->aligned_buf) {
			/*
			 * The previous (too small) bounce buffer cannot be
			 * freed here (this path uses GFP_ATOMIC, and the
			 * free happens with interrupts re-enabled in the
			 * work item), so mark it unused and let the
			 * workqueue reclaim it.
			 */
			trace_cdns3_free_aligned_request(priv_req);
			priv_req->aligned_buf->in_use = 0;
			queue_work(system_freezable_wq,
				   &priv_dev->aligned_buf_wq);
		}

		buf->in_use = 1;
		priv_req->aligned_buf = buf;

		list_add_tail(&buf->list,
			      &priv_dev->aligned_buf_list);
	}

	/* IN: stage outgoing data into the aligned buffer */
	if (priv_ep->dir == USB_DIR_IN) {
		memcpy(buf->buf, priv_req->request.buf,
		       priv_req->request.length);
	}

	priv_req->flags |= REQUEST_UNALIGNED;
	trace_cdns3_prepare_aligned_request(priv_req);

	return 0;
}
774
775 static int cdns3_wa1_update_guard(struct cdns3_endpoint *priv_ep,
776                                   struct cdns3_trb *trb)
777 {
778         struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
779
780         if (!priv_ep->wa1_set) {
781                 u32 doorbell;
782
783                 doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
784
785                 if (doorbell) {
786                         priv_ep->wa1_cycle_bit = priv_ep->pcs ? TRB_CYCLE : 0;
787                         priv_ep->wa1_set = 1;
788                         priv_ep->wa1_trb = trb;
789                         priv_ep->wa1_trb_index = priv_ep->enqueue;
790                         trace_cdns3_wa1(priv_ep, "set guard");
791                         return 0;
792                 }
793         }
794         return 1;
795 }
796
797 static void cdns3_wa1_tray_restore_cycle_bit(struct cdns3_device *priv_dev,
798                                              struct cdns3_endpoint *priv_ep)
799 {
800         int dma_index;
801         u32 doorbell;
802
803         doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
804         dma_index = cdns3_get_dma_pos(priv_dev, priv_ep);
805
806         if (!doorbell || dma_index != priv_ep->wa1_trb_index)
807                 cdns3_wa1_restore_cycle_bit(priv_ep);
808 }
809
/**
 * cdns3_ep_run_transfer - start transfer on no-default endpoint hardware
 * @priv_ep: endpoint object
 * @request: request to arm on @priv_ep's transfer ring
 *
 * Returns zero on success or negative value on failure
 */
int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
			  struct usb_request *request)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_request *priv_req;
	struct cdns3_trb *trb;
	dma_addr_t trb_dma;
	u32 togle_pcs = 1;
	int sg_iter = 0;
	int num_trb;
	int address;
	u32 control;
	int pcs;

	/* ISOC endpoints reserve one TRB per interval, others one per sg */
	if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
		num_trb = priv_ep->interval;
	else
		num_trb = request->num_sgs ? request->num_sgs : 1;

	if (num_trb > priv_ep->free_trbs) {
		priv_ep->flags |= EP_RING_FULL;
		return -ENOBUFS;
	}

	priv_req = to_cdns3_request(request);
	address = priv_ep->endpoint.desc->bEndpointAddress;

	priv_ep->flags |= EP_PENDING_REQUEST;

	/* must allocate buffer aligned to 8 */
	if (priv_req->flags & REQUEST_UNALIGNED)
		trb_dma = priv_req->aligned_buf->dma;
	else
		trb_dma = request->dma;

	trb = priv_ep->trb_pool + priv_ep->enqueue;
	priv_req->start_trb = priv_ep->enqueue;
	priv_req->trb = trb;

	cdns3_select_ep(priv_ep->cdns3_dev, address);

	/* prepare ring: the TD would reach the LINK TRB at the ring's end */
	if ((priv_ep->enqueue + num_trb)  >= (priv_ep->num_trbs - 1)) {
		struct cdns3_trb *link_trb;
		int doorbell, dma_index;
		u32 ch_bit = 0;

		doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
		dma_index = cdns3_get_dma_pos(priv_dev, priv_ep);

		/* Driver can't update LINK TRB if it is current processed. */
		if (doorbell && dma_index == priv_ep->num_trbs - 1) {
			priv_ep->flags |= EP_DEFERRED_DRDY;
			return -ENOBUFS;
		}

		/* update the Cycle bit in the Link TRB before starting DMA */
		link_trb = priv_ep->trb_pool + (priv_ep->num_trbs - 1);
		/*
		 * For TRs size equal 2 enabling TRB_CHAIN for epXin causes
		 * that DMA stuck at the LINK TRB.
		 * On the other hand, removing TRB_CHAIN for longer TRs for
		 * epXout cause that DMA stuck after handling LINK TRB.
		 * To eliminate this strange behavioral driver set TRB_CHAIN
		 * bit only for TR size > 2.
		 */
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC ||
		    TRBS_PER_SEGMENT > 2)
			ch_bit = TRB_CHAIN;

		link_trb->control = ((priv_ep->pcs) ? TRB_CYCLE : 0) |
				    TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit;
	}

	/* Work around 1 (see file header): guard the first TRB of the TD */
	if (priv_dev->dev_ver <= DEV_VER_V2)
		togle_pcs = cdns3_wa1_update_guard(priv_ep, trb);

	/* set incorrect Cycle Bit for first trb */
	control = priv_ep->pcs ? 0 : TRB_CYCLE;

	do {
		u32 length;
		u16 td_size = 0;

		/* fill TRB */
		control |= TRB_TYPE(TRB_NORMAL);
		trb->buffer = TRB_BUFFER(request->num_sgs == 0
				? trb_dma : request->sg[sg_iter].dma_address);

		if (likely(!request->num_sgs))
			length = request->length;
		else
			length = request->sg[sg_iter].length;

		/* TD size (in max-packet units) is only used from V2 on */
		if (likely(priv_dev->dev_ver >= DEV_VER_V2))
			td_size = DIV_ROUND_UP(length,
					       priv_ep->endpoint.maxpacket);

		trb->length = TRB_BURST_LEN(priv_ep->trb_burst_size) |
					TRB_LEN(length);
		/* TDL field lives in trb->length for SS, in control for HS */
		if (priv_dev->gadget.speed == USB_SPEED_SUPER)
			trb->length |= TRB_TDL_SS_SIZE(td_size);
		else
			control |= TRB_TDL_HS_SIZE(td_size);

		pcs = priv_ep->pcs ? TRB_CYCLE : 0;

		/*
		 * first trb should be prepared as last to avoid processing
		 * transfer too early
		 */
		if (sg_iter != 0)
			control |= pcs;

		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC  && !priv_ep->dir) {
			control |= TRB_IOC | TRB_ISP;
		} else {
			/* for last element in TD or in SG list */
			if (sg_iter == (num_trb - 1) && sg_iter != 0)
				control |= pcs | TRB_IOC | TRB_ISP;
		}

		/* defer writing the first TRB's control word (see above) */
		if (sg_iter)
			trb->control = control;
		else
			priv_req->trb->control = control;

		control = 0;
		++sg_iter;
		priv_req->end_trb = priv_ep->enqueue;
		cdns3_ep_inc_enq(priv_ep);
		trb = priv_ep->trb_pool + priv_ep->enqueue;
	} while (sg_iter < num_trb);

	trb = priv_req->trb;

	priv_req->flags |= REQUEST_PENDING;

	/* single-TRB TD: interrupt on completion of the first TRB */
	if (sg_iter == 1)
		trb->control |= TRB_IOC | TRB_ISP;

	/*
	 * Memory barrier - cycle bit must be set before other fields in trb.
	 */
	wmb();

	/* give the TD to the consumer by flipping the first TRB's cycle bit */
	if (togle_pcs)
		trb->control =  trb->control ^ 1;

	if (priv_dev->dev_ver <= DEV_VER_V2)
		cdns3_wa1_tray_restore_cycle_bit(priv_dev, priv_ep);

	trace_cdns3_prepare_trb(priv_ep, priv_req->trb);

	/*
	 * Memory barrier - Cycle Bit must be set before trb->length and
	 * trb->buffer fields.
	 */
	wmb();

	/*
	 * For DMULT mode we can set address to transfer ring only once after
	 * enabling endpoint.
	 */
	if (priv_ep->flags & EP_UPDATE_EP_TRBADDR) {
		/*
		 * Until SW is not ready to handle the OUT transfer the ISO OUT
		 * Endpoint should be disabled (EP_CFG.ENABLE = 0).
		 * EP_CFG_ENABLE must be set before updating ep_traddr.
		 */
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC  && !priv_ep->dir &&
		    !(priv_ep->flags & EP_QUIRK_ISO_OUT_EN)) {
			priv_ep->flags |= EP_QUIRK_ISO_OUT_EN;
			cdns3_set_register_bit(&priv_dev->regs->ep_cfg,
					       EP_CFG_ENABLE);
		}

		writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma +
					priv_req->start_trb * TRB_SIZE),
					&priv_dev->regs->ep_traddr);

		priv_ep->flags &= ~EP_UPDATE_EP_TRBADDR;
	}

	/* ring the doorbell unless WA1 deferred the TD or the EP is stalled */
	if (!priv_ep->wa1_set && !(priv_ep->flags & EP_STALLED)) {
		trace_cdns3_ring(priv_ep);
		/* clear TRBERR and EP_STS_DESCMIS before setting DRDY */
		writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);
		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
		trace_cdns3_doorbell_epx(priv_ep->name,
					 readl(&priv_dev->regs->ep_traddr));
	}

	/* WORKAROUND for transition to L0 */
	__cdns3_gadget_wakeup(priv_dev);

	return 0;
}
1015
/**
 * cdns3_set_hw_configuration - latch SET_CONFIGURATION in the controller
 * @priv_dev: extended gadget object
 *
 * Sets CFGSET, enables U1/U2 and restarts requests queued on endpoints
 * that were enabled before the hardware was configured. Idempotent:
 * returns immediately once hw_configured_flag is set.
 */
void cdns3_set_hw_configuration(struct cdns3_device *priv_dev)
{
	struct cdns3_endpoint *priv_ep;
	struct usb_ep *ep;
	int val;

	if (priv_dev->hw_configured_flag)
		return;

	writel(USB_CONF_CFGSET, &priv_dev->regs->usb_conf);
	/* NOTE(review): presumably completes the pending ep0 status stage
	 * of the SET_CONFIGURATION request - confirm against ep0 code.
	 */
	writel(EP_CMD_ERDY | EP_CMD_REQ_CMPL, &priv_dev->regs->ep_cmd);

	cdns3_set_register_bit(&priv_dev->regs->usb_conf,
			       USB_CONF_U1EN | USB_CONF_U2EN);

	/* wait until configuration set */
	readl_poll_timeout_atomic(&priv_dev->regs->usb_sts, val,
				  val & USB_STS_CFGSTS_MASK, 1, 100);

	priv_dev->hw_configured_flag = 1;

	/* kick transfers that were queued before configuration completed */
	list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
		if (ep->enabled) {
			priv_ep = ep_to_cdns3_ep(ep);
			cdns3_start_all_request(priv_dev, priv_ep);
		}
	}
}
1044
/**
 * cdns3_request_handled - check whether request has been handled by DMA
 *
 * @priv_ep: extended endpoint object.
 * @priv_req: request object for checking
 *
 * Endpoint must be selected before invoking this function.
 *
 * Returns false if request has not been handled by DMA, else returns true.
 *
 * SR - start ring
 * ER - end ring
 * DQ = priv_ep->dequeue - dequeue position
 * EQ = priv_ep->enqueue - enqueue position
 * ST = priv_req->start_trb - index of first TRB in transfer ring
 * ET = priv_req->end_trb - index of last TRB in transfer ring
 * CI = current_index - index of processed TRB by DMA.
 *
 * As first step, function checks if cycle bit for priv_req->start_trb is
 * correct.
 *
 * some rules:
 * 1. priv_ep->dequeue never exceeds current_index.
 * 2. priv_ep->enqueue never exceeds priv_ep->dequeue
 * 3. exception: priv_ep->enqueue == priv_ep->dequeue
 *    and priv_ep->free_trbs is zero.
 *    This case indicates that TR is full.
 *
 * Then we can split recognition into two parts:
 * Case 1 - priv_ep->dequeue < current_index
 *      SR ... EQ ... DQ ... CI ... ER
 *      SR ... DQ ... CI ... EQ ... ER
 *
 *      Request has been handled by DMA if ST and ET is between DQ and CI.
 *
 * Case 2 - priv_ep->dequeue > current_index
 * This situation takes place when CI goes through the LINK TRB at the end of
 * transfer ring.
 *      SR ... CI ... EQ ... DQ ... ER
 *
 *      Request has been handled by DMA if ET is less than CI or
 *      ET is greater or equal DQ.
 */
static bool cdns3_request_handled(struct cdns3_endpoint *priv_ep,
				  struct cdns3_request *priv_req)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_trb *trb = priv_req->trb;
	int current_index = 0;
	int handled = 0;
	int doorbell;

	current_index = cdns3_get_dma_pos(priv_dev, priv_ep);
	doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);

	trb = &priv_ep->trb_pool[priv_req->start_trb];

	/* cycle bit still owned by software: hardware never saw this TD */
	if ((trb->control  & TRB_CYCLE) != priv_ep->ccs)
		goto finish;

	/* DMA armed but still parked at the dequeue position: nothing done */
	if (doorbell == 1 && current_index == priv_ep->dequeue)
		goto finish;

	/* The corner case for TRBS_PER_SEGMENT equal 2). */
	if (TRBS_PER_SEGMENT == 2 && priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
		handled = 1;
		goto finish;
	}

	if (priv_ep->enqueue == priv_ep->dequeue &&
	    priv_ep->free_trbs == 0) {
		handled = 1;
	} else if (priv_ep->dequeue < current_index) {
		/* DMA sitting on the LINK TRB while DQ wrapped to 0: not done */
		if ((current_index == (priv_ep->num_trbs - 1)) &&
		    !priv_ep->dequeue)
			goto finish;

		if (priv_req->end_trb >= priv_ep->dequeue &&
		    priv_req->end_trb < current_index)
			handled = 1;
	} else if (priv_ep->dequeue  > current_index) {
		if (priv_req->end_trb  < current_index ||
		    priv_req->end_trb >= priv_ep->dequeue)
			handled = 1;
	}

finish:
	trace_cdns3_request_handled(priv_req, current_index, handled);

	return handled;
}
1136
/*
 * cdns3_transfer_completed - give back all requests finished by DMA
 * @priv_dev: extended gadget object
 * @priv_ep: endpoint whose pending list is drained
 *
 * Completes pending requests in FIFO order until the oldest unfinished
 * one is found, then arms the next TD (unless the endpoint is stalled or
 * a stall is pending).
 */
static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
				     struct cdns3_endpoint *priv_ep)
{
	struct cdns3_request *priv_req;
	struct usb_request *request;
	struct cdns3_trb *trb;

	while (!list_empty(&priv_ep->pending_req_list)) {
		request = cdns3_next_request(&priv_ep->pending_req_list);
		priv_req = to_cdns3_request(request);

		/* Re-select endpoint. It could be changed by other CPU during
		 * handling usb_gadget_giveback_request.
		 */
		cdns3_select_ep(priv_dev, priv_ep->endpoint.address);

		/* Oldest request not done yet: deliberately jump past the
		 * EP_PENDING_REQUEST clear below - requests are still pending.
		 */
		if (!cdns3_request_handled(priv_ep, priv_req))
			goto prepare_next_td;

		trb = priv_ep->trb_pool + priv_ep->dequeue;
		trace_cdns3_complete_trb(priv_ep, trb);

		/* dequeue position should match the request's first TRB */
		if (trb != priv_req->trb)
			dev_warn(priv_dev->dev,
				 "request_trb=0x%p, queue_trb=0x%p\n",
				 priv_req->trb, trb);

		request->actual = TRB_LEN(le32_to_cpu(trb->length));
		cdns3_move_deq_to_next_trb(priv_req);
		cdns3_gadget_giveback(priv_ep, priv_req, 0);

		/* with 2-TRB rings only one non-ISOC request is completed
		 * per pass (matches the corner case in cdns3_request_handled)
		 */
		if (priv_ep->type != USB_ENDPOINT_XFER_ISOC &&
		    TRBS_PER_SEGMENT == 2)
			break;
	}
	priv_ep->flags &= ~EP_PENDING_REQUEST;

prepare_next_td:
	if (!(priv_ep->flags & EP_STALLED) &&
	    !(priv_ep->flags & EP_STALL_PENDING))
		cdns3_start_all_request(priv_dev, priv_ep);
}
1179
1180 void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm)
1181 {
1182         struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
1183
1184         cdns3_wa1_restore_cycle_bit(priv_ep);
1185
1186         if (rearm) {
1187                 trace_cdns3_ring(priv_ep);
1188
1189                 /* Cycle Bit must be updated before arming DMA. */
1190                 wmb();
1191                 writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
1192
1193                 __cdns3_gadget_wakeup(priv_dev);
1194
1195                 trace_cdns3_doorbell_epx(priv_ep->name,
1196                                          readl(&priv_dev->regs->ep_traddr));
1197         }
1198 }
1199
/**
 * cdns3_check_ep_interrupt_proceed - Processes interrupt related to endpoint
 * @priv_ep: endpoint object
 *
 * Returns 0
 */
static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	u32 ep_sts_reg;

	cdns3_select_ep(priv_dev, priv_ep->endpoint.address);

	trace_cdns3_epx_irq(priv_dev, priv_ep);

	/* read-and-acknowledge the endpoint status bits */
	ep_sts_reg = readl(&priv_dev->regs->ep_sts);
	writel(ep_sts_reg, &priv_dev->regs->ep_sts);

	if (ep_sts_reg & EP_STS_TRBERR) {
		/* NOTE(review): the DESCMIS exclusion on < V2 presumably
		 * defers the stall while WA2 is in flight - confirm.
		 */
		if (priv_ep->flags & EP_STALL_PENDING &&
		    !(ep_sts_reg & EP_STS_DESCMIS &&
		    priv_dev->dev_ver < DEV_VER_V2)) {
			cdns3_ep_stall_flush(priv_ep);
		}

		/*
		 * For isochronous transfer driver completes request on
		 * IOC or on TRBERR. IOC appears only when device receive
		 * OUT data packet. If host disable stream or lost some packet
		 * then the only way to finish all queued transfer is to do it
		 * on TRBERR event.
		 */
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC &&
		    !priv_ep->wa1_set) {
			if (!priv_ep->dir) {
				u32 ep_cfg = readl(&priv_dev->regs->ep_cfg);

				/* re-arm the ISO OUT quirk: keep the EP
				 * disabled until the next TD is prepared
				 * (see cdns3_ep_run_transfer)
				 */
				ep_cfg &= ~EP_CFG_ENABLE;
				writel(ep_cfg, &priv_dev->regs->ep_cfg);
				priv_ep->flags &= ~EP_QUIRK_ISO_OUT_EN;
			}
			cdns3_transfer_completed(priv_dev, priv_ep);
		} else if (!(priv_ep->flags & EP_STALLED) &&
			  !(priv_ep->flags & EP_STALL_PENDING)) {
			if (priv_ep->flags & EP_DEFERRED_DRDY) {
				/* LINK TRB was busy when queuing; start now */
				priv_ep->flags &= ~EP_DEFERRED_DRDY;
				cdns3_start_all_request(priv_dev, priv_ep);
			} else {
				cdns3_rearm_transfer(priv_ep,
						     priv_ep->wa1_set);
			}
		}
	}

	if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP)) {
		if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
			if (ep_sts_reg & EP_STS_ISP)
				priv_ep->flags |= EP_QUIRK_END_TRANSFER;
			else
				priv_ep->flags &= ~EP_QUIRK_END_TRANSFER;
		}

		cdns3_transfer_completed(priv_dev, priv_ep);
	}

	/*
	 * WA2: this condition should only be met when
	 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET or
	 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN.
	 * In other cases this interrupt will be disabled.
	 */
	if (ep_sts_reg & EP_STS_DESCMIS && priv_dev->dev_ver < DEV_VER_V2 &&
	    !(priv_ep->flags & EP_STALLED))
		cdns3_wa2_descmissing_packet(priv_ep);

	return 0;
}
1277
1278 static void cdns3_disconnect_gadget(struct cdns3_device *priv_dev)
1279 {
1280         if (priv_dev->gadget_driver && priv_dev->gadget_driver->disconnect) {
1281                 spin_unlock(&priv_dev->lock);
1282                 priv_dev->gadget_driver->disconnect(&priv_dev->gadget);
1283                 spin_lock(&priv_dev->lock);
1284         }
1285 }
1286
/**
 * cdns3_check_usb_interrupt_proceed - Processes interrupt related to device
 * @priv_dev: extended gadget object
 * @usb_ists: bitmap representation of device's reported interrupts
 * (usb_ists register value)
 */
static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev,
					      u32 usb_ists)
{
	int speed = 0;

	trace_cdns3_usb_irq(priv_dev, usb_ists);
	if (usb_ists & USB_ISTS_L1ENTI) {
		/*
		 * WORKAROUND: CDNS3 controller has issue with hardware resuming
		 * from L1. To fix it, if any DMA transfer is pending driver
		 * must start driving resume signal immediately.
		 */
		if (readl(&priv_dev->regs->drbl))
			__cdns3_gadget_wakeup(priv_dev);
	}

	/* Connection detected */
	if (usb_ists & (USB_ISTS_CON2I | USB_ISTS_CONI)) {
		speed = cdns3_get_speed(priv_dev);
		priv_dev->gadget.speed = speed;
		usb_gadget_set_state(&priv_dev->gadget, USB_STATE_POWERED);
		cdns3_ep0_config(priv_dev);
	}

	/* Disconnection detected */
	if (usb_ists & (USB_ISTS_DIS2I | USB_ISTS_DISI)) {
		cdns3_disconnect_gadget(priv_dev);
		priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
		usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED);
		cdns3_hw_reset_eps_config(priv_dev);
	}

	/* L2/U3 entry: forward suspend to the gadget driver */
	if (usb_ists & (USB_ISTS_L2ENTI | USB_ISTS_U3ENTI)) {
		if (priv_dev->gadget_driver &&
		    priv_dev->gadget_driver->suspend) {
			/* drop the lock around the gadget callback */
			spin_unlock(&priv_dev->lock);
			priv_dev->gadget_driver->suspend(&priv_dev->gadget);
			spin_lock(&priv_dev->lock);
		}
	}

	/* L2/U3 exit: forward resume to the gadget driver */
	if (usb_ists & (USB_ISTS_L2EXTI | USB_ISTS_U3EXTI)) {
		if (priv_dev->gadget_driver &&
		    priv_dev->gadget_driver->resume) {
			spin_unlock(&priv_dev->lock);
			priv_dev->gadget_driver->resume(&priv_dev->gadget);
			spin_lock(&priv_dev->lock);
		}
	}

	/* reset */
	if (usb_ists & (USB_ISTS_UWRESI | USB_ISTS_UHRESI | USB_ISTS_U2RESI)) {
		if (priv_dev->gadget_driver) {
			spin_unlock(&priv_dev->lock);
			usb_gadget_udc_reset(&priv_dev->gadget,
					     priv_dev->gadget_driver);
			spin_lock(&priv_dev->lock);

			/* read again to check the actual speed */
			speed = cdns3_get_speed(priv_dev);
			priv_dev->gadget.speed = speed;
			cdns3_hw_reset_eps_config(priv_dev);
			cdns3_ep0_config(priv_dev);
		}
	}
}
1359
/**
 * cdns3_device_irq_handler- interrupt handler for device part of controller
 *
 * @irq: irq number for cdns3 core device
 * @data: structure of cdns3
 *
 * Hard-IRQ half: only masks the detected interrupt sources and defers the
 * real processing to cdns3_device_thread_irq_handler().
 *
 * Returns IRQ_HANDLED or IRQ_NONE
 */
static irqreturn_t cdns3_device_irq_handler(int irq, void *data)
{
	struct cdns3_device *priv_dev;
	struct cdns3 *cdns = data;
	irqreturn_t ret = IRQ_NONE;
	u32 reg;

	priv_dev = cdns->gadget_dev;

	/* check USB device interrupt */
	reg = readl(&priv_dev->regs->usb_ists);
	if (reg) {
		/* After masking interrupts the new interrupts won't be
		 * reported in usb_ists/ep_ists. In order to not lose some
		 * of them driver disables only detected interrupts.
		 * They will be enabled ASAP after clearing source of
		 * interrupt. This an unusual behavior only applies to
		 * usb_ists register.
		 */
		reg = ~reg & readl(&priv_dev->regs->usb_ien);
		/* mask deferred interrupt. */
		writel(reg, &priv_dev->regs->usb_ien);
		ret = IRQ_WAKE_THREAD;
	}

	/* check endpoint interrupt */
	reg = readl(&priv_dev->regs->ep_ists);
	if (reg) {
		/* mask all endpoint interrupts until the thread handles them */
		writel(0, &priv_dev->regs->ep_ien);
		ret = IRQ_WAKE_THREAD;
	}

	return ret;
}
1402
/**
 * cdns3_device_thread_irq_handler- interrupt handler for device part
 * of controller
 *
 * @irq: irq number for cdns3 core device
 * @data: structure of cdns3
 *
 * Threaded half: acknowledges and processes the interrupt sources that the
 * hard-IRQ half masked, then re-enables the interrupt masks.
 *
 * Returns IRQ_HANDLED or IRQ_NONE
 */
static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data)
{
	struct cdns3_device *priv_dev;
	struct cdns3 *cdns = data;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;
	int bit;
	u32 reg;

	priv_dev = cdns->gadget_dev;
	spin_lock_irqsave(&priv_dev->lock, flags);

	reg = readl(&priv_dev->regs->usb_ists);
	if (reg) {
		/* ack the sources, then restore the full device-interrupt
		 * mask that the hard-IRQ half trimmed
		 */
		writel(reg, &priv_dev->regs->usb_ists);
		writel(USB_IEN_INIT, &priv_dev->regs->usb_ien);
		cdns3_check_usb_interrupt_proceed(priv_dev, reg);
		ret = IRQ_HANDLED;
	}

	reg = readl(&priv_dev->regs->ep_ists);

	/* handle default endpoint OUT */
	if (reg & EP_ISTS_EP_OUT0) {
		cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_OUT);
		ret = IRQ_HANDLED;
	}

	/* handle default endpoint IN */
	if (reg & EP_ISTS_EP_IN0) {
		cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_IN);
		ret = IRQ_HANDLED;
	}

	/* check if interrupt from non default endpoint, if no exit */
	reg &= ~(EP_ISTS_EP_OUT0 | EP_ISTS_EP_IN0);
	if (!reg)
		goto irqend;

	/* bit position in ep_ists matches the priv_dev->eps[] index */
	for_each_set_bit(bit, (unsigned long *)&reg,
			 sizeof(u32) * BITS_PER_BYTE) {
		cdns3_check_ep_interrupt_proceed(priv_dev->eps[bit]);
		ret = IRQ_HANDLED;
	}

irqend:
	/* re-enable all endpoint interrupts (masked in the hard-IRQ half) */
	writel(~0, &priv_dev->regs->ep_ien);
	spin_unlock_irqrestore(&priv_dev->lock, flags);

	return ret;
}
1463
1464 /**
1465  * cdns3_ep_onchip_buffer_reserve - Try to reserve onchip buf for EP
1466  *
1467  * The real reservation will occur during write to EP_CFG register,
1468  * this function is used to check if the 'size' reservation is allowed.
1469  *
1470  * @priv_dev: extended gadget object
1471  * @size: the size (KB) for EP would like to allocate
1472  * @is_in: endpoint direction
1473  *
1474  * Return 0 if the required size can met or negative value on failure
1475  */
1476 static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev,
1477                                           int size, int is_in)
1478 {
1479         int remained;
1480
1481         /* 2KB are reserved for EP0*/
1482         remained = priv_dev->onchip_buffers - priv_dev->onchip_used_size - 2;
1483
1484         if (is_in) {
1485                 if (remained < size)
1486                         return -EPERM;
1487
1488                 priv_dev->onchip_used_size += size;
1489         } else {
1490                 int required;
1491
1492                 /**
1493                  *  ALL OUT EPs are shared the same chunk onchip memory, so
1494                  * driver checks if it already has assigned enough buffers
1495                  */
1496                 if (priv_dev->out_mem_is_allocated >= size)
1497                         return 0;
1498
1499                 required = size - priv_dev->out_mem_is_allocated;
1500
1501                 if (required > remained)
1502                         return -EPERM;
1503
1504                 priv_dev->out_mem_is_allocated += required;
1505                 priv_dev->onchip_used_size += required;
1506         }
1507
1508         return 0;
1509 }
1510
/*
 * cdns3_configure_dmult - configure DMA multi-descriptor (DMULT) operation
 * @priv_dev: extended gadget object
 * @priv_ep: endpoint to configure; may be NULL - per-endpoint setup is
 *           only performed on controllers >= DEV_VER_V3
 */
void cdns3_configure_dmult(struct cdns3_device *priv_dev,
			   struct cdns3_endpoint *priv_ep)
{
	struct cdns3_usb_regs __iomem *regs = priv_dev->regs;

	/* For dev_ver > DEV_VER_V2 DMULT is configured per endpoint */
	if (priv_dev->dev_ver <= DEV_VER_V2)
		writel(USB_CONF_DMULT, &regs->usb_conf);

	/* V2 takes the TD length (TDL) from the TRB */
	if (priv_dev->dev_ver == DEV_VER_V2)
		writel(USB_CONF2_EN_TDL_TRB, &regs->usb_conf2);

	if (priv_dev->dev_ver >= DEV_VER_V3 && priv_ep) {
		u32 mask;

		/* IN endpoints occupy the upper 16 bits of these registers */
		if (priv_ep->dir)
			mask = BIT(priv_ep->num + 16);
		else
			mask = BIT(priv_ep->num);

		if (priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
			cdns3_set_register_bit(&regs->tdl_from_trb, mask);
			cdns3_set_register_bit(&regs->tdl_beh, mask);
			cdns3_set_register_bit(&regs->tdl_beh2, mask);
			cdns3_set_register_bit(&regs->dma_adv_td, mask);
		}

		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir)
			cdns3_set_register_bit(&regs->tdl_from_trb, mask);

		cdns3_set_register_bit(&regs->dtrans, mask);
	}
}
1544
/**
 * cdns3_ep_config - Configure hardware endpoint
 * @priv_ep: extended endpoint object
 *
 * Derives type, max packet size, burst, mult and buffering from the
 * endpoint descriptor and current gadget speed, reserves on-chip buffer
 * space and writes the result to EP_CFG. Returns silently (endpoint left
 * unconfigured) on unsupported speed or exhausted on-chip memory.
 */
void cdns3_ep_config(struct cdns3_endpoint *priv_ep)
{
	bool is_iso_ep = (priv_ep->type == USB_ENDPOINT_XFER_ISOC);
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	u32 bEndpointAddress = priv_ep->num | priv_ep->dir;
	u32 max_packet_size = 0;
	u8 maxburst = 0;
	u32 ep_cfg = 0;
	u8 buffering;
	u8 mult = 0;
	int ret;

	buffering = CDNS3_EP_BUF_SIZE - 1;

	cdns3_configure_dmult(priv_dev, priv_ep);

	switch (priv_ep->type) {
	case USB_ENDPOINT_XFER_INT:
		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_INT);

		/* hardware TDL check: V2 OUT endpoints and all newer versions */
		if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) ||
		    priv_dev->dev_ver > DEV_VER_V2)
			ep_cfg |= EP_CFG_TDL_CHK;
		break;
	case USB_ENDPOINT_XFER_BULK:
		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK);

		if ((priv_dev->dev_ver == DEV_VER_V2  && !priv_ep->dir) ||
		    priv_dev->dev_ver > DEV_VER_V2)
			ep_cfg |= EP_CFG_TDL_CHK;
		break;
	default:
		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_ISOC);
		mult = CDNS3_EP_ISO_HS_MULT - 1;
		buffering = mult + 1;
	}

	switch (priv_dev->gadget.speed) {
	case USB_SPEED_FULL:
		max_packet_size = is_iso_ep ? 1023 : 64;
		break;
	case USB_SPEED_HIGH:
		max_packet_size = is_iso_ep ? 1024 : 512;
		break;
	case USB_SPEED_SUPER:
		/* mult is not supported for SS; limitation assumed by driver */
		mult = 0;
		max_packet_size = 1024;
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
			maxburst = CDNS3_EP_ISO_SS_BURST - 1;
			buffering = (mult + 1) *
				    (maxburst + 1);

			/* extra buffer for service intervals longer than one */
			if (priv_ep->interval > 1)
				buffering++;
		} else {
			maxburst = CDNS3_EP_BUF_SIZE - 1;
		}
		break;
	default:
		/* all other speeds are not supported */
		return;
	}

	/* TRB burst size scales with the max packet size */
	if (max_packet_size == 1024)
		priv_ep->trb_burst_size = 128;
	else if (max_packet_size >= 512)
		priv_ep->trb_burst_size = 64;
	else
		priv_ep->trb_burst_size = 16;

	ret = cdns3_ep_onchip_buffer_reserve(priv_dev, buffering + 1,
					     !!priv_ep->dir);
	if (ret) {
		dev_err(priv_dev->dev, "onchip mem is full, ep is invalid\n");
		return;
	}

	ep_cfg |= EP_CFG_MAXPKTSIZE(max_packet_size) |
		  EP_CFG_MULT(mult) |
		  EP_CFG_BUFFERING(buffering) |
		  EP_CFG_MAXBURST(maxburst);

	cdns3_select_ep(priv_dev, bEndpointAddress);
	writel(ep_cfg, &priv_dev->regs->ep_cfg);

	dev_dbg(priv_dev->dev, "Configure %s: with val %08x\n",
		priv_ep->name, ep_cfg);
}
1638
1639 /* Find correct direction for HW endpoint according to description */
1640 static int cdns3_ep_dir_is_correct(struct usb_endpoint_descriptor *desc,
1641                                    struct cdns3_endpoint *priv_ep)
1642 {
1643         return (priv_ep->endpoint.caps.dir_in && usb_endpoint_dir_in(desc)) ||
1644                (priv_ep->endpoint.caps.dir_out && usb_endpoint_dir_out(desc));
1645 }
1646
1647 static struct
1648 cdns3_endpoint *cdns3_find_available_ep(struct cdns3_device *priv_dev,
1649                                         struct usb_endpoint_descriptor *desc)
1650 {
1651         struct usb_ep *ep;
1652         struct cdns3_endpoint *priv_ep;
1653
1654         list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
1655                 unsigned long num;
1656                 int ret;
1657                 /* ep name pattern likes epXin or epXout */
1658                 char c[2] = {ep->name[2], '\0'};
1659
1660                 ret = kstrtoul(c, 10, &num);
1661                 if (ret)
1662                         return ERR_PTR(ret);
1663
1664                 priv_ep = ep_to_cdns3_ep(ep);
1665                 if (cdns3_ep_dir_is_correct(desc, priv_ep)) {
1666                         if (!(priv_ep->flags & EP_CLAIMED)) {
1667                                 priv_ep->num  = num;
1668                                 return priv_ep;
1669                         }
1670                 }
1671         }
1672
1673         return ERR_PTR(-ENOENT);
1674 }
1675
1676 /*
1677  *  Cadence IP has one limitation that all endpoints must be configured
1678  * (Type & MaxPacketSize) before setting configuration through hardware
1679  * register, it means we can't change endpoints configuration after
1680  * set_configuration.
1681  *
 * This function sets the EP_CLAIMED flag, which is added when the gadget
 * driver uses usb_ep_autoconfig to configure a specific endpoint;
 * when the udc driver receives a set_configuration request,
 * it goes through all claimed endpoints and configures them
 * accordingly.
1687  *
1688  * At usb_ep_ops.enable/disable, we only enable and disable endpoint through
1689  * ep_cfg register which can be changed after set_configuration, and do
1690  * some software operation accordingly.
1691  */
1692 static struct
1693 usb_ep *cdns3_gadget_match_ep(struct usb_gadget *gadget,
1694                               struct usb_endpoint_descriptor *desc,
1695                               struct usb_ss_ep_comp_descriptor *comp_desc)
1696 {
1697         struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
1698         struct cdns3_endpoint *priv_ep;
1699         unsigned long flags;
1700
1701         priv_ep = cdns3_find_available_ep(priv_dev, desc);
1702         if (IS_ERR(priv_ep)) {
1703                 dev_err(priv_dev->dev, "no available ep\n");
1704                 return NULL;
1705         }
1706
1707         dev_dbg(priv_dev->dev, "match endpoint: %s\n", priv_ep->name);
1708
1709         spin_lock_irqsave(&priv_dev->lock, flags);
1710         priv_ep->endpoint.desc = desc;
1711         priv_ep->dir  = usb_endpoint_dir_in(desc) ? USB_DIR_IN : USB_DIR_OUT;
1712         priv_ep->type = usb_endpoint_type(desc);
1713         priv_ep->flags |= EP_CLAIMED;
1714         priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
1715
1716         spin_unlock_irqrestore(&priv_dev->lock, flags);
1717         return &priv_ep->endpoint;
1718 }
1719
1720 /**
1721  * cdns3_gadget_ep_alloc_request Allocates request
1722  * @ep: endpoint object associated with request
1723  * @gfp_flags: gfp flags
1724  *
1725  * Returns allocated request address, NULL on allocation error
1726  */
1727 struct usb_request *cdns3_gadget_ep_alloc_request(struct usb_ep *ep,
1728                                                   gfp_t gfp_flags)
1729 {
1730         struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
1731         struct cdns3_request *priv_req;
1732
1733         priv_req = kzalloc(sizeof(*priv_req), gfp_flags);
1734         if (!priv_req)
1735                 return NULL;
1736
1737         priv_req->priv_ep = priv_ep;
1738
1739         trace_cdns3_alloc_request(priv_req);
1740         return &priv_req->request;
1741 }
1742
1743 /**
1744  * cdns3_gadget_ep_free_request Free memory occupied by request
1745  * @ep: endpoint object associated with request
1746  * @request: request to free memory
1747  */
1748 void cdns3_gadget_ep_free_request(struct usb_ep *ep,
1749                                   struct usb_request *request)
1750 {
1751         struct cdns3_request *priv_req = to_cdns3_request(request);
1752
1753         if (priv_req->aligned_buf)
1754                 priv_req->aligned_buf->in_use = 0;
1755
1756         trace_cdns3_free_request(priv_req);
1757         kfree(priv_req);
1758 }
1759
1760 /**
1761  * cdns3_gadget_ep_enable Enable endpoint
1762  * @ep: endpoint object
1763  * @desc: endpoint descriptor
1764  *
1765  * Returns 0 on success, error code elsewhere
1766  */
1767 static int cdns3_gadget_ep_enable(struct usb_ep *ep,
1768                                   const struct usb_endpoint_descriptor *desc)
1769 {
1770         struct cdns3_endpoint *priv_ep;
1771         struct cdns3_device *priv_dev;
1772         u32 reg = EP_STS_EN_TRBERREN;
1773         u32 bEndpointAddress;
1774         unsigned long flags;
1775         int enable = 1;
1776         int ret;
1777         int val;
1778
1779         priv_ep = ep_to_cdns3_ep(ep);
1780         priv_dev = priv_ep->cdns3_dev;
1781
1782         if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
1783                 dev_dbg(priv_dev->dev, "usbss: invalid parameters\n");
1784                 return -EINVAL;
1785         }
1786
1787         if (!desc->wMaxPacketSize) {
1788                 dev_err(priv_dev->dev, "usbss: missing wMaxPacketSize\n");
1789                 return -EINVAL;
1790         }
1791
1792         if (dev_WARN_ONCE(priv_dev->dev, priv_ep->flags & EP_ENABLED,
1793                           "%s is already enabled\n", priv_ep->name))
1794                 return 0;
1795
1796         spin_lock_irqsave(&priv_dev->lock, flags);
1797
1798         priv_ep->endpoint.desc = desc;
1799         priv_ep->type = usb_endpoint_type(desc);
1800         priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
1801
1802         if (priv_ep->interval > ISO_MAX_INTERVAL &&
1803             priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
1804                 dev_err(priv_dev->dev, "Driver is limited to %d period\n",
1805                         ISO_MAX_INTERVAL);
1806
1807                 ret =  -EINVAL;
1808                 goto exit;
1809         }
1810
1811         ret = cdns3_allocate_trb_pool(priv_ep);
1812
1813         if (ret)
1814                 goto exit;
1815
1816         bEndpointAddress = priv_ep->num | priv_ep->dir;
1817         cdns3_select_ep(priv_dev, bEndpointAddress);
1818
1819         trace_cdns3_gadget_ep_enable(priv_ep);
1820
1821         writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
1822
1823         ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
1824                                         !(val & (EP_CMD_CSTALL | EP_CMD_EPRST)),
1825                                         1, 1000);
1826
1827         if (unlikely(ret)) {
1828                 cdns3_free_trb_pool(priv_ep);
1829                 ret =  -EINVAL;
1830                 goto exit;
1831         }
1832
1833         /* enable interrupt for selected endpoint */
1834         cdns3_set_register_bit(&priv_dev->regs->ep_ien,
1835                                BIT(cdns3_ep_addr_to_index(bEndpointAddress)));
1836
1837         if (priv_dev->dev_ver < DEV_VER_V2)
1838                 cdns3_wa2_enable_detection(priv_dev, priv_ep, reg);
1839
1840         writel(reg, &priv_dev->regs->ep_sts_en);
1841
1842         /*
1843          * For some versions of controller at some point during ISO OUT traffic
1844          * DMA reads Transfer Ring for the EP which has never got doorbell.
1845          * This issue was detected only on simulation, but to avoid this issue
1846          * driver add protection against it. To fix it driver enable ISO OUT
1847          * endpoint before setting DRBL. This special treatment of ISO OUT
1848          * endpoints are recommended by controller specification.
1849          */
1850         if (priv_ep->type == USB_ENDPOINT_XFER_ISOC  && !priv_ep->dir)
1851                 enable = 0;
1852
1853         if (enable)
1854                 cdns3_set_register_bit(&priv_dev->regs->ep_cfg, EP_CFG_ENABLE);
1855
1856         ep->desc = desc;
1857         priv_ep->flags &= ~(EP_PENDING_REQUEST | EP_STALLED | EP_STALL_PENDING |
1858                             EP_QUIRK_ISO_OUT_EN | EP_QUIRK_EXTRA_BUF_EN);
1859         priv_ep->flags |= EP_ENABLED | EP_UPDATE_EP_TRBADDR;
1860         priv_ep->wa1_set = 0;
1861         priv_ep->enqueue = 0;
1862         priv_ep->dequeue = 0;
1863         reg = readl(&priv_dev->regs->ep_sts);
1864         priv_ep->pcs = !!EP_STS_CCS(reg);
1865         priv_ep->ccs = !!EP_STS_CCS(reg);
1866         /* one TRB is reserved for link TRB used in DMULT mode*/
1867         priv_ep->free_trbs = priv_ep->num_trbs - 1;
1868 exit:
1869         spin_unlock_irqrestore(&priv_dev->lock, flags);
1870
1871         return ret;
1872 }
1873
1874 /**
1875  * cdns3_gadget_ep_disable Disable endpoint
1876  * @ep: endpoint object
1877  *
1878  * Returns 0 on success, error code elsewhere
1879  */
1880 static int cdns3_gadget_ep_disable(struct usb_ep *ep)
1881 {
1882         struct cdns3_endpoint *priv_ep;
1883         struct cdns3_request *priv_req;
1884         struct cdns3_device *priv_dev;
1885         struct usb_request *request;
1886         unsigned long flags;
1887         int ret = 0;
1888         u32 ep_cfg;
1889         int val;
1890
1891         if (!ep) {
1892                 pr_err("usbss: invalid parameters\n");
1893                 return -EINVAL;
1894         }
1895
1896         priv_ep = ep_to_cdns3_ep(ep);
1897         priv_dev = priv_ep->cdns3_dev;
1898
1899         if (dev_WARN_ONCE(priv_dev->dev, !(priv_ep->flags & EP_ENABLED),
1900                           "%s is already disabled\n", priv_ep->name))
1901                 return 0;
1902
1903         spin_lock_irqsave(&priv_dev->lock, flags);
1904
1905         trace_cdns3_gadget_ep_disable(priv_ep);
1906
1907         cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);
1908
1909         ep_cfg = readl(&priv_dev->regs->ep_cfg);
1910         ep_cfg &= ~EP_CFG_ENABLE;
1911         writel(ep_cfg, &priv_dev->regs->ep_cfg);
1912
1913         /**
1914          * Driver needs some time before resetting endpoint.
1915          * It need waits for clearing DBUSY bit or for timeout expired.
1916          * 10us is enough time for controller to stop transfer.
1917          */
1918         readl_poll_timeout_atomic(&priv_dev->regs->ep_sts, val,
1919                                   !(val & EP_STS_DBUSY), 1, 10);
1920         writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
1921
1922         readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
1923                                   !(val & (EP_CMD_CSTALL | EP_CMD_EPRST)),
1924                                   1, 1000);
1925         if (unlikely(ret))
1926                 dev_err(priv_dev->dev, "Timeout: %s resetting failed.\n",
1927                         priv_ep->name);
1928
1929         while (!list_empty(&priv_ep->pending_req_list)) {
1930                 request = cdns3_next_request(&priv_ep->pending_req_list);
1931
1932                 cdns3_gadget_giveback(priv_ep, to_cdns3_request(request),
1933                                       -ESHUTDOWN);
1934         }
1935
1936         while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
1937                 priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
1938
1939                 kfree(priv_req->request.buf);
1940                 cdns3_gadget_ep_free_request(&priv_ep->endpoint,
1941                                              &priv_req->request);
1942                 list_del_init(&priv_req->list);
1943                 --priv_ep->wa2_counter;
1944         }
1945
1946         while (!list_empty(&priv_ep->deferred_req_list)) {
1947                 request = cdns3_next_request(&priv_ep->deferred_req_list);
1948
1949                 cdns3_gadget_giveback(priv_ep, to_cdns3_request(request),
1950                                       -ESHUTDOWN);
1951         }
1952
1953         priv_ep->descmis_req = NULL;
1954
1955         ep->desc = NULL;
1956         priv_ep->flags &= ~EP_ENABLED;
1957
1958         spin_unlock_irqrestore(&priv_dev->lock, flags);
1959
1960         return ret;
1961 }
1962
/**
 * __cdns3_gadget_ep_queue Transfer data on endpoint
 * Must be called with priv_dev->lock held; cdns3_gadget_ep_queue is the
 * locking wrapper.
 * @ep: endpoint object
 * @request: request object
 * @gfp_flags: gfp flags
 *
 * Returns 0 on success, error code elsewhere
 */
static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
                                   struct usb_request *request,
                                   gfp_t gfp_flags)
{
        struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
        struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
        struct cdns3_request *priv_req;
        int ret = 0;

        request->actual = 0;
        request->status = -EINPROGRESS;
        priv_req = to_cdns3_request(request);
        trace_cdns3_ep_queue(priv_req);

        /* pre-V2 controllers need workaround 2 (see file header comment) */
        if (priv_dev->dev_ver < DEV_VER_V2) {
                ret = cdns3_wa2_gadget_ep_queue(priv_dev, priv_ep,
                                                priv_req);

                /*
                 * NOTE(review): positive EINPROGRESS appears to be a
                 * deliberate sentinel meaning "consumed by the workaround,
                 * nothing more to do" — confirm against
                 * cdns3_wa2_gadget_ep_queue.
                 */
                if (ret == EINPROGRESS)
                        return 0;
        }

        /* bounce misaligned buffers into a DMA-able aligned copy */
        ret = cdns3_prepare_aligned_request_buf(priv_req);
        if (ret < 0)
                return ret;

        ret = usb_gadget_map_request_by_dev(priv_dev->sysdev, request,
                                            usb_endpoint_dir_in(ep->desc));
        if (ret)
                return ret;

        list_add_tail(&request->list, &priv_ep->deferred_req_list);

        /*
         * If hardware endpoint configuration has not been set yet then
         * just queue request in deferred list. Transfer will be started in
         * cdns3_set_hw_configuration.
         */
        if (priv_dev->hw_configured_flag && !(priv_ep->flags & EP_STALLED) &&
            !(priv_ep->flags & EP_STALL_PENDING))
                cdns3_start_all_request(priv_dev, priv_ep);

        return 0;
}
2015
2016 static int cdns3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
2017                                  gfp_t gfp_flags)
2018 {
2019         struct usb_request *zlp_request;
2020         struct cdns3_endpoint *priv_ep;
2021         struct cdns3_device *priv_dev;
2022         unsigned long flags;
2023         int ret;
2024
2025         if (!request || !ep)
2026                 return -EINVAL;
2027
2028         priv_ep = ep_to_cdns3_ep(ep);
2029         priv_dev = priv_ep->cdns3_dev;
2030
2031         spin_lock_irqsave(&priv_dev->lock, flags);
2032
2033         ret = __cdns3_gadget_ep_queue(ep, request, gfp_flags);
2034
2035         if (ret == 0 && request->zero && request->length &&
2036             (request->length % ep->maxpacket == 0)) {
2037                 struct cdns3_request *priv_req;
2038
2039                 zlp_request = cdns3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
2040                 zlp_request->buf = priv_dev->zlp_buf;
2041                 zlp_request->length = 0;
2042
2043                 priv_req = to_cdns3_request(zlp_request);
2044                 priv_req->flags |= REQUEST_ZLP;
2045
2046                 dev_dbg(priv_dev->dev, "Queuing ZLP for endpoint: %s\n",
2047                         priv_ep->name);
2048                 ret = __cdns3_gadget_ep_queue(ep, zlp_request, gfp_flags);
2049         }
2050
2051         spin_unlock_irqrestore(&priv_dev->lock, flags);
2052         return ret;
2053 }
2054
2055 /**
2056  * cdns3_gadget_ep_dequeue Remove request from transfer queue
2057  * @ep: endpoint object associated with request
2058  * @request: request object
2059  *
2060  * Returns 0 on success, error code elsewhere
2061  */
2062 int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
2063                             struct usb_request *request)
2064 {
2065         struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
2066         struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
2067         struct usb_request *req, *req_temp;
2068         struct cdns3_request *priv_req;
2069         struct cdns3_trb *link_trb;
2070         unsigned long flags;
2071         int ret = 0;
2072
2073         if (!ep || !request || !ep->desc)
2074                 return -EINVAL;
2075
2076         spin_lock_irqsave(&priv_dev->lock, flags);
2077
2078         priv_req = to_cdns3_request(request);
2079
2080         trace_cdns3_ep_dequeue(priv_req);
2081
2082         cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);
2083
2084         list_for_each_entry_safe(req, req_temp, &priv_ep->pending_req_list,
2085                                  list) {
2086                 if (request == req)
2087                         goto found;
2088         }
2089
2090         list_for_each_entry_safe(req, req_temp, &priv_ep->deferred_req_list,
2091                                  list) {
2092                 if (request == req)
2093                         goto found;
2094         }
2095
2096         goto not_found;
2097
2098 found:
2099
2100         if (priv_ep->wa1_trb == priv_req->trb)
2101                 cdns3_wa1_restore_cycle_bit(priv_ep);
2102
2103         link_trb = priv_req->trb;
2104         cdns3_move_deq_to_next_trb(priv_req);
2105         cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET);
2106
2107         /* Update ring */
2108         request = cdns3_next_request(&priv_ep->deferred_req_list);
2109         if (request) {
2110                 priv_req = to_cdns3_request(request);
2111
2112                 link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma +
2113                                               (priv_req->start_trb * TRB_SIZE));
2114                 link_trb->control = (link_trb->control & TRB_CYCLE) |
2115                                     TRB_TYPE(TRB_LINK) | TRB_CHAIN | TRB_TOGGLE;
2116         } else {
2117                 priv_ep->flags |= EP_UPDATE_EP_TRBADDR;
2118         }
2119
2120 not_found:
2121         spin_unlock_irqrestore(&priv_dev->lock, flags);
2122         return ret;
2123 }
2124
2125 /**
2126  * __cdns3_gadget_ep_set_halt Sets stall on selected endpoint
2127  * Should be called after acquiring spin_lock and selecting ep
2128  * @ep: endpoint object to set stall on.
2129  */
2130 void __cdns3_gadget_ep_set_halt(struct cdns3_endpoint *priv_ep)
2131 {
2132         struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
2133
2134         trace_cdns3_halt(priv_ep, 1, 0);
2135
2136         if (!(priv_ep->flags & EP_STALLED)) {
2137                 u32 ep_sts_reg = readl(&priv_dev->regs->ep_sts);
2138
2139                 if (!(ep_sts_reg & EP_STS_DBUSY))
2140                         cdns3_ep_stall_flush(priv_ep);
2141                 else
2142                         priv_ep->flags |= EP_STALL_PENDING;
2143         }
2144 }
2145
/**
 * __cdns3_gadget_ep_clear_halt Clears stall on selected endpoint
 * Should be called after acquiring spin_lock and selecting ep
 * @priv_ep: endpoint object to clear stall on
 */
int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
{
        struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
        struct usb_request *request;
        int ret;
        int val;

        trace_cdns3_halt(priv_ep, 0, 0);

        /* clear the stall and reset the endpoint in a single command write */
        writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd);

        /* wait for EPRST cleared */
        ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
                                        !(val & EP_CMD_EPRST), 1, 100);
        if (ret)
                return -EINVAL;

        priv_ep->flags &= ~(EP_STALLED | EP_STALL_PENDING);

        request = cdns3_next_request(&priv_ep->pending_req_list);

        /* restart the first pending request, if one was interrupted */
        if (request)
                cdns3_rearm_transfer(priv_ep, 1);

        /* kick any requests that were deferred while the EP was stalled */
        cdns3_start_all_request(priv_dev, priv_ep);
        return ret;
}
2178
2179 /**
2180  * cdns3_gadget_ep_set_halt Sets/clears stall on selected endpoint
2181  * @ep: endpoint object to set/clear stall on
2182  * @value: 1 for set stall, 0 for clear stall
2183  *
2184  * Returns 0 on success, error code elsewhere
2185  */
2186 int cdns3_gadget_ep_set_halt(struct usb_ep *ep, int value)
2187 {
2188         struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
2189         struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
2190         unsigned long flags;
2191         int ret = 0;
2192
2193         if (!(priv_ep->flags & EP_ENABLED))
2194                 return -EPERM;
2195
2196         spin_lock_irqsave(&priv_dev->lock, flags);
2197
2198         cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);
2199
2200         if (!value) {
2201                 priv_ep->flags &= ~EP_WEDGE;
2202                 ret = __cdns3_gadget_ep_clear_halt(priv_ep);
2203         } else {
2204                 __cdns3_gadget_ep_set_halt(priv_ep);
2205         }
2206
2207         spin_unlock_irqrestore(&priv_dev->lock, flags);
2208
2209         return ret;
2210 }
2211
2212 extern const struct usb_ep_ops cdns3_gadget_ep0_ops;
2213
/* Endpoint operations for non-control endpoints (ep0 has its own ops,
 * see cdns3_gadget_ep0_ops above).
 */
static const struct usb_ep_ops cdns3_gadget_ep_ops = {
        .enable = cdns3_gadget_ep_enable,
        .disable = cdns3_gadget_ep_disable,
        .alloc_request = cdns3_gadget_ep_alloc_request,
        .free_request = cdns3_gadget_ep_free_request,
        .queue = cdns3_gadget_ep_queue,
        .dequeue = cdns3_gadget_ep_dequeue,
        .set_halt = cdns3_gadget_ep_set_halt,
        .set_wedge = cdns3_gadget_ep_set_wedge,
};
2224
2225 /**
2226  * cdns3_gadget_get_frame Returns number of actual ITP frame
2227  * @gadget: gadget object
2228  *
2229  * Returns number of actual ITP frame
2230  */
2231 static int cdns3_gadget_get_frame(struct usb_gadget *gadget)
2232 {
2233         struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2234
2235         return readl(&priv_dev->regs->usb_itpn);
2236 }
2237
2238 int __cdns3_gadget_wakeup(struct cdns3_device *priv_dev)
2239 {
2240         enum usb_device_speed speed;
2241
2242         speed = cdns3_get_speed(priv_dev);
2243
2244         if (speed >= USB_SPEED_SUPER)
2245                 return 0;
2246
2247         /* Start driving resume signaling to indicate remote wakeup. */
2248         writel(USB_CONF_LGO_L0, &priv_dev->regs->usb_conf);
2249
2250         return 0;
2251 }
2252
2253 static int cdns3_gadget_wakeup(struct usb_gadget *gadget)
2254 {
2255         struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2256         unsigned long flags;
2257         int ret = 0;
2258
2259         spin_lock_irqsave(&priv_dev->lock, flags);
2260         ret = __cdns3_gadget_wakeup(priv_dev);
2261         spin_unlock_irqrestore(&priv_dev->lock, flags);
2262         return ret;
2263 }
2264
2265 static int cdns3_gadget_set_selfpowered(struct usb_gadget *gadget,
2266                                         int is_selfpowered)
2267 {
2268         struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2269         unsigned long flags;
2270
2271         spin_lock_irqsave(&priv_dev->lock, flags);
2272         priv_dev->is_selfpowered = !!is_selfpowered;
2273         spin_unlock_irqrestore(&priv_dev->lock, flags);
2274         return 0;
2275 }
2276
2277 static int cdns3_gadget_pullup(struct usb_gadget *gadget, int is_on)
2278 {
2279         struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2280
2281         if (is_on)
2282                 writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf);
2283         else
2284                 writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);
2285
2286         return 0;
2287 }
2288
/* One-time controller programming performed when a gadget driver binds
 * (called from cdns3_gadget_udc_start with priv_dev->lock held).
 */
static void cdns3_gadget_config(struct cdns3_device *priv_dev)
{
        struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
        u32 reg;

        /* configure endpoint 0 before anything else */
        cdns3_ep0_config(priv_dev);

        /* enable interrupts for endpoint 0 (in and out) */
        writel(EP_IEN_EP_OUT0 | EP_IEN_EP_IN0, &regs->ep_ien);

        /*
         * Driver needs to modify LFPS minimal U1 Exit time for DEV_VER_TI_V1
         * revision of controller.
         */
        if (priv_dev->dev_ver == DEV_VER_TI_V1) {
                reg = readl(&regs->dbg_link1);

                reg &= ~DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_MASK;
                reg |= DBG_LINK1_LFPS_MIN_GEN_U1_EXIT(0x55) |
                       DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_SET;
                writel(reg, &regs->dbg_link1);
        }

        /*
         * By default some platforms has set protected access to memory.
         * This cause problem with cache, so driver restore non-secure
         * access to memory.
         */
        reg = readl(&regs->dma_axi_ctrl);
        reg |= DMA_AXI_CTRL_MARPROT(DMA_AXI_CTRL_NON_SECURE) |
               DMA_AXI_CTRL_MAWPROT(DMA_AXI_CTRL_NON_SECURE);
        writel(reg, &regs->dma_axi_ctrl);

        /* enable generic interrupt*/
        writel(USB_IEN_INIT, &regs->usb_ien);
        writel(USB_CONF_CLK2OFFDS | USB_CONF_L1DS, &regs->usb_conf);

        /* NULL ep: presumably applies the DMULT setting device-wide —
         * confirm against cdns3_configure_dmult
         */
        cdns3_configure_dmult(priv_dev, NULL);

        /* enable the device on the bus (writes USB_CONF_DEVEN) */
        cdns3_gadget_pullup(&priv_dev->gadget, 1);
}
2330
2331 /**
2332  * cdns3_gadget_udc_start Gadget start
2333  * @gadget: gadget object
2334  * @driver: driver which operates on this gadget
2335  *
2336  * Returns 0 on success, error code elsewhere
2337  */
2338 static int cdns3_gadget_udc_start(struct usb_gadget *gadget,
2339                                   struct usb_gadget_driver *driver)
2340 {
2341         struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2342         unsigned long flags;
2343
2344         spin_lock_irqsave(&priv_dev->lock, flags);
2345         priv_dev->gadget_driver = driver;
2346         cdns3_gadget_config(priv_dev);
2347         spin_unlock_irqrestore(&priv_dev->lock, flags);
2348         return 0;
2349 }
2350
/**
 * cdns3_gadget_udc_stop Stops gadget
 * @gadget: gadget object
 *
 * Returns 0
 */
static int cdns3_gadget_udc_stop(struct usb_gadget *gadget)
{
        struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
        struct cdns3_endpoint *priv_ep;
        u32 bEndpointAddress;
        struct usb_ep *ep;
        int ret = 0;    /* never changed; function always returns 0 */
        int val;

        /* unbind the gadget driver and reset bookkeeping */
        priv_dev->gadget_driver = NULL;

        priv_dev->onchip_used_size = 0;
        priv_dev->out_mem_is_allocated = 0;
        priv_dev->gadget.speed = USB_SPEED_UNKNOWN;

        /* reset every HW endpoint; poll results are ignored (best effort) */
        list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
                priv_ep = ep_to_cdns3_ep(ep);
                bEndpointAddress = priv_ep->num | priv_ep->dir;
                cdns3_select_ep(priv_dev, bEndpointAddress);
                writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
                readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
                                          !(val & EP_CMD_EPRST), 1, 100);
        }

        /* disable interrupt for device */
        writel(0, &priv_dev->regs->usb_ien);
        /* disconnect the device from the bus */
        writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);

        return ret;
}
2387
/* Gadget-level operations exposed to the UDC core. */
static const struct usb_gadget_ops cdns3_gadget_ops = {
        .get_frame = cdns3_gadget_get_frame,
        .wakeup = cdns3_gadget_wakeup,
        .set_selfpowered = cdns3_gadget_set_selfpowered,
        .pullup = cdns3_gadget_pullup,
        .udc_start = cdns3_gadget_udc_start,
        .udc_stop = cdns3_gadget_udc_stop,
        .match_ep = cdns3_gadget_match_ep,
};
2397
2398 static void cdns3_free_all_eps(struct cdns3_device *priv_dev)
2399 {
2400         int i;
2401
2402         /* ep0 OUT point to ep0 IN. */
2403         priv_dev->eps[16] = NULL;
2404
2405         for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++)
2406                 if (priv_dev->eps[i]) {
2407                         cdns3_free_trb_pool(priv_dev->eps[i]);
2408                         devm_kfree(priv_dev->dev, priv_dev->eps[i]);
2409                 }
2410 }
2411
2412 /**
2413  * cdns3_init_eps Initializes software endpoints of gadget
2414  * @cdns3: extended gadget object
2415  *
2416  * Returns 0 on success, error code elsewhere
2417  */
2418 static int cdns3_init_eps(struct cdns3_device *priv_dev)
2419 {
2420         u32 ep_enabled_reg, iso_ep_reg;
2421         struct cdns3_endpoint *priv_ep;
2422         int ep_dir, ep_number;
2423         u32 ep_mask;
2424         int ret = 0;
2425         int i;
2426
2427         /* Read it from USB_CAP3 to USB_CAP5 */
2428         ep_enabled_reg = readl(&priv_dev->regs->usb_cap3);
2429         iso_ep_reg = readl(&priv_dev->regs->usb_cap4);
2430
2431         dev_dbg(priv_dev->dev, "Initializing non-zero endpoints\n");
2432
2433         for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) {
2434                 ep_dir = i >> 4;        /* i div 16 */
2435                 ep_number = i & 0xF;    /* i % 16 */
2436                 ep_mask = BIT(i);
2437
2438                 if (!(ep_enabled_reg & ep_mask))
2439                         continue;
2440
2441                 if (ep_dir && !ep_number) {
2442                         priv_dev->eps[i] = priv_dev->eps[0];
2443                         continue;
2444                 }
2445
2446                 priv_ep = devm_kzalloc(priv_dev->dev, sizeof(*priv_ep),
2447                                        GFP_KERNEL);
2448                 if (!priv_ep)
2449                         goto err;
2450
2451                 /* set parent of endpoint object */
2452                 priv_ep->cdns3_dev = priv_dev;
2453                 priv_dev->eps[i] = priv_ep;
2454                 priv_ep->num = ep_number;
2455                 priv_ep->dir = ep_dir ? USB_DIR_IN : USB_DIR_OUT;
2456
2457                 if (!ep_number) {
2458                         ret = cdns3_init_ep0(priv_dev, priv_ep);
2459                         if (ret) {
2460                                 dev_err(priv_dev->dev, "Failed to init ep0\n");
2461                                 goto err;
2462                         }
2463                 } else {
2464                         snprintf(priv_ep->name, sizeof(priv_ep->name), "ep%d%s",
2465                                  ep_number, !!ep_dir ? "in" : "out");
2466                         priv_ep->endpoint.name = priv_ep->name;
2467
2468                         usb_ep_set_maxpacket_limit(&priv_ep->endpoint,
2469                                                    CDNS3_EP_MAX_PACKET_LIMIT);
2470                         priv_ep->endpoint.max_streams = CDNS3_EP_MAX_STREAMS;
2471                         priv_ep->endpoint.ops = &cdns3_gadget_ep_ops;
2472                         if (ep_dir)
2473                                 priv_ep->endpoint.caps.dir_in = 1;
2474                         else
2475                                 priv_ep->endpoint.caps.dir_out = 1;
2476
2477                         if (iso_ep_reg & ep_mask)
2478                                 priv_ep->endpoint.caps.type_iso = 1;
2479
2480                         priv_ep->endpoint.caps.type_bulk = 1;
2481                         priv_ep->endpoint.caps.type_int = 1;
2482
2483                         list_add_tail(&priv_ep->endpoint.ep_list,
2484                                       &priv_dev->gadget.ep_list);
2485                 }
2486
2487                 priv_ep->flags = 0;
2488
2489                 dev_info(priv_dev->dev, "Initialized  %s support: %s %s\n",
2490                          priv_ep->name,
2491                          priv_ep->endpoint.caps.type_bulk ? "BULK, INT" : "",
2492                          priv_ep->endpoint.caps.type_iso ? "ISO" : "");
2493
2494                 INIT_LIST_HEAD(&priv_ep->pending_req_list);
2495                 INIT_LIST_HEAD(&priv_ep->deferred_req_list);
2496                 INIT_LIST_HEAD(&priv_ep->wa2_descmiss_req_list);
2497         }
2498
2499         return 0;
2500 err:
2501         cdns3_free_all_eps(priv_dev);
2502         return -ENOMEM;
2503 }
2504
2505 void cdns3_gadget_exit(struct cdns3 *cdns)
2506 {
2507         struct cdns3_device *priv_dev;
2508
2509         priv_dev = cdns->gadget_dev;
2510
2511         devm_free_irq(cdns->dev, cdns->dev_irq, cdns);
2512
2513         pm_runtime_mark_last_busy(cdns->dev);
2514         pm_runtime_put_autosuspend(cdns->dev);
2515
2516         usb_del_gadget_udc(&priv_dev->gadget);
2517
2518         cdns3_free_all_eps(priv_dev);
2519
2520         while (!list_empty(&priv_dev->aligned_buf_list)) {
2521                 struct cdns3_aligned_buf *buf;
2522
2523                 buf = cdns3_next_align_buf(&priv_dev->aligned_buf_list);
2524                 dma_free_coherent(priv_dev->sysdev, buf->size,
2525                                   buf->buf,
2526                                   buf->dma);
2527
2528                 list_del(&buf->list);
2529                 kfree(buf);
2530         }
2531
2532         dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf,
2533                           priv_dev->setup_dma);
2534
2535         kfree(priv_dev->zlp_buf);
2536         kfree(priv_dev);
2537         cdns->gadget_dev = NULL;
2538         cdns3_drd_switch_gadget(cdns, 0);
2539 }
2540
2541 static int cdns3_gadget_start(struct cdns3 *cdns)
2542 {
2543         struct cdns3_device *priv_dev;
2544         u32 max_speed;
2545         int ret;
2546
2547         priv_dev = kzalloc(sizeof(*priv_dev), GFP_KERNEL);
2548         if (!priv_dev)
2549                 return -ENOMEM;
2550
2551         cdns->gadget_dev = priv_dev;
2552         priv_dev->sysdev = cdns->dev;
2553         priv_dev->dev = cdns->dev;
2554         priv_dev->regs = cdns->dev_regs;
2555
2556         device_property_read_u16(priv_dev->dev, "cdns,on-chip-buff-size",
2557                                  &priv_dev->onchip_buffers);
2558
2559         if (priv_dev->onchip_buffers <=  0) {
2560                 u32 reg = readl(&priv_dev->regs->usb_cap2);
2561
2562                 priv_dev->onchip_buffers = USB_CAP2_ACTUAL_MEM_SIZE(reg);
2563         }
2564
2565         if (!priv_dev->onchip_buffers)
2566                 priv_dev->onchip_buffers = 256;
2567
2568         max_speed = usb_get_maximum_speed(cdns->dev);
2569
2570         /* Check the maximum_speed parameter */
2571         switch (max_speed) {
2572         case USB_SPEED_FULL:
2573                 writel(USB_CONF_SFORCE_FS, &priv_dev->regs->usb_conf);
2574                 writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
2575                 break;
2576         case USB_SPEED_HIGH:
2577                 writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
2578                 break;
2579         case USB_SPEED_SUPER:
2580                 break;
2581         default:
2582                 dev_err(cdns->dev, "invalid maximum_speed parameter %d\n",
2583                         max_speed);
2584                 /* fall through */
2585         case USB_SPEED_UNKNOWN:
2586                 /* default to superspeed */
2587                 max_speed = USB_SPEED_SUPER;
2588                 break;
2589         }
2590
2591         /* fill gadget fields */
2592         priv_dev->gadget.max_speed = max_speed;
2593         priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
2594         priv_dev->gadget.ops = &cdns3_gadget_ops;
2595         priv_dev->gadget.name = "usb-ss-gadget";
2596         priv_dev->gadget.sg_supported = 1;
2597         priv_dev->gadget.quirk_avoids_skb_reserve = 1;
2598
2599         spin_lock_init(&priv_dev->lock);
2600         INIT_WORK(&priv_dev->pending_status_wq,
2601                   cdns3_pending_setup_status_handler);
2602
2603         INIT_WORK(&priv_dev->aligned_buf_wq,
2604                   cdns3_free_aligned_request_buf);
2605
2606         /* initialize endpoint container */
2607         INIT_LIST_HEAD(&priv_dev->gadget.ep_list);
2608         INIT_LIST_HEAD(&priv_dev->aligned_buf_list);
2609
2610         ret = cdns3_init_eps(priv_dev);
2611         if (ret) {
2612                 dev_err(priv_dev->dev, "Failed to create endpoints\n");
2613                 goto err1;
2614         }
2615
2616         /* allocate memory for setup packet buffer */
2617         priv_dev->setup_buf = dma_alloc_coherent(priv_dev->sysdev, 8,
2618                                                  &priv_dev->setup_dma, GFP_DMA);
2619         if (!priv_dev->setup_buf) {
2620                 ret = -ENOMEM;
2621                 goto err2;
2622         }
2623
2624         priv_dev->dev_ver = readl(&priv_dev->regs->usb_cap6);
2625
2626         dev_dbg(priv_dev->dev, "Device Controller version: %08x\n",
2627                 readl(&priv_dev->regs->usb_cap6));
2628         dev_dbg(priv_dev->dev, "USB Capabilities:: %08x\n",
2629                 readl(&priv_dev->regs->usb_cap1));
2630         dev_dbg(priv_dev->dev, "On-Chip memory configuration: %08x\n",
2631                 readl(&priv_dev->regs->usb_cap2));
2632
2633         priv_dev->dev_ver = GET_DEV_BASE_VERSION(priv_dev->dev_ver);
2634
2635         priv_dev->zlp_buf = kzalloc(CDNS3_EP_ZLP_BUF_SIZE, GFP_KERNEL);
2636         if (!priv_dev->zlp_buf) {
2637                 ret = -ENOMEM;
2638                 goto err3;
2639         }
2640
2641         /* add USB gadget device */
2642         ret = usb_add_gadget_udc(priv_dev->dev, &priv_dev->gadget);
2643         if (ret < 0) {
2644                 dev_err(priv_dev->dev,
2645                         "Failed to register USB device controller\n");
2646                 goto err4;
2647         }
2648
2649         return 0;
2650 err4:
2651         kfree(priv_dev->zlp_buf);
2652 err3:
2653         dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf,
2654                           priv_dev->setup_dma);
2655 err2:
2656         cdns3_free_all_eps(priv_dev);
2657 err1:
2658         cdns->gadget_dev = NULL;
2659         return ret;
2660 }
2661
2662 static int __cdns3_gadget_init(struct cdns3 *cdns)
2663 {
2664         int ret = 0;
2665
2666         /* Ensure 32-bit DMA Mask in case we switched back from Host mode */
2667         ret = dma_set_mask_and_coherent(cdns->dev, DMA_BIT_MASK(32));
2668         if (ret) {
2669                 dev_err(cdns->dev, "Failed to set dma mask: %d\n", ret);
2670                 return ret;
2671         }
2672
2673         cdns3_drd_switch_gadget(cdns, 1);
2674         pm_runtime_get_sync(cdns->dev);
2675
2676         ret = cdns3_gadget_start(cdns);
2677         if (ret)
2678                 return ret;
2679
2680         /*
2681          * Because interrupt line can be shared with other components in
2682          * driver it can't use IRQF_ONESHOT flag here.
2683          */
2684         ret = devm_request_threaded_irq(cdns->dev, cdns->dev_irq,
2685                                         cdns3_device_irq_handler,
2686                                         cdns3_device_thread_irq_handler,
2687                                         IRQF_SHARED, dev_name(cdns->dev), cdns);
2688
2689         if (ret)
2690                 goto err0;
2691
2692         return 0;
2693 err0:
2694         cdns3_gadget_exit(cdns);
2695         return ret;
2696 }
2697
2698 static int cdns3_gadget_suspend(struct cdns3 *cdns, bool do_wakeup)
2699 {
2700         struct cdns3_device *priv_dev = cdns->gadget_dev;
2701
2702         cdns3_disconnect_gadget(priv_dev);
2703
2704         priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
2705         usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED);
2706         cdns3_hw_reset_eps_config(priv_dev);
2707
2708         /* disable interrupt for device */
2709         writel(0, &priv_dev->regs->usb_ien);
2710
2711         cdns3_gadget_pullup(&priv_dev->gadget, 0);
2712
2713         return 0;
2714 }
2715
2716 static int cdns3_gadget_resume(struct cdns3 *cdns, bool hibernated)
2717 {
2718         struct cdns3_device *priv_dev = cdns->gadget_dev;
2719
2720         if (!priv_dev->gadget_driver)
2721                 return 0;
2722
2723         cdns3_gadget_config(priv_dev);
2724
2725         return 0;
2726 }
2727
2728 /**
2729  * cdns3_gadget_init - initialize device structure
2730  *
2731  * cdns: cdns3 instance
2732  *
2733  * This function initializes the gadget.
2734  */
2735 int cdns3_gadget_init(struct cdns3 *cdns)
2736 {
2737         struct cdns3_role_driver *rdrv;
2738
2739         rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
2740         if (!rdrv)
2741                 return -ENOMEM;
2742
2743         rdrv->start     = __cdns3_gadget_init;
2744         rdrv->stop      = cdns3_gadget_exit;
2745         rdrv->suspend   = cdns3_gadget_suspend;
2746         rdrv->resume    = cdns3_gadget_resume;
2747         rdrv->state     = CDNS3_ROLE_STATE_INACTIVE;
2748         rdrv->name      = "gadget";
2749         cdns->roles[USB_ROLE_DEVICE] = rdrv;
2750
2751         return 0;
2752 }