2 * xhci-dbgtty.c - tty glue for xHCI debug capability
4 * Copyright (C) 2017 Intel Corporation
6 * Author: Lu Baolu <baolu.lu@linux.intel.com>
9 #include <linux/slab.h>
10 #include <linux/tty.h>
11 #include <linux/tty_flip.h>
14 #include "xhci-dbgcap.h"
/*
 * Copy up to @size bytes out of the port's write FIFO into @packet.
 * Returns the number of bytes actually dequeued by kfifo_out().
 * Called from dbc_start_tx() with port->port_lock held.
 */
17 dbc_send_packet(struct dbc_port *port, char *packet, unsigned int size)
21 len = kfifo_len(&port->write_fifo);
/* NOTE(review): elided lines presumably clamp @size to @len first — confirm. */
25 size = kfifo_out(&port->write_fifo, packet, size);
/*
 * Drain the write FIFO: for each free request in write_pool, fill it from
 * the FIFO via dbc_send_packet() and queue it on the DbC OUT endpoint.
 * Requests that fail to queue are returned to the pool.  The port lock is
 * dropped around dbc_ep_queue() (see __releases/__acquires annotations).
 * Wakes up tty writers if any FIFO space was consumed.
 */
29 static int dbc_start_tx(struct dbc_port *port)
30 __releases(&port->port_lock)
31 __acquires(&port->port_lock)
34 struct dbc_request *req;
36 bool do_tty_wake = false;
37 struct list_head *pool = &port->write_pool;
39 while (!list_empty(pool)) {
40 req = list_entry(pool->next, struct dbc_request, list_pool);
41 len = dbc_send_packet(port, req->buf, DBC_MAX_PACKET);
/* Request is in flight: take it off the free pool before queueing. */
47 list_del(&req->list_pool);
/* Drop the lock across the endpoint queue call (it may take other locks). */
49 spin_unlock(&port->port_lock);
50 status = dbc_ep_queue(port->out, req, GFP_ATOMIC);
51 spin_lock(&port->port_lock);
/* Queueing failed: put the request back on the free pool. */
54 list_add(&req->list_pool, pool);
59 if (do_tty_wake && port->port.tty)
60 tty_wakeup(port->port.tty);
/*
 * Queue every request in read_pool on the DbC IN endpoint to receive data.
 * As with dbc_start_tx(), the port lock is released around dbc_ep_queue()
 * and failed requests are returned to the pool.
 */
65 static void dbc_start_rx(struct dbc_port *port)
66 __releases(&port->port_lock)
67 __acquires(&port->port_lock)
69 struct dbc_request *req;
71 struct list_head *pool = &port->read_pool;
73 while (!list_empty(pool)) {
77 req = list_entry(pool->next, struct dbc_request, list_pool);
78 list_del(&req->list_pool);
79 req->length = DBC_MAX_PACKET;
/* Drop the lock across the endpoint queue call. */
81 spin_unlock(&port->port_lock);
82 status = dbc_ep_queue(port->in, req, GFP_ATOMIC);
83 spin_lock(&port->port_lock);
/* Queueing failed: return the request to the read pool. */
86 list_add(&req->list_pool, pool);
/*
 * Completion callback for read (IN) requests: park the completed request
 * on read_queue and let the dbc_rx_push() tasklet feed it to the tty
 * layer outside of completion context.
 */
93 dbc_read_complete(struct xhci_hcd *xhci, struct dbc_request *req)
96 struct xhci_dbc *dbc = xhci->dbc;
97 struct dbc_port *port = &dbc->port;
99 spin_lock_irqsave(&port->port_lock, flags);
100 list_add_tail(&req->list_pool, &port->read_queue);
101 tasklet_schedule(&port->push);
102 spin_unlock_irqrestore(&port->port_lock, flags);
/*
 * Completion callback for write (OUT) requests: return the request to
 * write_pool for reuse and warn on unexpected completion status.
 * (Elided lines presumably restart TX and handle expected statuses.)
 */
105 static void dbc_write_complete(struct xhci_hcd *xhci, struct dbc_request *req)
108 struct xhci_dbc *dbc = xhci->dbc;
109 struct dbc_port *port = &dbc->port;
111 spin_lock_irqsave(&port->port_lock, flags);
112 list_add(&req->list_pool, &port->write_pool);
113 switch (req->status) {
120 xhci_warn(xhci, "unexpected write complete status %d\n",
124 spin_unlock_irqrestore(&port->port_lock, flags);
/*
 * Release one dbc_request (elided lines presumably free req->buf first,
 * matching the kmalloc in xhci_dbc_alloc_requests()).
 */
127 static void xhci_dbc_free_req(struct dbc_ep *dep, struct dbc_request *req)
130 dbc_free_request(dep, req);
/*
 * Allocate up to DBC_QUEUE_SIZE requests for @dep, each with a
 * DBC_MAX_PACKET buffer and completion callback @fn, and add them to
 * @head.  Returns 0 if at least one request was allocated, else -ENOMEM.
 *
 * Runs in process context (only caller is xhci_dbc_tty_register_device(),
 * which already sleeps via kfifo_alloc(GFP_KERNEL)), so use GFP_KERNEL
 * throughout: the original GFP_ATOMIC here was inconsistent with the
 * GFP_KERNEL kmalloc() of the buffer two lines below.
 */
134 xhci_dbc_alloc_requests(struct dbc_ep *dep, struct list_head *head,
135 void (*fn)(struct xhci_hcd *, struct dbc_request *))
138 struct dbc_request *req;
140 for (i = 0; i < DBC_QUEUE_SIZE; i++) {
141 req = dbc_alloc_request(dep, GFP_KERNEL);
145 req->length = DBC_MAX_PACKET;
146 req->buf = kmalloc(req->length, GFP_KERNEL);
/* Buffer allocation failed: release the half-built request. */
148 xhci_dbc_free_req(dep, req);
153 list_add_tail(&req->list_pool, head);
/* Partial success is still success; total failure is -ENOMEM. */
156 return list_empty(head) ? -ENOMEM : 0;
/*
 * Free every request on @head, unlinking each before release.
 */
160 xhci_dbc_free_requests(struct dbc_ep *dep, struct list_head *head)
162 struct dbc_request *req;
164 while (!list_empty(head)) {
165 req = list_entry(head->next, struct dbc_request, list_pool);
166 list_del(&req->list_pool);
167 xhci_dbc_free_req(dep, req);
/*
 * tty_operations.install: bind the tty to our single dbc_port (stashed in
 * driver_state by xhci_dbc_tty_register_driver()).
 */
171 static int dbc_tty_install(struct tty_driver *driver, struct tty_struct *tty)
173 struct dbc_port *port = driver->driver_state;
175 tty->driver_data = port;
177 return tty_port_install(&port->port, driver, tty);
/* tty_operations.open: delegate to the tty_port helper. */
180 static int dbc_tty_open(struct tty_struct *tty, struct file *file)
182 struct dbc_port *port = tty->driver_data;
184 return tty_port_open(&port->port, tty, file);
/* tty_operations.close: delegate to the tty_port helper. */
187 static void dbc_tty_close(struct tty_struct *tty, struct file *file)
189 struct dbc_port *port = tty->driver_data;
191 tty_port_close(&port->port, tty, file);
/*
 * tty_operations.write: buffer @count bytes into the write FIFO under the
 * port lock; kfifo_in() returns how many bytes actually fit, which is
 * what we report back to the tty core.  (Elided lines presumably kick
 * dbc_start_tx() to transmit.)
 */
194 static int dbc_tty_write(struct tty_struct *tty,
195 const unsigned char *buf,
198 struct dbc_port *port = tty->driver_data;
201 spin_lock_irqsave(&port->port_lock, flags);
203 count = kfifo_in(&port->write_fifo, buf, count);
205 spin_unlock_irqrestore(&port->port_lock, flags);
/*
 * tty_operations.put_char: push a single byte into the write FIFO.
 * kfifo_put() returns 0 if the FIFO was full.
 */
210 static int dbc_tty_put_char(struct tty_struct *tty, unsigned char ch)
212 struct dbc_port *port = tty->driver_data;
216 spin_lock_irqsave(&port->port_lock, flags);
217 status = kfifo_put(&port->write_fifo, ch);
218 spin_unlock_irqrestore(&port->port_lock, flags);
/*
 * tty_operations.flush_chars: start transmitting whatever put_char()
 * accumulated (elided line presumably calls dbc_start_tx() under the lock).
 */
223 static void dbc_tty_flush_chars(struct tty_struct *tty)
225 struct dbc_port *port = tty->driver_data;
228 spin_lock_irqsave(&port->port_lock, flags);
230 spin_unlock_irqrestore(&port->port_lock, flags);
/* tty_operations.write_room: free space remaining in the write FIFO. */
233 static int dbc_tty_write_room(struct tty_struct *tty)
235 struct dbc_port *port = tty->driver_data;
239 spin_lock_irqsave(&port->port_lock, flags);
240 room = kfifo_avail(&port->write_fifo);
241 spin_unlock_irqrestore(&port->port_lock, flags);
/* tty_operations.chars_in_buffer: bytes still pending in the write FIFO. */
246 static int dbc_tty_chars_in_buffer(struct tty_struct *tty)
248 struct dbc_port *port = tty->driver_data;
252 spin_lock_irqsave(&port->port_lock, flags);
253 chars = kfifo_len(&port->write_fifo);
254 spin_unlock_irqrestore(&port->port_lock, flags);
/*
 * tty_operations.unthrottle: the reader can take data again, so reschedule
 * the RX push tasklet to resume delivering queued read requests.
 */
259 static void dbc_tty_unthrottle(struct tty_struct *tty)
261 struct dbc_port *port = tty->driver_data;
264 spin_lock_irqsave(&port->port_lock, flags);
265 tasklet_schedule(&port->push);
266 spin_unlock_irqrestore(&port->port_lock, flags);
/* tty operations for the DbC serial port (/dev/ttyDBC0). */
269 static const struct tty_operations dbc_tty_ops = {
270 .install = dbc_tty_install,
271 .open = dbc_tty_open,
272 .close = dbc_tty_close,
273 .write = dbc_tty_write,
274 .put_char = dbc_tty_put_char,
275 .flush_chars = dbc_tty_flush_chars,
276 .write_room = dbc_tty_write_room,
277 .chars_in_buffer = dbc_tty_chars_in_buffer,
278 .unthrottle = dbc_tty_unthrottle,
/* Single tty driver instance shared by register/unregister below. */
281 static struct tty_driver *dbc_tty_driver;
/*
 * Allocate and register the one-port "dbc_serial" tty driver (ttyDBC),
 * defaulting to raw 9600 8N1 termios.  On any failure the driver pointer
 * is reset to NULL so unregister is safe.  Returns 0 or a negative errno.
 */
283 int xhci_dbc_tty_register_driver(struct xhci_hcd *xhci)
286 struct xhci_dbc *dbc = xhci->dbc;
288 dbc_tty_driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW |
289 TTY_DRIVER_DYNAMIC_DEV);
290 if (IS_ERR(dbc_tty_driver)) {
291 status = PTR_ERR(dbc_tty_driver);
292 dbc_tty_driver = NULL;
296 dbc_tty_driver->driver_name = "dbc_serial";
297 dbc_tty_driver->name = "ttyDBC";
299 dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
300 dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
301 dbc_tty_driver->init_termios = tty_std_termios;
302 dbc_tty_driver->init_termios.c_cflag =
303 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
304 dbc_tty_driver->init_termios.c_ispeed = 9600;
305 dbc_tty_driver->init_termios.c_ospeed = 9600;
/* Lets dbc_tty_install() find the port from the driver. */
306 dbc_tty_driver->driver_state = &dbc->port;
308 tty_set_operations(dbc_tty_driver, &dbc_tty_ops);
310 status = tty_register_driver(dbc_tty_driver);
313 "can't register dbc tty driver, err %d\n", status);
314 put_tty_driver(dbc_tty_driver);
315 dbc_tty_driver = NULL;
/*
 * Tear down the tty driver registered above and drop our reference.
 * NOTE(review): no NULL check — callers must not call this unless
 * xhci_dbc_tty_register_driver() succeeded.
 */
321 void xhci_dbc_tty_unregister_driver(void)
323 tty_unregister_driver(dbc_tty_driver);
324 put_tty_driver(dbc_tty_driver);
325 dbc_tty_driver = NULL;
/*
 * Tasklet handler: move data from completed read requests (read_queue)
 * into the tty flip buffer, then return the requests to read_pool.
 * Stops early while the tty is throttled, and reschedules itself if data
 * remains to be pushed; @_port is the dbc_port cast through unsigned long
 * as required by tasklet_init().
 */
328 static void dbc_rx_push(unsigned long _port)
330 struct dbc_request *req;
331 struct tty_struct *tty;
333 bool do_push = false;
334 bool disconnect = false;
335 struct dbc_port *port = (void *)_port;
336 struct list_head *queue = &port->read_queue;
338 spin_lock_irqsave(&port->port_lock, flags);
339 tty = port->port.tty;
340 while (!list_empty(queue)) {
341 req = list_first_entry(queue, struct dbc_request, list_pool);
/* Reader can't take more data right now; leave the rest queued. */
343 if (tty && tty_throttled(tty))
346 switch (req->status) {
353 pr_warn("ttyDBC0: unexpected RX status %d\n",
/* Copy the received payload into the tty flip buffer. */
359 char *packet = req->buf;
360 unsigned int n, size = req->actual;
369 count = tty_insert_flip_string(&port->port, packet,
/* Track how much of this request was consumed (partial pushes). */
374 port->n_read += count;
/* Request fully consumed: recycle it onto the read pool. */
380 list_move(&req->list_pool, &port->read_pool);
384 tty_flip_buffer_push(&port->port);
/* Data still queued: run again once the tty is unthrottled. */
386 if (!list_empty(queue) && tty) {
387 if (!tty_throttled(tty)) {
389 tasklet_schedule(&port->push);
391 pr_warn("ttyDBC0: RX not scheduled?\n");
398 spin_unlock_irqrestore(&port->port_lock, flags);
/*
 * tty_port_operations.activate: called on first open (elided line
 * presumably starts RX by queueing the read requests under the lock).
 */
401 static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty)
404 struct dbc_port *port = container_of(_port, struct dbc_port, port);
406 spin_lock_irqsave(&port->port_lock, flags);
408 spin_unlock_irqrestore(&port->port_lock, flags);
/* tty_port operations; only activate is needed. */
413 static const struct tty_port_operations dbc_port_ops = {
414 .activate = dbc_port_activate,
/*
 * One-time init of the dbc_port: tty_port, lock, RX-push tasklet, request
 * lists, and the DbC IN/OUT endpoints (read side uses port->in, write
 * side port->out — see dbc_start_rx()/dbc_start_tx()).
 */
418 xhci_dbc_tty_init_port(struct xhci_hcd *xhci, struct dbc_port *port)
420 tty_port_init(&port->port);
421 spin_lock_init(&port->port_lock);
422 tasklet_init(&port->push, dbc_rx_push, (unsigned long)port);
423 INIT_LIST_HEAD(&port->read_pool);
424 INIT_LIST_HEAD(&port->read_queue);
425 INIT_LIST_HEAD(&port->write_pool);
427 port->in = get_in_ep(xhci);
428 port->out = get_out_ep(xhci);
429 port->port.ops = &dbc_port_ops;
/*
 * Undo xhci_dbc_tty_init_port(): ensure the RX tasklet has finished, then
 * destroy the tty_port.
 */
434 xhci_dbc_tty_exit_port(struct dbc_port *port)
436 tasklet_kill(&port->push);
437 tty_port_destroy(&port->port);
/*
 * Bring up the tty device: init the port, register ttyDBC0, allocate the
 * write FIFO, and pre-allocate read (IN endpoint) and write (OUT
 * endpoint) request pools.  Error-path labels are elided here but unwind
 * in reverse order (free requests, free FIFO, unregister device, exit
 * port).  Returns 0 or a negative errno.
 */
440 int xhci_dbc_tty_register_device(struct xhci_hcd *xhci)
443 struct device *tty_dev;
444 struct xhci_dbc *dbc = xhci->dbc;
445 struct dbc_port *port = &dbc->port;
447 xhci_dbc_tty_init_port(xhci, port);
448 tty_dev = tty_port_register_device(&port->port,
449 dbc_tty_driver, 0, NULL);
450 if (IS_ERR(tty_dev)) {
451 ret = PTR_ERR(tty_dev);
455 ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL);
/* Read requests come from the IN endpoint, write requests from OUT. */
459 ret = xhci_dbc_alloc_requests(port->in, &port->read_pool,
464 ret = xhci_dbc_alloc_requests(port->out, &port->write_pool,
469 port->registered = true;
474 xhci_dbc_free_requests(port->in, &port->read_pool);
475 xhci_dbc_free_requests(port->out, &port->write_pool);
476 kfifo_free(&port->write_fifo);
479 tty_unregister_device(dbc_tty_driver, 0);
482 xhci_dbc_tty_exit_port(port);
484 xhci_err(xhci, "can't register tty port, err %d\n", ret);
/*
 * Tear down the tty device registered by xhci_dbc_tty_register_device():
 * unregister ttyDBC0, kill the RX tasklet / destroy the port, then free
 * the write FIFO and all request pools.
 *
 * Fix: free each pool with the endpoint it was allocated from.  read_pool
 * and read_queue were allocated against port->in (= get_in_ep()) and
 * write_pool against port->out (= get_out_ep()) — the original code had
 * the two swapped, mismatching both the allocation in
 * xhci_dbc_tty_register_device() and its error path.
 */
489 void xhci_dbc_tty_unregister_device(struct xhci_hcd *xhci)
491 struct xhci_dbc *dbc = xhci->dbc;
492 struct dbc_port *port = &dbc->port;
494 tty_unregister_device(dbc_tty_driver, 0);
495 xhci_dbc_tty_exit_port(port);
496 port->registered = false;
498 kfifo_free(&port->write_fifo);
499 xhci_dbc_free_requests(get_in_ep(xhci), &port->read_pool);
500 xhci_dbc_free_requests(get_in_ep(xhci), &port->read_queue);
501 xhci_dbc_free_requests(get_out_ep(xhci), &port->write_pool);