/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <linux/math64.h>

#include <rdma/ib_verbs.h>

#include "iw_cxgb4.h"
#define DRV_VERSION "0.1"

MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
static int allow_db_fc_on_t5;
module_param(allow_db_fc_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_fc_on_t5,
		 "Allow DB Flow Control on T5 (default = 0)");

static int allow_db_coalescing_on_t5;
module_param(allow_db_coalescing_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_coalescing_on_t5,
		 "Allow DB Coalescing on T5 (default = 0)");

int c4iw_wr_log = 0;
module_param(c4iw_wr_log, int, 0444);
MODULE_PARM_DESC(c4iw_wr_log, "Enables logging of work request timing data.");

static int c4iw_wr_log_size_order = 12;
module_param(c4iw_wr_log_size_order, int, 0444);
MODULE_PARM_DESC(c4iw_wr_log_size_order,
		 "Number of entries (log2) in the work request timing log.");
struct uld_ctx {
	struct list_head entry;
	struct cxgb4_lld_info lldi;
	struct c4iw_dev *dev;
};

static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
#define DB_FC_RESUME_SIZE 64
#define DB_FC_RESUME_DELAY 1
#define DB_FC_DRAIN_THRESH 0
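/*
 * Doorbell flow-control tuning, as used by resume_queues() below:
 * DB_FC_RESUME_SIZE is the number of QPs resumed per chunk,
 * DB_FC_RESUME_DELAY is the delay (in jiffies) between chunks, and
 * DB_FC_DRAIN_THRESH is the left shift applied to dbfifo_int_thresh
 * when deciding whether the doorbell FIFO has drained enough to
 * resume more queues.
 */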
static struct dentry *c4iw_debugfs_root;
struct c4iw_debugfs_data {
	struct c4iw_dev *devp;
	char *buf;
	int bufsize;
	int pos;
};

static int count_idrs(int id, void *p, void *data)
{
	int *countp = data;

	*countp = *countp + 1;
	return 0;
}
static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct c4iw_debugfs_data *d = file->private_data;

	return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
}
void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
{
	struct wr_log_entry le;
	int idx;

	if (!wq->rdev->wr_log)
		return;

	/* Claim the next ring slot; the power-of-two mask wraps the index. */
	idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) &
		(wq->rdev->wr_log_size - 1);
	le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]);
	getnstimeofday(&le.poll_host_ts);
	le.valid = 1;
	le.cqe_sge_ts = CQE_TS(cqe);
	if (SQ_TYPE(cqe)) {
		le.qid = wq->sq.qid;
		le.opcode = CQE_OPCODE(cqe);
		le.post_host_ts = wq->sq.sw_sq[wq->sq.cidx].host_ts;
		le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts;
		le.wr_id = CQE_WRID_SQ_IDX(cqe);
	} else {
		le.qid = wq->rq.qid;
		le.opcode = FW_RI_RECEIVE;
		le.post_host_ts = wq->rq.sw_rq[wq->rq.cidx].host_ts;
		le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts;
		le.wr_id = CQE_WRID_MSN(cqe);
	}
	wq->rdev->wr_log[idx] = le;
}
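/*
 * wr_log_show() converts SGE timestamp deltas to nanoseconds with the
 * ts2ns() macro below: lldi.cclk_ps is the core-clock period in
 * picoseconds, so ns = ticks * cclk_ps / 1000.  For example, with a
 * 4 ns clock period (cclk_ps == 4000), a delta of 250 ticks is
 * reported as 250 * 4000 / 1000 = 1000 ns.
 */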
static int wr_log_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;
	struct timespec prev_ts = {0, 0};
	struct wr_log_entry *lep;
	int prev_ts_set = 0;
	int idx, end;

#define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000)

	idx = atomic_read(&dev->rdev.wr_log_idx) &
		(dev->rdev.wr_log_size - 1);
	end = idx - 1;
	if (end < 0)
		end = dev->rdev.wr_log_size - 1;
	lep = &dev->rdev.wr_log[idx];
	while (idx != end) {
		if (lep->valid) {
			if (!prev_ts_set) {
				prev_ts_set = 1;
				prev_ts = lep->poll_host_ts;
			}
			seq_printf(seq, "%04u: sec %lu nsec %lu qid %u opcode "
				   "%u %s 0x%x host_wr_delta sec %lu nsec %lu "
				   "post_sge_ts 0x%llx cqe_sge_ts 0x%llx "
				   "poll_sge_ts 0x%llx post_poll_delta_ns %llu "
				   "cqe_poll_delta_ns %llu\n",
				   idx,
				   timespec_sub(lep->poll_host_ts,
						prev_ts).tv_sec,
				   timespec_sub(lep->poll_host_ts,
						prev_ts).tv_nsec,
				   lep->qid, lep->opcode,
				   lep->opcode == FW_RI_RECEIVE ?
							"msn" : "wrid",
				   lep->wr_id,
				   timespec_sub(lep->poll_host_ts,
						lep->post_host_ts).tv_sec,
				   timespec_sub(lep->poll_host_ts,
						lep->post_host_ts).tv_nsec,
				   lep->post_sge_ts, lep->cqe_sge_ts,
				   lep->poll_sge_ts,
				   ts2ns(lep->poll_sge_ts - lep->post_sge_ts),
				   ts2ns(lep->poll_sge_ts - lep->cqe_sge_ts));
			prev_ts = lep->poll_host_ts;
		}
		idx++;
		if (idx > (dev->rdev.wr_log_size - 1))
			idx = 0;
		lep = &dev->rdev.wr_log[idx];
	}
#undef ts2ns
	return 0;
}
static int wr_log_open(struct inode *inode, struct file *file)
{
	return single_open(file, wr_log_show, inode->i_private);
}
static ssize_t wr_log_clear(struct file *file, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;
	int i;

	if (dev->rdev.wr_log)
		for (i = 0; i < dev->rdev.wr_log_size; i++)
			dev->rdev.wr_log[i].valid = 0;
	return count;
}
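/*
 * Clearing only resets each entry's valid bit; the ring storage and
 * the write index are left alone.  Any write to the debugfs "wr_log"
 * file (e.g. "echo 0 > wr_log") empties the log this way.
 */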
static const struct file_operations wr_log_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = wr_log_open,
	.release = single_release,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = wr_log_clear,
};
static int dump_qp(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;
	struct c4iw_debugfs_data *qpd = data;
	int space;
	int cc;

	if (id != qp->wq.sq.qid)
		return 0;

	space = qpd->bufsize - qpd->pos - 1;
	if (space == 0)
		return 1;

	if (qp->ep) {
		if (qp->ep->com.local_addr.ss_family == AF_INET) {
			struct sockaddr_in *lsin = (struct sockaddr_in *)
				&qp->ep->com.cm_id->local_addr;
			struct sockaddr_in *rsin = (struct sockaddr_in *)
				&qp->ep->com.cm_id->remote_addr;
			struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
				&qp->ep->com.cm_id->m_local_addr;
			struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
				&qp->ep->com.cm_id->m_remote_addr;

			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI4:%u/%u->%pI4:%u/%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      qp->ep->hwtid, (int)qp->ep->com.state,
				      &lsin->sin_addr, ntohs(lsin->sin_port),
				      ntohs(mapped_lsin->sin_port),
				      &rsin->sin_addr, ntohs(rsin->sin_port),
				      ntohs(mapped_rsin->sin_port));
		} else {
			struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
				&qp->ep->com.cm_id->local_addr;
			struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
				&qp->ep->com.cm_id->remote_addr;
			struct sockaddr_in6 *mapped_lsin6 =
				(struct sockaddr_in6 *)
				&qp->ep->com.cm_id->m_local_addr;
			struct sockaddr_in6 *mapped_rsin6 =
				(struct sockaddr_in6 *)
				&qp->ep->com.cm_id->m_remote_addr;

			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI6:%u/%u->%pI6:%u/%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      qp->ep->hwtid, (int)qp->ep->com.state,
				      &lsin6->sin6_addr,
				      ntohs(lsin6->sin6_port),
				      ntohs(mapped_lsin6->sin6_port),
				      &rsin6->sin6_addr,
				      ntohs(rsin6->sin6_port),
				      ntohs(mapped_rsin6->sin6_port));
		}
	} else
		cc = snprintf(qpd->buf + qpd->pos, space,
			      "qp sq id %u rq id %u state %u onchip %u\n",
			      qp->wq.sq.qid, qp->wq.rq.qid,
			      (int)qp->attr.state,
			      qp->wq.sq.flags & T4_SQ_ONCHIP);
	if (cc < space)
		qpd->pos += cc;
	return 0;
}
static int qp_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd = file->private_data;
	if (!qpd) {
		printk(KERN_INFO "%s null qpd?\n", __func__);
		return 0;
	}
	vfree(qpd->buf);
	kfree(qpd);
	return 0;
}
static int qp_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd;
	int count = 1;

	qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
	if (!qpd)
		return -ENOMEM;

	qpd->devp = inode->i_private;
	qpd->pos = 0;

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->bufsize = count * 180;
	qpd->buf = vmalloc(qpd->bufsize);
	if (!qpd->buf) {
		kfree(qpd);
		return -ENOMEM;
	}

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->buf[qpd->pos++] = 0;
	file->private_data = qpd;
	return 0;
}
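/*
 * The dump files all follow the same snapshot pattern: count the idr
 * entries under the device lock, size the buffer from that count
 * (e.g. 180 bytes per QP line above), then re-walk the idr with the
 * dump callback filling the buffer.  A dump callback returning 1
 * stops idr_for_each() early once the buffer is full.
 */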
static const struct file_operations qp_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = qp_open,
	.release = qp_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};
static int dump_stag(int id, void *p, void *data)
{
	struct c4iw_debugfs_data *stagd = data;
	int space;
	int cc;
	struct fw_ri_tpte tpte;
	int ret;

	space = stagd->bufsize - stagd->pos - 1;
	if (space == 0)
		return 1;

	ret = cxgb4_read_tpte(stagd->devp->rdev.lldi.ports[0], (u32)id<<8,
			      (__be32 *)&tpte);
	if (ret) {
		dev_err(&stagd->devp->rdev.lldi.pdev->dev,
			"%s cxgb4_read_tpte err %d\n", __func__, ret);
		return ret;
	}
	cc = snprintf(stagd->buf + stagd->pos, space,
		      "stag: idx 0x%x valid %d key 0x%x state %d pdid %d "
		      "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
		      (u32)id<<8,
		      FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
		      FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
		      ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
		      ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
	if (cc < space)
		stagd->pos += cc;
	return 0;
}
static int stag_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd = file->private_data;
	if (!stagd) {
		printk(KERN_INFO "%s null stagd?\n", __func__);
		return 0;
	}
	vfree(stagd->buf);
	kfree(stagd);
	return 0;
}
static int stag_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd;
	int ret = 0;
	int count = 1;

	stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
	if (!stagd) {
		ret = -ENOMEM;
		goto out;
	}
	stagd->devp = inode->i_private;
	stagd->pos = 0;

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->bufsize = count * 256;
	stagd->buf = vmalloc(stagd->bufsize);
	if (!stagd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->buf[stagd->pos++] = 0;
	file->private_data = stagd;
	goto out;
err1:
	kfree(stagd);
out:
	return ret;
}
static const struct file_operations stag_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stag_open,
	.release = stag_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};
static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY", "STOPPED"};
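/*
 * Doorbell state machine, driven by c4iw_uld_control() below:
 * NORMAL -> STOPPED on CXGB4_CONTROL_DB_FULL (stop_queues());
 * STOPPED -> FLOW_CONTROL -> NORMAL on CXGB4_CONTROL_DB_EMPTY
 * (resume_queues() drains db_fc_list in chunks); and
 * STOPPED -> RECOVERY -> STOPPED on CXGB4_CONTROL_DB_DROP
 * (recover_queues() resyncs the hardware producer indices).
 */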
static int stats_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;

	seq_printf(seq, "   Object: %10s %10s %10s %10s\n", "Total", "Current",
		   "Max", "Fail");
	seq_printf(seq, "     PDID: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur,
			dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail);
	seq_printf(seq, "      QID: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
			dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
	seq_printf(seq, "   TPTMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
			dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
	seq_printf(seq, "   PBLMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur,
			dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail);
	seq_printf(seq, "   RQTMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur,
			dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail);
	seq_printf(seq, "  OCQPMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur,
			dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail);
	seq_printf(seq, "  DB FULL: %10llu\n", dev->rdev.stats.db_full);
	seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
	seq_printf(seq, "  DB DROP: %10llu\n", dev->rdev.stats.db_drop);
	seq_printf(seq, " DB State: %s Transitions %llu FC Interruptions %llu\n",
		   db_state_str[dev->db_state],
		   dev->rdev.stats.db_state_transitions,
		   dev->rdev.stats.db_fc_interruptions);
	seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
	seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.act_ofld_conn_fails);
	seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.pas_ofld_conn_fails);
	seq_printf(seq, "NEG_ADV_RCVD: %10llu\n", dev->rdev.stats.neg_adv);
	seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird);
	return 0;
}
static int stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, stats_show, inode->i_private);
}
static ssize_t stats_clear(struct file *file, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;

	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.pd.max = 0;
	dev->rdev.stats.pd.fail = 0;
	dev->rdev.stats.qid.max = 0;
	dev->rdev.stats.qid.fail = 0;
	dev->rdev.stats.stag.max = 0;
	dev->rdev.stats.stag.fail = 0;
	dev->rdev.stats.pbl.max = 0;
	dev->rdev.stats.pbl.fail = 0;
	dev->rdev.stats.rqt.max = 0;
	dev->rdev.stats.rqt.fail = 0;
	dev->rdev.stats.ocqp.max = 0;
	dev->rdev.stats.ocqp.fail = 0;
	dev->rdev.stats.db_full = 0;
	dev->rdev.stats.db_empty = 0;
	dev->rdev.stats.db_drop = 0;
	dev->rdev.stats.db_state_transitions = 0;
	dev->rdev.stats.tcam_full = 0;
	dev->rdev.stats.act_ofld_conn_fails = 0;
	dev->rdev.stats.pas_ofld_conn_fails = 0;
	mutex_unlock(&dev->rdev.stats.lock);
	return count;
}
static const struct file_operations stats_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stats_open,
	.release = single_release,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = stats_clear,
};
static int dump_ep(int id, void *p, void *data)
{
	struct c4iw_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.cm_id->local_addr;
		struct sockaddr_in *rsin = (struct sockaddr_in *)
			&ep->com.cm_id->remote_addr;
		struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
			&ep->com.cm_id->m_local_addr;
		struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
			&ep->com.cm_id->m_remote_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "conn_na %u abort_na %u "
			      "%pI4:%d/%d <-> %pI4:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      ep->stats.connect_neg_adv,
			      ep->stats.abort_neg_adv,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(mapped_lsin->sin_port),
			      &rsin->sin_addr, ntohs(rsin->sin_port),
			      ntohs(mapped_rsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.cm_id->local_addr;
		struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
			&ep->com.cm_id->remote_addr;
		struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
			&ep->com.cm_id->m_local_addr;
		struct sockaddr_in6 *mapped_rsin6 = (struct sockaddr_in6 *)
			&ep->com.cm_id->m_remote_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "conn_na %u abort_na %u "
			      "%pI6:%d/%d <-> %pI6:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      ep->stats.connect_neg_adv,
			      ep->stats.abort_neg_adv,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(mapped_lsin6->sin6_port),
			      &rsin6->sin6_addr, ntohs(rsin6->sin6_port),
			      ntohs(mapped_rsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}
static int dump_listen_ep(int id, void *p, void *data)
{
	struct c4iw_listen_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.cm_id->local_addr;
		struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
			&ep->com.cm_id->m_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI4:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(mapped_lsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.cm_id->local_addr;
		struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
			&ep->com.cm_id->m_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI6:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(mapped_lsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}
static int ep_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd = file->private_data;
	if (!epd) {
		pr_info("%s null epd?\n", __func__);
		return 0;
	}
	vfree(epd->buf);
	kfree(epd);
	return 0;
}
static int ep_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd;
	int ret = 0;
	int count = 1;

	epd = kmalloc(sizeof(*epd), GFP_KERNEL);
	if (!epd) {
		ret = -ENOMEM;
		goto out;
	}
	epd->devp = inode->i_private;
	epd->pos = 0;

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
	spin_unlock_irq(&epd->devp->lock);

	epd->bufsize = count * 240;
	epd->buf = vmalloc(epd->bufsize);
	if (!epd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
	spin_unlock_irq(&epd->devp->lock);

	file->private_data = epd;
	goto out;
err1:
	kfree(epd);
out:
	return ret;
}
static const struct file_operations ep_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = ep_open,
	.release = ep_release,
	.read    = debugfs_read,
};
static int setup_debugfs(struct c4iw_dev *devp)
{
	if (!devp->debugfs_root)
		return -1;

	debugfs_create_file_size("qps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &qp_debugfs_fops, 4096);

	debugfs_create_file_size("stags", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stag_debugfs_fops, 4096);

	debugfs_create_file_size("stats", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stats_debugfs_fops, 4096);

	debugfs_create_file_size("eps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &ep_debugfs_fops, 4096);

	if (c4iw_wr_log)
		debugfs_create_file_size("wr_log", S_IWUSR, devp->debugfs_root,
					 (void *)devp, &wr_log_debugfs_fops,
					 4096);
	return 0;
}
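/*
 * Per-device files land in a directory named after the PCI function
 * under the driver's debugfs root (see c4iw_alloc() below), so with
 * debugfs mounted in the usual place the dumps should be readable as
 * e.g. /sys/kernel/debug/iw_cxgb4/0000:02:00.4/{qps,stags,stats,eps}.
 * The exact path depends on where debugfs is mounted and on DRV_NAME.
 */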
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct c4iw_qid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		if (!(entry->qid & rdev->qpmask)) {
			c4iw_put_resource(&rdev->resource.qid_table,
					  entry->qid);
			mutex_lock(&rdev->stats.lock);
			rdev->stats.qid.cur -= rdev->qpmask + 1;
			mutex_unlock(&rdev->stats.lock);
		}
		kfree(entry);
	}

	list_for_each_safe(pos, nxt, &uctx->cqids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	INIT_LIST_HEAD(&uctx->cqids);
	mutex_init(&uctx->lock);
}
/* Caller takes care of locking if needed */
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
	int err;

	c4iw_init_dev_ucontext(rdev, &rdev->uctx);

	/*
	 * This implementation assumes udb_density == ucq_density!  Eventually
	 * we might need to support this but for now fail the open. Also the
	 * cqid and qpid range must match for now.
	 */
	if (rdev->lldi.udb_density != rdev->lldi.ucq_density) {
		pr_err(MOD "%s: unsupported udb/ucq densities %u/%u\n",
		       pci_name(rdev->lldi.pdev), rdev->lldi.udb_density,
		       rdev->lldi.ucq_density);
		return -EINVAL;
	}
	if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start ||
	    rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) {
		pr_err(MOD "%s: unsupported qp and cq id ranges "
		       "qp start %u size %u cq start %u size %u\n",
		       pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start,
		       rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.start,
		       rdev->lldi.vr->cq.size);
		return -EINVAL;
	}

	rdev->qpmask = rdev->lldi.udb_density - 1;
	rdev->cqmask = rdev->lldi.ucq_density - 1;
	PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
	     "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x "
	     "qp qid start %u size %u cq qid start %u size %u\n",
	     __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
	     rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
	     rdev->lldi.vr->pbl.start,
	     rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
	     rdev->lldi.vr->rq.size,
	     rdev->lldi.vr->qp.start,
	     rdev->lldi.vr->qp.size,
	     rdev->lldi.vr->cq.start,
	     rdev->lldi.vr->cq.size);
	PDBG("udb %pR db_reg %p gts_reg %p "
	     "qpmask 0x%x cqmask 0x%x\n",
	     &rdev->lldi.pdev->resource[2],
	     rdev->lldi.db_reg, rdev->lldi.gts_reg,
	     rdev->qpmask, rdev->cqmask);

	if (c4iw_num_stags(rdev) == 0)
		return -EINVAL;

	rdev->stats.pd.total = T4_MAX_NUM_PD;
	rdev->stats.stag.total = rdev->lldi.vr->stag.size;
	rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
	rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
	rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
	rdev->stats.qid.total = rdev->lldi.vr->qp.size;

	err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing resources\n", err);
		return err;
	}
	err = c4iw_pblpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
		goto destroy_resource;
	}
	err = c4iw_rqtpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
		goto destroy_pblpool;
	}
	err = c4iw_ocqp_pool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
		goto destroy_rqtpool;
	}
	rdev->status_page = (struct t4_dev_status_page *)
			    __get_free_page(GFP_KERNEL);
	if (!rdev->status_page) {
		err = -ENOMEM;
		goto destroy_ocqp_pool;
	}
	rdev->status_page->qp_start = rdev->lldi.vr->qp.start;
	rdev->status_page->qp_size = rdev->lldi.vr->qp.size;
	rdev->status_page->cq_start = rdev->lldi.vr->cq.start;
	rdev->status_page->cq_size = rdev->lldi.vr->cq.size;

	if (c4iw_wr_log) {
		rdev->wr_log = kzalloc((1 << c4iw_wr_log_size_order) *
				       sizeof(*rdev->wr_log), GFP_KERNEL);
		if (rdev->wr_log) {
			rdev->wr_log_size = 1 << c4iw_wr_log_size_order;
			atomic_set(&rdev->wr_log_idx, 0);
		}
	}

	rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
	if (!rdev->free_workq) {
		err = -ENOMEM;
		goto err_free_status_page;
	}

	rdev->status_page->db_off = 0;

	return 0;
err_free_status_page:
	free_page((unsigned long)rdev->status_page);
destroy_ocqp_pool:
	c4iw_ocqp_pool_destroy(rdev);
destroy_rqtpool:
	c4iw_rqtpool_destroy(rdev);
destroy_pblpool:
	c4iw_pblpool_destroy(rdev);
destroy_resource:
	c4iw_destroy_resource(&rdev->resource);
	return err;
}
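/*
 * qpmask/cqmask sizing example: with udb_density == 128 user
 * doorbells per page, qpmask == 0x7f and qids are handed out in
 * aligned blocks of qpmask + 1 == 128.  That is why
 * c4iw_release_dev_ucontext() above only returns a block to the
 * qid_table when it sees the qid with (qid & qpmask) == 0, and
 * debits stats.qid.cur by qpmask + 1 for the whole block.
 */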
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
	destroy_workqueue(rdev->free_workq);
	kfree(rdev->wr_log);
	free_page((unsigned long)rdev->status_page);
	c4iw_pblpool_destroy(rdev);
	c4iw_rqtpool_destroy(rdev);
	c4iw_destroy_resource(&rdev->resource);
}
static void c4iw_dealloc(struct uld_ctx *ctx)
{
	c4iw_rdev_close(&ctx->dev->rdev);
	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->cqidr));
	idr_destroy(&ctx->dev->cqidr);
	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->qpidr));
	idr_destroy(&ctx->dev->qpidr);
	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->mmidr));
	idr_destroy(&ctx->dev->mmidr);
	wait_event(ctx->dev->wait, idr_is_empty(&ctx->dev->hwtid_idr));
	idr_destroy(&ctx->dev->hwtid_idr);
	idr_destroy(&ctx->dev->stid_idr);
	idr_destroy(&ctx->dev->atid_idr);
	if (ctx->dev->rdev.bar2_kva)
		iounmap(ctx->dev->rdev.bar2_kva);
	if (ctx->dev->rdev.oc_mw_kva)
		iounmap(ctx->dev->rdev.oc_mw_kva);
	ib_dealloc_device(&ctx->dev->ibdev);
	ctx->dev = NULL;
}
static void c4iw_remove(struct uld_ctx *ctx)
{
	PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
	c4iw_unregister_device(ctx->dev);
	c4iw_dealloc(ctx);
}
static int rdma_supported(const struct cxgb4_lld_info *infop)
{
	return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
	       infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
	       infop->vr->cq.size > 0;
}
static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
{
	struct c4iw_dev *devp;
	int ret;

	if (!rdma_supported(infop)) {
		printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n",
		       pci_name(infop->pdev));
		return ERR_PTR(-ENOSYS);
	}
	if (!ocqp_supported(infop))
		pr_info("%s: On-Chip Queues not supported on this device.\n",
			pci_name(infop->pdev));

	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
	if (!devp) {
		printk(KERN_ERR MOD "Cannot allocate ib device\n");
		return ERR_PTR(-ENOMEM);
	}
	devp->rdev.lldi = *infop;

	/* init various hw-queue params based on lld info */
	PDBG("%s: Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
	     __func__, devp->rdev.lldi.sge_ingpadboundary,
	     devp->rdev.lldi.sge_egrstatuspagesize);

	devp->rdev.hw_queue.t4_eq_status_entries =
		devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1;
	devp->rdev.hw_queue.t4_max_eq_size = 65520;
	devp->rdev.hw_queue.t4_max_iq_size = 65520;
	devp->rdev.hw_queue.t4_max_rq_size = 8192 -
		devp->rdev.hw_queue.t4_eq_status_entries - 1;
	devp->rdev.hw_queue.t4_max_sq_size =
		devp->rdev.hw_queue.t4_max_eq_size -
		devp->rdev.hw_queue.t4_eq_status_entries - 1;
	devp->rdev.hw_queue.t4_max_qp_depth =
		devp->rdev.hw_queue.t4_max_rq_size;
	devp->rdev.hw_queue.t4_max_cq_depth =
		devp->rdev.hw_queue.t4_max_iq_size - 2;
	devp->rdev.hw_queue.t4_stat_len =
		devp->rdev.lldi.sge_egrstatuspagesize;
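	/*
	 * Worked example of the sizing above: with an ingress padding
	 * boundary of 64 there is 1 egress-status entry, so
	 * t4_max_rq_size = 8192 - 1 - 1 = 8190 and
	 * t4_max_sq_size = 65520 - 1 - 1 = 65518; the IQ loses two
	 * slots to status, giving t4_max_cq_depth = 65520 - 2 = 65518.
	 */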
	/*
	 * For T5/T6 devices, we map all of BAR2 with WC.
	 * For T4 devices with onchip qp mem, we map only that part
	 * of BAR2 with WC.
	 */
	devp->rdev.bar2_pa = pci_resource_start(devp->rdev.lldi.pdev, 2);
	if (!is_t4(devp->rdev.lldi.adapter_type)) {
		devp->rdev.bar2_kva = ioremap_wc(devp->rdev.bar2_pa,
			pci_resource_len(devp->rdev.lldi.pdev, 2));
		if (!devp->rdev.bar2_kva) {
			pr_err(MOD "Unable to ioremap BAR2\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	} else if (ocqp_supported(infop)) {
		devp->rdev.oc_mw_pa =
			pci_resource_start(devp->rdev.lldi.pdev, 2) +
			pci_resource_len(devp->rdev.lldi.pdev, 2) -
			roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size);
		devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
			devp->rdev.lldi.vr->ocq.size);
		if (!devp->rdev.oc_mw_kva) {
			pr_err(MOD "Unable to ioremap onchip mem\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	}

	PDBG(KERN_INFO MOD "ocq memory: "
	     "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
	     devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
	     devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);

	ret = c4iw_rdev_open(&devp->rdev);
	if (ret) {
		printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
		ib_dealloc_device(&devp->ibdev);
		return ERR_PTR(ret);
	}

	idr_init(&devp->cqidr);
	idr_init(&devp->qpidr);
	idr_init(&devp->mmidr);
	idr_init(&devp->hwtid_idr);
	idr_init(&devp->stid_idr);
	idr_init(&devp->atid_idr);
	spin_lock_init(&devp->lock);
	mutex_init(&devp->rdev.stats.lock);
	mutex_init(&devp->db_mutex);
	INIT_LIST_HEAD(&devp->db_fc_list);
	init_waitqueue_head(&devp->wait);
	devp->avail_ird = devp->rdev.lldi.max_ird_adapter;

	if (c4iw_debugfs_root) {
		devp->debugfs_root = debugfs_create_dir(
					pci_name(devp->rdev.lldi.pdev),
					c4iw_debugfs_root);
		setup_debugfs(devp);
	}

	return devp;
}
static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
{
	struct uld_ctx *ctx;
	static int vers_printed;
	int i;

	if (!vers_printed++)
		pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
			DRV_VERSION);

	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx) {
		ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	ctx->lldi = *infop;

	PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
	     __func__, pci_name(ctx->lldi.pdev),
	     ctx->lldi.nchan, ctx->lldi.nrxq,
	     ctx->lldi.ntxq, ctx->lldi.nports);

	mutex_lock(&dev_mutex);
	list_add_tail(&ctx->entry, &uld_ctx_list);
	mutex_unlock(&dev_mutex);

	for (i = 0; i < ctx->lldi.nrxq; i++)
		PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
out:
	return ctx;
}
static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
						 const __be64 *rsp,
						 u32 pktshift)
{
	struct sk_buff *skb;

	/*
	 * Allocate space for cpl_pass_accept_req which will be synthesized by
	 * driver. Once the driver synthesizes the request the skb will go
	 * through the regular cpl_pass_accept_req processing.
	 * The math here assumes sizeof cpl_pass_accept_req >= sizeof
	 * cpl_rx_pkt.
	 */
	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
			sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
		  sizeof(struct rss_header) - pktshift);

	/*
	 * This skb will contain:
	 *   rss_header from the rspq descriptor (1 flit)
	 *   cpl_rx_pkt struct from the rspq descriptor (2 flits)
	 *   space for the difference between the size of an
	 *      rx_pkt and pass_accept_req cpl (1 flit)
	 *   the packet data from the gl
	 */
	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) +
				sizeof(struct rss_header));
	skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
				       sizeof(struct cpl_pass_accept_req),
				       gl->va,
				       gl->tot_len - pktshift);
	return skb;
}
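/*
 * The synthesized cpl_pass_accept_req path exists because a connection
 * request can arrive as a plain CPL_RX_PKT when it cannot be fully
 * offloaded in hardware (compare the TCAM_FULL counter in
 * stats_show()); recv_rx_pkt() below reshapes such a packet so it
 * flows through the normal passive-open handler.
 */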
static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
			      const __be64 *rsp)
{
	unsigned int opcode = *(u8 *)rsp;
	struct sk_buff *skb;

	if (opcode != CPL_RX_PKT)
		goto out;

	skb = copy_gl_to_skb_pkt(gl, rsp, dev->rdev.lldi.sge_pktshift);
	if (skb == NULL)
		goto out;

	if (c4iw_handlers[opcode] == NULL) {
		pr_info("%s no handler opcode 0x%x...\n", __func__,
			opcode);
		kfree_skb(skb);
		goto out;
	}
	c4iw_handlers[opcode](dev, skb);
	return 1;
out:
	return 0;
}
static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
			       const struct pkt_gl *gl)
{
	struct uld_ctx *ctx = handle;
	struct c4iw_dev *dev = ctx->dev;
	struct sk_buff *skb;
	u8 opcode;

	if (gl == NULL) {
		/* omit RSS and rsp_ctrl at end of descriptor */
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_skb(256, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		__skb_put(skb, len);
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else if (gl == CXGB4_MSG_AN) {
		const struct rsp_ctrl *rc = (void *)rsp;

		u32 qid = be32_to_cpu(rc->pldbuflen_qid);
		c4iw_ev_handler(dev, qid);
		return 0;
	} else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) {
		if (recv_rx_pkt(dev, gl, rsp))
			return 0;

		pr_info("%s: unexpected FL contents at %p, " \
			"RSS %#llx, FL %#llx, len %u\n",
			pci_name(ctx->lldi.pdev), gl->va,
			(unsigned long long)be64_to_cpu(*rsp),
			(unsigned long long)be64_to_cpu(
			*(__force __be64 *)gl->va),
			gl->tot_len);

		goto nomem;
	} else {
		skb = cxgb4_pktgl_to_skb(gl, 128, 128);
		if (unlikely(!skb))
			goto nomem;
	}

	opcode = *(u8 *)rsp;
	if (c4iw_handlers[opcode]) {
		c4iw_handlers[opcode](dev, skb);
	} else {
		pr_info("%s no handler opcode 0x%x...\n", __func__,
			opcode);
		kfree_skb(skb);
	}

	return 0;
nomem:
	return -1;
}
static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct uld_ctx *ctx = handle;

	PDBG("%s new_state %u\n", __func__, new_state);
	switch (new_state) {
	case CXGB4_STATE_UP:
		printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
		if (!ctx->dev) {
			int ret;

			ctx->dev = c4iw_alloc(&ctx->lldi);
			if (IS_ERR(ctx->dev)) {
				printk(KERN_ERR MOD
				       "%s: initialization failed: %ld\n",
				       pci_name(ctx->lldi.pdev),
				       PTR_ERR(ctx->dev));
				ctx->dev = NULL;
				break;
			}
			ret = c4iw_register_device(ctx->dev);
			if (ret) {
				printk(KERN_ERR MOD
				       "%s: RDMA registration failed: %d\n",
				       pci_name(ctx->lldi.pdev), ret);
				c4iw_dealloc(ctx);
			}
		}
		break;
	case CXGB4_STATE_DOWN:
		printk(KERN_INFO MOD "%s: Down\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	case CXGB4_STATE_START_RECOVERY:
		printk(KERN_INFO MOD "%s: Fatal Error\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev) {
			struct ib_event event;

			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
			memset(&event, 0, sizeof event);
			event.event  = IB_EVENT_DEVICE_FATAL;
			event.device = &ctx->dev->ibdev;
			ib_dispatch_event(&event);
			c4iw_remove(ctx);
		}
		break;
	case CXGB4_STATE_DETACH:
		printk(KERN_INFO MOD "%s: Detach\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	}
	return 0;
}
static int disable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_disable_wq_db(&qp->wq);
	return 0;
}
static void stop_queues(struct uld_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->dev->lock, flags);
	ctx->dev->rdev.stats.db_state_transitions++;
	ctx->dev->db_state = STOPPED;
	if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED)
		idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
	else
		ctx->dev->rdev.status_page->db_off = 1;
	spin_unlock_irqrestore(&ctx->dev->lock, flags);
}
static int enable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_enable_wq_db(&qp->wq);
	return 0;
}
static void resume_rc_qp(struct c4iw_qp *qp)
{
	spin_lock(&qp->lock);
	t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc, NULL);
	qp->wq.sq.wq_pidx_inc = 0;
	t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc, NULL);
	qp->wq.rq.wq_pidx_inc = 0;
	spin_unlock(&qp->lock);
}
static void resume_a_chunk(struct uld_ctx *ctx)
{
	int i;
	struct c4iw_qp *qp;

	for (i = 0; i < DB_FC_RESUME_SIZE; i++) {
		qp = list_first_entry(&ctx->dev->db_fc_list, struct c4iw_qp,
				      db_fc_entry);
		list_del_init(&qp->db_fc_entry);
		resume_rc_qp(qp);
		if (list_empty(&ctx->dev->db_fc_list))
			break;
	}
}
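/*
 * resume_queues() below works through db_fc_list in chunks of
 * DB_FC_RESUME_SIZE QPs, only ringing more doorbells while the
 * hardware doorbell FIFO sits below
 * dbfifo_int_thresh << DB_FC_DRAIN_THRESH, and sleeping for
 * DB_FC_RESUME_DELAY jiffies between chunks with the device lock
 * dropped.  If the state leaves FLOW_CONTROL while the lock is
 * dropped (e.g. another DB_FULL arrives), the resume is abandoned
 * and counted as a db_fc_interruption.
 */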
static void resume_queues(struct uld_ctx *ctx)
{
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->db_state != STOPPED)
		goto out;
	ctx->dev->db_state = FLOW_CONTROL;
	while (1) {
		if (list_empty(&ctx->dev->db_fc_list)) {
			WARN_ON(ctx->dev->db_state != FLOW_CONTROL);
			ctx->dev->db_state = NORMAL;
			ctx->dev->rdev.stats.db_state_transitions++;
			if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
				idr_for_each(&ctx->dev->qpidr, enable_qp_db,
					     NULL);
			} else {
				ctx->dev->rdev.status_page->db_off = 0;
			}
			break;
		} else {
			if (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1)
			    < (ctx->dev->rdev.lldi.dbfifo_int_thresh <<
			       DB_FC_DRAIN_THRESH)) {
				resume_a_chunk(ctx);
			}
			if (!list_empty(&ctx->dev->db_fc_list)) {
				spin_unlock_irq(&ctx->dev->lock);
				if (DB_FC_RESUME_DELAY) {
					set_current_state(TASK_UNINTERRUPTIBLE);
					schedule_timeout(DB_FC_RESUME_DELAY);
				}
				spin_lock_irq(&ctx->dev->lock);
				if (ctx->dev->db_state != FLOW_CONTROL)
					break;
			}
		}
	}
out:
	if (ctx->dev->db_state != NORMAL)
		ctx->dev->rdev.stats.db_fc_interruptions++;
	spin_unlock_irq(&ctx->dev->lock);
}
struct qp_list {
	unsigned idx;
	struct c4iw_qp **qps;
};
static int add_and_ref_qp(int id, void *p, void *data)
{
	struct qp_list *qp_listp = data;
	struct c4iw_qp *qp = p;

	c4iw_qp_add_ref(&qp->ibqp);
	qp_listp->qps[qp_listp->idx++] = qp;
	return 0;
}
static int count_qps(int id, void *p, void *data)
{
	unsigned *countp = data;

	(*countp)++;
	return 0;
}
static void deref_qps(struct qp_list *qp_list)
{
	int idx;

	for (idx = 0; idx < qp_list->idx; idx++)
		c4iw_qp_rem_ref(&qp_list->qps[idx]->ibqp);
}
static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
{
	int idx;
	int ret;

	for (idx = 0; idx < qp_list->idx; idx++) {
		struct c4iw_qp *qp = qp_list->qps[idx];

		spin_lock_irq(&qp->rhp->lock);
		spin_lock(&qp->lock);
		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.sq.qid,
					  t4_sq_host_wq_pidx(&qp->wq),
					  t4_sq_wq_size(&qp->wq));
		if (ret) {
			pr_err(MOD "%s: Fatal error - "
			       "DB overflow recovery failed - "
			       "error syncing SQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.sq.wq_pidx_inc = 0;

		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.rq.qid,
					  t4_rq_host_wq_pidx(&qp->wq),
					  t4_rq_wq_size(&qp->wq));

		if (ret) {
			pr_err(MOD "%s: Fatal error - "
			       "DB overflow recovery failed - "
			       "error syncing RQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.rq.wq_pidx_inc = 0;
		spin_unlock(&qp->lock);
		spin_unlock_irq(&qp->rhp->lock);

		/* Wait for the dbfifo to drain */
		while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(usecs_to_jiffies(10));
		}
	}
}
static void recover_queues(struct uld_ctx *ctx)
{
	int count = 0;
	struct qp_list qp_list;
	int ret;

	/* slow everybody down */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(usecs_to_jiffies(1000));

	/* flush the SGE contexts */
	ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
	if (ret) {
		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		return;
	}

	/* Count active queues so we can build a list of queues to recover */
	spin_lock_irq(&ctx->dev->lock);
	WARN_ON(ctx->dev->db_state != STOPPED);
	ctx->dev->db_state = RECOVERY;
	idr_for_each(&ctx->dev->qpidr, count_qps, &count);

	qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC);
	if (!qp_list.qps) {
		spin_unlock_irq(&ctx->dev->lock);
		return;
	}
	qp_list.idx = 0;

	/* add and ref each qp so it doesn't get freed */
	idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);

	spin_unlock_irq(&ctx->dev->lock);

	/* now traverse the list in a safe context to recover the db state*/
	recover_lost_dbs(ctx, &qp_list);

	/* we're almost done!  deref the qps and clean up */
	deref_qps(&qp_list);
	kfree(qp_list.qps);

	spin_lock_irq(&ctx->dev->lock);
	WARN_ON(ctx->dev->db_state != RECOVERY);
	ctx->dev->db_state = STOPPED;
	spin_unlock_irq(&ctx->dev->lock);
}
static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
{
	struct uld_ctx *ctx = handle;

	switch (control) {
	case CXGB4_CONTROL_DB_FULL:
		stop_queues(ctx);
		ctx->dev->rdev.stats.db_full++;
		break;
	case CXGB4_CONTROL_DB_EMPTY:
		resume_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_empty++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	case CXGB4_CONTROL_DB_DROP:
		recover_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_drop++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	default:
		printk(KERN_WARNING MOD "%s: unknown control cmd %u\n",
		       pci_name(ctx->lldi.pdev), control);
		break;
	}
	return 0;
}
static struct cxgb4_uld_info c4iw_uld_info = {
	.name = DRV_NAME,
	.nrxq = MAX_ULD_QSETS,
	.ntxq = MAX_ULD_QSETS,
	.rxq_size = 511,
	.ciq = true,
	.lro = false,
	.add = c4iw_uld_add,
	.rx_handler = c4iw_uld_rx_handler,
	.state_change = c4iw_uld_state_change,
	.control = c4iw_uld_control,
};
static int __init c4iw_init_module(void)
{
	int err;

	err = c4iw_cm_init();
	if (err)
		return err;

	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
	if (!c4iw_debugfs_root)
		printk(KERN_WARNING MOD
		       "could not create debugfs entry, continuing\n");

	cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);

	return 0;
}
static void __exit c4iw_exit_module(void)
{
	struct uld_ctx *ctx, *tmp;

	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
		if (ctx->dev)
			c4iw_remove(ctx);
		kfree(ctx);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
	c4iw_cm_term();
	debugfs_remove_recursive(c4iw_debugfs_root);
}

module_init(c4iw_init_module);
module_exit(c4iw_exit_module);