drivers/infiniband/hw/hfi1/init.c
/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <rdma/rdma_vt.h>

#include "hfi.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "mad.h"
#include "sdma.h"
#include "debugfs.h"
#include "verbs.h"
#include "aspm.h"
#include "affinity.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

/*
 * min buffers we want to have per context, after driver
 */
#define HFI1_MIN_USER_CTXT_BUFCNT 7

#define HFI1_MIN_HDRQ_EGRBUF_CNT 2
#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352
#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */

/*
 * Number of user receive contexts we are configured to use (to allow for more
 * pio buffers per ctxt, etc.)  Zero means use one user context per CPU.
 */
int num_user_contexts = -1;
module_param_named(num_user_contexts, num_user_contexts, uint, S_IRUGO);
MODULE_PARM_DESC(
        num_user_contexts, "Set max number of user contexts to use");

uint krcvqs[RXE_NUM_DATA_VL];
int krcvqsset;
module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");

/* computed based on above array */
unsigned long n_krcvqs;

static unsigned hfi1_rcvarr_split = 25;
module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");

static uint eager_buffer_size = (2 << 20); /* 2MB */
module_param(eager_buffer_size, uint, S_IRUGO);
MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 2MB");

static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");

static uint hfi1_hdrq_entsize = 32;
module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, S_IRUGO);
MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B (default), 32 - 128B");

unsigned int user_credit_return_threshold = 33; /* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits passes this many blocks (in percent of allocated blocks, 0 is off)");

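/*
 * Illustrative usage (values are examples only, not recommended settings):
 * all of the above are module parameters, set at load time and read-only
 * (S_IRUGO) afterwards, e.g.
 *
 *   modprobe hfi1 krcvqs=2,2,2 rcvhdrcnt=4096 eager_buffer_size=4194304
 */
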
static inline u64 encode_rcv_header_entry_size(u16);

static struct idr hfi1_unit_table;
u32 hfi1_cpulist_count;
unsigned long *hfi1_cpulist;

/*
 * Common code for creating the receive context array.
 */
int hfi1_create_ctxts(struct hfi1_devdata *dd)
{
        unsigned i;
        int ret;

        /* Control context must always be 0 */
        BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);

        dd->rcd = kzalloc_node(dd->num_rcv_contexts * sizeof(*dd->rcd),
                               GFP_KERNEL, dd->node);
        if (!dd->rcd)
                goto nomem;

        /* create one or more kernel contexts */
        for (i = 0; i < dd->first_user_ctxt; ++i) {
                struct hfi1_pportdata *ppd;
                struct hfi1_ctxtdata *rcd;

                ppd = dd->pport + (i % dd->num_pports);

                /* dd->rcd[i] gets assigned inside the callee */
                rcd = hfi1_create_ctxtdata(ppd, i, dd->node);
                if (!rcd) {
                        dd_dev_err(dd,
                                   "Unable to allocate kernel receive context, failing\n");
                        goto nomem;
                }
                /*
                 * Set up the kernel context flags here and now because they
                 * use default values for all receive side memories.  User
                 * contexts will be handled as they are created.
                 */
                rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
                        HFI1_CAP_KGET(NODROP_RHQ_FULL) |
                        HFI1_CAP_KGET(NODROP_EGR_FULL) |
                        HFI1_CAP_KGET(DMA_RTAIL);

                /* Control context must use DMA_RTAIL */
                if (rcd->ctxt == HFI1_CTRL_CTXT)
                        rcd->flags |= HFI1_CAP_DMA_RTAIL;
                rcd->seq_cnt = 1;

                rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
                if (!rcd->sc) {
                        dd_dev_err(dd,
                                   "Unable to allocate kernel send context, failing\n");
                        goto nomem;
                }

                ret = hfi1_init_ctxt(rcd->sc);
                if (ret < 0) {
                        dd_dev_err(dd,
                                   "Failed to setup kernel receive context, failing\n");
                        ret = -EFAULT;
                        goto bail;
                }
        }

        /*
         * Initialize aspm, to be done after gen3 transition and setting up
         * contexts and before enabling interrupts
         */
        aspm_init(dd);

        return 0;
nomem:
        ret = -ENOMEM;
bail:
        if (dd->rcd) {
                for (i = 0; i < dd->num_rcv_contexts; ++i)
                        hfi1_free_ctxtdata(dd, dd->rcd[i]);
        }
        kfree(dd->rcd);
        dd->rcd = NULL;
        return ret;
}

/*
 * Common code for user and kernel context setup.
 */
struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
                                           int numa)
{
        struct hfi1_devdata *dd = ppd->dd;
        struct hfi1_ctxtdata *rcd;
        unsigned kctxt_ngroups = 0;
        u32 base;

        if (dd->rcv_entries.nctxt_extra >
            dd->num_rcv_contexts - dd->first_user_ctxt)
                kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
                                 (dd->num_rcv_contexts - dd->first_user_ctxt));
        rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
        if (rcd) {
                u32 rcvtids, max_entries;

                hfi1_cdbg(PROC, "setting up context %u\n", ctxt);

                INIT_LIST_HEAD(&rcd->qp_wait_list);
                rcd->ppd = ppd;
                rcd->dd = dd;
                rcd->cnt = 1;
                rcd->ctxt = ctxt;
                dd->rcd[ctxt] = rcd;
                rcd->numa_id = numa;
                rcd->rcv_array_groups = dd->rcv_entries.ngroups;

                mutex_init(&rcd->exp_lock);

                /*
                 * Calculate the context's RcvArray entry starting point.
                 * We do this here because we have to take into account all
                 * the RcvArray entries that previous contexts would have
                 * taken, and we have to account for any extra groups
                 * assigned to the kernel or user contexts.
                 */
                if (ctxt < dd->first_user_ctxt) {
                        if (ctxt < kctxt_ngroups) {
                                base = ctxt * (dd->rcv_entries.ngroups + 1);
                                rcd->rcv_array_groups++;
                        } else
                                base = kctxt_ngroups +
                                        (ctxt * dd->rcv_entries.ngroups);
                } else {
                        u16 ct = ctxt - dd->first_user_ctxt;

                        base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
                                kctxt_ngroups);
                        if (ct < dd->rcv_entries.nctxt_extra) {
                                base += ct * (dd->rcv_entries.ngroups + 1);
                                rcd->rcv_array_groups++;
                        } else
                                base += dd->rcv_entries.nctxt_extra +
                                        (ct * dd->rcv_entries.ngroups);
                }
                rcd->eager_base = base * dd->rcv_entries.group_size;

                rcd->rcvhdrq_cnt = rcvhdrcnt;
                rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
                /*
                 * Simple Eager buffer allocation: we have already pre-allocated
                 * the number of RcvArray entry groups. Each ctxtdata structure
                 * holds the number of groups for that context.
                 *
                 * To follow CSR requirements and maintain cacheline alignment,
                 * make sure all sizes and bases are multiples of group_size.
                 *
                 * The expected entry count is what is left after assigning
                 * eager.
                 */
                max_entries = rcd->rcv_array_groups *
                        dd->rcv_entries.group_size;
                rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
                rcd->egrbufs.count = round_down(rcvtids,
                                                dd->rcv_entries.group_size);
                if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
                        dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
                                   rcd->ctxt);
                        rcd->egrbufs.count = MAX_EAGER_ENTRIES;
                }
                hfi1_cdbg(PROC,
                          "ctxt%u: max Eager buffer RcvArray entries: %u\n",
                          rcd->ctxt, rcd->egrbufs.count);

                /*
                 * Allocate array that will hold the eager buffer accounting
                 * data.
                 * This will allocate the maximum possible buffer count based
                 * on the value of the RcvArray split parameter.
                 * The resulting value will be rounded down to the closest
                 * multiple of dd->rcv_entries.group_size.
                 */
                rcd->egrbufs.buffers = kcalloc(rcd->egrbufs.count,
                                               sizeof(*rcd->egrbufs.buffers),
                                               GFP_KERNEL);
                if (!rcd->egrbufs.buffers)
                        goto bail;
                rcd->egrbufs.rcvtids = kcalloc(rcd->egrbufs.count,
                                               sizeof(*rcd->egrbufs.rcvtids),
                                               GFP_KERNEL);
                if (!rcd->egrbufs.rcvtids)
                        goto bail;
                rcd->egrbufs.size = eager_buffer_size;
                /*
                 * The size of the buffers programmed into the RcvArray
                 * entries needs to be big enough to handle the highest
                 * MTU supported.
                 */
                if (rcd->egrbufs.size < hfi1_max_mtu) {
                        rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
                        hfi1_cdbg(PROC,
                                  "ctxt%u: eager bufs size too small. Adjusting to %zu\n",
                                  rcd->ctxt, rcd->egrbufs.size);
                }
                rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;

                if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
                        rcd->opstats = kzalloc(sizeof(*rcd->opstats),
                                GFP_KERNEL);
                        if (!rcd->opstats)
                                goto bail;
                }
        }
        return rcd;
bail:
        dd->rcd[ctxt] = NULL;
        kfree(rcd->egrbufs.rcvtids);
        kfree(rcd->egrbufs.buffers);
        kfree(rcd);
        return NULL;
}
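
/*
 * Worked example of the eager/expected split above (illustrative numbers
 * only; the real group counts come from the chip setup): with the default
 * rcvarr_split of 25, a context owning rcv_array_groups = 64 groups of
 * group_size = 8 entries has max_entries = 512 and rcvtids = 512 * 25 / 100
 * = 128, and round_down(128, 8) leaves egrbufs.count = 128 eager entries;
 * the remaining 384 entries stay available for expected TIDs.
 */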

/*
 * Convert a receive header entry size to the encoding used in the CSR.
 *
 * Return a zero if the given size is invalid.
 */
static inline u64 encode_rcv_header_entry_size(u16 size)
{
        /* there are only 3 valid receive header entry sizes */
        if (size == 2)
                return 1;
        if (size == 16)
                return 2;
        if (size == 32)
                return 4;
        return 0; /* invalid */
}
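
/*
 * The encoded sizes are in DWORDs, matching the hdrq_entsize module
 * parameter description above: 2 DWORDs = 8B, 16 DWORDs = 64B and
 * 32 DWORDs = 128B receive header queue entries.
 */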

/*
 * Select the largest ccti value over all SLs to determine the intra-
 * packet gap for the link.
 *
 * called with cca_timer_lock held (to protect access to cca_timer
 * array), and rcu_read_lock() (to protect access to cc_state).
 */
void set_link_ipg(struct hfi1_pportdata *ppd)
{
        struct hfi1_devdata *dd = ppd->dd;
        struct cc_state *cc_state;
        int i;
        u16 cce, ccti_limit, max_ccti = 0;
        u16 shift, mult;
        u64 src;
        u32 current_egress_rate; /* Mbits/sec */
        u32 max_pkt_time;
        /*
         * max_pkt_time is the maximum packet egress time in units
         * of the fabric clock period 1/(805 MHz).
         */

        cc_state = get_cc_state(ppd);

        if (!cc_state)
                /*
                 * This should _never_ happen - rcu_read_lock() is held,
                 * and set_link_ipg() should not be called if cc_state
                 * is NULL.
                 */
                return;

        for (i = 0; i < OPA_MAX_SLS; i++) {
                u16 ccti = ppd->cca_timer[i].ccti;

                if (ccti > max_ccti)
                        max_ccti = ccti;
        }

        ccti_limit = cc_state->cct.ccti_limit;
        if (max_ccti > ccti_limit)
                max_ccti = ccti_limit;

        cce = cc_state->cct.entries[max_ccti].entry;
        shift = (cce & 0xc000) >> 14;
        mult = (cce & 0x3fff);

        current_egress_rate = active_egress_rate(ppd);

        max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);

        src = (max_pkt_time >> shift) * mult;

        src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
        src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;

        write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
}
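
/*
 * Illustrative decode of a CCT entry for the shift/mult split above
 * (example value, not taken from hardware): cce = 0xc010 yields
 * shift = (0xc010 & 0xc000) >> 14 = 3 and mult = 0xc010 & 0x3fff = 16,
 * so the programmed reload value is (max_pkt_time >> 3) * 16.
 */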

static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
{
        struct cca_timer *cca_timer;
        struct hfi1_pportdata *ppd;
        int sl;
        u16 ccti_timer, ccti_min;
        struct cc_state *cc_state;
        unsigned long flags;
        enum hrtimer_restart ret = HRTIMER_NORESTART;

        cca_timer = container_of(t, struct cca_timer, hrtimer);
        ppd = cca_timer->ppd;
        sl = cca_timer->sl;

        rcu_read_lock();

        cc_state = get_cc_state(ppd);

        if (!cc_state) {
                rcu_read_unlock();
                return HRTIMER_NORESTART;
        }

        /*
         * 1) decrement ccti for SL
         * 2) calculate IPG for link (set_link_ipg())
         * 3) restart timer, unless ccti is at min value
         */

        ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
        ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;

        spin_lock_irqsave(&ppd->cca_timer_lock, flags);

        if (cca_timer->ccti > ccti_min) {
                cca_timer->ccti--;
                set_link_ipg(ppd);
        }

        if (cca_timer->ccti > ccti_min) {
                unsigned long nsec = 1024 * ccti_timer;
                /* ccti_timer is in units of 1.024 usec */
                hrtimer_forward_now(t, ns_to_ktime(nsec));
                ret = HRTIMER_RESTART;
        }

        spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
        rcu_read_unlock();
        return ret;
}
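
/*
 * Since ccti_timer is in units of 1.024 usec, the nsec = 1024 * ccti_timer
 * computation above re-arms the timer by exactly that interval; e.g. an
 * (illustrative) ccti_timer of 100 forwards the hrtimer by 102,400 ns,
 * i.e. ~102.4 usec.
 */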

/*
 * Common code for initializing the physical port structure.
 */
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
                         struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
{
        int i;
        uint default_pkey_idx;
        struct cc_state *cc_state;

        ppd->dd = dd;
        ppd->hw_pidx = hw_pidx;
        ppd->port = port; /* IB port number, not index */

        default_pkey_idx = 1;

        ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
        if (loopback) {
                hfi1_early_err(&pdev->dev,
                               "Faking data partition 0x8001 in idx %u\n",
                               !default_pkey_idx);
                ppd->pkeys[!default_pkey_idx] = 0x8001;
        }

        INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
        INIT_WORK(&ppd->link_up_work, handle_link_up);
        INIT_WORK(&ppd->link_down_work, handle_link_down);
        INIT_WORK(&ppd->freeze_work, handle_freeze);
        INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
        INIT_WORK(&ppd->sma_message_work, handle_sma_message);
        INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
        INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
        INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
        INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);

        mutex_init(&ppd->hls_lock);
        spin_lock_init(&ppd->qsfp_info.qsfp_lock);

        ppd->qsfp_info.ppd = ppd;
        ppd->sm_trap_qp = 0x0;
        ppd->sa_qp = 0x1;

        ppd->hfi1_wq = NULL;

        spin_lock_init(&ppd->cca_timer_lock);

        for (i = 0; i < OPA_MAX_SLS; i++) {
                hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
                             HRTIMER_MODE_REL);
                ppd->cca_timer[i].ppd = ppd;
                ppd->cca_timer[i].sl = i;
                ppd->cca_timer[i].ccti = 0;
                ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
        }

        ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;

        spin_lock_init(&ppd->cc_state_lock);
        spin_lock_init(&ppd->cc_log_lock);
        cc_state = kzalloc(sizeof(*cc_state), GFP_KERNEL);
        RCU_INIT_POINTER(ppd->cc_state, cc_state);
        if (!cc_state)
                goto bail;
        return;

bail:
        hfi1_early_err(&pdev->dev,
                       "Congestion Control Agent disabled for port %d\n", port);
}

/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct hfi1_devdata *dd)
{
        return 0;
}

/**
 * init_after_reset - re-initialize after a reset
 * @dd: the hfi1_ib device
 *
 * Sanity check at least some of the values after reset, and
 * ensure no receive or transmit (explicitly, in case the reset
 * failed).
 */
static int init_after_reset(struct hfi1_devdata *dd)
{
        int i;

        /*
         * Ensure chip does no sends or receives, tail updates, or
         * pioavail updates while we re-initialize.  This is mostly
         * for the driver data structures, not chip registers.
         */
        for (i = 0; i < dd->num_rcv_contexts; i++)
                hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
                                  HFI1_RCVCTRL_INTRAVAIL_DIS |
                                  HFI1_RCVCTRL_TAILUPD_DIS, i);
        pio_send_control(dd, PSC_GLOBAL_DISABLE);
        for (i = 0; i < dd->num_send_contexts; i++)
                sc_disable(dd->send_contexts[i].sc);

        return 0;
}

static void enable_chip(struct hfi1_devdata *dd)
{
        u32 rcvmask;
        u32 i;

        /* enable PIO send */
        pio_send_control(dd, PSC_GLOBAL_ENABLE);

        /*
         * Enable kernel ctxts' receive and receive interrupt.
         * Other ctxts done as user opens and initializes them.
         */
        for (i = 0; i < dd->first_user_ctxt; ++i) {
                rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
                rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
                        HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
                if (!HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, MULTI_PKT_EGR))
                        rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
                if (HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, NODROP_RHQ_FULL))
                        rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
                if (HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, NODROP_EGR_FULL))
                        rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
                hfi1_rcvctrl(dd, rcvmask, i);
                sc_enable(dd->rcd[i]->sc);
        }
}

/**
 * create_workqueues - create per port workqueues
 * @dd: the hfi1_ib device
 */
static int create_workqueues(struct hfi1_devdata *dd)
{
        int pidx;
        struct hfi1_pportdata *ppd;

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                if (!ppd->hfi1_wq) {
                        ppd->hfi1_wq =
                                alloc_workqueue(
                                    "hfi%d_%d",
                                    WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
                                    dd->num_sdma,
                                    dd->unit, pidx);
                        if (!ppd->hfi1_wq)
                                goto wq_error;
                }
        }
        return 0;
wq_error:
        pr_err("alloc_workqueue failed for port %d\n", pidx + 1);
        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                if (ppd->hfi1_wq) {
                        destroy_workqueue(ppd->hfi1_wq);
                        ppd->hfi1_wq = NULL;
                }
        }
        return -ENOMEM;
}

/**
 * hfi1_init - do the actual initialization sequence on the chip
 * @dd: the hfi1_ib device
 * @reinit: re-initializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because even
 * without memory allocation, we need to re-write all the chip registers
 * TIDs, etc. after the reset or enable has completed.
 */
int hfi1_init(struct hfi1_devdata *dd, int reinit)
{
        int ret = 0, pidx, lastfail = 0;
        unsigned i, len;
        struct hfi1_ctxtdata *rcd;
        struct hfi1_pportdata *ppd;

        /* Set up recv low level handlers */
        dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EXPECTED] =
                                                kdeth_process_expected;
        dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_EAGER] =
                                                kdeth_process_eager;
        dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_IB] = process_receive_ib;
        dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_ERROR] =
                                                process_receive_error;
        dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_BYPASS] =
                                                process_receive_bypass;
        dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID5] =
                                                process_receive_invalid;
        dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID6] =
                                                process_receive_invalid;
        dd->normal_rhf_rcv_functions[RHF_RCV_TYPE_INVALID7] =
                                                process_receive_invalid;
        dd->rhf_rcv_function_map = dd->normal_rhf_rcv_functions;

        /* Set up send low level handlers */
        dd->process_pio_send = hfi1_verbs_send_pio;
        dd->process_dma_send = hfi1_verbs_send_dma;
        dd->pio_inline_send = pio_copy;

        if (is_ax(dd)) {
                atomic_set(&dd->drop_packet, DROP_PACKET_ON);
                dd->do_drop = 1;
        } else {
                atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
                dd->do_drop = 0;
        }

        /* make sure the link is not "up" */
        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                ppd->linkup = 0;
        }

        if (reinit)
                ret = init_after_reset(dd);
        else
                ret = loadtime_init(dd);
        if (ret)
                goto done;

        /* allocate dummy tail memory for all receive contexts */
        dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent(
                &dd->pcidev->dev, sizeof(u64),
                &dd->rcvhdrtail_dummy_dma,
                GFP_KERNEL);

        if (!dd->rcvhdrtail_dummy_kvaddr) {
                dd_dev_err(dd, "cannot allocate dummy tail memory\n");
                ret = -ENOMEM;
                goto done;
        }

        /* dd->rcd can be NULL if early initialization failed */
        for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
                /*
                 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
                 * re-init, the simplest way to handle this is to free
                 * existing, and re-allocate.
                 * Need to re-create rest of ctxt 0 ctxtdata as well.
                 */
                rcd = dd->rcd[i];
                if (!rcd)
                        continue;

                rcd->do_interrupt = &handle_receive_interrupt;

                lastfail = hfi1_create_rcvhdrq(dd, rcd);
                if (!lastfail)
                        lastfail = hfi1_setup_eagerbufs(rcd);
                if (lastfail) {
                        dd_dev_err(dd,
                                   "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
                        ret = lastfail;
                }
        }

        /* Allocate enough memory for user event notification. */
        len = PAGE_ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS *
                         sizeof(*dd->events));
        dd->events = vmalloc_user(len);
        if (!dd->events)
                dd_dev_err(dd, "Failed to allocate user events page\n");
        /*
         * Allocate a page for device and port status.
         * Page will be shared amongst all user processes.
         */
        dd->status = vmalloc_user(PAGE_SIZE);
        if (!dd->status)
                dd_dev_err(dd, "Failed to allocate dev status page\n");
        else
                dd->freezelen = PAGE_SIZE - (sizeof(*dd->status) -
                                             sizeof(dd->status->freezemsg));
        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                if (dd->status)
                        /* Currently, we only have one port */
                        ppd->statusp = &dd->status->port;

                set_mtu(ppd);
        }

        /* enable chip even if we have an error, so we can debug cause */
        enable_chip(dd);

done:
        /*
         * Set status even if port serdes is not initialized
         * so that diags will work.
         */
        if (dd->status)
                dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
                        HFI1_STATUS_INITTED;
        if (!ret) {
                /* enable all interrupts from the chip */
                set_intr_state(dd, 1);

                /* chip is OK for user apps; mark it as initialized */
                for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                        ppd = dd->pport + pidx;

                        /*
                         * start the serdes - must be after interrupts are
                         * enabled so we are notified when the link goes up
                         */
                        lastfail = bringup_serdes(ppd);
                        if (lastfail)
                                dd_dev_info(dd,
                                            "Failed to bring up port %u\n",
                                            ppd->port);

                        /*
                         * Set status even if port serdes is not initialized
                         * so that diags will work.
                         */
                        if (ppd->statusp)
                                *ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
                                                        HFI1_STATUS_INITTED;
                        if (!ppd->link_speed_enabled)
                                continue;
                }
        }

        /* if ret is non-zero, we probably should do some cleanup here... */
        return ret;
}

static inline struct hfi1_devdata *__hfi1_lookup(int unit)
{
        return idr_find(&hfi1_unit_table, unit);
}

struct hfi1_devdata *hfi1_lookup(int unit)
{
        struct hfi1_devdata *dd;
        unsigned long flags;

        spin_lock_irqsave(&hfi1_devs_lock, flags);
        dd = __hfi1_lookup(unit);
        spin_unlock_irqrestore(&hfi1_devs_lock, flags);

        return dd;
}

/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */
static void stop_timers(struct hfi1_devdata *dd)
{
        struct hfi1_pportdata *ppd;
        int pidx;

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                if (ppd->led_override_timer.data) {
                        del_timer_sync(&ppd->led_override_timer);
                        atomic_set(&ppd->led_override_timer_active, 0);
                }
        }
}

/**
 * shutdown_device - shut down a device
 * @dd: the hfi1_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be setup again by hfi1_init(dd, 1)
 */
static void shutdown_device(struct hfi1_devdata *dd)
{
        struct hfi1_pportdata *ppd;
        unsigned pidx;
        int i;

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;

                ppd->linkup = 0;
                if (ppd->statusp)
                        *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
                                           HFI1_STATUS_IB_READY);
        }
        dd->flags &= ~HFI1_INITTED;

        /* mask interrupts, but not errors */
        set_intr_state(dd, 0);

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                for (i = 0; i < dd->num_rcv_contexts; i++)
                        hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
                                          HFI1_RCVCTRL_CTXT_DIS |
                                          HFI1_RCVCTRL_INTRAVAIL_DIS |
                                          HFI1_RCVCTRL_PKEY_DIS |
                                          HFI1_RCVCTRL_ONE_PKT_EGR_DIS, i);
                /*
                 * Gracefully stop all sends allowing any in progress to
                 * trickle out first.
                 */
                for (i = 0; i < dd->num_send_contexts; i++)
                        sc_flush(dd->send_contexts[i].sc);
        }

        /*
         * Enough for anything that's going to trickle out to have actually
         * done so.
         */
        udelay(20);

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;

                /* disable all contexts */
                for (i = 0; i < dd->num_send_contexts; i++)
                        sc_disable(dd->send_contexts[i].sc);
                /* disable the send device */
                pio_send_control(dd, PSC_GLOBAL_DISABLE);

                shutdown_led_override(ppd);

                /*
                 * Clear SerdesEnable.
                 * We can't count on interrupts since we are stopping.
                 */
                hfi1_quiet_serdes(ppd);

                if (ppd->hfi1_wq) {
                        destroy_workqueue(ppd->hfi1_wq);
                        ppd->hfi1_wq = NULL;
                }
        }
        sdma_exit(dd);
}

/**
 * hfi1_free_ctxtdata - free a context's allocated data
 * @dd: the hfi1_ib device
 * @rcd: the ctxtdata structure
 *
 * free up any allocated data for a context
 * This should not touch anything that would affect a simultaneous
 * re-allocation of context data, because it is called after hfi1_mutex
 * is released (and can be called from reinit as well).
 * It should never change any chip state, or global driver state.
 */
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
        unsigned e;

        if (!rcd)
                return;

        if (rcd->rcvhdrq) {
                dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
                                  rcd->rcvhdrq, rcd->rcvhdrq_dma);
                rcd->rcvhdrq = NULL;
                if (rcd->rcvhdrtail_kvaddr) {
                        dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
                                          (void *)rcd->rcvhdrtail_kvaddr,
                                          rcd->rcvhdrqtailaddr_dma);
                        rcd->rcvhdrtail_kvaddr = NULL;
                }
        }

        /* all the RcvArray entries should have been cleared by now */
        kfree(rcd->egrbufs.rcvtids);

        for (e = 0; e < rcd->egrbufs.alloced; e++) {
                if (rcd->egrbufs.buffers[e].dma)
                        dma_free_coherent(&dd->pcidev->dev,
                                          rcd->egrbufs.buffers[e].len,
                                          rcd->egrbufs.buffers[e].addr,
                                          rcd->egrbufs.buffers[e].dma);
        }
        kfree(rcd->egrbufs.buffers);

        sc_free(rcd->sc);
        vfree(rcd->user_event_mask);
        vfree(rcd->subctxt_uregbase);
        vfree(rcd->subctxt_rcvegrbuf);
        vfree(rcd->subctxt_rcvhdr_base);
        kfree(rcd->opstats);
        kfree(rcd);
}

/*
 * Release our hold on the shared asic data.  If we are the last one,
 * return the structure to be finalized outside the lock.  Must be
 * holding hfi1_devs_lock.
 */
static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd)
{
        struct hfi1_asic_data *ad;
        int other;

        if (!dd->asic_data)
                return NULL;
        dd->asic_data->dds[dd->hfi1_id] = NULL;
        other = dd->hfi1_id ? 0 : 1;
        ad = dd->asic_data;
        dd->asic_data = NULL;
        /* return NULL if the other dd still has a link */
        return ad->dds[other] ? NULL : ad;
}

static void finalize_asic_data(struct hfi1_devdata *dd,
                               struct hfi1_asic_data *ad)
{
        clean_up_i2c(dd, ad);
        kfree(ad);
}

static void __hfi1_free_devdata(struct kobject *kobj)
{
        struct hfi1_devdata *dd =
                container_of(kobj, struct hfi1_devdata, kobj);
        struct hfi1_asic_data *ad;
        unsigned long flags;

        spin_lock_irqsave(&hfi1_devs_lock, flags);
        idr_remove(&hfi1_unit_table, dd->unit);
        list_del(&dd->list);
        ad = release_asic_data(dd);
        spin_unlock_irqrestore(&hfi1_devs_lock, flags);
        if (ad)
                finalize_asic_data(dd, ad);
        free_platform_config(dd);
        rcu_barrier(); /* wait for rcu callbacks to complete */
        free_percpu(dd->int_counter);
        free_percpu(dd->rcv_limit);
        free_percpu(dd->send_schedule);
        rvt_dealloc_device(&dd->verbs_dev.rdi);
}

static struct kobj_type hfi1_devdata_type = {
        .release = __hfi1_free_devdata,
};

void hfi1_free_devdata(struct hfi1_devdata *dd)
{
        kobject_put(&dd->kobj);
}

/*
 * Allocate our primary per-unit data structure.  Must be done via verbs
 * allocator, because the verbs cleanup process both does cleanup and
 * free of the data structure.
 * "extra" is for chip-specific data.
 *
 * Use the idr mechanism to get a unit number for this unit.
 */
struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
{
        unsigned long flags;
        struct hfi1_devdata *dd;
        int ret, nports;

        /* extra is * number of ports */
        nports = extra / sizeof(struct hfi1_pportdata);

        dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
                                                     nports);
        if (!dd)
                return ERR_PTR(-ENOMEM);
        dd->num_pports = nports;
        dd->pport = (struct hfi1_pportdata *)(dd + 1);

        INIT_LIST_HEAD(&dd->list);
        idr_preload(GFP_KERNEL);
        spin_lock_irqsave(&hfi1_devs_lock, flags);

        ret = idr_alloc(&hfi1_unit_table, dd, 0, 0, GFP_NOWAIT);
        if (ret >= 0) {
                dd->unit = ret;
                list_add(&dd->list, &hfi1_dev_list);
        }

        spin_unlock_irqrestore(&hfi1_devs_lock, flags);
        idr_preload_end();

        if (ret < 0) {
                hfi1_early_err(&pdev->dev,
                               "Could not allocate unit ID: error %d\n", -ret);
                goto bail;
        }
        /*
         * Initialize all locks for the device. This needs to be as early as
         * possible so locks are usable.
         */
        spin_lock_init(&dd->sc_lock);
        spin_lock_init(&dd->sendctrl_lock);
        spin_lock_init(&dd->rcvctrl_lock);
        spin_lock_init(&dd->uctxt_lock);
        spin_lock_init(&dd->hfi1_diag_trans_lock);
        spin_lock_init(&dd->sc_init_lock);
        spin_lock_init(&dd->dc8051_lock);
        spin_lock_init(&dd->dc8051_memlock);
        seqlock_init(&dd->sc2vl_lock);
        spin_lock_init(&dd->sde_map_lock);
        spin_lock_init(&dd->pio_map_lock);
        init_waitqueue_head(&dd->event_queue);

        dd->int_counter = alloc_percpu(u64);
        if (!dd->int_counter) {
                ret = -ENOMEM;
                hfi1_early_err(&pdev->dev,
                               "Could not allocate per-cpu int_counter\n");
                goto bail;
        }

        dd->rcv_limit = alloc_percpu(u64);
        if (!dd->rcv_limit) {
                ret = -ENOMEM;
                hfi1_early_err(&pdev->dev,
                               "Could not allocate per-cpu rcv_limit\n");
                goto bail;
        }

        dd->send_schedule = alloc_percpu(u64);
        if (!dd->send_schedule) {
                ret = -ENOMEM;
                hfi1_early_err(&pdev->dev,
                               "Could not allocate per-cpu send_schedule\n");
                goto bail;
        }

        if (!hfi1_cpulist_count) {
                u32 count = num_online_cpus();

                hfi1_cpulist = kcalloc(BITS_TO_LONGS(count), sizeof(long),
                                       GFP_KERNEL);
                if (hfi1_cpulist)
                        hfi1_cpulist_count = count;
                else
                        hfi1_early_err(
                                &pdev->dev,
                                "Could not alloc cpulist info, cpu affinity might be wrong\n");
        }
        kobject_init(&dd->kobj, &hfi1_devdata_type);
        return dd;

bail:
        if (!list_empty(&dd->list))
                list_del_init(&dd->list);
        rvt_dealloc_device(&dd->verbs_dev.rdi);
        return ERR_PTR(ret);
}
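
/*
 * Note on the allocation pattern in hfi1_alloc_devdata(): idr_preload()
 * with GFP_KERNEL pre-allocates idr nodes outside the spinlock, so the
 * idr_alloc() call made under hfi1_devs_lock can safely use GFP_NOWAIT;
 * idr_preload_end() then releases the preload state once the lock is
 * dropped.
 */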

/*
 * Called from freeze mode handlers, and from PCI error
 * reporting code.  Should be paranoid about state of
 * system and data structures.
 */
void hfi1_disable_after_error(struct hfi1_devdata *dd)
{
        if (dd->flags & HFI1_INITTED) {
                u32 pidx;

                dd->flags &= ~HFI1_INITTED;
                if (dd->pport)
                        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                                struct hfi1_pportdata *ppd;

                                ppd = dd->pport + pidx;
                                if (dd->flags & HFI1_PRESENT)
                                        set_link_state(ppd, HLS_DN_DISABLE);

                                if (ppd->statusp)
                                        *ppd->statusp &= ~HFI1_STATUS_IB_READY;
                        }
        }

        /*
         * Mark as having had an error for driver, and also
         * for /sys and status word mapped to user programs.
         * This marks unit as not usable, until reset.
         */
        if (dd->status)
                dd->status->dev |= HFI1_STATUS_HWERROR;
}

static void remove_one(struct pci_dev *);
static int init_one(struct pci_dev *, const struct pci_device_id *);

#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
#define PFX DRIVER_NAME ": "

const struct pci_device_id hfi1_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl);

static struct pci_driver hfi1_pci_driver = {
        .name = DRIVER_NAME,
        .probe = init_one,
        .remove = remove_one,
        .id_table = hfi1_pci_tbl,
        .err_handler = &hfi1_pci_err_handler,
};

static void __init compute_krcvqs(void)
{
        int i;

        for (i = 0; i < krcvqsset; i++)
                n_krcvqs += krcvqs[i];
}
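
/*
 * Example (illustrative): loading with krcvqs=2,2,2 makes krcvqsset = 3,
 * so compute_krcvqs() leaves n_krcvqs = 2 + 2 + 2 = 6 non-control kernel
 * receive queues spread across the first three data VLs.
 */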

/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */
static int __init hfi1_mod_init(void)
{
        int ret;

        ret = dev_init();
        if (ret)
                goto bail;

        ret = node_affinity_init();
        if (ret)
                goto bail;

        /* validate max MTU before any devices start */
        if (!valid_opa_max_mtu(hfi1_max_mtu)) {
                pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
                       hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU);
                hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
        }
        /* valid CUs run from 1-128 in powers of 2 */
        if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu))
                hfi1_cu = 1;
        /* valid credit return threshold is 0-100, variable is unsigned */
        if (user_credit_return_threshold > 100)
                user_credit_return_threshold = 100;

        compute_krcvqs();
        /*
         * Sanitize the receive interrupt count; sanitizing the timeout
         * must wait until after the hardware type is known.
         */
        if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK)
                rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK;
        /* reject invalid combinations */
        if (rcv_intr_count == 0 && rcv_intr_timeout == 0) {
                pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n");
                rcv_intr_count = 1;
        }
        if (rcv_intr_count > 1 && rcv_intr_timeout == 0) {
                /*
                 * Avoid indefinite packet delivery by requiring a timeout
                 * if count is > 1.
                 */
                pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n");
                rcv_intr_timeout = 1;
        }
        if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) {
                /*
                 * The dynamic algorithm expects a non-zero timeout
                 * and a count > 1.
                 */
                pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n");
                rcv_intr_dynamic = 0;
        }

        /* sanitize link CRC options */
        link_crc_mask &= SUPPORTED_CRCS;

        /*
         * These must be called before the driver is registered with
         * the PCI subsystem.
         */
        idr_init(&hfi1_unit_table);

        hfi1_dbg_init();
        ret = hfi1_wss_init();
        if (ret < 0)
                goto bail_wss;
        ret = pci_register_driver(&hfi1_pci_driver);
        if (ret < 0) {
                pr_err("Unable to register driver: error %d\n", -ret);
                goto bail_dev;
        }
        goto bail; /* all OK */

bail_dev:
        hfi1_wss_exit();
bail_wss:
        hfi1_dbg_exit();
        idr_destroy(&hfi1_unit_table);
        dev_cleanup();
bail:
        return ret;
}

module_init(hfi1_mod_init);

/*
 * Do the non-unit driver cleanup, memory free, etc. at unload.
 */
static void __exit hfi1_mod_cleanup(void)
{
        pci_unregister_driver(&hfi1_pci_driver);
        node_affinity_destroy();
        hfi1_wss_exit();
        hfi1_dbg_exit();
        hfi1_cpulist_count = 0;
        kfree(hfi1_cpulist);

        idr_destroy(&hfi1_unit_table);
        dispose_firmware();     /* asymmetric with obtain_firmware() */
        dev_cleanup();
}

module_exit(hfi1_mod_cleanup);

/* this can only be called after a successful initialization */
static void cleanup_device_data(struct hfi1_devdata *dd)
{
        int ctxt;
        int pidx;
        struct hfi1_ctxtdata **tmp;
        unsigned long flags;

        /* users can't do anything more with chip */
        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                struct hfi1_pportdata *ppd = &dd->pport[pidx];
                struct cc_state *cc_state;
                int i;

                if (ppd->statusp)
                        *ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT;

                for (i = 0; i < OPA_MAX_SLS; i++)
                        hrtimer_cancel(&ppd->cca_timer[i].hrtimer);

                spin_lock(&ppd->cc_state_lock);
                cc_state = get_cc_state_protected(ppd);
                RCU_INIT_POINTER(ppd->cc_state, NULL);
                spin_unlock(&ppd->cc_state_lock);

                if (cc_state)
                        kfree_rcu(cc_state, rcu);
        }

        free_credit_return(dd);

        /*
         * Free any resources still in use (usually just kernel contexts)
         * at unload; we iterate over ctxtcnt, because that's what we
         * allocate.  We acquire the lock to be really paranoid that rcd
         * isn't being accessed from some interrupt-related code (that
         * should not happen, but best to be sure).
         */
        spin_lock_irqsave(&dd->uctxt_lock, flags);
        tmp = dd->rcd;
        dd->rcd = NULL;
        spin_unlock_irqrestore(&dd->uctxt_lock, flags);

        if (dd->rcvhdrtail_dummy_kvaddr) {
                dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
                                  (void *)dd->rcvhdrtail_dummy_kvaddr,
                                  dd->rcvhdrtail_dummy_dma);
                dd->rcvhdrtail_dummy_kvaddr = NULL;
        }

        for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) {
                struct hfi1_ctxtdata *rcd = tmp[ctxt];

                tmp[ctxt] = NULL; /* debugging paranoia */
                if (rcd) {
                        hfi1_clear_tids(rcd);
                        hfi1_free_ctxtdata(dd, rcd);
                }
        }
        kfree(tmp);
        free_pio_map(dd);
        /* must follow rcv context free - need to remove rcv's hooks */
        for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
                sc_free(dd->send_contexts[ctxt].sc);
        dd->num_send_contexts = 0;
        kfree(dd->send_contexts);
        dd->send_contexts = NULL;
        kfree(dd->hw_to_sw);
        dd->hw_to_sw = NULL;
        kfree(dd->boardname);
        vfree(dd->events);
        vfree(dd->status);
}

/*
 * Clean up on unit shutdown, or error during unit load after
 * successful initialization.
 */
static void postinit_cleanup(struct hfi1_devdata *dd)
{
        hfi1_start_cleanup(dd);

        hfi1_pcie_ddcleanup(dd);
        hfi1_pcie_cleanup(dd->pcidev);

        cleanup_device_data(dd);

        hfi1_free_devdata(dd);
}

static int init_validate_rcvhdrcnt(struct device *dev, uint thecnt)
{
        if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
                hfi1_early_err(dev, "Receive header queue count too small\n");
                return -EINVAL;
        }

        if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
                hfi1_early_err(dev,
                               "Receive header queue count cannot be greater than %u\n",
                               HFI1_MAX_HDRQ_EGRBUF_CNT);
                return -EINVAL;
        }

        if (thecnt % HDRQ_INCREMENT) {
                hfi1_early_err(dev, "Receive header queue count %d must be divisible by %lu\n",
                               thecnt, HDRQ_INCREMENT);
                return -EINVAL;
        }

        return 0;
}
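
/*
 * Example (illustrative): the default rcvhdrcnt of 2048 passes all three
 * checks above - it is greater than HFI1_MIN_HDRQ_EGRBUF_CNT (2), no
 * larger than HFI1_MAX_HDRQ_EGRBUF_CNT (16352), and a multiple of
 * HDRQ_INCREMENT (defined in the chip headers).
 */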
1417
1418 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1419 {
1420         int ret = 0, j, pidx, initfail;
1421         struct hfi1_devdata *dd;
1422         struct hfi1_pportdata *ppd;
1423
1424         /* First, lock the non-writable module parameters */
1425         HFI1_CAP_LOCK();
1426
1427         /* Validate some global module parameters */
1428         ret = init_validate_rcvhdrcnt(&pdev->dev, rcvhdrcnt);
1429         if (ret)
1430                 goto bail;
1431
1432         /* use the encoding function as a sanitization check */
1433         if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
1434                 hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",
1435                                hfi1_hdrq_entsize);
1436                 ret = -EINVAL;
1437                 goto bail;
1438         }
1439
1440         /* The receive eager buffer size must be set before the receive
1441          * contexts are created.
1442          *
1443          * Round the configured size up to a power of 2 and clamp it
1444          * to the range allowed by the hardware - all powers of 2
1445          * between the min and max.  The maximum valid MTU is within
1446          * the eager buffer range, so we do not need to cap max_mtu
1447          * by an eager buffer size setting.
1448          */
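        /*
         * For example, a module parameter of 100000 is rounded up to
         * 131072 (2^17) and then clamped into the
         * [MIN_EAGER_BUFFER * 8, MAX_EAGER_BUFFER_TOTAL] range; both
         * bounds are hardware limits defined elsewhere in the driver.
         */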
1449         if (eager_buffer_size) {
1450                 if (!is_power_of_2(eager_buffer_size))
1451                         eager_buffer_size =
1452                                 roundup_pow_of_two(eager_buffer_size);
1453                 eager_buffer_size =
1454                         clamp_val(eager_buffer_size,
1455                                   MIN_EAGER_BUFFER * 8,
1456                                   MAX_EAGER_BUFFER_TOTAL);
1457                 hfi1_early_info(&pdev->dev, "Eager buffer size %u\n",
1458                                 eager_buffer_size);
1459         } else {
1460                 hfi1_early_err(&pdev->dev, "Invalid Eager buffer size of 0\n");
1461                 ret = -EINVAL;
1462                 goto bail;
1463         }
1464
1465         /* restrict value of hfi1_rcvarr_split */
1466         hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);
1467
1468         ret = hfi1_pcie_init(pdev, ent);
1469         if (ret)
1470                 goto bail;
1471
1472         if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
1473               ent->device == PCI_DEVICE_ID_INTEL1)) {
1474                 hfi1_early_err(&pdev->dev,
1475                                "Failing on unknown Intel deviceid 0x%x\n",
1476                                ent->device);
1477                 ret = -ENODEV;
1478                 goto clean_bail;
1479         }
1480
1481         /*
1482          * Do device-specific initialization, function table setup, dd
1483          * allocation, etc.
1484          */
1485         dd = hfi1_init_dd(pdev, ent);
1486
1487         if (IS_ERR(dd)) {
1488                 ret = PTR_ERR(dd);
1489                 goto clean_bail; /* error already printed */
1490         }
1491
1492         ret = create_workqueues(dd);
1493         if (ret)
1494                 goto clean_bail;
1495
1496         /* do the generic initialization */
1497         initfail = hfi1_init(dd, 0);
1498
1499         ret = hfi1_register_ib_device(dd);
1500
1501         /*
1502          * Now ready for use.  This should be cleared whenever we
1503          * detect a reset, or initiate one.  If there was an earlier
1504          * failure, we still create devices so diags, etc., can be
1505          * used to determine the cause of the problem.
1506          */
1507         if (!initfail && !ret) {
1508                 dd->flags |= HFI1_INITTED;
1509                 /* create debugfs files after init and ib register */
1510                 hfi1_dbg_ibdev_init(&dd->verbs_dev);
1511         }
1512
1513         j = hfi1_device_create(dd);
1514         if (j)
1515                 dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
1516
1517         if (initfail || ret) {
1518                 stop_timers(dd);
1519                 flush_workqueue(ib_wq);
1520                 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1521                         hfi1_quiet_serdes(dd->pport + pidx);
1522                         ppd = dd->pport + pidx;
1523                         if (ppd->hfi1_wq) {
1524                                 destroy_workqueue(ppd->hfi1_wq);
1525                                 ppd->hfi1_wq = NULL;
1526                         }
1527                 }
1528                 if (!j)
1529                         hfi1_device_remove(dd);
1530                 if (!ret)
1531                         hfi1_unregister_ib_device(dd);
1532                 postinit_cleanup(dd);
1533                 if (initfail)
1534                         ret = initfail;
1535                 goto bail;      /* everything already cleaned */
1536         }
1537
1538         sdma_start(dd);
1539
1540         return 0;
1541
1542 clean_bail:
1543         hfi1_pcie_cleanup(pdev);
1544 bail:
1545         return ret;
1546 }
1547
1548 static void wait_for_clients(struct hfi1_devdata *dd)
1549 {
1550         /*
1551          * Remove the device init value and complete the device if there
1552          * are no clients, or wait for active clients to finish.
1553          */
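        /*
         * A minimal sketch of the refcount protocol assumed here (the
         * actual open/close hooks live in the driver's file_ops code):
         *
         *     probe:  atomic_set(&dd->user_refcount, 1);
         *     open:   atomic_inc_not_zero(&dd->user_refcount);
         *     close:  if (atomic_dec_and_test(&dd->user_refcount))
         *                     complete(&dd->user_comp);
         *
         * The decrement below drops the probe-time reference, so the
         * completion fires once the last client closes.
         */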
1554         if (atomic_dec_and_test(&dd->user_refcount))
1555                 complete(&dd->user_comp);
1556
1557         wait_for_completion(&dd->user_comp);
1558 }
1559
1560 static void remove_one(struct pci_dev *pdev)
1561 {
1562         struct hfi1_devdata *dd = pci_get_drvdata(pdev);
1563
1564         /* close debugfs files before ib unregister */
1565         hfi1_dbg_ibdev_exit(&dd->verbs_dev);
1566
1567         /* remove the /dev hfi1 interface */
1568         hfi1_device_remove(dd);
1569
1570         /* wait for existing user space clients to finish */
1571         wait_for_clients(dd);
1572
1573         /* unregister from IB core */
1574         hfi1_unregister_ib_device(dd);
1575
1576         /*
1577          * Disable the IB link, disable interrupts on the device,
1578          * clear dma engines, etc.
1579          */
1580         shutdown_device(dd);
1581
1582         stop_timers(dd);
1583
1584         /* wait until all of our (qsfp) queue_work() calls complete */
1585         flush_workqueue(ib_wq);
1586
1587         postinit_cleanup(dd);
1588 }
1589
1590 /**
1591  * hfi1_create_rcvhdrq - create a receive header queue
1592  * @dd: the hfi1_ib device
1593  * @rcd: the context data
1594  *
1595  * This must be contiguous memory (from an i/o perspective), and must be
1596  * DMA'able (which means for some systems, it will go through an IOMMU,
1597  * or be forced into a low address range).
1598  */
1599 int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
1600 {
1601         unsigned amt;
1602         u64 reg;
1603
1604         if (!rcd->rcvhdrq) {
1605                 dma_addr_t dma_hdrqtail;
1606                 gfp_t gfp_flags;
1607
1608                 /*
1609                  * rcvhdrqentsize is in DWs, so we have to convert to bytes
1610                  * (* sizeof(u32)).
1611                  */
1612                 amt = PAGE_ALIGN(rcd->rcvhdrq_cnt * rcd->rcvhdrqentsize *
1613                                  sizeof(u32));
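                /*
                 * For example, rcvhdrq_cnt = 2048 with a 32 DW entry
                 * size gives 2048 * 32 * 4 = 262144 bytes, which is
                 * already page aligned.
                 */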
1614
1615                 gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
1616                         GFP_USER : GFP_KERNEL;
1617                 rcd->rcvhdrq = dma_zalloc_coherent(
1618                         &dd->pcidev->dev, amt, &rcd->rcvhdrq_dma,
1619                         gfp_flags | __GFP_COMP);
1620
1621                 if (!rcd->rcvhdrq) {
1622                         dd_dev_err(dd,
1623                                    "attempt to allocate %u bytes for ctxt %u rcvhdrq failed\n",
1624                                    amt, rcd->ctxt);
1625                         goto bail;
1626                 }
1627
1628                 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
1629                         rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
1630                                 &dd->pcidev->dev, PAGE_SIZE, &dma_hdrqtail,
1631                                 gfp_flags);
1632                         if (!rcd->rcvhdrtail_kvaddr)
1633                                 goto bail_free;
1634                         rcd->rcvhdrqtailaddr_dma = dma_hdrqtail;
1635                 }
1636
1637                 rcd->rcvhdrq_size = amt;
1638         }
1639         /*
1640          * These values are per-context:
1641          *      RcvHdrCnt
1642          *      RcvHdrEntSize
1643          *      RcvHdrSize
1644          */
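        /*
         * Illustrative encoding, assuming HDRQ_SIZE_SHIFT is 5: a
         * rcvhdrq_cnt of 2048 is written to RCV_HDR_CNT as
         * 2048 >> 5 = 64 units of 32 entries each.
         */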
1645         reg = ((u64)(rcd->rcvhdrq_cnt >> HDRQ_SIZE_SHIFT)
1646                         & RCV_HDR_CNT_CNT_MASK)
1647                 << RCV_HDR_CNT_CNT_SHIFT;
1648         write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_CNT, reg);
1649         reg = (encode_rcv_header_entry_size(rcd->rcvhdrqentsize)
1650                         & RCV_HDR_ENT_SIZE_ENT_SIZE_MASK)
1651                 << RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
1652         write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_ENT_SIZE, reg);
1653         reg = (dd->rcvhdrsize & RCV_HDR_SIZE_HDR_SIZE_MASK)
1654                 << RCV_HDR_SIZE_HDR_SIZE_SHIFT;
1655         write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_SIZE, reg);
1656
1657         /*
1658          * Program dummy tail address for every receive context
1659          * before enabling any receive context
1660          */
1661         write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR,
1662                         dd->rcvhdrtail_dummy_dma);
1663
1664         return 0;
1665
1666 bail_free:
1667         dd_dev_err(dd,
1668                    "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
1669                    rcd->ctxt);
1670         vfree(rcd->user_event_mask);
1671         rcd->user_event_mask = NULL;
1672         dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
1673                           rcd->rcvhdrq_dma);
1674         rcd->rcvhdrq = NULL;
1675 bail:
1676         return -ENOMEM;
1677 }
1678
1679 /**
1680  * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user contexts.
1681  * @rcd: the context we are setting up.
1682  *
1683  * Allocate the eager TID buffers and program them into the chip.
1684  * They are no longer completely contiguous; we do multiple allocation
1685  * calls.  Otherwise we would get the OOM code involved by asking for
1686  * too much per call, with disastrous results on some kernels.
1687  */
1688 int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
1689 {
1690         struct hfi1_devdata *dd = rcd->dd;
1691         u32 max_entries, egrtop, alloced_bytes = 0, idx = 0;
1692         gfp_t gfp_flags;
1693         u16 order;
1694         int ret = 0;
1695         u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);
1696
1697         /*
1698          * GFP_USER, but without GFP_FS, so the buffer cache can be
1699          * coalesced (we hope); otherwise, even at order 4, heavy
1700          * filesystem activity makes these allocations fail.  Also
1701          * request compound pages (__GFP_COMP).
1702          */
1703         gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
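        /*
         * For reference: in this kernel GFP_USER expands to __GFP_RECLAIM |
         * __GFP_IO | __GFP_FS | __GFP_HARDWALL, so the mask above is
         * GFP_USER minus __GFP_FS (and __GFP_HARDWALL) plus __GFP_COMP.
         */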
1704
1705         /*
1706          * The minimum size of the eager buffers is a group of MTU-sized
1707          * buffers.
1708          * The global eager_buffer_size parameter is checked against the
1709          * theoretical lower limit; here, we check against the actual
1710          * MTU.
1711          */
1712         if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
1713                 rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
1714         /*
1715          * If using one-pkt-per-egr-buffer, lower the eager buffer
1716          * size to the max MTU (page-aligned).
1717          */
1718         if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
1719                 rcd->egrbufs.rcvtid_size = round_mtu;
1720
1721         /*
1722          * Eager buffers sizes of 1MB or less require smaller TID sizes
1723          * to satisfy the "multiple of 8 RcvArray entries" requirement.
1724          */
1725         if (rcd->egrbufs.size <= (1 << 20))
1726                 rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
1727                         rounddown_pow_of_two(rcd->egrbufs.size / 8));
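        /*
         * For example, with egrbufs.size = 1 MiB the TID size becomes
         * rounddown_pow_of_two(1048576 / 8) = 131072 (128 KiB), unless
         * the rounded MTU is larger.
         */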
1728
1729         while (alloced_bytes < rcd->egrbufs.size &&
1730                rcd->egrbufs.alloced < rcd->egrbufs.count) {
1731                 rcd->egrbufs.buffers[idx].addr =
1732                         dma_zalloc_coherent(&dd->pcidev->dev,
1733                                             rcd->egrbufs.rcvtid_size,
1734                                             &rcd->egrbufs.buffers[idx].dma,
1735                                             gfp_flags);
1736                 if (rcd->egrbufs.buffers[idx].addr) {
1737                         rcd->egrbufs.buffers[idx].len =
1738                                 rcd->egrbufs.rcvtid_size;
1739                         rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
1740                                 rcd->egrbufs.buffers[idx].addr;
1741                         rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].dma =
1742                                 rcd->egrbufs.buffers[idx].dma;
1743                         rcd->egrbufs.alloced++;
1744                         alloced_bytes += rcd->egrbufs.rcvtid_size;
1745                         idx++;
1746                 } else {
1747                         u32 new_size, i, j;
1748                         u64 offset = 0;
1749
1750                         /*
1751                          * Fail the eager buffer allocation if:
1752                          *   - we are already using the lowest acceptable size
1753                          *   - we are using one-pkt-per-egr-buffer (this implies
1754                          *     that we are accepting only one size)
1755                          */
1756                         if (rcd->egrbufs.rcvtid_size == round_mtu ||
1757                             !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
1758                                 dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
1759                                            rcd->ctxt);
1760                                 goto bail_rcvegrbuf_phys;
1761                         }
1762
1763                         new_size = rcd->egrbufs.rcvtid_size / 2;
1764
1765                         /*
1766                          * If the first attempt to allocate memory failed, don't
1767                          * fail everything but continue with the next lower
1768                          * size.
1769                          */
1770                         if (idx == 0) {
1771                                 rcd->egrbufs.rcvtid_size = new_size;
1772                                 continue;
1773                         }
1774
1775                         /*
1776                          * Re-partition already allocated buffers to a smaller
1777                          * size.
1778                          */
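                        /*
                         * For example, if two 128 KiB buffers were
                         * allocated (idx == 2) and new_size is 64 KiB,
                         * the loop below yields four rcvtids at
                         * (buf0, 0), (buf0, 64K), (buf1, 0) and
                         * (buf1, 64K), leaving alloced == 4.
                         */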
1779                         rcd->egrbufs.alloced = 0;
1780                         for (i = 0, j = 0, offset = 0; j < idx; i++) {
1781                                 if (i >= rcd->egrbufs.count)
1782                                         break;
1783                                 rcd->egrbufs.rcvtids[i].dma =
1784                                         rcd->egrbufs.buffers[j].dma + offset;
1785                                 rcd->egrbufs.rcvtids[i].addr =
1786                                         rcd->egrbufs.buffers[j].addr + offset;
1787                                 rcd->egrbufs.alloced++;
1788                                 if ((rcd->egrbufs.buffers[j].dma + offset +
1789                                      new_size) ==
1790                                     (rcd->egrbufs.buffers[j].dma +
1791                                      rcd->egrbufs.buffers[j].len)) {
1792                                         j++;
1793                                         offset = 0;
1794                                 } else {
1795                                         offset += new_size;
1796                                 }
1797                         }
1798                         rcd->egrbufs.rcvtid_size = new_size;
1799                 }
1800         }
1801         rcd->egrbufs.numbufs = idx;
1802         rcd->egrbufs.size = alloced_bytes;
1803
1804         hfi1_cdbg(PROC,
1805                   "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n",
1806                   rcd->ctxt, rcd->egrbufs.alloced,
1807                   rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);
1808
1809         /*
1810          * Set the context's rcv array head update threshold to the closest
1811          * power of 2 (so we can use a mask instead of modulo) below half
1812          * the allocated entries.
1813          */
1814         rcd->egrbufs.threshold =
1815                 rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
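        /*
         * For example, 100 allocated entries yield a threshold of
         * rounddown_pow_of_two(100 / 2) = 32.
         */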
1816         /*
1817          * Compute the expected RcvArray entry base. This is done after
1818          * allocating the eager buffers in order to maximize the
1819          * expected RcvArray entries for the context.
1820          */
1821         max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
1822         egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
1823         rcd->expected_count = max_entries - egrtop;
1824         if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
1825                 rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;
1826
1827         rcd->expected_base = rcd->eager_base + egrtop;
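        /*
         * Illustrative numbers, assuming a group size of 8: with
         * rcv_array_groups = 128, max_entries = 1024; 100 allocated
         * eager entries round up to egrtop = 104, leaving
         * expected_count = 1024 - 104 = 920 and
         * expected_base = eager_base + 104.
         */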
1828         hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
1829                   rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
1830                   rcd->eager_base, rcd->expected_base);
1831
1832         if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
1833                 hfi1_cdbg(PROC,
1834                           "ctxt%u: current Eager buffer size is invalid %u\n",
1835                           rcd->ctxt, rcd->egrbufs.rcvtid_size);
1836                 ret = -EINVAL;
1837                 goto bail;
1838         }
1839
1840         for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
1841                 hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
1842                              rcd->egrbufs.rcvtids[idx].dma, order);
1843                 cond_resched();
1844         }
1845         goto bail;
1846
1847 bail_rcvegrbuf_phys:
1848         for (idx = 0; idx < rcd->egrbufs.alloced &&
1849              rcd->egrbufs.buffers[idx].addr;
1850              idx++) {
1851                 dma_free_coherent(&dd->pcidev->dev,
1852                                   rcd->egrbufs.buffers[idx].len,
1853                                   rcd->egrbufs.buffers[idx].addr,
1854                                   rcd->egrbufs.buffers[idx].dma);
1855                 rcd->egrbufs.buffers[idx].addr = NULL;
1856                 rcd->egrbufs.buffers[idx].dma = 0;
1857                 rcd->egrbufs.buffers[idx].len = 0;
1858         }
1859 bail:
1860         return ret;
1861 }