/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/xarray.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include <linux/bitmap.h>
#include <linux/numa.h>
#include <rdma/rdma_vt.h>

#include "hfi.h"
#include "device.h"
#include "common.h"
#include "trace.h"
#include "mad.h"
#include "sdma.h"
#include "debugfs.h"
#include "verbs.h"
#include "aspm.h"
#include "affinity.h"
#include "vnic.h"
#include "exp_rcv.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

/*
 * Minimum number of buffers we want to have per user context, after
 * those taken by the driver itself.
 */
#define HFI1_MIN_USER_CTXT_BUFCNT 7

#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */

#define NUM_IB_PORTS 1

/*
 * Number of user receive contexts we are configured to use (to allow for more
 * pio buffers per ctxt, etc.)  Zero means use one user context per CPU.
 */
int num_user_contexts = -1;
module_param_named(num_user_contexts, num_user_contexts, int, 0444);
MODULE_PARM_DESC(
        num_user_contexts, "Set max number of user contexts to use (default: -1 will use the real (non-HT) CPU count)");

uint krcvqs[RXE_NUM_DATA_VL];
int krcvqsset;
module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");

/* computed based on above array */
unsigned long n_krcvqs;
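/*
 * For example, loading the module with krcvqs=2,2,2 results in
 * n_krcvqs = 6; see compute_krcvqs() below.
 */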

static unsigned hfi1_rcvarr_split = 25;
module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
MODULE_PARM_DESC(rcvarr_split, "Percent of context's RcvArray entries used for Eager buffers");

static uint eager_buffer_size = (8 << 20); /* 8MB */
module_param(eager_buffer_size, uint, S_IRUGO);
MODULE_PARM_DESC(eager_buffer_size, "Size of the eager buffers, default: 8MB");

static uint rcvhdrcnt = 2048; /* 2x the max eager buffer count */
module_param_named(rcvhdrcnt, rcvhdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(rcvhdrcnt, "Receive header queue count (default 2048)");

static uint hfi1_hdrq_entsize = 32;
module_param_named(hdrq_entsize, hfi1_hdrq_entsize, uint, 0444);
MODULE_PARM_DESC(hdrq_entsize, "Size of header queue entries: 2 - 8B, 16 - 64B, 32 - 128B (default)");
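/*
 * With the default hdrq_entsize of 32 (128B entries), the per-context
 * RHF offset computed in hfi1_create_ctxtdata() works out to
 * 32 - sizeof(u64) / sizeof(u32) = 30 dwords.
 */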

unsigned int user_credit_return_threshold = 33; /* default is 33% */
module_param(user_credit_return_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user send contexts, return when unreturned credits passes this many blocks (in percent of allocated blocks, 0 is off)");
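/*
 * Example: at the default of 33, a user send context requests a credit
 * return once more than 33% of its allocated credit blocks are
 * unreturned; 0 disables threshold-based returns.
 */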

DEFINE_XARRAY_FLAGS(hfi1_dev_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
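/*
 * Unit numbers are allocated into this table with xa_alloc_irq() in
 * hfi1_alloc_devdata() and mapped back to a devdata pointer with
 * xa_load() in hfi1_lookup().
 */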

static int hfi1_create_kctxt(struct hfi1_devdata *dd,
                             struct hfi1_pportdata *ppd)
{
        struct hfi1_ctxtdata *rcd;
        int ret;

        /* Control context has to be always 0 */
        BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);

        ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd);
        if (ret < 0) {
                dd_dev_err(dd, "Kernel receive context allocation failed\n");
                return ret;
        }

        /*
         * Set up the kernel context flags here and now because they use
         * default values for all receive side memories.  User contexts will
         * be handled as they are created.
         */
        rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
                HFI1_CAP_KGET(NODROP_RHQ_FULL) |
                HFI1_CAP_KGET(NODROP_EGR_FULL) |
                HFI1_CAP_KGET(DMA_RTAIL);

        /* Control context must use DMA_RTAIL */
        if (rcd->ctxt == HFI1_CTRL_CTXT)
                rcd->flags |= HFI1_CAP_DMA_RTAIL;
        rcd->fast_handler = get_dma_rtail_setting(rcd) ?
                                handle_receive_interrupt_dma_rtail :
                                handle_receive_interrupt_nodma_rtail;
        rcd->slow_handler = handle_receive_interrupt;

        hfi1_set_seq_cnt(rcd, 1);

        rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
        if (!rcd->sc) {
                dd_dev_err(dd, "Kernel send context allocation failed\n");
                return -ENOMEM;
        }
        hfi1_init_ctxt(rcd->sc);

        return 0;
}

/*
 * Create the receive context array and one or more kernel contexts
 */
int hfi1_create_kctxts(struct hfi1_devdata *dd)
{
        u16 i;
        int ret;

        dd->rcd = kcalloc_node(dd->num_rcv_contexts, sizeof(*dd->rcd),
                               GFP_KERNEL, dd->node);
        if (!dd->rcd)
                return -ENOMEM;

        for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
                ret = hfi1_create_kctxt(dd, dd->pport);
                if (ret)
                        goto bail;
        }

        return 0;
bail:
        for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i)
                hfi1_free_ctxt(dd->rcd[i]);

        /* All the contexts should be freed, free the array */
        kfree(dd->rcd);
        dd->rcd = NULL;
        return ret;
}

/*
 * Helper routines for the receive context reference count (rcd and uctxt).
 */
static void hfi1_rcd_init(struct hfi1_ctxtdata *rcd)
{
        kref_init(&rcd->kref);
}

/**
 * hfi1_rcd_free - clean up when the rcd reference count reaches zero
 * @kref: pointer to an initialized rcd data structure
 */
static void hfi1_rcd_free(struct kref *kref)
{
        unsigned long flags;
        struct hfi1_ctxtdata *rcd =
                container_of(kref, struct hfi1_ctxtdata, kref);

        spin_lock_irqsave(&rcd->dd->uctxt_lock, flags);
        rcd->dd->rcd[rcd->ctxt] = NULL;
        spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags);

        hfi1_free_ctxtdata(rcd->dd, rcd);

        kfree(rcd);
}

/**
 * hfi1_rcd_put - decrement reference for rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to put a reference after the init.
 */
int hfi1_rcd_put(struct hfi1_ctxtdata *rcd)
{
        if (rcd)
                return kref_put(&rcd->kref, hfi1_rcd_free);

        return 0;
}

/**
 * hfi1_rcd_get - increment reference for rcd
 * @rcd: pointer to an initialized rcd data structure
 *
 * Use this to get a reference after the init.
 *
 * Return: reflects kref_get_unless_zero(), which returns non-zero on
 * increment, otherwise 0.
 */
int hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
{
        return kref_get_unless_zero(&rcd->kref);
}
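
/*
 * A minimal sketch of the expected get/put pattern around an rcd,
 * mirroring the loops in init_after_reset() and enable_chip():
 *
 *      rcd = hfi1_rcd_get_by_index(dd, ctxt);
 *      if (rcd) {
 *              ...use rcd...
 *              hfi1_rcd_put(rcd);
 *      }
 */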

/**
 * allocate_rcd_index - allocate an rcd index from the rcd array
 * @dd: pointer to a valid devdata structure
 * @rcd: rcd data structure to assign
 * @index: pointer to index that is allocated
 *
 * Find an empty index in the rcd array, and assign the given rcd to it.
 * If the array is full, we are EBUSY.
 *
 */
static int allocate_rcd_index(struct hfi1_devdata *dd,
                              struct hfi1_ctxtdata *rcd, u16 *index)
{
        unsigned long flags;
        u16 ctxt;

        spin_lock_irqsave(&dd->uctxt_lock, flags);
        for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++)
                if (!dd->rcd[ctxt])
                        break;

        if (ctxt < dd->num_rcv_contexts) {
                rcd->ctxt = ctxt;
                dd->rcd[ctxt] = rcd;
                hfi1_rcd_init(rcd);
        }
        spin_unlock_irqrestore(&dd->uctxt_lock, flags);

        if (ctxt >= dd->num_rcv_contexts)
                return -EBUSY;

        *index = ctxt;

        return 0;
}

/**
 * hfi1_rcd_get_by_index_safe - validate the ctxt index before accessing the
 * array
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * This is a wrapper for hfi1_rcd_get_by_index() to validate that the given
 * ctxt index is valid.
 *
 * The caller is responsible for making the _put().
 *
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
                                                 u16 ctxt)
{
        if (ctxt < dd->num_rcv_contexts)
                return hfi1_rcd_get_by_index(dd, ctxt);

        return NULL;
}

/**
 * hfi1_rcd_get_by_index - get an rcd reference by its index
 * @dd: pointer to a valid devdata structure
 * @ctxt: the index of a possible rcd
 *
 * We need to protect access to the rcd array.  If access is needed to
 * one or more indices, get the protecting spinlock and then increment the
 * kref.
 *
 * The caller is responsible for making the _put().
 *
 */
struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt)
{
        unsigned long flags;
        struct hfi1_ctxtdata *rcd = NULL;

        spin_lock_irqsave(&dd->uctxt_lock, flags);
        if (dd->rcd[ctxt]) {
                rcd = dd->rcd[ctxt];
                if (!hfi1_rcd_get(rcd))
                        rcd = NULL;
        }
        spin_unlock_irqrestore(&dd->uctxt_lock, flags);

        return rcd;
}

/*
 * Common code for user and kernel context create and setup.
 * NOTE: the initial kref is done here (hfi1_rcd_init()).
 */
int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
                         struct hfi1_ctxtdata **context)
{
        struct hfi1_devdata *dd = ppd->dd;
        struct hfi1_ctxtdata *rcd;
        unsigned kctxt_ngroups = 0;
        u32 base;

        if (dd->rcv_entries.nctxt_extra >
            dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt)
                kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
                         (dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt));
        rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
        if (rcd) {
                u32 rcvtids, max_entries;
                u16 ctxt;
                int ret;

                ret = allocate_rcd_index(dd, rcd, &ctxt);
                if (ret) {
                        *context = NULL;
                        kfree(rcd);
                        return ret;
                }

                INIT_LIST_HEAD(&rcd->qp_wait_list);
                hfi1_exp_tid_group_init(rcd);
                rcd->ppd = ppd;
                rcd->dd = dd;
                rcd->numa_id = numa;
                rcd->rcv_array_groups = dd->rcv_entries.ngroups;
                rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;

                mutex_init(&rcd->exp_mutex);
                spin_lock_init(&rcd->exp_lock);
                INIT_LIST_HEAD(&rcd->flow_queue.queue_head);
                INIT_LIST_HEAD(&rcd->rarr_queue.queue_head);

                hfi1_cdbg(PROC, "setting up context %u\n", rcd->ctxt);

                /*
                 * Calculate the context's RcvArray entry starting point.
                 * We do this here because we have to take into account all
                 * the RcvArray entries that previous contexts would have
                 * taken and we have to account for any extra groups assigned
                 * to the static (kernel) or dynamic (vnic/user) contexts.
                 */
                if (ctxt < dd->first_dyn_alloc_ctxt) {
                        if (ctxt < kctxt_ngroups) {
                                base = ctxt * (dd->rcv_entries.ngroups + 1);
                                rcd->rcv_array_groups++;
                        } else {
                                base = kctxt_ngroups +
                                        (ctxt * dd->rcv_entries.ngroups);
                        }
                } else {
                        u16 ct = ctxt - dd->first_dyn_alloc_ctxt;

                        base = ((dd->n_krcv_queues * dd->rcv_entries.ngroups) +
                                kctxt_ngroups);
                        if (ct < dd->rcv_entries.nctxt_extra) {
                                base += ct * (dd->rcv_entries.ngroups + 1);
                                rcd->rcv_array_groups++;
                        } else {
                                base += dd->rcv_entries.nctxt_extra +
                                        (ct * dd->rcv_entries.ngroups);
                        }
                }
                rcd->eager_base = base * dd->rcv_entries.group_size;

                rcd->rcvhdrq_cnt = rcvhdrcnt;
                rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
                rcd->rhf_offset =
                        rcd->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
                /*
                 * Simple Eager buffer allocation: we have already pre-allocated
                 * the number of RcvArray entry groups. Each ctxtdata structure
                 * holds the number of groups for that context.
                 *
                 * To follow CSR requirements and maintain cacheline alignment,
                 * make sure all sizes and bases are multiples of group_size.
                 *
                 * The expected entry count is what is left after assigning
                 * eager.
                 */
                max_entries = rcd->rcv_array_groups *
                        dd->rcv_entries.group_size;
                rcvtids = ((max_entries * hfi1_rcvarr_split) / 100);
                rcd->egrbufs.count = round_down(rcvtids,
                                                dd->rcv_entries.group_size);
                if (rcd->egrbufs.count > MAX_EAGER_ENTRIES) {
                        dd_dev_err(dd, "ctxt%u: requested too many RcvArray entries.\n",
                                   rcd->ctxt);
                        rcd->egrbufs.count = MAX_EAGER_ENTRIES;
                }
                hfi1_cdbg(PROC,
                          "ctxt%u: max Eager buffer RcvArray entries: %u\n",
                          rcd->ctxt, rcd->egrbufs.count);

                /*
                 * Allocate array that will hold the eager buffer accounting
                 * data.
                 * This will allocate the maximum possible buffer count based
                 * on the value of the RcvArray split parameter.
                 * The resulting value will be rounded down to the closest
                 * multiple of dd->rcv_entries.group_size.
                 */
                rcd->egrbufs.buffers =
                        kcalloc_node(rcd->egrbufs.count,
                                     sizeof(*rcd->egrbufs.buffers),
                                     GFP_KERNEL, numa);
                if (!rcd->egrbufs.buffers)
                        goto bail;
                rcd->egrbufs.rcvtids =
                        kcalloc_node(rcd->egrbufs.count,
                                     sizeof(*rcd->egrbufs.rcvtids),
                                     GFP_KERNEL, numa);
                if (!rcd->egrbufs.rcvtids)
                        goto bail;
                rcd->egrbufs.size = eager_buffer_size;
                /*
                 * The size of the buffers programmed into the RcvArray
                 * entries needs to be big enough to handle the highest
                 * MTU supported.
                 */
                if (rcd->egrbufs.size < hfi1_max_mtu) {
                        rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
                        hfi1_cdbg(PROC,
                                  "ctxt%u: eager bufs size too small. Adjusting to %u\n",
                                  rcd->ctxt, rcd->egrbufs.size);
                }
                rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;

                /* Applicable only for statically created kernel contexts */
                if (ctxt < dd->first_dyn_alloc_ctxt) {
                        rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
                                                    GFP_KERNEL, numa);
                        if (!rcd->opstats)
                                goto bail;

                        /* Initialize TID flow generations for the context */
                        hfi1_kern_init_ctxt_generations(rcd);
                }

                *context = rcd;
                return 0;
        }

bail:
        *context = NULL;
        hfi1_free_ctxt(rcd);
        return -ENOMEM;
}

/**
 * hfi1_free_ctxt - free a context that is no longer being used
 * @rcd: pointer to an initialized rcd data structure
 *
 * This wrapper is the free function that matches hfi1_create_ctxtdata().
 * When a context is done being used (kernel or user), this function is called
 * for the "final" put to match the kref init from hfi1_create_ctxtdata().
 * Other users of the context do a get/put sequence to make sure that the
 * structure isn't removed while in use.
 */
void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd)
{
        hfi1_rcd_put(rcd);
}

/*
 * Select the largest ccti value over all SLs to determine the intra-
 * packet gap for the link.
 *
 * called with cca_timer_lock held (to protect access to cca_timer
 * array), and rcu_read_lock() (to protect access to cc_state).
 */
void set_link_ipg(struct hfi1_pportdata *ppd)
{
        struct hfi1_devdata *dd = ppd->dd;
        struct cc_state *cc_state;
        int i;
        u16 cce, ccti_limit, max_ccti = 0;
        u16 shift, mult;
        u64 src;
        u32 current_egress_rate; /* Mbits/sec */
        u32 max_pkt_time;
        /*
         * max_pkt_time is the maximum packet egress time in units
         * of the fabric clock period 1/(805 MHz).
         */

        cc_state = get_cc_state(ppd);

        if (!cc_state)
                /*
                 * This should _never_ happen - rcu_read_lock() is held,
                 * and set_link_ipg() should not be called if cc_state
                 * is NULL.
                 */
                return;

        for (i = 0; i < OPA_MAX_SLS; i++) {
                u16 ccti = ppd->cca_timer[i].ccti;

                if (ccti > max_ccti)
                        max_ccti = ccti;
        }

        ccti_limit = cc_state->cct.ccti_limit;
        if (max_ccti > ccti_limit)
                max_ccti = ccti_limit;

        cce = cc_state->cct.entries[max_ccti].entry;
        shift = (cce & 0xc000) >> 14;
        mult = (cce & 0x3fff);

        current_egress_rate = active_egress_rate(ppd);

        max_pkt_time = egress_cycles(ppd->ibmaxlen, current_egress_rate);

        src = (max_pkt_time >> shift) * mult;

        src &= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK;
        src <<= SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT;

        write_csr(dd, SEND_STATIC_RATE_CONTROL, src);
}
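
/*
 * Worked example of the CCE decoding above: a table entry of 0x4064
 * has shift = (0x4064 & 0xc000) >> 14 = 1 and mult = 0x64, so the
 * programmed reload value is (max_pkt_time >> 1) * 0x64.
 */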

static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
{
        struct cca_timer *cca_timer;
        struct hfi1_pportdata *ppd;
        int sl;
        u16 ccti_timer, ccti_min;
        struct cc_state *cc_state;
        unsigned long flags;
        enum hrtimer_restart ret = HRTIMER_NORESTART;

        cca_timer = container_of(t, struct cca_timer, hrtimer);
        ppd = cca_timer->ppd;
        sl = cca_timer->sl;

        rcu_read_lock();

        cc_state = get_cc_state(ppd);

        if (!cc_state) {
                rcu_read_unlock();
                return HRTIMER_NORESTART;
        }

        /*
         * 1) decrement ccti for SL
         * 2) calculate IPG for link (set_link_ipg())
         * 3) restart timer, unless ccti is at min value
         */

        ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
        ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;

        spin_lock_irqsave(&ppd->cca_timer_lock, flags);

        if (cca_timer->ccti > ccti_min) {
                cca_timer->ccti--;
                set_link_ipg(ppd);
        }

        if (cca_timer->ccti > ccti_min) {
                unsigned long nsec = 1024 * ccti_timer;
                /* ccti_timer is in units of 1.024 usec */
                hrtimer_forward_now(t, ns_to_ktime(nsec));
                ret = HRTIMER_RESTART;
        }

        spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
        rcu_read_unlock();
        return ret;
}
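
/*
 * Since ccti_timer is in units of 1.024 usec, a congestion setting of
 * ccti_timer = 100 re-arms the timer above roughly every 102.4 usec
 * until the per-SL ccti decays back down to ccti_min.
 */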

/*
 * Common code for initializing the physical port structure.
 */
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
                         struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
{
        int i;
        uint default_pkey_idx;
        struct cc_state *cc_state;

        ppd->dd = dd;
        ppd->hw_pidx = hw_pidx;
        ppd->port = port; /* IB port number, not index */
        ppd->prev_link_width = LINK_WIDTH_DEFAULT;
        /*
         * There are C_VL_COUNT number of PortVLXmitWait counters.
         * Adding 1 to C_VL_COUNT to include the PortXmitWait counter.
         */
        for (i = 0; i < C_VL_COUNT + 1; i++) {
                ppd->port_vl_xmit_wait_last[i] = 0;
                ppd->vl_xmit_flit_cnt[i] = 0;
        }

        default_pkey_idx = 1;

        ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
        ppd->part_enforce |= HFI1_PART_ENFORCE_IN;

        if (loopback) {
                dd_dev_err(dd, "Faking data partition 0x8001 in idx %u\n",
                           !default_pkey_idx);
                ppd->pkeys[!default_pkey_idx] = 0x8001;
        }

        INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
        INIT_WORK(&ppd->link_up_work, handle_link_up);
        INIT_WORK(&ppd->link_down_work, handle_link_down);
        INIT_WORK(&ppd->freeze_work, handle_freeze);
        INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
        INIT_WORK(&ppd->sma_message_work, handle_sma_message);
        INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
        INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
        INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
        INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);

        mutex_init(&ppd->hls_lock);
        spin_lock_init(&ppd->qsfp_info.qsfp_lock);

        ppd->qsfp_info.ppd = ppd;
        ppd->sm_trap_qp = 0x0;
        ppd->sa_qp = 0x1;

        ppd->hfi1_wq = NULL;

        spin_lock_init(&ppd->cca_timer_lock);

        for (i = 0; i < OPA_MAX_SLS; i++) {
                hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
                             HRTIMER_MODE_REL);
                ppd->cca_timer[i].ppd = ppd;
                ppd->cca_timer[i].sl = i;
                ppd->cca_timer[i].ccti = 0;
                ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
        }

        ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;

        spin_lock_init(&ppd->cc_state_lock);
        spin_lock_init(&ppd->cc_log_lock);
        cc_state = kzalloc(sizeof(*cc_state), GFP_KERNEL);
        RCU_INIT_POINTER(ppd->cc_state, cc_state);
        if (!cc_state)
                goto bail;
        return;

bail:
        dd_dev_err(dd, "Congestion Control Agent disabled for port %d\n", port);
}

/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct hfi1_devdata *dd)
{
        return 0;
}

/**
 * init_after_reset - re-initialize after a reset
 * @dd: the hfi1_ib device
 *
 * Sanity check at least some of the values after reset, and
 * ensure no receive or transmit (explicitly, in case the reset
 * failed).
 */
static int init_after_reset(struct hfi1_devdata *dd)
{
        int i;
        struct hfi1_ctxtdata *rcd;
        /*
         * Ensure chip does no sends or receives, tail updates, or
         * pioavail updates while we re-initialize.  This is mostly
         * for the driver data structures, not chip registers.
         */
        for (i = 0; i < dd->num_rcv_contexts; i++) {
                rcd = hfi1_rcd_get_by_index(dd, i);
                hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
                             HFI1_RCVCTRL_INTRAVAIL_DIS |
                             HFI1_RCVCTRL_TAILUPD_DIS, rcd);
                hfi1_rcd_put(rcd);
        }
        pio_send_control(dd, PSC_GLOBAL_DISABLE);
        for (i = 0; i < dd->num_send_contexts; i++)
                sc_disable(dd->send_contexts[i].sc);

        return 0;
}

static void enable_chip(struct hfi1_devdata *dd)
{
        struct hfi1_ctxtdata *rcd;
        u32 rcvmask;
        u16 i;

        /* enable PIO send */
        pio_send_control(dd, PSC_GLOBAL_ENABLE);

        /*
         * Enable kernel ctxts' receive and receive interrupt.
         * Other ctxts done as user opens and initializes them.
         */
        for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
                rcd = hfi1_rcd_get_by_index(dd, i);
                if (!rcd)
                        continue;
                rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
                rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
                        HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
                if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
                        rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
                if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_RHQ_FULL))
                        rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
                if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_EGR_FULL))
                        rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
                if (HFI1_CAP_IS_KSET(TID_RDMA))
                        rcvmask |= HFI1_RCVCTRL_TIDFLOW_ENB;
                hfi1_rcvctrl(dd, rcvmask, rcd);
                sc_enable(rcd->sc);
                hfi1_rcd_put(rcd);
        }
}

/**
 * create_workqueues - create per port workqueues
 * @dd: the hfi1_ib device
 */
static int create_workqueues(struct hfi1_devdata *dd)
{
        int pidx;
        struct hfi1_pportdata *ppd;

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                if (!ppd->hfi1_wq) {
                        ppd->hfi1_wq =
                                alloc_workqueue(
                                    "hfi%d_%d",
                                    WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE |
                                    WQ_MEM_RECLAIM,
                                    HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES,
                                    dd->unit, pidx);
                        if (!ppd->hfi1_wq)
                                goto wq_error;
                }
                if (!ppd->link_wq) {
                        /*
                         * Make the link workqueue single-threaded to enforce
                         * serialization.
                         */
                        ppd->link_wq =
                                alloc_workqueue(
                                    "hfi_link_%d_%d",
                                    WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND,
                                    1, /* max_active */
                                    dd->unit, pidx);
                        if (!ppd->link_wq)
                                goto wq_error;
                }
        }
        return 0;
wq_error:
        pr_err("alloc_workqueue failed for port %d\n", pidx + 1);
        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                if (ppd->hfi1_wq) {
                        destroy_workqueue(ppd->hfi1_wq);
                        ppd->hfi1_wq = NULL;
                }
                if (ppd->link_wq) {
                        destroy_workqueue(ppd->link_wq);
                        ppd->link_wq = NULL;
                }
        }
        return -ENOMEM;
}

/**
 * enable_general_intr() - Enable the IRQs that will be handled by the
 * general interrupt handler.
 * @dd: valid devdata
 *
 */
static void enable_general_intr(struct hfi1_devdata *dd)
{
        set_intr_bits(dd, CCE_ERR_INT, MISC_ERR_INT, true);
        set_intr_bits(dd, PIO_ERR_INT, TXE_ERR_INT, true);
        set_intr_bits(dd, IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END, true);
        set_intr_bits(dd, PBC_INT, GPIO_ASSERT_INT, true);
        set_intr_bits(dd, TCRIT_INT, TCRIT_INT, true);
        set_intr_bits(dd, IS_DC_START, IS_DC_END, true);
        set_intr_bits(dd, IS_SENDCREDIT_START, IS_SENDCREDIT_END, true);
}

/**
 * hfi1_init - do the actual initialization sequence on the chip
 * @dd: the hfi1_ib device
 * @reinit: re-initializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because even
 * without memory allocation, we need to re-write all the chip registers
 * TIDs, etc. after the reset or enable has completed.
 */
int hfi1_init(struct hfi1_devdata *dd, int reinit)
{
        int ret = 0, pidx, lastfail = 0;
        unsigned long len;
        u16 i;
        struct hfi1_ctxtdata *rcd;
        struct hfi1_pportdata *ppd;

        /* Set up send low level handlers */
        dd->process_pio_send = hfi1_verbs_send_pio;
        dd->process_dma_send = hfi1_verbs_send_dma;
        dd->pio_inline_send = pio_copy;
        dd->process_vnic_dma_send = hfi1_vnic_send_dma;

        if (is_ax(dd)) {
                atomic_set(&dd->drop_packet, DROP_PACKET_ON);
                dd->do_drop = true;
        } else {
                atomic_set(&dd->drop_packet, DROP_PACKET_OFF);
                dd->do_drop = false;
        }

        /* make sure the link is not "up" */
        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                ppd->linkup = 0;
        }

        if (reinit)
                ret = init_after_reset(dd);
        else
                ret = loadtime_init(dd);
        if (ret)
                goto done;

        /* allocate dummy tail memory for all receive contexts */
        dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
                                                         sizeof(u64),
                                                         &dd->rcvhdrtail_dummy_dma,
                                                         GFP_KERNEL);

        if (!dd->rcvhdrtail_dummy_kvaddr) {
                dd_dev_err(dd, "cannot allocate dummy tail memory\n");
                ret = -ENOMEM;
                goto done;
        }

        /* dd->rcd can be NULL if early initialization failed */
        for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
                /*
                 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
                 * re-init, the simplest way to handle this is to free
                 * existing, and re-allocate.
                 * Need to re-create rest of ctxt 0 ctxtdata as well.
                 */
                rcd = hfi1_rcd_get_by_index(dd, i);
                if (!rcd)
                        continue;

                rcd->do_interrupt = &handle_receive_interrupt;

                lastfail = hfi1_create_rcvhdrq(dd, rcd);
                if (!lastfail)
                        lastfail = hfi1_setup_eagerbufs(rcd);
                if (!lastfail)
                        lastfail = hfi1_kern_exp_rcv_init(rcd, reinit);
                if (lastfail) {
                        dd_dev_err(dd,
                                   "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
                        ret = lastfail;
                }
                /* enable IRQ */
                hfi1_rcd_put(rcd);
        }

        /* Allocate enough memory for user event notification. */
        len = PAGE_ALIGN(chip_rcv_contexts(dd) * HFI1_MAX_SHARED_CTXTS *
                         sizeof(*dd->events));
        dd->events = vmalloc_user(len);
        if (!dd->events)
                dd_dev_err(dd, "Failed to allocate user events page\n");
        /*
         * Allocate a page for device and port status.
         * Page will be shared amongst all user processes.
         */
        dd->status = vmalloc_user(PAGE_SIZE);
        if (!dd->status)
                dd_dev_err(dd, "Failed to allocate dev status page\n");
        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                if (dd->status)
                        /* Currently, we only have one port */
                        ppd->statusp = &dd->status->port;

                set_mtu(ppd);
        }

        /* enable chip even if we have an error, so we can debug cause */
        enable_chip(dd);

done:
        /*
         * Set status even if port serdes is not initialized
         * so that diags will work.
         */
        if (dd->status)
                dd->status->dev |= HFI1_STATUS_CHIP_PRESENT |
                        HFI1_STATUS_INITTED;
        if (!ret) {
                /* enable all interrupts from the chip */
                enable_general_intr(dd);
                init_qsfp_int(dd);

                /* chip is OK for user apps; mark it as initialized */
                for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                        ppd = dd->pport + pidx;

                        /*
                         * start the serdes - must be after interrupts are
                         * enabled so we are notified when the link goes up
                         */
                        lastfail = bringup_serdes(ppd);
                        if (lastfail)
                                dd_dev_info(dd,
                                            "Failed to bring up port %u\n",
                                            ppd->port);

                        /*
                         * Set status even if port serdes is not initialized
                         * so that diags will work.
                         */
                        if (ppd->statusp)
                                *ppd->statusp |= HFI1_STATUS_CHIP_PRESENT |
                                                        HFI1_STATUS_INITTED;
                        if (!ppd->link_speed_enabled)
                                continue;
                }
        }

        /* if ret is non-zero, we probably should do some cleanup here... */
        return ret;
}

struct hfi1_devdata *hfi1_lookup(int unit)
{
        return xa_load(&hfi1_dev_table, unit);
}

/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */
static void stop_timers(struct hfi1_devdata *dd)
{
        struct hfi1_pportdata *ppd;
        int pidx;

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                if (ppd->led_override_timer.function) {
                        del_timer_sync(&ppd->led_override_timer);
                        atomic_set(&ppd->led_override_timer_active, 0);
                }
        }
}

/**
 * shutdown_device - shut down a device
 * @dd: the hfi1_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.   It does not free any data structures.
 * Everything it does has to be setup again by hfi1_init(dd, 1)
 */
static void shutdown_device(struct hfi1_devdata *dd)
{
        struct hfi1_pportdata *ppd;
        struct hfi1_ctxtdata *rcd;
        unsigned pidx;
        int i;

        if (dd->flags & HFI1_SHUTDOWN)
                return;
        dd->flags |= HFI1_SHUTDOWN;

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;

                ppd->linkup = 0;
                if (ppd->statusp)
                        *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
                                           HFI1_STATUS_IB_READY);
        }
        dd->flags &= ~HFI1_INITTED;

        /* mask and clean up interrupts */
        set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
        msix_clean_up_interrupts(dd);

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                for (i = 0; i < dd->num_rcv_contexts; i++) {
                        rcd = hfi1_rcd_get_by_index(dd, i);
                        hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
                                     HFI1_RCVCTRL_CTXT_DIS |
                                     HFI1_RCVCTRL_INTRAVAIL_DIS |
                                     HFI1_RCVCTRL_PKEY_DIS |
                                     HFI1_RCVCTRL_ONE_PKT_EGR_DIS, rcd);
                        hfi1_rcd_put(rcd);
                }
                /*
                 * Gracefully stop all sends allowing any in progress to
                 * trickle out first.
                 */
                for (i = 0; i < dd->num_send_contexts; i++)
                        sc_flush(dd->send_contexts[i].sc);
        }

        /*
         * Enough for anything that's going to trickle out to have actually
         * done so.
         */
        udelay(20);

        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;

                /* disable all contexts */
                for (i = 0; i < dd->num_send_contexts; i++)
                        sc_disable(dd->send_contexts[i].sc);
                /* disable the send device */
                pio_send_control(dd, PSC_GLOBAL_DISABLE);

                shutdown_led_override(ppd);

                /*
                 * Clear SerdesEnable.
                 * We can't count on interrupts since we are stopping.
                 */
                hfi1_quiet_serdes(ppd);

                if (ppd->hfi1_wq) {
                        destroy_workqueue(ppd->hfi1_wq);
                        ppd->hfi1_wq = NULL;
                }
                if (ppd->link_wq) {
                        destroy_workqueue(ppd->link_wq);
                        ppd->link_wq = NULL;
                }
        }
        sdma_exit(dd);
}

/**
 * hfi1_free_ctxtdata - free a context's allocated data
 * @dd: the hfi1_ib device
 * @rcd: the ctxtdata structure
 *
 * free up any allocated data for a context
 * It should never change any chip state, or global driver state.
 */
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
        u32 e;

        if (!rcd)
                return;

        if (rcd->rcvhdrq) {
                dma_free_coherent(&dd->pcidev->dev, rcvhdrq_size(rcd),
                                  rcd->rcvhdrq, rcd->rcvhdrq_dma);
                rcd->rcvhdrq = NULL;
                if (hfi1_rcvhdrtail_kvaddr(rcd)) {
                        dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
                                          (void *)hfi1_rcvhdrtail_kvaddr(rcd),
                                          rcd->rcvhdrqtailaddr_dma);
                        rcd->rcvhdrtail_kvaddr = NULL;
                }
        }

        /* all the RcvArray entries should have been cleared by now */
        kfree(rcd->egrbufs.rcvtids);
        rcd->egrbufs.rcvtids = NULL;

        for (e = 0; e < rcd->egrbufs.alloced; e++) {
                if (rcd->egrbufs.buffers[e].dma)
                        dma_free_coherent(&dd->pcidev->dev,
                                          rcd->egrbufs.buffers[e].len,
                                          rcd->egrbufs.buffers[e].addr,
                                          rcd->egrbufs.buffers[e].dma);
        }
        kfree(rcd->egrbufs.buffers);
        rcd->egrbufs.alloced = 0;
        rcd->egrbufs.buffers = NULL;

        sc_free(rcd->sc);
        rcd->sc = NULL;

        vfree(rcd->subctxt_uregbase);
        vfree(rcd->subctxt_rcvegrbuf);
        vfree(rcd->subctxt_rcvhdr_base);
        kfree(rcd->opstats);

        rcd->subctxt_uregbase = NULL;
        rcd->subctxt_rcvegrbuf = NULL;
        rcd->subctxt_rcvhdr_base = NULL;
        rcd->opstats = NULL;
}

/*
 * Release our hold on the shared asic data.  If we are the last one,
 * return the structure to be finalized outside the lock.  Must be
 * holding hfi1_dev_table lock.
 */
static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd)
{
        struct hfi1_asic_data *ad;
        int other;

        if (!dd->asic_data)
                return NULL;
        dd->asic_data->dds[dd->hfi1_id] = NULL;
        other = dd->hfi1_id ? 0 : 1;
        ad = dd->asic_data;
        dd->asic_data = NULL;
        /* return NULL if the other dd still has a link */
        return ad->dds[other] ? NULL : ad;
}

static void finalize_asic_data(struct hfi1_devdata *dd,
                               struct hfi1_asic_data *ad)
{
        clean_up_i2c(dd, ad);
        kfree(ad);
}

/**
 * hfi1_clean_devdata - cleans up per-unit data structure
 * @dd: pointer to a valid devdata structure
 *
 * It cleans up all data structures set up by
 * hfi1_alloc_devdata().
 */
static void hfi1_clean_devdata(struct hfi1_devdata *dd)
{
        struct hfi1_asic_data *ad;
        unsigned long flags;

        xa_lock_irqsave(&hfi1_dev_table, flags);
        __xa_erase(&hfi1_dev_table, dd->unit);
        ad = release_asic_data(dd);
        xa_unlock_irqrestore(&hfi1_dev_table, flags);

        finalize_asic_data(dd, ad);
        free_platform_config(dd);
        rcu_barrier(); /* wait for rcu callbacks to complete */
        free_percpu(dd->int_counter);
        free_percpu(dd->rcv_limit);
        free_percpu(dd->send_schedule);
        free_percpu(dd->tx_opstats);
        dd->int_counter   = NULL;
        dd->rcv_limit     = NULL;
        dd->send_schedule = NULL;
        dd->tx_opstats    = NULL;
        kfree(dd->comp_vect);
        dd->comp_vect = NULL;
        sdma_clean(dd, dd->num_sdma);
        rvt_dealloc_device(&dd->verbs_dev.rdi);
}

static void __hfi1_free_devdata(struct kobject *kobj)
{
        struct hfi1_devdata *dd =
                container_of(kobj, struct hfi1_devdata, kobj);

        hfi1_clean_devdata(dd);
}

static struct kobj_type hfi1_devdata_type = {
        .release = __hfi1_free_devdata,
};

void hfi1_free_devdata(struct hfi1_devdata *dd)
{
        kobject_put(&dd->kobj);
}
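
/*
 * Device data lifetime is reference counted through dd->kobj: the
 * kobject_put() above drops a reference, and the final put invokes
 * __hfi1_free_devdata() via hfi1_devdata_type.release, which in turn
 * calls hfi1_clean_devdata().
 */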

/**
 * hfi1_alloc_devdata - Allocate our primary per-unit data structure.
 * @pdev: Valid PCI device
 * @extra: How many bytes to alloc past the default
 *
 * Must be done via verbs allocator, because the verbs cleanup process
 * both does cleanup and free of the data structure.
 * "extra" is for chip-specific data.
 */
static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
                                               size_t extra)
{
        struct hfi1_devdata *dd;
        int ret, nports;

        /* extra is sizeof(struct hfi1_pportdata) * number of ports */
1267         nports = extra / sizeof(struct hfi1_pportdata);
1268
1269         dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
1270                                                      nports);
1271         if (!dd)
1272                 return ERR_PTR(-ENOMEM);
1273         dd->num_pports = nports;
1274         dd->pport = (struct hfi1_pportdata *)(dd + 1);
1275         dd->pcidev = pdev;
1276         pci_set_drvdata(pdev, dd);
1277         dd->node = NUMA_NO_NODE;
1278
1279         ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b,
1280                         GFP_KERNEL);
1281         if (ret < 0) {
1282                 dev_err(&pdev->dev,
1283                         "Could not allocate unit ID: error %d\n", -ret);
1284                 goto bail;
1285         }
1286         rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);
1287
1288         /*
1289          * Initialize all locks for the device. This needs to be as early as
1290          * possible so locks are usable.
1291          */
1292         spin_lock_init(&dd->sc_lock);
1293         spin_lock_init(&dd->sendctrl_lock);
1294         spin_lock_init(&dd->rcvctrl_lock);
1295         spin_lock_init(&dd->uctxt_lock);
1296         spin_lock_init(&dd->hfi1_diag_trans_lock);
1297         spin_lock_init(&dd->sc_init_lock);
1298         spin_lock_init(&dd->dc8051_memlock);
1299         seqlock_init(&dd->sc2vl_lock);
1300         spin_lock_init(&dd->sde_map_lock);
1301         spin_lock_init(&dd->pio_map_lock);
1302         mutex_init(&dd->dc8051_lock);
1303         init_waitqueue_head(&dd->event_queue);
1304         spin_lock_init(&dd->irq_src_lock);
1305
1306         dd->int_counter = alloc_percpu(u64);
1307         if (!dd->int_counter) {
1308                 ret = -ENOMEM;
1309                 goto bail;
1310         }
1311
1312         dd->rcv_limit = alloc_percpu(u64);
1313         if (!dd->rcv_limit) {
1314                 ret = -ENOMEM;
1315                 goto bail;
1316         }
1317
1318         dd->send_schedule = alloc_percpu(u64);
1319         if (!dd->send_schedule) {
1320                 ret = -ENOMEM;
1321                 goto bail;
1322         }
1323
1324         dd->tx_opstats = alloc_percpu(struct hfi1_opcode_stats_perctx);
1325         if (!dd->tx_opstats) {
1326                 ret = -ENOMEM;
1327                 goto bail;
1328         }
1329
1330         dd->comp_vect = kzalloc(sizeof(*dd->comp_vect), GFP_KERNEL);
1331         if (!dd->comp_vect) {
1332                 ret = -ENOMEM;
1333                 goto bail;
1334         }
1335
1336         kobject_init(&dd->kobj, &hfi1_devdata_type);
1337         return dd;
1338
1339 bail:
1340         hfi1_clean_devdata(dd);
1341         return ERR_PTR(ret);
1342 }
1343
1344 /*
1345  * Called from freeze mode handlers, and from PCI error
1346  * reporting code.  Should be paranoid about state of
1347  * system and data structures.
1348  */
1349 void hfi1_disable_after_error(struct hfi1_devdata *dd)
1350 {
1351         if (dd->flags & HFI1_INITTED) {
1352                 u32 pidx;
1353
1354                 dd->flags &= ~HFI1_INITTED;
1355                 if (dd->pport)
1356                         for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1357                                 struct hfi1_pportdata *ppd;
1358
1359                                 ppd = dd->pport + pidx;
1360                                 if (dd->flags & HFI1_PRESENT)
1361                                         set_link_state(ppd, HLS_DN_DISABLE);
1362
1363                                 if (ppd->statusp)
1364                                         *ppd->statusp &= ~HFI1_STATUS_IB_READY;
1365                         }
1366         }
1367
1368         /*
1369          * Mark as having had an error for driver, and also
1370          * for /sys and status word mapped to user programs.
1371          * This marks unit as not usable, until reset.
1372          */
1373         if (dd->status)
1374                 dd->status->dev |= HFI1_STATUS_HWERROR;
1375 }
1376
1377 static void remove_one(struct pci_dev *);
1378 static int init_one(struct pci_dev *, const struct pci_device_id *);
1379 static void shutdown_one(struct pci_dev *);
1380
1381 #define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
1382 #define PFX DRIVER_NAME ": "
1383
1384 const struct pci_device_id hfi1_pci_tbl[] = {
1385         { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
1386         { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
1387         { 0, }
1388 };
1389
1390 MODULE_DEVICE_TABLE(pci, hfi1_pci_tbl);
1391
1392 static struct pci_driver hfi1_pci_driver = {
1393         .name = DRIVER_NAME,
1394         .probe = init_one,
1395         .remove = remove_one,
1396         .shutdown = shutdown_one,
1397         .id_table = hfi1_pci_tbl,
1398         .err_handler = &hfi1_pci_err_handler,
1399 };
1400
1401 static void __init compute_krcvqs(void)
1402 {
1403         int i;
1404
1405         for (i = 0; i < krcvqsset; i++)
1406                 n_krcvqs += krcvqs[i];
1407 }
1408
1409 /*
1410  * Do all the generic driver unit- and chip-independent memory
1411  * allocation and initialization.
1412  */
1413 static int __init hfi1_mod_init(void)
1414 {
1415         int ret;
1416
1417         ret = dev_init();
1418         if (ret)
1419                 goto bail;
1420
1421         ret = node_affinity_init();
1422         if (ret)
1423                 goto bail;
1424
1425         /* validate max MTU before any devices start */
1426         if (!valid_opa_max_mtu(hfi1_max_mtu)) {
1427                 pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
1428                        hfi1_max_mtu, HFI1_DEFAULT_MAX_MTU);
1429                 hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
1430         }
1431         /* valid CUs run from 1-128 in powers of 2 */
1432         if (hfi1_cu > 128 || !is_power_of_2(hfi1_cu))
1433                 hfi1_cu = 1;
1434         /* valid credit return threshold is 0-100, variable is unsigned */
1435         if (user_credit_return_threshold > 100)
1436                 user_credit_return_threshold = 100;
1437
1438         compute_krcvqs();
1439         /*
1440          * sanitize receive interrupt count, time must wait until after
1441          * the hardware type is known
1442          */
1443         if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK)
1444                 rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK;
1445         /* reject invalid combinations */
1446         if (rcv_intr_count == 0 && rcv_intr_timeout == 0) {
1447                 pr_err("Invalid mode: both receive interrupt count and available timeout are zero - setting interrupt count to 1\n");
1448                 rcv_intr_count = 1;
1449         }
1450         if (rcv_intr_count > 1 && rcv_intr_timeout == 0) {
1451                 /*
1452                  * Avoid indefinite packet delivery by requiring a timeout
1453                  * if count is > 1.
1454                  */
1455                 pr_err("Invalid mode: receive interrupt count greater than 1 and available timeout is zero - setting available timeout to 1\n");
1456                 rcv_intr_timeout = 1;
1457         }
1458         if (rcv_intr_dynamic && !(rcv_intr_count > 1 && rcv_intr_timeout > 0)) {
1459                 /*
1460                  * The dynamic algorithm expects a non-zero timeout
1461                  * and a count > 1.
1462                  */
1463                 pr_err("Invalid mode: dynamic receive interrupt mitigation with invalid count and timeout - turning dynamic off\n");
1464                 rcv_intr_dynamic = 0;
1465         }
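        /*
         * Net effect of the checks above (sketch): count == 1 with
         * timeout == 0 is allowed (interrupt per packet); count > 1
         * requires a non-zero timeout; rcv_intr_dynamic additionally
         * requires both count > 1 and timeout > 0.
         */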
1466
1467         /* sanitize link CRC options */
1468         link_crc_mask &= SUPPORTED_CRCS;
1469
1470         ret = opfn_init();
1471         if (ret < 0) {
1472                 pr_err("Failed to allocate opfn_wq");
1473                 goto bail_dev;
1474         }
1475
1476         /*
1477          * These must be called before the driver is registered with
1478          * the PCI subsystem.
1479          */
1480         hfi1_dbg_init();
1481         ret = pci_register_driver(&hfi1_pci_driver);
1482         if (ret < 0) {
1483                 pr_err("Unable to register driver: error %d\n", -ret);
1484                 goto bail_dev;
1485         }
1486         goto bail; /* all OK */
1487
1488 bail_dev:
1489         hfi1_dbg_exit();
1490         dev_cleanup();
1491 bail:
1492         return ret;
1493 }
1494
1495 module_init(hfi1_mod_init);
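
/*
 * The bail_dev/bail labels in hfi1_mod_init() follow the usual kernel
 * unwind idiom: each failure label undoes, in reverse order, only the
 * steps that completed before the failing call.  Generic sketch with
 * hypothetical setup_a()/setup_b() helpers:
 *
 *	ret = setup_a();
 *	if (ret)
 *		goto bail;
 *	ret = setup_b();
 *	if (ret)
 *		goto bail_a;
 *	return 0;
 * bail_a:
 *	teardown_a();
 * bail:
 *	return ret;
 */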
1496
1497 /*
1498  * Do the non-unit driver cleanup, memory free, etc. at unload.
1499  */
1500 static void __exit hfi1_mod_cleanup(void)
1501 {
1502         pci_unregister_driver(&hfi1_pci_driver);
1503         opfn_exit();
1504         node_affinity_destroy_all();
1505         hfi1_dbg_exit();
1506
1507         WARN_ON(!xa_empty(&hfi1_dev_table));
1508         dispose_firmware();     /* asymmetric with obtain_firmware() */
1509         dev_cleanup();
1510 }
1511
1512 module_exit(hfi1_mod_cleanup);
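
/*
 * hfi1_dev_table is the xarray mapping unit numbers to their struct
 * hfi1_devdata; once every device has gone through remove_one() it
 * must be empty, which the WARN_ON() above checks.  A sketch of how a
 * leak could be reported instead (assuming the same table):
 *
 *	unsigned long index;
 *	struct hfi1_devdata *dd;
 *
 *	xa_for_each(&hfi1_dev_table, index, dd)
 *		pr_err("unit %lu was not removed\n", index);
 */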
1513
1514 /* this can only be called after a successful initialization */
1515 static void cleanup_device_data(struct hfi1_devdata *dd)
1516 {
1517         int ctxt;
1518         int pidx;
1519
1520         /* users can't do anything more with the chip */
1521         for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1522                 struct hfi1_pportdata *ppd = &dd->pport[pidx];
1523                 struct cc_state *cc_state;
1524                 int i;
1525
1526                 if (ppd->statusp)
1527                         *ppd->statusp &= ~HFI1_STATUS_CHIP_PRESENT;
1528
1529                 for (i = 0; i < OPA_MAX_SLS; i++)
1530                         hrtimer_cancel(&ppd->cca_timer[i].hrtimer);
1531
1532                 spin_lock(&ppd->cc_state_lock);
1533                 cc_state = get_cc_state_protected(ppd);
1534                 RCU_INIT_POINTER(ppd->cc_state, NULL);
1535                 spin_unlock(&ppd->cc_state_lock);
1536
1537                 if (cc_state)
1538                         kfree_rcu(cc_state, rcu);
1539         }
1540
1541         free_credit_return(dd);
1542
1543         if (dd->rcvhdrtail_dummy_kvaddr) {
1544                 dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
1545                                   (void *)dd->rcvhdrtail_dummy_kvaddr,
1546                                   dd->rcvhdrtail_dummy_dma);
1547                 dd->rcvhdrtail_dummy_kvaddr = NULL;
1548         }
1549
1550         /*
1551          * Free any resources still in use (usually just kernel contexts)
1552          * at unload; we iterate over ctxtcnt, because that's what we allocated.
1553          */
1554         for (ctxt = 0; dd->rcd && ctxt < dd->num_rcv_contexts; ctxt++) {
1555                 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
1556
1557                 if (rcd) {
1558                         hfi1_free_ctxt_rcv_groups(rcd);
1559                         hfi1_free_ctxt(rcd);
1560                 }
1561         }
1562
1563         kfree(dd->rcd);
1564         dd->rcd = NULL;
1565
1566         free_pio_map(dd);
1567         /* must follow rcv context free - need to remove rcv's hooks */
1568         for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
1569                 sc_free(dd->send_contexts[ctxt].sc);
1570         dd->num_send_contexts = 0;
1571         kfree(dd->send_contexts);
1572         dd->send_contexts = NULL;
1573         kfree(dd->hw_to_sw);
1574         dd->hw_to_sw = NULL;
1575         kfree(dd->boardname);
1576         vfree(dd->events);
1577         vfree(dd->status);
1578 }
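
/*
 * The cc_state teardown in cleanup_device_data() is the standard RCU
 * "unpublish, then reclaim" pattern: clear the published pointer under
 * the lock, then let readers drain via kfree_rcu().  Generic sketch
 * with hypothetical names:
 *
 *	spin_lock(&lock);
 *	old = rcu_dereference_protected(pub, lockdep_is_held(&lock));
 *	RCU_INIT_POINTER(pub, NULL);
 *	spin_unlock(&lock);
 *	if (old)
 *		kfree_rcu(old, rcu);
 *
 * where 'rcu' names a struct rcu_head member of the object.
 */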
1579
1580 /*
1581  * Clean up on unit shutdown, or error during unit load after
1582  * successful initialization.
1583  */
1584 static void postinit_cleanup(struct hfi1_devdata *dd)
1585 {
1586         hfi1_start_cleanup(dd);
1587         hfi1_comp_vectors_clean_up(dd);
1588         hfi1_dev_affinity_clean_up(dd);
1589
1590         hfi1_pcie_ddcleanup(dd);
1591         hfi1_pcie_cleanup(dd->pcidev);
1592
1593         cleanup_device_data(dd);
1594
1595         hfi1_free_devdata(dd);
1596 }
1597
1598 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1599 {
1600         int ret = 0, j, pidx, initfail;
1601         struct hfi1_devdata *dd;
1602         struct hfi1_pportdata *ppd;
1603
1604         /* First, lock the non-writable module parameters */
1605         HFI1_CAP_LOCK();
1606
1607         /* Validate dev ids */
1608         if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
1609               ent->device == PCI_DEVICE_ID_INTEL1)) {
1610                 dev_err(&pdev->dev, "Failing on unknown Intel deviceid 0x%x\n",
1611                         ent->device);
1612                 ret = -ENODEV;
1613                 goto bail;
1614         }
1615
1616         /* Allocate the dd so we can get to work */
1617         dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
1618                                 sizeof(struct hfi1_pportdata));
1619         if (IS_ERR(dd)) {
1620                 ret = PTR_ERR(dd);
1621                 goto bail;
1622         }
1623
1624         /* Validate some global module parameters */
1625         ret = hfi1_validate_rcvhdrcnt(dd, rcvhdrcnt);
1626         if (ret)
1627                 goto bail;
1628
1629         /* use the encoding function as a sanitization check */
1630         if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
1631                 dd_dev_err(dd, "Invalid HdrQ Entry size %u\n",
1632                            hfi1_hdrq_entsize);
1633                 ret = -EINVAL;
1634                 goto bail;
1635         }
1636
1637         /* The receive eager buffer size must be set before the receive
1638          * contexts are created.
1639          *
1640          * Set the eager buffer size.  Validate that it falls in a range
1641          * allowed by the hardware - all powers of 2 between the min and
1642          * max.  The maximum valid MTU is within the eager buffer range
1643          * so we do not need to cap the max_mtu by an eager buffer size
1644          * setting.
1645          */
1646         if (eager_buffer_size) {
1647                 if (!is_power_of_2(eager_buffer_size))
1648                         eager_buffer_size =
1649                                 roundup_pow_of_two(eager_buffer_size);
1650                 eager_buffer_size =
1651                         clamp_val(eager_buffer_size,
1652                                   MIN_EAGER_BUFFER * 8,
1653                                   MAX_EAGER_BUFFER_TOTAL);
1654                 dd_dev_info(dd, "Eager buffer size %u\n",
1655                             eager_buffer_size);
1656         } else {
1657                 dd_dev_err(dd, "Invalid Eager buffer size of 0\n");
1658                 ret = -EINVAL;
1659                 goto bail;
1660         }
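        /*
         * Example (sketch): eager_buffer_size=3000000 is first rounded
         * up to 4194304 (2^22) and then clamped into the range
         * [MIN_EAGER_BUFFER * 8, MAX_EAGER_BUFFER_TOTAL].
         */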
1661
1662         /* restrict value of hfi1_rcvarr_split */
1663         hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);
1664
1665         ret = hfi1_pcie_init(dd);
1666         if (ret)
1667                 goto bail;
1668
1669         /*
1670          * Do device-specific initialization, function table setup, dd
1671          * allocation, etc.
1672          */
1673         ret = hfi1_init_dd(dd);
1674         if (ret)
1675                 goto clean_bail; /* error already printed */
1676
1677         ret = create_workqueues(dd);
1678         if (ret)
1679                 goto clean_bail;
1680
1681         /* do the generic initialization */
1682         initfail = hfi1_init(dd, 0);
1683
1684         /* setup vnic */
1685         hfi1_vnic_setup(dd);
1686
1687         ret = hfi1_register_ib_device(dd);
1688
1689         /*
1690          * Now ready for use.  This flag should be cleared whenever we
1691          * detect a reset, or initiate one.  On an earlier failure, we
1692          * still create the devices, so diags, etc. can be used to
1693          * determine the cause of the problem.
1694          */
1695         if (!initfail && !ret) {
1696                 dd->flags |= HFI1_INITTED;
1697                 /* create debugfs files after init and ib register */
1698                 hfi1_dbg_ibdev_init(&dd->verbs_dev);
1699         }
1700
1701         j = hfi1_device_create(dd);
1702         if (j)
1703                 dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
1704
1705         if (initfail || ret) {
1706                 msix_clean_up_interrupts(dd);
1707                 stop_timers(dd);
1708                 flush_workqueue(ib_wq);
1709                 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
1710                         hfi1_quiet_serdes(dd->pport + pidx);
1711                         ppd = dd->pport + pidx;
1712                         if (ppd->hfi1_wq) {
1713                                 destroy_workqueue(ppd->hfi1_wq);
1714                                 ppd->hfi1_wq = NULL;
1715                         }
1716                         if (ppd->link_wq) {
1717                                 destroy_workqueue(ppd->link_wq);
1718                                 ppd->link_wq = NULL;
1719                         }
1720                 }
1721                 if (!j)
1722                         hfi1_device_remove(dd);
1723                 if (!ret)
1724                         hfi1_unregister_ib_device(dd);
1725                 hfi1_vnic_cleanup(dd);
1726                 postinit_cleanup(dd);
1727                 if (initfail)
1728                         ret = initfail;
1729                 goto bail;      /* everything already cleaned */
1730         }
1731
1732         sdma_start(dd);
1733
1734         return 0;
1735
1736 clean_bail:
1737         hfi1_pcie_cleanup(pdev);
1738 bail:
1739         return ret;
1740 }
1741
1742 static void wait_for_clients(struct hfi1_devdata *dd)
1743 {
1744         /*
1745          * Drop the device's init reference and complete the device if
1746          * there are no clients, or wait for active clients to finish.
1747          */
1748         if (atomic_dec_and_test(&dd->user_refcount))
1749                 complete(&dd->user_comp);
1750
1751         wait_for_completion(&dd->user_comp);
1752 }
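
/*
 * user_refcount/user_comp implement a "wait for last user" handshake:
 * the device holds one reference from init, each user open takes
 * another, and each close drops one, completing user_comp on the final
 * put.  Sketch of the release side (hypothetical helper; the real
 * file-ops live elsewhere in the driver):
 *
 *	static void user_put(struct hfi1_devdata *dd)
 *	{
 *		if (atomic_dec_and_test(&dd->user_refcount))
 *			complete(&dd->user_comp);
 *	}
 */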
1753
1754 static void remove_one(struct pci_dev *pdev)
1755 {
1756         struct hfi1_devdata *dd = pci_get_drvdata(pdev);
1757
1758         /* close debugfs files before ib unregister */
1759         hfi1_dbg_ibdev_exit(&dd->verbs_dev);
1760
1761         /* remove the /dev hfi1 interface */
1762         hfi1_device_remove(dd);
1763
1764         /* wait for existing user space clients to finish */
1765         wait_for_clients(dd);
1766
1767         /* unregister from IB core */
1768         hfi1_unregister_ib_device(dd);
1769
1770         /* cleanup vnic */
1771         hfi1_vnic_cleanup(dd);
1772
1773         /*
1774          * Disable the IB link, disable interrupts on the device,
1775          * clear dma engines, etc.
1776          */
1777         shutdown_device(dd);
1778
1779         stop_timers(dd);
1780
1781         /* wait until all of our (qsfp) queue_work() calls complete */
1782         flush_workqueue(ib_wq);
1783
1784         postinit_cleanup(dd);
1785 }
1786
1787 static void shutdown_one(struct pci_dev *pdev)
1788 {
1789         struct hfi1_devdata *dd = pci_get_drvdata(pdev);
1790
1791         shutdown_device(dd);
1792 }
1793
1794 /**
1795  * hfi1_create_rcvhdrq - create a receive header queue
1796  * @dd: the hfi1_ib device
1797  * @rcd: the context data
1798  *
1799  * This must be contiguous memory (from an I/O perspective), and must be
1800  * DMA'able (which means for some systems, it will go through an IOMMU,
1801  * or be forced into a low address range).
1802  */
1803 int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
1804 {
1805         unsigned amt;
1806
1807         if (!rcd->rcvhdrq) {
1808                 gfp_t gfp_flags;
1809
1810                 amt = rcvhdrq_size(rcd);
1811
1812                 if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
1813                         gfp_flags = GFP_KERNEL;
1814                 else
1815                         gfp_flags = GFP_USER;
1816                 rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
1817                                                   &rcd->rcvhdrq_dma,
1818                                                   gfp_flags | __GFP_COMP);
1819
1820                 if (!rcd->rcvhdrq) {
1821                         dd_dev_err(dd,
1822                                    "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
1823                                    amt, rcd->ctxt);
1824                         goto bail;
1825                 }
1826
1827                 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
1828                     HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
1829                         rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
1830                                                                     PAGE_SIZE,
1831                                                                     &rcd->rcvhdrqtailaddr_dma,
1832                                                                     gfp_flags);
1833                         if (!rcd->rcvhdrtail_kvaddr)
1834                                 goto bail_free;
1835                 }
1836         }
1837
1838         set_hdrq_regs(rcd->dd, rcd->ctxt, rcd->rcvhdrqentsize,
1839                       rcd->rcvhdrq_cnt);
1840
1841         return 0;
1842
1843 bail_free:
1844         dd_dev_err(dd,
1845                    "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
1846                    rcd->ctxt);
1847         dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
1848                           rcd->rcvhdrq_dma);
1849         rcd->rcvhdrq = NULL;
1850 bail:
1851         return -ENOMEM;
1852 }
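
/*
 * Note the pairing in the bail_free path above: memory from
 * dma_alloc_coherent() must be returned with dma_free_coherent()
 * using the same device, size, CPU address and DMA handle.  Minimal
 * sketch of the API contract:
 *
 *	void *cpu = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, size, cpu, dma_handle);
 */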
1853
1854 /**
1855  * hfi1_setup_eagerbufs - allocate eager buffers, both kernel and user contexts.
1856  * @rcd: the context we are setting up.
1857  *
1858  * Allocate the eager TID buffers and program them into the chip.
1859  * They are no longer completely contiguous; we make multiple allocation
1860  * calls.  Otherwise we would get the OOM code involved by asking for too
1861  * much per call, with disastrous results on some kernels.
1862  */
1863 int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
1864 {
1865         struct hfi1_devdata *dd = rcd->dd;
1866         u32 max_entries, egrtop, alloced_bytes = 0;
1867         gfp_t gfp_flags;
1868         u16 order, idx = 0;
1869         int ret = 0;
1870         u16 round_mtu = roundup_pow_of_two(hfi1_max_mtu);
1871
1872         /*
1873          * GFP_USER, but without GFP_FS, so the buffer cache can be
1874          * coalesced (we hope); otherwise, even at order 4, heavy
1875          * filesystem activity makes these allocations fail.  We also
1876          * request compound pages (__GFP_COMP).
1877          */
1878         gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
1879
1880         /*
1881          * The minimum size of the eager buffers is a group of MTU-sized
1882          * buffers.
1883          * The global eager_buffer_size parameter is checked against the
1884          * theoretical lower limit of the value. Here, we check against the
1885          * MTU.
1886          */
1887         if (rcd->egrbufs.size < (round_mtu * dd->rcv_entries.group_size))
1888                 rcd->egrbufs.size = round_mtu * dd->rcv_entries.group_size;
1889         /*
1890          * If using one-pkt-per-egr-buffer, lower the eager buffer
1891          * size to the max MTU (page-aligned).
1892          */
1893         if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
1894                 rcd->egrbufs.rcvtid_size = round_mtu;
1895
1896         /*
1897          * Eager buffer sizes of 1MB or less require smaller TID sizes
1898          * to satisfy the "multiple of 8 RcvArray entries" requirement.
1899          */
1900         if (rcd->egrbufs.size <= (1 << 20))
1901                 rcd->egrbufs.rcvtid_size = max((unsigned long)round_mtu,
1902                         rounddown_pow_of_two(rcd->egrbufs.size / 8));
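        /*
         * E.g. (sketch): egrbufs.size = 1MB with an 8KB round_mtu gives
         * rcvtid_size = max(8192, rounddown_pow_of_two(1MB / 8)) = 128KB.
         */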
1903
1904         while (alloced_bytes < rcd->egrbufs.size &&
1905                rcd->egrbufs.alloced < rcd->egrbufs.count) {
1906                 rcd->egrbufs.buffers[idx].addr =
1907                         dma_alloc_coherent(&dd->pcidev->dev,
1908                                            rcd->egrbufs.rcvtid_size,
1909                                            &rcd->egrbufs.buffers[idx].dma,
1910                                            gfp_flags);
1911                 if (rcd->egrbufs.buffers[idx].addr) {
1912                         rcd->egrbufs.buffers[idx].len =
1913                                 rcd->egrbufs.rcvtid_size;
1914                         rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
1915                                 rcd->egrbufs.buffers[idx].addr;
1916                         rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].dma =
1917                                 rcd->egrbufs.buffers[idx].dma;
1918                         rcd->egrbufs.alloced++;
1919                         alloced_bytes += rcd->egrbufs.rcvtid_size;
1920                         idx++;
1921                 } else {
1922                         u32 new_size, i, j;
1923                         u64 offset = 0;
1924
1925                         /*
1926                          * Fail the eager buffer allocation if:
1927                          *   - we are already using the lowest acceptable size
1928                          *   - we are using one-pkt-per-egr-buffer (this implies
1929                          *     that we are accepting only one size)
1930                          */
1931                         if (rcd->egrbufs.rcvtid_size == round_mtu ||
1932                             !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
1933                                 dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
1934                                            rcd->ctxt);
1935                                 ret = -ENOMEM;
1936                                 goto bail_rcvegrbuf_phys;
1937                         }
1938
1939                         new_size = rcd->egrbufs.rcvtid_size / 2;
1940
1941                         /*
1942                          * If the first attempt to allocate memory failed, don't
1943                          * fail everything but continue with the next lower
1944                          * size.
1945                          */
1946                         if (idx == 0) {
1947                                 rcd->egrbufs.rcvtid_size = new_size;
1948                                 continue;
1949                         }
1950
1951                         /*
1952                          * Re-partition already allocated buffers to a smaller
1953                          * size.
1954                          */
1955                         rcd->egrbufs.alloced = 0;
1956                         for (i = 0, j = 0, offset = 0; j < idx; i++) {
1957                                 if (i >= rcd->egrbufs.count)
1958                                         break;
1959                                 rcd->egrbufs.rcvtids[i].dma =
1960                                         rcd->egrbufs.buffers[j].dma + offset;
1961                                 rcd->egrbufs.rcvtids[i].addr =
1962                                         rcd->egrbufs.buffers[j].addr + offset;
1963                                 rcd->egrbufs.alloced++;
1964                                 if ((rcd->egrbufs.buffers[j].dma + offset +
1965                                      new_size) ==
1966                                     (rcd->egrbufs.buffers[j].dma +
1967                                      rcd->egrbufs.buffers[j].len)) {
1968                                         j++;
1969                                         offset = 0;
1970                                 } else {
1971                                         offset += new_size;
1972                                 }
1973                         }
1974                         rcd->egrbufs.rcvtid_size = new_size;
1975                 }
1976         }
1977         rcd->egrbufs.numbufs = idx;
1978         rcd->egrbufs.size = alloced_bytes;
1979
1980         hfi1_cdbg(PROC,
1981                   "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %uKB\n",
1982                   rcd->ctxt, rcd->egrbufs.alloced,
1983                   rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);
1984
1985         /*
1986          * Set the context's rcv array head update threshold to the closest
1987          * power of 2 (so we can use a mask instead of modulo) below half
1988          * the allocated entries.
1989          */
1990         rcd->egrbufs.threshold =
1991                 rounddown_pow_of_two(rcd->egrbufs.alloced / 2);
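        /* E.g. 100 allocated entries -> threshold = rounddown_pow_of_two(50) = 32 (sketch). */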
1992         /*
1993          * Compute the expected RcvArray entry base. This is done after
1994          * allocating the eager buffers in order to maximize the
1995          * expected RcvArray entries for the context.
1996          */
1997         max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size;
1998         egrtop = roundup(rcd->egrbufs.alloced, dd->rcv_entries.group_size);
1999         rcd->expected_count = max_entries - egrtop;
2000         if (rcd->expected_count > MAX_TID_PAIR_ENTRIES * 2)
2001                 rcd->expected_count = MAX_TID_PAIR_ENTRIES * 2;
2002
2003         rcd->expected_base = rcd->eager_base + egrtop;
2004         hfi1_cdbg(PROC, "ctxt%u: eager:%u, exp:%u, egrbase:%u, expbase:%u\n",
2005                   rcd->ctxt, rcd->egrbufs.alloced, rcd->expected_count,
2006                   rcd->eager_base, rcd->expected_base);
2007
2008         if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
2009                 hfi1_cdbg(PROC,
2010                           "ctxt%u: current Eager buffer size is invalid %u\n",
2011                           rcd->ctxt, rcd->egrbufs.rcvtid_size);
2012                 ret = -EINVAL;
2013                 goto bail_rcvegrbuf_phys;
2014         }
2015
2016         for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
2017                 hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
2018                              rcd->egrbufs.rcvtids[idx].dma, order);
2019                 cond_resched();
2020         }
2021
2022         return 0;
2023
2024 bail_rcvegrbuf_phys:
2025         for (idx = 0; idx < rcd->egrbufs.alloced &&
2026              rcd->egrbufs.buffers[idx].addr;
2027              idx++) {
2028                 dma_free_coherent(&dd->pcidev->dev,
2029                                   rcd->egrbufs.buffers[idx].len,
2030                                   rcd->egrbufs.buffers[idx].addr,
2031                                   rcd->egrbufs.buffers[idx].dma);
2032                 rcd->egrbufs.buffers[idx].addr = NULL;
2033                 rcd->egrbufs.buffers[idx].dma = 0;
2034                 rcd->egrbufs.buffers[idx].len = 0;
2035         }
2036
2037         return ret;
2038 }
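
/*
 * The allocation loop in hfi1_setup_eagerbufs() degrades gracefully:
 * each dma_alloc_coherent() failure halves rcvtid_size (never below
 * the rounded-up MTU), and buffers already allocated at the larger
 * size are re-described as several smaller rcvtid entries rather than
 * freed.  Rough shape of the size ladder (sketch, hypothetical
 * try_alloc() helper):
 *
 *	size = rcd->egrbufs.rcvtid_size;
 *	while (!try_alloc(size)) {
 *		if (size == round_mtu)
 *			return -ENOMEM;
 *		size /= 2;
 *	}
 */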