/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe NTB Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */

#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ntb.h>

#include "ntb_hw_intel.h"

#define NTB_NAME        "ntb_hw_intel"
#define NTB_DESC        "Intel(R) PCI-E Non-Transparent Bridge Driver"
#define NTB_VER         "2.0"

MODULE_DESCRIPTION(NTB_DESC);
MODULE_VERSION(NTB_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

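/* Per-BAR register offsets: each BAR occupies a 4-byte register slot.
 * The bar2 variant rebases the index so that BAR2 maps to slot zero.
 */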
#define bar0_off(base, bar) ((base) + ((bar) << 2))
#define bar2_off(base, bar) bar0_off(base, (bar) - 2)

static const struct intel_ntb_reg atom_reg;
static const struct intel_ntb_alt_reg atom_pri_reg;
static const struct intel_ntb_alt_reg atom_sec_reg;
static const struct intel_ntb_alt_reg atom_b2b_reg;
static const struct intel_ntb_xlat_reg atom_pri_xlat;
static const struct intel_ntb_xlat_reg atom_sec_xlat;
static const struct intel_ntb_reg xeon_reg;
static const struct intel_ntb_alt_reg xeon_pri_reg;
static const struct intel_ntb_alt_reg xeon_sec_reg;
static const struct intel_ntb_alt_reg xeon_b2b_reg;
static const struct intel_ntb_xlat_reg xeon_pri_xlat;
static const struct intel_ntb_xlat_reg xeon_sec_xlat;
static struct intel_b2b_addr xeon_b2b_usd_addr;
static struct intel_b2b_addr xeon_b2b_dsd_addr;
static const struct intel_ntb_reg skx_reg;
static const struct intel_ntb_alt_reg skx_pri_reg;
static const struct intel_ntb_alt_reg skx_b2b_reg;
static const struct intel_ntb_xlat_reg skx_sec_xlat;
static const struct ntb_dev_ops intel_ntb_ops;
static const struct ntb_dev_ops intel_ntb3_ops;

static const struct file_operations intel_ntb_debugfs_info;
static struct dentry *debugfs_dir;

static int b2b_mw_idx = -1;
module_param(b2b_mw_idx, int, 0644);
MODULE_PARM_DESC(b2b_mw_idx, "Use this mw idx to access the peer ntb.  Zero "
                 "or a positive value counts from the first mw idx; a "
                 "negative value counts from the last mw idx.  Both sides "
                 "MUST set the same value here!");

static unsigned int b2b_mw_share;
module_param(b2b_mw_share, uint, 0644);
MODULE_PARM_DESC(b2b_mw_share, "If the b2b mw is large enough, configure the "
                 "ntb so that the peer only occupies the first half of the "
                 "mw, leaving the second half usable as a memory window.  "
                 "Both sides MUST set the same value here!");

module_param_named(xeon_b2b_usd_bar2_addr64,
                   xeon_b2b_usd_addr.bar2_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
                 "XEON B2B USD BAR 2 64-bit address");

module_param_named(xeon_b2b_usd_bar4_addr64,
                   xeon_b2b_usd_addr.bar4_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64,
                 "XEON B2B USD BAR 4 64-bit address");

module_param_named(xeon_b2b_usd_bar4_addr32,
                   xeon_b2b_usd_addr.bar4_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32,
                 "XEON B2B USD split-BAR 4 32-bit address");

module_param_named(xeon_b2b_usd_bar5_addr32,
                   xeon_b2b_usd_addr.bar5_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32,
                 "XEON B2B USD split-BAR 5 32-bit address");

module_param_named(xeon_b2b_dsd_bar2_addr64,
                   xeon_b2b_dsd_addr.bar2_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
                 "XEON B2B DSD BAR 2 64-bit address");

module_param_named(xeon_b2b_dsd_bar4_addr64,
                   xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64,
                 "XEON B2B DSD BAR 4 64-bit address");

module_param_named(xeon_b2b_dsd_bar4_addr32,
                   xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32,
                 "XEON B2B DSD split-BAR 4 32-bit address");

module_param_named(xeon_b2b_dsd_bar5_addr32,
                   xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32,
                 "XEON B2B DSD split-BAR 5 32-bit address");

static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd);
static int xeon_init_isr(struct intel_ntb_dev *ndev);

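/* Fall back to paired 32-bit accesses on platforms that lack readq and
 * writeq.  Note that the low and high halves are not read or written
 * atomically with respect to each other.
 */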
#ifndef ioread64
#ifdef readq
#define ioread64 readq
#else
#define ioread64 _ioread64
static inline u64 _ioread64(void __iomem *mmio)
{
        u64 low, high;

        low = ioread32(mmio);
        high = ioread32(mmio + sizeof(u32));
        return low | (high << 32);
}
#endif
#endif

#ifndef iowrite64
#ifdef writeq
#define iowrite64 writeq
#else
#define iowrite64 _iowrite64
static inline void _iowrite64(u64 val, void __iomem *mmio)
{
        iowrite32(val, mmio);
        iowrite32(val >> 32, mmio + sizeof(u32));
}
#endif
#endif

static inline int pdev_is_atom(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_NTB_B2B_BWD:
                return 1;
        }
        return 0;
}

static inline int pdev_is_xeon(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
        case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
        case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
        case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
        case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
        case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
        case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
        case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
        case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
        case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
        case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
                return 1;
        }
        return 0;
}

static inline int pdev_is_skx_xeon(struct pci_dev *pdev)
{
        if (pdev->device == PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)
                return 1;

        return 0;
}

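/* Doorbell and scratchpad access is flagged unsafe when hardware errata
 * prevent normal use; a client may still explicitly opt in through the
 * ndev_ignore_unsafe() path below.
 */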
static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev)
{
        ndev->unsafe_flags = 0;
        ndev->unsafe_flags_ignore = 0;

        /* Only B2B has a workaround to avoid SDOORBELL */
        if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP)
                if (!ntb_topo_is_b2b(ndev->ntb.topo))
                        ndev->unsafe_flags |= NTB_UNSAFE_DB;

        /* No low level workaround to avoid SB01BASE */
        if (ndev->hwerr_flags & NTB_HWERR_SB01BASE_LOCKUP) {
                ndev->unsafe_flags |= NTB_UNSAFE_DB;
                ndev->unsafe_flags |= NTB_UNSAFE_SPAD;
        }
}

static inline int ndev_is_unsafe(struct intel_ntb_dev *ndev,
                                 unsigned long flag)
{
        return !!(flag & ndev->unsafe_flags & ~ndev->unsafe_flags_ignore);
}

static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev,
                                     unsigned long flag)
{
        flag &= ndev->unsafe_flags;
        ndev->unsafe_flags_ignore |= flag;

        return !!flag;
}

static int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
{
        if (idx < 0 || idx >= ndev->mw_count)
                return -EINVAL;
        return ndev->reg->mw_bar[idx];
}

static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
                               phys_addr_t *db_addr, resource_size_t *db_size,
                               phys_addr_t reg_addr, unsigned long reg)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
                pr_warn_once("%s: NTB unsafe doorbell access", __func__);

        if (db_addr) {
                *db_addr = reg_addr + reg;
                dev_dbg(ndev_dev(ndev), "Peer db addr %llx\n", *db_addr);
        }

        if (db_size) {
                *db_size = ndev->reg->db_size;
                dev_dbg(ndev_dev(ndev), "Peer db size %llx\n", *db_size);
        }

        return 0;
}

static inline u64 ndev_db_read(struct intel_ntb_dev *ndev,
                               void __iomem *mmio)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
                pr_warn_once("%s: NTB unsafe doorbell access", __func__);

        return ndev->reg->db_ioread(mmio);
}

static inline int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
                                void __iomem *mmio)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
                pr_warn_once("%s: NTB unsafe doorbell access", __func__);

        if (db_bits & ~ndev->db_valid_mask)
                return -EINVAL;

        ndev->reg->db_iowrite(db_bits, mmio);

        return 0;
}

static inline int ndev_db_set_mask(struct intel_ntb_dev *ndev, u64 db_bits,
                                   void __iomem *mmio)
{
        unsigned long irqflags;

        if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
                pr_warn_once("%s: NTB unsafe doorbell access", __func__);

        if (db_bits & ~ndev->db_valid_mask)
                return -EINVAL;

        spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
        {
                ndev->db_mask |= db_bits;
                ndev->reg->db_iowrite(ndev->db_mask, mmio);
        }
        spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

        return 0;
}

static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits,
                                     void __iomem *mmio)
{
        unsigned long irqflags;

        if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
                pr_warn_once("%s: NTB unsafe doorbell access", __func__);

        if (db_bits & ~ndev->db_valid_mask)
                return -EINVAL;

        spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
        {
                ndev->db_mask &= ~db_bits;
                ndev->reg->db_iowrite(ndev->db_mask, mmio);
        }
        spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

        return 0;
}

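/* Compute the 64-bit doorbell mask serviced by an interrupt vector:
 * each vector covers a contiguous group of db_vec_shift doorbell bits.
 */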
static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
{
        u64 shift, mask;

        shift = ndev->db_vec_shift;
        mask = BIT_ULL(shift) - 1;

        return mask << (shift * db_vector);
}

static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx,
                                 phys_addr_t *spad_addr, phys_addr_t reg_addr,
                                 unsigned long reg)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
                pr_warn_once("%s: NTB unsafe scratchpad access", __func__);

        if (idx < 0 || idx >= ndev->spad_count)
                return -EINVAL;

        if (spad_addr) {
                *spad_addr = reg_addr + reg + (idx << 2);
                dev_dbg(ndev_dev(ndev), "Peer spad addr %llx\n", *spad_addr);
        }

        return 0;
}

static inline u32 ndev_spad_read(struct intel_ntb_dev *ndev, int idx,
                                 void __iomem *mmio)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
                pr_warn_once("%s: NTB unsafe scratchpad access", __func__);

        if (idx < 0 || idx >= ndev->spad_count)
                return 0;

        return ioread32(mmio + (idx << 2));
}

static inline int ndev_spad_write(struct intel_ntb_dev *ndev, int idx, u32 val,
                                  void __iomem *mmio)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
                pr_warn_once("%s: NTB unsafe scratchpad access", __func__);

        if (idx < 0 || idx >= ndev->spad_count)
                return -EINVAL;

        iowrite32(val, mmio + (idx << 2));

        return 0;
}

static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec)
{
        u64 vec_mask;

        vec_mask = ndev_vec_mask(ndev, vec);

        if ((ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) && (vec == 31))
                vec_mask |= ndev->db_link_mask;

        dev_dbg(ndev_dev(ndev), "vec %d vec_mask %llx\n", vec, vec_mask);

        ndev->last_ts = jiffies;

        if (vec_mask & ndev->db_link_mask) {
                if (ndev->reg->poll_link(ndev))
                        ntb_link_event(&ndev->ntb);
        }

        if (vec_mask & ndev->db_valid_mask)
                ntb_db_event(&ndev->ntb, vec);

        return IRQ_HANDLED;
}

static irqreturn_t ndev_vec_isr(int irq, void *dev)
{
        struct intel_ntb_vec *nvec = dev;

        dev_dbg(ndev_dev(nvec->ndev), "irq: %d  nvec->num: %d\n",
                irq, nvec->num);

        return ndev_interrupt(nvec->ndev, nvec->num);
}

static irqreturn_t ndev_irq_isr(int irq, void *dev)
{
        struct intel_ntb_dev *ndev = dev;

        return ndev_interrupt(ndev, irq - ndev_pdev(ndev)->irq);
}

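/* Set up interrupts, preferring MSI-X and falling back to MSI, then to
 * legacy INTx, when the preferred mode cannot be enabled.
 */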
static int ndev_init_isr(struct intel_ntb_dev *ndev,
                         int msix_min, int msix_max,
                         int msix_shift, int total_shift)
{
        struct pci_dev *pdev;
        int rc, i, msix_count, node;

        pdev = ndev_pdev(ndev);

        node = dev_to_node(&pdev->dev);

        /* Mask all doorbell interrupts */
        ndev->db_mask = ndev->db_valid_mask;
        ndev->reg->db_iowrite(ndev->db_mask,
                              ndev->self_mmio +
                              ndev->self_reg->db_mask);

        /* Try to set up msix irq */

        ndev->vec = kzalloc_node(msix_max * sizeof(*ndev->vec),
                                 GFP_KERNEL, node);
        if (!ndev->vec)
                goto err_msix_vec_alloc;

        ndev->msix = kzalloc_node(msix_max * sizeof(*ndev->msix),
                                  GFP_KERNEL, node);
        if (!ndev->msix)
                goto err_msix_alloc;

        for (i = 0; i < msix_max; ++i)
                ndev->msix[i].entry = i;

        msix_count = pci_enable_msix_range(pdev, ndev->msix,
                                           msix_min, msix_max);
        if (msix_count < 0)
                goto err_msix_enable;

        for (i = 0; i < msix_count; ++i) {
                ndev->vec[i].ndev = ndev;
                ndev->vec[i].num = i;
                rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
                                 "ndev_vec_isr", &ndev->vec[i]);
                if (rc)
                        goto err_msix_request;
        }

        dev_dbg(ndev_dev(ndev), "Using %d msix interrupts\n", msix_count);
        ndev->db_vec_count = msix_count;
        ndev->db_vec_shift = msix_shift;
        return 0;

err_msix_request:
        while (i-- > 0)
                free_irq(ndev->msix[i].vector, &ndev->vec[i]);
        pci_disable_msix(pdev);
err_msix_enable:
        kfree(ndev->msix);
err_msix_alloc:
        kfree(ndev->vec);
err_msix_vec_alloc:
        ndev->msix = NULL;
        ndev->vec = NULL;

        /* Try to set up msi irq */

        rc = pci_enable_msi(pdev);
        if (rc)
                goto err_msi_enable;

        rc = request_irq(pdev->irq, ndev_irq_isr, 0,
                         "ndev_irq_isr", ndev);
        if (rc)
                goto err_msi_request;

        dev_dbg(ndev_dev(ndev), "Using msi interrupts\n");
        ndev->db_vec_count = 1;
        ndev->db_vec_shift = total_shift;
        return 0;

err_msi_request:
        pci_disable_msi(pdev);
err_msi_enable:

        /* Try to set up intx irq */

        pci_intx(pdev, 1);

        rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
                         "ndev_irq_isr", ndev);
        if (rc)
                goto err_intx_request;

        dev_dbg(ndev_dev(ndev), "Using intx interrupts\n");
        ndev->db_vec_count = 1;
        ndev->db_vec_shift = total_shift;
        return 0;

err_intx_request:
        return rc;
}

static void ndev_deinit_isr(struct intel_ntb_dev *ndev)
{
        struct pci_dev *pdev;
        int i;

        pdev = ndev_pdev(ndev);

        /* Mask all doorbell interrupts */
        ndev->db_mask = ndev->db_valid_mask;
        ndev->reg->db_iowrite(ndev->db_mask,
                              ndev->self_mmio +
                              ndev->self_reg->db_mask);

        if (ndev->msix) {
                i = ndev->db_vec_count;
                while (i--)
                        free_irq(ndev->msix[i].vector, &ndev->vec[i]);
                pci_disable_msix(pdev);
                kfree(ndev->msix);
                kfree(ndev->vec);
        } else {
                free_irq(pdev->irq, ndev);
                if (pci_dev_msi_enabled(pdev))
                        pci_disable_msi(pdev);
        }
}

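/* debugfs read handler dumping device state on gen3 (Skylake Xeon)
 * hardware.
 */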
static ssize_t ndev_ntb3_debugfs_read(struct file *filp, char __user *ubuf,
                                      size_t count, loff_t *offp)
{
        struct intel_ntb_dev *ndev;
        void __iomem *mmio;
        char *buf;
        size_t buf_size;
        ssize_t ret, off;
        union { u64 v64; u32 v32; u16 v16; } u;

        ndev = filp->private_data;
        mmio = ndev->self_mmio;

        buf_size = min(count, 0x800ul);

        buf = kmalloc(buf_size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        off = 0;

        off += scnprintf(buf + off, buf_size - off,
                         "NTB Device Information:\n");

        off += scnprintf(buf + off, buf_size - off,
                         "Connection Topology -\t%s\n",
                         ntb_topo_string(ndev->ntb.topo));

        off += scnprintf(buf + off, buf_size - off,
                         "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
        off += scnprintf(buf + off, buf_size - off,
                         "LNK STA -\t\t%#06x\n", ndev->lnk_sta);

        if (!ndev->reg->link_is_up(ndev)) {
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Status -\t\tDown\n");
        } else {
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Status -\t\tUp\n");
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Speed -\t\tPCI-E Gen %u\n",
                                 NTB_LNK_STA_SPEED(ndev->lnk_sta));
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Width -\t\tx%u\n",
                                 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
        }

        off += scnprintf(buf + off, buf_size - off,
                         "Memory Window Count -\t%u\n", ndev->mw_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Scratchpad Count -\t%u\n", ndev->spad_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Count -\t%u\n", ndev->db_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);

        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);

        u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Mask -\t\t%#llx\n", u.v64);

        u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Bell -\t\t%#llx\n", u.v64);

        off += scnprintf(buf + off, buf_size - off,
                         "\nNTB Incoming XLAT:\n");

        u.v64 = ioread64(mmio + SKX_IMBAR1XBASE_OFFSET);
        off += scnprintf(buf + off, buf_size - off,
                         "IMBAR1XBASE -\t\t%#018llx\n", u.v64);

        u.v64 = ioread64(mmio + SKX_IMBAR2XBASE_OFFSET);
        off += scnprintf(buf + off, buf_size - off,
                         "IMBAR2XBASE -\t\t%#018llx\n", u.v64);

        u.v64 = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET);
        off += scnprintf(buf + off, buf_size - off,
                         "IMBAR1XLMT -\t\t\t%#018llx\n", u.v64);

        u.v64 = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET);
        off += scnprintf(buf + off, buf_size - off,
                         "IMBAR2XLMT -\t\t\t%#018llx\n", u.v64);

        if (ntb_topo_is_b2b(ndev->ntb.topo)) {
                off += scnprintf(buf + off, buf_size - off,
                                 "\nNTB Outgoing B2B XLAT:\n");

                u.v64 = ioread64(mmio + SKX_EMBAR1XBASE_OFFSET);
                off += scnprintf(buf + off, buf_size - off,
                                 "EMBAR1XBASE -\t\t%#018llx\n", u.v64);

                u.v64 = ioread64(mmio + SKX_EMBAR2XBASE_OFFSET);
                off += scnprintf(buf + off, buf_size - off,
                                 "EMBAR2XBASE -\t\t%#018llx\n", u.v64);

                u.v64 = ioread64(mmio + SKX_EMBAR1XLMT_OFFSET);
                off += scnprintf(buf + off, buf_size - off,
                                 "EMBAR1XLMT -\t\t%#018llx\n", u.v64);

                u.v64 = ioread64(mmio + SKX_EMBAR2XLMT_OFFSET);
                off += scnprintf(buf + off, buf_size - off,
                                 "EMBAR2XLMT -\t\t%#018llx\n", u.v64);

                off += scnprintf(buf + off, buf_size - off,
                                 "\nNTB Secondary BAR:\n");

                u.v64 = ioread64(mmio + SKX_EMBAR0_OFFSET);
                off += scnprintf(buf + off, buf_size - off,
                                 "EMBAR0 -\t\t%#018llx\n", u.v64);

                u.v64 = ioread64(mmio + SKX_EMBAR1_OFFSET);
                off += scnprintf(buf + off, buf_size - off,
                                 "EMBAR1 -\t\t%#018llx\n", u.v64);

                u.v64 = ioread64(mmio + SKX_EMBAR2_OFFSET);
                off += scnprintf(buf + off, buf_size - off,
                                 "EMBAR2 -\t\t%#018llx\n", u.v64);
        }

        off += scnprintf(buf + off, buf_size - off,
                         "\nNTB Statistics:\n");

        u.v16 = ioread16(mmio + SKX_USMEMMISS_OFFSET);
        off += scnprintf(buf + off, buf_size - off,
                         "Upstream Memory Miss -\t%u\n", u.v16);

        off += scnprintf(buf + off, buf_size - off,
                         "\nNTB Hardware Errors:\n");

        if (!pci_read_config_word(ndev->ntb.pdev,
                                  SKX_DEVSTS_OFFSET, &u.v16))
                off += scnprintf(buf + off, buf_size - off,
                                 "DEVSTS -\t\t%#06x\n", u.v16);

        if (!pci_read_config_word(ndev->ntb.pdev,
                                  SKX_LINK_STATUS_OFFSET, &u.v16))
                off += scnprintf(buf + off, buf_size - off,
                                 "LNKSTS -\t\t%#06x\n", u.v16);

        if (!pci_read_config_dword(ndev->ntb.pdev,
                                   SKX_UNCERRSTS_OFFSET, &u.v32))
                off += scnprintf(buf + off, buf_size - off,
                                 "UNCERRSTS -\t\t%#06x\n", u.v32);

        if (!pci_read_config_dword(ndev->ntb.pdev,
                                   SKX_CORERRSTS_OFFSET, &u.v32))
                off += scnprintf(buf + off, buf_size - off,
                                 "CORERRSTS -\t\t%#06x\n", u.v32);

        ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
        kfree(buf);
        return ret;
}

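/* debugfs read handler dumping device state on Xeon and Atom hardware. */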
static ssize_t ndev_ntb_debugfs_read(struct file *filp, char __user *ubuf,
                                     size_t count, loff_t *offp)
{
        struct intel_ntb_dev *ndev;
        struct pci_dev *pdev;
        void __iomem *mmio;
        char *buf;
        size_t buf_size;
        ssize_t ret, off;
        union { u64 v64; u32 v32; u16 v16; u8 v8; } u;

        ndev = filp->private_data;
        pdev = ndev_pdev(ndev);
        mmio = ndev->self_mmio;

        buf_size = min(count, 0x800ul);

        buf = kmalloc(buf_size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        off = 0;

        off += scnprintf(buf + off, buf_size - off,
                         "NTB Device Information:\n");

        off += scnprintf(buf + off, buf_size - off,
                         "Connection Topology -\t%s\n",
                         ntb_topo_string(ndev->ntb.topo));

        if (ndev->b2b_idx != UINT_MAX) {
                off += scnprintf(buf + off, buf_size - off,
                                 "B2B MW Idx -\t\t%u\n", ndev->b2b_idx);
                off += scnprintf(buf + off, buf_size - off,
                                 "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
        }

        off += scnprintf(buf + off, buf_size - off,
                         "BAR4 Split -\t\t%s\n",
                         ndev->bar4_split ? "yes" : "no");

        off += scnprintf(buf + off, buf_size - off,
                         "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
        off += scnprintf(buf + off, buf_size - off,
                         "LNK STA -\t\t%#06x\n", ndev->lnk_sta);

        if (!ndev->reg->link_is_up(ndev)) {
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Status -\t\tDown\n");
        } else {
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Status -\t\tUp\n");
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Speed -\t\tPCI-E Gen %u\n",
                                 NTB_LNK_STA_SPEED(ndev->lnk_sta));
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Width -\t\tx%u\n",
                                 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
        }

        off += scnprintf(buf + off, buf_size - off,
                         "Memory Window Count -\t%u\n", ndev->mw_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Scratchpad Count -\t%u\n", ndev->spad_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Count -\t%u\n", ndev->db_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);

        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);

        u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Mask -\t\t%#llx\n", u.v64);

        u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Bell -\t\t%#llx\n", u.v64);

        off += scnprintf(buf + off, buf_size - off,
                         "\nNTB Window Size:\n");

        pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &u.v8);
        off += scnprintf(buf + off, buf_size - off,
                         "PBAR23SZ %hhu\n", u.v8);
        if (!ndev->bar4_split) {
                pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &u.v8);
                off += scnprintf(buf + off, buf_size - off,
                                 "PBAR45SZ %hhu\n", u.v8);
        } else {
                pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &u.v8);
                off += scnprintf(buf + off, buf_size - off,
                                 "PBAR4SZ %hhu\n", u.v8);
                pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &u.v8);
                off += scnprintf(buf + off, buf_size - off,
                                 "PBAR5SZ %hhu\n", u.v8);
        }

        pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &u.v8);
        off += scnprintf(buf + off, buf_size - off,
                         "SBAR23SZ %hhu\n", u.v8);
        if (!ndev->bar4_split) {
                pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &u.v8);
                off += scnprintf(buf + off, buf_size - off,
                                 "SBAR45SZ %hhu\n", u.v8);
        } else {
                pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &u.v8);
                off += scnprintf(buf + off, buf_size - off,
                                 "SBAR4SZ %hhu\n", u.v8);
                pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &u.v8);
                off += scnprintf(buf + off, buf_size - off,
                                 "SBAR5SZ %hhu\n", u.v8);
        }

        off += scnprintf(buf + off, buf_size - off,
                         "\nNTB Incoming XLAT:\n");

        u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2));
        off += scnprintf(buf + off, buf_size - off,
                         "XLAT23 -\t\t%#018llx\n", u.v64);

        if (ndev->bar4_split) {
                u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
                off += scnprintf(buf + off, buf_size - off,
                                 "XLAT4 -\t\t\t%#06x\n", u.v32);

                u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 5));
                off += scnprintf(buf + off, buf_size - off,
                                 "XLAT5 -\t\t\t%#06x\n", u.v32);
        } else {
                u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
                off += scnprintf(buf + off, buf_size - off,
                                 "XLAT45 -\t\t%#018llx\n", u.v64);
        }

        u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 2));
        off += scnprintf(buf + off, buf_size - off,
                         "LMT23 -\t\t\t%#018llx\n", u.v64);

        if (ndev->bar4_split) {
                u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
                off += scnprintf(buf + off, buf_size - off,
                                 "LMT4 -\t\t\t%#06x\n", u.v32);
                u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 5));
                off += scnprintf(buf + off, buf_size - off,
                                 "LMT5 -\t\t\t%#06x\n", u.v32);
        } else {
                u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
                off += scnprintf(buf + off, buf_size - off,
                                 "LMT45 -\t\t\t%#018llx\n", u.v64);
        }

        if (pdev_is_xeon(pdev)) {
                if (ntb_topo_is_b2b(ndev->ntb.topo)) {
                        off += scnprintf(buf + off, buf_size - off,
                                         "\nNTB Outgoing B2B XLAT:\n");

                        u.v64 = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
                        off += scnprintf(buf + off, buf_size - off,
                                         "B2B XLAT23 -\t\t%#018llx\n", u.v64);

                        if (ndev->bar4_split) {
                                u.v32 = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B XLAT4 -\t\t%#06x\n",
                                                 u.v32);
                                u.v32 = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B XLAT5 -\t\t%#06x\n",
                                                 u.v32);
                        } else {
                                u.v64 = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B XLAT45 -\t\t%#018llx\n",
                                                 u.v64);
                        }

                        u.v64 = ioread64(mmio + XEON_PBAR23LMT_OFFSET);
                        off += scnprintf(buf + off, buf_size - off,
                                         "B2B LMT23 -\t\t%#018llx\n", u.v64);

                        if (ndev->bar4_split) {
                                u.v32 = ioread32(mmio + XEON_PBAR4LMT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B LMT4 -\t\t%#06x\n",
                                                 u.v32);
                                u.v32 = ioread32(mmio + XEON_PBAR5LMT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B LMT5 -\t\t%#06x\n",
                                                 u.v32);
                        } else {
                                u.v64 = ioread64(mmio + XEON_PBAR45LMT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B LMT45 -\t\t%#018llx\n",
                                                 u.v64);
                        }

                        off += scnprintf(buf + off, buf_size - off,
                                         "\nNTB Secondary BAR:\n");

                        u.v64 = ioread64(mmio + XEON_SBAR0BASE_OFFSET);
                        off += scnprintf(buf + off, buf_size - off,
                                         "SBAR01 -\t\t%#018llx\n", u.v64);

                        u.v64 = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
                        off += scnprintf(buf + off, buf_size - off,
                                         "SBAR23 -\t\t%#018llx\n", u.v64);

                        if (ndev->bar4_split) {
                                u.v32 = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "SBAR4 -\t\t\t%#06x\n", u.v32);
                                u.v32 = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "SBAR5 -\t\t\t%#06x\n", u.v32);
                        } else {
                                u.v64 = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "SBAR45 -\t\t%#018llx\n",
                                                 u.v64);
                        }
                }

                off += scnprintf(buf + off, buf_size - off,
                                 "\nXEON NTB Statistics:\n");

                u.v16 = ioread16(mmio + XEON_USMEMMISS_OFFSET);
                off += scnprintf(buf + off, buf_size - off,
                                 "Upstream Memory Miss -\t%u\n", u.v16);

                off += scnprintf(buf + off, buf_size - off,
                                 "\nXEON NTB Hardware Errors:\n");

                if (!pci_read_config_word(pdev,
                                          XEON_DEVSTS_OFFSET, &u.v16))
                        off += scnprintf(buf + off, buf_size - off,
                                         "DEVSTS -\t\t%#06x\n", u.v16);

                if (!pci_read_config_word(pdev,
                                          XEON_LINK_STATUS_OFFSET, &u.v16))
                        off += scnprintf(buf + off, buf_size - off,
                                         "LNKSTS -\t\t%#06x\n", u.v16);

                if (!pci_read_config_dword(pdev,
                                           XEON_UNCERRSTS_OFFSET, &u.v32))
                        off += scnprintf(buf + off, buf_size - off,
                                         "UNCERRSTS -\t\t%#06x\n", u.v32);

                if (!pci_read_config_dword(pdev,
                                           XEON_CORERRSTS_OFFSET, &u.v32))
                        off += scnprintf(buf + off, buf_size - off,
                                         "CORERRSTS -\t\t%#06x\n", u.v32);
        }

        ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
        kfree(buf);
        return ret;
}

static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
                                 size_t count, loff_t *offp)
{
        struct intel_ntb_dev *ndev = filp->private_data;

        if (pdev_is_xeon(ndev->ntb.pdev) ||
            pdev_is_atom(ndev->ntb.pdev))
                return ndev_ntb_debugfs_read(filp, ubuf, count, offp);
        else if (pdev_is_skx_xeon(ndev->ntb.pdev))
                return ndev_ntb3_debugfs_read(filp, ubuf, count, offp);

        return -ENXIO;
}

static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
{
        if (!debugfs_dir) {
                ndev->debugfs_dir = NULL;
                ndev->debugfs_info = NULL;
        } else {
                ndev->debugfs_dir =
                        debugfs_create_dir(ndev_name(ndev), debugfs_dir);
                if (!ndev->debugfs_dir)
                        ndev->debugfs_info = NULL;
                else
                        ndev->debugfs_info =
                                debugfs_create_file("info", S_IRUSR,
                                                    ndev->debugfs_dir, ndev,
                                                    &intel_ntb_debugfs_info);
        }
}

static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev)
{
        debugfs_remove_recursive(ndev->debugfs_dir);
}

static int intel_ntb_mw_count(struct ntb_dev *ntb)
{
        return ntb_ndev(ntb)->mw_count;
}

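/* Report the base address, size, and alignment of a memory window,
 * skipping over and subtracting any portion of a BAR reserved for peer
 * access in the b2b configuration.
 */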
static int intel_ntb_mw_get_range(struct ntb_dev *ntb, int idx,
                                  phys_addr_t *base,
                                  resource_size_t *size,
                                  resource_size_t *align,
                                  resource_size_t *align_size)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);
        int bar;

        if (idx >= ndev->b2b_idx && !ndev->b2b_off)
                idx += 1;

        bar = ndev_mw_to_bar(ndev, idx);
        if (bar < 0)
                return bar;

        if (base)
                *base = pci_resource_start(ndev->ntb.pdev, bar) +
                        (idx == ndev->b2b_idx ? ndev->b2b_off : 0);

        if (size)
                *size = pci_resource_len(ndev->ntb.pdev, bar) -
                        (idx == ndev->b2b_idx ? ndev->b2b_off : 0);

        if (align)
                *align = pci_resource_len(ndev->ntb.pdev, bar);

        if (align_size)
                *align_size = 1;

        return 0;
}

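/* Program the incoming translation and, where supported, the limit
 * register for a memory window.  Each value is read back to verify
 * that the hardware accepted it; on a mismatch the registers are
 * restored and -EIO is returned.
 */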
static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
                                  dma_addr_t addr, resource_size_t size)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);
        unsigned long base_reg, xlat_reg, limit_reg;
        resource_size_t bar_size, mw_size;
        void __iomem *mmio;
        u64 base, limit, reg_val;
        int bar;

        if (idx >= ndev->b2b_idx && !ndev->b2b_off)
                idx += 1;

        bar = ndev_mw_to_bar(ndev, idx);
        if (bar < 0)
                return bar;

        bar_size = pci_resource_len(ndev->ntb.pdev, bar);

        if (idx == ndev->b2b_idx)
                mw_size = bar_size - ndev->b2b_off;
        else
                mw_size = bar_size;

        /* hardware requires that addr is aligned to bar size */
        if (addr & (bar_size - 1))
                return -EINVAL;

        /* make sure the range fits in the usable mw size */
        if (size > mw_size)
                return -EINVAL;

        mmio = ndev->self_mmio;
        base_reg = bar0_off(ndev->xlat_reg->bar0_base, bar);
        xlat_reg = bar2_off(ndev->xlat_reg->bar2_xlat, bar);
        limit_reg = bar2_off(ndev->xlat_reg->bar2_limit, bar);

        if (bar < 4 || !ndev->bar4_split) {
                base = ioread64(mmio + base_reg) & NTB_BAR_MASK_64;

                /* Set the limit if supported and size is not the full mw_size */
                if (limit_reg && size != mw_size)
                        limit = base + size;
                else
                        limit = 0;

                /* set and verify setting the translation address */
                iowrite64(addr, mmio + xlat_reg);
                reg_val = ioread64(mmio + xlat_reg);
                if (reg_val != addr) {
                        iowrite64(0, mmio + xlat_reg);
                        return -EIO;
                }

                /* set and verify setting the limit */
                iowrite64(limit, mmio + limit_reg);
                reg_val = ioread64(mmio + limit_reg);
                if (reg_val != limit) {
                        iowrite64(base, mmio + limit_reg);
                        iowrite64(0, mmio + xlat_reg);
                        return -EIO;
                }
        } else {
                /* split bar addr range must all be 32 bit */
                if (addr & (~0ull << 32))
                        return -EINVAL;
                if ((addr + size) & (~0ull << 32))
                        return -EINVAL;

                base = ioread32(mmio + base_reg) & NTB_BAR_MASK_32;

                /* Set the limit if supported and size is not the full mw_size */
                if (limit_reg && size != mw_size)
                        limit = base + size;
                else
                        limit = 0;

                /* set and verify setting the translation address */
                iowrite32(addr, mmio + xlat_reg);
                reg_val = ioread32(mmio + xlat_reg);
                if (reg_val != addr) {
                        iowrite32(0, mmio + xlat_reg);
                        return -EIO;
                }

                /* set and verify setting the limit */
                iowrite32(limit, mmio + limit_reg);
                reg_val = ioread32(mmio + limit_reg);
                if (reg_val != limit) {
                        iowrite32(base, mmio + limit_reg);
                        iowrite32(0, mmio + xlat_reg);
                        return -EIO;
                }
        }

        return 0;
}

static int intel_ntb_link_is_up(struct ntb_dev *ntb,
                                enum ntb_speed *speed,
                                enum ntb_width *width)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        if (ndev->reg->link_is_up(ndev)) {
                if (speed)
                        *speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
                if (width)
                        *width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);
                return 1;
        } else {
                /* TODO MAYBE: is it possible to observe the link speed and
                 * width while link is training? */
                if (speed)
                        *speed = NTB_SPEED_NONE;
                if (width)
                        *width = NTB_WIDTH_NONE;
                return 0;
        }
}

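/* Enable the link: clear the link-disable and config-lock bits and
 * enable snooping on the memory window BARs.  The secondary side of
 * the link is not permitted to do this, hence -EINVAL for
 * NTB_TOPO_SEC.
 */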
1197 static int intel_ntb_link_enable(struct ntb_dev *ntb,
1198                                  enum ntb_speed max_speed,
1199                                  enum ntb_width max_width)
1200 {
1201         struct intel_ntb_dev *ndev;
1202         u32 ntb_ctl;
1203
1204         ndev = container_of(ntb, struct intel_ntb_dev, ntb);
1205
1206         if (ndev->ntb.topo == NTB_TOPO_SEC)
1207                 return -EINVAL;
1208
1209         dev_dbg(ndev_dev(ndev),
1210                 "Enabling link with max_speed %d max_width %d\n",
1211                 max_speed, max_width);
1212         if (max_speed != NTB_SPEED_AUTO)
1213                 dev_dbg(ndev_dev(ndev), "ignoring max_speed %d\n", max_speed);
1214         if (max_width != NTB_WIDTH_AUTO)
1215                 dev_dbg(ndev_dev(ndev), "ignoring max_width %d\n", max_width);
1216
1217         ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
1218         ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
1219         ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
1220         ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
1221         if (ndev->bar4_split)
1222                 ntb_ctl |= NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP;
1223         iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
1224
1225         return 0;
1226 }
1227
1228 static int intel_ntb_link_disable(struct ntb_dev *ntb)
1229 {
1230         struct intel_ntb_dev *ndev;
1231         u32 ntb_cntl;
1232
1233         ndev = container_of(ntb, struct intel_ntb_dev, ntb);
1234
1235         if (ndev->ntb.topo == NTB_TOPO_SEC)
1236                 return -EINVAL;
1237
1238         dev_dbg(ndev_dev(ndev), "Disabling link\n");
1239
1240         /* Bring NTB link down */
1241         ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
1242         ntb_cntl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
1243         ntb_cntl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
1244         if (ndev->bar4_split)
1245                 ntb_cntl &= ~(NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP);
1246         ntb_cntl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
1247         iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);
1248
1249         return 0;
1250 }
1251
1252 static int intel_ntb_db_is_unsafe(struct ntb_dev *ntb)
1253 {
1254         return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB);
1255 }
1256
1257 static u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb)
1258 {
1259         return ntb_ndev(ntb)->db_valid_mask;
1260 }
1261
1262 static int intel_ntb_db_vector_count(struct ntb_dev *ntb)
1263 {
1264         struct intel_ntb_dev *ndev;
1265
1266         ndev = container_of(ntb, struct intel_ntb_dev, ntb);
1267
1268         return ndev->db_vec_count;
1269 }
1270
1271 static u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
1272 {
1273         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1274
1275         if (db_vector < 0 || db_vector > ndev->db_vec_count)
1276                 return 0;
1277
1278         return ndev->db_valid_mask & ndev_vec_mask(ndev, db_vector);
1279 }
1280
1281 static u64 intel_ntb_db_read(struct ntb_dev *ntb)
1282 {
1283         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1284
1285         return ndev_db_read(ndev,
1286                             ndev->self_mmio +
1287                             ndev->self_reg->db_bell);
1288 }
1289
1290 static int intel_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
1291 {
1292         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1293
1294         return ndev_db_write(ndev, db_bits,
1295                              ndev->self_mmio +
1296                              ndev->self_reg->db_bell);
1297 }
1298
1299 static int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
1300 {
1301         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1302
1303         return ndev_db_set_mask(ndev, db_bits,
1304                                 ndev->self_mmio +
1305                                 ndev->self_reg->db_mask);
1306 }
1307
1308 static int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
1309 {
1310         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1311
1312         return ndev_db_clear_mask(ndev, db_bits,
1313                                   ndev->self_mmio +
1314                                   ndev->self_reg->db_mask);
1315 }
1316
1317 static int intel_ntb_peer_db_addr(struct ntb_dev *ntb,
1318                                   phys_addr_t *db_addr,
1319                                   resource_size_t *db_size)
1320 {
1321         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1322
1323         return ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
1324                             ndev->peer_reg->db_bell);
1325 }
1326
1327 static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
1328 {
1329         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1330
1331         return ndev_db_write(ndev, db_bits,
1332                              ndev->peer_mmio +
1333                              ndev->peer_reg->db_bell);
1334 }
1335
1336 static int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb)
1337 {
1338         return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_SPAD);
1339 }
1340
1341 static int intel_ntb_spad_count(struct ntb_dev *ntb)
1342 {
1343         struct intel_ntb_dev *ndev;
1344
1345         ndev = container_of(ntb, struct intel_ntb_dev, ntb);
1346
1347         return ndev->spad_count;
1348 }
1349
1350 static u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx)
1351 {
1352         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1353
1354         return ndev_spad_read(ndev, idx,
1355                               ndev->self_mmio +
1356                               ndev->self_reg->spad);
1357 }
1358
1359 static int intel_ntb_spad_write(struct ntb_dev *ntb,
1360                                 int idx, u32 val)
1361 {
1362         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1363
1364         return ndev_spad_write(ndev, idx, val,
1365                                ndev->self_mmio +
1366                                ndev->self_reg->spad);
1367 }
1368
1369 static int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
1370                                     phys_addr_t *spad_addr)
1371 {
1372         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1373
1374         return ndev_spad_addr(ndev, idx, spad_addr, ndev->peer_addr,
1375                               ndev->peer_reg->spad);
1376 }
1377
1378 static u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
1379 {
1380         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1381
1382         return ndev_spad_read(ndev, idx,
1383                               ndev->peer_mmio +
1384                               ndev->peer_reg->spad);
1385 }
1386
1387 static int intel_ntb_peer_spad_write(struct ntb_dev *ntb,
1388                                      int idx, u32 val)
1389 {
1390         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1391
1392         return ndev_spad_write(ndev, idx, val,
1393                                ndev->peer_mmio +
1394                                ndev->peer_reg->spad);
1395 }
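
/*
 * Illustrative sketch (not part of the driver): clients such as
 * ntb_transport do not call the callbacks above directly; they go through
 * the generic wrappers in include/linux/ntb.h.  Assuming the wrapper
 * signatures of this kernel, a client-side exchange could look like this
 * for devices using intel_ntb_ops:
 *
 *	ntb_peer_spad_write(ntb, 0, val);	// -> intel_ntb_peer_spad_write()
 *	ntb_peer_db_set(ntb, BIT_ULL(0));	// -> intel_ntb_peer_db_set()
 *
 * and the peer, on its doorbell interrupt, would drain its local bits:
 *
 *	db_bits = ntb_db_read(ntb);		// -> intel_ntb_db_read()
 *	ntb_db_clear(ntb, db_bits);		// -> intel_ntb_db_clear()
 */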
1396
1397 /* ATOM */
1398
1399 static u64 atom_db_ioread(void __iomem *mmio)
1400 {
1401         return ioread64(mmio);
1402 }
1403
1404 static void atom_db_iowrite(u64 bits, void __iomem *mmio)
1405 {
1406         iowrite64(bits, mmio);
1407 }
1408
1409 static int atom_poll_link(struct intel_ntb_dev *ndev)
1410 {
1411         u32 ntb_ctl;
1412
1413         ntb_ctl = ioread32(ndev->self_mmio + ATOM_NTBCNTL_OFFSET);
1414
1415         if (ntb_ctl == ndev->ntb_ctl)
1416                 return 0;
1417
1418         ndev->ntb_ctl = ntb_ctl;
1419
1420         ndev->lnk_sta = ioread32(ndev->self_mmio + ATOM_LINK_STATUS_OFFSET);
1421
1422         return 1;
1423 }
1424
1425 static int atom_link_is_up(struct intel_ntb_dev *ndev)
1426 {
1427         return ATOM_NTB_CTL_ACTIVE(ndev->ntb_ctl);
1428 }
1429
1430 static int atom_link_is_err(struct intel_ntb_dev *ndev)
1431 {
1432         if (ioread32(ndev->self_mmio + ATOM_LTSSMSTATEJMP_OFFSET)
1433             & ATOM_LTSSMSTATEJMP_FORCEDETECT)
1434                 return 1;
1435
1436         if (ioread32(ndev->self_mmio + ATOM_IBSTERRRCRVSTS0_OFFSET)
1437             & ATOM_IBIST_ERR_OFLOW)
1438                 return 1;
1439
1440         return 0;
1441 }
1442
1443 static inline enum ntb_topo atom_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
1444 {
1445         switch (ppd & ATOM_PPD_TOPO_MASK) {
1446         case ATOM_PPD_TOPO_B2B_USD:
1447                 dev_dbg(ndev_dev(ndev), "PPD %d B2B USD\n", ppd);
1448                 return NTB_TOPO_B2B_USD;
1449
1450         case ATOM_PPD_TOPO_B2B_DSD:
1451                 dev_dbg(ndev_dev(ndev), "PPD %d B2B DSD\n", ppd);
1452                 return NTB_TOPO_B2B_DSD;
1453
1454         case ATOM_PPD_TOPO_PRI_USD:
1455         case ATOM_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
1456         case ATOM_PPD_TOPO_SEC_USD:
1457         case ATOM_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
1458                 dev_dbg(ndev_dev(ndev), "PPD %d non B2B disabled\n", ppd);
1459                 return NTB_TOPO_NONE;
1460         }
1461
1462         dev_dbg(ndev_dev(ndev), "PPD %d invalid\n", ppd);
1463         return NTB_TOPO_NONE;
1464 }
1465
1466 static void atom_link_hb(struct work_struct *work)
1467 {
1468         struct intel_ntb_dev *ndev = hb_ndev(work);
1469         unsigned long poll_ts;
1470         void __iomem *mmio;
1471         u32 status32;
1472
1473         poll_ts = ndev->last_ts + ATOM_LINK_HB_TIMEOUT;
1474
1475         /* Delay polling the link status if an interrupt was received,
1476          * unless the cached link status says the link is down.
1477          */
1478         if (time_after(poll_ts, jiffies) && atom_link_is_up(ndev)) {
1479                 schedule_delayed_work(&ndev->hb_timer, poll_ts - jiffies);
1480                 return;
1481         }
1482
1483         if (atom_poll_link(ndev))
1484                 ntb_link_event(&ndev->ntb);
1485
1486         if (atom_link_is_up(ndev) || !atom_link_is_err(ndev)) {
1487                 schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);
1488                 return;
1489         }
1490
1491         /* Link is down with error: recover the link! */
1492
1493         mmio = ndev->self_mmio;
1494
1495         /* Driver resets the NTB ModPhy lanes - magic! */
1496         iowrite8(0xe0, mmio + ATOM_MODPHY_PCSREG6);
1497         iowrite8(0x40, mmio + ATOM_MODPHY_PCSREG4);
1498         iowrite8(0x60, mmio + ATOM_MODPHY_PCSREG4);
1499         iowrite8(0x60, mmio + ATOM_MODPHY_PCSREG6);
1500
1501         /* Driver waits 100ms to allow the NTB ModPhy to settle */
1502         msleep(100);
1503
1504         /* Clear AER Errors, write to clear */
1505         status32 = ioread32(mmio + ATOM_ERRCORSTS_OFFSET);
1506         dev_dbg(ndev_dev(ndev), "ERRCORSTS = %x\n", status32);
1507         status32 &= PCI_ERR_COR_REP_ROLL;
1508         iowrite32(status32, mmio + ATOM_ERRCORSTS_OFFSET);
1509
1510         /* Clear unexpected electrical idle event in LTSSM, write to clear */
1511         status32 = ioread32(mmio + ATOM_LTSSMERRSTS0_OFFSET);
1512         dev_dbg(ndev_dev(ndev), "LTSSMERRSTS0 = %x\n", status32);
1513         status32 |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI;
1514         iowrite32(status32, mmio + ATOM_LTSSMERRSTS0_OFFSET);
1515
1516         /* Clear DeSkew Buffer error, write to clear */
1517         status32 = ioread32(mmio + ATOM_DESKEWSTS_OFFSET);
1518         dev_dbg(ndev_dev(ndev), "DESKEWSTS = %x\n", status32);
1519         status32 |= ATOM_DESKEWSTS_DBERR;
1520         iowrite32(status32, mmio + ATOM_DESKEWSTS_OFFSET);
1521
1522         status32 = ioread32(mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);
1523         dev_dbg(ndev_dev(ndev), "IBSTERRRCRVSTS0 = %x\n", status32);
1524         status32 &= ATOM_IBIST_ERR_OFLOW;
1525         iowrite32(status32, mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);
1526
1527         /* Releases the NTB state machine to allow the link to retrain */
1528         status32 = ioread32(mmio + ATOM_LTSSMSTATEJMP_OFFSET);
1529         dev_dbg(ndev_dev(ndev), "LTSSMSTATEJMP = %x\n", status32);
1530         status32 &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT;
1531         iowrite32(status32, mmio + ATOM_LTSSMSTATEJMP_OFFSET);
1532
1533         /* There is a potential race between the 2 NTB devices recovering at the
1534          * same time.  If the times are the same, the link will not recover and
1535          * the driver will be stuck in this loop forever.  Add a random interval
1536          * to the recovery time to prevent this race.
1537          */
1538         schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_RECOVERY_TIME
1539                               + prandom_u32() % ATOM_LINK_RECOVERY_TIME);
1540 }
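
/*
 * Worked example of the randomized backoff above (illustrative only,
 * assuming ATOM_LINK_RECOVERY_TIME corresponds to roughly 500ms): the
 * rescheduling delay is drawn uniformly from
 * [ATOM_LINK_RECOVERY_TIME, 2 * ATOM_LINK_RECOVERY_TIME), i.e. about
 * 500ms-1000ms, so two peers that enter the error path at the same
 * instant are unlikely to keep resetting their ModPhy lanes in lockstep.
 */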
1541
1542 static int atom_init_isr(struct intel_ntb_dev *ndev)
1543 {
1544         int rc;
1545
1546         rc = ndev_init_isr(ndev, 1, ATOM_DB_MSIX_VECTOR_COUNT,
1547                            ATOM_DB_MSIX_VECTOR_SHIFT, ATOM_DB_TOTAL_SHIFT);
1548         if (rc)
1549                 return rc;
1550
1551         /* ATOM has no link status interrupt; poll the link on that platform */
1552         ndev->last_ts = jiffies;
1553         INIT_DELAYED_WORK(&ndev->hb_timer, atom_link_hb);
1554         schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);
1555
1556         return 0;
1557 }
1558
1559 static void atom_deinit_isr(struct intel_ntb_dev *ndev)
1560 {
1561         cancel_delayed_work_sync(&ndev->hb_timer);
1562         ndev_deinit_isr(ndev);
1563 }
1564
1565 static int atom_init_ntb(struct intel_ntb_dev *ndev)
1566 {
1567         ndev->mw_count = ATOM_MW_COUNT;
1568         ndev->spad_count = ATOM_SPAD_COUNT;
1569         ndev->db_count = ATOM_DB_COUNT;
1570
1571         switch (ndev->ntb.topo) {
1572         case NTB_TOPO_B2B_USD:
1573         case NTB_TOPO_B2B_DSD:
1574                 ndev->self_reg = &atom_pri_reg;
1575                 ndev->peer_reg = &atom_b2b_reg;
1576                 ndev->xlat_reg = &atom_sec_xlat;
1577
1578                 /* Enable Bus Master and Memory Space on the secondary side */
1579                 iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
1580                           ndev->self_mmio + ATOM_SPCICMD_OFFSET);
1581
1582                 break;
1583
1584         default:
1585                 return -EINVAL;
1586         }
1587
1588         ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
1589
1590         return 0;
1591 }
1592
1593 static int atom_init_dev(struct intel_ntb_dev *ndev)
1594 {
1595         u32 ppd;
1596         int rc;
1597
1598         rc = pci_read_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET, &ppd);
1599         if (rc)
1600                 return -EIO;
1601
1602         ndev->ntb.topo = atom_ppd_topo(ndev, ppd);
1603         if (ndev->ntb.topo == NTB_TOPO_NONE)
1604                 return -EINVAL;
1605
1606         rc = atom_init_ntb(ndev);
1607         if (rc)
1608                 return rc;
1609
1610         rc = atom_init_isr(ndev);
1611         if (rc)
1612                 return rc;
1613
1614         if (ndev->ntb.topo != NTB_TOPO_SEC) {
1615                 /* Initiate PCI-E link training */
1616                 rc = pci_write_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET,
1617                                             ppd | ATOM_PPD_INIT_LINK);
1618                 if (rc)
1619                         return rc;
1620         }
1621
1622         return 0;
1623 }
1624
1625 static void atom_deinit_dev(struct intel_ntb_dev *ndev)
1626 {
1627         atom_deinit_isr(ndev);
1628 }
1629
1630 /* Skylake Xeon NTB */
1631
1632 static u64 skx_db_ioread(void __iomem *mmio)
1633 {
1634         return ioread64(mmio);
1635 }
1636
1637 static void skx_db_iowrite(u64 bits, void __iomem *mmio)
1638 {
1639         iowrite64(bits, mmio);
1640 }
1641
1642 static int skx_init_isr(struct intel_ntb_dev *ndev)
1643 {
1644         int i;
1645
1646         /*
1647          * The MSIX vectors and the interrupt status bits are not lined up
1648          * on Skylake: the link status bit is bit 32, yet by default it is
1649          * routed to MSIX vector 0.  Fix up the routing so they line up.
1650          * The vector assignment at reset is 1-32,0; reprogram it to 0-32.
1651          */
1652
1653         for (i = 0; i < SKX_DB_MSIX_VECTOR_COUNT; i++)
1654                 iowrite8(i, ndev->self_mmio + SKX_INTVEC_OFFSET + i);
1655
1656         /* move the link status vector down by one as a workaround */
1657         if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) {
1658                 iowrite8(SKX_DB_MSIX_VECTOR_COUNT - 2,
1659                          ndev->self_mmio + SKX_INTVEC_OFFSET +
1660                          (SKX_DB_MSIX_VECTOR_COUNT - 1));
1661         }
1662
1663         return ndev_init_isr(ndev, SKX_DB_MSIX_VECTOR_COUNT,
1664                              SKX_DB_MSIX_VECTOR_COUNT,
1665                              SKX_DB_MSIX_VECTOR_SHIFT,
1666                              SKX_DB_TOTAL_SHIFT);
1667 }
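
/*
 * Illustrative mapping for the fixup above, assuming
 * SKX_DB_MSIX_VECTOR_COUNT is 33 (32 doorbell bits plus link status):
 *
 *	reset default:	source i -> vector (i + 1) % 33	(link on vector 0)
 *	after the loop:	source i -> vector i		(link on vector 32)
 *	VECTOR32_BAD:	link moved to vector 31, shared with doorbell 31
 */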
1668
1669 static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev,
1670                             const struct intel_b2b_addr *addr,
1671                             const struct intel_b2b_addr *peer_addr)
1672 {
1673         struct pci_dev *pdev;
1674         void __iomem *mmio;
1675         resource_size_t bar_size;
1676         phys_addr_t bar_addr;
1677         int b2b_bar;
1678         u8 bar_sz;
1679
1680         pdev = ndev_pdev(ndev);
1681         mmio = ndev->self_mmio;
1682
1683         if (ndev->b2b_idx == UINT_MAX) {
1684                 dev_dbg(ndev_dev(ndev), "not using b2b mw\n");
1685                 b2b_bar = 0;
1686                 ndev->b2b_off = 0;
1687         } else {
1688                 b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
1689                 if (b2b_bar < 0)
1690                         return -EIO;
1691
1692                 dev_dbg(ndev_dev(ndev), "using b2b mw bar %d\n", b2b_bar);
1693
1694                 bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
1695
1696                 dev_dbg(ndev_dev(ndev), "b2b bar size %#llx\n", bar_size);
1697
1698                 if (b2b_mw_share && ((bar_size >> 1) >= XEON_B2B_MIN_SIZE)) {
1699                         dev_dbg(ndev_dev(ndev),
1700                                 "b2b using first half of bar\n");
1701                         ndev->b2b_off = bar_size >> 1;
1702                 } else if (bar_size >= XEON_B2B_MIN_SIZE) {
1703                         dev_dbg(ndev_dev(ndev),
1704                                 "b2b using whole bar\n");
1705                         ndev->b2b_off = 0;
1706                         --ndev->mw_count;
1707                 } else {
1708                         dev_dbg(ndev_dev(ndev),
1709                                 "b2b bar size is too small\n");
1710                         return -EIO;
1711                 }
1712         }
1713
1714         /*
1715          * Reset the secondary bar sizes to match the primary bar sizes,
1716          * except disable or halve the size of the b2b secondary bar.
1717          */
1718         pci_read_config_byte(pdev, SKX_IMBAR1SZ_OFFSET, &bar_sz);
1719         dev_dbg(ndev_dev(ndev), "IMBAR1SZ %#x\n", bar_sz);
1720         if (b2b_bar == 1) {
1721                 if (ndev->b2b_off)
1722                         bar_sz -= 1;
1723                 else
1724                         bar_sz = 0;
1725         }
1726
1727         pci_write_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, bar_sz);
1728         pci_read_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, &bar_sz);
1729         dev_dbg(ndev_dev(ndev), "EMBAR1SZ %#x\n", bar_sz);
1730
1731         pci_read_config_byte(pdev, SKX_IMBAR2SZ_OFFSET, &bar_sz);
1732         dev_dbg(ndev_dev(ndev), "IMBAR2SZ %#x\n", bar_sz);
1733         if (b2b_bar == 2) {
1734                 if (ndev->b2b_off)
1735                         bar_sz -= 1;
1736                 else
1737                         bar_sz = 0;
1738         }
1739
1740         pci_write_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, bar_sz);
1741         pci_read_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, &bar_sz);
1742         dev_dbg(ndev_dev(ndev), "EMBAR2SZ %#x\n", bar_sz);
1743
1744         /* SBAR01 hit by first part of the b2b bar */
1745         if (b2b_bar == 0)
1746                 bar_addr = addr->bar0_addr;
1747         else if (b2b_bar == 1)
1748                 bar_addr = addr->bar2_addr64;
1749         else if (b2b_bar == 2)
1750                 bar_addr = addr->bar4_addr64;
1751         else
1752                 return -EIO;
1753
1754         /* setup incoming bar limits == base addrs (zero length windows) */
1755         bar_addr = addr->bar2_addr64 + (b2b_bar == 1 ? ndev->b2b_off : 0);
1756         iowrite64(bar_addr, mmio + SKX_IMBAR1XLMT_OFFSET);
1757         bar_addr = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET);
1758         dev_dbg(ndev_dev(ndev), "IMBAR1XLMT %#018llx\n", bar_addr);
1759
1760         bar_addr = addr->bar4_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
1761         iowrite64(bar_addr, mmio + SKX_IMBAR2XLMT_OFFSET);
1762         bar_addr = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET);
1763         dev_dbg(ndev_dev(ndev), "IMBAR2XLMT %#018llx\n", bar_addr);
1764
1765         /* zero incoming translation addrs */
1766         iowrite64(0, mmio + SKX_IMBAR1XBASE_OFFSET);
1767         iowrite64(0, mmio + SKX_IMBAR2XBASE_OFFSET);
1768
1769         ndev->peer_mmio = ndev->self_mmio;
1770
1771         return 0;
1772 }
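
/*
 * Sizing sketch for the b2b window choice above (illustrative): with
 * b2b_mw_share set and a b2b BAR of at least 2 * XEON_B2B_MIN_SIZE, the
 * peer is reached through the first half of the BAR (b2b_off = bar_size / 2)
 * and the second half stays usable as a memory window; otherwise, with a
 * BAR of at least XEON_B2B_MIN_SIZE, the whole BAR is consumed and mw_count
 * is reduced by one; anything smaller fails with -EIO.
 */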
1773
1774 static int skx_init_ntb(struct intel_ntb_dev *ndev)
1775 {
1776         int rc;
1777
1778
1779         ndev->mw_count = XEON_MW_COUNT;
1780         ndev->spad_count = SKX_SPAD_COUNT;
1781         ndev->db_count = SKX_DB_COUNT;
1782         ndev->db_link_mask = SKX_DB_LINK_BIT;
1783
1784         /* DB fixup: treat doorbell bit 31 as a link bit while vector 32 is unusable */
1785         if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD)
1786                 ndev->db_link_mask |= BIT_ULL(31);
1787
1788         switch (ndev->ntb.topo) {
1789         case NTB_TOPO_B2B_USD:
1790         case NTB_TOPO_B2B_DSD:
1791                 ndev->self_reg = &skx_pri_reg;
1792                 ndev->peer_reg = &skx_b2b_reg;
1793                 ndev->xlat_reg = &skx_sec_xlat;
1794
1795                 if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
1796                         rc = skx_setup_b2b_mw(ndev,
1797                                               &xeon_b2b_dsd_addr,
1798                                               &xeon_b2b_usd_addr);
1799                 } else {
1800                         rc = skx_setup_b2b_mw(ndev,
1801                                               &xeon_b2b_usd_addr,
1802                                               &xeon_b2b_dsd_addr);
1803                 }
1804
1805                 if (rc)
1806                         return rc;
1807
1808                 /* Enable Bus Master and Memory Space on the secondary side */
1809                 iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
1810                           ndev->self_mmio + SKX_SPCICMD_OFFSET);
1811
1812                 break;
1813
1814         default:
1815                 return -EINVAL;
1816         }
1817
1818         ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
1819
1820         ndev->reg->db_iowrite(ndev->db_valid_mask,
1821                               ndev->self_mmio +
1822                               ndev->self_reg->db_mask);
1823
1824         return 0;
1825 }
1826
1827 static int skx_init_dev(struct intel_ntb_dev *ndev)
1828 {
1829         struct pci_dev *pdev;
1830         u8 ppd;
1831         int rc;
1832
1833         pdev = ndev_pdev(ndev);
1834
1835         ndev->reg = &skx_reg;
1836
1837         rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
1838         if (rc)
1839                 return -EIO;
1840
1841         ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
1842         dev_dbg(ndev_dev(ndev), "ppd %#x topo %s\n", ppd,
1843                 ntb_topo_string(ndev->ntb.topo));
1844         if (ndev->ntb.topo == NTB_TOPO_NONE)
1845                 return -EINVAL;
1846
1847         if (pdev_is_skx_xeon(pdev))
1848                 ndev->hwerr_flags |= NTB_HWERR_MSIX_VECTOR32_BAD;
1849
1850         rc = skx_init_ntb(ndev);
1851         if (rc)
1852                 return rc;
1853
1854         return skx_init_isr(ndev);
1855 }
1856
1857 static int intel_ntb3_link_enable(struct ntb_dev *ntb,
1858                                   enum ntb_speed max_speed,
1859                                   enum ntb_width max_width)
1860 {
1861         struct intel_ntb_dev *ndev;
1862         u32 ntb_ctl;
1863
1864         ndev = container_of(ntb, struct intel_ntb_dev, ntb);
1865
1866         dev_dbg(ndev_dev(ndev),
1867                 "Enabling link with max_speed %d max_width %d\n",
1868                 max_speed, max_width);
1869
1870         if (max_speed != NTB_SPEED_AUTO)
1871                 dev_dbg(ndev_dev(ndev), "ignoring max_speed %d\n", max_speed);
1872         if (max_width != NTB_WIDTH_AUTO)
1873                 dev_dbg(ndev_dev(ndev), "ignoring max_width %d\n", max_width);
1874
1875         ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
1876         ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
1877         ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
1878         ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
1879         iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
1880
1881         return 0;
1882 }
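
/*
 * Illustrative client-side sketch (assuming the ntb.h wrappers of this
 * kernel): a client enables the link with AUTO settings and waits for the
 * link-up notification in its own link_event() callback before touching
 * memory windows.  setup_memory_windows() below is a hypothetical helper:
 *
 *	ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
 *	...
 *	if (ntb_link_is_up(ntb, NULL, NULL))
 *		setup_memory_windows(ntb);
 */
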
1883 static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int idx,
1884                                    dma_addr_t addr, resource_size_t size)
1885 {
1886         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1887         unsigned long xlat_reg, limit_reg;
1888         resource_size_t bar_size, mw_size;
1889         void __iomem *mmio;
1890         u64 base, limit, reg_val;
1891         int bar;
1892
1893         if (idx >= ndev->b2b_idx && !ndev->b2b_off)
1894                 idx += 1;
1895
1896         bar = ndev_mw_to_bar(ndev, idx);
1897         if (bar < 0)
1898                 return bar;
1899
1900         bar_size = pci_resource_len(ndev->ntb.pdev, bar);
1901
1902         if (idx == ndev->b2b_idx)
1903                 mw_size = bar_size - ndev->b2b_off;
1904         else
1905                 mw_size = bar_size;
1906
1907         /* hardware requires that addr is aligned to bar size */
1908         if (addr & (bar_size - 1))
1909                 return -EINVAL;
1910
1911         /* make sure the range fits in the usable mw size */
1912         if (size > mw_size)
1913                 return -EINVAL;
1914
1915         mmio = ndev->self_mmio;
1916         xlat_reg = ndev->xlat_reg->bar2_xlat + (idx * 0x10);
1917         limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10);
1918         base = pci_resource_start(ndev->ntb.pdev, bar);
1919
1920         /* Set the limit if supported and the requested size is not mw_size */
1921         if (limit_reg && size != mw_size)
1922                 limit = base + size;
1923         else
1924                 limit = base + mw_size;
1925
1926         /* set and verify setting the translation address */
1927         iowrite64(addr, mmio + xlat_reg);
1928         reg_val = ioread64(mmio + xlat_reg);
1929         if (reg_val != addr) {
1930                 iowrite64(0, mmio + xlat_reg);
1931                 return -EIO;
1932         }
1933
1934         dev_dbg(ndev_dev(ndev), "BAR %d IMBARXBASE: %#Lx\n", bar, reg_val);
1935
1936         /* set and verify setting the limit */
1937         iowrite64(limit, mmio + limit_reg);
1938         reg_val = ioread64(mmio + limit_reg);
1939         if (reg_val != limit) {
1940                 iowrite64(base, mmio + limit_reg);
1941                 iowrite64(0, mmio + xlat_reg);
1942                 return -EIO;
1943         }
1944
1945         dev_dbg(ndev_dev(ndev), "BAR %d IMBARXLMT: %#Lx\n", bar, reg_val);
1946
1947         /* set up the corresponding limit on the EP (EMBAR) side */
1948         limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10) + 0x4000;
1949         base = ioread64(mmio + SKX_EMBAR1_OFFSET + (8 * idx));
1950         base &= ~0xf;
1951
1952         if (limit_reg && size != mw_size)
1953                 limit = base + size;
1954         else
1955                 limit = base + mw_size;
1956
1957         /* set and verify setting the limit */
1958         iowrite64(limit, mmio + limit_reg);
1959         reg_val = ioread64(mmio + limit_reg);
1960         if (reg_val != limit) {
1961                 iowrite64(base, mmio + limit_reg);
1962                 iowrite64(0, mmio + xlat_reg);
1963                 return -EIO;
1964         }
1965
1966         dev_dbg(ndev_dev(ndev), "BAR %d EMBARXLMT: %#Lx\n", bar, reg_val);
1967
1968         return 0;
1969 }
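
/*
 * Illustrative usage sketch (not part of the driver, assuming the
 * single-port ntb.h API of this kernel): a client that wants the peer to
 * write into its memory allocates a buffer and programs the window, e.g.
 *
 *	ntb_mw_get_range(ntb, 0, &base, &size, &align, &align_size);
 *	buf = dma_alloc_coherent(&pdev->dev, size, &dma_addr, GFP_KERNEL);
 *	rc = ntb_mw_set_trans(ntb, 0, dma_addr, size);
 *
 * where -EINVAL is returned if dma_addr is not aligned to the BAR size or
 * if size exceeds the usable window, as checked above.
 */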
1970
1971 static int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
1972 {
1973         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1974         int bit;
1975
1976         if (db_bits & ~ndev->db_valid_mask)
1977                 return -EINVAL;
1978
1979         while (db_bits) {
1980                 bit = __ffs(db_bits);
1981                 iowrite32(1, ndev->peer_mmio +
1982                                 ndev->peer_reg->db_bell + (bit * 4));
1983                 db_bits &= db_bits - 1;
1984         }
1985
1986         return 0;
1987 }
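
/*
 * Worked example for the loop above (illustrative): db_bits == 0x5 rings
 * two doorbells; __ffs() finds bit 0 first, so 1 is written at db_bell + 0,
 * then db_bits becomes 0x4 and 1 is written at db_bell + 8 for bit 2.
 * Bits outside db_valid_mask are rejected up front with -EINVAL.
 */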
1988
1989 static u64 intel_ntb3_db_read(struct ntb_dev *ntb)
1990 {
1991         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1992
1993         return ndev_db_read(ndev,
1994                             ndev->self_mmio +
1995                             ndev->self_reg->db_clear);
1996 }
1997
1998 static int intel_ntb3_db_clear(struct ntb_dev *ntb, u64 db_bits)
1999 {
2000         struct intel_ntb_dev *ndev = ntb_ndev(ntb);
2001
2002         return ndev_db_write(ndev, db_bits,
2003                              ndev->self_mmio +
2004                              ndev->self_reg->db_clear);
2005 }
2006
2007 /* XEON */
2008
2009 static u64 xeon_db_ioread(void __iomem *mmio)
2010 {
2011         return (u64)ioread16(mmio);
2012 }
2013
2014 static void xeon_db_iowrite(u64 bits, void __iomem *mmio)
2015 {
2016         iowrite16((u16)bits, mmio);
2017 }
2018
2019 static int xeon_poll_link(struct intel_ntb_dev *ndev)
2020 {
2021         u16 reg_val;
2022         int rc;
2023
2024         ndev->reg->db_iowrite(ndev->db_link_mask,
2025                               ndev->self_mmio +
2026                               ndev->self_reg->db_bell);
2027
2028         rc = pci_read_config_word(ndev->ntb.pdev,
2029                                   XEON_LINK_STATUS_OFFSET, &reg_val);
2030         if (rc)
2031                 return 0;
2032
2033         if (reg_val == ndev->lnk_sta)
2034                 return 0;
2035
2036         ndev->lnk_sta = reg_val;
2037
2038         return 1;
2039 }
2040
2041 static int xeon_link_is_up(struct intel_ntb_dev *ndev)
2042 {
2043         if (ndev->ntb.topo == NTB_TOPO_SEC)
2044                 return 1;
2045
2046         return NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
2047 }
2048
2049 static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd)
2050 {
2051         switch (ppd & XEON_PPD_TOPO_MASK) {
2052         case XEON_PPD_TOPO_B2B_USD:
2053                 return NTB_TOPO_B2B_USD;
2054
2055         case XEON_PPD_TOPO_B2B_DSD:
2056                 return NTB_TOPO_B2B_DSD;
2057
2058         case XEON_PPD_TOPO_PRI_USD:
2059         case XEON_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
2060                 return NTB_TOPO_PRI;
2061
2062         case XEON_PPD_TOPO_SEC_USD:
2063         case XEON_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
2064                 return NTB_TOPO_SEC;
2065         }
2066
2067         return NTB_TOPO_NONE;
2068 }
2069
2070 static inline int xeon_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd)
2071 {
2072         if (ppd & XEON_PPD_SPLIT_BAR_MASK) {
2073                 dev_dbg(ndev_dev(ndev), "PPD %d split bar\n", ppd);
2074                 return 1;
2075         }
2076         return 0;
2077 }
2078
2079 static int xeon_init_isr(struct intel_ntb_dev *ndev)
2080 {
2081         return ndev_init_isr(ndev, XEON_DB_MSIX_VECTOR_COUNT,
2082                              XEON_DB_MSIX_VECTOR_COUNT,
2083                              XEON_DB_MSIX_VECTOR_SHIFT,
2084                              XEON_DB_TOTAL_SHIFT);
2085 }
2086
2087 static void xeon_deinit_isr(struct intel_ntb_dev *ndev)
2088 {
2089         ndev_deinit_isr(ndev);
2090 }
2091
2092 static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
2093                              const struct intel_b2b_addr *addr,
2094                              const struct intel_b2b_addr *peer_addr)
2095 {
2096         struct pci_dev *pdev;
2097         void __iomem *mmio;
2098         resource_size_t bar_size;
2099         phys_addr_t bar_addr;
2100         int b2b_bar;
2101         u8 bar_sz;
2102
2103         pdev = ndev_pdev(ndev);
2104         mmio = ndev->self_mmio;
2105
2106         if (ndev->b2b_idx == UINT_MAX) {
2107                 dev_dbg(ndev_dev(ndev), "not using b2b mw\n");
2108                 b2b_bar = 0;
2109                 ndev->b2b_off = 0;
2110         } else {
2111                 b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
2112                 if (b2b_bar < 0)
2113                         return -EIO;
2114
2115                 dev_dbg(ndev_dev(ndev), "using b2b mw bar %d\n", b2b_bar);
2116
2117                 bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
2118
2119                 dev_dbg(ndev_dev(ndev), "b2b bar size %#llx\n", bar_size);
2120
2121                 if (b2b_mw_share && XEON_B2B_MIN_SIZE <= bar_size >> 1) {
2122                         dev_dbg(ndev_dev(ndev),
2123                                 "b2b using first half of bar\n");
2124                         ndev->b2b_off = bar_size >> 1;
2125                 } else if (XEON_B2B_MIN_SIZE <= bar_size) {
2126                         dev_dbg(ndev_dev(ndev),
2127                                 "b2b using whole bar\n");
2128                         ndev->b2b_off = 0;
2129                         --ndev->mw_count;
2130                 } else {
2131                         dev_dbg(ndev_dev(ndev),
2132                                 "b2b bar size is too small\n");
2133                         return -EIO;
2134                 }
2135         }
2136
2137         /* Reset the secondary bar sizes to match the primary bar sizes,
2138          * except disable or halve the size of the b2b secondary bar.
2139          *
2140          * Note: each bar size register is handled explicitly, because the register
2141          * offsets are not in a consistent order (oddly, bar5sz comes after ppd).
2142          */
2143         pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &bar_sz);
2144         dev_dbg(ndev_dev(ndev), "PBAR23SZ %#x\n", bar_sz);
2145         if (b2b_bar == 2) {
2146                 if (ndev->b2b_off)
2147                         bar_sz -= 1;
2148                 else
2149                         bar_sz = 0;
2150         }
2151         pci_write_config_byte(pdev, XEON_SBAR23SZ_OFFSET, bar_sz);
2152         pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &bar_sz);
2153         dev_dbg(ndev_dev(ndev), "SBAR23SZ %#x\n", bar_sz);
2154
2155         if (!ndev->bar4_split) {
2156                 pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &bar_sz);
2157                 dev_dbg(ndev_dev(ndev), "PBAR45SZ %#x\n", bar_sz);
2158                 if (b2b_bar == 4) {
2159                         if (ndev->b2b_off)
2160                                 bar_sz -= 1;
2161                         else
2162                                 bar_sz = 0;
2163                 }
2164                 pci_write_config_byte(pdev, XEON_SBAR45SZ_OFFSET, bar_sz);
2165                 pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &bar_sz);
2166                 dev_dbg(ndev_dev(ndev), "SBAR45SZ %#x\n", bar_sz);
2167         } else {
2168                 pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &bar_sz);
2169                 dev_dbg(ndev_dev(ndev), "PBAR4SZ %#x\n", bar_sz);
2170                 if (b2b_bar == 4) {
2171                         if (ndev->b2b_off)
2172                                 bar_sz -= 1;
2173                         else
2174                                 bar_sz = 0;
2175                 }
2176                 pci_write_config_byte(pdev, XEON_SBAR4SZ_OFFSET, bar_sz);
2177                 pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &bar_sz);
2178                 dev_dbg(ndev_dev(ndev), "SBAR4SZ %#x\n", bar_sz);
2179
2180                 pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &bar_sz);
2181                 dev_dbg(ndev_dev(ndev), "PBAR5SZ %#x\n", bar_sz);
2182                 if (b2b_bar == 5) {
2183                         if (ndev->b2b_off)
2184                                 bar_sz -= 1;
2185                         else
2186                                 bar_sz = 0;
2187                 }
2188                 pci_write_config_byte(pdev, XEON_SBAR5SZ_OFFSET, bar_sz);
2189                 pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &bar_sz);
2190                 dev_dbg(ndev_dev(ndev), "SBAR5SZ %#x\n", bar_sz);
2191         }
2192
2193         /* SBAR01 hit by first part of the b2b bar */
2194         if (b2b_bar == 0)
2195                 bar_addr = addr->bar0_addr;
2196         else if (b2b_bar == 2)
2197                 bar_addr = addr->bar2_addr64;
2198         else if (b2b_bar == 4 && !ndev->bar4_split)
2199                 bar_addr = addr->bar4_addr64;
2200         else if (b2b_bar == 4)
2201                 bar_addr = addr->bar4_addr32;
2202         else if (b2b_bar == 5)
2203                 bar_addr = addr->bar5_addr32;
2204         else
2205                 return -EIO;
2206
2207         dev_dbg(ndev_dev(ndev), "SBAR01 %#018llx\n", bar_addr);
2208         iowrite64(bar_addr, mmio + XEON_SBAR0BASE_OFFSET);
2209
2210         /* Other SBARs are normally hit by the PBAR xlat, except for the b2b bar.
2211          * The b2b bar is either disabled above, or configured half-size, and
2212          * it starts at the PBAR xlat + offset.
2213          */
2214
2215         bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
2216         iowrite64(bar_addr, mmio + XEON_SBAR23BASE_OFFSET);
2217         bar_addr = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
2218         dev_dbg(ndev_dev(ndev), "SBAR23 %#018llx\n", bar_addr);
2219
2220         if (!ndev->bar4_split) {
2221                 bar_addr = addr->bar4_addr64 +
2222                         (b2b_bar == 4 ? ndev->b2b_off : 0);
2223                 iowrite64(bar_addr, mmio + XEON_SBAR45BASE_OFFSET);
2224                 bar_addr = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
2225                 dev_dbg(ndev_dev(ndev), "SBAR45 %#018llx\n", bar_addr);
2226         } else {
2227                 bar_addr = addr->bar4_addr32 +
2228                         (b2b_bar == 4 ? ndev->b2b_off : 0);
2229                 iowrite32(bar_addr, mmio + XEON_SBAR4BASE_OFFSET);
2230                 bar_addr = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
2231                 dev_dbg(ndev_dev(ndev), "SBAR4 %#010llx\n", bar_addr);
2232
2233                 bar_addr = addr->bar5_addr32 +
2234                         (b2b_bar == 5 ? ndev->b2b_off : 0);
2235                 iowrite32(bar_addr, mmio + XEON_SBAR5BASE_OFFSET);
2236                 bar_addr = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
2237                 dev_dbg(ndev_dev(ndev), "SBAR5 %#010llx\n", bar_addr);
2238         }
2239
2240         /* setup incoming bar limits == base addrs (zero length windows) */
2241
2242         bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
2243         iowrite64(bar_addr, mmio + XEON_SBAR23LMT_OFFSET);
2244         bar_addr = ioread64(mmio + XEON_SBAR23LMT_OFFSET);
2245         dev_dbg(ndev_dev(ndev), "SBAR23LMT %#018llx\n", bar_addr);
2246
2247         if (!ndev->bar4_split) {
2248                 bar_addr = addr->bar4_addr64 +
2249                         (b2b_bar == 4 ? ndev->b2b_off : 0);
2250                 iowrite64(bar_addr, mmio + XEON_SBAR45LMT_OFFSET);
2251                 bar_addr = ioread64(mmio + XEON_SBAR45LMT_OFFSET);
2252                 dev_dbg(ndev_dev(ndev), "SBAR45LMT %#018llx\n", bar_addr);
2253         } else {
2254                 bar_addr = addr->bar4_addr32 +
2255                         (b2b_bar == 4 ? ndev->b2b_off : 0);
2256                 iowrite32(bar_addr, mmio + XEON_SBAR4LMT_OFFSET);
2257                 bar_addr = ioread32(mmio + XEON_SBAR4LMT_OFFSET);
2258                 dev_dbg(ndev_dev(ndev), "SBAR4LMT %#010llx\n", bar_addr);
2259
2260                 bar_addr = addr->bar5_addr32 +
2261                         (b2b_bar == 5 ? ndev->b2b_off : 0);
2262                 iowrite32(bar_addr, mmio + XEON_SBAR5LMT_OFFSET);
2263                 bar_addr = ioread32(mmio + XEON_SBAR5LMT_OFFSET);
2264                 dev_dbg(ndev_dev(ndev), "SBAR5LMT %#05llx\n", bar_addr);
2265         }
2266
2267         /* zero incoming translation addrs */
2268         iowrite64(0, mmio + XEON_SBAR23XLAT_OFFSET);
2269
2270         if (!ndev->bar4_split) {
2271                 iowrite64(0, mmio + XEON_SBAR45XLAT_OFFSET);
2272         } else {
2273                 iowrite32(0, mmio + XEON_SBAR4XLAT_OFFSET);
2274                 iowrite32(0, mmio + XEON_SBAR5XLAT_OFFSET);
2275         }
2276
2277         /* zero outgoing translation limits (whole bar size windows) */
2278         iowrite64(0, mmio + XEON_PBAR23LMT_OFFSET);
2279         if (!ndev->bar4_split) {
2280                 iowrite64(0, mmio + XEON_PBAR45LMT_OFFSET);
2281         } else {
2282                 iowrite32(0, mmio + XEON_PBAR4LMT_OFFSET);
2283                 iowrite32(0, mmio + XEON_PBAR5LMT_OFFSET);
2284         }
2285
2286         /* set outgoing translation offsets */
2287         bar_addr = peer_addr->bar2_addr64;
2288         iowrite64(bar_addr, mmio + XEON_PBAR23XLAT_OFFSET);
2289         bar_addr = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
2290         dev_dbg(ndev_dev(ndev), "PBAR23XLAT %#018llx\n", bar_addr);
2291
2292         if (!ndev->bar4_split) {
2293                 bar_addr = peer_addr->bar4_addr64;
2294                 iowrite64(bar_addr, mmio + XEON_PBAR45XLAT_OFFSET);
2295                 bar_addr = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
2296                 dev_dbg(ndev_dev(ndev), "PBAR45XLAT %#018llx\n", bar_addr);
2297         } else {
2298                 bar_addr = peer_addr->bar4_addr32;
2299                 iowrite32(bar_addr, mmio + XEON_PBAR4XLAT_OFFSET);
2300                 bar_addr = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
2301                 dev_dbg(ndev_dev(ndev), "PBAR4XLAT %#010llx\n", bar_addr);
2302
2303                 bar_addr = peer_addr->bar5_addr32;
2304                 iowrite32(bar_addr, mmio + XEON_PBAR5XLAT_OFFSET);
2305                 bar_addr = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
2306                 dev_dbg(ndev_dev(ndev), "PBAR5XLAT %#010llx\n", bar_addr);
2307         }
2308
2309         /* set the translation offset for b2b registers */
2310         if (b2b_bar == 0)
2311                 bar_addr = peer_addr->bar0_addr;
2312         else if (b2b_bar == 2)
2313                 bar_addr = peer_addr->bar2_addr64;
2314         else if (b2b_bar == 4 && !ndev->bar4_split)
2315                 bar_addr = peer_addr->bar4_addr64;
2316         else if (b2b_bar == 4)
2317                 bar_addr = peer_addr->bar4_addr32;
2318         else if (b2b_bar == 5)
2319                 bar_addr = peer_addr->bar5_addr32;
2320         else
2321                 return -EIO;
2322
2323         /* B2B_XLAT_OFFSET is 64bit, but can only take 32bit writes */
2324         dev_dbg(ndev_dev(ndev), "B2BXLAT %#018llx\n", bar_addr);
2325         iowrite32(bar_addr, mmio + XEON_B2B_XLAT_OFFSETL);
2326         iowrite32(bar_addr >> 32, mmio + XEON_B2B_XLAT_OFFSETU);
2327
2328         if (b2b_bar) {
2329                 /* map peer ntb mmio config space registers */
2330                 ndev->peer_mmio = pci_iomap(pdev, b2b_bar,
2331                                             XEON_B2B_MIN_SIZE);
2332                 if (!ndev->peer_mmio)
2333                         return -EIO;
2334
2335                 ndev->peer_addr = pci_resource_start(pdev, b2b_bar);
2336         }
2337
2338         return 0;
2339 }
2340
2341 static int xeon_init_ntb(struct intel_ntb_dev *ndev)
2342 {
2343         int rc;
2344         u32 ntb_ctl;
2345
2346         if (ndev->bar4_split)
2347                 ndev->mw_count = HSX_SPLIT_BAR_MW_COUNT;
2348         else
2349                 ndev->mw_count = XEON_MW_COUNT;
2350
2351         ndev->spad_count = XEON_SPAD_COUNT;
2352         ndev->db_count = XEON_DB_COUNT;
2353         ndev->db_link_mask = XEON_DB_LINK_BIT;
2354
2355         switch (ndev->ntb.topo) {
2356         case NTB_TOPO_PRI:
2357                 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
2358                         dev_err(ndev_dev(ndev), "NTB Primary config disabled\n");
2359                         return -EINVAL;
2360                 }
2361
2362                 /* enable link to allow secondary side device to appear */
2363                 ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
2364                 ntb_ctl &= ~NTB_CTL_DISABLE;
2365                 iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
2366
2367                 /* use half the spads for the peer */
2368                 ndev->spad_count >>= 1;
2369                 ndev->self_reg = &xeon_pri_reg;
2370                 ndev->peer_reg = &xeon_sec_reg;
2371                 ndev->xlat_reg = &xeon_sec_xlat;
2372                 break;
2373
2374         case NTB_TOPO_SEC:
2375                 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
2376                         dev_err(ndev_dev(ndev), "NTB Secondary config disabled\n");
2377                         return -EINVAL;
2378                 }
2379                 /* use half the spads for the peer */
2380                 ndev->spad_count >>= 1;
2381                 ndev->self_reg = &xeon_sec_reg;
2382                 ndev->peer_reg = &xeon_pri_reg;
2383                 ndev->xlat_reg = &xeon_pri_xlat;
2384                 break;
2385
2386         case NTB_TOPO_B2B_USD:
2387         case NTB_TOPO_B2B_DSD:
2388                 ndev->self_reg = &xeon_pri_reg;
2389                 ndev->peer_reg = &xeon_b2b_reg;
2390                 ndev->xlat_reg = &xeon_sec_xlat;
2391
2392                 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
2393                         ndev->peer_reg = &xeon_pri_reg;
2394
2395                         if (b2b_mw_idx < 0)
2396                                 ndev->b2b_idx = b2b_mw_idx + ndev->mw_count;
2397                         else
2398                                 ndev->b2b_idx = b2b_mw_idx;
2399
2400                         if (ndev->b2b_idx >= ndev->mw_count) {
2401                                 dev_dbg(ndev_dev(ndev),
2402                                         "b2b_mw_idx %d invalid for mw_count %u\n",
2403                                         b2b_mw_idx, ndev->mw_count);
2404                                 return -EINVAL;
2405                         }
2406
2407                         dev_dbg(ndev_dev(ndev),
2408                                 "setting up b2b mw idx %d means %d\n",
2409                                 b2b_mw_idx, ndev->b2b_idx);
2410
2411                 } else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) {
2412                         dev_warn(ndev_dev(ndev), "Reduce doorbell count by 1\n");
2413                         ndev->db_count -= 1;
2414                 }
2415
2416                 if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
2417                         rc = xeon_setup_b2b_mw(ndev,
2418                                                &xeon_b2b_dsd_addr,
2419                                                &xeon_b2b_usd_addr);
2420                 } else {
2421                         rc = xeon_setup_b2b_mw(ndev,
2422                                                &xeon_b2b_usd_addr,
2423                                                &xeon_b2b_dsd_addr);
2424                 }
2425                 if (rc)
2426                         return rc;
2427
2428                 /* Enable Bus Master and Memory Space on the secondary side */
2429                 iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
2430                           ndev->self_mmio + XEON_SPCICMD_OFFSET);
2431
2432                 break;
2433
2434         default:
2435                 return -EINVAL;
2436         }
2437
2438         ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
2439
2440         ndev->reg->db_iowrite(ndev->db_valid_mask,
2441                               ndev->self_mmio +
2442                               ndev->self_reg->db_mask);
2443
2444         return 0;
2445 }
2446
2447 static int xeon_init_dev(struct intel_ntb_dev *ndev)
2448 {
2449         struct pci_dev *pdev;
2450         u8 ppd;
2451         int rc, mem;
2452
2453         pdev = ndev_pdev(ndev);
2454
2455         switch (pdev->device) {
2456         /* There is a Xeon hardware errata related to writes to SDOORBELL or
2457          * B2BDOORBELL in conjunction with inbound access to NTB MMIO Space,
2458          * which may hang the system.  To work around this, use the second memory
2459          * window to access the interrupt and scratch pad registers on the
2460          * remote system.
2461          */
2462         case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
2463         case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
2464         case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
2465         case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
2466         case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
2467         case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
2468         case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
2469         case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
2470         case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
2471         case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
2472         case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
2473         case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
2474         case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
2475         case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
2476         case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
2477                 ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP;
2478                 break;
2479         }
2480
2481         switch (pdev->device) {
2482         /* There is a hardware errata related to accessing any register in
2483          * SB01BASE in the presence of bidirectional traffic crossing the NTB.
2484          */
2485         case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
2486         case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
2487         case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
2488         case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
2489         case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
2490         case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
2491         case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
2492         case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
2493         case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
2494                 ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP;
2495                 break;
2496         }
2497
2498         switch (pdev->device) {
2499         /* HW Errata on bit 14 of b2bdoorbell register.  Writes will not be
2500          * mirrored to the remote system.  Shrink the number of bits by one,
2501          * since bit 14 is the last bit.
2502          */
2503         case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
2504         case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
2505         case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
2506         case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
2507         case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
2508         case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
2509         case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
2510         case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
2511         case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
2512         case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
2513         case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
2514         case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
2515         case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
2516         case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
2517         case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
2518                 ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14;
2519                 break;
2520         }
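
	/*
	 * Illustrative effect of the bit 14 errata flag (assuming XEON_DB_COUNT
	 * is 15): unless the SDOORBELL lockup workaround is already routing
	 * peer access through a memory window, xeon_init_ntb() drops db_count
	 * to 14, so db_valid_mask becomes BIT_ULL(14) - 1 and doorbell bit 14
	 * is never used.
	 */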
2521
2522         ndev->reg = &xeon_reg;
2523
2524         rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
2525         if (rc)
2526                 return -EIO;
2527
2528         ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
2529         dev_dbg(ndev_dev(ndev), "ppd %#x topo %s\n", ppd,
2530                 ntb_topo_string(ndev->ntb.topo));
2531         if (ndev->ntb.topo == NTB_TOPO_NONE)
2532                 return -EINVAL;
2533
2534         if (ndev->ntb.topo != NTB_TOPO_SEC) {
2535                 ndev->bar4_split = xeon_ppd_bar4_split(ndev, ppd);
2536                 dev_dbg(ndev_dev(ndev), "ppd %#x bar4_split %d\n",
2537                         ppd, ndev->bar4_split);
2538         } else {
2539                 /* This is how the transparent (secondary) side figures out
2540                  * whether split BAR is in use: the hw on that side cannot
2541                  * know or set the PPD, so count the memory BARs instead.
2542                  */
2543                 mem = pci_select_bars(pdev, IORESOURCE_MEM);
2544                 ndev->bar4_split = hweight32(mem) ==
2545                         HSX_SPLIT_BAR_MW_COUNT + 1;
2546                 dev_dbg(ndev_dev(ndev), "mem %#x bar4_split %d\n",
2547                         mem, ndev->bar4_split);
2548         }
2549
2550         rc = xeon_init_ntb(ndev);
2551         if (rc)
2552                 return rc;
2553
2554         return xeon_init_isr(ndev);
2555 }
2556
2557 static void xeon_deinit_dev(struct intel_ntb_dev *ndev)
2558 {
2559         xeon_deinit_isr(ndev);
2560 }
2561
2562 static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
2563 {
2564         int rc;
2565
2566         pci_set_drvdata(pdev, ndev);
2567
2568         rc = pci_enable_device(pdev);
2569         if (rc)
2570                 goto err_pci_enable;
2571
2572         rc = pci_request_regions(pdev, NTB_NAME);
2573         if (rc)
2574                 goto err_pci_regions;
2575
2576         pci_set_master(pdev);
2577
2578         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2579         if (rc) {
2580                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2581                 if (rc)
2582                         goto err_dma_mask;
2583                 dev_warn(ndev_dev(ndev), "Cannot DMA highmem\n");
2584         }
2585
2586         rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2587         if (rc) {
2588                 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2589                 if (rc)
2590                         goto err_dma_mask;
2591                 dev_warn(ndev_dev(ndev), "Cannot DMA consistent highmem\n");
2592         }
2593
2594         ndev->self_mmio = pci_iomap(pdev, 0, 0);
2595         if (!ndev->self_mmio) {
2596                 rc = -EIO;
2597                 goto err_mmio;
2598         }
2599         ndev->peer_mmio = ndev->self_mmio;
2600         ndev->peer_addr = pci_resource_start(pdev, 0);
2601
2602         return 0;
2603
2604 err_mmio:
2605 err_dma_mask:
2606         pci_clear_master(pdev);
2607         pci_release_regions(pdev);
2608 err_pci_regions:
2609         pci_disable_device(pdev);
2610 err_pci_enable:
2611         pci_set_drvdata(pdev, NULL);
2612         return rc;
2613 }
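
/*
 * Illustrative note (not part of the driver): the 64-bit/32-bit DMA mask
 * fallback above is roughly equivalent to the more modern pattern
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
 *		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 *
 * except that this driver applies the fallback separately for the streaming
 * and coherent masks, warning when it has to settle for 32 bits.
 */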
2614
2615 static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev)
2616 {
2617         struct pci_dev *pdev = ndev_pdev(ndev);
2618
2619         if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio)
2620                 pci_iounmap(pdev, ndev->peer_mmio);
2621         pci_iounmap(pdev, ndev->self_mmio);
2622
2623         pci_clear_master(pdev);
2624         pci_release_regions(pdev);
2625         pci_disable_device(pdev);
2626         pci_set_drvdata(pdev, NULL);
2627 }
2628
2629 static inline void ndev_init_struct(struct intel_ntb_dev *ndev,
2630                                     struct pci_dev *pdev)
2631 {
2632         ndev->ntb.pdev = pdev;
2633         ndev->ntb.topo = NTB_TOPO_NONE;
2634         ndev->ntb.ops = &intel_ntb_ops;
2635
2636         ndev->b2b_off = 0;
2637         ndev->b2b_idx = UINT_MAX;
2638
2639         ndev->bar4_split = 0;
2640
2641         ndev->mw_count = 0;
2642         ndev->spad_count = 0;
2643         ndev->db_count = 0;
2644         ndev->db_vec_count = 0;
2645         ndev->db_vec_shift = 0;
2646
2647         ndev->ntb_ctl = 0;
2648         ndev->lnk_sta = 0;
2649
2650         ndev->db_valid_mask = 0;
2651         ndev->db_link_mask = 0;
2652         ndev->db_mask = 0;
2653
2654         spin_lock_init(&ndev->db_mask_lock);
2655 }
2656
2657 static int intel_ntb_pci_probe(struct pci_dev *pdev,
2658                                const struct pci_device_id *id)
2659 {
2660         struct intel_ntb_dev *ndev;
2661         int rc, node;
2662
2663         node = dev_to_node(&pdev->dev);
2664
2665         if (pdev_is_atom(pdev)) {
2666                 ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
2667                 if (!ndev) {
2668                         rc = -ENOMEM;
2669                         goto err_ndev;
2670                 }
2671
2672                 ndev_init_struct(ndev, pdev);
2673
2674                 rc = intel_ntb_init_pci(ndev, pdev);
2675                 if (rc)
2676                         goto err_init_pci;
2677
2678                 rc = atom_init_dev(ndev);
2679                 if (rc)
2680                         goto err_init_dev;
2681
2682         } else if (pdev_is_xeon(pdev)) {
2683                 ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
2684                 if (!ndev) {
2685                         rc = -ENOMEM;
2686                         goto err_ndev;
2687                 }
2688
2689                 ndev_init_struct(ndev, pdev);
2690
2691                 rc = intel_ntb_init_pci(ndev, pdev);
2692                 if (rc)
2693                         goto err_init_pci;
2694
2695                 rc = xeon_init_dev(ndev);
2696                 if (rc)
2697                         goto err_init_dev;
2698
2699         } else if (pdev_is_skx_xeon(pdev)) {
2700                 ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
2701                 if (!ndev) {
2702                         rc = -ENOMEM;
2703                         goto err_ndev;
2704                 }
2705
2706                 ndev_init_struct(ndev, pdev);
2707                 ndev->ntb.ops = &intel_ntb3_ops;
2708
2709                 rc = intel_ntb_init_pci(ndev, pdev);
2710                 if (rc)
2711                         goto err_init_pci;
2712
2713                 rc = skx_init_dev(ndev);
2714                 if (rc)
2715                         goto err_init_dev;
2716
2717         } else {
2718                 rc = -EINVAL;
2719                 goto err_ndev;
2720         }
2721
2722         ndev_reset_unsafe_flags(ndev);
2723
2724         ndev->reg->poll_link(ndev);
2725
2726         ndev_init_debugfs(ndev);
2727
2728         rc = ntb_register_device(&ndev->ntb);
2729         if (rc)
2730                 goto err_register;
2731
2732         dev_info(&pdev->dev, "NTB device registered.\n");
2733
2734         return 0;
2735
2736 err_register:
2737         ndev_deinit_debugfs(ndev);
2738         if (pdev_is_atom(pdev))
2739                 atom_deinit_dev(ndev);
2740         else if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
2741                 xeon_deinit_dev(ndev);
2742 err_init_dev:
2743         intel_ntb_deinit_pci(ndev);
2744 err_init_pci:
2745         kfree(ndev);
2746 err_ndev:
2747         return rc;
2748 }
2749
2750 static void intel_ntb_pci_remove(struct pci_dev *pdev)
2751 {
2752         struct intel_ntb_dev *ndev = pci_get_drvdata(pdev);
2753
2754         ntb_unregister_device(&ndev->ntb);
2755         ndev_deinit_debugfs(ndev);
2756         if (pdev_is_atom(pdev))
2757                 atom_deinit_dev(ndev);
2758         else if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
2759                 xeon_deinit_dev(ndev);
2760         intel_ntb_deinit_pci(ndev);
2761         kfree(ndev);
2762 }
2763
2764 static const struct intel_ntb_reg atom_reg = {
2765         .poll_link              = atom_poll_link,
2766         .link_is_up             = atom_link_is_up,
2767         .db_ioread              = atom_db_ioread,
2768         .db_iowrite             = atom_db_iowrite,
2769         .db_size                = sizeof(u64),
2770         .ntb_ctl                = ATOM_NTBCNTL_OFFSET,
2771         .mw_bar                 = {2, 4},
2772 };
2773
2774 static const struct intel_ntb_alt_reg atom_pri_reg = {
2775         .db_bell                = ATOM_PDOORBELL_OFFSET,
2776         .db_mask                = ATOM_PDBMSK_OFFSET,
2777         .spad                   = ATOM_SPAD_OFFSET,
2778 };
2779
2780 static const struct intel_ntb_alt_reg atom_b2b_reg = {
2781         .db_bell                = ATOM_B2B_DOORBELL_OFFSET,
2782         .spad                   = ATOM_B2B_SPAD_OFFSET,
2783 };
2784
2785 static const struct intel_ntb_xlat_reg atom_sec_xlat = {
2786         /* FIXME : .bar0_base   = ATOM_SBAR0BASE_OFFSET, */
2787         /* FIXME : .bar2_limit  = ATOM_SBAR2LMT_OFFSET, */
2788         .bar2_xlat              = ATOM_SBAR2XLAT_OFFSET,
2789 };
2790
2791 static const struct intel_ntb_reg xeon_reg = {
2792         .poll_link              = xeon_poll_link,
2793         .link_is_up             = xeon_link_is_up,
2794         .db_ioread              = xeon_db_ioread,
2795         .db_iowrite             = xeon_db_iowrite,
2796         .db_size                = sizeof(u32),
2797         .ntb_ctl                = XEON_NTBCNTL_OFFSET,
2798         .mw_bar                 = {2, 4, 5},
2799 };
2800
2801 static const struct intel_ntb_alt_reg xeon_pri_reg = {
2802         .db_bell                = XEON_PDOORBELL_OFFSET,
2803         .db_mask                = XEON_PDBMSK_OFFSET,
2804         .spad                   = XEON_SPAD_OFFSET,
2805 };
2806
2807 static const struct intel_ntb_alt_reg xeon_sec_reg = {
2808         .db_bell                = XEON_SDOORBELL_OFFSET,
2809         .db_mask                = XEON_SDBMSK_OFFSET,
2810         /* second half of the scratchpads */
2811         .spad                   = XEON_SPAD_OFFSET + (XEON_SPAD_COUNT << 1),
2812 };
2813
2814 static const struct intel_ntb_alt_reg xeon_b2b_reg = {
2815         .db_bell                = XEON_B2B_DOORBELL_OFFSET,
2816         .spad                   = XEON_B2B_SPAD_OFFSET,
2817 };
2818
2819 static const struct intel_ntb_xlat_reg xeon_pri_xlat = {
2820         /* Note: no primary .bar0_base visible to the secondary side.
2821          *
2822          * The secondary side cannot get the base address stored in primary
2823          * bars.  The base address is necessary to set the limit register to
2824          * any value other than zero, or unlimited.
2825          *
2826          * WITHOUT THE BASE ADDRESS, THE SECONDARY SIDE CANNOT DISABLE the
2827          * window by setting the limit equal to base, nor can it limit the size
2828          * of the memory window by setting the limit to base + size.
2829          */
2830         .bar2_limit             = XEON_PBAR23LMT_OFFSET,
2831         .bar2_xlat              = XEON_PBAR23XLAT_OFFSET,
2832 };
2833
2834 static const struct intel_ntb_xlat_reg xeon_sec_xlat = {
2835         .bar0_base              = XEON_SBAR0BASE_OFFSET,
2836         .bar2_limit             = XEON_SBAR23LMT_OFFSET,
2837         .bar2_xlat              = XEON_SBAR23XLAT_OFFSET,
2838 };
2839
2840 static struct intel_b2b_addr xeon_b2b_usd_addr = {
2841         .bar2_addr64            = XEON_B2B_BAR2_ADDR64,
2842         .bar4_addr64            = XEON_B2B_BAR4_ADDR64,
2843         .bar4_addr32            = XEON_B2B_BAR4_ADDR32,
2844         .bar5_addr32            = XEON_B2B_BAR5_ADDR32,
2845 };
2846
2847 static struct intel_b2b_addr xeon_b2b_dsd_addr = {
2848         .bar2_addr64            = XEON_B2B_BAR2_ADDR64,
2849         .bar4_addr64            = XEON_B2B_BAR4_ADDR64,
2850         .bar4_addr32            = XEON_B2B_BAR4_ADDR32,
2851         .bar5_addr32            = XEON_B2B_BAR5_ADDR32,
2852 };
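
/* Editorial note: the upstream (USD) and downstream (DSD) defaults above
 * are identical on purpose; the full driver allows each field to be
 * overridden at load time via module parameters declared elsewhere in
 * this file (e.g. xeon_b2b_usd_bar2_addr64; an assumption, not shown
 * here).
 */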
2853
2854 static const struct intel_ntb_reg skx_reg = {
2855         .poll_link              = xeon_poll_link,
2856         .link_is_up             = xeon_link_is_up,
2857         .db_ioread              = skx_db_ioread,
2858         .db_iowrite             = skx_db_iowrite,
2859         .db_size                = sizeof(u64),
2860         .ntb_ctl                = SKX_NTBCNTL_OFFSET,
2861         .mw_bar                 = {2, 4},
2862 };
2863
2864 static const struct intel_ntb_alt_reg skx_pri_reg = {
2865         .db_bell                = SKX_EM_DOORBELL_OFFSET,
2866         .db_clear               = SKX_IM_INT_STATUS_OFFSET,
2867         .db_mask                = SKX_IM_INT_DISABLE_OFFSET,
2868         .spad                   = SKX_IM_SPAD_OFFSET,
2869 };
2870
2871 static const struct intel_ntb_alt_reg skx_b2b_reg = {
2872         .db_bell                = SKX_IM_DOORBELL_OFFSET,
2873         .db_clear               = SKX_EM_INT_STATUS_OFFSET,
2874         .db_mask                = SKX_EM_INT_DISABLE_OFFSET,
2875         .spad                   = SKX_B2B_SPAD_OFFSET,
2876 };
2877
2878 static const struct intel_ntb_xlat_reg skx_sec_xlat = {
2879 /*      .bar0_base              = SKX_EMBAR0_OFFSET, */
2880         .bar2_limit             = SKX_IMBAR1XLMT_OFFSET,
2881         .bar2_xlat              = SKX_IMBAR1XBASE_OFFSET,
2882 };
2883
/* Operations for the primary side of the local NTB */
2885 static const struct ntb_dev_ops intel_ntb_ops = {
2886         .mw_count               = intel_ntb_mw_count,
2887         .mw_get_range           = intel_ntb_mw_get_range,
2888         .mw_set_trans           = intel_ntb_mw_set_trans,
2889         .link_is_up             = intel_ntb_link_is_up,
2890         .link_enable            = intel_ntb_link_enable,
2891         .link_disable           = intel_ntb_link_disable,
2892         .db_is_unsafe           = intel_ntb_db_is_unsafe,
2893         .db_valid_mask          = intel_ntb_db_valid_mask,
2894         .db_vector_count        = intel_ntb_db_vector_count,
2895         .db_vector_mask         = intel_ntb_db_vector_mask,
2896         .db_read                = intel_ntb_db_read,
2897         .db_clear               = intel_ntb_db_clear,
2898         .db_set_mask            = intel_ntb_db_set_mask,
2899         .db_clear_mask          = intel_ntb_db_clear_mask,
2900         .peer_db_addr           = intel_ntb_peer_db_addr,
2901         .peer_db_set            = intel_ntb_peer_db_set,
2902         .spad_is_unsafe         = intel_ntb_spad_is_unsafe,
2903         .spad_count             = intel_ntb_spad_count,
2904         .spad_read              = intel_ntb_spad_read,
2905         .spad_write             = intel_ntb_spad_write,
2906         .peer_spad_addr         = intel_ntb_peer_spad_addr,
2907         .peer_spad_read         = intel_ntb_peer_spad_read,
2908         .peer_spad_write        = intel_ntb_peer_spad_write,
2909 };
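
/* Editorial sketch: clients reach this table through the inline wrappers
 * in include/linux/ntb.h rather than calling the ops directly.  A
 * hypothetical client might do:
 *
 *      ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
 *      if (ntb_link_is_up(ntb, NULL, NULL))
 *              ntb_peer_spad_write(ntb, 0, 0x1234);
 */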
2910
2911 static const struct ntb_dev_ops intel_ntb3_ops = {
2912         .mw_count               = intel_ntb_mw_count,
2913         .mw_get_range           = intel_ntb_mw_get_range,
2914         .mw_set_trans           = intel_ntb3_mw_set_trans,
2915         .link_is_up             = intel_ntb_link_is_up,
2916         .link_enable            = intel_ntb3_link_enable,
2917         .link_disable           = intel_ntb_link_disable,
2918         .db_valid_mask          = intel_ntb_db_valid_mask,
2919         .db_vector_count        = intel_ntb_db_vector_count,
2920         .db_vector_mask         = intel_ntb_db_vector_mask,
2921         .db_read                = intel_ntb3_db_read,
2922         .db_clear               = intel_ntb3_db_clear,
2923         .db_set_mask            = intel_ntb_db_set_mask,
2924         .db_clear_mask          = intel_ntb_db_clear_mask,
2925         .peer_db_addr           = intel_ntb_peer_db_addr,
2926         .peer_db_set            = intel_ntb3_peer_db_set,
2927         .spad_is_unsafe         = intel_ntb_spad_is_unsafe,
2928         .spad_count             = intel_ntb_spad_count,
2929         .spad_read              = intel_ntb_spad_read,
2930         .spad_write             = intel_ntb_spad_write,
2931         .peer_spad_addr         = intel_ntb_peer_spad_addr,
2932         .peer_spad_read         = intel_ntb_peer_spad_read,
2933         .peer_spad_write        = intel_ntb_peer_spad_write,
2934 };
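
/* Editorial note: the gen-3 table above reuses the gen-2 handlers except
 * where Skylake differs (mw_set_trans, link_enable, db_read/db_clear and
 * peer_db_set), and drops .db_is_unsafe, which exists to flag doorbell
 * errata on the older Xeon parts.
 */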
2935
2936 static const struct file_operations intel_ntb_debugfs_info = {
2937         .owner = THIS_MODULE,
2938         .open = simple_open,
2939         .read = ndev_debugfs_read,
2940 };
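
/* Editorial note: simple_open() copies inode->i_private into
 * file->private_data, which is presumably how ndev_debugfs_read() locates
 * the ndev passed to debugfs_create_file() elsewhere in this driver.
 */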
2941
2942 static const struct pci_device_id intel_ntb_pci_tbl[] = {
2943         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
2944         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
2945         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
2946         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
2947         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
2948         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BDX)},
2949         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
2950         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
2951         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
2952         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
2953         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_BDX)},
2954         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
2955         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
2956         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
2957         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
2958         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_BDX)},
2959         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)},
2960         {0}
2961 };
2962 MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);
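
/* Editorial note: PCI_VDEVICE(INTEL, id) fills in the Intel vendor ID and
 * the given device ID with the remaining match fields wildcarded, roughly:
 *
 *      { .vendor = PCI_VENDOR_ID_INTEL, .device = (id),
 *        .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID }
 *
 * MODULE_DEVICE_TABLE() exports the table as modalias data so userspace
 * can autoload this module when a matching device appears.
 */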
2963
2964 static struct pci_driver intel_ntb_pci_driver = {
2965         .name = KBUILD_MODNAME,
2966         .id_table = intel_ntb_pci_tbl,
2967         .probe = intel_ntb_pci_probe,
2968         .remove = intel_ntb_pci_remove,
2969 };
2970
2971 static int __init intel_ntb_pci_driver_init(void)
2972 {
2973         pr_info("%s %s\n", NTB_DESC, NTB_VER);
2974
        /* debugfs_dir stays NULL when debugfs is unavailable; later
         * debugfs calls accept a NULL dentry, so no error handling is
         * needed here.
         */
        if (debugfs_initialized())
                debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
2977
2978         return pci_register_driver(&intel_ntb_pci_driver);
2979 }
2980 module_init(intel_ntb_pci_driver_init);
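
/* Editorial note: pci_register_driver() can invoke intel_ntb_pci_probe()
 * for already-present devices before it returns, so the debugfs directory
 * is created first, making it available to the earliest probe.
 */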
2981
2982 static void __exit intel_ntb_pci_driver_exit(void)
2983 {
2984         pci_unregister_driver(&intel_ntb_pci_driver);
2985
        /* Safe even if the directory was never created:
         * debugfs_remove_recursive() ignores a NULL dentry.
         */
        debugfs_remove_recursive(debugfs_dir);
2987 }
2988 module_exit(intel_ntb_pci_driver_exit);
2989