/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dca.h>
#include <linux/aer.h>
#include <linux/sizes.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

MODULE_VERSION(IOAT_DMA_VERSION);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

static const struct pci_device_id ioat_pci_tbl[] = {
	/* I/OAT v3 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },

	/* I/OAT v3.2 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SKX) },

	/* I/OAT v3.3 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },

	/* I/OAT v3.4 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_ICX) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);

static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void ioat_remove(struct pci_dev *pdev);
static void
ioat_init_channel(struct ioatdma_device *ioat_dma,
		  struct ioatdma_chan *ioat_chan, int idx);
static void ioat_intr_quirk(struct ioatdma_device *ioat_dma);
static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma);

static int ioat_dca_enabled = 1;
module_param(ioat_dca_enabled, int, 0644);
MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
int ioat_pending_level = 7;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 7)");
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), msi, intx");

struct kmem_cache *ioat_cache;
struct kmem_cache *ioat_sed_cache;
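
/*
 * The helpers below classify a PCI function into its I/OAT hardware family
 * by device ID.  The probe and interrupt-setup paths use them to apply
 * family-specific behavior, e.g. masking RAID capabilities on parts that
 * lack them or enabling the BWD interrupt quirk.
 */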
static bool is_jf_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
		return true;
	default:
		return false;
	}
}

static bool is_snb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
		return true;
	default:
		return false;
	}
}

static bool is_ivb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
		return true;
	default:
		return false;
	}
}

static bool is_hsw_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
		return true;
	default:
		return false;
	}
}

static bool is_bdx_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BDX0:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX1:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX2:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX3:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX4:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX5:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX6:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX7:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX8:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX9:
		return true;
	default:
		return false;
	}
}

static inline bool is_skx_ioat(struct pci_dev *pdev)
{
	return (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SKX) ? true : false;
}

static bool is_xeon_cb32(struct pci_dev *pdev)
{
	return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
		is_hsw_ioat(pdev) || is_bdx_ioat(pdev) || is_skx_ioat(pdev);
}

bool is_bwd_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
	/* even though not Atom, BDX-DE has same DMA silicon */
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
		return true;
	default:
		return false;
	}
}
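
/*
 * BWD/BDX-DE parts without RAID support: ioat3_dma_probe() uses this to
 * clear the XOR/PQ capability bits before installing the dmaengine ops.
 */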
static bool is_bwd_noraid(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
		return true;
	default:
		return false;
	}
}

/*
 * Perform an I/OAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

/**
 * ioat_dma_self_test - Perform an I/OAT transaction to verify the HW works.
 * @ioat_dma: dma device to be tested
 */
static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct device *dev = &ioat_dma->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_src)) {
		dev_err(dev, "mapping src buffer failed\n");
		err = -ENOMEM;
		goto free_resources;
	}
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_dest)) {
		dev_err(dev, "mapping dest buffer failed\n");
		err = -ENOMEM;
		goto unmap_src;
	}
	flags = DMA_PREP_INTERRUPT;
	tx = ioat_dma->dma_dev.device_prep_dma_memcpy(dma_chan, dma_dest,
						      dma_src, IOAT_TEST_SIZE,
						      flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}

unmap_dma:
	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
unmap_src:
	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}
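
/*
 * Interrupt setup policy: try MSI-X with one vector per channel first,
 * then fall back to a single MSI, then to legacy INTx.  The starting
 * point can be overridden with the ioat_interrupt_style module parameter.
 */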
/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @ioat_dma: ioat dma device
 */
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma)
{
	struct ioatdma_chan *ioat_chan;
	struct pci_dev *pdev = ioat_dma->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = ioat_dma->dma_dev.chancnt;
	for (i = 0; i < msixcnt; i++)
		ioat_dma->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, ioat_dma->msix_entries, msixcnt);
	if (err)
		goto msi;

	for (i = 0; i < msixcnt; i++) {
		msix = &ioat_dma->msix_entries[i];
		ioat_chan = ioat_chan_by_index(ioat_dma, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", ioat_chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &ioat_dma->msix_entries[j];
				ioat_chan = ioat_chan_by_index(ioat_dma, j);
				devm_free_irq(dev, msix->vector, ioat_chan);
			}
			goto msi;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	ioat_dma->irq_mode = IOAT_MSIX;
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", ioat_dma);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	ioat_dma->irq_mode = IOAT_MSI;
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", ioat_dma);
	if (err)
		goto err_no_irq;

	ioat_dma->irq_mode = IOAT_INTX;
done:
	if (is_bwd_ioat(pdev))
		ioat_intr_quirk(ioat_dma);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
	ioat_dma->irq_mode = IOAT_NOIRQ;
	dev_err(dev, "no usable interrupts\n");
	return err;
}

static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma)
{
	/* Disable all interrupt generation */
	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
}

static int ioat_probe(struct ioatdma_device *ioat_dma)
{
	int err = -ENODEV;
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct pci_dev *pdev = ioat_dma->pdev;
	struct device *dev = &pdev->dev;

	ioat_dma->completion_pool = dma_pool_create("completion_pool", dev,
						    sizeof(u64),
						    SMP_CACHE_BYTES,
						    SMP_CACHE_BYTES);

	if (!ioat_dma->completion_pool) {
		err = -ENOMEM;
		goto err_out;
	}

	ioat_enumerate_channels(ioat_dma);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	if (!dma->chancnt) {
		dev_err(dev, "channel enumeration error\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(ioat_dma);
	if (err)
		goto err_setup_interrupts;

	err = ioat3_dma_self_test(ioat_dma);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(ioat_dma);
err_setup_interrupts:
	dma_pool_destroy(ioat_dma->completion_pool);
err_out:
	return err;
}
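
/*
 * Register with the dmaengine core; on failure undo the interrupt setup
 * and completion pool allocation done in ioat_probe().
 */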
static int ioat_register(struct ioatdma_device *ioat_dma)
{
	int err = dma_async_device_register(&ioat_dma->dma_dev);

	if (err) {
		ioat_disable_interrupts(ioat_dma);
		dma_pool_destroy(ioat_dma->completion_pool);
	}

	return err;
}

static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
{
	struct dma_device *dma = &ioat_dma->dma_dev;

	ioat_disable_interrupts(ioat_dma);

	ioat_kobject_del(ioat_dma);

	dma_async_device_unregister(dma);

	dma_pool_destroy(ioat_dma->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}

/**
 * ioat_enumerate_channels - find and initialize the device's channels
 * @ioat_dma: the ioat dma device to be enumerated
 */
static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
{
	struct ioatdma_chan *ioat_chan;
	struct device *dev = &ioat_dma->pdev->dev;
	struct dma_device *dma = &ioat_dma->dma_dev;
	u8 xfercap_log;
	int i;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(ioat_dma->idx));
		dma->chancnt = ARRAY_SIZE(ioat_dma->idx);
	}
	xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_log &= 0x1f; /* bits [4:0] valid */
	if (xfercap_log == 0)
		return;
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

	for (i = 0; i < dma->chancnt; i++) {
		ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
		if (!ioat_chan)
			break;

		ioat_init_channel(ioat_dma, ioat_chan, i);
		ioat_chan->xfercap_log = xfercap_log;
		spin_lock_init(&ioat_chan->prep_lock);
		if (ioat_reset_hw(ioat_chan)) {
			i = 0;
			break;
		}
	}
	dma->chancnt = i;
}

/**
 * ioat_free_chan_resources - release all the descriptors
 * @c: the channel to be cleaned
 */
static void ioat_free_chan_resources(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	const int total_descs = 1 << ioat_chan->alloc_order;
	int descs;
	int i;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (!ioat_chan->ring)
		return;

	ioat_stop(ioat_chan);
	ioat_reset_hw(ioat_chan);

	spin_lock_bh(&ioat_chan->cleanup_lock);
	spin_lock_bh(&ioat_chan->prep_lock);
	descs = ioat_ring_space(ioat_chan);
	dev_dbg(to_dev(ioat_chan), "freeing %d idle descriptors\n", descs);
	for (i = 0; i < descs; i++) {
		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head + i);
		ioat_free_ring_ent(desc, c);
	}

	if (descs < total_descs)
		dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n",
			total_descs - descs);

	for (i = 0; i < total_descs - descs; i++) {
		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail + i);
		dump_desc_dbg(ioat_chan, desc);
		ioat_free_ring_ent(desc, c);
	}

	for (i = 0; i < ioat_chan->desc_chunks; i++) {
		dma_free_coherent(to_dev(ioat_chan), SZ_2M,
				  ioat_chan->descs[i].virt,
				  ioat_chan->descs[i].hw);
		ioat_chan->descs[i].virt = NULL;
		ioat_chan->descs[i].hw = 0;
	}
	ioat_chan->desc_chunks = 0;

	kfree(ioat_chan->ring);
	ioat_chan->ring = NULL;
	ioat_chan->alloc_order = 0;
	dma_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
		      ioat_chan->completion_dma);
	spin_unlock_bh(&ioat_chan->prep_lock);
	spin_unlock_bh(&ioat_chan->cleanup_lock);

	ioat_chan->last_completion = 0;
	ioat_chan->completion_dma = 0;
	ioat_chan->dmacount = 0;
}

/* ioat_alloc_chan_resources - allocate/initialize ioat descriptor ring
 * @chan: channel to be initialized
 */
static int ioat_alloc_chan_resources(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioat_ring_ent **ring;
	u64 status;
	int order;
	int i = 0;
	u32 chanerr;

	/* have we already been set up? */
	if (ioat_chan->ring)
		return 1 << ioat_chan->alloc_order;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion =
		dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool,
				GFP_NOWAIT, &ioat_chan->completion_dma);
	if (!ioat_chan->completion)
		return -ENOMEM;

	writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64)ioat_chan->completion_dma) >> 32,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	order = IOAT_MAX_ORDER;
	ring = ioat_alloc_ring(c, order, GFP_NOWAIT);
	if (!ring)
		return -ENOMEM;

	spin_lock_bh(&ioat_chan->cleanup_lock);
	spin_lock_bh(&ioat_chan->prep_lock);
	ioat_chan->ring = ring;
	ioat_chan->head = 0;
	ioat_chan->issued = 0;
	ioat_chan->tail = 0;
	ioat_chan->alloc_order = order;
	set_bit(IOAT_RUN, &ioat_chan->state);
	spin_unlock_bh(&ioat_chan->prep_lock);
	spin_unlock_bh(&ioat_chan->cleanup_lock);

	ioat_start_null_desc(ioat_chan);

	/* check that we got off the ground */
	do {
		udelay(1);
		status = ioat_chansts(ioat_chan);
	} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));

	if (is_ioat_active(status) || is_ioat_idle(status))
		return 1 << ioat_chan->alloc_order;

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

	dev_WARN(to_dev(ioat_chan),
		 "failed to start channel chanerr: %#x\n", chanerr);
	ioat_free_chan_resources(c);
	return -EFAULT;
}

/* common channel initialization */
static void
ioat_init_channel(struct ioatdma_device *ioat_dma,
		  struct ioatdma_chan *ioat_chan, int idx)
{
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct dma_chan *c = &ioat_chan->dma_chan;
	unsigned long data = (unsigned long) c;

	ioat_chan->ioat_dma = ioat_dma;
	ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1));
	spin_lock_init(&ioat_chan->cleanup_lock);
	ioat_chan->dma_chan.device = dma;
	dma_cookie_init(&ioat_chan->dma_chan);
	list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
	ioat_dma->idx[idx] = ioat_chan;
	timer_setup(&ioat_chan->timer, ioat_timer_event, 0);
	tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data);
}
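
/*
 * XOR/XOR-validate self test: fill known source pages, XOR them into a
 * destination page, then (when the XOR_VAL capability is present) run two
 * validate operations, one expected to pass and one expected to report a
 * non-zero parity sum.
 */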
#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	u32 xor_val_result;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	struct device *dev = &ioat_dma->pdev->dev;
	struct dma_device *dma = &ioat_dma->dma_dev;
	u8 op = 0;

	dev_dbg(dev, "%s\n", __func__);

	if (!dma_has_cap(DMA_XOR, dma->cap_mask))
		return 0;

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);

		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
			(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	op = IOAT_OP_XOR;

	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dest_dma)) {
		err = -ENOMEM;
		goto free_resources;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_srcs[i])) {
			err = -ENOMEM;
			goto dma_unmap;
		}
	}
	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
				      DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test xor prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test xor setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test xor timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);

		if (ptr[i] != cmp_word) {
			dev_err(dev, "Self-test xor failed compare\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	/* skip validate if the capability is not present */
	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
		goto free_resources;

	op = IOAT_OP_XOR_VAL;

	/* validate the sources with the destination page */
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		xor_val_srcs[i] = xor_srcs[i];
	xor_val_srcs[i] = dest;

	xor_val_result = 1;

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_srcs[i])) {
			err = -ENOMEM;
			goto dma_unmap;
		}
	}
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test zero prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test zero setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test validate timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	if (xor_val_result != 0) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto free_resources;
	}

	memset(page_address(dest), 0, PAGE_SIZE);

	/* test for non-zero parity sum */
	op = IOAT_OP_XOR_VAL;

	xor_val_result = 0;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_srcs[i])) {
			err = -ENOMEM;
			goto dma_unmap;
		}
	}
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test 2nd zero prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test 2nd zero setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test 2nd validate timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	if (xor_val_result != SUM_CHECK_P_RESULT) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	goto free_resources;
dma_unmap:
	if (op == IOAT_OP_XOR) {
		while (--i >= 0)
			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
				       DMA_TO_DEVICE);
		dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	} else if (op == IOAT_OP_XOR_VAL) {
		while (--i >= 0)
			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
				       DMA_TO_DEVICE);
	}
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	src_idx = IOAT_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma)
{
	int rc;

	rc = ioat_dma_self_test(ioat_dma);
	if (rc)
		return rc;

	rc = ioat_xor_val_self_test(ioat_dma);

	return rc;
}
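
/*
 * When the device reports descriptor write back error status
 * (IOAT_CAP_DWBES), the XOR P/CRC and Q error interrupts are masked per
 * channel.  Called from ioat_dma_setup_interrupts() for BWD-class devices.
 */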
static void ioat_intr_quirk(struct ioatdma_device *ioat_dma)
{
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioatdma_chan *ioat_chan;
	u32 errmask;

	dma = &ioat_dma->dma_dev;

	/*
	 * if we have descriptor write back error status, we mask the
	 * error interrupts
	 */
	if (ioat_dma->cap & IOAT_CAP_DWBES) {
		list_for_each_entry(c, &dma->channels, device_node) {
			ioat_chan = to_ioat_chan(c);
			errmask = readl(ioat_chan->reg_base +
					IOAT_CHANERR_MASK_OFFSET);
			errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
				   IOAT_CHANERR_XOR_Q_ERR;
			writel(errmask, ioat_chan->reg_base +
					IOAT_CHANERR_MASK_OFFSET);
		}
	}
}
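
/*
 * Main v3.x probe: install the dmaengine callbacks, read the capability
 * register, apply platform quirks, set up the RAID16SS descriptor pools
 * when supported, run ioat_probe()/ioat_register(), then start the
 * optional DCA provider and disable PCIe relaxed ordering.
 */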
static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
{
	struct pci_dev *pdev = ioat_dma->pdev;
	int dca_en = system_has_dca_enabled(pdev);
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioatdma_chan *ioat_chan;
	int err;
	u16 val16;

	dma = &ioat_dma->dma_dev;
	dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat_issue_pending;
	dma->device_alloc_chan_resources = ioat_alloc_chan_resources;
	dma->device_free_chan_resources = ioat_free_chan_resources;

	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
	dma->device_prep_dma_interrupt = ioat_prep_interrupt_lock;

	ioat_dma->cap = readl(ioat_dma->reg_base + IOAT_DMA_CAP_OFFSET);

	if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
		ioat_dma->cap &=
			~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);

	/* dca is incompatible with raid operations */
	if (dca_en && (ioat_dma->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
		ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);

	if (ioat_dma->cap & IOAT_CAP_XOR) {
		dma->max_xor = 8;

		dma_cap_set(DMA_XOR, dma->cap_mask);
		dma->device_prep_dma_xor = ioat_prep_xor;

		dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
		dma->device_prep_dma_xor_val = ioat_prep_xor_val;
	}

	if (ioat_dma->cap & IOAT_CAP_PQ) {
		dma->device_prep_dma_pq = ioat_prep_pq;
		dma->device_prep_dma_pq_val = ioat_prep_pq_val;
		dma_cap_set(DMA_PQ, dma->cap_mask);
		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);

		if (ioat_dma->cap & IOAT_CAP_RAID16SS)
			dma_set_maxpq(dma, 16, 0);
		else
			dma_set_maxpq(dma, 8, 0);

		if (!(ioat_dma->cap & IOAT_CAP_XOR)) {
			dma->device_prep_dma_xor = ioat_prep_pqxor;
			dma->device_prep_dma_xor_val = ioat_prep_pqxor_val;
			dma_cap_set(DMA_XOR, dma->cap_mask);
			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);

			if (ioat_dma->cap & IOAT_CAP_RAID16SS)
				dma->max_xor = 16;
			else
				dma->max_xor = 8;
		}
	}

	dma->device_tx_status = ioat_tx_status;

	/* starting with CB3.3 super extended descriptors are supported */
	if (ioat_dma->cap & IOAT_CAP_RAID16SS) {
		char pool_name[14];
		int i;

		for (i = 0; i < MAX_SED_POOLS; i++) {
			snprintf(pool_name, 14, "ioat_hw%d_sed", i);

			/* allocate SED DMA pool */
			ioat_dma->sed_hw_pool[i] = dmam_pool_create(pool_name,
					&pdev->dev,
					SED_SIZE * (i + 1), 64, 0);
			if (!ioat_dma->sed_hw_pool[i])
				return -ENOMEM;
		}
	}

	if (!(ioat_dma->cap & (IOAT_CAP_XOR | IOAT_CAP_PQ)))
		dma_cap_set(DMA_PRIVATE, dma->cap_mask);

	err = ioat_probe(ioat_dma);
	if (err)
		return err;

	list_for_each_entry(c, &dma->channels, device_node) {
		ioat_chan = to_ioat_chan(c);
		writel(IOAT_DMA_DCA_ANY_CPU,
		       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(ioat_dma);
	if (err)
		return err;

	ioat_kobject_add(ioat_dma, &ioat_ktype);

	if (dca)
		ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base);

	/* disable relaxed ordering */
	err = pcie_capability_read_word(pdev, IOAT_DEVCTRL_OFFSET, &val16);
	if (err)
		return err;

	/* clear relaxed ordering enable */
	val16 &= ~IOAT_DEVCTRL_ROE;
	err = pcie_capability_write_word(pdev, IOAT_DEVCTRL_OFFSET, val16);
	if (err)
		return err;

	if (ioat_dma->cap & IOAT_CAP_DPS)
		writeb(ioat_pending_level + 1,
		       ioat_dma->reg_base + IOAT_PREFETCH_LIMIT_OFFSET);

	return 0;
}
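
/*
 * Quiesce the device: mark every channel down, stop its timer, and reset
 * the hardware before turning off interrupt generation.  This is also the
 * path used by the PCIe AER error_detected handler.
 */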
static void ioat_shutdown(struct pci_dev *pdev)
{
	struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);
	struct ioatdma_chan *ioat_chan;
	int i;

	if (!ioat_dma)
		return;

	for (i = 0; i < IOAT_MAX_CHANS; i++) {
		ioat_chan = ioat_dma->idx[i];
		if (!ioat_chan)
			continue;

		spin_lock_bh(&ioat_chan->prep_lock);
		set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
		spin_unlock_bh(&ioat_chan->prep_lock);
		/*
		 * Synchronization rule for del_timer_sync():
		 *  - The caller must not hold locks which would prevent
		 *    completion of the timer's handler.
		 * So prep_lock cannot be held before calling it.
		 */
		del_timer_sync(&ioat_chan->timer);

		/* this should quiesce then reset */
		ioat_reset_hw(ioat_chan);
	}

	ioat_disable_interrupts(ioat_dma);
}

static void ioat_resume(struct ioatdma_device *ioat_dma)
{
	struct ioatdma_chan *ioat_chan;
	u32 chanerr;
	int i;

	for (i = 0; i < IOAT_MAX_CHANS; i++) {
		ioat_chan = ioat_dma->idx[i];
		if (!ioat_chan)
			continue;

		spin_lock_bh(&ioat_chan->prep_lock);
		clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
		spin_unlock_bh(&ioat_chan->prep_lock);

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

		/* no need to reset as shutdown already did that */
	}
}
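
/*
 * PCIe AER recovery: error_detected() quiesces the device via
 * ioat_shutdown(), slot_reset() re-enables the function and restores its
 * config space, and resume() clears channel errors and marks the channels
 * usable again via ioat_resume().
 */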
#define DRV_NAME "ioatdma"

static pci_ers_result_t ioat_pcie_error_detected(struct pci_dev *pdev,
						 enum pci_channel_state error)
{
	dev_dbg(&pdev->dev, "%s: PCIe AER error %d\n", DRV_NAME, error);

	/* quiesce and block I/O */
	ioat_shutdown(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t ioat_pcie_error_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;

	dev_dbg(&pdev->dev, "%s post reset handling\n", DRV_NAME);

	if (pci_enable_device_mem(pdev) < 0) {
		dev_err(&pdev->dev,
			"Failed to enable PCIe device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);
	}

	return result;
}

static void ioat_pcie_error_resume(struct pci_dev *pdev)
{
	struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s: AER handling resuming\n", DRV_NAME);

	/* initialize and bring everything back */
	ioat_resume(ioat_dma);
}

static const struct pci_error_handlers ioat_err_handler = {
	.error_detected = ioat_pcie_error_detected,
	.slot_reset = ioat_pcie_error_slot_reset,
	.resume = ioat_pcie_error_resume,
};

static struct pci_driver ioat_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= ioat_pci_tbl,
	.probe		= ioat_pci_probe,
	.remove		= ioat_remove,
	.shutdown	= ioat_shutdown,
	.err_handler	= &ioat_err_handler,
};
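
/*
 * The ioatdma_device is a devres-managed allocation, so it is freed
 * automatically when the underlying PCI device is released.
 */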
static struct ioatdma_device *
alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
{
	struct device *dev = &pdev->dev;
	struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);

	if (!d)
		return NULL;
	d->pdev = pdev;
	d->reg_base = iobase;
	return d;
}

static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem * const *iomap;
	struct device *dev = &pdev->dev;
	struct ioatdma_device *device;
	int err;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME);
	if (err)
		return err;
	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err)
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		return err;

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		return err;

	device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
	if (!device)
		return -ENOMEM;
	pci_set_master(pdev);
	pci_set_drvdata(pdev, device);

	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
	if (device->version >= IOAT_VER_3_4)
		ioat_dca_enabled = 0;
	if (device->version >= IOAT_VER_3_0) {
		if (is_skx_ioat(pdev))
			device->version = IOAT_VER_3_2;
		err = ioat3_dma_probe(device, ioat_dca_enabled);

		if (device->version >= IOAT_VER_3_3)
			pci_enable_pcie_error_reporting(pdev);
	} else
		return -ENODEV;

	if (err) {
		dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
		pci_disable_pcie_error_reporting(pdev);
		return -ENODEV;
	}

	return 0;
}
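
/*
 * Tear down in reverse order of probe: unregister and free the DCA
 * provider first, then disable AER reporting and remove the dmaengine
 * device.
 */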
static void ioat_remove(struct pci_dev *pdev)
{
	struct ioatdma_device *device = pci_get_drvdata(pdev);

	if (!device)
		return;

	dev_err(&pdev->dev, "Removing dma and dca services\n");
	if (device->dca) {
		unregister_dca_provider(device->dca, &pdev->dev);
		free_dca_provider(device->dca);
		device->dca = NULL;
	}

	pci_disable_pcie_error_reporting(pdev);
	ioat_dma_remove(device);
}

static int __init ioat_init_module(void)
{
	int err = -ENOMEM;

	pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
		DRV_NAME, IOAT_DMA_VERSION);

	ioat_cache = kmem_cache_create("ioat", sizeof(struct ioat_ring_ent),
					0, SLAB_HWCACHE_ALIGN, NULL);
	if (!ioat_cache)
		return -ENOMEM;

	ioat_sed_cache = KMEM_CACHE(ioat_sed_ent, 0);
	if (!ioat_sed_cache)
		goto err_ioat_cache;

	err = pci_register_driver(&ioat_pci_driver);
	if (err)
		goto err_ioat3_cache;

	return 0;

 err_ioat3_cache:
	kmem_cache_destroy(ioat_sed_cache);

 err_ioat_cache:
	kmem_cache_destroy(ioat_cache);

	return err;
}
module_init(ioat_init_module);

static void __exit ioat_exit_module(void)
{
	pci_unregister_driver(&ioat_pci_driver);
	kmem_cache_destroy(ioat_cache);
}
module_exit(ioat_exit_module);