1 // SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Accelerated Function Unit (AFU)
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 *
 * Authors:
 *   Wu Hao <hao.wu@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Henry Mitchel <henry.mitchel@intel.com>
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/fpga-dfl.h>

#include "dfl-afu.h"
25 * port_enable - enable a port
26 * @pdev: port platform device.
28 * Enable Port by clear the port soft reset bit, which is set by default.
29 * The AFU is unable to respond to any MMIO access while in reset.
30 * port_enable function should only be used after port_disable function.
32 static void port_enable(struct platform_device *pdev)
34 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
38 WARN_ON(!pdata->disable_count);
40 if (--pdata->disable_count != 0)
43 base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
45 /* Clear port soft reset */
46 v = readq(base + PORT_HDR_CTRL);
47 v &= ~PORT_CTRL_SFTRST;
48 writeq(v, base + PORT_HDR_CTRL);
51 #define RST_POLL_INVL 10 /* us */
52 #define RST_POLL_TIMEOUT 1000 /* us */
55 * port_disable - disable a port
56 * @pdev: port platform device.
58 * Disable Port by setting the port soft reset bit, it puts the port into
61 static int port_disable(struct platform_device *pdev)
63 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
67 if (pdata->disable_count++ != 0)
70 base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
72 /* Set port soft reset */
73 v = readq(base + PORT_HDR_CTRL);
74 v |= PORT_CTRL_SFTRST;
75 writeq(v, base + PORT_HDR_CTRL);
78 * HW sets ack bit to 1 when all outstanding requests have been drained
79 * on this port and minimum soft reset pulse width has elapsed.
80 * Driver polls port_soft_reset_ack to determine if reset done by HW.
82 if (readq_poll_timeout(base + PORT_HDR_CTRL, v, v & PORT_CTRL_SFTRST,
83 RST_POLL_INVL, RST_POLL_TIMEOUT)) {
84 dev_err(&pdev->dev, "timeout, fail to reset device\n");
/**
 * __port_reset - reset a port (caller must hold pdata->lock)
 * @pdev: port platform device.
 *
 * This function resets the FPGA Port and its accelerator (AFU) by function
 * __port_disable and __port_enable (set port soft reset bit and then clear
 * it). Userspace can do Port reset at any time, e.g. during DMA or Partial
 * Reconfiguration. But it should never cause any system level issue, only
 * functional failure (e.g. DMA or PR operation failure) and be recoverable
 * from the failure.
 *
 * Note: the accelerator (AFU) is not accessible when its port is in reset
 * (disabled). Any attempts on MMIO access to AFU while in reset, will
 * result errors reported via port error reporting sub feature (if present).
 */
static int __port_reset(struct platform_device *pdev)
{
	int ret;

	ret = port_disable(pdev);
	if (!ret)
		port_enable(pdev);

	return ret;
}
114 static int port_reset(struct platform_device *pdev)
116 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
119 mutex_lock(&pdata->lock);
120 ret = __port_reset(pdev);
121 mutex_unlock(&pdata->lock);
126 static int port_get_id(struct platform_device *pdev)
130 base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
132 return FIELD_GET(PORT_CAP_PORT_NUM, readq(base + PORT_HDR_CAP));
136 id_show(struct device *dev, struct device_attribute *attr, char *buf)
138 int id = port_get_id(to_platform_device(dev));
140 return scnprintf(buf, PAGE_SIZE, "%d\n", id);
142 static DEVICE_ATTR_RO(id);
145 ltr_show(struct device *dev, struct device_attribute *attr, char *buf)
147 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
151 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
153 mutex_lock(&pdata->lock);
154 v = readq(base + PORT_HDR_CTRL);
155 mutex_unlock(&pdata->lock);
157 return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_CTRL_LATENCY, v));
161 ltr_store(struct device *dev, struct device_attribute *attr,
162 const char *buf, size_t count)
164 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
169 if (kstrtobool(buf, <r))
172 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
174 mutex_lock(&pdata->lock);
175 v = readq(base + PORT_HDR_CTRL);
176 v &= ~PORT_CTRL_LATENCY;
177 v |= FIELD_PREP(PORT_CTRL_LATENCY, ltr ? 1 : 0);
178 writeq(v, base + PORT_HDR_CTRL);
179 mutex_unlock(&pdata->lock);
183 static DEVICE_ATTR_RW(ltr);
186 ap1_event_show(struct device *dev, struct device_attribute *attr, char *buf)
188 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
192 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
194 mutex_lock(&pdata->lock);
195 v = readq(base + PORT_HDR_STS);
196 mutex_unlock(&pdata->lock);
198 return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP1_EVT, v));
202 ap1_event_store(struct device *dev, struct device_attribute *attr,
203 const char *buf, size_t count)
205 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
209 if (kstrtobool(buf, &clear) || !clear)
212 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
214 mutex_lock(&pdata->lock);
215 writeq(PORT_STS_AP1_EVT, base + PORT_HDR_STS);
216 mutex_unlock(&pdata->lock);
220 static DEVICE_ATTR_RW(ap1_event);
223 ap2_event_show(struct device *dev, struct device_attribute *attr,
226 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
230 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
232 mutex_lock(&pdata->lock);
233 v = readq(base + PORT_HDR_STS);
234 mutex_unlock(&pdata->lock);
236 return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP2_EVT, v));
240 ap2_event_store(struct device *dev, struct device_attribute *attr,
241 const char *buf, size_t count)
243 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
247 if (kstrtobool(buf, &clear) || !clear)
250 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
252 mutex_lock(&pdata->lock);
253 writeq(PORT_STS_AP2_EVT, base + PORT_HDR_STS);
254 mutex_unlock(&pdata->lock);
258 static DEVICE_ATTR_RW(ap2_event);
261 power_state_show(struct device *dev, struct device_attribute *attr, char *buf)
263 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
267 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
269 mutex_lock(&pdata->lock);
270 v = readq(base + PORT_HDR_STS);
271 mutex_unlock(&pdata->lock);
273 return sprintf(buf, "0x%x\n", (u8)FIELD_GET(PORT_STS_PWR_STATE, v));
275 static DEVICE_ATTR_RO(power_state);
278 userclk_freqcmd_store(struct device *dev, struct device_attribute *attr,
279 const char *buf, size_t count)
281 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
282 u64 userclk_freq_cmd;
285 if (kstrtou64(buf, 0, &userclk_freq_cmd))
288 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
290 mutex_lock(&pdata->lock);
291 writeq(userclk_freq_cmd, base + PORT_HDR_USRCLK_CMD0);
292 mutex_unlock(&pdata->lock);
296 static DEVICE_ATTR_WO(userclk_freqcmd);
299 userclk_freqcntrcmd_store(struct device *dev, struct device_attribute *attr,
300 const char *buf, size_t count)
302 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
303 u64 userclk_freqcntr_cmd;
306 if (kstrtou64(buf, 0, &userclk_freqcntr_cmd))
309 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
311 mutex_lock(&pdata->lock);
312 writeq(userclk_freqcntr_cmd, base + PORT_HDR_USRCLK_CMD1);
313 mutex_unlock(&pdata->lock);
317 static DEVICE_ATTR_WO(userclk_freqcntrcmd);
320 userclk_freqsts_show(struct device *dev, struct device_attribute *attr,
323 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
327 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
329 mutex_lock(&pdata->lock);
330 userclk_freqsts = readq(base + PORT_HDR_USRCLK_STS0);
331 mutex_unlock(&pdata->lock);
333 return sprintf(buf, "0x%llx\n", (unsigned long long)userclk_freqsts);
335 static DEVICE_ATTR_RO(userclk_freqsts);
338 userclk_freqcntrsts_show(struct device *dev, struct device_attribute *attr,
341 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
342 u64 userclk_freqcntrsts;
345 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
347 mutex_lock(&pdata->lock);
348 userclk_freqcntrsts = readq(base + PORT_HDR_USRCLK_STS1);
349 mutex_unlock(&pdata->lock);
351 return sprintf(buf, "0x%llx\n",
352 (unsigned long long)userclk_freqcntrsts);
354 static DEVICE_ATTR_RO(userclk_freqcntrsts);
356 static struct attribute *port_hdr_attrs[] = {
359 &dev_attr_ap1_event.attr,
360 &dev_attr_ap2_event.attr,
361 &dev_attr_power_state.attr,
362 &dev_attr_userclk_freqcmd.attr,
363 &dev_attr_userclk_freqcntrcmd.attr,
364 &dev_attr_userclk_freqsts.attr,
365 &dev_attr_userclk_freqcntrsts.attr,
369 static umode_t port_hdr_attrs_visible(struct kobject *kobj,
370 struct attribute *attr, int n)
372 struct device *dev = kobj_to_dev(kobj);
373 umode_t mode = attr->mode;
376 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
378 if (dfl_feature_revision(base) > 0) {
380 * userclk sysfs interfaces are only visible in case port
381 * revision is 0, as hardware with revision >0 doesn't
384 if (attr == &dev_attr_userclk_freqcmd.attr ||
385 attr == &dev_attr_userclk_freqcntrcmd.attr ||
386 attr == &dev_attr_userclk_freqsts.attr ||
387 attr == &dev_attr_userclk_freqcntrsts.attr)
394 static const struct attribute_group port_hdr_group = {
395 .attrs = port_hdr_attrs,
396 .is_visible = port_hdr_attrs_visible,
/* Port header sub-feature init: put the port into a known state via reset. */
static int port_hdr_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	port_reset(pdev);

	return 0;
}
408 port_hdr_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
409 unsigned int cmd, unsigned long arg)
414 case DFL_FPGA_PORT_RESET:
416 ret = port_reset(pdev);
421 dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
428 static const struct dfl_feature_id port_hdr_id_table[] = {
429 {.id = PORT_FEATURE_ID_HEADER,},
433 static const struct dfl_feature_ops port_hdr_ops = {
434 .init = port_hdr_init,
435 .ioctl = port_hdr_ioctl,
439 afu_id_show(struct device *dev, struct device_attribute *attr, char *buf)
441 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
445 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_AFU);
447 mutex_lock(&pdata->lock);
448 if (pdata->disable_count) {
449 mutex_unlock(&pdata->lock);
453 guidl = readq(base + GUID_L);
454 guidh = readq(base + GUID_H);
455 mutex_unlock(&pdata->lock);
457 return scnprintf(buf, PAGE_SIZE, "%016llx%016llx\n", guidh, guidl);
459 static DEVICE_ATTR_RO(afu_id);
461 static struct attribute *port_afu_attrs[] = {
462 &dev_attr_afu_id.attr,
466 static umode_t port_afu_attrs_visible(struct kobject *kobj,
467 struct attribute *attr, int n)
469 struct device *dev = kobj_to_dev(kobj);
472 * sysfs entries are visible only if related private feature is
475 if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_AFU))
481 static const struct attribute_group port_afu_group = {
482 .attrs = port_afu_attrs,
483 .is_visible = port_afu_attrs_visible,
486 static int port_afu_init(struct platform_device *pdev,
487 struct dfl_feature *feature)
489 struct resource *res = &pdev->resource[feature->resource_index];
491 return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
492 DFL_PORT_REGION_INDEX_AFU,
493 resource_size(res), res->start,
494 DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
495 DFL_PORT_REGION_WRITE);
498 static const struct dfl_feature_id port_afu_id_table[] = {
499 {.id = PORT_FEATURE_ID_AFU,},
503 static const struct dfl_feature_ops port_afu_ops = {
504 .init = port_afu_init,
507 static struct dfl_feature_driver port_feature_drvs[] = {
509 .id_table = port_hdr_id_table,
510 .ops = &port_hdr_ops,
513 .id_table = port_afu_id_table,
514 .ops = &port_afu_ops,
521 static int afu_open(struct inode *inode, struct file *filp)
523 struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
524 struct dfl_feature_platform_data *pdata;
527 pdata = dev_get_platdata(&fdev->dev);
531 ret = dfl_feature_dev_use_begin(pdata);
535 dev_dbg(&fdev->dev, "Device File Open\n");
536 filp->private_data = fdev;
541 static int afu_release(struct inode *inode, struct file *filp)
543 struct platform_device *pdev = filp->private_data;
544 struct dfl_feature_platform_data *pdata;
546 dev_dbg(&pdev->dev, "Device File Release\n");
548 pdata = dev_get_platdata(&pdev->dev);
550 mutex_lock(&pdata->lock);
552 afu_dma_region_destroy(pdata);
553 mutex_unlock(&pdata->lock);
555 dfl_feature_dev_use_end(pdata);
static long afu_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
				      unsigned long arg)
{
	/* No extension support for now */
	return 0;
}
568 afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
570 struct dfl_fpga_port_info info;
574 minsz = offsetofend(struct dfl_fpga_port_info, num_umsgs);
576 if (copy_from_user(&info, arg, minsz))
579 if (info.argsz < minsz)
582 mutex_lock(&pdata->lock);
583 afu = dfl_fpga_pdata_get_private(pdata);
585 info.num_regions = afu->num_regions;
586 info.num_umsgs = afu->num_umsgs;
587 mutex_unlock(&pdata->lock);
589 if (copy_to_user(arg, &info, sizeof(info)))
595 static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
598 struct dfl_fpga_port_region_info rinfo;
599 struct dfl_afu_mmio_region region;
603 minsz = offsetofend(struct dfl_fpga_port_region_info, offset);
605 if (copy_from_user(&rinfo, arg, minsz))
608 if (rinfo.argsz < minsz || rinfo.padding)
611 ret = afu_mmio_region_get_by_index(pdata, rinfo.index, ®ion);
615 rinfo.flags = region.flags;
616 rinfo.size = region.size;
617 rinfo.offset = region.offset;
619 if (copy_to_user(arg, &rinfo, sizeof(rinfo)))
626 afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
628 struct dfl_fpga_port_dma_map map;
632 minsz = offsetofend(struct dfl_fpga_port_dma_map, iova);
634 if (copy_from_user(&map, arg, minsz))
637 if (map.argsz < minsz || map.flags)
640 ret = afu_dma_map_region(pdata, map.user_addr, map.length, &map.iova);
644 if (copy_to_user(arg, &map, sizeof(map))) {
645 afu_dma_unmap_region(pdata, map.iova);
649 dev_dbg(&pdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
650 (unsigned long long)map.user_addr,
651 (unsigned long long)map.length,
652 (unsigned long long)map.iova);
658 afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg)
660 struct dfl_fpga_port_dma_unmap unmap;
663 minsz = offsetofend(struct dfl_fpga_port_dma_unmap, iova);
665 if (copy_from_user(&unmap, arg, minsz))
668 if (unmap.argsz < minsz || unmap.flags)
671 return afu_dma_unmap_region(pdata, unmap.iova);
674 static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
676 struct platform_device *pdev = filp->private_data;
677 struct dfl_feature_platform_data *pdata;
678 struct dfl_feature *f;
681 dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);
683 pdata = dev_get_platdata(&pdev->dev);
686 case DFL_FPGA_GET_API_VERSION:
687 return DFL_FPGA_API_VERSION;
688 case DFL_FPGA_CHECK_EXTENSION:
689 return afu_ioctl_check_extension(pdata, arg);
690 case DFL_FPGA_PORT_GET_INFO:
691 return afu_ioctl_get_info(pdata, (void __user *)arg);
692 case DFL_FPGA_PORT_GET_REGION_INFO:
693 return afu_ioctl_get_region_info(pdata, (void __user *)arg);
694 case DFL_FPGA_PORT_DMA_MAP:
695 return afu_ioctl_dma_map(pdata, (void __user *)arg);
696 case DFL_FPGA_PORT_DMA_UNMAP:
697 return afu_ioctl_dma_unmap(pdata, (void __user *)arg);
700 * Let sub-feature's ioctl function to handle the cmd
701 * Sub-feature's ioctl returns -ENODEV when cmd is not
702 * handled in this sub feature, and returns 0 and other
703 * error code if cmd is handled.
705 dfl_fpga_dev_for_each_feature(pdata, f)
706 if (f->ops && f->ops->ioctl) {
707 ret = f->ops->ioctl(pdev, f, cmd, arg);
716 static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
718 struct platform_device *pdev = filp->private_data;
719 struct dfl_feature_platform_data *pdata;
720 u64 size = vma->vm_end - vma->vm_start;
721 struct dfl_afu_mmio_region region;
725 if (!(vma->vm_flags & VM_SHARED))
728 pdata = dev_get_platdata(&pdev->dev);
730 offset = vma->vm_pgoff << PAGE_SHIFT;
731 ret = afu_mmio_region_get_by_offset(pdata, offset, size, ®ion);
735 if (!(region.flags & DFL_PORT_REGION_MMAP))
738 if ((vma->vm_flags & VM_READ) && !(region.flags & DFL_PORT_REGION_READ))
741 if ((vma->vm_flags & VM_WRITE) &&
742 !(region.flags & DFL_PORT_REGION_WRITE))
745 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
747 return remap_pfn_range(vma, vma->vm_start,
748 (region.phys + (offset - region.offset)) >> PAGE_SHIFT,
749 size, vma->vm_page_prot);
752 static const struct file_operations afu_fops = {
753 .owner = THIS_MODULE,
755 .release = afu_release,
756 .unlocked_ioctl = afu_ioctl,
760 static int afu_dev_init(struct platform_device *pdev)
762 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
765 afu = devm_kzalloc(&pdev->dev, sizeof(*afu), GFP_KERNEL);
771 mutex_lock(&pdata->lock);
772 dfl_fpga_pdata_set_private(pdata, afu);
773 afu_mmio_region_init(pdata);
774 afu_dma_region_init(pdata);
775 mutex_unlock(&pdata->lock);
780 static int afu_dev_destroy(struct platform_device *pdev)
782 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
785 mutex_lock(&pdata->lock);
786 afu = dfl_fpga_pdata_get_private(pdata);
787 afu_mmio_region_destroy(pdata);
788 afu_dma_region_destroy(pdata);
789 dfl_fpga_pdata_set_private(pdata, NULL);
790 mutex_unlock(&pdata->lock);
795 static int port_enable_set(struct platform_device *pdev, bool enable)
797 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
800 mutex_lock(&pdata->lock);
804 ret = port_disable(pdev);
805 mutex_unlock(&pdata->lock);
810 static struct dfl_fpga_port_ops afu_port_ops = {
811 .name = DFL_FPGA_FEATURE_DEV_PORT,
812 .owner = THIS_MODULE,
813 .get_id = port_get_id,
814 .enable_set = port_enable_set,
817 static int afu_probe(struct platform_device *pdev)
821 dev_dbg(&pdev->dev, "%s\n", __func__);
823 ret = afu_dev_init(pdev);
827 ret = dfl_fpga_dev_feature_init(pdev, port_feature_drvs);
831 ret = dfl_fpga_dev_ops_register(pdev, &afu_fops, THIS_MODULE);
833 dfl_fpga_dev_feature_uinit(pdev);
840 afu_dev_destroy(pdev);
845 static int afu_remove(struct platform_device *pdev)
847 dev_dbg(&pdev->dev, "%s\n", __func__);
849 dfl_fpga_dev_ops_unregister(pdev);
850 dfl_fpga_dev_feature_uinit(pdev);
851 afu_dev_destroy(pdev);
856 static const struct attribute_group *afu_dev_groups[] = {
862 static struct platform_driver afu_driver = {
864 .name = DFL_FPGA_FEATURE_DEV_PORT,
865 .dev_groups = afu_dev_groups,
868 .remove = afu_remove,
871 static int __init afu_init(void)
875 dfl_fpga_port_ops_add(&afu_port_ops);
877 ret = platform_driver_register(&afu_driver);
879 dfl_fpga_port_ops_del(&afu_port_ops);
884 static void __exit afu_exit(void)
886 platform_driver_unregister(&afu_driver);
888 dfl_fpga_port_ops_del(&afu_port_ops);
891 module_init(afu_init);
892 module_exit(afu_exit);
894 MODULE_DESCRIPTION("FPGA Accelerated Function Unit driver");
895 MODULE_AUTHOR("Intel Corporation");
896 MODULE_LICENSE("GPL v2");
897 MODULE_ALIAS("platform:dfl-port");