/*
 * Microsemi Switchtec(tm) PCIe Management Driver
 * Copyright (c) 2017, Microsemi Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/switchtec_ioctl.h>

#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
27 MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
28 MODULE_VERSION("0.1");
29 MODULE_LICENSE("GPL");
30 MODULE_AUTHOR("Microsemi Corporation");
32 static int max_devices = 16;
33 module_param(max_devices, int, 0644);
34 MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");
36 static dev_t switchtec_devt;
37 static struct class *switchtec_class;
38 static DEFINE_IDA(switchtec_minor_ida);
40 #define MICROSEMI_VENDOR_ID 0x11f8
41 #define MICROSEMI_NTB_CLASSCODE 0x068000
42 #define MICROSEMI_MGMT_CLASSCODE 0x058000
44 #define SWITCHTEC_MRPC_PAYLOAD_SIZE 1024
45 #define SWITCHTEC_MAX_PFF_CSR 48
47 #define SWITCHTEC_EVENT_OCCURRED BIT(0)
48 #define SWITCHTEC_EVENT_CLEAR BIT(0)
49 #define SWITCHTEC_EVENT_EN_LOG BIT(1)
50 #define SWITCHTEC_EVENT_EN_CLI BIT(2)
51 #define SWITCHTEC_EVENT_EN_IRQ BIT(3)
52 #define SWITCHTEC_EVENT_FATAL BIT(4)
/* Byte offsets of the register regions within the Global Address Space (GAS) BAR */
enum {
	SWITCHTEC_GAS_MRPC_OFFSET       = 0x0000,
	SWITCHTEC_GAS_TOP_CFG_OFFSET    = 0x1000,
	SWITCHTEC_GAS_SW_EVENT_OFFSET   = 0x1800,
	SWITCHTEC_GAS_SYS_INFO_OFFSET   = 0x2000,
	SWITCHTEC_GAS_FLASH_INFO_OFFSET = 0x2200,
	SWITCHTEC_GAS_PART_CFG_OFFSET   = 0x4000,
	SWITCHTEC_GAS_NTB_OFFSET        = 0x10000,
	SWITCHTEC_GAS_PFF_CSR_OFFSET    = 0x134000,
};
66 u8 input_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
67 u8 output_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
/* Values of mrpc_regs.status reported by the firmware */
enum mrpc_status {
	SWITCHTEC_MRPC_STATUS_INPROGRESS = 1,
	SWITCHTEC_MRPC_STATUS_DONE = 2,
	SWITCHTEC_MRPC_STATUS_ERROR = 0xFF,
	SWITCHTEC_MRPC_STATUS_INTERRUPTED = 0x100,
};
80 struct sw_event_regs {
81 u64 event_report_ctrl;
83 u64 part_event_bitmap;
87 u32 stack_error_event_hdr;
88 u32 stack_error_event_data;
90 u32 ppu_error_event_hdr;
91 u32 ppu_error_event_data;
93 u32 isp_error_event_hdr;
94 u32 isp_error_event_data;
96 u32 sys_reset_event_hdr;
102 u32 fw_non_fatal_hdr;
106 u32 twi_mrpc_comp_hdr;
107 u32 twi_mrpc_comp_data;
109 u32 twi_mrpc_comp_async_hdr;
110 u32 twi_mrpc_comp_async_data;
112 u32 cli_mrpc_comp_hdr;
113 u32 cli_mrpc_comp_data;
115 u32 cli_mrpc_comp_async_hdr;
116 u32 cli_mrpc_comp_async_data;
118 u32 gpio_interrupt_hdr;
119 u32 gpio_interrupt_data;
/* Values of sys_info_regs.cfg_running / img_running identifying the active bank */
enum {
	SWITCHTEC_CFG0_RUNNING = 0x04,
	SWITCHTEC_CFG1_RUNNING = 0x05,
	SWITCHTEC_IMG0_RUNNING = 0x03,
	SWITCHTEC_IMG1_RUNNING = 0x07,
};
130 struct sys_info_regs {
133 u32 firmware_version;
135 u32 vendor_table_revision;
136 u32 table_format_version;
138 u32 cfg_file_fmt_version;
144 char product_revision[4];
145 char component_vendor[8];
147 u8 component_revision;
150 struct flash_info_regs {
151 u32 flash_part_map_upd_idx;
153 struct active_partition_info {
159 struct active_partition_info active_cfg;
160 struct active_partition_info inactive_img;
161 struct active_partition_info inactive_cfg;
165 struct partition_info {
170 struct partition_info cfg1;
171 struct partition_info img0;
172 struct partition_info img1;
173 struct partition_info nvlog;
174 struct partition_info vendor[8];
177 struct ntb_info_regs {
185 struct part_cfg_regs {
192 u32 dsp_pff_inst_id[47];
194 u16 vep_vector_number;
195 u16 usp_vector_number;
196 u32 port_event_bitmap;
198 u32 part_event_summary;
201 u32 part_reset_data[5];
203 u32 mrpc_comp_data[5];
204 u32 mrpc_comp_async_hdr;
205 u32 mrpc_comp_async_data[5];
207 u32 dyn_binding_data[5];
/* Bits of part_cfg_regs.part_event_summary */
enum {
	SWITCHTEC_PART_CFG_EVENT_RESET = 1 << 0,
	SWITCHTEC_PART_CFG_EVENT_MRPC_CMP = 1 << 1,
	SWITCHTEC_PART_CFG_EVENT_MRPC_ASYNC_CMP = 1 << 2,
	SWITCHTEC_PART_CFG_EVENT_DYN_PART_CMP = 1 << 3,
};
218 struct pff_csr_regs {
221 u32 pci_cfg_header[15];
222 u32 pci_cap_region[48];
223 u32 pcie_cap_region[448];
224 u32 indirect_gas_window[128];
225 u32 indirect_gas_window_off;
227 u32 pff_event_summary;
230 u32 aer_in_p2p_data[5];
232 u32 aer_in_vep_data[5];
243 u32 threshold_data[5];
245 u32 power_mgmt_data[5];
246 u32 tlp_throttling_hdr;
247 u32 tlp_throttling_data[5];
249 u32 force_speed_data[5];
250 u32 credit_timeout_hdr;
251 u32 credit_timeout_data[5];
253 u32 link_state_data[5];
257 struct switchtec_dev {
258 struct pci_dev *pdev;
265 char pff_local[SWITCHTEC_MAX_PFF_CSR];
268 struct mrpc_regs __iomem *mmio_mrpc;
269 struct sw_event_regs __iomem *mmio_sw_event;
270 struct sys_info_regs __iomem *mmio_sys_info;
271 struct flash_info_regs __iomem *mmio_flash_info;
272 struct ntb_info_regs __iomem *mmio_ntb;
273 struct part_cfg_regs __iomem *mmio_part_cfg;
274 struct part_cfg_regs __iomem *mmio_part_cfg_all;
275 struct pff_csr_regs __iomem *mmio_pff_csr;
278 * The mrpc mutex must be held when accessing the other
279 * mrpc_ fields, alive flag and stuser->state field
281 struct mutex mrpc_mutex;
282 struct list_head mrpc_queue;
284 struct work_struct mrpc_work;
285 struct delayed_work mrpc_timeout;
288 wait_queue_head_t event_wq;
292 static struct switchtec_dev *to_stdev(struct device *dev)
294 return container_of(dev, struct switchtec_dev, dev);
304 struct switchtec_user {
305 struct switchtec_dev *stdev;
307 enum mrpc_state state;
309 struct completion comp;
311 struct list_head list;
318 unsigned char data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
322 static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
324 struct switchtec_user *stuser;
326 stuser = kzalloc(sizeof(*stuser), GFP_KERNEL);
328 return ERR_PTR(-ENOMEM);
330 get_device(&stdev->dev);
331 stuser->stdev = stdev;
332 kref_init(&stuser->kref);
333 INIT_LIST_HEAD(&stuser->list);
334 init_completion(&stuser->comp);
335 stuser->event_cnt = atomic_read(&stdev->event_cnt);
337 dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
342 static void stuser_free(struct kref *kref)
344 struct switchtec_user *stuser;
346 stuser = container_of(kref, struct switchtec_user, kref);
348 dev_dbg(&stuser->stdev->dev, "%s: %p\n", __func__, stuser);
350 put_device(&stuser->stdev->dev);
354 static void stuser_put(struct switchtec_user *stuser)
356 kref_put(&stuser->kref, stuser_free);
359 static void stuser_set_state(struct switchtec_user *stuser,
360 enum mrpc_state state)
362 /* requires the mrpc_mutex to already be held when called */
364 const char * const state_names[] = {
365 [MRPC_IDLE] = "IDLE",
366 [MRPC_QUEUED] = "QUEUED",
367 [MRPC_RUNNING] = "RUNNING",
368 [MRPC_DONE] = "DONE",
371 stuser->state = state;
373 dev_dbg(&stuser->stdev->dev, "stuser state %p -> %s",
374 stuser, state_names[state]);
377 static void mrpc_complete_cmd(struct switchtec_dev *stdev);
379 static void mrpc_cmd_submit(struct switchtec_dev *stdev)
381 /* requires the mrpc_mutex to already be held when called */
383 struct switchtec_user *stuser;
385 if (stdev->mrpc_busy)
388 if (list_empty(&stdev->mrpc_queue))
391 stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
394 stuser_set_state(stuser, MRPC_RUNNING);
395 stdev->mrpc_busy = 1;
396 memcpy_toio(&stdev->mmio_mrpc->input_data,
397 stuser->data, stuser->data_len);
398 iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);
400 stuser->status = ioread32(&stdev->mmio_mrpc->status);
401 if (stuser->status != SWITCHTEC_MRPC_STATUS_INPROGRESS)
402 mrpc_complete_cmd(stdev);
404 schedule_delayed_work(&stdev->mrpc_timeout,
405 msecs_to_jiffies(500));
408 static int mrpc_queue_cmd(struct switchtec_user *stuser)
410 /* requires the mrpc_mutex to already be held when called */
412 struct switchtec_dev *stdev = stuser->stdev;
414 kref_get(&stuser->kref);
415 stuser->read_len = sizeof(stuser->data);
416 stuser_set_state(stuser, MRPC_QUEUED);
417 init_completion(&stuser->comp);
418 list_add_tail(&stuser->list, &stdev->mrpc_queue);
420 mrpc_cmd_submit(stdev);
425 static void mrpc_complete_cmd(struct switchtec_dev *stdev)
427 /* requires the mrpc_mutex to already be held when called */
428 struct switchtec_user *stuser;
430 if (list_empty(&stdev->mrpc_queue))
433 stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
436 stuser->status = ioread32(&stdev->mmio_mrpc->status);
437 if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
440 stuser_set_state(stuser, MRPC_DONE);
441 stuser->return_code = 0;
443 if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE)
446 stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
447 if (stuser->return_code != 0)
450 memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
454 complete_all(&stuser->comp);
455 list_del_init(&stuser->list);
457 stdev->mrpc_busy = 0;
459 mrpc_cmd_submit(stdev);
462 static void mrpc_event_work(struct work_struct *work)
464 struct switchtec_dev *stdev;
466 stdev = container_of(work, struct switchtec_dev, mrpc_work);
468 dev_dbg(&stdev->dev, "%s\n", __func__);
470 mutex_lock(&stdev->mrpc_mutex);
471 cancel_delayed_work(&stdev->mrpc_timeout);
472 mrpc_complete_cmd(stdev);
473 mutex_unlock(&stdev->mrpc_mutex);
476 static void mrpc_timeout_work(struct work_struct *work)
478 struct switchtec_dev *stdev;
481 stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work);
483 dev_dbg(&stdev->dev, "%s\n", __func__);
485 mutex_lock(&stdev->mrpc_mutex);
487 status = ioread32(&stdev->mmio_mrpc->status);
488 if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) {
489 schedule_delayed_work(&stdev->mrpc_timeout,
490 msecs_to_jiffies(500));
494 mrpc_complete_cmd(stdev);
497 mutex_unlock(&stdev->mrpc_mutex);
500 static ssize_t device_version_show(struct device *dev,
501 struct device_attribute *attr, char *buf)
503 struct switchtec_dev *stdev = to_stdev(dev);
506 ver = ioread32(&stdev->mmio_sys_info->device_version);
508 return sprintf(buf, "%x\n", ver);
510 static DEVICE_ATTR_RO(device_version);
512 static ssize_t fw_version_show(struct device *dev,
513 struct device_attribute *attr, char *buf)
515 struct switchtec_dev *stdev = to_stdev(dev);
518 ver = ioread32(&stdev->mmio_sys_info->firmware_version);
520 return sprintf(buf, "%08x\n", ver);
522 static DEVICE_ATTR_RO(fw_version);
524 static ssize_t io_string_show(char *buf, void __iomem *attr, size_t len)
528 memcpy_fromio(buf, attr, len);
532 for (i = len - 1; i > 0; i--) {
/* Generate a read-only sysfs attribute for a string field of sys_info_regs */
#define DEVICE_ATTR_SYS_INFO_STR(field) \
static ssize_t field ## _show(struct device *dev, \
	struct device_attribute *attr, char *buf) \
{ \
	struct switchtec_dev *stdev = to_stdev(dev); \
	return io_string_show(buf, &stdev->mmio_sys_info->field, \
			      sizeof(stdev->mmio_sys_info->field)); \
} \
\
static DEVICE_ATTR_RO(field)
553 DEVICE_ATTR_SYS_INFO_STR(vendor_id);
554 DEVICE_ATTR_SYS_INFO_STR(product_id);
555 DEVICE_ATTR_SYS_INFO_STR(product_revision);
556 DEVICE_ATTR_SYS_INFO_STR(component_vendor);
558 static ssize_t component_id_show(struct device *dev,
559 struct device_attribute *attr, char *buf)
561 struct switchtec_dev *stdev = to_stdev(dev);
562 int id = ioread16(&stdev->mmio_sys_info->component_id);
564 return sprintf(buf, "PM%04X\n", id);
566 static DEVICE_ATTR_RO(component_id);
568 static ssize_t component_revision_show(struct device *dev,
569 struct device_attribute *attr, char *buf)
571 struct switchtec_dev *stdev = to_stdev(dev);
572 int rev = ioread8(&stdev->mmio_sys_info->component_revision);
574 return sprintf(buf, "%d\n", rev);
576 static DEVICE_ATTR_RO(component_revision);
578 static ssize_t partition_show(struct device *dev,
579 struct device_attribute *attr, char *buf)
581 struct switchtec_dev *stdev = to_stdev(dev);
583 return sprintf(buf, "%d\n", stdev->partition);
585 static DEVICE_ATTR_RO(partition);
587 static ssize_t partition_count_show(struct device *dev,
588 struct device_attribute *attr, char *buf)
590 struct switchtec_dev *stdev = to_stdev(dev);
592 return sprintf(buf, "%d\n", stdev->partition_count);
594 static DEVICE_ATTR_RO(partition_count);
596 static struct attribute *switchtec_device_attrs[] = {
597 &dev_attr_device_version.attr,
598 &dev_attr_fw_version.attr,
599 &dev_attr_vendor_id.attr,
600 &dev_attr_product_id.attr,
601 &dev_attr_product_revision.attr,
602 &dev_attr_component_vendor.attr,
603 &dev_attr_component_id.attr,
604 &dev_attr_component_revision.attr,
605 &dev_attr_partition.attr,
606 &dev_attr_partition_count.attr,
610 ATTRIBUTE_GROUPS(switchtec_device);
612 static int switchtec_dev_open(struct inode *inode, struct file *filp)
614 struct switchtec_dev *stdev;
615 struct switchtec_user *stuser;
617 stdev = container_of(inode->i_cdev, struct switchtec_dev, cdev);
619 stuser = stuser_create(stdev);
621 return PTR_ERR(stuser);
623 filp->private_data = stuser;
624 nonseekable_open(inode, filp);
626 dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);
631 static int switchtec_dev_release(struct inode *inode, struct file *filp)
633 struct switchtec_user *stuser = filp->private_data;
640 static int lock_mutex_and_test_alive(struct switchtec_dev *stdev)
642 if (mutex_lock_interruptible(&stdev->mrpc_mutex))
646 mutex_unlock(&stdev->mrpc_mutex);
653 static ssize_t switchtec_dev_write(struct file *filp, const char __user *data,
654 size_t size, loff_t *off)
656 struct switchtec_user *stuser = filp->private_data;
657 struct switchtec_dev *stdev = stuser->stdev;
660 if (size < sizeof(stuser->cmd) ||
661 size > sizeof(stuser->cmd) + sizeof(stuser->data))
664 stuser->data_len = size - sizeof(stuser->cmd);
666 rc = lock_mutex_and_test_alive(stdev);
670 if (stuser->state != MRPC_IDLE) {
675 rc = copy_from_user(&stuser->cmd, data, sizeof(stuser->cmd));
681 data += sizeof(stuser->cmd);
682 rc = copy_from_user(&stuser->data, data, size - sizeof(stuser->cmd));
688 rc = mrpc_queue_cmd(stuser);
691 mutex_unlock(&stdev->mrpc_mutex);
699 static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
700 size_t size, loff_t *off)
702 struct switchtec_user *stuser = filp->private_data;
703 struct switchtec_dev *stdev = stuser->stdev;
706 if (size < sizeof(stuser->cmd) ||
707 size > sizeof(stuser->cmd) + sizeof(stuser->data))
710 rc = lock_mutex_and_test_alive(stdev);
714 if (stuser->state == MRPC_IDLE) {
715 mutex_unlock(&stdev->mrpc_mutex);
719 stuser->read_len = size - sizeof(stuser->return_code);
721 mutex_unlock(&stdev->mrpc_mutex);
723 if (filp->f_flags & O_NONBLOCK) {
724 if (!try_wait_for_completion(&stuser->comp))
727 rc = wait_for_completion_interruptible(&stuser->comp);
732 rc = lock_mutex_and_test_alive(stdev);
736 if (stuser->state != MRPC_DONE) {
737 mutex_unlock(&stdev->mrpc_mutex);
741 rc = copy_to_user(data, &stuser->return_code,
742 sizeof(stuser->return_code));
748 data += sizeof(stuser->return_code);
749 rc = copy_to_user(data, &stuser->data,
750 size - sizeof(stuser->return_code));
756 stuser_set_state(stuser, MRPC_IDLE);
759 mutex_unlock(&stdev->mrpc_mutex);
761 if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE)
763 else if (stuser->status == SWITCHTEC_MRPC_STATUS_INTERRUPTED)
769 static unsigned int switchtec_dev_poll(struct file *filp, poll_table *wait)
771 struct switchtec_user *stuser = filp->private_data;
772 struct switchtec_dev *stdev = stuser->stdev;
775 poll_wait(filp, &stuser->comp.wait, wait);
776 poll_wait(filp, &stdev->event_wq, wait);
778 if (lock_mutex_and_test_alive(stdev))
779 return POLLIN | POLLRDHUP | POLLOUT | POLLERR | POLLHUP;
781 mutex_unlock(&stdev->mrpc_mutex);
783 if (try_wait_for_completion(&stuser->comp))
784 ret |= POLLIN | POLLRDNORM;
786 if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
787 ret |= POLLPRI | POLLRDBAND;
792 static int ioctl_flash_info(struct switchtec_dev *stdev,
793 struct switchtec_ioctl_flash_info __user *uinfo)
795 struct switchtec_ioctl_flash_info info = {0};
796 struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
798 info.flash_length = ioread32(&fi->flash_length);
799 info.num_partitions = SWITCHTEC_IOCTL_NUM_PARTITIONS;
801 if (copy_to_user(uinfo, &info, sizeof(info)))
807 static void set_fw_info_part(struct switchtec_ioctl_flash_part_info *info,
808 struct partition_info __iomem *pi)
810 info->address = ioread32(&pi->address);
811 info->length = ioread32(&pi->length);
814 static int ioctl_flash_part_info(struct switchtec_dev *stdev,
815 struct switchtec_ioctl_flash_part_info __user *uinfo)
817 struct switchtec_ioctl_flash_part_info info = {0};
818 struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
819 struct sys_info_regs __iomem *si = stdev->mmio_sys_info;
820 u32 active_addr = -1;
822 if (copy_from_user(&info, uinfo, sizeof(info)))
825 switch (info.flash_partition) {
826 case SWITCHTEC_IOCTL_PART_CFG0:
827 active_addr = ioread32(&fi->active_cfg);
828 set_fw_info_part(&info, &fi->cfg0);
829 if (ioread16(&si->cfg_running) == SWITCHTEC_CFG0_RUNNING)
830 info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
832 case SWITCHTEC_IOCTL_PART_CFG1:
833 active_addr = ioread32(&fi->active_cfg);
834 set_fw_info_part(&info, &fi->cfg1);
835 if (ioread16(&si->cfg_running) == SWITCHTEC_CFG1_RUNNING)
836 info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
838 case SWITCHTEC_IOCTL_PART_IMG0:
839 active_addr = ioread32(&fi->active_img);
840 set_fw_info_part(&info, &fi->img0);
841 if (ioread16(&si->img_running) == SWITCHTEC_IMG0_RUNNING)
842 info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
844 case SWITCHTEC_IOCTL_PART_IMG1:
845 active_addr = ioread32(&fi->active_img);
846 set_fw_info_part(&info, &fi->img1);
847 if (ioread16(&si->img_running) == SWITCHTEC_IMG1_RUNNING)
848 info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
850 case SWITCHTEC_IOCTL_PART_NVLOG:
851 set_fw_info_part(&info, &fi->nvlog);
853 case SWITCHTEC_IOCTL_PART_VENDOR0:
854 set_fw_info_part(&info, &fi->vendor[0]);
856 case SWITCHTEC_IOCTL_PART_VENDOR1:
857 set_fw_info_part(&info, &fi->vendor[1]);
859 case SWITCHTEC_IOCTL_PART_VENDOR2:
860 set_fw_info_part(&info, &fi->vendor[2]);
862 case SWITCHTEC_IOCTL_PART_VENDOR3:
863 set_fw_info_part(&info, &fi->vendor[3]);
865 case SWITCHTEC_IOCTL_PART_VENDOR4:
866 set_fw_info_part(&info, &fi->vendor[4]);
868 case SWITCHTEC_IOCTL_PART_VENDOR5:
869 set_fw_info_part(&info, &fi->vendor[5]);
871 case SWITCHTEC_IOCTL_PART_VENDOR6:
872 set_fw_info_part(&info, &fi->vendor[6]);
874 case SWITCHTEC_IOCTL_PART_VENDOR7:
875 set_fw_info_part(&info, &fi->vendor[7]);
881 if (info.address == active_addr)
882 info.active |= SWITCHTEC_IOCTL_PART_ACTIVE;
884 if (copy_to_user(uinfo, &info, sizeof(info)))
890 static int ioctl_event_summary(struct switchtec_dev *stdev,
891 struct switchtec_user *stuser,
892 struct switchtec_ioctl_event_summary __user *usum)
894 struct switchtec_ioctl_event_summary s = {0};
898 s.global = ioread32(&stdev->mmio_sw_event->global_summary);
899 s.part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
900 s.local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);
902 for (i = 0; i < stdev->partition_count; i++) {
903 reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary);
907 for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
908 reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
909 if (reg != MICROSEMI_VENDOR_ID)
912 reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
916 if (copy_to_user(usum, &s, sizeof(s)))
919 stuser->event_cnt = atomic_read(&stdev->event_cnt);
924 static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev,
925 size_t offset, int index)
927 return (void __iomem *)stdev->mmio_sw_event + offset;
930 static u32 __iomem *part_ev_reg(struct switchtec_dev *stdev,
931 size_t offset, int index)
933 return (void __iomem *)&stdev->mmio_part_cfg_all[index] + offset;
936 static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev,
937 size_t offset, int index)
939 return (void __iomem *)&stdev->mmio_pff_csr[index] + offset;
942 #define EV_GLB(i, r)[i] = {offsetof(struct sw_event_regs, r), global_ev_reg}
943 #define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg}
944 #define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg}
946 const struct event_reg {
948 u32 __iomem *(*map_reg)(struct switchtec_dev *stdev,
949 size_t offset, int index);
951 EV_GLB(SWITCHTEC_IOCTL_EVENT_STACK_ERROR, stack_error_event_hdr),
952 EV_GLB(SWITCHTEC_IOCTL_EVENT_PPU_ERROR, ppu_error_event_hdr),
953 EV_GLB(SWITCHTEC_IOCTL_EVENT_ISP_ERROR, isp_error_event_hdr),
954 EV_GLB(SWITCHTEC_IOCTL_EVENT_SYS_RESET, sys_reset_event_hdr),
955 EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_EXC, fw_exception_hdr),
956 EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NMI, fw_nmi_hdr),
957 EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL, fw_non_fatal_hdr),
958 EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_FATAL, fw_fatal_hdr),
959 EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP, twi_mrpc_comp_hdr),
960 EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC,
961 twi_mrpc_comp_async_hdr),
962 EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP, cli_mrpc_comp_hdr),
963 EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC,
964 cli_mrpc_comp_async_hdr),
965 EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT, gpio_interrupt_hdr),
966 EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET, part_reset_hdr),
967 EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr),
968 EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr),
969 EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP, dyn_binding_hdr),
970 EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P, aer_in_p2p_hdr),
971 EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP, aer_in_vep_hdr),
972 EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC, dpc_hdr),
973 EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS, cts_hdr),
974 EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG, hotplug_hdr),
975 EV_PFF(SWITCHTEC_IOCTL_EVENT_IER, ier_hdr),
976 EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH, threshold_hdr),
977 EV_PFF(SWITCHTEC_IOCTL_EVENT_POWER_MGMT, power_mgmt_hdr),
978 EV_PFF(SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING, tlp_throttling_hdr),
979 EV_PFF(SWITCHTEC_IOCTL_EVENT_FORCE_SPEED, force_speed_hdr),
980 EV_PFF(SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT, credit_timeout_hdr),
981 EV_PFF(SWITCHTEC_IOCTL_EVENT_LINK_STATE, link_state_hdr),
984 static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev,
985 int event_id, int index)
989 if (event_id < 0 || event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
990 return ERR_PTR(-EINVAL);
992 off = event_regs[event_id].offset;
994 if (event_regs[event_id].map_reg == part_ev_reg) {
995 if (index == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
996 index = stdev->partition;
997 else if (index < 0 || index >= stdev->partition_count)
998 return ERR_PTR(-EINVAL);
999 } else if (event_regs[event_id].map_reg == pff_ev_reg) {
1000 if (index < 0 || index >= stdev->pff_csr_count)
1001 return ERR_PTR(-EINVAL);
1004 return event_regs[event_id].map_reg(stdev, off, index);
1007 static int event_ctl(struct switchtec_dev *stdev,
1008 struct switchtec_ioctl_event_ctl *ctl)
1014 reg = event_hdr_addr(stdev, ctl->event_id, ctl->index);
1016 return PTR_ERR(reg);
1018 hdr = ioread32(reg);
1019 for (i = 0; i < ARRAY_SIZE(ctl->data); i++)
1020 ctl->data[i] = ioread32(®[i + 1]);
1022 ctl->occurred = hdr & SWITCHTEC_EVENT_OCCURRED;
1023 ctl->count = (hdr >> 5) & 0xFF;
1025 if (!(ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR))
1026 hdr &= ~SWITCHTEC_EVENT_CLEAR;
1027 if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL)
1028 hdr |= SWITCHTEC_EVENT_EN_IRQ;
1029 if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL)
1030 hdr &= ~SWITCHTEC_EVENT_EN_IRQ;
1031 if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG)
1032 hdr |= SWITCHTEC_EVENT_EN_LOG;
1033 if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG)
1034 hdr &= ~SWITCHTEC_EVENT_EN_LOG;
1035 if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI)
1036 hdr |= SWITCHTEC_EVENT_EN_CLI;
1037 if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI)
1038 hdr &= ~SWITCHTEC_EVENT_EN_CLI;
1039 if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL)
1040 hdr |= SWITCHTEC_EVENT_FATAL;
1041 if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL)
1042 hdr &= ~SWITCHTEC_EVENT_FATAL;
1045 iowrite32(hdr, reg);
1048 if (hdr & SWITCHTEC_EVENT_EN_IRQ)
1049 ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL;
1050 if (hdr & SWITCHTEC_EVENT_EN_LOG)
1051 ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG;
1052 if (hdr & SWITCHTEC_EVENT_EN_CLI)
1053 ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI;
1054 if (hdr & SWITCHTEC_EVENT_FATAL)
1055 ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL;
1060 static int ioctl_event_ctl(struct switchtec_dev *stdev,
1061 struct switchtec_ioctl_event_ctl __user *uctl)
1065 struct switchtec_ioctl_event_ctl ctl;
1067 if (copy_from_user(&ctl, uctl, sizeof(ctl)))
1070 if (ctl.event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
1073 if (ctl.flags & SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED)
1076 if (ctl.index == SWITCHTEC_IOCTL_EVENT_IDX_ALL) {
1077 if (event_regs[ctl.event_id].map_reg == global_ev_reg)
1079 else if (event_regs[ctl.event_id].map_reg == part_ev_reg)
1080 nr_idxs = stdev->partition_count;
1081 else if (event_regs[ctl.event_id].map_reg == pff_ev_reg)
1082 nr_idxs = stdev->pff_csr_count;
1086 for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
1087 ret = event_ctl(stdev, &ctl);
1092 ret = event_ctl(stdev, &ctl);
1097 if (copy_to_user(uctl, &ctl, sizeof(ctl)))
1103 static int ioctl_pff_to_port(struct switchtec_dev *stdev,
1104 struct switchtec_ioctl_pff_port *up)
1108 struct part_cfg_regs *pcfg;
1109 struct switchtec_ioctl_pff_port p;
1111 if (copy_from_user(&p, up, sizeof(p)))
1115 for (part = 0; part < stdev->partition_count; part++) {
1116 pcfg = &stdev->mmio_part_cfg_all[part];
1119 reg = ioread32(&pcfg->usp_pff_inst_id);
1125 reg = ioread32(&pcfg->vep_pff_inst_id);
1127 p.port = SWITCHTEC_IOCTL_PFF_VEP;
1131 for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
1132 reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
1144 if (copy_to_user(up, &p, sizeof(p)))
1150 static int ioctl_port_to_pff(struct switchtec_dev *stdev,
1151 struct switchtec_ioctl_pff_port *up)
1153 struct switchtec_ioctl_pff_port p;
1154 struct part_cfg_regs *pcfg;
1156 if (copy_from_user(&p, up, sizeof(p)))
1159 if (p.partition == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
1160 pcfg = stdev->mmio_part_cfg;
1161 else if (p.partition < stdev->partition_count)
1162 pcfg = &stdev->mmio_part_cfg_all[p.partition];
1168 p.pff = ioread32(&pcfg->usp_pff_inst_id);
1170 case SWITCHTEC_IOCTL_PFF_VEP:
1171 p.pff = ioread32(&pcfg->vep_pff_inst_id);
1174 if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
1176 p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
1180 if (copy_to_user(up, &p, sizeof(p)))
1186 static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
1189 struct switchtec_user *stuser = filp->private_data;
1190 struct switchtec_dev *stdev = stuser->stdev;
1192 void __user *argp = (void __user *)arg;
1194 rc = lock_mutex_and_test_alive(stdev);
1199 case SWITCHTEC_IOCTL_FLASH_INFO:
1200 rc = ioctl_flash_info(stdev, argp);
1202 case SWITCHTEC_IOCTL_FLASH_PART_INFO:
1203 rc = ioctl_flash_part_info(stdev, argp);
1205 case SWITCHTEC_IOCTL_EVENT_SUMMARY:
1206 rc = ioctl_event_summary(stdev, stuser, argp);
1208 case SWITCHTEC_IOCTL_EVENT_CTL:
1209 rc = ioctl_event_ctl(stdev, argp);
1211 case SWITCHTEC_IOCTL_PFF_TO_PORT:
1212 rc = ioctl_pff_to_port(stdev, argp);
1214 case SWITCHTEC_IOCTL_PORT_TO_PFF:
1215 rc = ioctl_port_to_pff(stdev, argp);
1222 mutex_unlock(&stdev->mrpc_mutex);
1226 static const struct file_operations switchtec_fops = {
1227 .owner = THIS_MODULE,
1228 .open = switchtec_dev_open,
1229 .release = switchtec_dev_release,
1230 .write = switchtec_dev_write,
1231 .read = switchtec_dev_read,
1232 .poll = switchtec_dev_poll,
1233 .unlocked_ioctl = switchtec_dev_ioctl,
1234 .compat_ioctl = switchtec_dev_ioctl,
/* struct device release callback: free the driver state on last put */
static void stdev_release(struct device *dev)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	kfree(stdev);
}
1244 static void stdev_kill(struct switchtec_dev *stdev)
1246 struct switchtec_user *stuser, *tmpuser;
1248 pci_clear_master(stdev->pdev);
1250 cancel_delayed_work_sync(&stdev->mrpc_timeout);
1252 /* Mark the hardware as unavailable and complete all completions */
1253 mutex_lock(&stdev->mrpc_mutex);
1254 stdev->alive = false;
1256 /* Wake up and kill any users waiting on an MRPC request */
1257 list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
1258 complete_all(&stuser->comp);
1259 list_del_init(&stuser->list);
1263 mutex_unlock(&stdev->mrpc_mutex);
1265 /* Wake up any users waiting on event_wq */
1266 wake_up_interruptible(&stdev->event_wq);
1269 static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
1271 struct switchtec_dev *stdev;
1277 stdev = kzalloc_node(sizeof(*stdev), GFP_KERNEL,
1278 dev_to_node(&pdev->dev));
1280 return ERR_PTR(-ENOMEM);
1282 stdev->alive = true;
1284 INIT_LIST_HEAD(&stdev->mrpc_queue);
1285 mutex_init(&stdev->mrpc_mutex);
1286 stdev->mrpc_busy = 0;
1287 INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
1288 INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
1289 init_waitqueue_head(&stdev->event_wq);
1290 atomic_set(&stdev->event_cnt, 0);
1293 device_initialize(dev);
1294 dev->class = switchtec_class;
1295 dev->parent = &pdev->dev;
1296 dev->groups = switchtec_device_groups;
1297 dev->release = stdev_release;
1299 minor = ida_simple_get(&switchtec_minor_ida, 0, 0,
1306 dev->devt = MKDEV(MAJOR(switchtec_devt), minor);
1307 dev_set_name(dev, "switchtec%d", minor);
1309 cdev = &stdev->cdev;
1310 cdev_init(cdev, &switchtec_fops);
1311 cdev->owner = THIS_MODULE;
1316 put_device(&stdev->dev);
1320 static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
1322 size_t off = event_regs[eid].offset;
1323 u32 __iomem *hdr_reg;
1326 hdr_reg = event_regs[eid].map_reg(stdev, off, idx);
1327 hdr = ioread32(hdr_reg);
1329 if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
1332 dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
1333 hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
1334 iowrite32(hdr, hdr_reg);
1339 static int mask_all_events(struct switchtec_dev *stdev, int eid)
1344 if (event_regs[eid].map_reg == part_ev_reg) {
1345 for (idx = 0; idx < stdev->partition_count; idx++)
1346 count += mask_event(stdev, eid, idx);
1347 } else if (event_regs[eid].map_reg == pff_ev_reg) {
1348 for (idx = 0; idx < stdev->pff_csr_count; idx++) {
1349 if (!stdev->pff_local[idx])
1351 count += mask_event(stdev, eid, idx);
1354 count += mask_event(stdev, eid, 0);
1360 static irqreturn_t switchtec_event_isr(int irq, void *dev)
1362 struct switchtec_dev *stdev = dev;
1364 irqreturn_t ret = IRQ_NONE;
1365 int eid, event_count = 0;
1367 reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr);
1368 if (reg & SWITCHTEC_EVENT_OCCURRED) {
1369 dev_dbg(&stdev->dev, "%s: mrpc comp\n", __func__);
1371 schedule_work(&stdev->mrpc_work);
1372 iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
1375 for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++)
1376 event_count += mask_all_events(stdev, eid);
1379 atomic_inc(&stdev->event_cnt);
1380 wake_up_interruptible(&stdev->event_wq);
1381 dev_dbg(&stdev->dev, "%s: %d events\n", __func__,
1389 static int switchtec_init_isr(struct switchtec_dev *stdev)
1394 nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, 4,
1395 PCI_IRQ_MSIX | PCI_IRQ_MSI);
1399 event_irq = ioread32(&stdev->mmio_part_cfg->vep_vector_number);
1400 if (event_irq < 0 || event_irq >= nvecs)
1403 event_irq = pci_irq_vector(stdev->pdev, event_irq);
1407 return devm_request_irq(&stdev->pdev->dev, event_irq,
1408 switchtec_event_isr, 0,
1409 KBUILD_MODNAME, stdev);
/*
 * init_pff() - discover the PFF CSR instances and mark the local ones.
 *
 * Probes up to SWITCHTEC_MAX_PFF_CSR register sets, stopping at the
 * first whose vendor_id does not read back as Microsemi (presumably a
 * `break` on the line lost in extraction); the index reached becomes
 * pff_csr_count.  It then flags as "local" the PFF instance ids the
 * hardware reports for this partition's upstream port (usp), virtual EP
 * (vep) and each downstream port (dsp), guarding every id against the
 * array bound before using it as an index into pff_local[].  The
 * pff_local[] map is later consulted when masking per-PFF events.
 */
1412 static void init_pff(struct switchtec_dev *stdev)
1416 struct part_cfg_regs *pcfg = stdev->mmio_part_cfg;
1418 for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
1419 reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
1420 if (reg != MICROSEMI_VENDOR_ID)
1424 stdev->pff_csr_count = i;
1426 reg = ioread32(&pcfg->usp_pff_inst_id);
1427 if (reg < SWITCHTEC_MAX_PFF_CSR)
1428 stdev->pff_local[reg] = 1;
1430 reg = ioread32(&pcfg->vep_pff_inst_id);
1431 if (reg < SWITCHTEC_MAX_PFF_CSR)
1432 stdev->pff_local[reg] = 1;
1434 for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
1435 reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
1436 if (reg < SWITCHTEC_MAX_PFF_CSR)
1437 stdev->pff_local[reg] = 1;
/*
 * switchtec_init_pci() - enable the PCI function and map the GAS.
 *
 * Uses managed (pcim_*) helpers so enable/iomap are undone
 * automatically on detach.  Only BAR 0 is mapped (mask 0x1); all the
 * mmio_* convenience pointers are then derived from the fixed GAS
 * offsets defined at the top of the file.  The device's own partition
 * id and the partition count are read from hardware; mmio_part_cfg is
 * pointed at this partition's slot in the per-partition config array.
 * A partition_count of 0 is clamped to 1 so loops over partitions
 * always run at least once.  Finally stdev is stashed as drvdata for
 * remove().
 *
 * Returns 0 or a negative errno from the pcim_* calls.
 * NOTE(review): the `if (rc) return rc;` lines after the pcim_* calls
 * and the final return are not visible in this extract.
 */
1441 static int switchtec_init_pci(struct switchtec_dev *stdev,
1442 struct pci_dev *pdev)
1446 rc = pcim_enable_device(pdev);
1450 rc = pcim_iomap_regions(pdev, 0x1, KBUILD_MODNAME);
1454 pci_set_master(pdev);
1456 stdev->mmio = pcim_iomap_table(pdev)[0];
1457 stdev->mmio_mrpc = stdev->mmio + SWITCHTEC_GAS_MRPC_OFFSET;
1458 stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
1459 stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
1460 stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
1461 stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
1462 stdev->partition = ioread8(&stdev->mmio_sys_info->partition_id);
1463 stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
1464 stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
1465 stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
1466 stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;
1468 if (stdev->partition_count < 1)
1469 stdev->partition_count = 1;
1473 pci_set_drvdata(pdev, stdev);
/*
 * switchtec_pci_probe() - bind the driver to a Switchtec function.
 *
 * Creates the switchtec_dev (char device state), initializes the PCI
 * resources and the event interrupt, then enables the MRPC-completion
 * interrupt by writing EVENT_CLEAR | EVENT_EN_IRQ into mrpc_comp_hdr
 * so switchtec_event_isr() learns about command completions.  Finally
 * the cdev is registered and made visible to userspace.
 *
 * Error path (labels not visible in this extract): the minor number is
 * returned to switchtec_minor_ida and the device reference is dropped,
 * which frees stdev via its release callback.
 */
1478 static int switchtec_pci_probe(struct pci_dev *pdev,
1479 const struct pci_device_id *id)
1481 struct switchtec_dev *stdev;
1484 stdev = stdev_create(pdev);
1486 return PTR_ERR(stdev);
1488 rc = switchtec_init_pci(stdev, pdev);
1492 rc = switchtec_init_isr(stdev);
1494 dev_err(&stdev->dev, "failed to init isr.\n");
1498 iowrite32(SWITCHTEC_EVENT_CLEAR |
1499 SWITCHTEC_EVENT_EN_IRQ,
1500 &stdev->mmio_part_cfg->mrpc_comp_hdr);
1502 rc = cdev_device_add(&stdev->cdev, &stdev->dev);
1506 dev_info(&stdev->dev, "Management device registered.\n");
1513 ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
1514 put_device(&stdev->dev);
/*
 * switchtec_pci_remove() - unbind; tear down in reverse of probe.
 *
 * Clears drvdata, removes the char device so no new userspace opens
 * can start, releases the minor number, and drops the probe-time
 * device reference; the stdev itself is freed by the device release
 * callback once all references (e.g. open file handles) are gone.
 * PCI resources were claimed with pcim_*/devm_* helpers and are
 * released automatically.
 */
1518 static void switchtec_pci_remove(struct pci_dev *pdev)
1520 struct switchtec_dev *stdev = pci_get_drvdata(pdev);
1522 pci_set_drvdata(pdev, NULL);
1524 cdev_device_del(&stdev->cdev, &stdev->dev);
1525 ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
1526 dev_info(&stdev->dev, "unregistered.\n");
1529 put_device(&stdev->dev);
/*
 * SWITCHTEC_PCI_DEVICE() - expand to TWO pci_device_id entries for one
 * device id: one matching the management-endpoint class code and one
 * matching the NTB-endpoint class code, so the driver binds to the
 * management function in either personality.  class_mask of all-ones
 * requires an exact class match.
 * NOTE(review): the entries' surrounding braces were lost in
 * extraction.
 */
1532 #define SWITCHTEC_PCI_DEVICE(device_id) \
1534 .vendor = MICROSEMI_VENDOR_ID, \
1535 .device = device_id, \
1536 .subvendor = PCI_ANY_ID, \
1537 .subdevice = PCI_ANY_ID, \
1538 .class = MICROSEMI_MGMT_CLASSCODE, \
1539 .class_mask = 0xFFFFFFFF, \
1542 .vendor = MICROSEMI_VENDOR_ID, \
1543 .device = device_id, \
1544 .subvendor = PCI_ANY_ID, \
1545 .subdevice = PCI_ANY_ID, \
1546 .class = MICROSEMI_NTB_CLASSCODE, \
1547 .class_mask = 0xFFFFFFFF, \
/*
 * Supported Gen3 Switchtec parts (PFX, PSX, PAX, PFXL, PFXI families,
 * 24-96 lanes).  Each macro use emits both the management and NTB
 * class-code match entries; the table is exported for module autoload.
 */
1550 static const struct pci_device_id switchtec_pci_tbl[] = {
1551 SWITCHTEC_PCI_DEVICE(0x8531), //PFX 24xG3
1552 SWITCHTEC_PCI_DEVICE(0x8532), //PFX 32xG3
1553 SWITCHTEC_PCI_DEVICE(0x8533), //PFX 48xG3
1554 SWITCHTEC_PCI_DEVICE(0x8534), //PFX 64xG3
1555 SWITCHTEC_PCI_DEVICE(0x8535), //PFX 80xG3
1556 SWITCHTEC_PCI_DEVICE(0x8536), //PFX 96xG3
1557 SWITCHTEC_PCI_DEVICE(0x8543), //PSX 48xG3
1558 SWITCHTEC_PCI_DEVICE(0x8544), //PSX 64xG3
1559 SWITCHTEC_PCI_DEVICE(0x8545), //PSX 80xG3
1560 SWITCHTEC_PCI_DEVICE(0x8546), //PSX 96xG3
1561 SWITCHTEC_PCI_DEVICE(0x8551), //PAX 24XG3
1562 SWITCHTEC_PCI_DEVICE(0x8552), //PAX 32XG3
1563 SWITCHTEC_PCI_DEVICE(0x8553), //PAX 48XG3
1564 SWITCHTEC_PCI_DEVICE(0x8554), //PAX 64XG3
1565 SWITCHTEC_PCI_DEVICE(0x8555), //PAX 80XG3
1566 SWITCHTEC_PCI_DEVICE(0x8556), //PAX 96XG3
1567 SWITCHTEC_PCI_DEVICE(0x8561), //PFXL 24XG3
1568 SWITCHTEC_PCI_DEVICE(0x8562), //PFXL 32XG3
1569 SWITCHTEC_PCI_DEVICE(0x8563), //PFXL 48XG3
1570 SWITCHTEC_PCI_DEVICE(0x8564), //PFXL 64XG3
1571 SWITCHTEC_PCI_DEVICE(0x8565), //PFXL 80XG3
1572 SWITCHTEC_PCI_DEVICE(0x8566), //PFXL 96XG3
1573 SWITCHTEC_PCI_DEVICE(0x8571), //PFXI 24XG3
1574 SWITCHTEC_PCI_DEVICE(0x8572), //PFXI 32XG3
1575 SWITCHTEC_PCI_DEVICE(0x8573), //PFXI 48XG3
1576 SWITCHTEC_PCI_DEVICE(0x8574), //PFXI 64XG3
1577 SWITCHTEC_PCI_DEVICE(0x8575), //PFXI 80XG3
1578 SWITCHTEC_PCI_DEVICE(0x8576), //PFXI 96XG3
1581 MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
/* PCI driver glue: probe/remove registered in switchtec_init(). */
1583 static struct pci_driver switchtec_pci_driver = {
1584 .name = KBUILD_MODNAME,
1585 .id_table = switchtec_pci_tbl,
1586 .probe = switchtec_pci_probe,
1587 .remove = switchtec_pci_remove,
/*
 * switchtec_init() - module entry point.
 *
 * Reserves a char-device major with max_devices minors, creates the
 * "switchtec" device class (for /dev node creation), then registers
 * the PCI driver.  Unwinds in reverse order on failure via the
 * err_pci_register / err_create_class labels (label lines themselves
 * are not visible in this extract).
 */
1590 static int __init switchtec_init(void)
1594 rc = alloc_chrdev_region(&switchtec_devt, 0, max_devices,
1599 switchtec_class = class_create(THIS_MODULE, "switchtec");
1600 if (IS_ERR(switchtec_class)) {
1601 rc = PTR_ERR(switchtec_class);
1602 goto err_create_class;
1605 rc = pci_register_driver(&switchtec_pci_driver);
1607 goto err_pci_register;
1609 pr_info(KBUILD_MODNAME ": loaded.\n");
1614 class_destroy(switchtec_class);
1617 unregister_chrdev_region(switchtec_devt, max_devices);
1621 module_init(switchtec_init);
/*
 * switchtec_exit() - module exit; strict reverse of switchtec_init(),
 * plus ida_destroy() to release any bookkeeping left in the minor-id
 * allocator (all minors should already have been removed per-device).
 */
1623 static void __exit switchtec_exit(void)
1625 pci_unregister_driver(&switchtec_pci_driver);
1626 class_destroy(switchtec_class);
1627 unregister_chrdev_region(switchtec_devt, max_devices);
1628 ida_destroy(&switchtec_minor_ida);
1630 pr_info(KBUILD_MODNAME ": unloaded.\n");
1632 module_exit(switchtec_exit);