// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Maxime Coquelin 2015
 * Copyright (C) STMicroelectronics 2017
 * Author: Maxime Coquelin <mcoquelin.stm32@gmail.com>
 */
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/hwspinlock.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/syscore_ops.h>

#include <dt-bindings/interrupt-controller/arm-gic.h>
23 #define IRQS_PER_BANK 32
25 #define HWSPNLCK_TIMEOUT 1000 /* usec */
26 #define HWSPNLCK_RETRY_DELAY 100 /* usec */
28 struct stm32_exti_bank {
40 enum stm32_exti_hwspinlock {
46 struct stm32_desc_irq {
51 struct stm32_exti_drv_data {
52 const struct stm32_exti_bank **exti_banks;
53 const struct stm32_desc_irq *desc_irqs;
58 struct stm32_exti_chip_data {
59 struct stm32_exti_host_data *host_data;
60 const struct stm32_exti_bank *reg_bank;
61 struct raw_spinlock rlock;
68 struct stm32_exti_host_data {
70 struct stm32_exti_chip_data *chips_data;
71 const struct stm32_exti_drv_data *drv_data;
72 struct device_node *node;
73 enum stm32_exti_hwspinlock hwlock_state;
74 struct hwspinlock *hwlock;
77 static struct stm32_exti_host_data *stm32_host_data;
79 static const struct stm32_exti_bank stm32f4xx_exti_b1 = {
86 .fpr_ofst = UNDEF_REG,
89 static const struct stm32_exti_bank *stm32f4xx_exti_banks[] = {
93 static const struct stm32_exti_drv_data stm32f4xx_drv_data = {
94 .exti_banks = stm32f4xx_exti_banks,
95 .bank_nr = ARRAY_SIZE(stm32f4xx_exti_banks),
98 static const struct stm32_exti_bank stm32h7xx_exti_b1 = {
105 .fpr_ofst = UNDEF_REG,
108 static const struct stm32_exti_bank stm32h7xx_exti_b2 = {
115 .fpr_ofst = UNDEF_REG,
118 static const struct stm32_exti_bank stm32h7xx_exti_b3 = {
125 .fpr_ofst = UNDEF_REG,
128 static const struct stm32_exti_bank *stm32h7xx_exti_banks[] = {
134 static const struct stm32_exti_drv_data stm32h7xx_drv_data = {
135 .exti_banks = stm32h7xx_exti_banks,
136 .bank_nr = ARRAY_SIZE(stm32h7xx_exti_banks),
139 static const struct stm32_exti_bank stm32mp1_exti_b1 = {
149 static const struct stm32_exti_bank stm32mp1_exti_b2 = {
159 static const struct stm32_exti_bank stm32mp1_exti_b3 = {
169 static const struct stm32_exti_bank *stm32mp1_exti_banks[] = {
175 static const struct stm32_desc_irq stm32mp1_desc_irq[] = {
176 { .exti = 0, .irq_parent = 6 },
177 { .exti = 1, .irq_parent = 7 },
178 { .exti = 2, .irq_parent = 8 },
179 { .exti = 3, .irq_parent = 9 },
180 { .exti = 4, .irq_parent = 10 },
181 { .exti = 5, .irq_parent = 23 },
182 { .exti = 6, .irq_parent = 64 },
183 { .exti = 7, .irq_parent = 65 },
184 { .exti = 8, .irq_parent = 66 },
185 { .exti = 9, .irq_parent = 67 },
186 { .exti = 10, .irq_parent = 40 },
187 { .exti = 11, .irq_parent = 42 },
188 { .exti = 12, .irq_parent = 76 },
189 { .exti = 13, .irq_parent = 77 },
190 { .exti = 14, .irq_parent = 121 },
191 { .exti = 15, .irq_parent = 127 },
192 { .exti = 16, .irq_parent = 1 },
193 { .exti = 65, .irq_parent = 144 },
194 { .exti = 68, .irq_parent = 143 },
195 { .exti = 73, .irq_parent = 129 },
198 static const struct stm32_exti_drv_data stm32mp1_drv_data = {
199 .exti_banks = stm32mp1_exti_banks,
200 .bank_nr = ARRAY_SIZE(stm32mp1_exti_banks),
201 .desc_irqs = stm32mp1_desc_irq,
202 .irq_nr = ARRAY_SIZE(stm32mp1_desc_irq),
205 static int stm32_exti_to_irq(const struct stm32_exti_drv_data *drv_data,
206 irq_hw_number_t hwirq)
208 const struct stm32_desc_irq *desc_irq;
211 if (!drv_data->desc_irqs)
214 for (i = 0; i < drv_data->irq_nr; i++) {
215 desc_irq = &drv_data->desc_irqs[i];
216 if (desc_irq->exti == hwirq)
217 return desc_irq->irq_parent;
223 static unsigned long stm32_exti_pending(struct irq_chip_generic *gc)
225 struct stm32_exti_chip_data *chip_data = gc->private;
226 const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
227 unsigned long pending;
229 pending = irq_reg_readl(gc, stm32_bank->rpr_ofst);
230 if (stm32_bank->fpr_ofst != UNDEF_REG)
231 pending |= irq_reg_readl(gc, stm32_bank->fpr_ofst);
236 static void stm32_irq_handler(struct irq_desc *desc)
238 struct irq_domain *domain = irq_desc_get_handler_data(desc);
239 struct irq_chip *chip = irq_desc_get_chip(desc);
240 unsigned int virq, nbanks = domain->gc->num_chips;
241 struct irq_chip_generic *gc;
242 unsigned long pending;
243 int n, i, irq_base = 0;
245 chained_irq_enter(chip, desc);
247 for (i = 0; i < nbanks; i++, irq_base += IRQS_PER_BANK) {
248 gc = irq_get_domain_generic_chip(domain, irq_base);
250 while ((pending = stm32_exti_pending(gc))) {
251 for_each_set_bit(n, &pending, IRQS_PER_BANK) {
252 virq = irq_find_mapping(domain, irq_base + n);
253 generic_handle_irq(virq);
258 chained_irq_exit(chip, desc);
261 static int stm32_exti_set_type(struct irq_data *d,
262 unsigned int type, u32 *rtsr, u32 *ftsr)
264 u32 mask = BIT(d->hwirq % IRQS_PER_BANK);
267 case IRQ_TYPE_EDGE_RISING:
271 case IRQ_TYPE_EDGE_FALLING:
275 case IRQ_TYPE_EDGE_BOTH:
286 static int stm32_exti_hwspin_lock(struct stm32_exti_chip_data *chip_data)
288 struct stm32_exti_host_data *host_data = chip_data->host_data;
289 struct hwspinlock *hwlock;
290 int id, ret = 0, timeout = 0;
292 /* first time, check for hwspinlock availability */
293 if (unlikely(host_data->hwlock_state == HWSPINLOCK_UNKNOWN)) {
294 id = of_hwspin_lock_get_id(host_data->node, 0);
296 hwlock = hwspin_lock_request_specific(id);
298 /* found valid hwspinlock */
299 host_data->hwlock_state = HWSPINLOCK_READY;
300 host_data->hwlock = hwlock;
301 pr_debug("%s hwspinlock = %d\n", __func__, id);
303 host_data->hwlock_state = HWSPINLOCK_NONE;
305 } else if (id != -EPROBE_DEFER) {
306 host_data->hwlock_state = HWSPINLOCK_NONE;
308 /* hwspinlock driver shall be ready at that stage */
313 if (likely(host_data->hwlock_state == HWSPINLOCK_READY)) {
315 * Use the x_raw API since we are under spin_lock protection.
316 * Do not use the x_timeout API because we are under irq_disable
317 * mode (see __setup_irq())
320 ret = hwspin_trylock_raw(host_data->hwlock);
324 udelay(HWSPNLCK_RETRY_DELAY);
325 timeout += HWSPNLCK_RETRY_DELAY;
326 } while (timeout < HWSPNLCK_TIMEOUT);
333 pr_err("%s can't get hwspinlock (%d)\n", __func__, ret);
338 static void stm32_exti_hwspin_unlock(struct stm32_exti_chip_data *chip_data)
340 if (likely(chip_data->host_data->hwlock_state == HWSPINLOCK_READY))
341 hwspin_unlock_raw(chip_data->host_data->hwlock);
344 static int stm32_irq_set_type(struct irq_data *d, unsigned int type)
346 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
347 struct stm32_exti_chip_data *chip_data = gc->private;
348 const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
354 err = stm32_exti_hwspin_lock(chip_data);
358 rtsr = irq_reg_readl(gc, stm32_bank->rtsr_ofst);
359 ftsr = irq_reg_readl(gc, stm32_bank->ftsr_ofst);
361 err = stm32_exti_set_type(d, type, &rtsr, &ftsr);
365 irq_reg_writel(gc, rtsr, stm32_bank->rtsr_ofst);
366 irq_reg_writel(gc, ftsr, stm32_bank->ftsr_ofst);
369 stm32_exti_hwspin_unlock(chip_data);
376 static void stm32_chip_suspend(struct stm32_exti_chip_data *chip_data,
379 const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
380 void __iomem *base = chip_data->host_data->base;
382 /* save rtsr, ftsr registers */
383 chip_data->rtsr_cache = readl_relaxed(base + stm32_bank->rtsr_ofst);
384 chip_data->ftsr_cache = readl_relaxed(base + stm32_bank->ftsr_ofst);
386 writel_relaxed(wake_active, base + stm32_bank->imr_ofst);
389 static void stm32_chip_resume(struct stm32_exti_chip_data *chip_data,
392 const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
393 void __iomem *base = chip_data->host_data->base;
395 /* restore rtsr, ftsr, registers */
396 writel_relaxed(chip_data->rtsr_cache, base + stm32_bank->rtsr_ofst);
397 writel_relaxed(chip_data->ftsr_cache, base + stm32_bank->ftsr_ofst);
399 writel_relaxed(mask_cache, base + stm32_bank->imr_ofst);
402 static void stm32_irq_suspend(struct irq_chip_generic *gc)
404 struct stm32_exti_chip_data *chip_data = gc->private;
407 stm32_chip_suspend(chip_data, gc->wake_active);
411 static void stm32_irq_resume(struct irq_chip_generic *gc)
413 struct stm32_exti_chip_data *chip_data = gc->private;
416 stm32_chip_resume(chip_data, gc->mask_cache);
420 static int stm32_exti_alloc(struct irq_domain *d, unsigned int virq,
421 unsigned int nr_irqs, void *data)
423 struct irq_fwspec *fwspec = data;
424 irq_hw_number_t hwirq;
426 hwirq = fwspec->param[0];
428 irq_map_generic_chip(d, virq, hwirq);
433 static void stm32_exti_free(struct irq_domain *d, unsigned int virq,
434 unsigned int nr_irqs)
436 struct irq_data *data = irq_domain_get_irq_data(d, virq);
438 irq_domain_reset_irq_data(data);
441 static const struct irq_domain_ops irq_exti_domain_ops = {
442 .map = irq_map_generic_chip,
443 .alloc = stm32_exti_alloc,
444 .free = stm32_exti_free,
447 static void stm32_irq_ack(struct irq_data *d)
449 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
450 struct stm32_exti_chip_data *chip_data = gc->private;
451 const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
455 irq_reg_writel(gc, d->mask, stm32_bank->rpr_ofst);
456 if (stm32_bank->fpr_ofst != UNDEF_REG)
457 irq_reg_writel(gc, d->mask, stm32_bank->fpr_ofst);
462 static inline u32 stm32_exti_set_bit(struct irq_data *d, u32 reg)
464 struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
465 void __iomem *base = chip_data->host_data->base;
468 val = readl_relaxed(base + reg);
469 val |= BIT(d->hwirq % IRQS_PER_BANK);
470 writel_relaxed(val, base + reg);
475 static inline u32 stm32_exti_clr_bit(struct irq_data *d, u32 reg)
477 struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
478 void __iomem *base = chip_data->host_data->base;
481 val = readl_relaxed(base + reg);
482 val &= ~BIT(d->hwirq % IRQS_PER_BANK);
483 writel_relaxed(val, base + reg);
488 static void stm32_exti_h_eoi(struct irq_data *d)
490 struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
491 const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
493 raw_spin_lock(&chip_data->rlock);
495 stm32_exti_set_bit(d, stm32_bank->rpr_ofst);
496 if (stm32_bank->fpr_ofst != UNDEF_REG)
497 stm32_exti_set_bit(d, stm32_bank->fpr_ofst);
499 raw_spin_unlock(&chip_data->rlock);
501 if (d->parent_data->chip)
502 irq_chip_eoi_parent(d);
505 static void stm32_exti_h_mask(struct irq_data *d)
507 struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
508 const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
510 raw_spin_lock(&chip_data->rlock);
511 chip_data->mask_cache = stm32_exti_clr_bit(d, stm32_bank->imr_ofst);
512 raw_spin_unlock(&chip_data->rlock);
514 if (d->parent_data->chip)
515 irq_chip_mask_parent(d);
518 static void stm32_exti_h_unmask(struct irq_data *d)
520 struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
521 const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
523 raw_spin_lock(&chip_data->rlock);
524 chip_data->mask_cache = stm32_exti_set_bit(d, stm32_bank->imr_ofst);
525 raw_spin_unlock(&chip_data->rlock);
527 if (d->parent_data->chip)
528 irq_chip_unmask_parent(d);
531 static int stm32_exti_h_set_type(struct irq_data *d, unsigned int type)
533 struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
534 const struct stm32_exti_bank *stm32_bank = chip_data->reg_bank;
535 void __iomem *base = chip_data->host_data->base;
539 raw_spin_lock(&chip_data->rlock);
541 err = stm32_exti_hwspin_lock(chip_data);
545 rtsr = readl_relaxed(base + stm32_bank->rtsr_ofst);
546 ftsr = readl_relaxed(base + stm32_bank->ftsr_ofst);
548 err = stm32_exti_set_type(d, type, &rtsr, &ftsr);
552 writel_relaxed(rtsr, base + stm32_bank->rtsr_ofst);
553 writel_relaxed(ftsr, base + stm32_bank->ftsr_ofst);
556 stm32_exti_hwspin_unlock(chip_data);
558 raw_spin_unlock(&chip_data->rlock);
563 static int stm32_exti_h_set_wake(struct irq_data *d, unsigned int on)
565 struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
566 u32 mask = BIT(d->hwirq % IRQS_PER_BANK);
568 raw_spin_lock(&chip_data->rlock);
571 chip_data->wake_active |= mask;
573 chip_data->wake_active &= ~mask;
575 raw_spin_unlock(&chip_data->rlock);
580 static int stm32_exti_h_set_affinity(struct irq_data *d,
581 const struct cpumask *dest, bool force)
583 if (d->parent_data->chip)
584 return irq_chip_set_affinity_parent(d, dest, force);
#ifdef CONFIG_PM
/* Syscore suspend: per bank, save triggers and keep only wake sources. */
static int stm32_exti_h_suspend(void)
{
	struct stm32_exti_chip_data *chip_data;
	int i;

	for (i = 0; i < stm32_host_data->drv_data->bank_nr; i++) {
		chip_data = &stm32_host_data->chips_data[i];
		raw_spin_lock(&chip_data->rlock);
		stm32_chip_suspend(chip_data, chip_data->wake_active);
		raw_spin_unlock(&chip_data->rlock);
	}

	return 0;
}

/* Syscore resume: per bank, restore triggers and the cached mask. */
static void stm32_exti_h_resume(void)
{
	struct stm32_exti_chip_data *chip_data;
	int i;

	for (i = 0; i < stm32_host_data->drv_data->bank_nr; i++) {
		chip_data = &stm32_host_data->chips_data[i];
		raw_spin_lock(&chip_data->rlock);
		stm32_chip_resume(chip_data, chip_data->mask_cache);
		raw_spin_unlock(&chip_data->rlock);
	}
}

static struct syscore_ops stm32_exti_h_syscore_ops = {
	.suspend	= stm32_exti_h_suspend,
	.resume		= stm32_exti_h_resume,
};

static void stm32_exti_h_syscore_init(void)
{
	register_syscore_ops(&stm32_exti_h_syscore_ops);
}
#else
static inline void stm32_exti_h_syscore_init(void) {}
#endif
631 static struct irq_chip stm32_exti_h_chip = {
632 .name = "stm32-exti-h",
633 .irq_eoi = stm32_exti_h_eoi,
634 .irq_mask = stm32_exti_h_mask,
635 .irq_unmask = stm32_exti_h_unmask,
636 .irq_retrigger = irq_chip_retrigger_hierarchy,
637 .irq_set_type = stm32_exti_h_set_type,
638 .irq_set_wake = stm32_exti_h_set_wake,
639 .flags = IRQCHIP_MASK_ON_SUSPEND,
640 .irq_set_affinity = IS_ENABLED(CONFIG_SMP) ? stm32_exti_h_set_affinity : NULL,
643 static int stm32_exti_h_domain_alloc(struct irq_domain *dm,
645 unsigned int nr_irqs, void *data)
647 struct stm32_exti_host_data *host_data = dm->host_data;
648 struct stm32_exti_chip_data *chip_data;
649 struct irq_fwspec *fwspec = data;
650 struct irq_fwspec p_fwspec;
651 irq_hw_number_t hwirq;
654 hwirq = fwspec->param[0];
655 bank = hwirq / IRQS_PER_BANK;
656 chip_data = &host_data->chips_data[bank];
658 irq_domain_set_hwirq_and_chip(dm, virq, hwirq,
659 &stm32_exti_h_chip, chip_data);
661 p_irq = stm32_exti_to_irq(host_data->drv_data, hwirq);
663 p_fwspec.fwnode = dm->parent->fwnode;
664 p_fwspec.param_count = 3;
665 p_fwspec.param[0] = GIC_SPI;
666 p_fwspec.param[1] = p_irq;
667 p_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;
669 return irq_domain_alloc_irqs_parent(dm, virq, 1, &p_fwspec);
676 stm32_exti_host_data *stm32_exti_host_init(const struct stm32_exti_drv_data *dd,
677 struct device_node *node)
679 struct stm32_exti_host_data *host_data;
681 host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
685 host_data->drv_data = dd;
686 host_data->node = node;
687 host_data->hwlock_state = HWSPINLOCK_UNKNOWN;
688 host_data->chips_data = kcalloc(dd->bank_nr,
689 sizeof(struct stm32_exti_chip_data),
691 if (!host_data->chips_data)
694 host_data->base = of_iomap(node, 0);
695 if (!host_data->base) {
696 pr_err("%pOF: Unable to map registers\n", node);
697 goto free_chips_data;
700 stm32_host_data = host_data;
705 kfree(host_data->chips_data);
713 stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
716 const struct stm32_exti_bank *stm32_bank;
717 struct stm32_exti_chip_data *chip_data;
718 void __iomem *base = h_data->base;
720 stm32_bank = h_data->drv_data->exti_banks[bank_idx];
721 chip_data = &h_data->chips_data[bank_idx];
722 chip_data->host_data = h_data;
723 chip_data->reg_bank = stm32_bank;
725 raw_spin_lock_init(&chip_data->rlock);
728 * This IP has no reset, so after hot reboot we should
729 * clear registers to avoid residue
731 writel_relaxed(0, base + stm32_bank->imr_ofst);
732 writel_relaxed(0, base + stm32_bank->emr_ofst);
734 pr_info("%pOF: bank%d\n", h_data->node, bank_idx);
739 static int __init stm32_exti_init(const struct stm32_exti_drv_data *drv_data,
740 struct device_node *node)
742 struct stm32_exti_host_data *host_data;
743 unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
745 struct irq_chip_generic *gc;
746 struct irq_domain *domain;
748 host_data = stm32_exti_host_init(drv_data, node);
752 domain = irq_domain_add_linear(node, drv_data->bank_nr * IRQS_PER_BANK,
753 &irq_exti_domain_ops, NULL);
755 pr_err("%pOFn: Could not register interrupt domain.\n",
761 ret = irq_alloc_domain_generic_chips(domain, IRQS_PER_BANK, 1, "exti",
762 handle_edge_irq, clr, 0, 0);
764 pr_err("%pOF: Could not allocate generic interrupt chip.\n",
766 goto out_free_domain;
769 for (i = 0; i < drv_data->bank_nr; i++) {
770 const struct stm32_exti_bank *stm32_bank;
771 struct stm32_exti_chip_data *chip_data;
773 stm32_bank = drv_data->exti_banks[i];
774 chip_data = stm32_exti_chip_init(host_data, i);
776 gc = irq_get_domain_generic_chip(domain, i * IRQS_PER_BANK);
778 gc->reg_base = host_data->base;
779 gc->chip_types->type = IRQ_TYPE_EDGE_BOTH;
780 gc->chip_types->chip.irq_ack = stm32_irq_ack;
781 gc->chip_types->chip.irq_mask = irq_gc_mask_clr_bit;
782 gc->chip_types->chip.irq_unmask = irq_gc_mask_set_bit;
783 gc->chip_types->chip.irq_set_type = stm32_irq_set_type;
784 gc->chip_types->chip.irq_set_wake = irq_gc_set_wake;
785 gc->suspend = stm32_irq_suspend;
786 gc->resume = stm32_irq_resume;
787 gc->wake_enabled = IRQ_MSK(IRQS_PER_BANK);
789 gc->chip_types->regs.mask = stm32_bank->imr_ofst;
790 gc->private = (void *)chip_data;
793 nr_irqs = of_irq_count(node);
794 for (i = 0; i < nr_irqs; i++) {
795 unsigned int irq = irq_of_parse_and_map(node, i);
797 irq_set_handler_data(irq, domain);
798 irq_set_chained_handler(irq, stm32_irq_handler);
804 irq_domain_remove(domain);
806 iounmap(host_data->base);
807 kfree(host_data->chips_data);
812 static const struct irq_domain_ops stm32_exti_h_domain_ops = {
813 .alloc = stm32_exti_h_domain_alloc,
814 .free = irq_domain_free_irqs_common,
815 .xlate = irq_domain_xlate_twocell,
819 __init stm32_exti_hierarchy_init(const struct stm32_exti_drv_data *drv_data,
820 struct device_node *node,
821 struct device_node *parent)
823 struct irq_domain *parent_domain, *domain;
824 struct stm32_exti_host_data *host_data;
827 parent_domain = irq_find_host(parent);
828 if (!parent_domain) {
829 pr_err("interrupt-parent not found\n");
833 host_data = stm32_exti_host_init(drv_data, node);
837 for (i = 0; i < drv_data->bank_nr; i++)
838 stm32_exti_chip_init(host_data, i);
840 domain = irq_domain_add_hierarchy(parent_domain, 0,
841 drv_data->bank_nr * IRQS_PER_BANK,
842 node, &stm32_exti_h_domain_ops,
846 pr_err("%pOFn: Could not register exti domain.\n", node);
851 stm32_exti_h_syscore_init();
856 iounmap(host_data->base);
857 kfree(host_data->chips_data);
862 static int __init stm32f4_exti_of_init(struct device_node *np,
863 struct device_node *parent)
865 return stm32_exti_init(&stm32f4xx_drv_data, np);
868 IRQCHIP_DECLARE(stm32f4_exti, "st,stm32-exti", stm32f4_exti_of_init);
870 static int __init stm32h7_exti_of_init(struct device_node *np,
871 struct device_node *parent)
873 return stm32_exti_init(&stm32h7xx_drv_data, np);
876 IRQCHIP_DECLARE(stm32h7_exti, "st,stm32h7-exti", stm32h7_exti_of_init);
878 static int __init stm32mp1_exti_of_init(struct device_node *np,
879 struct device_node *parent)
881 return stm32_exti_hierarchy_init(&stm32mp1_drv_data, np, parent);
884 IRQCHIP_DECLARE(stm32mp1_exti, "st,stm32mp1-exti", stm32mp1_exti_of_init);