/*
 * regmap based irq_chip
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include "internal.h"

struct regmap_irq_chip_data {
	struct mutex lock;
	struct irq_chip irq_chip;

	struct regmap *map;
	const struct regmap_irq_chip *chip;

	int irq_base;
	struct irq_domain *domain;

	int irq;
	int wake_count;

	void *status_reg_buf;
	unsigned int *status_buf;
	unsigned int *mask_buf;
	unsigned int *mask_buf_def;
	unsigned int *wake_buf;
	unsigned int *type_buf;
	unsigned int *type_buf_def;

	unsigned int irq_reg_stride;
	unsigned int type_reg_stride;
};

static const struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
						  int irq)
{
	return &data->chip->irqs[irq];
}

static void regmap_irq_lock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);

	mutex_lock(&d->lock);
}

static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, ret;
	u32 reg;
	u32 unmask_offset;

	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	/*
	 * If there's been a change in the mask write it back to the
	 * hardware.  We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		reg = d->chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->chip->mask_invert) {
			ret = regmap_update_bits(d->map, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
		} else if (d->chip->unmask_base) {
			/* set mask with mask_base register */
			ret = regmap_update_bits(d->map, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret < 0)
				dev_err(d->map->dev,
					"Failed to sync unmasks in %x\n",
					reg);
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
			/* clear mask with unmask_base register */
			ret = regmap_update_bits(d->map,
					reg + unmask_offset,
					d->mask_buf_def[i],
					d->mask_buf[i]);
		} else {
			ret = regmap_update_bits(d->map, reg,
					d->mask_buf_def[i], d->mask_buf[i]);
		}
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
				reg);

		reg = d->chip->wake_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->wake_buf) {
			if (d->chip->wake_invert)
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 ~d->wake_buf[i]);
			else
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}

		if (!d->chip->init_ack_masked)
			continue;
		/*
		 * Ack all the masked interrupts unconditionally; if a masked
		 * interrupt is left unacked it will be ignored in the
		 * interrupt handler and may then cause an interrupt storm.
		 */
		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
			reg = d->chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			/* some chips ack by write 0 */
			if (d->chip->ack_invert)
				ret = regmap_write(map, reg, ~d->mask_buf[i]);
			else
				ret = regmap_write(map, reg, d->mask_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	for (i = 0; i < d->chip->num_type_reg; i++) {
		if (!d->type_buf_def[i])
			continue;
		reg = d->chip->type_base +
			(i * map->reg_stride * d->type_reg_stride);
		if (d->chip->type_invert)
			ret = regmap_update_bits(d->map, reg,
					d->type_buf_def[i], ~d->type_buf[i]);
		else
			ret = regmap_update_bits(d->map, reg,
					d->type_buf_def[i], d->type_buf[i]);
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync type in %x\n",
				reg);
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}

static void regmap_irq_enable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~irq_data->mask;
}

static void regmap_irq_disable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
}

static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
	int reg = irq_data->type_reg_offset / map->reg_stride;

	if (!(irq_data->type_rising_mask | irq_data->type_falling_mask))
		return 0;

	d->type_buf[reg] &= ~(irq_data->type_falling_mask |
			      irq_data->type_rising_mask);
	switch (type) {
	case IRQ_TYPE_EDGE_FALLING:
		d->type_buf[reg] |= irq_data->type_falling_mask;
		break;
	case IRQ_TYPE_EDGE_RISING:
		d->type_buf[reg] |= irq_data->type_rising_mask;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		d->type_buf[reg] |= (irq_data->type_falling_mask |
				     irq_data->type_rising_mask);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	if (on) {
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				&= ~irq_data->mask;
		d->wake_count++;
	} else {
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				|= irq_data->mask;
		d->wake_count--;
	}

	return 0;
}

static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
	.irq_set_type		= regmap_irq_set_type,
	.irq_set_wake		= regmap_irq_set_wake,
};

static irqreturn_t regmap_irq_thread(int irq, void *d)
{
	struct regmap_irq_chip_data *data = d;
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	int ret, i;
	bool handled = false;
	u32 reg;

	if (chip->handle_pre_irq)
		chip->handle_pre_irq(chip->irq_drv_data);

	if (chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0) {
			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
				ret);
			pm_runtime_put(map->dev);
			goto exit;
		}
	}

	/*
	 * Read in the statuses, using a single bulk read if possible
	 * in order to reduce the I/O overheads.
	 */
	if (!map->use_single_read && map->reg_stride == 1 &&
	    data->irq_reg_stride == 1) {
		u8 *buf8 = data->status_reg_buf;
		u16 *buf16 = data->status_reg_buf;
		u32 *buf32 = data->status_reg_buf;

		BUG_ON(!data->status_reg_buf);

		ret = regmap_bulk_read(map, chip->status_base,
				       data->status_reg_buf,
				       chip->num_regs);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto exit;
		}

		for (i = 0; i < data->chip->num_regs; i++) {
			switch (map->format.val_bytes) {
			case 1:
				data->status_buf[i] = buf8[i];
				break;
			case 2:
				data->status_buf[i] = buf16[i];
				break;
			case 4:
				data->status_buf[i] = buf32[i];
				break;
			default:
				BUG();
				goto exit;
			}
		}
	} else {
		for (i = 0; i < data->chip->num_regs; i++) {
			ret = regmap_read(map, chip->status_base +
					  (i * map->reg_stride
					   * data->irq_reg_stride),
					  &data->status_buf[i]);

			if (ret != 0) {
				dev_err(map->dev,
					"Failed to read IRQ status: %d\n",
					ret);
				if (chip->runtime_pm)
					pm_runtime_put(map->dev);
				goto exit;
			}
		}
	}

	/*
	 * Ignore masked IRQs and ack if we need to; we ack early so
	 * there is no race between handling and acknowledging the
	 * interrupt.  We assume that typically few of the interrupts
	 * will fire simultaneously so don't worry about overhead from
	 * doing a write per register.
	 */
	for (i = 0; i < data->chip->num_regs; i++) {
		data->status_buf[i] &= ~data->mask_buf[i];

		if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * data->irq_reg_stride);
			ret = regmap_write(map, reg, data->status_buf[i]);
			if (ret != 0)
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	for (i = 0; i < chip->num_irqs; i++) {
		if (data->status_buf[chip->irqs[i].reg_offset /
				     map->reg_stride] & chip->irqs[i].mask) {
			handle_nested_irq(irq_find_mapping(data->domain, i));
			handled = true;
		}
	}

	if (chip->runtime_pm)
		pm_runtime_put(map->dev);

exit:
	if (chip->handle_post_irq)
		chip->handle_post_irq(chip->irq_drv_data);

	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *data = h->host_data;

	irq_set_chip_data(virq, data);
	irq_set_chip(virq, &data->irq_chip);
	irq_set_nested_thread(virq, 1);
	irq_set_parent(virq, data->irq);
	irq_set_noprobe(virq);

	return 0;
}

static const struct irq_domain_ops regmap_domain_ops = {
	.map	= regmap_irq_map,
	.xlate	= irq_domain_xlate_twocell,
};

/**
 * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
 * @map:       The regmap for the device.
 * @irq:       The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base:  Allocate at a specific IRQ number if irq_base > 0.
 * @chip:      Configuration for the interrupt controller.
 * @data:      Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
			int irq_base, const struct regmap_irq_chip *chip,
			struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
	int i;
	int ret = -ENOMEM;
	u32 reg;
	u32 unmask_offset;

	if (chip->num_regs <= 0)
		return -EINVAL;

	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

	d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

	d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int),
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

	if (chip->wake_base) {
		d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

	if (chip->num_type_reg) {
		d->type_buf_def = kcalloc(chip->num_type_reg,
					  sizeof(unsigned int), GFP_KERNEL);
		if (!d->type_buf_def)
			goto err_alloc;

		d->type_buf = kcalloc(chip->num_type_reg, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->type_buf)
			goto err_alloc;
	}

	d->irq_chip = regmap_irq_chip;
	d->irq_chip.name = chip->name;
	d->irq = irq;
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

	if (chip->type_reg_stride)
		d->type_reg_stride = chip->type_reg_stride;
	else
		d->type_reg_stride = 1;

	if (!map->use_single_read && map->reg_stride == 1 &&
	    d->irq_reg_stride == 1) {
		d->status_reg_buf = kmalloc_array(chip->num_regs,
						  map->format.val_bytes,
						  GFP_KERNEL);
		if (!d->status_reg_buf)
			goto err_alloc;
	}

	mutex_init(&d->lock);

	for (i = 0; i < chip->num_irqs; i++)
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
		reg = chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (chip->mask_invert)
			ret = regmap_update_bits(map, reg,
					d->mask_buf[i], ~d->mask_buf[i]);
		else if (d->chip->unmask_base) {
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
			ret = regmap_update_bits(d->map,
					reg + unmask_offset,
					d->mask_buf[i],
					d->mask_buf[i]);
		} else
			ret = regmap_update_bits(map, reg,
					d->mask_buf[i], d->mask_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
				reg, ret);
			goto err_alloc;
		}

		if (!chip->init_ack_masked)
			continue;

		/* Ack masked but set interrupts */
		reg = chip->status_base +
			(i * map->reg_stride * d->irq_reg_stride);
		ret = regmap_read(map, reg, &d->status_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto err_alloc;
		}

		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			if (chip->ack_invert)
				ret = regmap_write(map, reg,
					~(d->status_buf[i] & d->mask_buf[i]));
			else
				ret = regmap_write(map, reg,
					d->status_buf[i] & d->mask_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = chip->wake_base +
				(i * map->reg_stride * d->irq_reg_stride);

			if (chip->wake_invert)
				ret = regmap_update_bits(map, reg,
							 d->mask_buf_def[i],
							 0);
			else
				ret = regmap_update_bits(map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to set wakes in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	if (chip->num_type_reg) {
		for (i = 0; i < chip->num_irqs; i++) {
			reg = chip->irqs[i].type_reg_offset / map->reg_stride;
			d->type_buf_def[reg] |= chip->irqs[i].type_rising_mask |
					chip->irqs[i].type_falling_mask;
		}
		for (i = 0; i < chip->num_type_reg; ++i) {
			if (!d->type_buf_def[i])
				continue;

			reg = chip->type_base +
				(i * map->reg_stride * d->type_reg_stride);
			if (chip->type_invert)
				ret = regmap_update_bits(map, reg,
					d->type_buf_def[i], 0xFF);
			else
				ret = regmap_update_bits(map, reg,
					d->type_buf_def[i], 0x0);
			if (ret != 0) {
				dev_err(map->dev,
					"Failed to set type in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	if (irq_base)
		d->domain = irq_domain_add_legacy(map->dev->of_node,
						  chip->num_irqs, irq_base, 0,
						  &regmap_domain_ops, d);
	else
		d->domain = irq_domain_add_linear(map->dev->of_node,
						  chip->num_irqs,
						  &regmap_domain_ops, d);
	if (!d->domain) {
		dev_err(map->dev, "Failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto err_alloc;
	}

	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
				   irq_flags | IRQF_ONESHOT,
				   chip->name, d);
	if (ret != 0) {
		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
			irq, chip->name, ret);
		goto err_domain;
	}

	*data = d;

	return 0;

err_domain:
	/* Should really dispose of the domain but... */
err_alloc:
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_buf);
	kfree(d->status_reg_buf);
	kfree(d);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
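
/*
 * Example usage (a sketch only, not taken from a real driver: "foo" and
 * the FOO_REG_* register names are hypothetical). A device with a single
 * 8-bit status/mask/ack register bank might describe itself as:
 *
 *	static const struct regmap_irq foo_irqs[] = {
 *		{ .reg_offset = 0, .mask = BIT(0) },
 *		{ .reg_offset = 0, .mask = BIT(1) },
 *	};
 *
 *	static const struct regmap_irq_chip foo_irq_chip = {
 *		.name = "foo",
 *		.status_base = FOO_REG_IRQ_STATUS,
 *		.mask_base = FOO_REG_IRQ_MASK,
 *		.ack_base = FOO_REG_IRQ_ACK,
 *		.num_regs = 1,
 *		.irqs = foo_irqs,
 *		.num_irqs = ARRAY_SIZE(foo_irqs),
 *	};
 *
 *	ret = regmap_add_irq_chip(foo->regmap, foo->irq, IRQF_TRIGGER_LOW,
 *				  0, &foo_irq_chip, &foo->irq_data);
 *	if (ret)
 *		return ret;
 */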

/**
 * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
 * @irq: Primary IRQ for the device.
 * @d:   &regmap_irq_chip_data allocated by regmap_add_irq_chip().
 *
 * This function also disposes of all mapped IRQs on the chip.
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
	unsigned int virq;
	int hwirq;

	if (!d)
		return;

	free_irq(irq, d);

	/* Dispose of all virtual IRQs from the domain before removing it */
	for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
		/* Ignore hwirqs that are holes in the IRQ list */
		if (!d->chip->irqs[hwirq].mask)
			continue;

		/*
		 * Find the virtual IRQ of the hwirq on the chip and, if
		 * one is mapped, dispose of it.
		 */
		virq = irq_find_mapping(d->domain, hwirq);
		if (virq)
			irq_dispose_mapping(virq);
	}

	irq_domain_remove(d->domain);
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_reg_buf);
	kfree(d->status_buf);
	kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
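
/*
 * Example teardown (sketch; assumes foo->irq and foo->irq_data were set
 * up by the matching regmap_add_irq_chip() call shown above):
 *
 *	regmap_del_irq_chip(foo->irq, foo->irq_data);
 */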

static void devm_regmap_irq_chip_release(struct device *dev, void *res)
{
	struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;

	regmap_del_irq_chip(d->irq, d);
}

static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
{
	struct regmap_irq_chip_data **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

/**
 * devm_regmap_add_irq_chip() - Resource-managed regmap_add_irq_chip()
 * @dev:       The device to which the irq_chip belongs.
 * @map:       The regmap for the device.
 * @irq:       The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base:  Allocate at a specific IRQ number if irq_base > 0.
 * @chip:      Configuration for the interrupt controller.
 * @data:      Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * The &regmap_irq_chip_data will be automatically released when the device
 * is unbound.
 */
int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
			     int irq_flags, int irq_base,
			     const struct regmap_irq_chip *chip,
			     struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data **ptr, *d;
	int ret;

	ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = regmap_add_irq_chip(map, irq, irq_flags, irq_base,
				  chip, &d);
	if (ret < 0) {
		devres_free(ptr);
		return ret;
	}

	*ptr = d;
	devres_add(dev, ptr);
	*data = d;
	return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
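
/*
 * Example usage (sketch; reuses the hypothetical foo_irq_chip from the
 * regmap_add_irq_chip() example, typically from a probe() path so that
 * teardown happens automatically on unbind):
 *
 *	ret = devm_regmap_add_irq_chip(&i2c->dev, foo->regmap, foo->irq,
 *				       IRQF_TRIGGER_LOW | IRQF_ONESHOT, 0,
 *				       &foo_irq_chip, &foo->irq_data);
 *	if (ret) {
 *		dev_err(&i2c->dev, "Failed to add IRQ chip: %d\n", ret);
 *		return ret;
 *	}
 */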

/**
 * devm_regmap_del_irq_chip() - Resource-managed regmap_del_irq_chip()
 * @dev:  Device for which the resource was allocated.
 * @irq:  Primary IRQ for the device.
 * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
 */
void devm_regmap_del_irq_chip(struct device *dev, int irq,
			      struct regmap_irq_chip_data *data)
{
	int rc;

	WARN_ON(irq != data->irq);
	rc = devres_release(dev, devm_regmap_irq_chip_release,
			    devm_regmap_irq_chip_match, data);

	if (rc != 0)
		WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);
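
/*
 * Example (sketch): most drivers never call this and simply rely on
 * devres, but an explicit early-teardown path could look like:
 *
 *	devm_regmap_del_irq_chip(&i2c->dev, foo->irq, foo->irq_data);
 */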

/**
 * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
 * @data: regmap_irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs.
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
	WARN_ON(!data->irq_base);
	return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
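
/*
 * Example (sketch; only meaningful when the chip was registered with a
 * non-zero irq_base, and FOO_IRQ_ALERT is a hypothetical index into the
 * chip's IRQ table):
 *
 *	irq = regmap_irq_chip_get_base(foo->irq_data) + FOO_IRQ_ALERT;
 */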

/**
 * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
 * @data: regmap_irq controller to operate on.
 * @irq:  Index of the interrupt requested in the chip IRQs.
 *
 * Useful for drivers to request their own IRQs.
 */
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
	/* Handle holes in the IRQ list */
	if (!data->chip->irqs[irq].mask)
		return -EINVAL;

	return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
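
/*
 * Example (sketch; FOO_IRQ_ALERT and foo_alert_handler are hypothetical):
 *
 *	virq = regmap_irq_get_virq(foo->irq_data, FOO_IRQ_ALERT);
 *	if (virq < 0)
 *		return virq;
 *
 *	ret = devm_request_threaded_irq(dev, virq, NULL, foo_alert_handler,
 *					IRQF_ONESHOT, "foo-alert", foo);
 */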

/**
 * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
 * @data: regmap_irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs and for integration
 * with subsystems.  For ease of integration NULL is accepted as a
 * domain, allowing devices to just call this even if no domain is
 * allocated.
 */
struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
{
	if (data)
		return data->domain;
	else
		return NULL;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_domain);
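
/*
 * Example (sketch): a subsystem holding only a hardware IRQ index can
 * map it through the returned domain itself instead of going through
 * regmap_irq_get_virq():
 *
 *	virq = irq_create_mapping(regmap_irq_get_domain(foo->irq_data),
 *				  hwirq);
 */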