kernel/irq/irqdesc.c
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/core-api/genericirq.rst
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>
#include <linux/irqdomain.h>
#include <linux/sysfs.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static int __init irq_affinity_setup(char *str)
{
        zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
        cpulist_parse(str, irq_default_affinity);
        /*
         * Set at least the boot CPU. We don't want to end up with
         * bug reports caused by random command line masks.
         */
        cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
        return 1;
}
__setup("irqaffinity=", irq_affinity_setup);
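
/*
 * Illustrative example (not part of the original file): booting with
 * "irqaffinity=0-3" makes the cpulist_parse() above restrict
 * irq_default_affinity to CPUs 0-3; the boot CPU is then added as a
 * safety net against nonsensical command line masks.
 */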

static void __init init_irq_default_affinity(void)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
        if (!irq_default_affinity)
                zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
#endif
        if (cpumask_empty(irq_default_affinity))
                cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, int node)
{
        if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
                                     GFP_KERNEL, node))
                return -ENOMEM;

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
        if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity,
                                     GFP_KERNEL, node)) {
                free_cpumask_var(desc->irq_common_data.affinity);
                return -ENOMEM;
        }
#endif

#ifdef CONFIG_GENERIC_PENDING_IRQ
        if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
                free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
                free_cpumask_var(desc->irq_common_data.affinity);
                return -ENOMEM;
        }
#endif
        return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node,
                          const struct cpumask *affinity)
{
        if (!affinity)
                affinity = irq_default_affinity;
        cpumask_copy(desc->irq_common_data.affinity, affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
        cpumask_clear(desc->pending_mask);
#endif
#ifdef CONFIG_NUMA
        desc->irq_common_data.node = node;
#endif
}

#else
static inline int
alloc_masks(struct irq_desc *desc, int node) { return 0; }
static inline void
desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
                              const struct cpumask *affinity, struct module *owner)
{
        int cpu;

        desc->irq_common_data.handler_data = NULL;
        desc->irq_common_data.msi_desc = NULL;

        desc->irq_data.common = &desc->irq_common_data;
        desc->irq_data.irq = irq;
        desc->irq_data.chip = &no_irq_chip;
        desc->irq_data.chip_data = NULL;
        irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
        irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
        irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
        desc->handle_irq = handle_bad_irq;
        desc->depth = 1;
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;
        desc->name = NULL;
        desc->owner = owner;
        for_each_possible_cpu(cpu)
                *per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
        desc_smp_init(desc, node, affinity);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static void irq_kobj_release(struct kobject *kobj);

#ifdef CONFIG_SYSFS
static struct kobject *irq_kobj_base;

#define IRQ_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

static ssize_t per_cpu_count_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        int cpu, irq = desc->irq_data.irq;
        ssize_t ret = 0;
        char *p = "";

        for_each_possible_cpu(cpu) {
                unsigned int c = kstat_irqs_cpu(irq, cpu);

                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c);
                p = ",";
        }

        ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
        return ret;
}
IRQ_ATTR_RO(per_cpu_count);

static ssize_t chip_name_show(struct kobject *kobj,
                              struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;

        raw_spin_lock_irq(&desc->lock);
        if (desc->irq_data.chip && desc->irq_data.chip->name) {
                ret = scnprintf(buf, PAGE_SIZE, "%s\n",
                                desc->irq_data.chip->name);
        }
        raw_spin_unlock_irq(&desc->lock);

        return ret;
}
IRQ_ATTR_RO(chip_name);

static ssize_t hwirq_show(struct kobject *kobj,
                          struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;

        raw_spin_lock_irq(&desc->lock);
        if (desc->irq_data.domain)
                ret = sprintf(buf, "%d\n", (int)desc->irq_data.hwirq);
        raw_spin_unlock_irq(&desc->lock);

        return ret;
}
IRQ_ATTR_RO(hwirq);

static ssize_t type_show(struct kobject *kobj,
                         struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;

        raw_spin_lock_irq(&desc->lock);
        ret = sprintf(buf, "%s\n",
                      irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
        raw_spin_unlock_irq(&desc->lock);

        return ret;
}
IRQ_ATTR_RO(type);

static ssize_t name_show(struct kobject *kobj,
                         struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;

        raw_spin_lock_irq(&desc->lock);
        if (desc->name)
                ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
        raw_spin_unlock_irq(&desc->lock);

        return ret;
}
IRQ_ATTR_RO(name);

static ssize_t actions_show(struct kobject *kobj,
                            struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        struct irqaction *action;
        ssize_t ret = 0;
        char *p = "";

        raw_spin_lock_irq(&desc->lock);
        for (action = desc->action; action != NULL; action = action->next) {
                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
                                 p, action->name);
                p = ",";
        }
        raw_spin_unlock_irq(&desc->lock);

        if (ret)
                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");

        return ret;
}
IRQ_ATTR_RO(actions);

static struct attribute *irq_attrs[] = {
        &per_cpu_count_attr.attr,
        &chip_name_attr.attr,
        &hwirq_attr.attr,
        &type_attr.attr,
        &name_attr.attr,
        &actions_attr.attr,
        NULL
};

static struct kobj_type irq_kobj_type = {
        .release        = irq_kobj_release,
        .sysfs_ops      = &kobj_sysfs_ops,
        .default_attrs  = irq_attrs,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc)
{
        if (irq_kobj_base) {
                /*
                 * Continue even in case of failure as this is not
                 * crucial.
                 */
                if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
                        pr_warn("Failed to add kobject for irq %d\n", irq);
        }
}

static int __init irq_sysfs_init(void)
{
        struct irq_desc *desc;
        int irq;

        /* Prevent concurrent irq alloc/free */
        irq_lock_sparse();

        irq_kobj_base = kobject_create_and_add("irq", kernel_kobj);
        if (!irq_kobj_base) {
                irq_unlock_sparse();
                return -ENOMEM;
        }

        /* Add the already allocated interrupts */
        for_each_irq_desc(irq, desc)
                irq_sysfs_add(irq, desc);
        irq_unlock_sparse();

        return 0;
}
postcore_initcall(irq_sysfs_init);
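
/*
 * Illustrative example (values hypothetical, not from this file): with
 * CONFIG_SYSFS enabled, the attributes defined above show up under
 * /sys/kernel/irq/<N>, e.g.:
 *
 *      $ cat /sys/kernel/irq/9/chip_name
 *      IO-APIC
 *      $ cat /sys/kernel/irq/9/actions
 *      acpi
 *      $ cat /sys/kernel/irq/9/per_cpu_count
 *      0,15,0,2
 */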

#else /* !CONFIG_SYSFS */

static struct kobj_type irq_kobj_type = {
        .release        = irq_kobj_release,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc) {}

#endif /* CONFIG_SYSFS */

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
        radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);

static void delete_irq_desc(unsigned int irq)
{
        radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
        free_cpumask_var(desc->pending_mask);
#endif
        free_cpumask_var(desc->irq_common_data.affinity);
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
        free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

void irq_lock_sparse(void)
{
        mutex_lock(&sparse_irq_lock);
}

void irq_unlock_sparse(void)
{
        mutex_unlock(&sparse_irq_lock);
}

static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
                                   const struct cpumask *affinity,
                                   struct module *owner)
{
        struct irq_desc *desc;

        desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
        if (!desc)
                return NULL;
        /* allocate based on nr_cpu_ids */
        desc->kstat_irqs = alloc_percpu(unsigned int);
        if (!desc->kstat_irqs)
                goto err_desc;

        if (alloc_masks(desc, node))
                goto err_kstat;

        raw_spin_lock_init(&desc->lock);
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        mutex_init(&desc->request_mutex);
        init_rcu_head(&desc->rcu);

        desc_set_defaults(irq, desc, node, affinity, owner);
        irqd_set(&desc->irq_data, flags);
        kobject_init(&desc->kobj, &irq_kobj_type);

        return desc;

err_kstat:
        free_percpu(desc->kstat_irqs);
err_desc:
        kfree(desc);
        return NULL;
}

static void irq_kobj_release(struct kobject *kobj)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);

        free_masks(desc);
        free_percpu(desc->kstat_irqs);
        kfree(desc);
}

static void delayed_free_desc(struct rcu_head *rhp)
{
        struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);

        kobject_put(&desc->kobj);
}

static void free_desc(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        irq_remove_debugfs_entry(desc);
        unregister_irq_proc(irq, desc);

        /*
         * sparse_irq_lock also protects show_interrupts() and
         * kstat_irqs_usr(). Once the descriptor is deleted from the
         * sparse tree, it can be freed; lookups from /proc will then
         * fail to find it.
         *
         * The sysfs entry must be serialized against a concurrent
         * irq_sysfs_init() as well.
         */
        kobject_del(&desc->kobj);
        delete_irq_desc(irq);

        /*
         * We free the descriptor, masks and stat fields via RCU. That
         * allows demultiplexing interrupt handlers to do RCU-based
         * management of the child interrupts.
         */
        call_rcu(&desc->rcu, delayed_free_desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
                       const struct cpumask *affinity, struct module *owner)
{
        const struct cpumask *mask = NULL;
        struct irq_desc *desc;
        unsigned int flags;
        int i;

        /* Validate affinity mask(s) */
        if (affinity) {
                for (i = 0, mask = affinity; i < cnt; i++, mask++) {
                        if (cpumask_empty(mask))
                                return -EINVAL;
                }
        }

        flags = affinity ? IRQD_AFFINITY_MANAGED : 0;
        mask = NULL;

        for (i = 0; i < cnt; i++) {
                if (affinity) {
                        node = cpu_to_node(cpumask_first(affinity));
                        mask = affinity;
                        affinity++;
                }
                desc = alloc_desc(start + i, node, flags, mask, owner);
                if (!desc)
                        goto err;
                irq_insert_desc(start + i, desc);
                irq_sysfs_add(start + i, desc);
        }
        bitmap_set(allocated_irqs, start, cnt);
        return start;

err:
        for (i--; i >= 0; i--)
                free_desc(start + i);
        return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
        if (nr > IRQ_BITMAP_BITS)
                return -ENOMEM;
        nr_irqs = nr;
        return 0;
}

int __init early_irq_init(void)
{
        int i, initcnt, node = first_online_node;
        struct irq_desc *desc;

        init_irq_default_affinity();

        /* Let arch update nr_irqs and return the nr of preallocated irqs */
        initcnt = arch_probe_nr_irqs();
        printk(KERN_INFO "NR_IRQS: %d, nr_irqs: %d, preallocated irqs: %d\n",
               NR_IRQS, nr_irqs, initcnt);

        if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
                nr_irqs = IRQ_BITMAP_BITS;

        if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
                initcnt = IRQ_BITMAP_BITS;

        if (initcnt > nr_irqs)
                nr_irqs = initcnt;

        for (i = 0; i < initcnt; i++) {
                desc = alloc_desc(i, node, 0, NULL, NULL);
                set_bit(i, allocated_irqs);
                irq_insert_desc(i, desc);
        }
        return arch_early_irq_init();
}
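
/*
 * Illustrative boot log output from the printk() above (values
 * hypothetical):
 *
 *      NR_IRQS: 4352, nr_irqs: 960, preallocated irqs: 16
 */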

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS-1] = {
                .handle_irq     = handle_bad_irq,
                .depth          = 1,
                .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
        }
};

int __init early_irq_init(void)
{
        int count, i, node = first_online_node;
        struct irq_desc *desc;

        init_irq_default_affinity();

        printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);

        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);

        for (i = 0; i < count; i++) {
                desc[i].kstat_irqs = alloc_percpu(unsigned int);
                alloc_masks(&desc[i], node);
                raw_spin_lock_init(&desc[i].lock);
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
                desc_set_defaults(i, &desc[i], node, NULL, NULL);
        }
        return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);

static void free_desc(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
                              const struct cpumask *affinity,
                              struct module *owner)
{
        u32 i;

        for (i = 0; i < cnt; i++) {
                struct irq_desc *desc = irq_to_desc(start + i);

                desc->owner = owner;
        }
        bitmap_set(allocated_irqs, start, cnt);
        return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
        return -ENOMEM;
}

void irq_mark_irq(unsigned int irq)
{
        mutex_lock(&sparse_irq_lock);
        bitmap_set(allocated_irqs, irq, 1);
        mutex_unlock(&sparse_irq_lock);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
        free_desc(irq);
}
#endif

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:        The irq number to handle
 */
int generic_handle_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc)
                return -EINVAL;
        generic_handle_irq_desc(desc);
        return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
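
/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * demultiplexing handler resolves the child hwirq pending in its chip
 * and hands the corresponding Linux irq number to the core.
 * demux_domain and demux_read_pending() are assumed helpers:
 *
 *      static void demux_handler(struct irq_desc *desc)
 *      {
 *              unsigned int child;
 *
 *              child = irq_find_mapping(demux_domain, demux_read_pending());
 *              if (generic_handle_irq(child))
 *                      pr_warn("no descriptor for irq %u\n", child);
 *      }
 */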

#ifdef CONFIG_HANDLE_DOMAIN_IRQ
/**
 * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain
 * @domain:     The domain where to perform the lookup
 * @hwirq:      The HW irq number to convert to a logical one
 * @lookup:     Whether to perform the domain lookup or not
 * @regs:       Register file coming from the low-level handling code
 *
 * Returns:     0 on success, or -EINVAL if conversion has failed
 */
int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
                        bool lookup, struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        unsigned int irq = hwirq;
        int ret = 0;

        irq_enter();

#ifdef CONFIG_IRQ_DOMAIN
        if (lookup)
                irq = irq_find_mapping(domain, hwirq);
#endif

        /*
         * Some hardware occasionally delivers bogus interrupt numbers.
         * Rather than crashing, do something sensible.
         */
        if (unlikely(!irq || irq >= nr_irqs)) {
                ack_bad_irq(irq);
                ret = -EINVAL;
        } else {
                generic_handle_irq(irq);
        }

        irq_exit();
        set_irq_regs(old_regs);
        return ret;
}
#endif
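
/*
 * Usage sketch (hypothetical, not part of this file): architecture
 * low-level entry code usually calls this via the handle_domain_irq()
 * wrapper from <linux/irqdesc.h>, which passes lookup == true.
 * my_intc_domain and my_intc_ack() are assumed helpers:
 *
 *      asmlinkage void my_handle_irq(struct pt_regs *regs)
 *      {
 *              u32 hwirq = my_intc_ack();
 *
 *              handle_domain_irq(my_intc_domain, hwirq, regs);
 *      }
 */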

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:       Start of descriptor range
 * @cnt:        Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
        int i;

        if (from >= nr_irqs || (from + cnt) > nr_irqs)
                return;

        mutex_lock(&sparse_irq_lock);
        for (i = 0; i < cnt; i++)
                free_desc(from + i);

        bitmap_clear(allocated_irqs, from, cnt);
        mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:        Allocate for specific irq number if irq >= 0
 * @from:       Start the search from this irq number
 * @cnt:        Number of consecutive irqs to allocate.
 * @node:       Preferred node on which the irq descriptor should be allocated
 * @owner:      Owning module (can be NULL)
 * @affinity:   Optional pointer to an affinity mask array of size @cnt which
 *              hints where the irq descriptors should be allocated and which
 *              default affinities to use
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
                  struct module *owner, const struct cpumask *affinity)
{
        int start, ret;

        if (!cnt)
                return -EINVAL;

        if (irq >= 0) {
                if (from > irq)
                        return -EINVAL;
                from = irq;
        } else {
                /*
                 * For interrupts which are freely allocated the
                 * architecture can force a lower bound on the @from
                 * argument. x86 uses this to exclude the GSI space.
                 */
                from = arch_dynirq_lower_bound(from);
        }

        mutex_lock(&sparse_irq_lock);

        start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
                                           from, cnt, 0);
        ret = -EEXIST;
        if (irq >= 0 && start != irq)
                goto unlock;

        if (start + cnt > nr_irqs) {
                ret = irq_expand_nr_irqs(start + cnt);
                if (ret)
                        goto unlock;
        }
        ret = alloc_descs(start, cnt, node, affinity, owner);
unlock:
        mutex_unlock(&sparse_irq_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
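
/*
 * Usage sketch (hypothetical): callers normally use the wrapper macros
 * from <linux/irq.h>, e.g. irq_alloc_descs(), which supplies THIS_MODULE
 * and a NULL affinity array. To allocate four consecutive irqs at or
 * above 64 on the local node:
 *
 *      int base = irq_alloc_descs(-1, 64, 4, numa_node_id());
 *
 *      if (base < 0)
 *              return base;    // -EINVAL, -EEXIST or -ENOMEM
 *      ...
 *      irq_free_descs(base, 4);
 */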

#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
/**
 * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware
 * @cnt:        number of interrupts to allocate
 * @node:       node on which to allocate
 *
 * Returns an interrupt number > 0, or 0 if the allocation fails.
 */
unsigned int irq_alloc_hwirqs(int cnt, int node)
{
        int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL, NULL);

        if (irq < 0)
                return 0;

        for (i = irq; cnt > 0; i++, cnt--) {
                if (arch_setup_hwirq(i, node))
                        goto err;
                irq_clear_status_flags(i, _IRQ_NOREQUEST);
        }
        return irq;

err:
        for (i--; i >= irq; i--) {
                irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
                arch_teardown_hwirq(i);
        }
        irq_free_descs(irq, cnt);
        return 0;
}
EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);

/**
 * irq_free_hwirqs - Free irq descriptor and cleanup the hardware
 * @from:       Free from irq number
 * @cnt:        number of interrupts to free
 */
void irq_free_hwirqs(unsigned int from, int cnt)
{
        int i, j;

        for (i = from, j = cnt; j > 0; i++, j--) {
                irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
                arch_teardown_hwirq(i);
        }
        irq_free_descs(from, cnt);
}
EXPORT_SYMBOL_GPL(irq_free_hwirqs);
#endif

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:     where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
        return find_next_bit(allocated_irqs, nr_irqs, offset);
}
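
/*
 * Illustrative use: irq_get_next_irq() backs the for_each_active_irq()
 * iterator in internals.h, which walks all allocated irq numbers:
 *
 *      unsigned int irq;
 *
 *      for (irq = irq_get_next_irq(0); irq < nr_irqs;
 *           irq = irq_get_next_irq(irq + 1))
 *              ...
 */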

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
                    unsigned int check)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (desc) {
                if (check & _IRQ_DESC_CHECK) {
                        if ((check & _IRQ_DESC_PERCPU) &&
                            !irq_settings_is_per_cpu_devid(desc))
                                return NULL;

                        if (!(check & _IRQ_DESC_PERCPU) &&
                            irq_settings_is_per_cpu_devid(desc))
                                return NULL;
                }

                if (bus)
                        chip_bus_lock(desc);
                raw_spin_lock_irqsave(&desc->lock, *flags);
        }
        return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        if (bus)
                chip_bus_sync_unlock(desc);
}
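
/*
 * Usage sketch (mirrors the irq_get_desc_buslock()/irq_put_desc_busunlock()
 * wrappers in internals.h): callers bracket descriptor updates like this:
 *
 *      unsigned long flags;
 *      struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
 *
 *      if (!desc)
 *              return -EINVAL;
 *      ... modify the descriptor ...
 *      irq_put_desc_busunlock(desc, flags);
 */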

int irq_set_percpu_devid_partition(unsigned int irq,
                                   const struct cpumask *affinity)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc)
                return -EINVAL;

        if (desc->percpu_enabled)
                return -EINVAL;

        desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

        if (!desc->percpu_enabled)
                return -ENOMEM;

        if (affinity)
                desc->percpu_affinity = affinity;
        else
                desc->percpu_affinity = cpu_possible_mask;

        irq_set_percpu_devid_flags(irq);
        return 0;
}

int irq_set_percpu_devid(unsigned int irq)
{
        return irq_set_percpu_devid_partition(irq, NULL);
}
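
/*
 * Usage sketch (hypothetical): per-CPU interrupts such as timer PPIs are
 * marked with irq_set_percpu_devid() before being requested with a
 * per-cpu dev_id. my_handler and my_percpu_dev are assumed:
 *
 *      irq_set_percpu_devid(irq);
 *      err = request_percpu_irq(irq, my_handler, "my-timer", &my_percpu_dev);
 */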

int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc || !desc->percpu_enabled)
                return -EINVAL;

        if (affinity)
                cpumask_copy(affinity, desc->percpu_affinity);

        return 0;
}

void kstat_incr_irq_this_cpu(unsigned int irq)
{
        kstat_incr_irqs_this_cpu(irq_to_desc(irq));
}

/**
 * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
 * @irq:        The interrupt number
 * @cpu:        The cpu number
 *
 * Returns the sum of interrupt counts on @cpu since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc && desc->kstat_irqs ?
                        *per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

/**
 * kstat_irqs - Get the statistics for an interrupt
 * @irq:        The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int cpu;
        unsigned int sum = 0;

        if (!desc || !desc->kstat_irqs)
                return 0;
        for_each_possible_cpu(cpu)
                sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
        return sum;
}

/**
 * kstat_irqs_usr - Get the statistics for an interrupt
 * @irq:        The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. Contrary to kstat_irqs() this can be called from any
 * preemptible context. It's protected against concurrent removal of
 * an interrupt descriptor when sparse irqs are enabled.
 */
unsigned int kstat_irqs_usr(unsigned int irq)
{
        unsigned int sum;

        irq_lock_sparse();
        sum = kstat_irqs(irq);
        irq_unlock_sparse();
        return sum;
}
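
/*
 * Usage note (illustrative): readers running in preemptible context,
 * such as the /proc/interrupts code, use kstat_irqs_usr() so the
 * descriptor cannot be freed underneath them:
 *
 *      unsigned int count = kstat_irqs_usr(irq);
 *
 * Code that already guarantees the descriptor's lifetime can sum the
 * counters directly via kstat_irqs(irq).
 */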