1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
4 * Author: Marc Zyngier <marc.zyngier@arm.com>
7 #include <linux/acpi.h>
8 #include <linux/acpi_iort.h>
9 #include <linux/bitfield.h>
10 #include <linux/bitmap.h>
11 #include <linux/cpu.h>
12 #include <linux/crash_dump.h>
13 #include <linux/delay.h>
14 #include <linux/dma-iommu.h>
15 #include <linux/efi.h>
16 #include <linux/interrupt.h>
17 #include <linux/irqdomain.h>
18 #include <linux/list.h>
19 #include <linux/log2.h>
20 #include <linux/memblock.h>
22 #include <linux/msi.h>
24 #include <linux/of_address.h>
25 #include <linux/of_irq.h>
26 #include <linux/of_pci.h>
27 #include <linux/of_platform.h>
28 #include <linux/percpu.h>
29 #include <linux/slab.h>
30 #include <linux/syscore_ops.h>
32 #include <linux/irqchip.h>
33 #include <linux/irqchip/arm-gic-v3.h>
34 #include <linux/irqchip/arm-gic-v4.h>
36 #include <asm/cputype.h>
37 #include <asm/exception.h>
39 #include "irq-gic-common.h"
41 #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
42 #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
43 #define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
44 #define ITS_FLAGS_SAVE_SUSPEND_STATE (1ULL << 3)
46 #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
47 #define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1)
49 static u32 lpi_id_bits;
52 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
53 * deal with (one configuration byte per interrupt). PENDBASE has to
54 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
56 #define LPI_NRBITS lpi_id_bits
57 #define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K)
58 #define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
60 #define LPI_PROP_DEFAULT_PRIO GICD_INT_DEF_PRI
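/*
 * Illustrative sizing sketch (not part of the driver), assuming the
 * maximum of 16 ID bits: the property table needs one byte per ID,
 * and the pending table one bit per ID, both rounded up to the
 * mandatory 64kB alignment.
 */
static __maybe_unused void lpi_table_sizing_example(void)
{
	u32 id_bits = 16;				/* assumed value of lpi_id_bits */
	u64 prop_sz = ALIGN(BIT(id_bits), SZ_64K);	/* 64kB: one byte per ID */
	u64 pend_sz = ALIGN(BIT(id_bits) / 8, SZ_64K);	/* 8kB of bits, aligned up to 64kB */

	WARN_ON(prop_sz != SZ_64K || pend_sz != SZ_64K);
}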
63 * Collection structure - just an ID, and a redistributor address to
64 * ping. We use one per CPU as a bag of interrupts assigned to this CPU.
67 struct its_collection {
73 * The ITS_BASER structure - contains memory information, cached
74 * value of BASER register configuration and ITS page size.
86 * The ITS structure - contains most of the infrastructure, with the
87 * top-level MSI domain, the command queue, the collections, and the
88 * list of devices writing to it.
90 * dev_alloc_lock has to be taken for device allocations, while the
91 * spinlock must be taken to parse data structures such as the device list.
96 struct mutex dev_alloc_lock;
97 struct list_head entry;
99 phys_addr_t phys_base;
100 struct its_cmd_block *cmd_base;
101 struct its_cmd_block *cmd_write;
102 struct its_baser tables[GITS_BASER_NR_REGS];
103 struct its_collection *collections;
104 struct fwnode_handle *fwnode_handle;
105 u64 (*get_msi_base)(struct its_device *its_dev);
110 struct list_head its_device_list;
112 unsigned long list_nr;
114 unsigned int msi_domain_flags;
115 u32 pre_its_base; /* for Socionext Synquacer */
116 int vlpi_redist_offset;
119 #define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS))
120 #define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP))
121 #define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
123 #define ITS_ITT_ALIGN SZ_256
125 /* The maximum number of VPEID bits supported by VLPI commands */
126 #define ITS_MAX_VPEID_BITS \
129 if (gic_rdists->has_rvpeid && \
130 gic_rdists->gicd_typer2 & GICD_TYPER2_VIL) \
131 nvpeid = 1 + (gic_rdists->gicd_typer2 & \
136 #define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS))
138 /* Convert page order to size in bytes */
139 #define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
141 struct event_lpi_map {
142 unsigned long *lpi_map;
144 irq_hw_number_t lpi_base;
146 raw_spinlock_t vlpi_lock;
148 struct its_vlpi_map *vlpi_maps;
153 * The ITS view of a device - belongs to an ITS, owns an interrupt
154 * translation table, and a list of interrupts. If some of its
155 * LPIs are injected into a guest (GICv4), the event_map.vm field
156 * indicates which one.
159 struct list_head entry;
160 struct its_node *its;
161 struct event_lpi_map event_map;
170 struct its_device *dev;
171 struct its_vpe **vpes;
175 static LIST_HEAD(its_nodes);
176 static DEFINE_RAW_SPINLOCK(its_lock);
177 static struct rdists *gic_rdists;
178 static struct irq_domain *its_parent;
180 static unsigned long its_list_map;
181 static u16 vmovp_seq_num;
182 static DEFINE_RAW_SPINLOCK(vmovp_lock);
184 static DEFINE_IDA(its_vpeid_ida);
186 #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
187 #define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu))
188 #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
189 #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
191 static u16 get_its_list(struct its_vm *vm)
193 struct its_node *its;
194 unsigned long its_list = 0;
196 list_for_each_entry(its, &its_nodes, entry) {
200 if (vm->vlpi_count[its->list_nr])
201 __set_bit(its->list_nr, &its_list);
204 return (u16)its_list;
207 static inline u32 its_get_event_id(struct irq_data *d)
209 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
210 return d->hwirq - its_dev->event_map.lpi_base;
213 static struct its_collection *dev_event_to_col(struct its_device *its_dev,
216 struct its_node *its = its_dev->its;
218 return its->collections + its_dev->event_map.col_map[event];
221 static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
224 if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))
227 return &its_dev->event_map.vlpi_maps[event];
230 static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
232 if (irqd_is_forwarded_to_vcpu(d)) {
233 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
234 u32 event = its_get_event_id(d);
236 return dev_event_to_vlpi_map(its_dev, event);
242 static int irq_to_cpuid(struct irq_data *d)
244 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
245 struct its_vlpi_map *map = get_vlpi_map(d);
248 return map->vpe->col_idx;
250 return its_dev->event_map.col_map[its_get_event_id(d)];
253 static struct its_collection *valid_col(struct its_collection *col)
255 if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
261 static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
263 if (valid_col(its->collections + vpe->col_idx))
270 * ITS command descriptors - parameters to be encoded in a command
273 struct its_cmd_desc {
276 struct its_device *dev;
281 struct its_device *dev;
286 struct its_device *dev;
291 struct its_device *dev;
296 struct its_collection *col;
301 struct its_device *dev;
307 struct its_device *dev;
308 struct its_collection *col;
313 struct its_device *dev;
318 struct its_collection *col;
327 struct its_collection *col;
333 struct its_device *dev;
341 struct its_device *dev;
348 struct its_collection *col;
360 * The ITS command block, which is what the ITS actually parses.
362 struct its_cmd_block {
365 __le64 raw_cmd_le[4];
369 #define ITS_CMD_QUEUE_SZ SZ_64K
370 #define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
372 typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
373 struct its_cmd_block *,
374 struct its_cmd_desc *);
376 typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
377 struct its_cmd_block *,
378 struct its_cmd_desc *);
380 static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
382 u64 mask = GENMASK_ULL(h, l);
384 *raw_cmd |= (val << l) & mask;
387 static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
389 its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
392 static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
394 its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
397 static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
399 its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
402 static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
404 its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
407 static void its_encode_size(struct its_cmd_block *cmd, u8 size)
409 its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
412 static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
414 its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
417 static void its_encode_valid(struct its_cmd_block *cmd, int valid)
419 its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
422 static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
424 its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
427 static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
429 its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
432 static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
434 its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
437 static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
439 its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
442 static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
444 its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
447 static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
449 its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
452 static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
454 its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
457 static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
459 its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
462 static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
464 its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
467 static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
469 its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
472 static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa)
474 its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16);
477 static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc)
479 its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8);
482 static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz)
484 its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9);
487 static void its_encode_vmapp_default_db(struct its_cmd_block *cmd,
490 its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0);
493 static void its_encode_vmovp_default_db(struct its_cmd_block *cmd,
496 its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0);
499 static void its_encode_db(struct its_cmd_block *cmd, bool db)
501 its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
504 static inline void its_fixup_cmd(struct its_cmd_block *cmd)
506 /* Let's fixup BE commands */
507 cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);
508 cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
509 cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
510 cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);
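/*
 * Minimal sketch (not called by the driver) of how the encoders above
 * combine into a command: an INT places the opcode and DeviceID in DW0
 * and the EventID in DW1, and its_fixup_cmd() then converts the block
 * to little endian for the ITS. The DeviceID/EventID values are
 * hypothetical.
 */
static __maybe_unused void its_encode_int_example(struct its_cmd_block *cmd)
{
	its_encode_cmd(cmd, GITS_CMD_INT);	/* DW0[7:0]: command opcode */
	its_encode_devid(cmd, 0x1234);		/* DW0[63:32]: DeviceID */
	its_encode_event_id(cmd, 7);		/* DW1[31:0]: EventID */
	its_fixup_cmd(cmd);
}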
513 static struct its_collection *its_build_mapd_cmd(struct its_node *its,
514 struct its_cmd_block *cmd,
515 struct its_cmd_desc *desc)
517 unsigned long itt_addr;
518 u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
520 itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
521 itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);
523 its_encode_cmd(cmd, GITS_CMD_MAPD);
524 its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
525 its_encode_size(cmd, size - 1);
526 its_encode_itt(cmd, itt_addr);
527 its_encode_valid(cmd, desc->its_mapd_cmd.valid);
534 static struct its_collection *its_build_mapc_cmd(struct its_node *its,
535 struct its_cmd_block *cmd,
536 struct its_cmd_desc *desc)
538 its_encode_cmd(cmd, GITS_CMD_MAPC);
539 its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
540 its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
541 its_encode_valid(cmd, desc->its_mapc_cmd.valid);
545 return desc->its_mapc_cmd.col;
548 static struct its_collection *its_build_mapti_cmd(struct its_node *its,
549 struct its_cmd_block *cmd,
550 struct its_cmd_desc *desc)
552 struct its_collection *col;
554 col = dev_event_to_col(desc->its_mapti_cmd.dev,
555 desc->its_mapti_cmd.event_id);
557 its_encode_cmd(cmd, GITS_CMD_MAPTI);
558 its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
559 its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
560 its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
561 its_encode_collection(cmd, col->col_id);
565 return valid_col(col);
568 static struct its_collection *its_build_movi_cmd(struct its_node *its,
569 struct its_cmd_block *cmd,
570 struct its_cmd_desc *desc)
572 struct its_collection *col;
574 col = dev_event_to_col(desc->its_movi_cmd.dev,
575 desc->its_movi_cmd.event_id);
577 its_encode_cmd(cmd, GITS_CMD_MOVI);
578 its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
579 its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
580 its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
584 return valid_col(col);
587 static struct its_collection *its_build_discard_cmd(struct its_node *its,
588 struct its_cmd_block *cmd,
589 struct its_cmd_desc *desc)
591 struct its_collection *col;
593 col = dev_event_to_col(desc->its_discard_cmd.dev,
594 desc->its_discard_cmd.event_id);
596 its_encode_cmd(cmd, GITS_CMD_DISCARD);
597 its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
598 its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
602 return valid_col(col);
605 static struct its_collection *its_build_inv_cmd(struct its_node *its,
606 struct its_cmd_block *cmd,
607 struct its_cmd_desc *desc)
609 struct its_collection *col;
611 col = dev_event_to_col(desc->its_inv_cmd.dev,
612 desc->its_inv_cmd.event_id);
614 its_encode_cmd(cmd, GITS_CMD_INV);
615 its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
616 its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
620 return valid_col(col);
623 static struct its_collection *its_build_int_cmd(struct its_node *its,
624 struct its_cmd_block *cmd,
625 struct its_cmd_desc *desc)
627 struct its_collection *col;
629 col = dev_event_to_col(desc->its_int_cmd.dev,
630 desc->its_int_cmd.event_id);
632 its_encode_cmd(cmd, GITS_CMD_INT);
633 its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
634 its_encode_event_id(cmd, desc->its_int_cmd.event_id);
638 return valid_col(col);
641 static struct its_collection *its_build_clear_cmd(struct its_node *its,
642 struct its_cmd_block *cmd,
643 struct its_cmd_desc *desc)
645 struct its_collection *col;
647 col = dev_event_to_col(desc->its_clear_cmd.dev,
648 desc->its_clear_cmd.event_id);
650 its_encode_cmd(cmd, GITS_CMD_CLEAR);
651 its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
652 its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
656 return valid_col(col);
659 static struct its_collection *its_build_invall_cmd(struct its_node *its,
660 struct its_cmd_block *cmd,
661 struct its_cmd_desc *desc)
663 its_encode_cmd(cmd, GITS_CMD_INVALL);
664 its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);
671 static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
672 struct its_cmd_block *cmd,
673 struct its_cmd_desc *desc)
675 its_encode_cmd(cmd, GITS_CMD_VINVALL);
676 its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
680 return valid_vpe(its, desc->its_vinvall_cmd.vpe);
683 static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
684 struct its_cmd_block *cmd,
685 struct its_cmd_desc *desc)
687 unsigned long vpt_addr, vconf_addr;
691 its_encode_cmd(cmd, GITS_CMD_VMAPP);
692 its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
693 its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
695 if (!desc->its_vmapp_cmd.valid) {
697 alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
698 its_encode_alloc(cmd, alloc);
704 vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
705 target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
707 its_encode_target(cmd, target);
708 its_encode_vpt_addr(cmd, vpt_addr);
709 its_encode_vpt_size(cmd, LPI_NRBITS - 1);
714 vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
716 alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
718 its_encode_alloc(cmd, alloc);
720 /* We can only signal PTZ when alloc==1. Why do we have two bits? */
721 its_encode_ptz(cmd, alloc);
722 its_encode_vconf_addr(cmd, vconf_addr);
723 its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);
728 return valid_vpe(its, desc->its_vmapp_cmd.vpe);
731 static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
732 struct its_cmd_block *cmd,
733 struct its_cmd_desc *desc)
737 if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled)
738 db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
742 its_encode_cmd(cmd, GITS_CMD_VMAPTI);
743 its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
744 its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
745 its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
746 its_encode_db_phys_id(cmd, db);
747 its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
751 return valid_vpe(its, desc->its_vmapti_cmd.vpe);
754 static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
755 struct its_cmd_block *cmd,
756 struct its_cmd_desc *desc)
760 if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled)
761 db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
765 its_encode_cmd(cmd, GITS_CMD_VMOVI);
766 its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
767 its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
768 its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
769 its_encode_db_phys_id(cmd, db);
770 its_encode_db_valid(cmd, true);
774 return valid_vpe(its, desc->its_vmovi_cmd.vpe);
777 static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
778 struct its_cmd_block *cmd,
779 struct its_cmd_desc *desc)
783 target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
784 its_encode_cmd(cmd, GITS_CMD_VMOVP);
785 its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
786 its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
787 its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
788 its_encode_target(cmd, target);
791 its_encode_db(cmd, true);
792 its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi);
797 return valid_vpe(its, desc->its_vmovp_cmd.vpe);
800 static struct its_vpe *its_build_vinv_cmd(struct its_node *its,
801 struct its_cmd_block *cmd,
802 struct its_cmd_desc *desc)
804 struct its_vlpi_map *map;
806 map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev,
807 desc->its_inv_cmd.event_id);
809 its_encode_cmd(cmd, GITS_CMD_INV);
810 its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
811 its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
815 return valid_vpe(its, map->vpe);
818 static struct its_vpe *its_build_vint_cmd(struct its_node *its,
819 struct its_cmd_block *cmd,
820 struct its_cmd_desc *desc)
822 struct its_vlpi_map *map;
824 map = dev_event_to_vlpi_map(desc->its_int_cmd.dev,
825 desc->its_int_cmd.event_id);
827 its_encode_cmd(cmd, GITS_CMD_INT);
828 its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
829 its_encode_event_id(cmd, desc->its_int_cmd.event_id);
833 return valid_vpe(its, map->vpe);
836 static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
837 struct its_cmd_block *cmd,
838 struct its_cmd_desc *desc)
840 struct its_vlpi_map *map;
842 map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev,
843 desc->its_clear_cmd.event_id);
845 its_encode_cmd(cmd, GITS_CMD_CLEAR);
846 its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
847 its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
851 return valid_vpe(its, map->vpe);
854 static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
855 struct its_cmd_block *cmd,
856 struct its_cmd_desc *desc)
858 if (WARN_ON(!is_v4_1(its)))
861 its_encode_cmd(cmd, GITS_CMD_INVDB);
862 its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id);
866 return valid_vpe(its, desc->its_invdb_cmd.vpe);
869 static u64 its_cmd_ptr_to_offset(struct its_node *its,
870 struct its_cmd_block *ptr)
872 return (ptr - its->cmd_base) * sizeof(*ptr);
875 static int its_queue_full(struct its_node *its)
880 widx = its->cmd_write - its->cmd_base;
881 ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
883 /* This is incredibly unlikely to happen, unless the ITS locks up. */
884 if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
890 static struct its_cmd_block *its_allocate_entry(struct its_node *its)
892 struct its_cmd_block *cmd;
893 u32 count = 1000000; /* 1s! */
895 while (its_queue_full(its)) {
898 pr_err_ratelimited("ITS queue not draining\n");
905 cmd = its->cmd_write++;
907 /* Handle queue wrapping */
908 if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
909 its->cmd_write = its->cmd_base;
920 static struct its_cmd_block *its_post_commands(struct its_node *its)
922 u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
924 writel_relaxed(wr, its->base + GITS_CWRITER);
926 return its->cmd_write;
929 static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
932 * Make sure the commands written to memory are observable by
935 if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
936 gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
941 static int its_wait_for_range_completion(struct its_node *its,
943 struct its_cmd_block *to)
945 u64 rd_idx, to_idx, linear_idx;
946 u32 count = 1000000; /* 1s! */
948 /* Linearize to_idx if the command set has wrapped around */
949 to_idx = its_cmd_ptr_to_offset(its, to);
950 if (to_idx < prev_idx)
951 to_idx += ITS_CMD_QUEUE_SZ;
953 linear_idx = prev_idx;
958 rd_idx = readl_relaxed(its->base + GITS_CREADR);
961 * Compute the read pointer progress, taking the
962 * potential wrap-around into account.
964 delta = rd_idx - prev_idx;
965 if (rd_idx < prev_idx)
966 delta += ITS_CMD_QUEUE_SZ;
969 if (linear_idx >= to_idx)
974 pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
986 /* Warning, macro hell follows */
987 #define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \
988 void name(struct its_node *its, \
990 struct its_cmd_desc *desc) \
992 struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \
993 synctype *sync_obj; \
994 unsigned long flags; \
997 raw_spin_lock_irqsave(&its->lock, flags); \
999 cmd = its_allocate_entry(its); \
1000 if (!cmd) { /* We're soooooo screewed... */ \
1001 raw_spin_unlock_irqrestore(&its->lock, flags); \
1004 sync_obj = builder(its, cmd, desc); \
1005 its_flush_cmd(its, cmd); \
1008 sync_cmd = its_allocate_entry(its); \
1012 buildfn(its, sync_cmd, sync_obj); \
1013 its_flush_cmd(its, sync_cmd); \
1017 rd_idx = readl_relaxed(its->base + GITS_CREADR); \
1018 next_cmd = its_post_commands(its); \
1019 raw_spin_unlock_irqrestore(&its->lock, flags); \
1021 if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \
1022 pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
1025 static void its_build_sync_cmd(struct its_node *its,
1026 struct its_cmd_block *sync_cmd,
1027 struct its_collection *sync_col)
1029 its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
1030 its_encode_target(sync_cmd, sync_col->target_address);
1032 its_fixup_cmd(sync_cmd);
1035 static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
1036 struct its_collection, its_build_sync_cmd)
1038 static void its_build_vsync_cmd(struct its_node *its,
1039 struct its_cmd_block *sync_cmd,
1040 struct its_vpe *sync_vpe)
1042 its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
1043 its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
1045 its_fixup_cmd(sync_cmd);
1048 static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
1049 struct its_vpe, its_build_vsync_cmd)
1051 static void its_send_int(struct its_device *dev, u32 event_id)
1053 struct its_cmd_desc desc;
1055 desc.its_int_cmd.dev = dev;
1056 desc.its_int_cmd.event_id = event_id;
1058 its_send_single_command(dev->its, its_build_int_cmd, &desc);
1061 static void its_send_clear(struct its_device *dev, u32 event_id)
1063 struct its_cmd_desc desc;
1065 desc.its_clear_cmd.dev = dev;
1066 desc.its_clear_cmd.event_id = event_id;
1068 its_send_single_command(dev->its, its_build_clear_cmd, &desc);
1071 static void its_send_inv(struct its_device *dev, u32 event_id)
1073 struct its_cmd_desc desc;
1075 desc.its_inv_cmd.dev = dev;
1076 desc.its_inv_cmd.event_id = event_id;
1078 its_send_single_command(dev->its, its_build_inv_cmd, &desc);
1081 static void its_send_mapd(struct its_device *dev, int valid)
1083 struct its_cmd_desc desc;
1085 desc.its_mapd_cmd.dev = dev;
1086 desc.its_mapd_cmd.valid = !!valid;
1088 its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
1091 static void its_send_mapc(struct its_node *its, struct its_collection *col,
1094 struct its_cmd_desc desc;
1096 desc.its_mapc_cmd.col = col;
1097 desc.its_mapc_cmd.valid = !!valid;
1099 its_send_single_command(its, its_build_mapc_cmd, &desc);
1102 static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
1104 struct its_cmd_desc desc;
1106 desc.its_mapti_cmd.dev = dev;
1107 desc.its_mapti_cmd.phys_id = irq_id;
1108 desc.its_mapti_cmd.event_id = id;
1110 its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
1113 static void its_send_movi(struct its_device *dev,
1114 struct its_collection *col, u32 id)
1116 struct its_cmd_desc desc;
1118 desc.its_movi_cmd.dev = dev;
1119 desc.its_movi_cmd.col = col;
1120 desc.its_movi_cmd.event_id = id;
1122 its_send_single_command(dev->its, its_build_movi_cmd, &desc);
1125 static void its_send_discard(struct its_device *dev, u32 id)
1127 struct its_cmd_desc desc;
1129 desc.its_discard_cmd.dev = dev;
1130 desc.its_discard_cmd.event_id = id;
1132 its_send_single_command(dev->its, its_build_discard_cmd, &desc);
1135 static void its_send_invall(struct its_node *its, struct its_collection *col)
1137 struct its_cmd_desc desc;
1139 desc.its_invall_cmd.col = col;
1141 its_send_single_command(its, its_build_invall_cmd, &desc);
1144 static void its_send_vmapti(struct its_device *dev, u32 id)
1146 struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
1147 struct its_cmd_desc desc;
1149 desc.its_vmapti_cmd.vpe = map->vpe;
1150 desc.its_vmapti_cmd.dev = dev;
1151 desc.its_vmapti_cmd.virt_id = map->vintid;
1152 desc.its_vmapti_cmd.event_id = id;
1153 desc.its_vmapti_cmd.db_enabled = map->db_enabled;
1155 its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
1158 static void its_send_vmovi(struct its_device *dev, u32 id)
1160 struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
1161 struct its_cmd_desc desc;
1163 desc.its_vmovi_cmd.vpe = map->vpe;
1164 desc.its_vmovi_cmd.dev = dev;
1165 desc.its_vmovi_cmd.event_id = id;
1166 desc.its_vmovi_cmd.db_enabled = map->db_enabled;
1168 its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
1171 static void its_send_vmapp(struct its_node *its,
1172 struct its_vpe *vpe, bool valid)
1174 struct its_cmd_desc desc;
1176 desc.its_vmapp_cmd.vpe = vpe;
1177 desc.its_vmapp_cmd.valid = valid;
1178 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
1180 its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
1183 static void its_send_vmovp(struct its_vpe *vpe)
1185 struct its_cmd_desc desc = {};
1186 struct its_node *its;
1187 unsigned long flags;
1188 int col_id = vpe->col_idx;
1190 desc.its_vmovp_cmd.vpe = vpe;
1192 if (!its_list_map) {
1193 its = list_first_entry(&its_nodes, struct its_node, entry);
1194 desc.its_vmovp_cmd.col = &its->collections[col_id];
1195 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1200 * Yet another marvel of the architecture. If using the
1201 * its_list "feature", we need to make sure that all ITSs
1202 * receive all VMOVP commands in the same order. The only way
1203 * to guarantee this is to make vmovp a serialization point.
1207 raw_spin_lock_irqsave(&vmovp_lock, flags);
1209 desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
1210 desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
1213 list_for_each_entry(its, &its_nodes, entry) {
1217 if (!vpe->its_vm->vlpi_count[its->list_nr])
1220 desc.its_vmovp_cmd.col = &its->collections[col_id];
1221 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1224 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1227 static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
1229 struct its_cmd_desc desc;
1231 desc.its_vinvall_cmd.vpe = vpe;
1232 its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
1235 static void its_send_vinv(struct its_device *dev, u32 event_id)
1237 struct its_cmd_desc desc;
1240 * There is no real VINV command. This is just a normal INV,
1241 * with a VSYNC instead of a SYNC.
1243 desc.its_inv_cmd.dev = dev;
1244 desc.its_inv_cmd.event_id = event_id;
1246 its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);
1249 static void its_send_vint(struct its_device *dev, u32 event_id)
1251 struct its_cmd_desc desc;
1254 * There is no real VINT command. This is just a normal INT,
1255 * with a VSYNC instead of a SYNC.
1257 desc.its_int_cmd.dev = dev;
1258 desc.its_int_cmd.event_id = event_id;
1260 its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);
1263 static void its_send_vclear(struct its_device *dev, u32 event_id)
1265 struct its_cmd_desc desc;
1268 * There is no real VCLEAR command. This is just a normal CLEAR,
1269 * with a VSYNC instead of a SYNC.
1271 desc.its_clear_cmd.dev = dev;
1272 desc.its_clear_cmd.event_id = event_id;
1274 its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
1277 static void its_send_invdb(struct its_node *its, struct its_vpe *vpe)
1279 struct its_cmd_desc desc;
1281 desc.its_invdb_cmd.vpe = vpe;
1282 its_send_single_vcommand(its, its_build_invdb_cmd, &desc);
1286 * irqchip functions - assumes MSI, mostly.
1288 static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
1290 struct its_vlpi_map *map = get_vlpi_map(d);
1291 irq_hw_number_t hwirq;
1296 va = page_address(map->vm->vprop_page);
1297 hwirq = map->vintid;
1299 /* Remember the updated property */
1300 map->properties &= ~clr;
1301 map->properties |= set | LPI_PROP_GROUP1;
1303 va = gic_rdists->prop_table_va;
1307 cfg = va + hwirq - 8192;
1309 *cfg |= set | LPI_PROP_GROUP1;
1312 * Make the above write visible to the redistributors.
1313 * And yes, we're flushing exactly: One. Single. Byte.
1316 if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
1317 gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
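/*
 * Illustrative helper (not used by the driver): each LPI owns one
 * configuration byte, indexed by (hwirq - 8192) because LPI INTIDs
 * start at 8192. Physical LPI 8200, for instance, lives at offset 8
 * from the start of the property table.
 */
static __maybe_unused u8 *lpi_cfg_byte_example(void *va, irq_hw_number_t hwirq)
{
	return (u8 *)va + hwirq - 8192;
}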
1322 static void wait_for_syncr(void __iomem *rdbase)
1324 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
1328 static void direct_lpi_inv(struct irq_data *d)
1330 struct its_vlpi_map *map = get_vlpi_map(d);
1331 void __iomem *rdbase;
1335 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1337 WARN_ON(!is_v4_1(its_dev->its));
1339 val = GICR_INVLPIR_V;
1340 val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id);
1341 val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid);
1346 /* Target the redistributor this LPI is currently routed to */
1347 rdbase = per_cpu_ptr(gic_rdists->rdist, irq_to_cpuid(d))->rd_base;
1348 gic_write_lpir(val, rdbase + GICR_INVLPIR);
1350 wait_for_syncr(rdbase);
1353 static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
1355 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1357 lpi_write_config(d, clr, set);
1358 if (gic_rdists->has_direct_lpi &&
1359 (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d)))
1361 else if (!irqd_is_forwarded_to_vcpu(d))
1362 its_send_inv(its_dev, its_get_event_id(d));
1364 its_send_vinv(its_dev, its_get_event_id(d));
1367 static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
1369 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1370 u32 event = its_get_event_id(d);
1371 struct its_vlpi_map *map;
1374 * GICv4.1 does away with the per-LPI nonsense, nothing to do here.
1377 if (is_v4_1(its_dev->its))
1380 map = dev_event_to_vlpi_map(its_dev, event);
1382 if (map->db_enabled == enable)
1385 map->db_enabled = enable;
1388 * More fun with the architecture:
1390 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
1391 * value or to 1023, depending on the enable bit. But that
1392 * would be issuing a mapping for an /existing/ DevID+EventID
1393 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
1394 * to the /same/ vPE, using this opportunity to adjust the
1395 * doorbell. Mouahahahaha. We loves it, Precious.
1397 its_send_vmovi(its_dev, event);
1400 static void its_mask_irq(struct irq_data *d)
1402 if (irqd_is_forwarded_to_vcpu(d))
1403 its_vlpi_set_doorbell(d, false);
1405 lpi_update_config(d, LPI_PROP_ENABLED, 0);
1408 static void its_unmask_irq(struct irq_data *d)
1410 if (irqd_is_forwarded_to_vcpu(d))
1411 its_vlpi_set_doorbell(d, true);
1413 lpi_update_config(d, 0, LPI_PROP_ENABLED);
1416 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
1420 const struct cpumask *cpu_mask = cpu_online_mask;
1421 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1422 struct its_collection *target_col;
1423 u32 id = its_get_event_id(d);
1425 /* A forwarded interrupt should use irq_set_vcpu_affinity */
1426 if (irqd_is_forwarded_to_vcpu(d))
1429 /* lpi cannot be routed to a redistributor that is on a foreign node */
1430 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
1431 if (its_dev->its->numa_node >= 0) {
1432 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
1433 if (!cpumask_intersects(mask_val, cpu_mask))
1438 cpu = cpumask_any_and(mask_val, cpu_mask);
1440 if (cpu >= nr_cpu_ids)
1443 /* don't set the affinity when the target cpu is the same as the current one */
1444 if (cpu != its_dev->event_map.col_map[id]) {
1445 target_col = &its_dev->its->collections[cpu];
1446 its_send_movi(its_dev, target_col, id);
1447 its_dev->event_map.col_map[id] = cpu;
1448 irq_data_update_effective_affinity(d, cpumask_of(cpu));
1451 return IRQ_SET_MASK_OK_DONE;
1454 static u64 its_irq_get_msi_base(struct its_device *its_dev)
1456 struct its_node *its = its_dev->its;
1458 return its->phys_base + GITS_TRANSLATER;
1461 static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
1463 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1464 struct its_node *its;
1468 addr = its->get_msi_base(its_dev);
1470 msg->address_lo = lower_32_bits(addr);
1471 msg->address_hi = upper_32_bits(addr);
1472 msg->data = its_get_event_id(d);
1474 iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
1477 static int its_irq_set_irqchip_state(struct irq_data *d,
1478 enum irqchip_irq_state which,
1481 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1482 u32 event = its_get_event_id(d);
1484 if (which != IRQCHIP_STATE_PENDING)
1487 if (irqd_is_forwarded_to_vcpu(d)) {
1489 its_send_vint(its_dev, event);
1491 its_send_vclear(its_dev, event);
1494 its_send_int(its_dev, event);
1496 its_send_clear(its_dev, event);
1502 static void its_map_vm(struct its_node *its, struct its_vm *vm)
1504 unsigned long flags;
1506 /* Not using the ITS list? Everything is always mapped. */
1510 raw_spin_lock_irqsave(&vmovp_lock, flags);
1513 * If the VM wasn't mapped yet, iterate over the vpes and get
1516 vm->vlpi_count[its->list_nr]++;
1518 if (vm->vlpi_count[its->list_nr] == 1) {
1521 for (i = 0; i < vm->nr_vpes; i++) {
1522 struct its_vpe *vpe = vm->vpes[i];
1523 struct irq_data *d = irq_get_irq_data(vpe->irq);
1525 /* Map the VPE to the first possible CPU */
1526 vpe->col_idx = cpumask_first(cpu_online_mask);
1527 its_send_vmapp(its, vpe, true);
1528 its_send_vinvall(its, vpe);
1529 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
1533 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1536 static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
1538 unsigned long flags;
1540 /* Not using the ITS list? Everything is always mapped. */
1544 raw_spin_lock_irqsave(&vmovp_lock, flags);
1546 if (!--vm->vlpi_count[its->list_nr]) {
1549 for (i = 0; i < vm->nr_vpes; i++)
1550 its_send_vmapp(its, vm->vpes[i], false);
1553 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1556 static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
1558 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1559 u32 event = its_get_event_id(d);
1565 raw_spin_lock(&its_dev->event_map.vlpi_lock);
1567 if (!its_dev->event_map.vm) {
1568 struct its_vlpi_map *maps;
1570 maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
1577 its_dev->event_map.vm = info->map->vm;
1578 its_dev->event_map.vlpi_maps = maps;
1579 } else if (its_dev->event_map.vm != info->map->vm) {
1584 /* Get our private copy of the mapping information */
1585 its_dev->event_map.vlpi_maps[event] = *info->map;
1587 if (irqd_is_forwarded_to_vcpu(d)) {
1588 /* Already mapped, move it around */
1589 its_send_vmovi(its_dev, event);
1591 /* Ensure all the VPEs are mapped on this ITS */
1592 its_map_vm(its_dev->its, info->map->vm);
1595 * Flag the interrupt as forwarded so that we can
1596 * start poking the virtual property table.
1598 irqd_set_forwarded_to_vcpu(d);
1600 /* Write out the property to the prop table */
1601 lpi_write_config(d, 0xff, info->map->properties);
1603 /* Drop the physical mapping */
1604 its_send_discard(its_dev, event);
1606 /* and install the virtual one */
1607 its_send_vmapti(its_dev, event);
1609 /* Increment the number of VLPIs */
1610 its_dev->event_map.nr_vlpis++;
1614 raw_spin_unlock(&its_dev->event_map.vlpi_lock);
1618 static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
1620 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1621 struct its_vlpi_map *map;
1624 raw_spin_lock(&its_dev->event_map.vlpi_lock);
1626 map = get_vlpi_map(d);
1628 if (!its_dev->event_map.vm || !map) {
1633 /* Copy our mapping information to the incoming request */
1637 raw_spin_unlock(&its_dev->event_map.vlpi_lock);
1641 static int its_vlpi_unmap(struct irq_data *d)
1643 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1644 u32 event = its_get_event_id(d);
1647 raw_spin_lock(&its_dev->event_map.vlpi_lock);
1649 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
1654 /* Drop the virtual mapping */
1655 its_send_discard(its_dev, event);
1657 /* and restore the physical one */
1658 irqd_clr_forwarded_to_vcpu(d);
1659 its_send_mapti(its_dev, d->hwirq, event);
1660 lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
1664 /* Potentially unmap the VM from this ITS */
1665 its_unmap_vm(its_dev->its, its_dev->event_map.vm);
1668 * Drop the refcount and make the device available again if
1669 * this was the last VLPI.
1671 if (!--its_dev->event_map.nr_vlpis) {
1672 its_dev->event_map.vm = NULL;
1673 kfree(its_dev->event_map.vlpi_maps);
1677 raw_spin_unlock(&its_dev->event_map.vlpi_lock);
1681 static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
1683 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1685 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
1688 if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
1689 lpi_update_config(d, 0xff, info->config);
1691 lpi_write_config(d, 0xff, info->config);
1692 its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
1697 static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
1699 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1700 struct its_cmd_info *info = vcpu_info;
1703 if (!is_v4(its_dev->its))
1706 /* Unmap request? */
1708 return its_vlpi_unmap(d);
1710 switch (info->cmd_type) {
1712 return its_vlpi_map(d, info);
1715 return its_vlpi_get(d, info);
1717 case PROP_UPDATE_VLPI:
1718 case PROP_UPDATE_AND_INV_VLPI:
1719 return its_vlpi_prop_update(d, info);
1726 static struct irq_chip its_irq_chip = {
1728 .irq_mask = its_mask_irq,
1729 .irq_unmask = its_unmask_irq,
1730 .irq_eoi = irq_chip_eoi_parent,
1731 .irq_set_affinity = its_set_affinity,
1732 .irq_compose_msi_msg = its_irq_compose_msi_msg,
1733 .irq_set_irqchip_state = its_irq_set_irqchip_state,
1734 .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity,
1739 * How we allocate LPIs:
1741 * lpi_range_list contains ranges of LPIs that are available to
1742 * allocate from. To allocate LPIs, just pick the first range that
1743 * fits the required allocation, and reduce it by the required
1744 * amount. Once empty, remove the range from the list.
1746 * To free a range of LPIs, add a free range to the list, sort it and
1747 * merge the result if the new range happens to be adjacent to an
1748 * already free block.
1750 * The consequence of the above is that allocation cost is low, but
1751 * freeing is expensive. We assume that freeing rarely occurs.
1753 #define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
1755 static DEFINE_MUTEX(lpi_range_lock);
1756 static LIST_HEAD(lpi_range_list);
1759 struct list_head entry;
1764 static struct lpi_range *mk_lpi_range(u32 base, u32 span)
1766 struct lpi_range *range;
1768 range = kmalloc(sizeof(*range), GFP_KERNEL);
1770 range->base_id = base;
1777 static int alloc_lpi_range(u32 nr_lpis, u32 *base)
1779 struct lpi_range *range, *tmp;
1782 mutex_lock(&lpi_range_lock);
1784 list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
1785 if (range->span >= nr_lpis) {
1786 *base = range->base_id;
1787 range->base_id += nr_lpis;
1788 range->span -= nr_lpis;
1790 if (range->span == 0) {
1791 list_del(&range->entry);
1800 mutex_unlock(&lpi_range_lock);
1802 pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
1806 static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b)
1808 if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list)
1810 if (a->base_id + a->span != b->base_id)
1812 b->base_id = a->base_id;
1814 list_del(&a->entry);
1818 static int free_lpi_range(u32 base, u32 nr_lpis)
1820 struct lpi_range *new, *old;
1822 new = mk_lpi_range(base, nr_lpis);
1826 mutex_lock(&lpi_range_lock);
1828 list_for_each_entry_reverse(old, &lpi_range_list, entry) {
1829 if (old->base_id < base)
1833 * old is the last element with ->base_id smaller than base,
1834 * so new goes right after it. If there are no elements with
1835 * ->base_id smaller than base, &old->entry ends up pointing
1836 * at the head of the list, and inserting new at the start of
1837 * the list is the right thing to do in that case as well.
1839 list_add(&new->entry, &old->entry);
1841 * Now check if we can merge with the preceding and/or following ranges.
1844 merge_lpi_ranges(old, new);
1845 merge_lpi_ranges(new, list_next_entry(new, entry));
1847 mutex_unlock(&lpi_range_lock);
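/*
 * Usage sketch for the range allocator above (not called anywhere),
 * assuming the list has been seeded by its_lpi_init(): allocation
 * carves space off the front of the first large-enough range, and
 * freeing re-inserts (and possibly merges) the range.
 */
static __maybe_unused void lpi_range_example(void)
{
	u32 base;

	if (!alloc_lpi_range(32, &base))		/* e.g. base == 8192 on a fresh list */
		WARN_ON(free_lpi_range(base, 32));	/* merged back into the free list */
}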
1851 static int __init its_lpi_init(u32 id_bits)
1853 u32 lpis = (1UL << id_bits) - 8192;
1857 numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
1859 if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
1861 pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
1866 * Initializing the allocator is just the same as freeing the
1867 * full range of LPIs.
1869 err = free_lpi_range(8192, lpis);
1870 pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
1874 static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
1876 unsigned long *bitmap = NULL;
1880 err = alloc_lpi_range(nr_irqs, base);
1885 } while (nr_irqs > 0);
1893 bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC);
1901 *base = *nr_ids = 0;
1906 static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
1908 WARN_ON(free_lpi_range(base, nr_ids));
1912 static void gic_reset_prop_table(void *va)
1914 /* Priority 0xa0, Group-1, disabled */
1915 memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
1917 /* Make sure the GIC will observe the written configuration */
1918 gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
1921 static struct page *its_allocate_prop_table(gfp_t gfp_flags)
1923 struct page *prop_page;
1925 prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
1929 gic_reset_prop_table(page_address(prop_page));
1934 static void its_free_prop_table(struct page *prop_page)
1936 free_pages((unsigned long)page_address(prop_page),
1937 get_order(LPI_PROPBASE_SZ));
1940 static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
1942 phys_addr_t start, end, addr_end;
1946 * We don't bother checking for a kdump kernel as by
1947 * construction, the LPI tables are out of this kernel's memory map.
1950 if (is_kdump_kernel())
1953 addr_end = addr + size - 1;
1955 for_each_reserved_mem_region(i, &start, &end) {
1956 if (addr >= start && addr_end <= end)
1960 /* Not found, not a good sign... */
1961 pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n",
1963 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
1967 static int gic_reserve_range(phys_addr_t addr, unsigned long size)
1969 if (efi_enabled(EFI_CONFIG_TABLES))
1970 return efi_mem_reserve_persistent(addr, size);
1975 static int __init its_setup_lpi_prop_table(void)
1977 if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) {
1980 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
1981 lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1;
1983 gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12);
1984 gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
1987 gic_reset_prop_table(gic_rdists->prop_table_va);
1991 lpi_id_bits = min_t(u32,
1992 GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
1993 ITS_MAX_LPI_NRBITS);
1994 page = its_allocate_prop_table(GFP_NOWAIT);
1996 pr_err("Failed to allocate PROPBASE\n");
2000 gic_rdists->prop_table_pa = page_to_phys(page);
2001 gic_rdists->prop_table_va = page_address(page);
2002 WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa,
2006 pr_info("GICv3: using LPI property table @%pa\n",
2007 &gic_rdists->prop_table_pa);
2009 return its_lpi_init(lpi_id_bits);
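/*
 * Sketch of the pre-allocated PROPBASER decoding above, with a made-up
 * register value: 0x8000000f encodes IDbits = 16 and a property table
 * at physical address 0x80000000.
 */
static __maybe_unused void propbaser_decode_example(void)
{
	u64 val = 0x8000000fULL;				/* hypothetical GICR_PROPBASER */
	u32 id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1;	/* 16 */
	phys_addr_t pa = val & GENMASK_ULL(51, 12);		/* 0x80000000 */

	WARN_ON(id_bits != 16 || pa != 0x80000000ULL);
}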
2012 static const char *its_base_type_string[] = {
2013 [GITS_BASER_TYPE_DEVICE] = "Devices",
2014 [GITS_BASER_TYPE_VCPU] = "Virtual CPUs",
2015 [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)",
2016 [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
2017 [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)",
2018 [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)",
2019 [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)",
2022 static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
2024 u32 idx = baser - its->tables;
2026 return gits_read_baser(its->base + GITS_BASER + (idx << 3));
2029 static void its_write_baser(struct its_node *its, struct its_baser *baser,
2032 u32 idx = baser - its->tables;
2034 gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
2035 baser->val = its_read_baser(its, baser);
2038 static int its_setup_baser(struct its_node *its, struct its_baser *baser,
2039 u64 cache, u64 shr, u32 psz, u32 order,
2042 u64 val = its_read_baser(its, baser);
2043 u64 esz = GITS_BASER_ENTRY_SIZE(val);
2044 u64 type = GITS_BASER_TYPE(val);
2045 u64 baser_phys, tmp;
2051 alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
2052 if (alloc_pages > GITS_BASER_PAGES_MAX) {
2053 pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
2054 &its->phys_base, its_base_type_string[type],
2055 alloc_pages, GITS_BASER_PAGES_MAX);
2056 alloc_pages = GITS_BASER_PAGES_MAX;
2057 order = get_order(GITS_BASER_PAGES_MAX * psz);
2060 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
2064 base = (void *)page_address(page);
2065 baser_phys = virt_to_phys(base);
2067 /* Check if the physical address of the memory is above 48bits */
2068 if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
2070 /* 52bit PA is supported only when PageSize=64K */
2071 if (psz != SZ_64K) {
2072 pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
2073 free_pages((unsigned long)base, order);
2077 /* Convert 52bit PA to 48bit field */
2078 baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
2083 (type << GITS_BASER_TYPE_SHIFT) |
2084 ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
2085 ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
2090 val |= indirect ? GITS_BASER_INDIRECT : 0x0;
2094 val |= GITS_BASER_PAGE_SIZE_4K;
2097 val |= GITS_BASER_PAGE_SIZE_16K;
2100 val |= GITS_BASER_PAGE_SIZE_64K;
2104 its_write_baser(its, baser, val);
2107 if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
2109 * Shareability didn't stick. Just use
2110 * whatever the read reported, which is likely
2111 * to be the only thing this ITS
2112 * supports. If that's zero, make it
2113 * non-cacheable as well.
2115 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
2117 cache = GITS_BASER_nC;
2118 gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
2123 if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
2125 * Page size didn't stick. Let's try a smaller
2126 * size and retry. If we reach 4K, then
2127 * something is horribly wrong...
2129 free_pages((unsigned long)base, order);
2135 goto retry_alloc_baser;
2138 goto retry_alloc_baser;
2143 pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
2144 &its->phys_base, its_base_type_string[type],
2146 free_pages((unsigned long)base, order);
2150 baser->order = order;
2153 tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
2155 pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
2156 &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
2157 its_base_type_string[type],
2158 (unsigned long)virt_to_phys(base),
2159 indirect ? "indirect" : "flat", (int)esz,
2160 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
2165 static bool its_parse_indirect_baser(struct its_node *its,
2166 struct its_baser *baser,
2167 u32 psz, u32 *order, u32 ids)
2169 u64 tmp = its_read_baser(its, baser);
2170 u64 type = GITS_BASER_TYPE(tmp);
2171 u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
2172 u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
2173 u32 new_order = *order;
2174 bool indirect = false;
2176 /* No need to enable Indirection if memory requirement < (psz*2)bytes */
2177 if ((esz << ids) > (psz * 2)) {
2179 * Find out whether hw supports a single or two-level table by
2180 * reading bit at offset '62' after writing '1' to it.
2182 its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
2183 indirect = !!(baser->val & GITS_BASER_INDIRECT);
2187 * Each lvl2 table is one ITS page ('psz') in size. To size the
2188 * lvl1 table, subtract from 'ids' (reported by the ITS hardware)
2189 * the ID bits covered by a single lvl2 table; the lvl1 table then
2190 * needs one GITS_LVL1_ENTRY_SIZE entry per remaining ID.
2193 ids -= ilog2(psz / (int)esz);
2194 esz = GITS_LVL1_ENTRY_SIZE;
2199 * Allocate as many entries as required to fit the
2200 * range of device IDs that the ITS can grok... The ID
2201 * space being incredibly sparse, this results in a
2202 * massive waste of memory if the two-level device table
2203 * feature is not supported by the hardware.
2205 new_order = max_t(u32, get_order(esz << ids), new_order);
2206 if (new_order >= MAX_ORDER) {
2207 new_order = MAX_ORDER - 1;
2208 ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
2209 pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
2210 &its->phys_base, its_base_type_string[type],
2211 device_ids(its), ids);
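/*
 * Worked example for the two-level sizing above (assumed numbers):
 * with 64kB ITS pages, 8-byte device table entries and 32 DeviceID
 * bits, one lvl2 page covers 8192 IDs, so the lvl1 table needs
 * 2^(32 - 13) entries of GITS_LVL1_ENTRY_SIZE bytes, i.e. 4MB instead
 * of a 32GB flat table.
 */
static __maybe_unused void its_two_level_sizing_example(void)
{
	u32 psz = SZ_64K, ids = 32;		/* assumed page size and DevID bits */
	u64 esz = 8;				/* assumed device table entry size */
	u32 lvl2_ids = ilog2(psz / esz);	/* 13: IDs covered by one lvl2 page */
	u64 lvl1_sz = GITS_LVL1_ENTRY_SIZE << (ids - lvl2_ids);

	WARN_ON(lvl1_sz != SZ_4M);
}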
2219 static u32 compute_common_aff(u64 val)
2223 aff = FIELD_GET(GICR_TYPER_AFFINITY, val);
2224 clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val);
2226 return aff & ~(GENMASK(31, 0) >> (clpiaff * 8));
2229 static u32 compute_its_aff(struct its_node *its)
2235 * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute
2236 * the resulting affinity. We then use that to see if this matches our own affinity.
2239 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
2240 val = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet);
2241 val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr);
2242 return compute_common_aff(val);
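/*
 * Illustration of the masking above, with hypothetical values: with
 * CommonLPIAff == 2, only Aff3.Aff2 of the affinity survives, so any
 * RD or ITS agreeing on those two bytes is considered to share vPE
 * tables.
 */
static __maybe_unused u32 common_aff_example(void)
{
	u64 typer = FIELD_PREP(GICR_TYPER_AFFINITY, 0x01020304ULL) |
		    FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, 2);

	return compute_common_aff(typer);	/* 0x01020000 */
}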
2245 static struct its_node *find_sibling_its(struct its_node *cur_its)
2247 struct its_node *its;
2250 if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer))
2253 aff = compute_its_aff(cur_its);
2255 list_for_each_entry(its, &its_nodes, entry) {
2258 if (!is_v4_1(its) || its == cur_its)
2261 if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2264 if (aff != compute_its_aff(its))
2267 /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
2268 baser = its->tables[2].val;
2269 if (!(baser & GITS_BASER_VALID))
2278 static void its_free_tables(struct its_node *its)
2282 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2283 if (its->tables[i].base) {
2284 free_pages((unsigned long)its->tables[i].base,
2285 its->tables[i].order);
2286 its->tables[i].base = NULL;
2291 static int its_alloc_tables(struct its_node *its)
2293 u64 shr = GITS_BASER_InnerShareable;
2294 u64 cache = GITS_BASER_RaWaWb;
2298 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
2299 /* erratum 24313: ignore memory access type */
2300 cache = GITS_BASER_nCnB;
2302 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2303 struct its_baser *baser = its->tables + i;
2304 u64 val = its_read_baser(its, baser);
2305 u64 type = GITS_BASER_TYPE(val);
2306 u32 order = get_order(psz);
2307 bool indirect = false;
2310 case GITS_BASER_TYPE_NONE:
2313 case GITS_BASER_TYPE_DEVICE:
2314 indirect = its_parse_indirect_baser(its, baser,
2319 case GITS_BASER_TYPE_VCPU:
2321 struct its_node *sibling;
2324 if ((sibling = find_sibling_its(its))) {
2325 *baser = sibling->tables[2];
2326 its_write_baser(its, baser, baser->val);
2331 indirect = its_parse_indirect_baser(its, baser,
2333 ITS_MAX_VPEID_BITS);
2337 err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
2339 its_free_tables(its);
2343 /* Update settings which will be used for next BASERn */
2345 cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
2346 shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
2352 static u64 inherit_vpe_l1_table_from_its(void)
2354 struct its_node *its;
2358 val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2359 aff = compute_common_aff(val);
2361 list_for_each_entry(its, &its_nodes, entry) {
2367 if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2370 if (aff != compute_its_aff(its))
2373 /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
2374 baser = its->tables[2].val;
2375 if (!(baser & GITS_BASER_VALID))
2378 /* We have a winner! */
2379 gic_data_rdist()->vpe_l1_base = its->tables[2].base;
2381 val = GICR_VPROPBASER_4_1_VALID;
2382 if (baser & GITS_BASER_INDIRECT)
2383 val |= GICR_VPROPBASER_4_1_INDIRECT;
2384 val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE,
2385 FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser));
2386 switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) {
2387 case GIC_PAGE_SIZE_64K:
2388 addr = GITS_BASER_ADDR_48_to_52(baser);
2391 addr = baser & GENMASK_ULL(47, 12);
2394 val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12);
2395 val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
2396 FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
2397 val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
2398 FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
2399 val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);
2407 static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
2413 val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2414 aff = compute_common_aff(val);
2416 for_each_possible_cpu(cpu) {
2417 void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
2419 if (!base || cpu == smp_processor_id())
2422 val = gic_read_typer(base + GICR_TYPER);
2423 if (aff != compute_common_aff(val))
2427 * At this point, we have a victim. This particular CPU
2428 * has already booted, and has an affinity that matches
2429 * ours wrt CommonLPIAff. Let's use its own VPROPBASER.
2430 * Make sure we don't write the Z bit in that case.
2432 val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
2433 val &= ~GICR_VPROPBASER_4_1_Z;
2435 gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base;
2436 *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;
2444 static bool allocate_vpe_l2_table(int cpu, u32 id)
2446 void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
2447 unsigned int psz, esz, idx, npg, gpsz;
2452 if (!gic_rdists->has_rvpeid)
2455 val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
2457 esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1;
2458 gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
2459 npg = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1;
2465 case GIC_PAGE_SIZE_4K:
2468 case GIC_PAGE_SIZE_16K:
2471 case GIC_PAGE_SIZE_64K:
2476 /* Don't allow vpe_id that exceeds single, flat table limit */
2477 if (!(val & GICR_VPROPBASER_4_1_INDIRECT))
2478 return (id < (npg * psz / (esz * SZ_8)));
2480 /* Compute 1st level table index & check if that exceeds table limit */
2481 idx = id >> ilog2(psz / (esz * SZ_8));
2482 if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE))
2485 table = gic_data_rdist_cpu(cpu)->vpe_l1_base;
2487 /* Allocate memory for 2nd level table */
2489 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
2493 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
2494 if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
2495 gic_flush_dcache_to_poc(page_address(page), psz);
2497 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
2499 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
2500 if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
2501 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
2503 /* Ensure updated table contents are visible to RD hardware */
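/*
 * Worked example for the lvl1 indexing above (assumed geometry, not
 * called by the driver): with 64kB GIC pages and esz == 2 as read from
 * VPROPBASER, each lvl2 page holds psz / (esz * 8) = 4096 entries, so
 * vPE ID 9000 lands in lvl1 slot 2.
 */
static __maybe_unused unsigned int vpe_lvl1_index_example(void)
{
	unsigned int psz = SZ_64K, esz = 2;	/* assumed redistributor geometry */
	unsigned int id = 9000;			/* hypothetical vPE ID */

	return id >> ilog2(psz / (esz * SZ_8));	/* 9000 >> 12 == 2 */
}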
2510 static int allocate_vpe_l1_table(void)
2512 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2513 u64 val, gpsz, npg, pa;
2514 unsigned int psz = SZ_64K;
2515 unsigned int np, epp, esz;
2518 if (!gic_rdists->has_rvpeid)
2522 * if VPENDBASER.Valid is set, disable any previously programmed
2523 * VPE by setting PendingLast while clearing Valid. This has the
2524 * effect of making sure no doorbell will be generated and we can
2525 * then safely clear VPROPBASER.Valid.
2527 if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid)
2528 gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
2529 vlpi_base + GICR_VPENDBASER);
2532 * If we can inherit the configuration from another RD, let's do
2533 * so. Otherwise, we have to go through the allocation process. We
2534 * assume that all RDs have the exact same requirements, as
2535 * nothing will work otherwise.
2537 val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask);
2538 if (val & GICR_VPROPBASER_4_1_VALID)
2541 gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_KERNEL);
2542 if (!gic_data_rdist()->vpe_table_mask)
2545 val = inherit_vpe_l1_table_from_its();
2546 if (val & GICR_VPROPBASER_4_1_VALID)
2549 /* First probe the page size */
2550 val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K);
2551 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2552 val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER);
2553 gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
2554 esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val);
2558 gpsz = GIC_PAGE_SIZE_4K;
2560 case GIC_PAGE_SIZE_4K:
2563 case GIC_PAGE_SIZE_16K:
2566 case GIC_PAGE_SIZE_64K:
2572 * Start populating the register from scratch, including RO fields
2573 * (which we want to print in debug cases...)
2576 val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz);
2577 val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz);
2579 /* How many entries per GIC page? */
2581 epp = psz / (esz * SZ_8);
2584 * If we need more than just a single L1 page, flag the table
2585 * as indirect and compute the number of required L1 pages.
2587 if (epp < ITS_MAX_VPEID) {
2590 val |= GICR_VPROPBASER_4_1_INDIRECT;
2592 /* Number of L2 pages required to cover the VPEID space */
2593 nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp);
2595 /* Number of L1 pages to point to the L2 pages */
2596 npg = DIV_ROUND_UP(nl2 * SZ_8, psz);
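/*
 * Worked example (assuming 16-bit VPEIDs, 64kB pages and 16-byte
 * entries): epp == 4096, so covering the 65536 possible vPEs takes
 * nl2 == 16 L2 pages, and the 16 * 8 == 128 bytes of L1 descriptors
 * still fit in a single L1 page (npg == 1).
 */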
2601 val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1);
2603 /* Right, that's the number of CPU pages we need for L1 */
2604 np = DIV_ROUND_UP(npg * psz, PAGE_SIZE);
2606 pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
2607 np, npg, psz, epp, esz);
2608 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(np * PAGE_SIZE));
2612 gic_data_rdist()->vpe_l1_base = page_address(page);
2613 pa = virt_to_phys(page_address(page));
2614 WARN_ON(!IS_ALIGNED(pa, psz));
2616 val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12);
2617 val |= GICR_VPROPBASER_RaWb;
2618 val |= GICR_VPROPBASER_InnerShareable;
2619 val |= GICR_VPROPBASER_4_1_Z;
2620 val |= GICR_VPROPBASER_4_1_VALID;
2623 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2624 cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask);
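/*
 * vpe_table_mask now records every CPU whose RD shares this L1 vPE
 * table; its_vpe_set_affinity() relies on it to skip the VMOVP when
 * moving a vPE between two such CPUs.
 */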
2626 pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n",
2627 smp_processor_id(), val,
2628 cpumask_pr_args(gic_data_rdist()->vpe_table_mask));
2633 static int its_alloc_collections(struct its_node *its)
2637 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
2639 if (!its->collections)
2642 for (i = 0; i < nr_cpu_ids; i++)
2643 its->collections[i].target_address = ~0ULL;
2648 static struct page *its_allocate_pending_table(gfp_t gfp_flags)
2650 struct page *pend_page;
2652 pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
2653 get_order(LPI_PENDBASE_SZ));
2657 /* Make sure the GIC will observe the zero-ed page */
2658 gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
2663 static void its_free_pending_table(struct page *pt)
2665 free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
2669 * Booting with kdump and LPIs enabled is generally fine. Any other
2670 * case is wrong in the absence of firmware/EFI support.
2672 static bool enabled_lpis_allowed(void)
2677 /* Check whether the property table is in a reserved region */
2678 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
2679 addr = val & GENMASK_ULL(51, 12);
2681 return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
2684 static int __init allocate_lpi_tables(void)
2690 * If LPIs are enabled while we run this from the boot CPU,
2691 * flag the RD tables as pre-allocated if the stars do align.
2693 val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);
2694 if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
2695 gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |
2696 RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING);
2697 pr_info("GICv3: Using preallocated redistributor tables\n");
2700 err = its_setup_lpi_prop_table();
2705 * We allocate all the pending tables anyway, as we may have a
2706 * mix of RDs that have had LPIs enabled, and some that
2707 * don't. We'll free the unused ones as each CPU comes online.
2709 for_each_possible_cpu(cpu) {
2710 struct page *pend_page;
2712 pend_page = its_allocate_pending_table(GFP_NOWAIT);
2714 pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
2718 gic_data_rdist_cpu(cpu)->pend_page = pend_page;
2724 static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
2726 u32 count = 1000000; /* 1s! */
2730 val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2731 val &= ~GICR_VPENDBASER_Valid;
2734 gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2737 val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2738 clean = !(val & GICR_VPENDBASER_Dirty);
2744 } while (!clean && count);
2746 if (unlikely(val & GICR_VPENDBASER_Dirty)) {
2747 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
2748 val |= GICR_VPENDBASER_PendingLast;
2754 static void its_cpu_init_lpis(void)
2756 void __iomem *rbase = gic_data_rdist_rd_base();
2757 struct page *pend_page;
2761 if (gic_data_rdist()->lpi_enabled)
2764 val = readl_relaxed(rbase + GICR_CTLR);
2765 if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
2766 (val & GICR_CTLR_ENABLE_LPIS)) {
2768 * Check that we get the same property table on all
2769 * RDs. If we don't, this is hopeless.
2771 paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
2772 paddr &= GENMASK_ULL(51, 12);
2773 if (WARN_ON(gic_rdists->prop_table_pa != paddr))
2774 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
2776 paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
2777 paddr &= GENMASK_ULL(51, 16);
2779 WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
2780 its_free_pending_table(gic_data_rdist()->pend_page);
2781 gic_data_rdist()->pend_page = NULL;
2786 pend_page = gic_data_rdist()->pend_page;
2787 paddr = page_to_phys(pend_page);
2788 WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
2791 val = (gic_rdists->prop_table_pa |
2792 GICR_PROPBASER_InnerShareable |
2793 GICR_PROPBASER_RaWaWb |
2794 ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
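/*
 * Note that GICR_PROPBASER.IDbits is encoded as "number of LPI ID
 * bits minus one"; the property table then covers LPI INTIDs 8192 up
 * to 2^LPI_NRBITS - 1.
 */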
2796 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
2797 tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
2799 if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
2800 if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
2802 * The HW reports non-shareable, we must
2803 * remove the cacheability attributes as well.
2806 val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
2807 GICR_PROPBASER_CACHEABILITY_MASK);
2808 val |= GICR_PROPBASER_nC;
2809 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
2811 pr_info_once("GIC: using cache flushing for LPI property table\n");
2812 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
2816 val = (page_to_phys(pend_page) |
2817 GICR_PENDBASER_InnerShareable |
2818 GICR_PENDBASER_RaWaWb);
2820 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
2821 tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
2823 if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
2825 * The HW reports non-shareable, we must remove the
2826 * cacheability attributes as well.
2828 val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
2829 GICR_PENDBASER_CACHEABILITY_MASK);
2830 val |= GICR_PENDBASER_nC;
2831 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
2835 val = readl_relaxed(rbase + GICR_CTLR);
2836 val |= GICR_CTLR_ENABLE_LPIS;
2837 writel_relaxed(val, rbase + GICR_CTLR);
2839 if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
2840 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2843 * It's possible for a CPU to receive VLPIs before it is
2844 * scheduled as a vPE, especially for the first CPU, and a
2845 * VLPI with an INTID larger than 2^(IDbits+1) will be considered
2846 * out of range and dropped by the GIC.
2847 * So we initialize IDbits to a known value to avoid dropping VLPIs.
2849 val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2850 pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
2851 smp_processor_id(), val);
2852 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2855 * Also clear the Valid bit of GICR_VPENDBASER, in case some
2856 * stale programming was left behind and could end up
2857 * corrupting memory.
2859 val = its_clear_vpend_valid(vlpi_base, 0, 0);
2862 if (allocate_vpe_l1_table()) {
2864 * If the allocation has failed, we're in massive trouble.
2865 * Disable direct injection, and pray that no VM was
2866 * already running...
2868 gic_rdists->has_rvpeid = false;
2869 gic_rdists->has_vlpis = false;
2872 /* Make sure the GIC has seen the above */
2875 gic_data_rdist()->lpi_enabled = true;
2876 pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
2878 gic_data_rdist()->pend_page ? "allocated" : "reserved",
2882 static void its_cpu_init_collection(struct its_node *its)
2884 int cpu = smp_processor_id();
2887 /* Avoid cross-node collections and their mapping */
2888 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
2889 struct device_node *cpu_node;
2891 cpu_node = of_get_cpu_node(cpu, NULL);
2892 if (its->numa_node != NUMA_NO_NODE &&
2893 its->numa_node != of_node_to_nid(cpu_node))
2898 * We now have to bind each collection to its target redistributor.
2901 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
2903 * This ITS wants the physical address of the redistributor.
2906 target = gic_data_rdist()->phys_base;
2908 /* This ITS wants a linear CPU number. */
2909 target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2910 target = GICR_TYPER_CPU_NUMBER(target) << 16;
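/*
 * With GITS_TYPER.PTA == 0, MAPC takes a linear processor number
 * rather than a physical address; shifting it by 16 places that
 * number in the RDbase field of the command.
 */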
2913 /* Perform collection mapping */
2914 its->collections[cpu].target_address = target;
2915 its->collections[cpu].col_id = cpu;
2917 its_send_mapc(its, &its->collections[cpu], 1);
2918 its_send_invall(its, &its->collections[cpu]);
2921 static void its_cpu_init_collections(void)
2923 struct its_node *its;
2925 raw_spin_lock(&its_lock);
2927 list_for_each_entry(its, &its_nodes, entry)
2928 its_cpu_init_collection(its);
2930 raw_spin_unlock(&its_lock);
2933 static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
2935 struct its_device *its_dev = NULL, *tmp;
2936 unsigned long flags;
2938 raw_spin_lock_irqsave(&its->lock, flags);
2940 list_for_each_entry(tmp, &its->its_device_list, entry) {
2941 if (tmp->device_id == dev_id) {
2947 raw_spin_unlock_irqrestore(&its->lock, flags);
2952 static struct its_baser *its_get_baser(struct its_node *its, u32 type)
2956 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2957 if (GITS_BASER_TYPE(its->tables[i].val) == type)
2958 return &its->tables[i];
2964 static bool its_alloc_table_entry(struct its_node *its,
2965 struct its_baser *baser, u32 id)
2971 /* Don't allow device id that exceeds single, flat table limit */
2972 esz = GITS_BASER_ENTRY_SIZE(baser->val);
2973 if (!(baser->val & GITS_BASER_INDIRECT))
2974 return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
2976 /* Compute 1st level table index & check if that exceeds table limit */
2977 idx = id >> ilog2(baser->psz / esz);
2978 if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
2981 table = baser->base;
2983 /* Allocate memory for 2nd level table */
2985 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
2986 get_order(baser->psz));
2990 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
2991 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
2992 gic_flush_dcache_to_poc(page_address(page), baser->psz);
2994 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
2996 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
2997 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
2998 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
3000 /* Ensure updated table contents are visible to ITS hardware */
3007 static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
3009 struct its_baser *baser;
3011 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
3013 /* Don't allow device id that exceeds ITS hardware limit */
3015 return (ilog2(dev_id) < device_ids(its));
3017 return its_alloc_table_entry(its, baser, dev_id);
3020 static bool its_alloc_vpe_table(u32 vpe_id)
3022 struct its_node *its;
3026 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
3027 * could try and only do it on ITSs corresponding to devices
3028 * that have interrupts targeted at this VPE, but the
3029 * complexity becomes crazy (and you have tons of memory anyway).
3032 list_for_each_entry(its, &its_nodes, entry) {
3033 struct its_baser *baser;
3038 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
3042 if (!its_alloc_table_entry(its, baser, vpe_id))
3046 /* Not v4.1? No need to iterate the RDs, so bail out early. */
3047 if (!gic_rdists->has_rvpeid)
3051 * Make sure the L2 tables are allocated for all copies of
3052 * the L1 table on *all* v4.1 RDs.
3054 for_each_possible_cpu(cpu) {
3055 if (!allocate_vpe_l2_table(cpu, vpe_id))
3062 static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
3063 int nvecs, bool alloc_lpis)
3065 struct its_device *dev;
3066 unsigned long *lpi_map = NULL;
3067 unsigned long flags;
3068 u16 *col_map = NULL;
3075 if (!its_alloc_device_table(its, dev_id))
3078 if (WARN_ON(!is_power_of_2(nvecs)))
3079 nvecs = roundup_pow_of_two(nvecs);
3081 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3083 * Even if the device wants a single LPI, the ITT must be
3084 * sized as a power of two (and you need at least one bit...).
3086 nr_ites = max(2, nvecs);
3087 sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
3088 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
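/*
 * Example (assuming an implementation with 8-byte ITT entries, i.e.
 * an ITT_entry_size field of 7): a device asking for 30 MSIs has
 * nvecs rounded up to 32 above, so the ITT spans 32 * 8 = 256 bytes,
 * plus ITS_ITT_ALIGN - 1 bytes of slack for the 256-byte alignment.
 */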
3089 itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
3091 lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
3093 col_map = kcalloc(nr_lpis, sizeof(*col_map),
3096 col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
3101 if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
3109 gic_flush_dcache_to_poc(itt, sz);
3113 dev->nr_ites = nr_ites;
3114 dev->event_map.lpi_map = lpi_map;
3115 dev->event_map.col_map = col_map;
3116 dev->event_map.lpi_base = lpi_base;
3117 dev->event_map.nr_lpis = nr_lpis;
3118 raw_spin_lock_init(&dev->event_map.vlpi_lock);
3119 dev->device_id = dev_id;
3120 INIT_LIST_HEAD(&dev->entry);
3122 raw_spin_lock_irqsave(&its->lock, flags);
3123 list_add(&dev->entry, &its->its_device_list);
3124 raw_spin_unlock_irqrestore(&its->lock, flags);
3126 /* Map device to its ITT */
3127 its_send_mapd(dev, 1);
3132 static void its_free_device(struct its_device *its_dev)
3134 unsigned long flags;
3136 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
3137 list_del(&its_dev->entry);
3138 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
3139 kfree(its_dev->event_map.col_map);
3140 kfree(its_dev->itt);
3144 static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
3148 /* Find a free LPI region in lpi_map and allocate them. */
3149 idx = bitmap_find_free_region(dev->event_map.lpi_map,
3150 dev->event_map.nr_lpis,
3151 get_count_order(nvecs));
3155 *hwirq = dev->event_map.lpi_base + idx;
3160 static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
3161 int nvec, msi_alloc_info_t *info)
3163 struct its_node *its;
3164 struct its_device *its_dev;
3165 struct msi_domain_info *msi_info;
3170 * We ignore "dev" entirely, and rely on the dev_id that has
3171 * been passed via the scratchpad. This limits this domain's
3172 * usefulness to upper layers that definitely know that they
3173 * are built on top of the ITS.
3175 dev_id = info->scratchpad[0].ul;
3177 msi_info = msi_get_domain_info(domain);
3178 its = msi_info->data;
3180 if (!gic_rdists->has_direct_lpi &&
3182 vpe_proxy.dev->its == its &&
3183 dev_id == vpe_proxy.dev->device_id) {
3184 /* Bad luck. Get yourself a better implementation */
3185 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
3190 mutex_lock(&its->dev_alloc_lock);
3191 its_dev = its_find_device(its, dev_id);
3194 * We have already seen this ID, probably through
3195 * another alias (PCI bridge of some sort). No need to
3196 * create the device.
3198 its_dev->shared = true;
3199 pr_debug("Reusing ITT for devID %x\n", dev_id);
3203 its_dev = its_create_device(its, dev_id, nvec, true);
3209 pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
3211 mutex_unlock(&its->dev_alloc_lock);
3212 info->scratchpad[0].ptr = its_dev;
3216 static struct msi_domain_ops its_msi_domain_ops = {
3217 .msi_prepare = its_msi_prepare,
3220 static int its_irq_gic_domain_alloc(struct irq_domain *domain,
3222 irq_hw_number_t hwirq)
3224 struct irq_fwspec fwspec;
3226 if (irq_domain_get_of_node(domain->parent)) {
3227 fwspec.fwnode = domain->parent->fwnode;
3228 fwspec.param_count = 3;
3229 fwspec.param[0] = GIC_IRQ_TYPE_LPI;
3230 fwspec.param[1] = hwirq;
3231 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
3232 } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
3233 fwspec.fwnode = domain->parent->fwnode;
3234 fwspec.param_count = 2;
3235 fwspec.param[0] = hwirq;
3236 fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
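/*
 * Either way the parent (GICv3) domain receives an edge-triggered LPI
 * specifier: a three-cell OF-style spec (LPI type, hwirq, trigger),
 * or a two-cell spec (hwirq, trigger) when the parent was created
 * from a raw fwnode, as is the case with ACPI.
 */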
3241 return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
3244 static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
3245 unsigned int nr_irqs, void *args)
3247 msi_alloc_info_t *info = args;
3248 struct its_device *its_dev = info->scratchpad[0].ptr;
3249 struct its_node *its = its_dev->its;
3250 irq_hw_number_t hwirq;
3254 err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
3258 err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
3262 for (i = 0; i < nr_irqs; i++) {
3263 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
3267 irq_domain_set_hwirq_and_chip(domain, virq + i,
3268 hwirq + i, &its_irq_chip, its_dev);
3269 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
3270 pr_debug("ID:%d pID:%d vID:%d\n",
3271 (int)(hwirq + i - its_dev->event_map.lpi_base),
3272 (int)(hwirq + i), virq + i);
3278 static int its_irq_domain_activate(struct irq_domain *domain,
3279 struct irq_data *d, bool reserve)
3281 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3282 u32 event = its_get_event_id(d);
3283 const struct cpumask *cpu_mask = cpu_online_mask;
3286 /* Get the cpu_mask of the local node */
3287 if (its_dev->its->numa_node >= 0)
3288 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
3290 /* Bind the LPI to the first possible CPU */
3291 cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
3292 if (cpu >= nr_cpu_ids) {
3293 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
3296 cpu = cpumask_first(cpu_online_mask);
3299 its_dev->event_map.col_map[event] = cpu;
3300 irq_data_update_effective_affinity(d, cpumask_of(cpu));
3302 /* Map the GIC IRQ and event to the device */
3303 its_send_mapti(its_dev, d->hwirq, event);
3307 static void its_irq_domain_deactivate(struct irq_domain *domain,
3310 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3311 u32 event = its_get_event_id(d);
3313 /* Stop the delivery of interrupts */
3314 its_send_discard(its_dev, event);
3317 static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
3318 unsigned int nr_irqs)
3320 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
3321 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3322 struct its_node *its = its_dev->its;
3325 bitmap_release_region(its_dev->event_map.lpi_map,
3326 its_get_event_id(irq_domain_get_irq_data(domain, virq)),
3327 get_count_order(nr_irqs));
3329 for (i = 0; i < nr_irqs; i++) {
3330 struct irq_data *data = irq_domain_get_irq_data(domain,
3332 /* Nuke the entry in the domain */
3333 irq_domain_reset_irq_data(data);
3336 mutex_lock(&its->dev_alloc_lock);
3339 * If all interrupts have been freed, start mopping the
3340 * floor. This is conditioned on the device not being shared.
3342 if (!its_dev->shared &&
3343 bitmap_empty(its_dev->event_map.lpi_map,
3344 its_dev->event_map.nr_lpis)) {
3345 its_lpi_free(its_dev->event_map.lpi_map,
3346 its_dev->event_map.lpi_base,
3347 its_dev->event_map.nr_lpis);
3349 /* Unmap device/itt */
3350 its_send_mapd(its_dev, 0);
3351 its_free_device(its_dev);
3354 mutex_unlock(&its->dev_alloc_lock);
3356 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
3359 static const struct irq_domain_ops its_domain_ops = {
3360 .alloc = its_irq_domain_alloc,
3361 .free = its_irq_domain_free,
3362 .activate = its_irq_domain_activate,
3363 .deactivate = its_irq_domain_deactivate,
3369 * If a GICv4.0 doesn't implement Direct LPIs (which is extremely
3370 * likely), the only way to perform an invalidate is to use a fake
3371 * device to issue an INV command, implying that the LPI has first
3372 * been mapped to some event on that device. Since this is not exactly
3373 * cheap, we try to keep that mapping around as long as possible, and
3374 * only issue an UNMAP if we're short on available slots.
3376 * Broken by design(tm).
3378 * GICv4.1, on the other hand, mandates that we're able to invalidate
3379 * by writing to an MMIO register. It doesn't implement the whole of
3380 * DirectLPI, but that's good enough. And most of the time, we don't
3381 * even have to invalidate anything, as the redistributor can be told
3382 * whether to generate a doorbell or not (we thus leave it enabled, always).
3385 static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
3387 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3388 if (gic_rdists->has_rvpeid)
3391 /* Already unmapped? */
3392 if (vpe->vpe_proxy_event == -1)
3395 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
3396 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
3399 * We don't track empty slots at all, so let's move the
3400 * next_victim pointer if we can quickly reuse that slot
3401 * instead of nuking an existing entry. Not clear that this is
3402 * always a win though, and this might just generate a ripple
3403 * effect... Let's just hope VPEs don't migrate too often.
3405 if (vpe_proxy.vpes[vpe_proxy.next_victim])
3406 vpe_proxy.next_victim = vpe->vpe_proxy_event;
3408 vpe->vpe_proxy_event = -1;
3411 static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
3413 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3414 if (gic_rdists->has_rvpeid)
3417 if (!gic_rdists->has_direct_lpi) {
3418 unsigned long flags;
3420 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3421 its_vpe_db_proxy_unmap_locked(vpe);
3422 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3426 static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
3428 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3429 if (gic_rdists->has_rvpeid)
3432 /* Already mapped? */
3433 if (vpe->vpe_proxy_event != -1)
3436 /* This slot was already allocated. Kick the other VPE out. */
3437 if (vpe_proxy.vpes[vpe_proxy.next_victim])
3438 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
3440 /* Map the new VPE instead */
3441 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
3442 vpe->vpe_proxy_event = vpe_proxy.next_victim;
3443 vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
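/*
 * next_victim thus walks the proxy device's event space as a simple
 * ring: once all nr_ites slots are occupied, whichever vPE currently
 * owns the slot is kicked out (see the unmap above) to make room for
 * the new mapping.
 */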
3445 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
3446 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
3449 static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
3451 unsigned long flags;
3452 struct its_collection *target_col;
3454 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3455 if (gic_rdists->has_rvpeid)
3458 if (gic_rdists->has_direct_lpi) {
3459 void __iomem *rdbase;
3461 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
3462 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
3463 wait_for_syncr(rdbase);
3468 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3470 its_vpe_db_proxy_map_locked(vpe);
3472 target_col = &vpe_proxy.dev->its->collections[to];
3473 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
3474 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
3476 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3479 static int its_vpe_set_affinity(struct irq_data *d,
3480 const struct cpumask *mask_val,
3483 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3484 int from, cpu = cpumask_first(mask_val);
3487 * Changing affinity is mega expensive, so let's be as lazy as
3488 * we can and only do it if we really have to. Also, if mapped
3489 * into the proxy device, we need to move the doorbell
3490 * interrupt to its new location.
3492 if (vpe->col_idx == cpu)
3495 from = vpe->col_idx;
3499 * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD
3500 * is sharing its VPE table with the current one.
3502 if (gic_data_rdist_cpu(cpu)->vpe_table_mask &&
3503 cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask))
3506 its_send_vmovp(vpe);
3507 its_vpe_db_proxy_move(vpe, from, cpu);
3510 irq_data_update_effective_affinity(d, cpumask_of(cpu));
3512 return IRQ_SET_MASK_OK_DONE;
3515 static void its_vpe_schedule(struct its_vpe *vpe)
3517 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3520 /* Schedule the VPE */
3521 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
3522 GENMASK_ULL(51, 12);
3523 val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
3524 val |= GICR_VPROPBASER_RaWb;
3525 val |= GICR_VPROPBASER_InnerShareable;
3526 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
3528 val = virt_to_phys(page_address(vpe->vpt_page)) &
3529 GENMASK_ULL(51, 16);
3530 val |= GICR_VPENDBASER_RaWaWb;
3531 val |= GICR_VPENDBASER_NonShareable;
3533 * There is no good way of finding out if the pending table is
3534 * empty as we can race against the doorbell interrupt very
3535 * easily. So in the end, vpe->pending_last is only an
3536 * indication that the vcpu has something pending, not one
3537 * that the pending table is empty. A good implementation
3538 * would be able to read its coarse map pretty quickly anyway,
3539 * making this a tolerable issue.
3541 val |= GICR_VPENDBASER_PendingLast;
3542 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
3543 val |= GICR_VPENDBASER_Valid;
3544 gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
3547 static void its_vpe_deschedule(struct its_vpe *vpe)
3549 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3552 val = its_clear_vpend_valid(vlpi_base, 0, 0);
3554 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
3555 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
3558 static void its_vpe_invall(struct its_vpe *vpe)
3560 struct its_node *its;
3562 list_for_each_entry(its, &its_nodes, entry) {
3566 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
3570 * Sending a VINVALL to a single ITS is enough, as all
3571 * we need is to reach the redistributors.
3573 its_send_vinvall(its, vpe);
3578 static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
3580 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3581 struct its_cmd_info *info = vcpu_info;
3583 switch (info->cmd_type) {
3585 its_vpe_schedule(vpe);
3588 case DESCHEDULE_VPE:
3589 its_vpe_deschedule(vpe);
3593 its_vpe_invall(vpe);
3601 static void its_vpe_send_cmd(struct its_vpe *vpe,
3602 void (*cmd)(struct its_device *, u32))
3604 unsigned long flags;
3606 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3608 its_vpe_db_proxy_map_locked(vpe);
3609 cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
3611 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3614 static void its_vpe_send_inv(struct irq_data *d)
3616 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3618 if (gic_rdists->has_direct_lpi) {
3619 void __iomem *rdbase;
3621 /* Target the redistributor this VPE is currently known on */
3622 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
3623 gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
3624 wait_for_syncr(rdbase);
3626 its_vpe_send_cmd(vpe, its_send_inv);
3630 static void its_vpe_mask_irq(struct irq_data *d)
3633 * We need to mask the LPI, which is described by the parent
3634 * irq_data. Instead of calling into the parent (which won't
3635 * exactly do the right thing), let's simply use the
3636 * parent_data pointer. Yes, I'm naughty.
3638 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
3639 its_vpe_send_inv(d);
3642 static void its_vpe_unmask_irq(struct irq_data *d)
3644 /* Same hack as above... */
3645 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
3646 its_vpe_send_inv(d);
3649 static int its_vpe_set_irqchip_state(struct irq_data *d,
3650 enum irqchip_irq_state which,
3653 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3655 if (which != IRQCHIP_STATE_PENDING)
3658 if (gic_rdists->has_direct_lpi) {
3659 void __iomem *rdbase;
3661 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
3663 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
3665 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
3666 wait_for_syncr(rdbase);
3670 its_vpe_send_cmd(vpe, its_send_int);
3672 its_vpe_send_cmd(vpe, its_send_clear);
3678 static struct irq_chip its_vpe_irq_chip = {
3679 .name = "GICv4-vpe",
3680 .irq_mask = its_vpe_mask_irq,
3681 .irq_unmask = its_vpe_unmask_irq,
3682 .irq_eoi = irq_chip_eoi_parent,
3683 .irq_set_affinity = its_vpe_set_affinity,
3684 .irq_set_irqchip_state = its_vpe_set_irqchip_state,
3685 .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
3688 static struct its_node *find_4_1_its(void)
3690 static struct its_node *its = NULL;
3693 list_for_each_entry(its, &its_nodes, entry) {
3705 static void its_vpe_4_1_send_inv(struct irq_data *d)
3707 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3708 struct its_node *its;
3711 * GICv4.1 wants doorbells to be invalidated using the
3712 * INVDB command in order to be broadcast to all RDs. Send
3713 * it to the first valid ITS, and let the HW do its magic.
3715 its = find_4_1_its();
3717 its_send_invdb(its, vpe);
3720 static void its_vpe_4_1_mask_irq(struct irq_data *d)
3722 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
3723 its_vpe_4_1_send_inv(d);
3726 static void its_vpe_4_1_unmask_irq(struct irq_data *d)
3728 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
3729 its_vpe_4_1_send_inv(d);
3732 static void its_vpe_4_1_schedule(struct its_vpe *vpe,
3733 struct its_cmd_info *info)
3735 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3738 /* Schedule the VPE */
3739 val |= GICR_VPENDBASER_Valid;
3740 val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0;
3741 val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0;
3742 val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
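/*
 * Unlike the v4.0 path in its_vpe_schedule(), no table addresses are
 * programmed here: a v4.1 redistributor locates the vPE's
 * configuration and pending tables on its own, from the vPE ID and
 * the vPE table set up via GICR_VPROPBASER.
 */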
3744 gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
3747 static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
3748 struct its_cmd_info *info)
3750 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3755 * vPE is going to block: make the vPE non-resident with
3756 * PendingLast clear and DB set. The GIC guarantees that if
3757 * we read-back PendingLast clear, then a doorbell will be
3758 * delivered when an interrupt comes.
3760 val = its_clear_vpend_valid(vlpi_base,
3761 GICR_VPENDBASER_PendingLast,
3762 GICR_VPENDBASER_4_1_DB);
3763 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
3766 * We're not blocking, so just make the vPE non-resident
3767 * with PendingLast set, indicating that we'll be back.
3769 val = its_clear_vpend_valid(vlpi_base,
3771 GICR_VPENDBASER_PendingLast);
3772 vpe->pending_last = true;
3776 static void its_vpe_4_1_invall(struct its_vpe *vpe)
3778 void __iomem *rdbase;
3781 val = GICR_INVALLR_V;
3782 val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
3784 /* Target the redistributor this vPE is currently known on */
3785 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
3786 gic_write_lpir(val, rdbase + GICR_INVALLR);
3789 static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
3791 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3792 struct its_cmd_info *info = vcpu_info;
3794 switch (info->cmd_type) {
3796 its_vpe_4_1_schedule(vpe, info);
3799 case DESCHEDULE_VPE:
3800 its_vpe_4_1_deschedule(vpe, info);
3804 its_vpe_4_1_invall(vpe);
3812 static struct irq_chip its_vpe_4_1_irq_chip = {
3813 .name = "GICv4.1-vpe",
3814 .irq_mask = its_vpe_4_1_mask_irq,
3815 .irq_unmask = its_vpe_4_1_unmask_irq,
3816 .irq_eoi = irq_chip_eoi_parent,
3817 .irq_set_affinity = its_vpe_set_affinity,
3818 .irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity,
3821 static int its_vpe_id_alloc(void)
3823 return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
3826 static void its_vpe_id_free(u16 id)
3828 ida_simple_remove(&its_vpeid_ida, id);
3831 static int its_vpe_init(struct its_vpe *vpe)
3833 struct page *vpt_page;
3836 /* Allocate vpe_id */
3837 vpe_id = its_vpe_id_alloc();
3842 vpt_page = its_allocate_pending_table(GFP_KERNEL);
3844 its_vpe_id_free(vpe_id);
3848 if (!its_alloc_vpe_table(vpe_id)) {
3849 its_vpe_id_free(vpe_id);
3850 its_free_pending_table(vpt_page);
3854 vpe->vpe_id = vpe_id;
3855 vpe->vpt_page = vpt_page;
3856 if (gic_rdists->has_rvpeid)
3857 atomic_set(&vpe->vmapp_count, 0);
3859 vpe->vpe_proxy_event = -1;
3864 static void its_vpe_teardown(struct its_vpe *vpe)
3866 its_vpe_db_proxy_unmap(vpe);
3867 its_vpe_id_free(vpe->vpe_id);
3868 its_free_pending_table(vpe->vpt_page);
3871 static void its_vpe_irq_domain_free(struct irq_domain *domain,
3873 unsigned int nr_irqs)
3875 struct its_vm *vm = domain->host_data;
3878 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
3880 for (i = 0; i < nr_irqs; i++) {
3881 struct irq_data *data = irq_domain_get_irq_data(domain,
3883 struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
3885 BUG_ON(vm != vpe->its_vm);
3887 clear_bit(data->hwirq, vm->db_bitmap);
3888 its_vpe_teardown(vpe);
3889 irq_domain_reset_irq_data(data);
3892 if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
3893 its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
3894 its_free_prop_table(vm->vprop_page);
3898 static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
3899 unsigned int nr_irqs, void *args)
3901 struct irq_chip *irqchip = &its_vpe_irq_chip;
3902 struct its_vm *vm = args;
3903 unsigned long *bitmap;
3904 struct page *vprop_page;
3905 int base, nr_ids, i, err = 0;
3909 bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
3913 if (nr_ids < nr_irqs) {
3914 its_lpi_free(bitmap, base, nr_ids);
3918 vprop_page = its_allocate_prop_table(GFP_KERNEL);
3920 its_lpi_free(bitmap, base, nr_ids);
3924 vm->db_bitmap = bitmap;
3925 vm->db_lpi_base = base;
3926 vm->nr_db_lpis = nr_ids;
3927 vm->vprop_page = vprop_page;
3929 if (gic_rdists->has_rvpeid)
3930 irqchip = &its_vpe_4_1_irq_chip;
3932 for (i = 0; i < nr_irqs; i++) {
3933 vm->vpes[i]->vpe_db_lpi = base + i;
3934 err = its_vpe_init(vm->vpes[i]);
3937 err = its_irq_gic_domain_alloc(domain, virq + i,
3938 vm->vpes[i]->vpe_db_lpi);
3941 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
3942 irqchip, vm->vpes[i]);
3948 its_vpe_irq_domain_free(domain, virq, i - 1);
3950 its_lpi_free(bitmap, base, nr_ids);
3951 its_free_prop_table(vprop_page);
3957 static int its_vpe_irq_domain_activate(struct irq_domain *domain,
3958 struct irq_data *d, bool reserve)
3960 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3961 struct its_node *its;
3963 /* If we use the list map, we issue VMAPP on demand... */
3967 /* Map the VPE to the first possible CPU */
3968 vpe->col_idx = cpumask_first(cpu_online_mask);
3970 list_for_each_entry(its, &its_nodes, entry) {
3974 its_send_vmapp(its, vpe, true);
3975 its_send_vinvall(its, vpe);
3978 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
3983 static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
3986 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3987 struct its_node *its;
3990 * If we use the list map, we unmap the VPE once no VLPIs are
3991 * associated with the VM.
3996 list_for_each_entry(its, &its_nodes, entry) {
4000 its_send_vmapp(its, vpe, false);
4004 static const struct irq_domain_ops its_vpe_domain_ops = {
4005 .alloc = its_vpe_irq_domain_alloc,
4006 .free = its_vpe_irq_domain_free,
4007 .activate = its_vpe_irq_domain_activate,
4008 .deactivate = its_vpe_irq_domain_deactivate,
4011 static int its_force_quiescent(void __iomem *base)
4013 u32 count = 1000000; /* 1s */
4016 val = readl_relaxed(base + GITS_CTLR);
4018 * The GIC architecture specification requires the ITS to be both
4019 * disabled and quiescent for writes to GITS_BASER<n> or
4020 * GITS_CBASER to not have UNPREDICTABLE results.
4022 if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
4025 /* Disable the generation of all interrupts to this ITS */
4026 val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
4027 writel_relaxed(val, base + GITS_CTLR);
4029 /* Poll GITS_CTLR and wait until ITS becomes quiescent */
4031 val = readl_relaxed(base + GITS_CTLR);
4032 if (val & GITS_CTLR_QUIESCENT)
4044 static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
4046 struct its_node *its = data;
4048 /* Erratum 22375: only allocate an 8MB table (20 device ID bits) */
4049 its->typer &= ~GITS_TYPER_DEVBITS;
4050 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
4051 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
4056 static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
4058 struct its_node *its = data;
4060 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
4065 static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
4067 struct its_node *its = data;
4069 /* On QDF2400, the size of the ITE is 16 bytes */
4070 its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
4071 its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);
4076 static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
4078 struct its_node *its = its_dev->its;
4081 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
4082 * which maps 32-bit writes targeted at a separate window of
4083 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
4084 * with device ID taken from bits [device_id_bits + 1:2] of
4085 * the window offset.
4087 return its->pre_its_base + (its_dev->device_id << 2);
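/*
 * For example, a device with ID 5 would have its MSI doorbell at
 * pre_its_base + (5 << 2), i.e. at offset 0x14 into the pre-ITS
 * window.
 */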
4090 static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
4092 struct its_node *its = data;
4093 u32 pre_its_window[2];
4096 if (!fwnode_property_read_u32_array(its->fwnode_handle,
4097 "socionext,synquacer-pre-its",
4099 ARRAY_SIZE(pre_its_window))) {
4101 its->pre_its_base = pre_its_window[0];
4102 its->get_msi_base = its_irq_get_msi_base_pre_its;
4104 ids = ilog2(pre_its_window[1]) - 2;
4105 if (device_ids(its) > ids) {
4106 its->typer &= ~GITS_TYPER_DEVBITS;
4107 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
4110 /* the pre-ITS breaks isolation, so disable MSI remapping */
4111 its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
4117 static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
4119 struct its_node *its = data;
4122 * Hip07 insists on using the wrong address for the VLPI
4123 * page. Trick it into doing the right thing...
4125 its->vlpi_redist_offset = SZ_128K;
4129 static const struct gic_quirk its_quirks[] = {
4130 #ifdef CONFIG_CAVIUM_ERRATUM_22375
4132 .desc = "ITS: Cavium errata 22375, 24313",
4133 .iidr = 0xa100034c, /* ThunderX pass 1.x */
4135 .init = its_enable_quirk_cavium_22375,
4138 #ifdef CONFIG_CAVIUM_ERRATUM_23144
4140 .desc = "ITS: Cavium erratum 23144",
4141 .iidr = 0xa100034c, /* ThunderX pass 1.x */
4143 .init = its_enable_quirk_cavium_23144,
4146 #ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
4148 .desc = "ITS: QDF2400 erratum 0065",
4149 .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
4151 .init = its_enable_quirk_qdf2400_e0065,
4154 #ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
4157 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
4158 * implementation, but with a 'pre-ITS' added that requires
4159 * special handling in software.
4161 .desc = "ITS: Socionext Synquacer pre-ITS",
4164 .init = its_enable_quirk_socionext_synquacer,
4167 #ifdef CONFIG_HISILICON_ERRATUM_161600802
4169 .desc = "ITS: Hip07 erratum 161600802",
4172 .init = its_enable_quirk_hip07_161600802,
4179 static void its_enable_quirks(struct its_node *its)
4181 u32 iidr = readl_relaxed(its->base + GITS_IIDR);
4183 gic_enable_quirks(iidr, its_quirks, its);
4186 static int its_save_disable(void)
4188 struct its_node *its;
4191 raw_spin_lock(&its_lock);
4192 list_for_each_entry(its, &its_nodes, entry) {
4195 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
4199 its->ctlr_save = readl_relaxed(base + GITS_CTLR);
4200 err = its_force_quiescent(base);
4202 pr_err("ITS@%pa: failed to quiesce: %d\n",
4203 &its->phys_base, err);
4204 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4208 its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
4213 list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
4216 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
4220 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4223 raw_spin_unlock(&its_lock);
4228 static void its_restore_enable(void)
4230 struct its_node *its;
4233 raw_spin_lock(&its_lock);
4234 list_for_each_entry(its, &its_nodes, entry) {
4238 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
4244 * Make sure that the ITS is disabled. If it fails to quiesce,
4245 * don't restore it since writing to CBASER or BASER<n>
4246 * registers is undefined according to the GICv3 ITS specification.
4249 ret = its_force_quiescent(base);
4251 pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
4252 &its->phys_base, ret);
4256 gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);
4259 * Writing CBASER resets CREADR to 0, so make CWRITER and
4260 * cmd_write line up with it.
4262 its->cmd_write = its->cmd_base;
4263 gits_write_cwriter(0, base + GITS_CWRITER);
4265 /* Restore GITS_BASER from the value cache. */
4266 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
4267 struct its_baser *baser = &its->tables[i];
4269 if (!(baser->val & GITS_BASER_VALID))
4272 its_write_baser(its, baser, baser->val);
4274 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4277 * Reinit the collection if it's stored in the ITS. This is
4278 * indicated by the col_id being less than the HCC field.
4279 * (CID < HCC, as specified in the GICv3 documentation).
4281 if (its->collections[smp_processor_id()].col_id <
4282 GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
4283 its_cpu_init_collection(its);
4285 raw_spin_unlock(&its_lock);
4288 static struct syscore_ops its_syscore_ops = {
4289 .suspend = its_save_disable,
4290 .resume = its_restore_enable,
4293 static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
4295 struct irq_domain *inner_domain;
4296 struct msi_domain_info *info;
4298 info = kzalloc(sizeof(*info), GFP_KERNEL);
4302 inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
4303 if (!inner_domain) {
4308 inner_domain->parent = its_parent;
4309 irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
4310 inner_domain->flags |= its->msi_domain_flags;
4311 info->ops = &its_msi_domain_ops;
4313 inner_domain->host_data = info;
4318 static int its_init_vpe_domain(void)
4320 struct its_node *its;
4324 if (gic_rdists->has_direct_lpi) {
4325 pr_info("ITS: Using DirectLPI for VPE invalidation\n");
4329 /* Any ITS will do, even if not v4 */
4330 its = list_first_entry(&its_nodes, struct its_node, entry);
4332 entries = roundup_pow_of_two(nr_cpu_ids);
4333 vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
4335 if (!vpe_proxy.vpes) {
4336 pr_err("ITS: Can't allocate GICv4 proxy device array\n");
4340 /* Use the last possible DevID */
4341 devid = GENMASK(device_ids(its) - 1, 0);
4342 vpe_proxy.dev = its_create_device(its, devid, entries, false);
4343 if (!vpe_proxy.dev) {
4344 kfree(vpe_proxy.vpes);
4345 pr_err("ITS: Can't allocate GICv4 proxy device\n");
4349 BUG_ON(entries > vpe_proxy.dev->nr_ites);
4351 raw_spin_lock_init(&vpe_proxy.lock);
4352 vpe_proxy.next_victim = 0;
4353 pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
4354 devid, vpe_proxy.dev->nr_ites);
4359 static int __init its_compute_its_list_map(struct resource *res,
4360 void __iomem *its_base)
4366 * This is assumed to be done early enough that we're
4367 * guaranteed to be single-threaded, hence no
4368 * locking. Should this change, we should address that.
4371 its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
4372 if (its_number >= GICv4_ITS_LIST_MAX) {
4373 pr_err("ITS@%pa: No ITSList entry available!\n",
4378 ctlr = readl_relaxed(its_base + GITS_CTLR);
4379 ctlr &= ~GITS_CTLR_ITS_NUMBER;
4380 ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
4381 writel_relaxed(ctlr, its_base + GITS_CTLR);
4382 ctlr = readl_relaxed(its_base + GITS_CTLR);
4383 if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
4384 its_number = ctlr & GITS_CTLR_ITS_NUMBER;
4385 its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
4388 if (test_and_set_bit(its_number, &its_list_map)) {
4389 pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
4390 &res->start, its_number);
4397 static int __init its_probe_one(struct resource *res,
4398 struct fwnode_handle *handle, int numa_node)
4400 struct its_node *its;
4401 void __iomem *its_base;
4403 u64 baser, tmp, typer;
4407 its_base = ioremap(res->start, resource_size(res));
4409 pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
4413 val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
4414 if (val != 0x30 && val != 0x40) {
4415 pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
4420 err = its_force_quiescent(its_base);
4422 pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
4426 pr_info("ITS %pR\n", res);
4428 its = kzalloc(sizeof(*its), GFP_KERNEL);
4434 raw_spin_lock_init(&its->lock);
4435 mutex_init(&its->dev_alloc_lock);
4436 INIT_LIST_HEAD(&its->entry);
4437 INIT_LIST_HEAD(&its->its_device_list);
4438 typer = gic_read_typer(its_base + GITS_TYPER);
4440 its->base = its_base;
4441 its->phys_base = res->start;
4443 if (!(typer & GITS_TYPER_VMOVP)) {
4444 err = its_compute_its_list_map(res, its_base);
4450 pr_info("ITS@%pa: Using ITS number %d\n",
4453 pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
4457 u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer);
4458 its->mpidr = readl_relaxed(its_base + GITS_MPIDR);
4460 pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",
4461 &res->start, its->mpidr, svpet);
4465 its->numa_node = numa_node;
4467 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
4468 get_order(ITS_CMD_QUEUE_SZ));
4473 its->cmd_base = (void *)page_address(page);
4474 its->cmd_write = its->cmd_base;
4475 its->fwnode_handle = handle;
4476 its->get_msi_base = its_irq_get_msi_base;
4477 its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;
4479 its_enable_quirks(its);
4481 err = its_alloc_tables(its);
4485 err = its_alloc_collections(its);
4487 goto out_free_tables;
4489 baser = (virt_to_phys(its->cmd_base) |
4490 GITS_CBASER_RaWaWb |
4491 GITS_CBASER_InnerShareable |
4492 (ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
4495 gits_write_cbaser(baser, its->base + GITS_CBASER);
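/*
 * The size field encoded above is "number of 4kB pages minus one": a
 * 64kB command queue, for instance, is programmed as 15.
 */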
4496 tmp = gits_read_cbaser(its->base + GITS_CBASER);
4498 if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
4499 if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
4501 * The HW reports non-shareable, we must
4502 * remove the cacheability attributes as well.
4505 baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
4506 GITS_CBASER_CACHEABILITY_MASK);
4507 baser |= GITS_CBASER_nC;
4508 gits_write_cbaser(baser, its->base + GITS_CBASER);
4510 pr_info("ITS: using cache flushing for cmd queue\n");
4511 its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
4514 gits_write_cwriter(0, its->base + GITS_CWRITER);
4515 ctlr = readl_relaxed(its->base + GITS_CTLR);
4516 ctlr |= GITS_CTLR_ENABLE;
4518 ctlr |= GITS_CTLR_ImDe;
4519 writel_relaxed(ctlr, its->base + GITS_CTLR);
4521 if (GITS_TYPER_HCC(typer))
4522 its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE;
4524 err = its_init_domain(handle, its);
4526 goto out_free_tables;
4528 raw_spin_lock(&its_lock);
4529 list_add(&its->entry, &its_nodes);
4530 raw_spin_unlock(&its_lock);
4535 its_free_tables(its);
4537 free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
4542 pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
4546 static bool gic_rdists_supports_plpis(void)
4548 return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
4551 static int redist_disable_lpis(void)
4553 void __iomem *rbase = gic_data_rdist_rd_base();
4554 u64 timeout = USEC_PER_SEC;
4557 if (!gic_rdists_supports_plpis()) {
4558 pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
4562 val = readl_relaxed(rbase + GICR_CTLR);
4563 if (!(val & GICR_CTLR_ENABLE_LPIS))
4567 * If coming via a CPU hotplug event, we don't need to disable
4568 * LPIs before trying to re-enable them. They are already
4569 * configured and all is well in the world.
4571 * If running with preallocated tables, there is nothing to do.
4573 if (gic_data_rdist()->lpi_enabled ||
4574 (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
4578 * From that point on, we only try to do some damage control.
4580 pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
4581 smp_processor_id());
4582 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
4585 val &= ~GICR_CTLR_ENABLE_LPIS;
4586 writel_relaxed(val, rbase + GICR_CTLR);
4588 /* Make sure any change to GICR_CTLR is observable by the GIC */
4592 * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
4593 * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
4594 * Error out if we time out waiting for RWP to clear.
4596 while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
4598 pr_err("CPU%d: Timeout while disabling LPIs\n",
4599 smp_processor_id());
4607 * After it has been written to 1, it is IMPLEMENTATION
4608 * DEFINED whether GICR_CTLR.EnableLPIs becomes RES1 or can be
4609 * cleared to 0. Error out if clearing the bit failed.
4611 if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
4612 pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
4619 int its_cpu_init(void)
4621 if (!list_empty(&its_nodes)) {
4624 ret = redist_disable_lpis();
4628 its_cpu_init_lpis();
4629 its_cpu_init_collections();
4635 static const struct of_device_id its_device_id[] = {
4636 { .compatible = "arm,gic-v3-its", },
4640 static int __init its_of_probe(struct device_node *node)
4642 struct device_node *np;
4643 struct resource res;
4645 for (np = of_find_matching_node(node, its_device_id); np;
4646 np = of_find_matching_node(np, its_device_id)) {
4647 if (!of_device_is_available(np))
4649 if (!of_property_read_bool(np, "msi-controller")) {
4650 pr_warn("%pOF: no msi-controller property, ITS ignored\n",
4655 if (of_address_to_resource(np, 0, &res)) {
4656 pr_warn("%pOF: no regs?\n", np);
4660 its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
4667 #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
4669 #ifdef CONFIG_ACPI_NUMA
4670 struct its_srat_map {
4677 static struct its_srat_map *its_srat_maps __initdata;
4678 static int its_in_srat __initdata;
4680 static int __init acpi_get_its_numa_node(u32 its_id)
4684 for (i = 0; i < its_in_srat; i++) {
4685 if (its_id == its_srat_maps[i].its_id)
4686 return its_srat_maps[i].numa_node;
4688 return NUMA_NO_NODE;
4691 static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header,
4692 const unsigned long end)
4697 static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header,
4698 const unsigned long end)
4701 struct acpi_srat_gic_its_affinity *its_affinity;
4703 its_affinity = (struct acpi_srat_gic_its_affinity *)header;
4707 if (its_affinity->header.length < sizeof(*its_affinity)) {
4708 pr_err("SRAT: Invalid header length %d in ITS affinity\n",
4709 its_affinity->header.length);
4713 node = acpi_map_pxm_to_node(its_affinity->proximity_domain);
4715 if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
4716 pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
4720 its_srat_maps[its_in_srat].numa_node = node;
4721 its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
4723 pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
4724 its_affinity->proximity_domain, its_affinity->its_id, node);
4729 static void __init acpi_table_parse_srat_its(void)
4733 count = acpi_table_parse_entries(ACPI_SIG_SRAT,
4734 sizeof(struct acpi_table_srat),
4735 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
4736 gic_acpi_match_srat_its, 0);
4740 its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
4742 if (!its_srat_maps) {
4743 pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
4747 acpi_table_parse_entries(ACPI_SIG_SRAT,
4748 sizeof(struct acpi_table_srat),
4749 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
4750 gic_acpi_parse_srat_its, 0);
4753 /* free the its_srat_maps after ITS probing */
4754 static void __init acpi_its_srat_maps_free(void)
4756 kfree(its_srat_maps);
4759 static void __init acpi_table_parse_srat_its(void) { }
4760 static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
4761 static void __init acpi_its_srat_maps_free(void) { }
4764 static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
4765 const unsigned long end)
4767 struct acpi_madt_generic_translator *its_entry;
4768 struct fwnode_handle *dom_handle;
4769 struct resource res;
4772 its_entry = (struct acpi_madt_generic_translator *)header;
4773 memset(&res, 0, sizeof(res));
4774 res.start = its_entry->base_address;
4775 res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
4776 res.flags = IORESOURCE_MEM;
4778 dom_handle = irq_domain_alloc_fwnode(&res.start);
4780 pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
4785 err = iort_register_domain_token(its_entry->translation_id, res.start,
4788 pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
4789 &res.start, its_entry->translation_id);
4793 err = its_probe_one(&res, dom_handle,
4794 acpi_get_its_numa_node(its_entry->translation_id));
4798 iort_deregister_domain_token(its_entry->translation_id);
4800 irq_domain_free_fwnode(dom_handle);
4804 static void __init its_acpi_probe(void)
4806 acpi_table_parse_srat_its();
4807 acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
4808 gic_acpi_parse_madt_its, 0);
4809 acpi_its_srat_maps_free();
4812 static void __init its_acpi_probe(void) { }
4815 int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
4816 struct irq_domain *parent_domain)
4818 struct device_node *of_node;
4819 struct its_node *its;
4820 bool has_v4 = false;
4823 gic_rdists = rdists;
4825 its_parent = parent_domain;
4826 of_node = to_of_node(handle);
4828 its_of_probe(of_node);
4832 if (list_empty(&its_nodes)) {
4833 pr_warn("ITS: No ITS available, not enabling LPIs\n");
4837 err = allocate_lpi_tables();
4841 list_for_each_entry(its, &its_nodes, entry)
4842 has_v4 |= is_v4(its);
4844 if (has_v4 & rdists->has_vlpis) {
4845 if (its_init_vpe_domain() ||
4846 its_init_v4(parent_domain, &its_vpe_domain_ops)) {
4847 rdists->has_vlpis = false;
4848 pr_err("ITS: Disabling GICv4 support\n");
4852 register_syscore_ops(&its_syscore_ops);