// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/arm-gic-v4.h>

#include <asm/cputype.h>
#include <asm/exception.h>

#include "irq-gic-common.h"

#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)
#define ITS_FLAGS_SAVE_SUSPEND_STATE		(1ULL << 3)

#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)
#define RDIST_FLAGS_RD_TABLES_PREALLOCATED	(1 << 1)

static u32 lpi_id_bits;

/*
 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */
#define LPI_NRBITS		lpi_id_bits
#define LPI_PROPBASE_SZ		ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)

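/*
 * Illustrative sizing (example values, not from the original source):
 * with the architectural maximum of lpi_id_bits = 16, BIT(16) = 65536
 * configuration bytes make LPI_PROPBASE_SZ = 64KB, while 65536 / 8 =
 * 8KB of pending bits round up to a 64KB-aligned LPI_PENDBASE_SZ.
 */
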
#define LPI_PROP_DEFAULT_PRIO	GICD_INT_DEF_PRI

/*
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per CPU as a bag of interrupts assigned to this
 * CPU.
 */
struct its_collection {
	u64			target_address;
	u16			col_id;
};

/*
 * The ITS_BASER structure - contains memory information, cached
 * value of BASER register configuration and ITS page size.
 */
struct its_baser {
	void		*base;
	u64		val;
	u32		order;
	u32		psz;
};

struct its_device;

/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the
 * list of devices writing to it.
 *
 * dev_alloc_lock has to be taken for device allocations, while the
 * spinlock must be taken to parse data structures such as the device
 * list.
 */
struct its_node {
	raw_spinlock_t		lock;
	struct mutex		dev_alloc_lock;
	struct list_head	entry;
	void __iomem		*base;
	phys_addr_t		phys_base;
	struct its_cmd_block	*cmd_base;
	struct its_cmd_block	*cmd_write;
	struct its_baser	tables[GITS_BASER_NR_REGS];
	struct its_collection	*collections;
	struct fwnode_handle	*fwnode_handle;
	u64			(*get_msi_base)(struct its_device *its_dev);
	u64			typer;
	u32			mpidr;
	struct list_head	its_device_list;
	u64			flags;
	unsigned long		list_nr;
	int			numa_node;
	unsigned int		msi_domain_flags;
	u32			pre_its_base;	/* for Socionext Synquacer */
	int			vlpi_redist_offset;
};

#define is_v4(its)		(!!((its)->typer & GITS_TYPER_VLPIS))
#define is_v4_1(its)		(!!((its)->typer & GITS_TYPER_VMAPP))
#define device_ids(its)		(FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)

#define ITS_ITT_ALIGN		SZ_256

/* The maximum number of VPEID bits supported by VLPI commands */
#define ITS_MAX_VPEID_BITS						\
	({								\
		int nvpeid = 16;					\
		if (gic_rdists->has_rvpeid &&				\
		    gic_rdists->gicd_typer2 & GICD_TYPER2_VIL)		\
			nvpeid = 1 + (gic_rdists->gicd_typer2 &		\
				      GICD_TYPER2_VID);			\
									\
		nvpeid;							\
	})
#define ITS_MAX_VPEID		(1 << (ITS_MAX_VPEID_BITS))

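/*
 * Worked example (illustrative): without RVPEID, nvpeid stays at its
 * default of 16, so ITS_MAX_VPEID = 1 << 16 = 65536 vPEs. With
 * GICD_TYPER2.VIL set and a VID field of 8, nvpeid = 9 and the vPE
 * space shrinks to 512 entries.
 */
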
/* Convert page order to size in bytes */
#define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))

struct event_lpi_map {
	unsigned long		*lpi_map;
	u16			*col_map;
	irq_hw_number_t		lpi_base;
	int			nr_lpis;
	raw_spinlock_t		vlpi_lock;
	struct its_vm		*vm;
	struct its_vlpi_map	*vlpi_maps;
	int			nr_vlpis;
};

/*
 * The ITS view of a device - belongs to an ITS, owns an interrupt
 * translation table, and a list of interrupts. If some of its
 * LPIs are injected into a guest (GICv4), the event_map.vm field
 * indicates which one.
 */
struct its_device {
	struct list_head	entry;
	struct its_node		*its;
	struct event_lpi_map	event_map;
	void			*itt;
	u32			nr_ites;
	u32			device_id;
	bool			shared;
};

static struct {
	raw_spinlock_t		lock;
	struct its_device	*dev;
	struct its_vpe		**vpes;
	int			next_victim;
} vpe_proxy;

static LIST_HEAD(its_nodes);
static DEFINE_RAW_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;

static unsigned long its_list_map;
static u16 vmovp_seq_num;
static DEFINE_RAW_SPINLOCK(vmovp_lock);

static DEFINE_IDA(its_vpeid_ida);

#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_cpu(cpu)		(per_cpu_ptr(gic_rdists->rdist, cpu))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)

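/*
 * Layout note (GICv3/v4 architecture, not spelled out in the original
 * source): each redistributor exposes two 64KB frames (RD_base and
 * SGI_base); GICv4 appends two more, so the VLPI control frame sits at
 * RD_base + 128KB, hence the SZ_128K offset above.
 */
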
static u16 get_its_list(struct its_vm *vm)
{
	struct its_node *its;
	unsigned long its_list = 0;

	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
			continue;

		if (vm->vlpi_count[its->list_nr])
			__set_bit(its->list_nr, &its_list);
	}

	return (u16)its_list;
}

static inline u32 its_get_event_id(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	return d->hwirq - its_dev->event_map.lpi_base;
}

static struct its_collection *dev_event_to_col(struct its_device *its_dev,
					       u32 event)
{
	struct its_node *its = its_dev->its;

	return its->collections + its_dev->event_map.col_map[event];
}

static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
						  u32 event)
{
	if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))
		return NULL;

	return &its_dev->event_map.vlpi_maps[event];
}

static struct its_collection *irq_to_col(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	return dev_event_to_col(its_dev, its_get_event_id(d));
}

static struct its_collection *valid_col(struct its_collection *col)
{
	if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
		return NULL;

	return col;
}

static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
{
	if (valid_col(its->collections + vpe->col_idx))
		return vpe;

	return NULL;
}

/*
 * ITS command descriptors - parameters to be encoded in a command
 * block.
 */
struct its_cmd_desc {
	union {
		struct { struct its_device *dev; u32 event_id; } its_inv_cmd;
		struct { struct its_device *dev; u32 event_id; } its_clear_cmd;
		struct { struct its_device *dev; u32 event_id; } its_int_cmd;
		struct { struct its_device *dev; int valid; } its_mapd_cmd;
		struct { struct its_collection *col; int valid; } its_mapc_cmd;
		struct { struct its_device *dev; u32 phys_id; u32 event_id; } its_mapti_cmd;
		struct { struct its_device *dev; struct its_collection *col; u32 event_id; } its_movi_cmd;
		struct { struct its_device *dev; u32 event_id; } its_discard_cmd;
		struct { struct its_collection *col; } its_invall_cmd;
		struct { struct its_vpe *vpe; } its_vinvall_cmd;
		struct { struct its_vpe *vpe; struct its_collection *col; bool valid; } its_vmapp_cmd;
		struct { struct its_vpe *vpe; struct its_device *dev; u32 virt_id; u32 event_id; bool db_enabled; } its_vmapti_cmd;
		struct { struct its_vpe *vpe; struct its_device *dev; u32 event_id; bool db_enabled; } its_vmovi_cmd;
		struct { struct its_vpe *vpe; struct its_collection *col; u16 seq_num; u16 its_list; } its_vmovp_cmd;
	};
};

/*
 * The ITS command block, which is what the ITS actually parses.
 */
struct its_cmd_block {
	union {
		u64	raw_cmd[4];
		__le64	raw_cmd_le[4];
	};
};

#define ITS_CMD_QUEUE_SZ		SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))

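/*
 * Illustrative arithmetic: a command block is four u64s, i.e. 32 bytes,
 * so the 64KB command queue holds 65536 / 32 = 2048 entries.
 */
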
typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
						    struct its_cmd_block *,
						    struct its_cmd_desc *);

typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
					      struct its_cmd_block *,
					      struct its_cmd_desc *);

static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
{
	u64 mask = GENMASK_ULL(h, l);

	*raw_cmd &= ~mask;
	*raw_cmd |= (val << l) & mask;
}

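/*
 * Worked example (illustrative): its_encode_devid() below calls
 * its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32), which clears bits
 * [63:32] of the first command word and deposits the device ID there,
 * leaving the command opcode in bits [7:0] untouched.
 */
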
static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
	its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
}

static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
	its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
}

static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
	its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
}

static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
	its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
}

static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
	its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
}

static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
}

static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
	its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
}

static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
}

static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
	its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
}

static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
{
	its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
}

static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
{
	its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
}

static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
{
	its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
}

static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
{
	its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
}

static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
{
	its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
}

static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
{
	its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
}

static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
}

static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
}

static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa)
{
	its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16);
}

static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc)
{
	its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8);
}

static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz)
{
	its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9);
}

static void its_encode_vmapp_default_db(struct its_cmd_block *cmd,
					u32 vpe_db_lpi)
{
	its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0);
}

static void its_encode_vmovp_default_db(struct its_cmd_block *cmd,
					u32 vpe_db_lpi)
{
	its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0);
}

static void its_encode_db(struct its_cmd_block *cmd, bool db)
{
	its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
}

static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
	/* Let's fixup BE commands */
	cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);
	cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
	cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
	cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);
}

static struct its_collection *its_build_mapd_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	unsigned long itt_addr;
	u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);

	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
	itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);

	its_encode_cmd(cmd, GITS_CMD_MAPD);
	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
	its_encode_size(cmd, size - 1);
	its_encode_itt(cmd, itt_addr);
	its_encode_valid(cmd, desc->its_mapd_cmd.valid);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_collection *its_build_mapc_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_MAPC);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
	its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
	its_encode_valid(cmd, desc->its_mapc_cmd.valid);

	its_fixup_cmd(cmd);

	return desc->its_mapc_cmd.col;
}

static struct its_collection *its_build_mapti_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_mapti_cmd.dev,
			       desc->its_mapti_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MAPTI);
	its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
	its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
	its_encode_collection(cmd, col->col_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_movi_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_movi_cmd.dev,
			       desc->its_movi_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MOVI);
	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_discard_cmd(struct its_node *its,
						    struct its_cmd_block *cmd,
						    struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_discard_cmd.dev,
			       desc->its_discard_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_DISCARD);
	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_inv_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_inv_cmd.dev,
			       desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_int_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_int_cmd.dev,
			       desc->its_int_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INT);
	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_int_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_clear_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_clear_cmd.dev,
			       desc->its_clear_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_CLEAR);
	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_invall_cmd(struct its_node *its,
						   struct its_cmd_block *cmd,
						   struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_INVALL);
	its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
					     struct its_cmd_block *cmd,
					     struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_VINVALL);
	its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vinvall_cmd.vpe);
}

static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	unsigned long vpt_addr, vconf_addr;
	u64 target;
	bool alloc;

	its_encode_cmd(cmd, GITS_CMD_VMAPP);
	its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
	its_encode_valid(cmd, desc->its_vmapp_cmd.valid);

	if (!desc->its_vmapp_cmd.valid) {
		if (is_v4_1(its)) {
			alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
			its_encode_alloc(cmd, alloc);
		}

		goto out;
	}

	vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
	target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;

	its_encode_target(cmd, target);
	its_encode_vpt_addr(cmd, vpt_addr);
	its_encode_vpt_size(cmd, LPI_NRBITS - 1);

	if (!is_v4_1(its))
		goto out;

	vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));

	alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);

	its_encode_alloc(cmd, alloc);

	/* We can only signal PTZ when alloc==1. Why do we have two bits? */
	its_encode_ptz(cmd, alloc);
	its_encode_vconf_addr(cmd, vconf_addr);
	its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);

out:
	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmapp_cmd.vpe);
}

static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
					    struct its_cmd_block *cmd,
					    struct its_cmd_desc *desc)
{
	u32 db;

	if (desc->its_vmapti_cmd.db_enabled)
		db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMAPTI);
	its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmapti_cmd.vpe);
}

static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u32 db;

	if (desc->its_vmovi_cmd.db_enabled)
		db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMOVI);
	its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_db_valid(cmd, true);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmovi_cmd.vpe);
}

static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u64 target;

	target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
	its_encode_cmd(cmd, GITS_CMD_VMOVP);
	its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
	its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
	its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
	its_encode_target(cmd, target);

	if (is_v4_1(its)) {
		its_encode_db(cmd, true);
		its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi);
	}

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmovp_cmd.vpe);
}

static struct its_vpe *its_build_vinv_cmd(struct its_node *its,
					  struct its_cmd_block *cmd,
					  struct its_cmd_desc *desc)
{
	struct its_vlpi_map *map;

	map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev,
				    desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, map->vpe);
}

static struct its_vpe *its_build_vint_cmd(struct its_node *its,
					  struct its_cmd_block *cmd,
					  struct its_cmd_desc *desc)
{
	struct its_vlpi_map *map;

	map = dev_event_to_vlpi_map(desc->its_int_cmd.dev,
				    desc->its_int_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INT);
	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_int_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, map->vpe);
}

static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
					    struct its_cmd_block *cmd,
					    struct its_cmd_desc *desc)
{
	struct its_vlpi_map *map;

	map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev,
				    desc->its_clear_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_CLEAR);
	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, map->vpe);
}

static u64 its_cmd_ptr_to_offset(struct its_node *its,
				 struct its_cmd_block *ptr)
{
	return (ptr - its->cmd_base) * sizeof(*ptr);
}

static int its_queue_full(struct its_node *its)
{
	int widx;
	int ridx;

	widx = its->cmd_write - its->cmd_base;
	ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);

	/* This is incredibly unlikely to happen, unless the ITS locks up. */
	if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
		return 1;

	return 0;
}

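/*
 * Illustrative check: with 2048 queue entries, widx = 100 and ridx = 101
 * means the next write would land on the slot the ITS has yet to consume,
 * so the queue is reported full.
 */
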
static struct its_cmd_block *its_allocate_entry(struct its_node *its)
{
	struct its_cmd_block *cmd;
	u32 count = 1000000;	/* 1s! */

	while (its_queue_full(its)) {
		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue not draining\n");
			return NULL;
		}
		cpu_relax();
		udelay(1);
	}

	cmd = its->cmd_write++;

	/* Handle queue wrapping */
	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
		its->cmd_write = its->cmd_base;

	/* Clear command */
	cmd->raw_cmd[0] = 0;
	cmd->raw_cmd[1] = 0;
	cmd->raw_cmd[2] = 0;
	cmd->raw_cmd[3] = 0;

	return cmd;
}

static struct its_cmd_block *its_post_commands(struct its_node *its)
{
	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);

	writel_relaxed(wr, its->base + GITS_CWRITER);

	return its->cmd_write;
}

static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
{
	/*
	 * Make sure the commands written to memory are observable by
	 * the ITS.
	 */
	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
	else
		dsb(ishst);
}

static int its_wait_for_range_completion(struct its_node *its,
					 u64 prev_idx,
					 struct its_cmd_block *to)
{
	u64 rd_idx, to_idx, linear_idx;
	u32 count = 1000000;	/* 1s! */

	/* Linearize to_idx if the command set has wrapped around */
	to_idx = its_cmd_ptr_to_offset(its, to);
	if (to_idx < prev_idx)
		to_idx += ITS_CMD_QUEUE_SZ;

	linear_idx = prev_idx;

	while (1) {
		s64 delta;

		rd_idx = readl_relaxed(its->base + GITS_CREADR);

		/*
		 * Compute the read pointer progress, taking the
		 * potential wrap-around into account.
		 */
		delta = rd_idx - prev_idx;
		if (rd_idx < prev_idx)
			delta += ITS_CMD_QUEUE_SZ;

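		/*
		 * Illustrative wrap: prev_idx = 0xFFE0 and rd_idx = 0x20
		 * yields delta = 0x20 - 0xFFE0 + 0x10000 = 0x40 once the
		 * queue size is added back in.
		 */
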
		linear_idx += delta;
		if (linear_idx >= to_idx)
			break;

		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
					   to_idx, linear_idx);
			return -1;
		}
		prev_idx = rd_idx;
		cpu_relax();
		udelay(1);
	}

	return 0;
}

/* Warning, macro hell follows */
#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)	\
void name(struct its_node *its,						\
	  buildtype builder,						\
	  struct its_cmd_desc *desc)					\
{									\
	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;		\
	synctype *sync_obj;						\
	unsigned long flags;						\
	u64 rd_idx;							\
									\
	raw_spin_lock_irqsave(&its->lock, flags);			\
									\
	cmd = its_allocate_entry(its);					\
	if (!cmd) {		/* We're soooooo screwed... */		\
		raw_spin_unlock_irqrestore(&its->lock, flags);		\
		return;							\
	}								\
	sync_obj = builder(its, cmd, desc);				\
	its_flush_cmd(its, cmd);					\
									\
	if (sync_obj) {							\
		sync_cmd = its_allocate_entry(its);			\
		if (!sync_cmd)						\
			goto post;					\
									\
		buildfn(its, sync_cmd, sync_obj);			\
		its_flush_cmd(its, sync_cmd);				\
	}								\
									\
post:									\
	rd_idx = readl_relaxed(its->base + GITS_CREADR);		\
	next_cmd = its_post_commands(its);				\
	raw_spin_unlock_irqrestore(&its->lock, flags);			\
									\
	if (its_wait_for_range_completion(its, rd_idx, next_cmd))	\
		pr_err_ratelimited("ITS cmd %ps failed\n", builder);	\
}

static void its_build_sync_cmd(struct its_node *its,
			       struct its_cmd_block *sync_cmd,
			       struct its_collection *sync_col)
{
	its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
	its_encode_target(sync_cmd, sync_col->target_address);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
			     struct its_collection, its_build_sync_cmd)

static void its_build_vsync_cmd(struct its_node *its,
				struct its_cmd_block *sync_cmd,
				struct its_vpe *sync_vpe)
{
	its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
	its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
			     struct its_vpe, its_build_vsync_cmd)

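/*
 * Reading guide (not in the original source): the two instantiations
 * above expand to its_send_single_command() and
 * its_send_single_vcommand(), each of which queues the command built by
 * 'builder', chases it with a SYNC/VSYNC aimed at the returned collection
 * or vPE, and then busy-waits for GITS_CREADR to move past the posted
 * commands.
 */
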
static void its_send_int(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_int_cmd.dev = dev;
	desc.its_int_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_int_cmd, &desc);
}

static void its_send_clear(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_clear_cmd.dev = dev;
	desc.its_clear_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_clear_cmd, &desc);
}

static void its_send_inv(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_inv_cmd.dev = dev;
	desc.its_inv_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_inv_cmd, &desc);
}

static void its_send_mapd(struct its_device *dev, int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapd_cmd.dev = dev;
	desc.its_mapd_cmd.valid = !!valid;

	its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
}

static void its_send_mapc(struct its_node *its, struct its_collection *col,
			  int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapc_cmd.col = col;
	desc.its_mapc_cmd.valid = !!valid;

	its_send_single_command(its, its_build_mapc_cmd, &desc);
}

static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_mapti_cmd.dev = dev;
	desc.its_mapti_cmd.phys_id = irq_id;
	desc.its_mapti_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
}

static void its_send_movi(struct its_device *dev,
			  struct its_collection *col, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_movi_cmd.dev = dev;
	desc.its_movi_cmd.col = col;
	desc.its_movi_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_movi_cmd, &desc);
}

static void its_send_discard(struct its_device *dev, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_discard_cmd.dev = dev;
	desc.its_discard_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_discard_cmd, &desc);
}

static void its_send_invall(struct its_node *its, struct its_collection *col)
{
	struct its_cmd_desc desc;

	desc.its_invall_cmd.col = col;

	its_send_single_command(its, its_build_invall_cmd, &desc);
}

static void its_send_vmapti(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
	struct its_cmd_desc desc;

	desc.its_vmapti_cmd.vpe = map->vpe;
	desc.its_vmapti_cmd.dev = dev;
	desc.its_vmapti_cmd.virt_id = map->vintid;
	desc.its_vmapti_cmd.event_id = id;
	desc.its_vmapti_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
}

static void its_send_vmovi(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
	struct its_cmd_desc desc;

	desc.its_vmovi_cmd.vpe = map->vpe;
	desc.its_vmovi_cmd.dev = dev;
	desc.its_vmovi_cmd.event_id = id;
	desc.its_vmovi_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
}

static void its_send_vmapp(struct its_node *its,
			   struct its_vpe *vpe, bool valid)
{
	struct its_cmd_desc desc;

	desc.its_vmapp_cmd.vpe = vpe;
	desc.its_vmapp_cmd.valid = valid;
	desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];

	its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
}

static void its_send_vmovp(struct its_vpe *vpe)
{
	struct its_cmd_desc desc = {};
	struct its_node *its;
	unsigned long flags;
	int col_id = vpe->col_idx;

	desc.its_vmovp_cmd.vpe = vpe;

	if (!its_list_map) {
		its = list_first_entry(&its_nodes, struct its_node, entry);
		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
		return;
	}

	/*
	 * Yet another marvel of the architecture. If using the
	 * its_list "feature", we need to make sure that all ITSs
	 * receive all VMOVP commands in the same order. The only way
	 * to guarantee this is to make vmovp a serialization point.
	 */
	raw_spin_lock_irqsave(&vmovp_lock, flags);

	desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
	desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);

	/* Emit VMOVPs */
	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
			continue;

		if (!vpe->its_vm->vlpi_count[its->list_nr])
			continue;

		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
{
	struct its_cmd_desc desc;

	desc.its_vinvall_cmd.vpe = vpe;
	its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
}

static void its_send_vinv(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	/*
	 * There is no real VINV command. This is just a normal INV,
	 * with a VSYNC instead of a SYNC.
	 */
	desc.its_inv_cmd.dev = dev;
	desc.its_inv_cmd.event_id = event_id;

	its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);
}

static void its_send_vint(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	/*
	 * There is no real VINT command. This is just a normal INT,
	 * with a VSYNC instead of a SYNC.
	 */
	desc.its_int_cmd.dev = dev;
	desc.its_int_cmd.event_id = event_id;

	its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);
}

static void its_send_vclear(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	/*
	 * There is no real VCLEAR command. This is just a normal CLEAR,
	 * with a VSYNC instead of a SYNC.
	 */
	desc.its_clear_cmd.dev = dev;
	desc.its_clear_cmd.event_id = event_id;

	its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
}

/*
 * irqchip functions - assumes MSI, mostly.
 */
static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d)) {
		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
		u32 event = its_get_event_id(d);

		return dev_event_to_vlpi_map(its_dev, event);
	}

	return NULL;
}

static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
{
	struct its_vlpi_map *map = get_vlpi_map(d);
	irq_hw_number_t hwirq;
	void *va;
	u8 *cfg;

	if (map) {
		va = page_address(map->vm->vprop_page);
		hwirq = map->vintid;

		/* Remember the updated property */
		map->properties &= ~clr;
		map->properties |= set | LPI_PROP_GROUP1;
	} else {
		va = gic_rdists->prop_table_va;
		hwirq = d->hwirq;
	}

	cfg = va + hwirq - 8192;

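	/*
	 * Illustrative indexing (not in the original): LPI INTIDs start
	 * at 8192, so hwirq 8192 maps to byte 0 of the (virtual or
	 * physical) property table, hwirq 8193 to byte 1, and so on.
	 */
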
	*cfg &= ~clr;
	*cfg |= set | LPI_PROP_GROUP1;

	/*
	 * Make the above write visible to the redistributors.
	 * And yes, we're flushing exactly: One. Single. Byte.
	 */
	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
	else
		dsb(ishst);
}

static void wait_for_syncr(void __iomem *rdbase)
{
	while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
		cpu_relax();
}

1299 struct its_collection *col;
1300 void __iomem *rdbase;
1302 /* Target the redistributor this LPI is currently routed to */
1303 col = irq_to_col(d);
1304 rdbase = per_cpu_ptr(gic_rdists->rdist, col->col_id)->rd_base;
1305 gic_write_lpir(d->hwirq, rdbase + GICR_INVLPIR);
1307 wait_for_syncr(rdbase);
static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	lpi_write_config(d, clr, set);
	if (gic_rdists->has_direct_lpi && !irqd_is_forwarded_to_vcpu(d))
		direct_lpi_inv(d);
	else if (!irqd_is_forwarded_to_vcpu(d))
		its_send_inv(its_dev, its_get_event_id(d));
	else
		its_send_vinv(its_dev, its_get_event_id(d));
}

static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	struct its_vlpi_map *map;

	map = dev_event_to_vlpi_map(its_dev, event);

	if (map->db_enabled == enable)
		return;

	map->db_enabled = enable;

	/*
	 * More fun with the architecture:
	 *
	 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
	 * value or to 1023, depending on the enable bit. But that
	 * would be issuing a mapping for an /existing/ DevID+EventID
	 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
	 * to the /same/ vPE, using this opportunity to adjust the
	 * doorbell. Mouahahahaha. We loves it, Precious.
	 */
	its_send_vmovi(its_dev, event);
}

static void its_mask_irq(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, false);

	lpi_update_config(d, LPI_PROP_ENABLED, 0);
}

static void its_unmask_irq(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, true);

	lpi_update_config(d, 0, LPI_PROP_ENABLED);
}

static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu;
	const struct cpumask *cpu_mask = cpu_online_mask;
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_collection *target_col;
	u32 id = its_get_event_id(d);

	/* A forwarded interrupt should use irq_set_vcpu_affinity */
	if (irqd_is_forwarded_to_vcpu(d))
		return -EINVAL;

	/* lpi cannot be routed to a redistributor that is on a foreign node */
	if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
		if (its_dev->its->numa_node >= 0) {
			cpu_mask = cpumask_of_node(its_dev->its->numa_node);
			if (!cpumask_intersects(mask_val, cpu_mask))
				return -EINVAL;
		}
	}

	cpu = cpumask_any_and(mask_val, cpu_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* don't set the affinity when the target cpu is same as current one */
	if (cpu != its_dev->event_map.col_map[id]) {
		target_col = &its_dev->its->collections[cpu];
		its_send_movi(its_dev, target_col, id);
		its_dev->event_map.col_map[id] = cpu;
		irq_data_update_effective_affinity(d, cpumask_of(cpu));
	}

	return IRQ_SET_MASK_OK_DONE;
}

static u64 its_irq_get_msi_base(struct its_device *its_dev)
{
	struct its_node *its = its_dev->its;

	return its->phys_base + GITS_TRANSLATER;
}

static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_node *its;
	u64 addr;

	its = its_dev->its;
	addr = its->get_msi_base(its_dev);

	msg->address_lo		= lower_32_bits(addr);
	msg->address_hi		= upper_32_bits(addr);
	msg->data		= its_get_event_id(d);

	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
}

static int its_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which,
				     bool state)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	if (which != IRQCHIP_STATE_PENDING)
		return -EINVAL;

	if (irqd_is_forwarded_to_vcpu(d)) {
		if (state)
			its_send_vint(its_dev, event);
		else
			its_send_vclear(its_dev, event);
	} else {
		if (state)
			its_send_int(its_dev, event);
		else
			its_send_clear(its_dev, event);
	}

	return 0;
}

static void its_map_vm(struct its_node *its, struct its_vm *vm)
{
	unsigned long flags;

	/* Not using the ITS list? Everything is always mapped. */
	if (!its_list_map)
		return;

	raw_spin_lock_irqsave(&vmovp_lock, flags);

	/*
	 * If the VM wasn't mapped yet, iterate over the vpes and get
	 * them mapped now.
	 */
	vm->vlpi_count[its->list_nr]++;

	if (vm->vlpi_count[its->list_nr] == 1) {
		int i;

		for (i = 0; i < vm->nr_vpes; i++) {
			struct its_vpe *vpe = vm->vpes[i];
			struct irq_data *d = irq_get_irq_data(vpe->irq);

			/* Map the VPE to the first possible CPU */
			vpe->col_idx = cpumask_first(cpu_online_mask);
			its_send_vmapp(its, vpe, true);
			its_send_vinvall(its, vpe);
			irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
		}
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
{
	unsigned long flags;

	/* Not using the ITS list? Everything is always mapped. */
	if (!its_list_map)
		return;

	raw_spin_lock_irqsave(&vmovp_lock, flags);

	if (!--vm->vlpi_count[its->list_nr]) {
		int i;

		for (i = 0; i < vm->nr_vpes; i++)
			its_send_vmapp(its, vm->vpes[i], false);
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	int ret = 0;

	if (!info->map)
		return -EINVAL;

	raw_spin_lock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm) {
		struct its_vlpi_map *maps;

		maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
			       GFP_ATOMIC);
		if (!maps) {
			ret = -ENOMEM;
			goto out;
		}

		its_dev->event_map.vm = info->map->vm;
		its_dev->event_map.vlpi_maps = maps;
	} else if (its_dev->event_map.vm != info->map->vm) {
		ret = -EINVAL;
		goto out;
	}

	/* Get our private copy of the mapping information */
	its_dev->event_map.vlpi_maps[event] = *info->map;

	if (irqd_is_forwarded_to_vcpu(d)) {
		/* Already mapped, move it around */
		its_send_vmovi(its_dev, event);
	} else {
		/* Ensure all the VPEs are mapped on this ITS */
		its_map_vm(its_dev->its, info->map->vm);

		/*
		 * Flag the interrupt as forwarded so that we can
		 * start poking the virtual property table.
		 */
		irqd_set_forwarded_to_vcpu(d);

		/* Write out the property to the prop table */
		lpi_write_config(d, 0xff, info->map->properties);

		/* Drop the physical mapping */
		its_send_discard(its_dev, event);

		/* and install the virtual one */
		its_send_vmapti(its_dev, event);

		/* Increment the number of VLPIs */
		its_dev->event_map.nr_vlpis++;
	}

out:
	raw_spin_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}

static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_vlpi_map *map;
	int ret = 0;

	raw_spin_lock(&its_dev->event_map.vlpi_lock);

	map = get_vlpi_map(d);

	if (!its_dev->event_map.vm || !map) {
		ret = -EINVAL;
		goto out;
	}

	/* Copy our mapping information to the incoming request */
	*info->map = *map;

out:
	raw_spin_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}

static int its_vlpi_unmap(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	int ret = 0;

	raw_spin_lock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
		ret = -EINVAL;
		goto out;
	}

	/* Drop the virtual mapping */
	its_send_discard(its_dev, event);

	/* and restore the physical one */
	irqd_clr_forwarded_to_vcpu(d);
	its_send_mapti(its_dev, d->hwirq, event);
	lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
				    LPI_PROP_ENABLED |
				    LPI_PROP_GROUP1));

	/* Potentially unmap the VM from this ITS */
	its_unmap_vm(its_dev->its, its_dev->event_map.vm);

	/*
	 * Drop the refcount and make the device available again if
	 * this was the last VLPI.
	 */
	if (!--its_dev->event_map.nr_vlpis) {
		its_dev->event_map.vm = NULL;
		kfree(its_dev->event_map.vlpi_maps);
	}

out:
	raw_spin_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}

static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
		return -EINVAL;

	if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
		lpi_update_config(d, 0xff, info->config);
	else
		lpi_write_config(d, 0xff, info->config);
	its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));

	return 0;
}

static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_cmd_info *info = vcpu_info;

	/* Need a v4 ITS */
	if (!is_v4(its_dev->its))
		return -EINVAL;

	/* Unmap request? */
	if (!info)
		return its_vlpi_unmap(d);

	switch (info->cmd_type) {
	case MAP_VLPI:
		return its_vlpi_map(d, info);

	case GET_VLPI:
		return its_vlpi_get(d, info);

	case PROP_UPDATE_VLPI:
	case PROP_UPDATE_AND_INV_VLPI:
		return its_vlpi_prop_update(d, info);

	default:
		return -EINVAL;
	}
}

static struct irq_chip its_irq_chip = {
	.name			= "ITS",
	.irq_mask		= its_mask_irq,
	.irq_unmask		= its_unmask_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= its_set_affinity,
	.irq_compose_msi_msg	= its_irq_compose_msi_msg,
	.irq_set_irqchip_state	= its_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= its_irq_set_vcpu_affinity,
};

/*
 * How we allocate LPIs:
 *
 * lpi_range_list contains ranges of LPIs that are available to
 * allocate from. To allocate LPIs, just pick the first range that
 * fits the required allocation, and reduce it by the required
 * amount. Once empty, remove the range from the list.
 *
 * To free a range of LPIs, add a free range to the list, sort it and
 * merge the result if the new range happens to be adjacent to an
 * already free block.
 *
 * The consequence of the above is that allocation cost is low, but
 * freeing is expensive. We assume that freeing rarely occurs.
 */

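/*
 * Worked example (illustrative): starting from a single free range
 * [8192, 65535], allocating 32 LPIs returns base 8192 and leaves
 * [8224, 65535] on the list; freeing those 32 LPIs later inserts
 * [8192, 8223] and merges it straight back into its neighbour.
 */
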
#define ITS_MAX_LPI_NRBITS	16 /* 64K LPIs */

static DEFINE_MUTEX(lpi_range_lock);
static LIST_HEAD(lpi_range_list);

struct lpi_range {
	struct list_head	entry;
	u32			base_id;
	u32			span;
};

static struct lpi_range *mk_lpi_range(u32 base, u32 span)
{
	struct lpi_range *range;

	range = kmalloc(sizeof(*range), GFP_KERNEL);
	if (range) {
		range->base_id = base;
		range->span = span;
	}

	return range;
}

static int alloc_lpi_range(u32 nr_lpis, u32 *base)
{
	struct lpi_range *range, *tmp;
	int err = -ENOSPC;

	mutex_lock(&lpi_range_lock);

	list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
		if (range->span >= nr_lpis) {
			*base = range->base_id;
			range->base_id += nr_lpis;
			range->span -= nr_lpis;

			if (range->span == 0) {
				list_del(&range->entry);
				kfree(range);
			}

			err = 0;
			break;
		}
	}

	mutex_unlock(&lpi_range_lock);

	pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
	return err;
}

static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b)
{
	if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list)
		return;
	if (a->base_id + a->span != b->base_id)
		return;
	b->base_id = a->base_id;
	b->span += a->span;
	list_del(&a->entry);
	kfree(a);
}

static int free_lpi_range(u32 base, u32 nr_lpis)
{
	struct lpi_range *new, *old;

	new = mk_lpi_range(base, nr_lpis);
	if (!new)
		return -ENOMEM;

	mutex_lock(&lpi_range_lock);

	list_for_each_entry_reverse(old, &lpi_range_list, entry) {
		if (old->base_id < base)
			break;
	}
	/*
	 * old is the last element with ->base_id smaller than base,
	 * so new goes right after it. If there are no elements with
	 * ->base_id smaller than base, &old->entry ends up pointing
	 * at the head of the list, and inserting new at the start of
	 * the list is the right thing to do in that case as well.
	 */
	list_add(&new->entry, &old->entry);
	/*
	 * Now check if we can merge with the preceding and/or
	 * following ranges.
	 */
	merge_lpi_ranges(old, new);
	merge_lpi_ranges(new, list_next_entry(new, entry));

	mutex_unlock(&lpi_range_lock);
	return 0;
}

static int __init its_lpi_init(u32 id_bits)
{
	u32 lpis = (1UL << id_bits) - 8192;
	u32 numlpis;
	int err;

	numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);

	if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
		lpis = numlpis;
		pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
			lpis);
	}

	/*
	 * Initializing the allocator is just the same as freeing the
	 * full range of LPIs.
	 */
	err = free_lpi_range(8192, lpis);
	pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
	return err;
}

static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
{
	unsigned long *bitmap = NULL;
	int err = 0;

	do {
		err = alloc_lpi_range(nr_irqs, base);
		if (!err)
			break;

		nr_irqs /= 2;
	} while (nr_irqs > 0);

	if (!nr_irqs)
		err = -ENOSPC;

	if (err)
		goto out;

	bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC);
	if (!bitmap)
		goto out;

	*nr_ids = nr_irqs;

out:
	if (!bitmap)
		*base = *nr_ids = 0;

	return bitmap;
}

static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
{
	WARN_ON(free_lpi_range(base, nr_ids));
	kfree(bitmap);
}

static void gic_reset_prop_table(void *va)
{
	/* Priority 0xa0, Group-1, disabled */
	memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);

	/* Make sure the GIC will observe the written configuration */
	gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
}

static struct page *its_allocate_prop_table(gfp_t gfp_flags)
{
	struct page *prop_page;

	prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
	if (!prop_page)
		return NULL;

	gic_reset_prop_table(page_address(prop_page));

	return prop_page;
}

static void its_free_prop_table(struct page *prop_page)
{
	free_pages((unsigned long)page_address(prop_page),
		   get_order(LPI_PROPBASE_SZ));
}

static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
{
	phys_addr_t start, end, addr_end;
	u64 i;

	/*
	 * We don't bother checking for a kdump kernel as by
	 * construction, the LPI tables are out of this kernel's
	 * memory map.
	 */
	if (is_kdump_kernel())
		return true;

	addr_end = addr + size - 1;

	for_each_reserved_mem_region(i, &start, &end) {
		if (addr >= start && addr_end <= end)
			return true;
	}

	/* Not found, not a good sign... */
	pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n",
		&addr, &addr_end);
	add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
	return false;
}

static int gic_reserve_range(phys_addr_t addr, unsigned long size)
{
	if (efi_enabled(EFI_CONFIG_TABLES))
		return efi_mem_reserve_persistent(addr, size);

	return 0;
}

static int __init its_setup_lpi_prop_table(void)
{
	if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) {
		u64 val;

		val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
		lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1;

		gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12);
		gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
						     LPI_PROPBASE_SZ,
						     MEMREMAP_WB);
		gic_reset_prop_table(gic_rdists->prop_table_va);
	} else {
		struct page *page;

		lpi_id_bits = min_t(u32,
				    GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
				    ITS_MAX_LPI_NRBITS);
		page = its_allocate_prop_table(GFP_NOWAIT);
		if (!page) {
			pr_err("Failed to allocate PROPBASE\n");
			return -ENOMEM;
		}

		gic_rdists->prop_table_pa = page_to_phys(page);
		gic_rdists->prop_table_va = page_address(page);
		WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa,
					  LPI_PROPBASE_SZ));
	}

	pr_info("GICv3: using LPI property table @%pa\n",
		&gic_rdists->prop_table_pa);

	return its_lpi_init(lpi_id_bits);
}

static const char *its_base_type_string[] = {
	[GITS_BASER_TYPE_DEVICE]	= "Devices",
	[GITS_BASER_TYPE_VCPU]		= "Virtual CPUs",
	[GITS_BASER_TYPE_RESERVED3]	= "Reserved (3)",
	[GITS_BASER_TYPE_COLLECTION]	= "Interrupt Collections",
	[GITS_BASER_TYPE_RESERVED5]	= "Reserved (5)",
	[GITS_BASER_TYPE_RESERVED6]	= "Reserved (6)",
	[GITS_BASER_TYPE_RESERVED7]	= "Reserved (7)",
};

static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
{
	u32 idx = baser - its->tables;

	return gits_read_baser(its->base + GITS_BASER + (idx << 3));
}

static void its_write_baser(struct its_node *its, struct its_baser *baser,
			    u64 val)
{
	u32 idx = baser - its->tables;

	gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
	baser->val = its_read_baser(its, baser);
}

static int its_setup_baser(struct its_node *its, struct its_baser *baser,
			   u64 cache, u64 shr, u32 psz, u32 order,
			   bool indirect)
{
	u64 val = its_read_baser(its, baser);
	u64 esz = GITS_BASER_ENTRY_SIZE(val);
	u64 type = GITS_BASER_TYPE(val);
	u64 baser_phys, tmp;
	u32 alloc_pages;
	struct page *page;
	void *base;

retry_alloc_baser:
	alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
	if (alloc_pages > GITS_BASER_PAGES_MAX) {
		pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
			&its->phys_base, its_base_type_string[type],
			alloc_pages, GITS_BASER_PAGES_MAX);
		alloc_pages = GITS_BASER_PAGES_MAX;
		order = get_order(GITS_BASER_PAGES_MAX * psz);
	}

	page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
	if (!page)
		return -ENOMEM;

	base = (void *)page_address(page);
	baser_phys = virt_to_phys(base);

	/* Check if the physical address of the memory is above 48bits */
	if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {

		/* 52bit PA is supported only when PageSize=64K */
		if (psz != SZ_64K) {
			pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
			free_pages((unsigned long)base, order);
			return -ENXIO;
		}

		/* Convert 52bit PA to 48bit field */
		baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
	}

retry_baser:
	val = (baser_phys					 |
		(type << GITS_BASER_TYPE_SHIFT)			 |
		((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT)	 |
		((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT)	 |
		cache						 |
		shr						 |
		GITS_BASER_VALID);

	val |= indirect ? GITS_BASER_INDIRECT : 0x0;

	switch (psz) {
	case SZ_4K:
		val |= GITS_BASER_PAGE_SIZE_4K;
		break;
	case SZ_16K:
		val |= GITS_BASER_PAGE_SIZE_16K;
		break;
	case SZ_64K:
		val |= GITS_BASER_PAGE_SIZE_64K;
		break;
	}

	its_write_baser(its, baser, val);
	tmp = baser->val;

	if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
		/*
		 * Shareability didn't stick. Just use
		 * whatever the read reported, which is likely
		 * to be the only thing this redistributor
		 * supports. If that's zero, make it
		 * non-cacheable as well.
		 */
		shr = tmp & GITS_BASER_SHAREABILITY_MASK;
		if (!shr) {
			cache = GITS_BASER_nC;
			gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
		}
		goto retry_baser;
	}

	if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
		/*
		 * Page size didn't stick. Let's try a smaller
		 * size and retry. If we reach 4K, then
		 * something is horribly wrong...
		 */
		free_pages((unsigned long)base, order);

		switch (psz) {
		case SZ_16K:
			psz = SZ_4K;
			goto retry_alloc_baser;
		case SZ_64K:
			psz = SZ_16K;
			goto retry_alloc_baser;
		}
	}

	if (val != tmp) {
		pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
		       &its->phys_base, its_base_type_string[type],
		       val, tmp);
		free_pages((unsigned long)base, order);
		return -ENXIO;
	}

	baser->order = order;
	baser->base = base;
	baser->psz = psz;
	tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;

	pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
		&its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
		its_base_type_string[type],
		(unsigned long)virt_to_phys(base),
		indirect ? "indirect" : "flat", (int)esz,
		psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);

	return 0;
}

static bool its_parse_indirect_baser(struct its_node *its,
				     struct its_baser *baser,
				     u32 psz, u32 *order, u32 ids)
{
	u64 tmp = its_read_baser(its, baser);
	u64 type = GITS_BASER_TYPE(tmp);
	u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
	u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
	u32 new_order = *order;
	bool indirect = false;

	/* No need to enable Indirection if memory requirement < (psz*2)bytes */
	if ((esz << ids) > (psz * 2)) {
		/*
		 * Find out whether hw supports a single or two-level
		 * table by reading bit at offset '62' after writing '1' to it.
		 */
		its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
		indirect = !!(baser->val & GITS_BASER_INDIRECT);

		if (indirect) {
			/*
			 * The size of the lvl2 table is equal to ITS page size
			 * which is 'psz'. For computing lvl1 table size,
			 * subtract ID bits that sparse lvl2 table from 'ids'
			 * which is reported by ITS hardware times lvl1 table
			 * entry size.
			 */
			ids -= ilog2(psz / (int)esz);
			esz = GITS_LVL1_ENTRY_SIZE;

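			/*
			 * Worked example (illustrative): with psz = 64KB
			 * and esz = 8 bytes, one lvl2 page covers 8192 IDs,
			 * so ids is reduced by ilog2(8192) = 13 before
			 * sizing the lvl1 table.
			 */
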
		}
	}

	/*
	 * Allocate as many entries as required to fit the
	 * range of device IDs that the ITS can grok... The ID
	 * space being incredibly sparse, this results in a
	 * massive waste of memory if two-level device table
	 * feature is not supported by hardware.
	 */
	new_order = max_t(u32, get_order(esz << ids), new_order);
	if (new_order >= MAX_ORDER) {
		new_order = MAX_ORDER - 1;
		ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
		pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
			&its->phys_base, its_base_type_string[type],
			device_ids(its), ids);
	}

	*order = new_order;

	return indirect;
}

static u32 compute_common_aff(u64 val)
{
	u32 aff, clpiaff;

	aff = FIELD_GET(GICR_TYPER_AFFINITY, val);
	clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val);

	return aff & ~(GENMASK(31, 0) >> (clpiaff * 8));
}

static u32 compute_its_aff(struct its_node *its)
{
	u64 val;
	u32 svpet;

	/*
	 * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute
	 * the resulting affinity. We then use that to see if this matches
	 * our own affinity.
	 */
	svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
	val  = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet);
	val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr);
	return compute_common_aff(val);
}

static struct its_node *find_sibling_its(struct its_node *cur_its)
{
	struct its_node *its;
	u32 aff;

	if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer))
		return NULL;

	aff = compute_its_aff(cur_its);

	list_for_each_entry(its, &its_nodes, entry) {
		u64 baser;

		if (!is_v4_1(its) || its == cur_its)
			continue;

		if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
			continue;

		if (aff != compute_its_aff(its))
			continue;

		/* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
		baser = its->tables[2].val;
		if (!(baser & GITS_BASER_VALID))
			continue;

		return its;
	}

	return NULL;
}

static void its_free_tables(struct its_node *its)
{
	int i;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		if (its->tables[i].base) {
			free_pages((unsigned long)its->tables[i].base,
				   its->tables[i].order);
			its->tables[i].base = NULL;
		}
	}
}

static int its_alloc_tables(struct its_node *its)
{
	u64 shr = GITS_BASER_InnerShareable;
	u64 cache = GITS_BASER_RaWaWb;
	u32 psz = SZ_64K;
	int err, i;

	if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
		/* erratum 24313: ignore memory access type */
		cache = GITS_BASER_nCnB;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		struct its_baser *baser = its->tables + i;
		u64 val = its_read_baser(its, baser);
		u64 type = GITS_BASER_TYPE(val);
		u32 order = get_order(psz);
		bool indirect = false;

		switch (type) {
		case GITS_BASER_TYPE_NONE:
			continue;

		case GITS_BASER_TYPE_DEVICE:
			indirect = its_parse_indirect_baser(its, baser,
							    psz, &order,
							    device_ids(its));
			break;

		case GITS_BASER_TYPE_VCPU:
			if (is_v4_1(its)) {
				struct its_node *sibling;

				WARN_ON(i != 2);
				if ((sibling = find_sibling_its(its))) {
					*baser = sibling->tables[2];
					its_write_baser(its, baser, baser->val);
					continue;
				}
			}

			indirect = its_parse_indirect_baser(its, baser,
							    psz, &order,
							    ITS_MAX_VPEID_BITS);
			break;
		}

		err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
		if (err < 0) {
			its_free_tables(its);
			return err;
		}

		/* Update settings which will be used for next BASERn */
		psz = baser->psz;
		cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
		shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
	}

	return 0;
}

static u64 inherit_vpe_l1_table_from_its(void)
{
	struct its_node *its;
	u64 val;
	u32 aff;

	val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
	aff = compute_common_aff(val);

	list_for_each_entry(its, &its_nodes, entry) {
		u64 baser, addr;

		if (!is_v4_1(its))
			continue;

		if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
			continue;

		if (aff != compute_its_aff(its))
			continue;

		/* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
		baser = its->tables[2].val;
		if (!(baser & GITS_BASER_VALID))
			continue;

		/* We have a winner! */
		val  = GICR_VPROPBASER_4_1_VALID;
		if (baser & GITS_BASER_INDIRECT)
			val |= GICR_VPROPBASER_4_1_INDIRECT;
		val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE,
				  FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser));
		switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) {
		case GIC_PAGE_SIZE_64K:
			addr = GITS_BASER_ADDR_48_to_52(baser);
			break;
		default:
			addr = baser & GENMASK_ULL(47, 12);
			break;
		}
		val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12);
		val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
				  FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
		val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
				  FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
		val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);

		return val;
	}

	return 0;
}

static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
{
	u32 aff;
	u64 val;
	int cpu;

	val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
	aff = compute_common_aff(val);

	for_each_possible_cpu(cpu) {
		void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
		u32 tmp;

		if (!base || cpu == smp_processor_id())
			continue;

		val = gic_read_typer(base + GICR_TYPER);
		tmp = compute_common_aff(val);
		if (tmp != aff)
			continue;

		/*
		 * At this point, we have a victim. This particular CPU
		 * has already booted, and has an affinity that matches
		 * ours wrt CommonLPIAff. Let's use its own VPROPBASER.
		 * Make sure we don't write the Z bit in that case.
		 */
		val = gits_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
		val &= ~GICR_VPROPBASER_4_1_Z;

		*mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;

		return val;
	}

	return 0;
}

static int allocate_vpe_l1_table(void)
{
	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
	u64 val, gpsz, npg, pa;
	unsigned int psz = SZ_64K;
	unsigned int np, epp, esz;
	struct page *page;

	if (!gic_rdists->has_rvpeid)
		return 0;

	/*
	 * if VPENDBASER.Valid is set, disable any previously programmed
	 * VPE by setting PendingLast while clearing Valid. This has the
	 * effect of making sure no doorbell will be generated and we can
	 * then safely clear VPROPBASER.Valid.
	 */
	if (gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid)
		gits_write_vpendbaser(GICR_VPENDBASER_PendingLast,
				      vlpi_base + GICR_VPENDBASER);

	/*
	 * If we can inherit the configuration from another RD, let's do
	 * so. Otherwise, we have to go through the allocation process. We
	 * assume that all RDs have the exact same requirements, as
	 * nothing will work otherwise.
	 */
	val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask);
	if (val & GICR_VPROPBASER_4_1_VALID)
		goto out;

	gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_KERNEL);
	if (!gic_data_rdist()->vpe_table_mask)
		return -ENOMEM;

	val = inherit_vpe_l1_table_from_its();
	if (val & GICR_VPROPBASER_4_1_VALID)
		goto out;

	/* First probe the page size */
	val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K);
	gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
	val = gits_read_vpropbaser(vlpi_base + GICR_VPROPBASER);
	gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
	esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val);

	switch (gpsz) {
	default:
		gpsz = GIC_PAGE_SIZE_4K;
		/* fall through */
	case GIC_PAGE_SIZE_4K:
		psz = SZ_4K;
		break;
	case GIC_PAGE_SIZE_16K:
		psz = SZ_16K;
		break;
	case GIC_PAGE_SIZE_64K:
		psz = SZ_64K;
		break;
	}

	/*
	 * Start populating the register from scratch, including RO fields
	 * (which we want to print in debug cases...)
	 */
	val = 0;
	val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz);
	val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz);

	/* How many entries per GIC page? */
	esz++;
	epp = psz / (esz * SZ_8);

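	/*
	 * Illustrative numbers: with 64KB GIC pages and a raw entry size
	 * of 1 (i.e. two 8-byte doublewords per vPE entry after the
	 * increment), epp = 65536 / 16 = 4096 vPE entries per page.
	 */
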
	/*
	 * If we need more than just a single L1 page, flag the table
	 * as indirect and compute the number of required L1 pages.
	 */
	if (epp < ITS_MAX_VPEID) {
		int nl2;

		val |= GICR_VPROPBASER_4_1_INDIRECT;

		/* Number of L2 pages required to cover the VPEID space */
		nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp);

		/* Number of L1 pages to point to the L2 pages */
		npg = DIV_ROUND_UP(nl2 * SZ_8, psz);
	} else {
		npg = 1;
	}

	val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg);

	/* Right, that's the number of CPU pages we need for L1 */
	np = DIV_ROUND_UP(npg * psz, PAGE_SIZE);

	pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
		 np, npg, psz, epp, esz);
	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(np * PAGE_SIZE));
	if (!page)
		return -ENOMEM;

	gic_data_rdist()->vpe_l1_page = page;
	pa = virt_to_phys(page_address(page));
	WARN_ON(!IS_ALIGNED(pa, psz));

	val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12);
	val |= GICR_VPROPBASER_RaWb;
	val |= GICR_VPROPBASER_InnerShareable;
	val |= GICR_VPROPBASER_4_1_Z;
	val |= GICR_VPROPBASER_4_1_VALID;

out:
	gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
	cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask);

	pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n",
		 smp_processor_id(), val,
		 cpumask_pr_args(gic_data_rdist()->vpe_table_mask));

	return 0;
}

2515 static int its_alloc_collections(struct its_node *its)
2519 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
2521 if (!its->collections)
2524 for (i = 0; i < nr_cpu_ids; i++)
2525 its->collections[i].target_address = ~0ULL;
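/*
 * ~0ULL acts as a "not mapped yet" marker; a collection only
 * becomes usable once its_cpu_init_collection() binds it to an
 * online CPU's redistributor.
 */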
2530 static struct page *its_allocate_pending_table(gfp_t gfp_flags)
2532 struct page *pend_page;
2534 pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
2535 get_order(LPI_PENDBASE_SZ));
/* Make sure the GIC will observe the zeroed page */
2540 gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
2545 static void its_free_pending_table(struct page *pt)
2547 free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
2551 * Booting with kdump and LPIs enabled is generally fine. Any other
2552 * case is wrong in the absence of firmware/EFI support.
2554 static bool enabled_lpis_allowed(void)
2559 /* Check whether the property table is in a reserved region */
2560 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
2561 addr = val & GENMASK_ULL(51, 12);
2563 return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
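/*
 * Put differently: pre-enabled LPIs are only trusted when the table
 * the redistributor points at falls inside a region known to be
 * reserved (typically one persisted across kexec/kdump); anything
 * else means firmware left live tables in unaccounted memory.
 */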
2566 static int __init allocate_lpi_tables(void)
2572 * If LPIs are enabled while we run this from the boot CPU,
2573 * flag the RD tables as pre-allocated if the stars do align.
2575 val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);
2576 if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
2577 gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |
2578 RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING);
2579 pr_info("GICv3: Using preallocated redistributor tables\n");
2582 err = its_setup_lpi_prop_table();
 * We allocate all the pending tables anyway, as we may have a
 * mix of RDs that have had LPIs enabled, and some that
 * haven't. We'll free the unused ones as each CPU comes online.
2591 for_each_possible_cpu(cpu) {
2592 struct page *pend_page;
2594 pend_page = its_allocate_pending_table(GFP_NOWAIT);
2596 pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
2600 gic_data_rdist_cpu(cpu)->pend_page = pend_page;
2606 static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
2608 u32 count = 1000000; /* 1s! */
2612 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2613 val &= ~GICR_VPENDBASER_Valid;
2614 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2617 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2618 clean = !(val & GICR_VPENDBASER_Dirty);
2624 } while (!clean && count);
2629 static void its_cpu_init_lpis(void)
2631 void __iomem *rbase = gic_data_rdist_rd_base();
2632 struct page *pend_page;
2636 if (gic_data_rdist()->lpi_enabled)
2639 val = readl_relaxed(rbase + GICR_CTLR);
2640 if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
2641 (val & GICR_CTLR_ENABLE_LPIS)) {
2643 * Check that we get the same property table on all
2644 * RDs. If we don't, this is hopeless.
2646 paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
2647 paddr &= GENMASK_ULL(51, 12);
2648 if (WARN_ON(gic_rdists->prop_table_pa != paddr))
2649 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
2651 paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
2652 paddr &= GENMASK_ULL(51, 16);
2654 WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
2655 its_free_pending_table(gic_data_rdist()->pend_page);
2656 gic_data_rdist()->pend_page = NULL;
2661 pend_page = gic_data_rdist()->pend_page;
2662 paddr = page_to_phys(pend_page);
2663 WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
2666 val = (gic_rdists->prop_table_pa |
2667 GICR_PROPBASER_InnerShareable |
2668 GICR_PROPBASER_RaWaWb |
2669 ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
2671 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
2672 tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
2674 if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
2675 if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
/*
 * The HW reports non-shareable, we must
 * remove the cacheability attributes as
 * well.
 */
2681 val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
2682 GICR_PROPBASER_CACHEABILITY_MASK);
2683 val |= GICR_PROPBASER_nC;
2684 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
2686 pr_info_once("GIC: using cache flushing for LPI property table\n");
2687 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
2691 val = (page_to_phys(pend_page) |
2692 GICR_PENDBASER_InnerShareable |
2693 GICR_PENDBASER_RaWaWb);
2695 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
2696 tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
2698 if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
2700 * The HW reports non-shareable, we must remove the
2701 * cacheability attributes as well.
2703 val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
2704 GICR_PENDBASER_CACHEABILITY_MASK);
2705 val |= GICR_PENDBASER_nC;
2706 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
2710 val = readl_relaxed(rbase + GICR_CTLR);
2711 val |= GICR_CTLR_ENABLE_LPIS;
2712 writel_relaxed(val, rbase + GICR_CTLR);
2714 if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
2715 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
 * It's possible for a CPU to receive VLPIs before it is
 * scheduled as a vPE, especially for the first CPU, and a
 * VLPI with an INTID larger than 2^(IDbits+1) will be considered
 * out of range and dropped by the GIC.
 * So we initialize IDbits to a known value to avoid VLPI drops.
2724 val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2725 pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
2726 smp_processor_id(), val);
2727 gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
 * Also clear the Valid bit of GICR_VPENDBASER, in case some
 * ancient programming got left in and could end up
 * corrupting memory.
2734 val = its_clear_vpend_valid(vlpi_base);
2735 WARN_ON(val & GICR_VPENDBASER_Dirty);
2738 if (allocate_vpe_l1_table()) {
2740 * If the allocation has failed, we're in massive trouble.
2741 * Disable direct injection, and pray that no VM was
2742 * already running...
2744 gic_rdists->has_rvpeid = false;
2745 gic_rdists->has_vlpis = false;
2748 /* Make sure the GIC has seen the above */
2751 gic_data_rdist()->lpi_enabled = true;
2752 pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
2754 gic_data_rdist()->pend_page ? "allocated" : "reserved",
2758 static void its_cpu_init_collection(struct its_node *its)
2760 int cpu = smp_processor_id();
/* avoid cross-node collections and their mappings */
2764 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
2765 struct device_node *cpu_node;
2767 cpu_node = of_get_cpu_node(cpu, NULL);
2768 if (its->numa_node != NUMA_NO_NODE &&
2769 its->numa_node != of_node_to_nid(cpu_node))
2774 * We now have to bind each collection to its target
2777 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
/*
 * This ITS wants the physical address of the
 * redistributor.
 */
2782 target = gic_data_rdist()->phys_base;
2784 /* This ITS wants a linear CPU number. */
2785 target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2786 target = GICR_TYPER_CPU_NUMBER(target) << 16;
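/*
 * Either way, the value lands in the RDbase field (bits [51:16])
 * of the MAPC command: a 64kB-aligned physical address when PTA is
 * set, or a linear processor number shifted into place otherwise.
 */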
2789 /* Perform collection mapping */
2790 its->collections[cpu].target_address = target;
2791 its->collections[cpu].col_id = cpu;
2793 its_send_mapc(its, &its->collections[cpu], 1);
2794 its_send_invall(its, &its->collections[cpu]);
2797 static void its_cpu_init_collections(void)
2799 struct its_node *its;
2801 raw_spin_lock(&its_lock);
2803 list_for_each_entry(its, &its_nodes, entry)
2804 its_cpu_init_collection(its);
2806 raw_spin_unlock(&its_lock);
2809 static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
2811 struct its_device *its_dev = NULL, *tmp;
2812 unsigned long flags;
2814 raw_spin_lock_irqsave(&its->lock, flags);
2816 list_for_each_entry(tmp, &its->its_device_list, entry) {
2817 if (tmp->device_id == dev_id) {
2823 raw_spin_unlock_irqrestore(&its->lock, flags);
2828 static struct its_baser *its_get_baser(struct its_node *its, u32 type)
2832 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2833 if (GITS_BASER_TYPE(its->tables[i].val) == type)
2834 return &its->tables[i];
2840 static bool its_alloc_table_entry(struct its_node *its,
2841 struct its_baser *baser, u32 id)
2847 /* Don't allow device id that exceeds single, flat table limit */
2848 esz = GITS_BASER_ENTRY_SIZE(baser->val);
2849 if (!(baser->val & GITS_BASER_INDIRECT))
2850 return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
2852 /* Compute 1st level table index & check if that exceeds table limit */
2853 idx = id >> ilog2(baser->psz / esz);
2854 if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
2857 table = baser->base;
2859 /* Allocate memory for 2nd level table */
2861 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
2862 get_order(baser->psz));
2866 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
2867 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
2868 gic_flush_dcache_to_poc(page_address(page), baser->psz);
2870 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
2872 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
2873 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
2874 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
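/*
 * Two-level lookup example (illustrative sizes): with 64kB ITS
 * pages and 8-byte entries, each L2 page holds 8192 entries, so
 * device ID 20000 selects L1 slot 20000 >> ilog2(8192) = 2; the
 * matching L2 page is only allocated the first time an ID in that
 * range shows up.
 */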
2876 /* Ensure updated table contents are visible to ITS hardware */
2883 static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
2885 struct its_baser *baser;
2887 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
/* Don't allow device id that exceeds ITS hardware limit */
if (!baser)
	return (ilog2(dev_id) < device_ids(its));
2893 return its_alloc_table_entry(its, baser, dev_id);
2896 static bool its_alloc_vpe_table(u32 vpe_id)
2898 struct its_node *its;
2901 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
2902 * could try and only do it on ITSs corresponding to devices
2903 * that have interrupts targeted at this VPE, but the
 * complexity becomes crazy (and you have tons of memory
 * anyway).
 */
2907 list_for_each_entry(its, &its_nodes, entry) {
2908 struct its_baser *baser;
2913 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
2917 if (!its_alloc_table_entry(its, baser, vpe_id))
2924 static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
2925 int nvecs, bool alloc_lpis)
2927 struct its_device *dev;
2928 unsigned long *lpi_map = NULL;
2929 unsigned long flags;
2930 u16 *col_map = NULL;
2937 if (!its_alloc_device_table(its, dev_id))
2940 if (WARN_ON(!is_power_of_2(nvecs)))
2941 nvecs = roundup_pow_of_two(nvecs);
2943 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2945 * Even if the device wants a single LPI, the ITT must be
2946 * sized as a power of two (and you need at least one bit...).
2948 nr_ites = max(2, nvecs);
2949 sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
2950 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
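/*
 * Sizing example (assuming the common 8-byte ITE): a device asking
 * for 30 vectors is rounded up to nvecs = 32, giving nr_ites = 32
 * and sz = 32 * 8 = 256 bytes, plus up to ITS_ITT_ALIGN - 1 bytes
 * of slack so the ITT base can be 256-byte aligned.
 */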
2951 itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
2953 lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
2955 col_map = kcalloc(nr_lpis, sizeof(*col_map),
2958 col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
2963 if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
2971 gic_flush_dcache_to_poc(itt, sz);
2975 dev->nr_ites = nr_ites;
2976 dev->event_map.lpi_map = lpi_map;
2977 dev->event_map.col_map = col_map;
2978 dev->event_map.lpi_base = lpi_base;
2979 dev->event_map.nr_lpis = nr_lpis;
2980 raw_spin_lock_init(&dev->event_map.vlpi_lock);
2981 dev->device_id = dev_id;
2982 INIT_LIST_HEAD(&dev->entry);
2984 raw_spin_lock_irqsave(&its->lock, flags);
2985 list_add(&dev->entry, &its->its_device_list);
2986 raw_spin_unlock_irqrestore(&its->lock, flags);
2988 /* Map device to its ITT */
2989 its_send_mapd(dev, 1);
2994 static void its_free_device(struct its_device *its_dev)
2996 unsigned long flags;
2998 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
2999 list_del(&its_dev->entry);
3000 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
3001 kfree(its_dev->event_map.col_map);
3002 kfree(its_dev->itt);
3006 static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
/* Find a free LPI region in lpi_map and allocate it. */
3011 idx = bitmap_find_free_region(dev->event_map.lpi_map,
3012 dev->event_map.nr_lpis,
3013 get_count_order(nvecs));
3017 *hwirq = dev->event_map.lpi_base + idx;
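/*
 * Example (illustrative): nvecs = 16 makes bitmap_find_free_region()
 * return a 16-event region aligned on a 16-event boundary, which
 * keeps the resulting LPI block suitable for PCI Multi-MSI and its
 * power-of-two alignment requirement.
 */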
3022 static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
3023 int nvec, msi_alloc_info_t *info)
3025 struct its_node *its;
3026 struct its_device *its_dev;
3027 struct msi_domain_info *msi_info;
3032 * We ignore "dev" entirely, and rely on the dev_id that has
3033 * been passed via the scratchpad. This limits this domain's
3034 * usefulness to upper layers that definitely know that they
3035 * are built on top of the ITS.
3037 dev_id = info->scratchpad[0].ul;
3039 msi_info = msi_get_domain_info(domain);
3040 its = msi_info->data;
3042 if (!gic_rdists->has_direct_lpi &&
3044 vpe_proxy.dev->its == its &&
3045 dev_id == vpe_proxy.dev->device_id) {
3046 /* Bad luck. Get yourself a better implementation */
3047 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
3052 mutex_lock(&its->dev_alloc_lock);
3053 its_dev = its_find_device(its, dev_id);
 * We have already seen this ID, probably through
 * another alias (a PCI bridge of some sort). No need to
 * create the device.
3060 its_dev->shared = true;
3061 pr_debug("Reusing ITT for devID %x\n", dev_id);
3065 its_dev = its_create_device(its, dev_id, nvec, true);
3071 pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
3073 mutex_unlock(&its->dev_alloc_lock);
3074 info->scratchpad[0].ptr = its_dev;
3078 static struct msi_domain_ops its_msi_domain_ops = {
3079 .msi_prepare = its_msi_prepare,
3082 static int its_irq_gic_domain_alloc(struct irq_domain *domain,
3084 irq_hw_number_t hwirq)
3086 struct irq_fwspec fwspec;
3088 if (irq_domain_get_of_node(domain->parent)) {
3089 fwspec.fwnode = domain->parent->fwnode;
3090 fwspec.param_count = 3;
3091 fwspec.param[0] = GIC_IRQ_TYPE_LPI;
3092 fwspec.param[1] = hwirq;
3093 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
3094 } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
3095 fwspec.fwnode = domain->parent->fwnode;
3096 fwspec.param_count = 2;
3097 fwspec.param[0] = hwirq;
3098 fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
3103 return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
3106 static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
3107 unsigned int nr_irqs, void *args)
3109 msi_alloc_info_t *info = args;
3110 struct its_device *its_dev = info->scratchpad[0].ptr;
3111 struct its_node *its = its_dev->its;
3112 irq_hw_number_t hwirq;
3116 err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
3120 err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
3124 for (i = 0; i < nr_irqs; i++) {
3125 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
3129 irq_domain_set_hwirq_and_chip(domain, virq + i,
3130 hwirq + i, &its_irq_chip, its_dev);
3131 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
3132 pr_debug("ID:%d pID:%d vID:%d\n",
3133 (int)(hwirq + i - its_dev->event_map.lpi_base),
3134 (int)(hwirq + i), virq + i);
3140 static int its_irq_domain_activate(struct irq_domain *domain,
3141 struct irq_data *d, bool reserve)
3143 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3144 u32 event = its_get_event_id(d);
3145 const struct cpumask *cpu_mask = cpu_online_mask;
3148 /* get the cpu_mask of local node */
3149 if (its_dev->its->numa_node >= 0)
3150 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
3152 /* Bind the LPI to the first possible CPU */
3153 cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
3154 if (cpu >= nr_cpu_ids) {
3155 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
3158 cpu = cpumask_first(cpu_online_mask);
3161 its_dev->event_map.col_map[event] = cpu;
3162 irq_data_update_effective_affinity(d, cpumask_of(cpu));
3164 /* Map the GIC IRQ and event to the device */
3165 its_send_mapti(its_dev, d->hwirq, event);
3169 static void its_irq_domain_deactivate(struct irq_domain *domain,
3172 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3173 u32 event = its_get_event_id(d);
3175 /* Stop the delivery of interrupts */
3176 its_send_discard(its_dev, event);
3179 static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
3180 unsigned int nr_irqs)
3182 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
3183 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3184 struct its_node *its = its_dev->its;
3187 bitmap_release_region(its_dev->event_map.lpi_map,
3188 its_get_event_id(irq_domain_get_irq_data(domain, virq)),
3189 get_count_order(nr_irqs));
3191 for (i = 0; i < nr_irqs; i++) {
3192 struct irq_data *data = irq_domain_get_irq_data(domain,
3194 /* Nuke the entry in the domain */
3195 irq_domain_reset_irq_data(data);
3198 mutex_lock(&its->dev_alloc_lock);
 * If all interrupts have been freed, start mopping the
 * floor. This is conditioned on the device not being shared.
3204 if (!its_dev->shared &&
3205 bitmap_empty(its_dev->event_map.lpi_map,
3206 its_dev->event_map.nr_lpis)) {
3207 its_lpi_free(its_dev->event_map.lpi_map,
3208 its_dev->event_map.lpi_base,
3209 its_dev->event_map.nr_lpis);
3211 /* Unmap device/itt */
3212 its_send_mapd(its_dev, 0);
3213 its_free_device(its_dev);
3216 mutex_unlock(&its->dev_alloc_lock);
3218 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
3221 static const struct irq_domain_ops its_domain_ops = {
3222 .alloc = its_irq_domain_alloc,
3223 .free = its_irq_domain_free,
3224 .activate = its_irq_domain_activate,
3225 .deactivate = its_irq_domain_deactivate,
3231 * If a GICv4.0 doesn't implement Direct LPIs (which is extremely
3232 * likely), the only way to perform an invalidate is to use a fake
3233 * device to issue an INV command, implying that the LPI has first
3234 * been mapped to some event on that device. Since this is not exactly
3235 * cheap, we try to keep that mapping around as long as possible, and
3236 * only issue an UNMAP if we're short on available slots.
3238 * Broken by design(tm).
3240 * GICv4.1, on the other hand, mandates that we're able to invalidate
3241 * by writing to a MMIO register. It doesn't implement the whole of
3242 * DirectLPI, but that's good enough. And most of the time, we don't
3243 * even have to invalidate anything, as the redistributor can be told
 * whether to generate a doorbell or not (we thus leave it enabled,
 * always).
 */
3247 static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
3249 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3250 if (gic_rdists->has_rvpeid)
3253 /* Already unmapped? */
3254 if (vpe->vpe_proxy_event == -1)
3257 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
3258 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
3261 * We don't track empty slots at all, so let's move the
3262 * next_victim pointer if we can quickly reuse that slot
3263 * instead of nuking an existing entry. Not clear that this is
3264 * always a win though, and this might just generate a ripple
3265 * effect... Let's just hope VPEs don't migrate too often.
3267 if (vpe_proxy.vpes[vpe_proxy.next_victim])
3268 vpe_proxy.next_victim = vpe->vpe_proxy_event;
3270 vpe->vpe_proxy_event = -1;
3273 static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
3275 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3276 if (gic_rdists->has_rvpeid)
3279 if (!gic_rdists->has_direct_lpi) {
3280 unsigned long flags;
3282 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3283 its_vpe_db_proxy_unmap_locked(vpe);
3284 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3288 static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
3290 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3291 if (gic_rdists->has_rvpeid)
3294 /* Already mapped? */
3295 if (vpe->vpe_proxy_event != -1)
3298 /* This slot was already allocated. Kick the other VPE out. */
3299 if (vpe_proxy.vpes[vpe_proxy.next_victim])
3300 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
3302 /* Map the new VPE instead */
3303 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
3304 vpe->vpe_proxy_event = vpe_proxy.next_victim;
3305 vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
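/*
 * Slots are handed out round-robin; since a busy slot is simply
 * reclaimed from its current owner (see above), the proxy ITT
 * effectively acts as a FIFO-ish cache of doorbell mappings.
 */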
3307 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
3308 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
3311 static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
3313 unsigned long flags;
3314 struct its_collection *target_col;
3316 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3317 if (gic_rdists->has_rvpeid)
3320 if (gic_rdists->has_direct_lpi) {
3321 void __iomem *rdbase;
3323 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
3324 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
3325 wait_for_syncr(rdbase);
3330 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3332 its_vpe_db_proxy_map_locked(vpe);
3334 target_col = &vpe_proxy.dev->its->collections[to];
3335 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
3336 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
3338 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3341 static int its_vpe_set_affinity(struct irq_data *d,
3342 const struct cpumask *mask_val,
3345 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3346 int from, cpu = cpumask_first(mask_val);
3349 * Changing affinity is mega expensive, so let's be as lazy as
3350 * we can and only do it if we really have to. Also, if mapped
3351 * into the proxy device, we need to move the doorbell
3352 * interrupt to its new location.
3354 if (vpe->col_idx == cpu)
3357 from = vpe->col_idx;
3361 * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD
3362 * is sharing its VPE table with the current one.
3364 if (gic_data_rdist_cpu(cpu)->vpe_table_mask &&
3365 cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask))
3368 its_send_vmovp(vpe);
3369 its_vpe_db_proxy_move(vpe, from, cpu);
3372 irq_data_update_effective_affinity(d, cpumask_of(cpu));
3374 return IRQ_SET_MASK_OK_DONE;
3377 static void its_vpe_schedule(struct its_vpe *vpe)
3379 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3382 /* Schedule the VPE */
3383 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
3384 GENMASK_ULL(51, 12);
3385 val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
3386 val |= GICR_VPROPBASER_RaWb;
3387 val |= GICR_VPROPBASER_InnerShareable;
3388 gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
3390 val = virt_to_phys(page_address(vpe->vpt_page)) &
3391 GENMASK_ULL(51, 16);
3392 val |= GICR_VPENDBASER_RaWaWb;
3393 val |= GICR_VPENDBASER_NonShareable;
3395 * There is no good way of finding out if the pending table is
3396 * empty as we can race against the doorbell interrupt very
3397 * easily. So in the end, vpe->pending_last is only an
3398 * indication that the vcpu has something pending, not one
3399 * that the pending table is empty. A good implementation
3400 * would be able to read its coarse map pretty quickly anyway,
3401 * making this a tolerable issue.
3403 val |= GICR_VPENDBASER_PendingLast;
3404 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
3405 val |= GICR_VPENDBASER_Valid;
3406 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
3409 static void its_vpe_deschedule(struct its_vpe *vpe)
3411 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3414 val = its_clear_vpend_valid(vlpi_base);
3416 if (unlikely(val & GICR_VPENDBASER_Dirty)) {
3417 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
3419 vpe->pending_last = true;
3421 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
3422 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
3426 static void its_vpe_invall(struct its_vpe *vpe)
3428 struct its_node *its;
3430 list_for_each_entry(its, &its_nodes, entry) {
3434 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
3438 * Sending a VINVALL to a single ITS is enough, as all
3439 * we need is to reach the redistributors.
3441 its_send_vinvall(its, vpe);
3446 static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
3448 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3449 struct its_cmd_info *info = vcpu_info;
3451 switch (info->cmd_type) {
3453 its_vpe_schedule(vpe);
3456 case DESCHEDULE_VPE:
3457 its_vpe_deschedule(vpe);
3461 its_vpe_invall(vpe);
3469 static void its_vpe_send_cmd(struct its_vpe *vpe,
3470 void (*cmd)(struct its_device *, u32))
3472 unsigned long flags;
3474 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3476 its_vpe_db_proxy_map_locked(vpe);
3477 cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
3479 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3482 static void its_vpe_send_inv(struct irq_data *d)
3484 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3486 if (gic_rdists->has_direct_lpi) {
3487 void __iomem *rdbase;
3489 /* Target the redistributor this VPE is currently known on */
3490 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
3491 gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
3492 wait_for_syncr(rdbase);
3494 its_vpe_send_cmd(vpe, its_send_inv);
3498 static void its_vpe_mask_irq(struct irq_data *d)
 * We need to unmask the LPI, which is described by the parent
 * irq_data. Instead of calling into the parent (which won't
 * exactly do the right thing), let's simply use the
 * parent_data pointer. Yes, I'm naughty.
3506 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
3507 its_vpe_send_inv(d);
3510 static void its_vpe_unmask_irq(struct irq_data *d)
3512 /* Same hack as above... */
3513 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
3514 its_vpe_send_inv(d);
3517 static int its_vpe_set_irqchip_state(struct irq_data *d,
3518 enum irqchip_irq_state which,
3521 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3523 if (which != IRQCHIP_STATE_PENDING)
3526 if (gic_rdists->has_direct_lpi) {
3527 void __iomem *rdbase;
3529 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
3531 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
3533 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
3534 wait_for_syncr(rdbase);
3538 its_vpe_send_cmd(vpe, its_send_int);
3540 its_vpe_send_cmd(vpe, its_send_clear);
3546 static struct irq_chip its_vpe_irq_chip = {
3547 .name = "GICv4-vpe",
3548 .irq_mask = its_vpe_mask_irq,
3549 .irq_unmask = its_vpe_unmask_irq,
3550 .irq_eoi = irq_chip_eoi_parent,
3551 .irq_set_affinity = its_vpe_set_affinity,
3552 .irq_set_irqchip_state = its_vpe_set_irqchip_state,
3553 .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
3556 static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
3558 struct its_cmd_info *info = vcpu_info;
3560 switch (info->cmd_type) {
3564 case DESCHEDULE_VPE:
3575 static struct irq_chip its_vpe_4_1_irq_chip = {
3576 .name = "GICv4.1-vpe",
3577 .irq_eoi = irq_chip_eoi_parent,
3578 .irq_set_affinity = its_vpe_set_affinity,
3579 .irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity,
3582 static int its_vpe_id_alloc(void)
3584 return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
3587 static void its_vpe_id_free(u16 id)
3589 ida_simple_remove(&its_vpeid_ida, id);
3592 static int its_vpe_init(struct its_vpe *vpe)
3594 struct page *vpt_page;
3597 /* Allocate vpe_id */
3598 vpe_id = its_vpe_id_alloc();
3603 vpt_page = its_allocate_pending_table(GFP_KERNEL);
3605 its_vpe_id_free(vpe_id);
3609 if (!its_alloc_vpe_table(vpe_id)) {
3610 its_vpe_id_free(vpe_id);
3611 its_free_pending_table(vpt_page);
3615 vpe->vpe_id = vpe_id;
3616 vpe->vpt_page = vpt_page;
3617 if (gic_rdists->has_rvpeid)
3618 atomic_set(&vpe->vmapp_count, 0);
3620 vpe->vpe_proxy_event = -1;
3625 static void its_vpe_teardown(struct its_vpe *vpe)
3627 its_vpe_db_proxy_unmap(vpe);
3628 its_vpe_id_free(vpe->vpe_id);
3629 its_free_pending_table(vpe->vpt_page);
3632 static void its_vpe_irq_domain_free(struct irq_domain *domain,
3634 unsigned int nr_irqs)
3636 struct its_vm *vm = domain->host_data;
3639 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
3641 for (i = 0; i < nr_irqs; i++) {
3642 struct irq_data *data = irq_domain_get_irq_data(domain,
3644 struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
3646 BUG_ON(vm != vpe->its_vm);
3648 clear_bit(data->hwirq, vm->db_bitmap);
3649 its_vpe_teardown(vpe);
3650 irq_domain_reset_irq_data(data);
3653 if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
3654 its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
3655 its_free_prop_table(vm->vprop_page);
3659 static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
3660 unsigned int nr_irqs, void *args)
3662 struct irq_chip *irqchip = &its_vpe_irq_chip;
3663 struct its_vm *vm = args;
3664 unsigned long *bitmap;
3665 struct page *vprop_page;
3666 int base, nr_ids, i, err = 0;
3670 bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
3674 if (nr_ids < nr_irqs) {
3675 its_lpi_free(bitmap, base, nr_ids);
3679 vprop_page = its_allocate_prop_table(GFP_KERNEL);
3681 its_lpi_free(bitmap, base, nr_ids);
3685 vm->db_bitmap = bitmap;
3686 vm->db_lpi_base = base;
3687 vm->nr_db_lpis = nr_ids;
3688 vm->vprop_page = vprop_page;
3690 if (gic_rdists->has_rvpeid)
3691 irqchip = &its_vpe_4_1_irq_chip;
3693 for (i = 0; i < nr_irqs; i++) {
3694 vm->vpes[i]->vpe_db_lpi = base + i;
3695 err = its_vpe_init(vm->vpes[i]);
3698 err = its_irq_gic_domain_alloc(domain, virq + i,
3699 vm->vpes[i]->vpe_db_lpi);
3702 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
3703 irqchip, vm->vpes[i]);
3709 its_vpe_irq_domain_free(domain, virq, i - 1);
3711 its_lpi_free(bitmap, base, nr_ids);
3712 its_free_prop_table(vprop_page);
3718 static int its_vpe_irq_domain_activate(struct irq_domain *domain,
3719 struct irq_data *d, bool reserve)
3721 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3722 struct its_node *its;
3724 /* If we use the list map, we issue VMAPP on demand... */
3728 /* Map the VPE to the first possible CPU */
3729 vpe->col_idx = cpumask_first(cpu_online_mask);
3731 list_for_each_entry(its, &its_nodes, entry) {
3735 its_send_vmapp(its, vpe, true);
3736 its_send_vinvall(its, vpe);
3739 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
3744 static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
3747 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3748 struct its_node *its;
3751 * If we use the list map, we unmap the VPE once no VLPIs are
3752 * associated with the VM.
3757 list_for_each_entry(its, &its_nodes, entry) {
3761 its_send_vmapp(its, vpe, false);
3765 static const struct irq_domain_ops its_vpe_domain_ops = {
3766 .alloc = its_vpe_irq_domain_alloc,
3767 .free = its_vpe_irq_domain_free,
3768 .activate = its_vpe_irq_domain_activate,
3769 .deactivate = its_vpe_irq_domain_deactivate,
3772 static int its_force_quiescent(void __iomem *base)
3774 u32 count = 1000000; /* 1s */
3777 val = readl_relaxed(base + GITS_CTLR);
 * The GIC architecture specification requires the ITS to be both
 * disabled and quiescent for writes to GITS_BASER<n> or
 * GITS_CBASER not to have UNPREDICTABLE results.
3783 if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
3786 /* Disable the generation of all interrupts to this ITS */
3787 val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
3788 writel_relaxed(val, base + GITS_CTLR);
3790 /* Poll GITS_CTLR and wait until ITS becomes quiescent */
3792 val = readl_relaxed(base + GITS_CTLR);
3793 if (val & GITS_CTLR_QUIESCENT)
3805 static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
3807 struct its_node *its = data;
3809 /* erratum 22375: only alloc 8MB table size (20 bits) */
3810 its->typer &= ~GITS_TYPER_DEVBITS;
3811 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
3812 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
3817 static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
3819 struct its_node *its = data;
3821 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
3826 static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
3828 struct its_node *its = data;
/* On QDF2400, the size of the ITE is 16 bytes */
3831 its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
3832 its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);
3837 static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
3839 struct its_node *its = its_dev->its;
3842 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
3843 * which maps 32-bit writes targeted at a separate window of
3844 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
3845 * with device ID taken from bits [device_id_bits + 1:2] of
3846 * the window offset.
3848 return its->pre_its_base + (its_dev->device_id << 2);
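/*
 * Example with made-up numbers: given
 * socionext,synquacer-pre-its = <0x58000000 0x200000>, a device
 * with DeviceID 5 would get 0x58000000 + (5 << 2) = 0x58000014 as
 * its doorbell address, and the pre-ITS would recover the ID from
 * bits [device_id_bits + 1:2] of the window offset.
 */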
3851 static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
3853 struct its_node *its = data;
3854 u32 pre_its_window[2];
3857 if (!fwnode_property_read_u32_array(its->fwnode_handle,
3858 "socionext,synquacer-pre-its",
3860 ARRAY_SIZE(pre_its_window))) {
3862 its->pre_its_base = pre_its_window[0];
3863 its->get_msi_base = its_irq_get_msi_base_pre_its;
3865 ids = ilog2(pre_its_window[1]) - 2;
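/*
 * e.g. a (hypothetical) 0x200000-byte window gives
 * ilog2(0x200000) - 2 = 19 usable DeviceID bits.
 */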
3866 if (device_ids(its) > ids) {
3867 its->typer &= ~GITS_TYPER_DEVBITS;
3868 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
3871 /* the pre-ITS breaks isolation, so disable MSI remapping */
3872 its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
3878 static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
3880 struct its_node *its = data;
3883 * Hip07 insists on using the wrong address for the VLPI
3884 * page. Trick it into doing the right thing...
3886 its->vlpi_redist_offset = SZ_128K;
3890 static const struct gic_quirk its_quirks[] = {
3891 #ifdef CONFIG_CAVIUM_ERRATUM_22375
3893 .desc = "ITS: Cavium errata 22375, 24313",
3894 .iidr = 0xa100034c, /* ThunderX pass 1.x */
3896 .init = its_enable_quirk_cavium_22375,
3899 #ifdef CONFIG_CAVIUM_ERRATUM_23144
3901 .desc = "ITS: Cavium erratum 23144",
3902 .iidr = 0xa100034c, /* ThunderX pass 1.x */
3904 .init = its_enable_quirk_cavium_23144,
3907 #ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
3909 .desc = "ITS: QDF2400 erratum 0065",
3910 .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
3912 .init = its_enable_quirk_qdf2400_e0065,
3915 #ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
3918 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
3919 * implementation, but with a 'pre-ITS' added that requires
3920 * special handling in software.
3922 .desc = "ITS: Socionext Synquacer pre-ITS",
3925 .init = its_enable_quirk_socionext_synquacer,
3928 #ifdef CONFIG_HISILICON_ERRATUM_161600802
3930 .desc = "ITS: Hip07 erratum 161600802",
3933 .init = its_enable_quirk_hip07_161600802,
3940 static void its_enable_quirks(struct its_node *its)
3942 u32 iidr = readl_relaxed(its->base + GITS_IIDR);
3944 gic_enable_quirks(iidr, its_quirks, its);
3947 static int its_save_disable(void)
3949 struct its_node *its;
3952 raw_spin_lock(&its_lock);
3953 list_for_each_entry(its, &its_nodes, entry) {
3956 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
3960 its->ctlr_save = readl_relaxed(base + GITS_CTLR);
3961 err = its_force_quiescent(base);
3963 pr_err("ITS@%pa: failed to quiesce: %d\n",
3964 &its->phys_base, err);
3965 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
3969 its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
3974 list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
3977 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
3981 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
3984 raw_spin_unlock(&its_lock);
3989 static void its_restore_enable(void)
3991 struct its_node *its;
3994 raw_spin_lock(&its_lock);
3995 list_for_each_entry(its, &its_nodes, entry) {
3999 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
 * Make sure that the ITS is disabled. If it fails to quiesce,
 * don't restore it since writing to CBASER or BASER<n>
 * registers is undefined according to the GICv3 ITS
 * specification.
 */
4010 ret = its_force_quiescent(base);
4012 pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
4013 &its->phys_base, ret);
4017 gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);
4020 * Writing CBASER resets CREADR to 0, so make CWRITER and
4021 * cmd_write line up with it.
4023 its->cmd_write = its->cmd_base;
4024 gits_write_cwriter(0, base + GITS_CWRITER);
4026 /* Restore GITS_BASER from the value cache. */
4027 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
4028 struct its_baser *baser = &its->tables[i];
4030 if (!(baser->val & GITS_BASER_VALID))
4033 its_write_baser(its, baser, baser->val);
4035 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
 * Reinit the collection if it's stored in the ITS. This is
 * indicated by the col_id being less than the HCC field
 * (CID < HCC), as specified in the GICv3 documentation.
4042 if (its->collections[smp_processor_id()].col_id <
4043 GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
4044 its_cpu_init_collection(its);
4046 raw_spin_unlock(&its_lock);
4049 static struct syscore_ops its_syscore_ops = {
4050 .suspend = its_save_disable,
4051 .resume = its_restore_enable,
4054 static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
4056 struct irq_domain *inner_domain;
4057 struct msi_domain_info *info;
4059 info = kzalloc(sizeof(*info), GFP_KERNEL);
4063 inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
4064 if (!inner_domain) {
4069 inner_domain->parent = its_parent;
4070 irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
4071 inner_domain->flags |= its->msi_domain_flags;
4072 info->ops = &its_msi_domain_ops;
4074 inner_domain->host_data = info;
4079 static int its_init_vpe_domain(void)
4081 struct its_node *its;
4085 if (gic_rdists->has_direct_lpi) {
4086 pr_info("ITS: Using DirectLPI for VPE invalidation\n");
4090 /* Any ITS will do, even if not v4 */
4091 its = list_first_entry(&its_nodes, struct its_node, entry);
4093 entries = roundup_pow_of_two(nr_cpu_ids);
4094 vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
4096 if (!vpe_proxy.vpes) {
4097 pr_err("ITS: Can't allocate GICv4 proxy device array\n");
4101 /* Use the last possible DevID */
4102 devid = GENMASK(device_ids(its) - 1, 0);
4103 vpe_proxy.dev = its_create_device(its, devid, entries, false);
4104 if (!vpe_proxy.dev) {
4105 kfree(vpe_proxy.vpes);
4106 pr_err("ITS: Can't allocate GICv4 proxy device\n");
4110 BUG_ON(entries > vpe_proxy.dev->nr_ites);
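/*
 * This holds by construction: its_create_device() rounds the
 * requested number of vectors up to a power of two, and entries is
 * already roundup_pow_of_two(nr_cpu_ids), so the proxy ITT can
 * never end up smaller than the slot array.
 */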
4112 raw_spin_lock_init(&vpe_proxy.lock);
4113 vpe_proxy.next_victim = 0;
4114 pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
4115 devid, vpe_proxy.dev->nr_ites);
4120 static int __init its_compute_its_list_map(struct resource *res,
4121 void __iomem *its_base)
 * This is assumed to be done early enough that we're
 * guaranteed to be single-threaded, hence no
 * locking. Should this change, we should address
 * that.
 */
4132 its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
4133 if (its_number >= GICv4_ITS_LIST_MAX) {
4134 pr_err("ITS@%pa: No ITSList entry available!\n",
4139 ctlr = readl_relaxed(its_base + GITS_CTLR);
4140 ctlr &= ~GITS_CTLR_ITS_NUMBER;
4141 ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
4142 writel_relaxed(ctlr, its_base + GITS_CTLR);
4143 ctlr = readl_relaxed(its_base + GITS_CTLR);
4144 if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
4145 its_number = ctlr & GITS_CTLR_ITS_NUMBER;
4146 its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
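/*
 * The write above may not stick: on some implementations the ITS
 * number may be pre-programmed and read-only, so trust whatever
 * value reads back and claim that entry in its_list_map instead.
 */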
4149 if (test_and_set_bit(its_number, &its_list_map)) {
4150 pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
4151 &res->start, its_number);
4158 static int __init its_probe_one(struct resource *res,
4159 struct fwnode_handle *handle, int numa_node)
4161 struct its_node *its;
4162 void __iomem *its_base;
4164 u64 baser, tmp, typer;
4168 its_base = ioremap(res->start, resource_size(res));
4170 pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
4174 val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
4175 if (val != 0x30 && val != 0x40) {
4176 pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
4181 err = its_force_quiescent(its_base);
4183 pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
4187 pr_info("ITS %pR\n", res);
4189 its = kzalloc(sizeof(*its), GFP_KERNEL);
4195 raw_spin_lock_init(&its->lock);
4196 mutex_init(&its->dev_alloc_lock);
4197 INIT_LIST_HEAD(&its->entry);
4198 INIT_LIST_HEAD(&its->its_device_list);
4199 typer = gic_read_typer(its_base + GITS_TYPER);
4201 its->base = its_base;
4202 its->phys_base = res->start;
4204 if (!(typer & GITS_TYPER_VMOVP)) {
4205 err = its_compute_its_list_map(res, its_base);
4211 pr_info("ITS@%pa: Using ITS number %d\n",
4214 pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
4218 u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer);
4219 its->mpidr = readl_relaxed(its_base + GITS_MPIDR);
4221 pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",
4222 &res->start, its->mpidr, svpet);
4226 its->numa_node = numa_node;
4228 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
4229 get_order(ITS_CMD_QUEUE_SZ));
4234 its->cmd_base = (void *)page_address(page);
4235 its->cmd_write = its->cmd_base;
4236 its->fwnode_handle = handle;
4237 its->get_msi_base = its_irq_get_msi_base;
4238 its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;
4240 its_enable_quirks(its);
4242 err = its_alloc_tables(its);
4246 err = its_alloc_collections(its);
4248 goto out_free_tables;
baser = (virt_to_phys(its->cmd_base) |
	 GITS_CBASER_RaWaWb |
	 GITS_CBASER_InnerShareable |
	 (ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
	 GITS_CBASER_VALID);

gits_write_cbaser(baser, its->base + GITS_CBASER);
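/*
 * Size field example (assuming the usual 64kB ITS_CMD_QUEUE_SZ):
 * 64K / 4K - 1 = 15, i.e. sixteen 4kB pages of command queue.
 */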
4257 tmp = gits_read_cbaser(its->base + GITS_CBASER);
4259 if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
4260 if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
/*
 * The HW reports non-shareable, we must
 * remove the cacheability attributes as
 * well.
 */
4266 baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
4267 GITS_CBASER_CACHEABILITY_MASK);
4268 baser |= GITS_CBASER_nC;
4269 gits_write_cbaser(baser, its->base + GITS_CBASER);
4271 pr_info("ITS: using cache flushing for cmd queue\n");
4272 its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
4275 gits_write_cwriter(0, its->base + GITS_CWRITER);
4276 ctlr = readl_relaxed(its->base + GITS_CTLR);
4277 ctlr |= GITS_CTLR_ENABLE;
4279 ctlr |= GITS_CTLR_ImDe;
4280 writel_relaxed(ctlr, its->base + GITS_CTLR);
4282 if (GITS_TYPER_HCC(typer))
4283 its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE;
4285 err = its_init_domain(handle, its);
4287 goto out_free_tables;
4289 raw_spin_lock(&its_lock);
4290 list_add(&its->entry, &its_nodes);
4291 raw_spin_unlock(&its_lock);
4296 its_free_tables(its);
4298 free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
4303 pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
4307 static bool gic_rdists_supports_plpis(void)
4309 return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
4312 static int redist_disable_lpis(void)
4314 void __iomem *rbase = gic_data_rdist_rd_base();
4315 u64 timeout = USEC_PER_SEC;
4318 if (!gic_rdists_supports_plpis()) {
4319 pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
4323 val = readl_relaxed(rbase + GICR_CTLR);
4324 if (!(val & GICR_CTLR_ENABLE_LPIS))
4328 * If coming via a CPU hotplug event, we don't need to disable
4329 * LPIs before trying to re-enable them. They are already
4330 * configured and all is well in the world.
4332 * If running with preallocated tables, there is nothing to do.
4334 if (gic_data_rdist()->lpi_enabled ||
4335 (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
4339 * From that point on, we only try to do some damage control.
4341 pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
4342 smp_processor_id());
4343 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
4346 val &= ~GICR_CTLR_ENABLE_LPIS;
4347 writel_relaxed(val, rbase + GICR_CTLR);
4349 /* Make sure any change to GICR_CTLR is observable by the GIC */
4353 * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
4354 * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
4355 * Error out if we time out waiting for RWP to clear.
4357 while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
4359 pr_err("CPU%d: Timeout while disabling LPIs\n",
4360 smp_processor_id());
 * After it has been written to 1, it is IMPLEMENTATION
 * DEFINED whether GICR_CTLR.EnableLPIs becomes RES1 or can be
 * cleared to 0. Error out if clearing the bit failed.
4372 if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
4373 pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
4380 int its_cpu_init(void)
4382 if (!list_empty(&its_nodes)) {
4385 ret = redist_disable_lpis();
4389 its_cpu_init_lpis();
4390 its_cpu_init_collections();
4396 static const struct of_device_id its_device_id[] = {
4397 { .compatible = "arm,gic-v3-its", },
4401 static int __init its_of_probe(struct device_node *node)
4403 struct device_node *np;
4404 struct resource res;
4406 for (np = of_find_matching_node(node, its_device_id); np;
4407 np = of_find_matching_node(np, its_device_id)) {
4408 if (!of_device_is_available(np))
4410 if (!of_property_read_bool(np, "msi-controller")) {
4411 pr_warn("%pOF: no msi-controller property, ITS ignored\n",
4416 if (of_address_to_resource(np, 0, &res)) {
4417 pr_warn("%pOF: no regs?\n", np);
4421 its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
4428 #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
4430 #ifdef CONFIG_ACPI_NUMA
4431 struct its_srat_map {
4438 static struct its_srat_map *its_srat_maps __initdata;
4439 static int its_in_srat __initdata;
4441 static int __init acpi_get_its_numa_node(u32 its_id)
4445 for (i = 0; i < its_in_srat; i++) {
4446 if (its_id == its_srat_maps[i].its_id)
4447 return its_srat_maps[i].numa_node;
4449 return NUMA_NO_NODE;
4452 static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header,
4453 const unsigned long end)
4458 static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header,
4459 const unsigned long end)
4462 struct acpi_srat_gic_its_affinity *its_affinity;
4464 its_affinity = (struct acpi_srat_gic_its_affinity *)header;
4468 if (its_affinity->header.length < sizeof(*its_affinity)) {
4469 pr_err("SRAT: Invalid header length %d in ITS affinity\n",
4470 its_affinity->header.length);
4474 node = acpi_map_pxm_to_node(its_affinity->proximity_domain);
4476 if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
4477 pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
4481 its_srat_maps[its_in_srat].numa_node = node;
4482 its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
4484 pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
4485 its_affinity->proximity_domain, its_affinity->its_id, node);
4490 static void __init acpi_table_parse_srat_its(void)
4494 count = acpi_table_parse_entries(ACPI_SIG_SRAT,
4495 sizeof(struct acpi_table_srat),
4496 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
4497 gic_acpi_match_srat_its, 0);
4501 its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
4503 if (!its_srat_maps) {
4504 pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
4508 acpi_table_parse_entries(ACPI_SIG_SRAT,
4509 sizeof(struct acpi_table_srat),
4510 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
4511 gic_acpi_parse_srat_its, 0);
4514 /* free the its_srat_maps after ITS probing */
4515 static void __init acpi_its_srat_maps_free(void)
4517 kfree(its_srat_maps);
4520 static void __init acpi_table_parse_srat_its(void) { }
4521 static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
4522 static void __init acpi_its_srat_maps_free(void) { }
4525 static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
4526 const unsigned long end)
4528 struct acpi_madt_generic_translator *its_entry;
4529 struct fwnode_handle *dom_handle;
4530 struct resource res;
4533 its_entry = (struct acpi_madt_generic_translator *)header;
4534 memset(&res, 0, sizeof(res));
4535 res.start = its_entry->base_address;
4536 res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
4537 res.flags = IORESOURCE_MEM;
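/*
 * The MADT entry only carries a base address; the fixed 128kB span
 * covers the two architectural 64kB ITS frames (control registers
 * and GITS_TRANSLATER).
 */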
4539 dom_handle = irq_domain_alloc_fwnode(&res.start);
4541 pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
4546 err = iort_register_domain_token(its_entry->translation_id, res.start,
4549 pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
4550 &res.start, its_entry->translation_id);
4554 err = its_probe_one(&res, dom_handle,
4555 acpi_get_its_numa_node(its_entry->translation_id));
4559 iort_deregister_domain_token(its_entry->translation_id);
4561 irq_domain_free_fwnode(dom_handle);
4565 static void __init its_acpi_probe(void)
4567 acpi_table_parse_srat_its();
4568 acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
4569 gic_acpi_parse_madt_its, 0);
4570 acpi_its_srat_maps_free();
4573 static void __init its_acpi_probe(void) { }
4576 int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
4577 struct irq_domain *parent_domain)
4579 struct device_node *of_node;
4580 struct its_node *its;
4581 bool has_v4 = false;
4584 gic_rdists = rdists;
4586 its_parent = parent_domain;
4587 of_node = to_of_node(handle);
4589 its_of_probe(of_node);
4593 if (list_empty(&its_nodes)) {
4594 pr_warn("ITS: No ITS available, not enabling LPIs\n");
4598 err = allocate_lpi_tables();
4602 list_for_each_entry(its, &its_nodes, entry)
4603 has_v4 |= is_v4(its);
4605 if (has_v4 & rdists->has_vlpis) {
4606 if (its_init_vpe_domain() ||
4607 its_init_v4(parent_domain, &its_vpe_domain_ops)) {
4608 rdists->has_vlpis = false;
4609 pr_err("ITS: Disabling GICv4 support\n");
4613 register_syscore_ops(&its_syscore_ops);