2 * User interface for Resource Allocation in Resource Director Technology (RDT)
4 * Copyright (C) 2016 Intel Corporation
6 * Author: Fenghua Yu <fenghua.yu@intel.com>
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
17 * More information about RDT can be found in the Intel(R) x86 Architecture
18 * Software Developer Manual.
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23 #include <linux/cacheinfo.h>
24 #include <linux/cpu.h>
25 #include <linux/debugfs.h>
27 #include <linux/sysfs.h>
28 #include <linux/kernfs.h>
29 #include <linux/seq_buf.h>
30 #include <linux/seq_file.h>
31 #include <linux/sched/signal.h>
32 #include <linux/sched/task.h>
33 #include <linux/slab.h>
34 #include <linux/task_work.h>
36 #include <uapi/linux/magic.h>
38 #include <asm/intel_rdt_sched.h>
39 #include "intel_rdt.h"
41 DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
42 DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);
43 DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
44 static struct kernfs_root *rdt_root;
45 struct rdtgroup rdtgroup_default;
46 LIST_HEAD(rdt_all_groups);
48 /* Kernel fs node for "info" directory under root */
49 static struct kernfs_node *kn_info;
51 /* Kernel fs node for "mon_groups" directory under root */
52 static struct kernfs_node *kn_mongrp;
54 /* Kernel fs node for "mon_data" directory under root */
55 static struct kernfs_node *kn_mondata;
57 static struct seq_buf last_cmd_status;
58 static char last_cmd_status_buf[512];
60 struct dentry *debugfs_resctrl;
62 void rdt_last_cmd_clear(void)
64 lockdep_assert_held(&rdtgroup_mutex);
65 seq_buf_clear(&last_cmd_status);
68 void rdt_last_cmd_puts(const char *s)
70 lockdep_assert_held(&rdtgroup_mutex);
71 seq_buf_puts(&last_cmd_status, s);
74 void rdt_last_cmd_printf(const char *fmt, ...)
79 lockdep_assert_held(&rdtgroup_mutex);
80 seq_buf_vprintf(&last_cmd_status, fmt, ap);
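/*
 * [Illustrative sketch, not part of the original file.] The expected calling
 * pattern for the last_cmd_status helpers above: a write handler clears the
 * previous verdict, then records at most one message while holding
 * rdtgroup_mutex. The handler name and the failing condition here are
 * hypothetical.
 */
static ssize_t example_status_write(struct kernfs_open_file *of, char *buf,
				    size_t nbytes, loff_t off)
{
	int ret = 0;

	mutex_lock(&rdtgroup_mutex);
	rdt_last_cmd_clear();		/* forget the previous verdict */
	if (nbytes == 0) {		/* hypothetical failure */
		rdt_last_cmd_puts("empty write\n");
		ret = -EINVAL;
	}
	mutex_unlock(&rdtgroup_mutex);
	return ret ?: nbytes;
}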
85 * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
86 * we can keep a bitmap of free CLOSIDs in a single integer.
88 * Using a global CLOSID across all resources has some advantages and
89 * some drawbacks:
90 * + We can simply set "current->closid" to assign a task to a resource
91 *   group.
92 * + Context switch code can avoid extra memory references deciding which
93 * CLOSID to load into the PQR_ASSOC MSR
94 * - We give up some options in configuring resource groups across multi-socket
95 *   systems.
96 * - Our choices on how to configure each resource become progressively more
97 * limited as the number of resources grows.
99 static int closid_free_map;
100 static int closid_free_map_len;
102 int closids_supported(void)
104 return closid_free_map_len;
107 static void closid_init(void)
109 struct rdt_resource *r;
110 int rdt_min_closid = 32;
112 /* Compute rdt_min_closid across all resources */
113 for_each_alloc_enabled_rdt_resource(r)
114 rdt_min_closid = min(rdt_min_closid, r->num_closid);
116 closid_free_map = BIT_MASK(rdt_min_closid) - 1;
118 /* CLOSID 0 is always reserved for the default group */
119 closid_free_map &= ~1;
120 closid_free_map_len = rdt_min_closid;
123 static int closid_alloc(void)
125 u32 closid = ffs(closid_free_map);
130 closid_free_map &= ~(1 << closid);
135 void closid_free(int closid)
137 closid_free_map |= 1 << closid;
141 * closid_allocated - test if provided closid is in use
142 * @closid: closid to be tested
144 * Return: true if @closid is currently associated with a resource group,
145 * false if @closid is free
147 static bool closid_allocated(unsigned int closid)
149 return (closid_free_map & (1 << closid)) == 0;
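/*
 * [Illustrative sketch, not part of the original file.] Lifecycle of the
 * integer-bitmap CLOSID allocator above, assuming rdt_min_closid == 4:
 * closid_init() leaves closid_free_map == 0b1110 (CLOSID 0 reserved for the
 * default group), so successive closid_alloc() calls hand out the lowest
 * free CLOSIDs until the map is exhausted.
 */
static void closid_alloc_demo(void)
{
	int a, b;

	closid_init();			/* e.g. free map = 0b1110 */
	a = closid_alloc();		/* lowest free bit -> CLOSID 1 */
	b = closid_alloc();		/* next free bit   -> CLOSID 2 */
	WARN_ON(!closid_allocated(0));	/* CLOSID 0 is never handed out */
	closid_free(b);			/* return CLOSIDs to the map */
	closid_free(a);
}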
153 * rdtgroup_mode_by_closid - Return mode of resource group with closid
154 * @closid: closid of the resource group
156 * Each resource group is associated with a @closid. Here the mode
157 * of a resource group can be queried by searching for it using its closid.
159 * Return: mode as &enum rdtgrp_mode of resource group with closid @closid
161 enum rdtgrp_mode rdtgroup_mode_by_closid(int closid)
163 struct rdtgroup *rdtgrp;
165 list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
166 if (rdtgrp->closid == closid)
170 return RDT_NUM_MODES;
173 static const char * const rdt_mode_str[] = {
174 [RDT_MODE_SHAREABLE] = "shareable",
175 [RDT_MODE_EXCLUSIVE] = "exclusive",
176 [RDT_MODE_PSEUDO_LOCKSETUP] = "pseudo-locksetup",
177 [RDT_MODE_PSEUDO_LOCKED] = "pseudo-locked",
181 * rdtgroup_mode_str - Return the string representation of mode
182 * @mode: the resource group mode as &enum rdtgroup_mode
184 * Return: string representation of valid mode, "unknown" otherwise
186 static const char *rdtgroup_mode_str(enum rdtgrp_mode mode)
188 if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES)
191 return rdt_mode_str[mode];
194 /* set uid and gid of rdtgroup dirs and files to that of the creator */
195 static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
197 struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
198 .ia_uid = current_fsuid(),
199 .ia_gid = current_fsgid(), };
201 if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
202 gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
205 return kernfs_setattr(kn, &iattr);
208 static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
210 struct kernfs_node *kn;
213 kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
214 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
215 0, rft->kf_ops, rft, NULL, NULL);
219 ret = rdtgroup_kn_set_ugid(kn);
228 static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
230 struct kernfs_open_file *of = m->private;
231 struct rftype *rft = of->kn->priv;
234 return rft->seq_show(of, m, arg);
238 static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
239 size_t nbytes, loff_t off)
241 struct rftype *rft = of->kn->priv;
244 return rft->write(of, buf, nbytes, off);
249 static struct kernfs_ops rdtgroup_kf_single_ops = {
250 .atomic_write_len = PAGE_SIZE,
251 .write = rdtgroup_file_write,
252 .seq_show = rdtgroup_seqfile_show,
255 static struct kernfs_ops kf_mondata_ops = {
256 .atomic_write_len = PAGE_SIZE,
257 .seq_show = rdtgroup_mondata_show,
260 static bool is_cpu_list(struct kernfs_open_file *of)
262 struct rftype *rft = of->kn->priv;
264 return rft->flags & RFTYPE_FLAGS_CPUS_LIST;
267 static int rdtgroup_cpus_show(struct kernfs_open_file *of,
268 struct seq_file *s, void *v)
270 struct rdtgroup *rdtgrp;
273 rdtgrp = rdtgroup_kn_lock_live(of->kn);
276 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
277 seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
278 cpumask_pr_args(&rdtgrp->plr->d->cpu_mask));
280 seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
281 cpumask_pr_args(&rdtgrp->cpu_mask));
285 rdtgroup_kn_unlock(of->kn);
291 * This is safe against intel_rdt_sched_in() called from __switch_to()
292 * because __switch_to() is executed with interrupts disabled. A local call
293 * from update_closid_rmid() is protected against __switch_to() because
294 * preemption is disabled.
296 static void update_cpu_closid_rmid(void *info)
298 struct rdtgroup *r = info;
301 this_cpu_write(pqr_state.default_closid, r->closid);
302 this_cpu_write(pqr_state.default_rmid, r->mon.rmid);
306 * We cannot unconditionally write the MSR because the current
307 * executing task might have its own closid selected. Just reuse
308 * the context switch code.
310 intel_rdt_sched_in();
314 * Update the PQR_ASSOC MSR on all cpus in @cpu_mask.
316 * Per task closids/rmids must have been set up before calling this function.
319 update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)
323 if (cpumask_test_cpu(cpu, cpu_mask))
324 update_cpu_closid_rmid(r);
325 smp_call_function_many(cpu_mask, update_cpu_closid_rmid, r, 1);
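/*
 * [Illustrative usage sketch, not part of the original file.] Pushing the
 * default group's closid/rmid onto a hypothetical set of CPUs. Per-task and
 * per-group IDs must already be published; update_closid_rmid() then
 * refreshes the per-cpu defaults and reloads PQR_ASSOC locally or via IPI.
 */
static void update_closid_rmid_demo(void)
{
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return;
	cpumask_set_cpu(0, mask);	/* hypothetical target CPUs */
	cpumask_set_cpu(1, mask);
	update_closid_rmid(mask, &rdtgroup_default);
	free_cpumask_var(mask);
}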
329 static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
330 cpumask_var_t tmpmask)
332 struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp;
333 struct list_head *head;
335 /* Check whether cpus belong to parent ctrl group */
336 cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
337 if (cpumask_weight(tmpmask)) {
338 rdt_last_cmd_puts("can only add CPUs to mongroup that belong to parent\n");
342 /* Check whether cpus are dropped from this group */
343 cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
344 if (cpumask_weight(tmpmask)) {
345 /* Give any dropped cpus to parent rdtgroup */
346 cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);
347 update_closid_rmid(tmpmask, prgrp);
351 * If we added cpus, remove them from previous group that owned them
352 * and update per-cpu rmid
354 cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
355 if (cpumask_weight(tmpmask)) {
356 head = &prgrp->mon.crdtgrp_list;
357 list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
360 cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask,
363 update_closid_rmid(tmpmask, rdtgrp);
366 /* Done pushing/pulling - update this group with new mask */
367 cpumask_copy(&rdtgrp->cpu_mask, newmask);
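/*
 * [Worked example, not part of the original file.] How the andnot/weight
 * pairs above classify a cpus-file update. With old = {0,1,2} and
 * new = {1,2,3}: dropped = old & ~new = {0}, added = new & ~old = {3};
 * a non-zero cpumask_weight() on either result triggers the push/pull
 * paths in cpus_mon_write()/cpus_ctrl_write().
 */
static void cpus_delta_demo(const struct cpumask *oldmask,
			    const struct cpumask *newmask,
			    struct cpumask *tmpmask)
{
	cpumask_andnot(tmpmask, oldmask, newmask);	/* cpus dropped */
	if (cpumask_weight(tmpmask))
		pr_debug("%u cpu(s) dropped\n", cpumask_weight(tmpmask));

	cpumask_andnot(tmpmask, newmask, oldmask);	/* cpus added */
	if (cpumask_weight(tmpmask))
		pr_debug("%u cpu(s) added\n", cpumask_weight(tmpmask));
}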
372 static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m)
374 struct rdtgroup *crgrp;
376 cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m);
377 /* update the child mon group masks as well */
378 list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list)
379 cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask);
382 static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
383 cpumask_var_t tmpmask, cpumask_var_t tmpmask1)
385 struct rdtgroup *r, *crgrp;
386 struct list_head *head;
388 /* Check whether cpus are dropped from this group */
389 cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
390 if (cpumask_weight(tmpmask)) {
391 /* Can't drop from default group */
392 if (rdtgrp == &rdtgroup_default) {
393 rdt_last_cmd_puts("Can't drop CPUs from default group\n");
397 /* Give any dropped cpus to rdtgroup_default */
398 cpumask_or(&rdtgroup_default.cpu_mask,
399 &rdtgroup_default.cpu_mask, tmpmask);
400 update_closid_rmid(tmpmask, &rdtgroup_default);
404 * If we added cpus, remove them from previous group and
405 * the prev group's child groups that owned them
406 * and update per-cpu closid/rmid.
408 cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
409 if (cpumask_weight(tmpmask)) {
410 list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
413 cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
414 if (cpumask_weight(tmpmask1))
415 cpumask_rdtgrp_clear(r, tmpmask1);
417 update_closid_rmid(tmpmask, rdtgrp);
420 /* Done pushing/pulling - update this group with new mask */
421 cpumask_copy(&rdtgrp->cpu_mask, newmask);
424 * Clear child mon group masks since there is a new parent mask
425 * now and update the rmid for the cpus the child lost.
427 head = &rdtgrp->mon.crdtgrp_list;
428 list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
429 cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask);
430 update_closid_rmid(tmpmask, rdtgrp);
431 cpumask_clear(&crgrp->cpu_mask);
437 static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
438 char *buf, size_t nbytes, loff_t off)
440 cpumask_var_t tmpmask, newmask, tmpmask1;
441 struct rdtgroup *rdtgrp;
447 if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
449 if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
450 free_cpumask_var(tmpmask);
453 if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) {
454 free_cpumask_var(tmpmask);
455 free_cpumask_var(newmask);
459 rdtgrp = rdtgroup_kn_lock_live(of->kn);
460 rdt_last_cmd_clear();
463 rdt_last_cmd_puts("directory was removed\n");
467 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
468 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
470 rdt_last_cmd_puts("pseudo-locking in progress\n");
475 ret = cpulist_parse(buf, newmask);
477 ret = cpumask_parse(buf, newmask);
480 rdt_last_cmd_puts("bad cpu list/mask\n");
484 /* check that user didn't specify any offline cpus */
485 cpumask_andnot(tmpmask, newmask, cpu_online_mask);
486 if (cpumask_weight(tmpmask)) {
488 rdt_last_cmd_puts("can only assign online cpus\n");
492 if (rdtgrp->type == RDTCTRL_GROUP)
493 ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1);
494 else if (rdtgrp->type == RDTMON_GROUP)
495 ret = cpus_mon_write(rdtgrp, newmask, tmpmask);
500 rdtgroup_kn_unlock(of->kn);
501 free_cpumask_var(tmpmask);
502 free_cpumask_var(newmask);
503 free_cpumask_var(tmpmask1);
505 return ret ?: nbytes;
508 struct task_move_callback {
509 struct callback_head work;
510 struct rdtgroup *rdtgrp;
513 static void move_myself(struct callback_head *head)
515 struct task_move_callback *callback;
516 struct rdtgroup *rdtgrp;
518 callback = container_of(head, struct task_move_callback, work);
519 rdtgrp = callback->rdtgrp;
522 * If resource group was deleted before this task work callback
523 * was invoked, then assign the task to root group and free the
524 * resource group.
526 if (atomic_dec_and_test(&rdtgrp->waitcount) &&
527 (rdtgrp->flags & RDT_DELETED)) {
534 /* update PQR_ASSOC MSR to make resource group go into effect */
535 intel_rdt_sched_in();
541 static int __rdtgroup_move_task(struct task_struct *tsk,
542 struct rdtgroup *rdtgrp)
544 struct task_move_callback *callback;
547 callback = kzalloc(sizeof(*callback), GFP_KERNEL);
550 callback->work.func = move_myself;
551 callback->rdtgrp = rdtgrp;
554 * Take a refcount, so rdtgrp cannot be freed before the
555 * callback has been invoked.
557 atomic_inc(&rdtgrp->waitcount);
558 ret = task_work_add(tsk, &callback->work, true);
561 * Task is exiting. Drop the refcount and free the callback.
562 * No need to check the refcount as the group cannot be
563 * deleted before the write function unlocks rdtgroup_mutex.
565 atomic_dec(&rdtgrp->waitcount);
567 rdt_last_cmd_puts("task exited\n");
570 * For ctrl_mon groups, move both closid and rmid.
571 * For monitor groups, tasks can be moved only from
572 * their parent CTRL group.
574 if (rdtgrp->type == RDTCTRL_GROUP) {
575 tsk->closid = rdtgrp->closid;
576 tsk->rmid = rdtgrp->mon.rmid;
577 } else if (rdtgrp->type == RDTMON_GROUP) {
578 if (rdtgrp->mon.parent->closid == tsk->closid) {
579 tsk->rmid = rdtgrp->mon.rmid;
581 rdt_last_cmd_puts("Can't move task to different control group\n");
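/*
 * [Illustrative sketch, not part of the original file.] The refcounted
 * task_work pattern used by __rdtgroup_move_task(): the callback must run in
 * the target task's own context, so it is queued with task_work_add(), and
 * rdtgrp->waitcount pins the group until move_myself() (or this error path)
 * drops the reference.
 */
static int queue_move_demo(struct task_struct *tsk, struct rdtgroup *rdtgrp)
{
	struct task_move_callback *cb;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return -ENOMEM;
	cb->work.func = move_myself;
	cb->rdtgrp = rdtgrp;
	atomic_inc(&rdtgrp->waitcount);		/* pin group for the callback */
	if (task_work_add(tsk, &cb->work, true)) {
		atomic_dec(&rdtgrp->waitcount);	/* task exiting: undo */
		kfree(cb);
		return -ESRCH;
	}
	return 0;
}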
590 * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group
593 * Return: 1 if tasks have been assigned to @r, 0 otherwise
595 int rdtgroup_tasks_assigned(struct rdtgroup *r)
597 struct task_struct *p, *t;
600 lockdep_assert_held(&rdtgroup_mutex);
603 for_each_process_thread(p, t) {
604 if ((r->type == RDTCTRL_GROUP && t->closid == r->closid) ||
605 (r->type == RDTMON_GROUP && t->rmid == r->mon.rmid)) {
615 static int rdtgroup_task_write_permission(struct task_struct *task,
616 struct kernfs_open_file *of)
618 const struct cred *tcred = get_task_cred(task);
619 const struct cred *cred = current_cred();
623 * Even if we're attaching all tasks in the thread group, we only
624 * need to check permissions on one of them.
626 if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
627 !uid_eq(cred->euid, tcred->uid) &&
628 !uid_eq(cred->euid, tcred->suid)) {
629 rdt_last_cmd_printf("No permission to move task %d\n", task->pid);
637 static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
638 struct kernfs_open_file *of)
640 struct task_struct *tsk;
645 tsk = find_task_by_vpid(pid);
648 rdt_last_cmd_printf("No task %d\n", pid);
655 get_task_struct(tsk);
658 ret = rdtgroup_task_write_permission(tsk, of);
660 ret = __rdtgroup_move_task(tsk, rdtgrp);
662 put_task_struct(tsk);
666 static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
667 char *buf, size_t nbytes, loff_t off)
669 struct rdtgroup *rdtgrp;
673 if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
675 rdtgrp = rdtgroup_kn_lock_live(of->kn);
677 rdtgroup_kn_unlock(of->kn);
680 rdt_last_cmd_clear();
682 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
683 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
685 rdt_last_cmd_puts("pseudo-locking in progress\n");
689 ret = rdtgroup_move_task(pid, rdtgrp, of);
692 rdtgroup_kn_unlock(of->kn);
694 return ret ?: nbytes;
697 static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
699 struct task_struct *p, *t;
702 for_each_process_thread(p, t) {
703 if ((r->type == RDTCTRL_GROUP && t->closid == r->closid) ||
704 (r->type == RDTMON_GROUP && t->rmid == r->mon.rmid))
705 seq_printf(s, "%d\n", t->pid);
710 static int rdtgroup_tasks_show(struct kernfs_open_file *of,
711 struct seq_file *s, void *v)
713 struct rdtgroup *rdtgrp;
716 rdtgrp = rdtgroup_kn_lock_live(of->kn);
718 show_rdt_tasks(rdtgrp, s);
721 rdtgroup_kn_unlock(of->kn);
726 static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
727 struct seq_file *seq, void *v)
731 mutex_lock(&rdtgroup_mutex);
732 len = seq_buf_used(&last_cmd_status);
734 seq_printf(seq, "%.*s", len, last_cmd_status_buf);
736 seq_puts(seq, "ok\n");
737 mutex_unlock(&rdtgroup_mutex);
741 static int rdt_num_closids_show(struct kernfs_open_file *of,
742 struct seq_file *seq, void *v)
744 struct rdt_resource *r = of->kn->parent->priv;
746 seq_printf(seq, "%d\n", r->num_closid);
750 static int rdt_default_ctrl_show(struct kernfs_open_file *of,
751 struct seq_file *seq, void *v)
753 struct rdt_resource *r = of->kn->parent->priv;
755 seq_printf(seq, "%x\n", r->default_ctrl);
759 static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
760 struct seq_file *seq, void *v)
762 struct rdt_resource *r = of->kn->parent->priv;
764 seq_printf(seq, "%u\n", r->cache.min_cbm_bits);
768 static int rdt_shareable_bits_show(struct kernfs_open_file *of,
769 struct seq_file *seq, void *v)
771 struct rdt_resource *r = of->kn->parent->priv;
773 seq_printf(seq, "%x\n", r->cache.shareable_bits);
778 * rdt_bit_usage_show - Display current usage of resources
780 * A domain is a shared resource that can now be allocated differently. Here
781 * we display the current regions of the domain as an annotated bitmask.
782 * For each domain of this resource its allocation bitmask
783 * is annotated as below to indicate the current usage of the corresponding bit:
784 * 0 - currently unused
785 * X - currently available for sharing and used by software and hardware
786 * H - currently used by hardware only but available for software use
787 * S - currently used and shareable by software only
788 * E - currently used exclusively by one resource group
789 * P - currently pseudo-locked by one resource group
791 static int rdt_bit_usage_show(struct kernfs_open_file *of,
792 struct seq_file *seq, void *v)
794 struct rdt_resource *r = of->kn->parent->priv;
795 u32 sw_shareable = 0, hw_shareable = 0;
796 u32 exclusive = 0, pseudo_locked = 0;
797 struct rdt_domain *dom;
798 int i, hwb, swb, excl, psl;
799 enum rdtgrp_mode mode;
803 mutex_lock(&rdtgroup_mutex);
804 hw_shareable = r->cache.shareable_bits;
805 list_for_each_entry(dom, &r->domains, list) {
808 ctrl = dom->ctrl_val;
811 seq_printf(seq, "%d=", dom->id);
812 for (i = 0; i < closids_supported(); i++, ctrl++) {
813 if (!closid_allocated(i))
815 mode = rdtgroup_mode_by_closid(i);
817 case RDT_MODE_SHAREABLE:
818 sw_shareable |= *ctrl;
820 case RDT_MODE_EXCLUSIVE:
823 case RDT_MODE_PSEUDO_LOCKSETUP:
825 * RDT_MODE_PSEUDO_LOCKSETUP is possible
826 * here but not included since the CBM
827 * associated with this CLOSID in this mode
828 * is not initialized and no task or cpu can be
829 * assigned this CLOSID.
832 case RDT_MODE_PSEUDO_LOCKED:
835 "invalid mode for closid %d\n", i);
839 for (i = r->cache.cbm_len - 1; i >= 0; i--) {
840 pseudo_locked = dom->plr ? dom->plr->cbm : 0;
841 hwb = test_bit(i, (unsigned long *)&hw_shareable);
842 swb = test_bit(i, (unsigned long *)&sw_shareable);
843 excl = test_bit(i, (unsigned long *)&exclusive);
844 psl = test_bit(i, (unsigned long *)&pseudo_locked);
847 else if (hwb && !swb)
849 else if (!hwb && swb)
855 else /* Unused bits remain */
861 mutex_unlock(&rdtgroup_mutex);
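/*
 * [Illustrative helper, not part of the original file.] The annotation
 * character rdt_bit_usage_show() prints for each CBM bit, given whether
 * hardware (hwb), software (swb), an exclusive group (excl) or a
 * pseudo-locked region (psl) owns that bit.
 */
static char bit_usage_char(int hwb, int swb, int excl, int psl)
{
	if (hwb && swb)
		return 'X';	/* shared by hardware and software */
	if (hwb && !swb)
		return 'H';	/* hardware only */
	if (!hwb && swb)
		return 'S';	/* software only */
	if (excl)
		return 'E';	/* used exclusively by one group */
	if (psl)
		return 'P';	/* pseudo-locked */
	return '0';		/* unused */
}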
865 static int rdt_min_bw_show(struct kernfs_open_file *of,
866 struct seq_file *seq, void *v)
868 struct rdt_resource *r = of->kn->parent->priv;
870 seq_printf(seq, "%u\n", r->membw.min_bw);
874 static int rdt_num_rmids_show(struct kernfs_open_file *of,
875 struct seq_file *seq, void *v)
877 struct rdt_resource *r = of->kn->parent->priv;
879 seq_printf(seq, "%d\n", r->num_rmid);
884 static int rdt_mon_features_show(struct kernfs_open_file *of,
885 struct seq_file *seq, void *v)
887 struct rdt_resource *r = of->kn->parent->priv;
888 struct mon_evt *mevt;
890 list_for_each_entry(mevt, &r->evt_list, list)
891 seq_printf(seq, "%s\n", mevt->name);
896 static int rdt_bw_gran_show(struct kernfs_open_file *of,
897 struct seq_file *seq, void *v)
899 struct rdt_resource *r = of->kn->parent->priv;
901 seq_printf(seq, "%u\n", r->membw.bw_gran);
905 static int rdt_delay_linear_show(struct kernfs_open_file *of,
906 struct seq_file *seq, void *v)
908 struct rdt_resource *r = of->kn->parent->priv;
910 seq_printf(seq, "%u\n", r->membw.delay_linear);
914 static int max_threshold_occ_show(struct kernfs_open_file *of,
915 struct seq_file *seq, void *v)
917 struct rdt_resource *r = of->kn->parent->priv;
919 seq_printf(seq, "%u\n", intel_cqm_threshold * r->mon_scale);
924 static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
925 char *buf, size_t nbytes, loff_t off)
927 struct rdt_resource *r = of->kn->parent->priv;
931 ret = kstrtouint(buf, 0, &bytes);
935 if (bytes > (boot_cpu_data.x86_cache_size * 1024))
938 intel_cqm_threshold = bytes / r->mon_scale;
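/*
 * [Worked example, not part of the original file.] The unit conversion in
 * max_threshold_occ_write(), assuming a hypothetical mon_scale of 64 bytes
 * per occupancy-counter unit: a user writing "131072" (128 KiB) stores
 * 131072 / 64 = 2048 counter units in intel_cqm_threshold, and
 * max_threshold_occ_show() multiplies by mon_scale to report bytes again.
 */
static unsigned int occ_threshold_from_bytes(unsigned int bytes,
					     unsigned int mon_scale)
{
	return bytes / mon_scale;	/* e.g. 131072 / 64 = 2048 */
}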
944 * rdtgroup_mode_show - Display mode of this resource group
946 static int rdtgroup_mode_show(struct kernfs_open_file *of,
947 struct seq_file *s, void *v)
949 struct rdtgroup *rdtgrp;
951 rdtgrp = rdtgroup_kn_lock_live(of->kn);
953 rdtgroup_kn_unlock(of->kn);
957 seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode));
959 rdtgroup_kn_unlock(of->kn);
964 * rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
965 * @r: Resource to which domain instance @d belongs.
966 * @d: The domain instance for which @closid is being tested.
967 * @cbm: Capacity bitmask being tested.
968 * @closid: Intended closid for @cbm.
969 * @exclusive: Only check if overlaps with exclusive resource groups
971 * Checks if provided @cbm intended to be used for @closid on domain
972 * @d overlaps with any other closids or other hardware usage associated
973 * with this domain. If @exclusive is true then only overlaps with
974 * resource groups in exclusive mode will be considered. If @exclusive
975 * is false then overlaps with any resource group or hardware entities
976 * will be considered.
978 * Return: false if CBM does not overlap, true if it does.
980 bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
981 u32 _cbm, int closid, bool exclusive)
983 unsigned long *cbm = (unsigned long *)&_cbm;
984 unsigned long *ctrl_b;
985 enum rdtgrp_mode mode;
989 /* Check for any overlap with regions used by hardware directly */
991 if (bitmap_intersects(cbm,
992 (unsigned long *)&r->cache.shareable_bits,
997 /* Check for overlap with other resource groups */
999 for (i = 0; i < closids_supported(); i++, ctrl++) {
1000 ctrl_b = (unsigned long *)ctrl;
1001 mode = rdtgroup_mode_by_closid(i);
1002 if (closid_allocated(i) && i != closid &&
1003 mode != RDT_MODE_PSEUDO_LOCKSETUP) {
1004 if (bitmap_intersects(cbm, ctrl_b, r->cache.cbm_len)) {
1006 if (mode == RDT_MODE_EXCLUSIVE)
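/*
 * [Worked example, not part of the original file.] Two CBMs overlap when
 * bitmap_intersects() finds a common set bit: 0x3c (0b111100) and 0x0f
 * (0b001111) share bits 2 and 3, so a CLOSID asking for 0x3c while an
 * exclusive group holds 0x0f would be rejected by the loop above.
 */
static bool cbm_overlap_demo(void)
{
	unsigned long a = 0x3c, b = 0x0f;

	return bitmap_intersects(&a, &b, 6);	/* true: bits 2 and 3 overlap */
}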
1019 * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
1021 * An exclusive resource group implies that there should be no sharing of
1022 * its allocated resources. At the time this group is considered to be
1023 * exclusive this test can determine if its current schemata supports this
1024 * setting by testing for overlap with all other resource groups.
1026 * Return: true if resource group can be exclusive, false if there is overlap
1027 * with allocations of other resource groups and thus this resource group
1028 * cannot be exclusive.
1030 static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
1032 int closid = rdtgrp->closid;
1033 struct rdt_resource *r;
1034 bool has_cache = false;
1035 struct rdt_domain *d;
1037 for_each_alloc_enabled_rdt_resource(r) {
1038 if (r->rid == RDT_RESOURCE_MBA)
1041 list_for_each_entry(d, &r->domains, list) {
1042 if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid],
1043 rdtgrp->closid, false)) {
1044 rdt_last_cmd_puts("schemata overlaps\n");
1051 rdt_last_cmd_puts("cannot be exclusive without CAT/CDP\n");
1059 * rdtgroup_mode_write - Modify the resource group's mode
1062 static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
1063 char *buf, size_t nbytes, loff_t off)
1065 struct rdtgroup *rdtgrp;
1066 enum rdtgrp_mode mode;
1069 /* Valid input requires a trailing newline */
1070 if (nbytes == 0 || buf[nbytes - 1] != '\n')
1072 buf[nbytes - 1] = '\0';
1074 rdtgrp = rdtgroup_kn_lock_live(of->kn);
1076 rdtgroup_kn_unlock(of->kn);
1080 rdt_last_cmd_clear();
1082 mode = rdtgrp->mode;
1084 if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) ||
1085 (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) ||
1086 (!strcmp(buf, "pseudo-locksetup") &&
1087 mode == RDT_MODE_PSEUDO_LOCKSETUP) ||
1088 (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED))
1091 if (mode == RDT_MODE_PSEUDO_LOCKED) {
1092 rdt_last_cmd_printf("cannot change pseudo-locked group\n");
1097 if (!strcmp(buf, "shareable")) {
1098 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
1099 ret = rdtgroup_locksetup_exit(rdtgrp);
1103 rdtgrp->mode = RDT_MODE_SHAREABLE;
1104 } else if (!strcmp(buf, "exclusive")) {
1105 if (!rdtgroup_mode_test_exclusive(rdtgrp)) {
1109 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
1110 ret = rdtgroup_locksetup_exit(rdtgrp);
1114 rdtgrp->mode = RDT_MODE_EXCLUSIVE;
1115 } else if (!strcmp(buf, "pseudo-locksetup")) {
1116 ret = rdtgroup_locksetup_enter(rdtgrp);
1119 rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP;
1121 rdt_last_cmd_printf("unknown/unsupported mode\n");
1126 rdtgroup_kn_unlock(of->kn);
1127 return ret ?: nbytes;
1131 * rdtgroup_cbm_to_size - Translate CBM to size in bytes
1132 * @r: RDT resource to which @d belongs.
1133 * @d: RDT domain instance.
1134 * @cbm: bitmask for which the size should be computed.
1136 * The bitmask provided associated with the RDT domain instance @d will be
1137 * translated into how many bytes it represents. The size in bytes is
1138 * computed by first dividing the total cache size by the CBM length to
1139 * determine how many bytes each bit in the bitmask represents. The result
1140 * is multiplied by the number of bits set in the bitmask.
1142 unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
1143 struct rdt_domain *d, u32 cbm)
1145 struct cpu_cacheinfo *ci;
1146 unsigned int size = 0;
1149 num_b = bitmap_weight((unsigned long *)&cbm, r->cache.cbm_len);
1150 ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
1151 for (i = 0; i < ci->num_leaves; i++) {
1152 if (ci->info_list[i].level == r->cache_level) {
1153 size = ci->info_list[i].size / r->cache.cbm_len * num_b;
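/*
 * [Worked example, not part of the original file.] The arithmetic in
 * rdtgroup_cbm_to_size() for a hypothetical 20-bit CBM on a 20 MiB L3
 * cache: each bit covers 20 MiB / 20 = 1 MiB, so cbm = 0xf (4 bits set)
 * translates to 4 MiB.
 */
static unsigned int cbm_to_size_demo(unsigned int cache_size_bytes,
				     unsigned int cbm_len, u32 cbm)
{
	/* same cast as above: safe because cbm_len never exceeds 32 */
	return cache_size_bytes / cbm_len *
	       bitmap_weight((unsigned long *)&cbm, cbm_len);
}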
1162 * rdtgroup_size_show - Display size in bytes of allocated regions
1164 * The "size" file mirrors the layout of the "schemata" file, printing the
1165 * size in bytes of each region instead of the capacity bitmask.
1168 static int rdtgroup_size_show(struct kernfs_open_file *of,
1169 struct seq_file *s, void *v)
1171 struct rdtgroup *rdtgrp;
1172 struct rdt_resource *r;
1173 struct rdt_domain *d;
1178 rdtgrp = rdtgroup_kn_lock_live(of->kn);
1180 rdtgroup_kn_unlock(of->kn);
1184 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
1185 seq_printf(s, "%*s:", max_name_width, rdtgrp->plr->r->name);
1186 size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
1189 seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
1193 for_each_alloc_enabled_rdt_resource(r) {
1195 seq_printf(s, "%*s:", max_name_width, r->name);
1196 list_for_each_entry(d, &r->domains, list) {
1199 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
1202 ctrl = (!is_mba_sc(r) ?
1203 d->ctrl_val[rdtgrp->closid] :
1204 d->mbps_val[rdtgrp->closid]);
1205 if (r->rid == RDT_RESOURCE_MBA)
1208 size = rdtgroup_cbm_to_size(r, d, ctrl);
1210 seq_printf(s, "%d=%u", d->id, size);
1217 rdtgroup_kn_unlock(of->kn);
1222 /* rdtgroup information files for one cache resource. */
1223 static struct rftype res_common_files[] = {
1225 .name = "last_cmd_status",
1227 .kf_ops = &rdtgroup_kf_single_ops,
1228 .seq_show = rdt_last_cmd_status_show,
1229 .fflags = RF_TOP_INFO,
1232 .name = "num_closids",
1234 .kf_ops = &rdtgroup_kf_single_ops,
1235 .seq_show = rdt_num_closids_show,
1236 .fflags = RF_CTRL_INFO,
1239 .name = "mon_features",
1241 .kf_ops = &rdtgroup_kf_single_ops,
1242 .seq_show = rdt_mon_features_show,
1243 .fflags = RF_MON_INFO,
1246 .name = "num_rmids",
1248 .kf_ops = &rdtgroup_kf_single_ops,
1249 .seq_show = rdt_num_rmids_show,
1250 .fflags = RF_MON_INFO,
1255 .kf_ops = &rdtgroup_kf_single_ops,
1256 .seq_show = rdt_default_ctrl_show,
1257 .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE,
1260 .name = "min_cbm_bits",
1262 .kf_ops = &rdtgroup_kf_single_ops,
1263 .seq_show = rdt_min_cbm_bits_show,
1264 .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE,
1267 .name = "shareable_bits",
1269 .kf_ops = &rdtgroup_kf_single_ops,
1270 .seq_show = rdt_shareable_bits_show,
1271 .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE,
1274 .name = "bit_usage",
1276 .kf_ops = &rdtgroup_kf_single_ops,
1277 .seq_show = rdt_bit_usage_show,
1278 .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE,
1281 .name = "min_bandwidth",
1283 .kf_ops = &rdtgroup_kf_single_ops,
1284 .seq_show = rdt_min_bw_show,
1285 .fflags = RF_CTRL_INFO | RFTYPE_RES_MB,
1288 .name = "bandwidth_gran",
1290 .kf_ops = &rdtgroup_kf_single_ops,
1291 .seq_show = rdt_bw_gran_show,
1292 .fflags = RF_CTRL_INFO | RFTYPE_RES_MB,
1295 .name = "delay_linear",
1297 .kf_ops = &rdtgroup_kf_single_ops,
1298 .seq_show = rdt_delay_linear_show,
1299 .fflags = RF_CTRL_INFO | RFTYPE_RES_MB,
1302 .name = "max_threshold_occupancy",
1304 .kf_ops = &rdtgroup_kf_single_ops,
1305 .write = max_threshold_occ_write,
1306 .seq_show = max_threshold_occ_show,
1307 .fflags = RF_MON_INFO | RFTYPE_RES_CACHE,
1312 .kf_ops = &rdtgroup_kf_single_ops,
1313 .write = rdtgroup_cpus_write,
1314 .seq_show = rdtgroup_cpus_show,
1315 .fflags = RFTYPE_BASE,
1318 .name = "cpus_list",
1320 .kf_ops = &rdtgroup_kf_single_ops,
1321 .write = rdtgroup_cpus_write,
1322 .seq_show = rdtgroup_cpus_show,
1323 .flags = RFTYPE_FLAGS_CPUS_LIST,
1324 .fflags = RFTYPE_BASE,
1329 .kf_ops = &rdtgroup_kf_single_ops,
1330 .write = rdtgroup_tasks_write,
1331 .seq_show = rdtgroup_tasks_show,
1332 .fflags = RFTYPE_BASE,
1337 .kf_ops = &rdtgroup_kf_single_ops,
1338 .write = rdtgroup_schemata_write,
1339 .seq_show = rdtgroup_schemata_show,
1340 .fflags = RF_CTRL_BASE,
1345 .kf_ops = &rdtgroup_kf_single_ops,
1346 .write = rdtgroup_mode_write,
1347 .seq_show = rdtgroup_mode_show,
1348 .fflags = RF_CTRL_BASE,
1353 .kf_ops = &rdtgroup_kf_single_ops,
1354 .seq_show = rdtgroup_size_show,
1355 .fflags = RF_CTRL_BASE,
1360 static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags)
1362 struct rftype *rfts, *rft;
1365 rfts = res_common_files;
1366 len = ARRAY_SIZE(res_common_files);
1368 lockdep_assert_held(&rdtgroup_mutex);
1370 for (rft = rfts; rft < rfts + len; rft++) {
1371 if ((fflags & rft->fflags) == rft->fflags) {
1372 ret = rdtgroup_add_file(kn, rft);
1380 pr_warn("Failed to add %s, err=%d\n", rft->name, ret);
1381 while (--rft >= rfts) {
1382 if ((fflags & rft->fflags) == rft->fflags)
1383 kernfs_remove_by_name(kn, rft->name);
1389 * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file
1390 * @r: The resource group with which the file is associated.
1391 * @name: Name of the file
1393 * The permissions of the named resctrl file, directory, or link are modified
1394 * to not allow read, write, or execute by any user.
1396 * WARNING: This function is intended to communicate to the user that the
1397 * resctrl file has been locked down - that it is not relevant to the
1398 * particular state the system finds itself in. It should not be relied
1399 * on to protect from user access because after the file's permissions
1400 * are restricted the user can still change the permissions using chmod
1401 * from the command line.
1403 * Return: 0 on success, <0 on failure.
1405 int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name)
1407 struct iattr iattr = {.ia_valid = ATTR_MODE,};
1408 struct kernfs_node *kn;
1411 kn = kernfs_find_and_get_ns(r->kn, name, NULL);
1415 switch (kernfs_type(kn)) {
1417 iattr.ia_mode = S_IFDIR;
1420 iattr.ia_mode = S_IFREG;
1423 iattr.ia_mode = S_IFLNK;
1427 ret = kernfs_setattr(kn, &iattr);
1433 * rdtgroup_kn_mode_restore - Restore user access to named resctrl file
1434 * @r: The resource group with which the file is associated.
1435 * @name: Name of the file
1436 * @mask: Mask of permissions that should be restored
1438 * Restore the permissions of the named file. If @name is a directory the
1439 * permissions of its parent will be used.
1441 * Return: 0 on success, <0 on failure.
1443 int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
1446 struct iattr iattr = {.ia_valid = ATTR_MODE,};
1447 struct kernfs_node *kn, *parent;
1448 struct rftype *rfts, *rft;
1451 rfts = res_common_files;
1452 len = ARRAY_SIZE(res_common_files);
1454 for (rft = rfts; rft < rfts + len; rft++) {
1455 if (!strcmp(rft->name, name))
1456 iattr.ia_mode = rft->mode & mask;
1459 kn = kernfs_find_and_get_ns(r->kn, name, NULL);
1463 switch (kernfs_type(kn)) {
1465 parent = kernfs_get_parent(kn);
1467 iattr.ia_mode |= parent->mode;
1470 iattr.ia_mode |= S_IFDIR;
1473 iattr.ia_mode |= S_IFREG;
1476 iattr.ia_mode |= S_IFLNK;
1480 ret = kernfs_setattr(kn, &iattr);
1485 static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
1486 unsigned long fflags)
1488 struct kernfs_node *kn_subdir;
1491 kn_subdir = kernfs_create_dir(kn_info, name,
1493 if (IS_ERR(kn_subdir))
1494 return PTR_ERR(kn_subdir);
1496 kernfs_get(kn_subdir);
1497 ret = rdtgroup_kn_set_ugid(kn_subdir);
1501 ret = rdtgroup_add_files(kn_subdir, fflags);
1503 kernfs_activate(kn_subdir);
1508 static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
1510 struct rdt_resource *r;
1511 unsigned long fflags;
1515 /* create the directory */
1516 kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
1517 if (IS_ERR(kn_info))
1518 return PTR_ERR(kn_info);
1519 kernfs_get(kn_info);
1521 ret = rdtgroup_add_files(kn_info, RF_TOP_INFO);
1525 for_each_alloc_enabled_rdt_resource(r) {
1526 fflags = r->fflags | RF_CTRL_INFO;
1527 ret = rdtgroup_mkdir_info_resdir(r, r->name, fflags);
1532 for_each_mon_enabled_rdt_resource(r) {
1533 fflags = r->fflags | RF_MON_INFO;
1534 sprintf(name, "%s_MON", r->name);
1535 ret = rdtgroup_mkdir_info_resdir(r, name, fflags);
1541 * This extra ref will be put in kernfs_remove() and guarantees
1542 * that @rdtgrp->kn is always accessible.
1544 kernfs_get(kn_info);
1546 ret = rdtgroup_kn_set_ugid(kn_info);
1550 kernfs_activate(kn_info);
1555 kernfs_remove(kn_info);
1560 mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp,
1561 char *name, struct kernfs_node **dest_kn)
1563 struct kernfs_node *kn;
1566 /* create the directory */
1567 kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
1575 * This extra ref will be put in kernfs_remove() and guarantees
1576 * that @rdtgrp->kn is always accessible.
1580 ret = rdtgroup_kn_set_ugid(kn);
1584 kernfs_activate(kn);
1593 static void l3_qos_cfg_update(void *arg)
1597 wrmsrl(IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
1600 static void l2_qos_cfg_update(void *arg)
1604 wrmsrl(IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
1607 static inline bool is_mba_linear(void)
1609 return rdt_resources_all[RDT_RESOURCE_MBA].membw.delay_linear;
1612 static int set_cache_qos_cfg(int level, bool enable)
1614 void (*update)(void *arg);
1615 struct rdt_resource *r_l;
1616 cpumask_var_t cpu_mask;
1617 struct rdt_domain *d;
1620 if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
1623 if (level == RDT_RESOURCE_L3)
1624 update = l3_qos_cfg_update;
1625 else if (level == RDT_RESOURCE_L2)
1626 update = l2_qos_cfg_update;
1630 r_l = &rdt_resources_all[level];
1631 list_for_each_entry(d, &r_l->domains, list) {
1632 /* Pick one CPU from each domain instance to update MSR */
1633 cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
1636 /* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
1637 if (cpumask_test_cpu(cpu, cpu_mask))
1639 /* Update QOS_CFG MSR on all other cpus in cpu_mask. */
1640 smp_call_function_many(cpu_mask, update, &enable, 1);
1643 free_cpumask_var(cpu_mask);
1649 * Enable or disable the MBA software controller,
1650 * which lets the user specify bandwidth in MBps.
1651 * The MBA software controller is supported only if
1652 * MBM is supported and MBA is in linear scale.
1654 static int set_mba_sc(bool mba_sc)
1656 struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA];
1657 struct rdt_domain *d;
1659 if (!is_mbm_enabled() || !is_mba_linear() ||
1660 mba_sc == is_mba_sc(r))
1663 r->membw.mba_sc = mba_sc;
1664 list_for_each_entry(d, &r->domains, list)
1665 setup_default_ctrlval(r, d->ctrl_val, d->mbps_val);
1670 static int cdp_enable(int level, int data_type, int code_type)
1672 struct rdt_resource *r_ldata = &rdt_resources_all[data_type];
1673 struct rdt_resource *r_lcode = &rdt_resources_all[code_type];
1674 struct rdt_resource *r_l = &rdt_resources_all[level];
1677 if (!r_l->alloc_capable || !r_ldata->alloc_capable ||
1678 !r_lcode->alloc_capable)
1681 ret = set_cache_qos_cfg(level, true);
1683 r_l->alloc_enabled = false;
1684 r_ldata->alloc_enabled = true;
1685 r_lcode->alloc_enabled = true;
1690 static int cdpl3_enable(void)
1692 return cdp_enable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA,
1693 RDT_RESOURCE_L3CODE);
1696 static int cdpl2_enable(void)
1698 return cdp_enable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA,
1699 RDT_RESOURCE_L2CODE);
1702 static void cdp_disable(int level, int data_type, int code_type)
1704 struct rdt_resource *r = &rdt_resources_all[level];
1706 r->alloc_enabled = r->alloc_capable;
1708 if (rdt_resources_all[data_type].alloc_enabled) {
1709 rdt_resources_all[data_type].alloc_enabled = false;
1710 rdt_resources_all[code_type].alloc_enabled = false;
1711 set_cache_qos_cfg(level, false);
1715 static void cdpl3_disable(void)
1717 cdp_disable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA, RDT_RESOURCE_L3CODE);
1720 static void cdpl2_disable(void)
1722 cdp_disable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA, RDT_RESOURCE_L2CODE);
1725 static void cdp_disable_all(void)
1727 if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
1729 if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
1733 static int parse_rdtgroupfs_options(char *data)
1735 char *token, *o = data;
1738 while ((token = strsep(&o, ",")) != NULL) {
1744 if (!strcmp(token, "cdp")) {
1745 ret = cdpl3_enable();
1748 } else if (!strcmp(token, "cdpl2")) {
1749 ret = cdpl2_enable();
1752 } else if (!strcmp(token, "mba_MBps")) {
1753 ret = set_mba_sc(true);
1765 pr_err("Invalid mount option \"%s\"\n", token);
1771 * We don't allow rdtgroup directories to be created anywhere
1772 * except the root directory. Thus when looking for the rdtgroup
1773 * structure for a kernfs node we are either looking at a directory,
1774 * in which case the rdtgroup structure is pointed at by the "priv"
1775 * field, or at a file, in which case we need only look to the parent
1776 * to find the rdtgroup.
1778 static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn)
1780 if (kernfs_type(kn) == KERNFS_DIR) {
1782 * All the resource directories use "kn->priv"
1783 * to point to the "struct rdtgroup" for the
1784 * resource. "info" and its subdirectories don't
1785 * have rdtgroup structures, so return NULL here.
1787 if (kn == kn_info || kn->parent == kn_info)
1792 return kn->parent->priv;
1796 struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn)
1798 struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);
1803 atomic_inc(&rdtgrp->waitcount);
1804 kernfs_break_active_protection(kn);
1806 mutex_lock(&rdtgroup_mutex);
1808 /* Was this group deleted while we waited? */
1809 if (rdtgrp->flags & RDT_DELETED)
1815 void rdtgroup_kn_unlock(struct kernfs_node *kn)
1817 struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);
1822 mutex_unlock(&rdtgroup_mutex);
1824 if (atomic_dec_and_test(&rdtgrp->waitcount) &&
1825 (rdtgrp->flags & RDT_DELETED)) {
1826 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
1827 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
1828 rdtgroup_pseudo_lock_remove(rdtgrp);
1829 kernfs_unbreak_active_protection(kn);
1830 kernfs_put(rdtgrp->kn);
1833 kernfs_unbreak_active_protection(kn);
1837 static int mkdir_mondata_all(struct kernfs_node *parent_kn,
1838 struct rdtgroup *prgrp,
1839 struct kernfs_node **mon_data_kn);
1841 static struct dentry *rdt_mount(struct file_system_type *fs_type,
1842 int flags, const char *unused_dev_name,
1845 struct rdt_domain *dom;
1846 struct rdt_resource *r;
1847 struct dentry *dentry;
1851 mutex_lock(&rdtgroup_mutex);
1853 * resctrl file system can only be mounted once.
1855 if (static_branch_unlikely(&rdt_enable_key)) {
1856 dentry = ERR_PTR(-EBUSY);
1860 ret = parse_rdtgroupfs_options(data);
1862 dentry = ERR_PTR(ret);
1868 ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
1870 dentry = ERR_PTR(ret);
1874 if (rdt_mon_capable) {
1875 ret = mongroup_create_dir(rdtgroup_default.kn,
1879 dentry = ERR_PTR(ret);
1882 kernfs_get(kn_mongrp);
1884 ret = mkdir_mondata_all(rdtgroup_default.kn,
1885 &rdtgroup_default, &kn_mondata);
1887 dentry = ERR_PTR(ret);
1890 kernfs_get(kn_mondata);
1891 rdtgroup_default.mon.mon_data_kn = kn_mondata;
1894 ret = rdt_pseudo_lock_init();
1896 dentry = ERR_PTR(ret);
1900 dentry = kernfs_mount(fs_type, flags, rdt_root,
1901 RDTGROUP_SUPER_MAGIC, NULL);
1905 if (rdt_alloc_capable)
1906 static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
1907 if (rdt_mon_capable)
1908 static_branch_enable_cpuslocked(&rdt_mon_enable_key);
1910 if (rdt_alloc_capable || rdt_mon_capable)
1911 static_branch_enable_cpuslocked(&rdt_enable_key);
1913 if (is_mbm_enabled()) {
1914 r = &rdt_resources_all[RDT_RESOURCE_L3];
1915 list_for_each_entry(dom, &r->domains, list)
1916 mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL);
1922 rdt_pseudo_lock_release();
1924 if (rdt_mon_capable)
1925 kernfs_remove(kn_mondata);
1927 if (rdt_mon_capable)
1928 kernfs_remove(kn_mongrp);
1930 kernfs_remove(kn_info);
1934 rdt_last_cmd_clear();
1935 mutex_unlock(&rdtgroup_mutex);
1941 static int reset_all_ctrls(struct rdt_resource *r)
1943 struct msr_param msr_param;
1944 cpumask_var_t cpu_mask;
1945 struct rdt_domain *d;
1948 if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
1953 msr_param.high = r->num_closid;
1956 * Disable resource control for this resource by setting all
1957 * CBMs in all domains to the maximum mask value. Pick one CPU
1958 * from each domain to update the MSRs below.
1960 list_for_each_entry(d, &r->domains, list) {
1961 cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
1963 for (i = 0; i < r->num_closid; i++)
1964 d->ctrl_val[i] = r->default_ctrl;
1967 /* Update CBM on this cpu if it's in cpu_mask. */
1968 if (cpumask_test_cpu(cpu, cpu_mask))
1969 rdt_ctrl_update(&msr_param);
1970 /* Update CBM on all other cpus in cpu_mask. */
1971 smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
1974 free_cpumask_var(cpu_mask);
1979 static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
1981 return (rdt_alloc_capable &&
1982 (r->type == RDTCTRL_GROUP) && (t->closid == r->closid));
1985 static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
1987 return (rdt_mon_capable &&
1988 (r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid));
1992 * Move tasks from one to the other group. If @from is NULL, then all tasks
1993 * in the system are moved unconditionally (used for teardown).
1995 * If @mask is not NULL the cpus on which moved tasks are running are set
1996 * in that mask so the update smp function call is restricted to affected
1997 * cpus.
1999 static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
2000 struct cpumask *mask)
2002 struct task_struct *p, *t;
2004 read_lock(&tasklist_lock);
2005 for_each_process_thread(p, t) {
2006 if (!from || is_closid_match(t, from) ||
2007 is_rmid_match(t, from)) {
2008 t->closid = to->closid;
2009 t->rmid = to->mon.rmid;
2013 * This is safe on x86 w/o barriers as the ordering
2014 * of writing to task_cpu() and t->on_cpu is
2015 * reverse to the reading here. The detection is
2016 * inaccurate as tasks might move or schedule
2017 * before the smp function call takes place. In
2018 * such a case the function call is pointless, but
2019 * there is no other side effect.
2021 if (mask && t->on_cpu)
2022 cpumask_set_cpu(task_cpu(t), mask);
2026 read_unlock(&tasklist_lock);
2029 static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
2031 struct rdtgroup *sentry, *stmp;
2032 struct list_head *head;
2034 head = &rdtgrp->mon.crdtgrp_list;
2035 list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) {
2036 free_rmid(sentry->mon.rmid);
2037 list_del(&sentry->mon.crdtgrp_list);
2043 * Forcibly remove all subdirectories under root.
2045 static void rmdir_all_sub(void)
2047 struct rdtgroup *rdtgrp, *tmp;
2049 /* Move all tasks to the default resource group */
2050 rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);
2052 list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
2053 /* Free any child rmids */
2054 free_all_child_rdtgrp(rdtgrp);
2056 /* Remove each rdtgroup other than root */
2057 if (rdtgrp == &rdtgroup_default)
2060 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
2061 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
2062 rdtgroup_pseudo_lock_remove(rdtgrp);
2065 * Give any CPUs back to the default group. We cannot copy
2066 * cpu_online_mask because a CPU might have executed the
2067 * offline callback already, but is still marked online.
2069 cpumask_or(&rdtgroup_default.cpu_mask,
2070 &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
2072 free_rmid(rdtgrp->mon.rmid);
2074 kernfs_remove(rdtgrp->kn);
2075 list_del(&rdtgrp->rdtgroup_list);
2078 /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
2079 update_closid_rmid(cpu_online_mask, &rdtgroup_default);
2081 kernfs_remove(kn_info);
2082 kernfs_remove(kn_mongrp);
2083 kernfs_remove(kn_mondata);
2086 static void rdt_kill_sb(struct super_block *sb)
2088 struct rdt_resource *r;
2091 mutex_lock(&rdtgroup_mutex);
2095 /* Put everything back to default values. */
2096 for_each_alloc_enabled_rdt_resource(r)
2100 rdt_pseudo_lock_release();
2101 rdtgroup_default.mode = RDT_MODE_SHAREABLE;
2102 static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
2103 static_branch_disable_cpuslocked(&rdt_mon_enable_key);
2104 static_branch_disable_cpuslocked(&rdt_enable_key);
2106 mutex_unlock(&rdtgroup_mutex);
2110 static struct file_system_type rdt_fs_type = {
2113 .kill_sb = rdt_kill_sb,
2116 static int mon_addfile(struct kernfs_node *parent_kn, const char *name,
2119 struct kernfs_node *kn;
2122 kn = __kernfs_create_file(parent_kn, name, 0444,
2123 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0,
2124 &kf_mondata_ops, priv, NULL, NULL);
2128 ret = rdtgroup_kn_set_ugid(kn);
2138 * Remove all subdirectories of mon_data of ctrl_mon groups
2139 * and monitor groups with given domain id.
2141 void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, unsigned int dom_id)
2143 struct rdtgroup *prgrp, *crgrp;
2146 if (!r->mon_enabled)
2149 list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
2150 sprintf(name, "mon_%s_%02d", r->name, dom_id);
2151 kernfs_remove_by_name(prgrp->mon.mon_data_kn, name);
2153 list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list)
2154 kernfs_remove_by_name(crgrp->mon.mon_data_kn, name);
2158 static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
2159 struct rdt_domain *d,
2160 struct rdt_resource *r, struct rdtgroup *prgrp)
2162 union mon_data_bits priv;
2163 struct kernfs_node *kn;
2164 struct mon_evt *mevt;
2165 struct rmid_read rr;
2169 sprintf(name, "mon_%s_%02d", r->name, d->id);
2170 /* create the directory */
2171 kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
2176 * This extra ref will be put in kernfs_remove() and guarantees
2177 * that kn is always accessible.
2180 ret = rdtgroup_kn_set_ugid(kn);
2184 if (WARN_ON(list_empty(&r->evt_list))) {
2189 priv.u.rid = r->rid;
2190 priv.u.domid = d->id;
2191 list_for_each_entry(mevt, &r->evt_list, list) {
2192 priv.u.evtid = mevt->evtid;
2193 ret = mon_addfile(kn, mevt->name, priv.priv);
2197 if (is_mbm_event(mevt->evtid))
2198 mon_event_read(&rr, d, prgrp, mevt->evtid, true);
2200 kernfs_activate(kn);
2209 * Add all subdirectories of mon_data for "ctrl_mon" groups
2210 * and "monitor" groups with given domain id.
2212 void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
2213 struct rdt_domain *d)
2215 struct kernfs_node *parent_kn;
2216 struct rdtgroup *prgrp, *crgrp;
2217 struct list_head *head;
2219 if (!r->mon_enabled)
2222 list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
2223 parent_kn = prgrp->mon.mon_data_kn;
2224 mkdir_mondata_subdir(parent_kn, d, r, prgrp);
2226 head = &prgrp->mon.crdtgrp_list;
2227 list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
2228 parent_kn = crgrp->mon.mon_data_kn;
2229 mkdir_mondata_subdir(parent_kn, d, r, crgrp);
2234 static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
2235 struct rdt_resource *r,
2236 struct rdtgroup *prgrp)
2238 struct rdt_domain *dom;
2241 list_for_each_entry(dom, &r->domains, list) {
2242 ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp);
2251 * This creates a directory mon_data which contains the monitored data.
2253 * mon_data has one directory for each domain, named in the format
2254 * mon_<domain_name>_<domain_id>. For example, a mon_data
2255 * directory with an L3 domain looks as below:
2262 * Each domain directory has one file per event:
2267 static int mkdir_mondata_all(struct kernfs_node *parent_kn,
2268 struct rdtgroup *prgrp,
2269 struct kernfs_node **dest_kn)
2271 struct rdt_resource *r;
2272 struct kernfs_node *kn;
2276 * Create the mon_data directory first.
2278 ret = mongroup_create_dir(parent_kn, NULL, "mon_data", &kn);
2286 * Create the subdirectories for each domain. Note that all events
2287 * in a domain like L3 are grouped into a resource whose domain is L3
2289 for_each_mon_enabled_rdt_resource(r) {
2290 ret = mkdir_mondata_subdir_alldom(kn, r, prgrp);
2303 * cbm_ensure_valid - Enforce validity on provided CBM
2304 * @_val: Candidate CBM
2305 * @r: RDT resource to which the CBM belongs
2307 * The provided CBM represents all cache portions available for use. This
2308 * may be represented by a bitmap that does not consist of contiguous ones
2309 * and thus be an invalid CBM.
2310 * Here the provided CBM is forced to be a valid CBM by only considering
2311 * the first set of contiguous bits as valid and clearing all remaining bits.
2312 * The intention here is to provide a valid default CBM with which a new
2313 * resource group is initialized. The user can follow this with a
2314 * modification to the CBM if the default does not satisfy the
2315 * requirements.
2317 static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r)
2320 * Convert the u32 _val to an unsigned long required by all the bit
2321 * operations within this function. No more than 32 bits of this
2322 * converted value can be accessed because all bit operations are
2323 * additionally provided with cbm_len that is initialized during
2324 * hardware enumeration using five bits from the EAX register and
2325 * thus never can exceed 32 bits.
2327 unsigned long *val = (unsigned long *)_val;
2328 unsigned int cbm_len = r->cache.cbm_len;
2329 unsigned long first_bit, zero_bit;
2334 first_bit = find_first_bit(val, cbm_len);
2335 zero_bit = find_next_zero_bit(val, cbm_len, first_bit);
2337 /* Clear any remaining bits to ensure contiguous region */
2338 bitmap_clear(val, zero_bit, cbm_len - zero_bit);
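/*
 * [Worked example, not part of the original file.] For _val = 0xf0f
 * (0b1111_0000_1111) and cbm_len = 12: first_bit = 0, zero_bit = 4, and
 * bitmap_clear(val, 4, 12 - 4) clears bits 4..11, leaving 0x00f, the
 * first contiguous run of ones and therefore a valid CBM.
 */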
2342 * rdtgroup_init_alloc - Initialize the new RDT group's allocations
2344 * A new RDT group is being created on an allocation capable (CAT)
2345 * supporting system. Set this group up to start off with all usable
2346 * allocations. That is, all shareable and unused bits.
2348 * All-zero CBM is invalid. If there are no more shareable bits available
2349 * on any domain then the entire allocation will fail.
2351 static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
2353 u32 used_b = 0, unused_b = 0;
2354 u32 closid = rdtgrp->closid;
2355 struct rdt_resource *r;
2356 enum rdtgrp_mode mode;
2357 struct rdt_domain *d;
2361 for_each_alloc_enabled_rdt_resource(r) {
2363 * Only initialize default allocations for CBM cache
2364 * resources.
2366 if (r->rid == RDT_RESOURCE_MBA)
2368 list_for_each_entry(d, &r->domains, list) {
2369 d->have_new_ctrl = false;
2370 d->new_ctrl = r->cache.shareable_bits;
2371 used_b = r->cache.shareable_bits;
2373 for (i = 0; i < closids_supported(); i++, ctrl++) {
2374 if (closid_allocated(i) && i != closid) {
2375 mode = rdtgroup_mode_by_closid(i);
2376 if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
2379 if (mode == RDT_MODE_SHAREABLE)
2380 d->new_ctrl |= *ctrl;
2383 if (d->plr && d->plr->cbm > 0)
2384 used_b |= d->plr->cbm;
2385 unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
2386 unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
2387 d->new_ctrl |= unused_b;
2389 * Force the initial CBM to be valid; the user can
2390 * modify the CBM based on system availability.
2392 cbm_ensure_valid(&d->new_ctrl, r);
2393 if (bitmap_weight((unsigned long *) &d->new_ctrl,
2395 r->cache.min_cbm_bits) {
2396 rdt_last_cmd_printf("no space on %s:%d\n",
2400 d->have_new_ctrl = true;
2404 for_each_alloc_enabled_rdt_resource(r) {
2406 * Only initialize default allocations for CBM cache
2407 * resources.
2409 if (r->rid == RDT_RESOURCE_MBA)
2411 ret = update_domains(r, rdtgrp->closid);
2413 rdt_last_cmd_puts("failed to initialize allocations\n");
2416 rdtgrp->mode = RDT_MODE_SHAREABLE;
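/*
 * [Worked example, not part of the original file.] The unused-bit
 * computation above with a hypothetical 8-bit CBM: if the shareable and
 * in-use bits give used_b = 0b00000111, then
 * unused_b = used_b ^ (BIT_MASK(8) - 1) = 0b11111000, so the new group's
 * d->new_ctrl starts with the shareable bits plus every bit no other
 * group owns (trimmed to a contiguous run by cbm_ensure_valid()).
 */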
2422 static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
2423 struct kernfs_node *prgrp_kn,
2424 const char *name, umode_t mode,
2425 enum rdt_group_type rtype, struct rdtgroup **r)
2427 struct rdtgroup *prdtgrp, *rdtgrp;
2428 struct kernfs_node *kn;
2432 prdtgrp = rdtgroup_kn_lock_live(prgrp_kn);
2433 rdt_last_cmd_clear();
2436 rdt_last_cmd_puts("directory was removed\n");
2440 if (rtype == RDTMON_GROUP &&
2441 (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
2442 prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
2444 rdt_last_cmd_puts("pseudo-locking in progress\n");
2448 /* allocate the rdtgroup. */
2449 rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
2452 rdt_last_cmd_puts("kernel out of memory\n");
2456 rdtgrp->mon.parent = prdtgrp;
2457 rdtgrp->type = rtype;
2458 INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list);
2460 /* kernfs creates the directory for rdtgrp */
2461 kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp);
2464 rdt_last_cmd_puts("kernfs create error\n");
2470 * kernfs_remove() will drop the reference count on "kn" which
2471 * will free it. But we still need it to stick around for the
2472 * rdtgroup_kn_unlock(kn) call below. Take one extra reference
2473 * here, which will be dropped inside rdtgroup_kn_unlock().
2477 ret = rdtgroup_kn_set_ugid(kn);
2479 rdt_last_cmd_puts("kernfs perm error\n");
2483 files = RFTYPE_BASE | BIT(RF_CTRLSHIFT + rtype);
2484 ret = rdtgroup_add_files(kn, files);
2486 rdt_last_cmd_puts("kernfs fill error\n");
2490 if (rdt_mon_capable) {
2493 rdt_last_cmd_puts("out of RMIDs\n");
2496 rdtgrp->mon.rmid = ret;
2498 ret = mkdir_mondata_all(kn, rdtgrp, &rdtgrp->mon.mon_data_kn);
2500 rdt_last_cmd_puts("kernfs subdir error\n");
2504 kernfs_activate(kn);
2507 * The caller unlocks the prgrp_kn upon success.
2512 free_rmid(rdtgrp->mon.rmid);
2514 kernfs_remove(rdtgrp->kn);
2518 rdtgroup_kn_unlock(prgrp_kn);
2522 static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
2524 kernfs_remove(rgrp->kn);
2525 free_rmid(rgrp->mon.rmid);
2530 * Create a monitor group under "mon_groups" directory of a control
2531 * and monitor group (ctrl_mon). This is a resource group
2532 * to monitor a subset of tasks and cpus in its parent ctrl_mon group.
2534 static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn,
2535 struct kernfs_node *prgrp_kn,
2539 struct rdtgroup *rdtgrp, *prgrp;
2542 ret = mkdir_rdt_prepare(parent_kn, prgrp_kn, name, mode, RDTMON_GROUP,
2547 prgrp = rdtgrp->mon.parent;
2548 rdtgrp->closid = prgrp->closid;
2551 * Add the rdtgrp to the list of rdtgrps the parent
2552 * ctrl_mon group has to track.
2554 list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list);
2556 rdtgroup_kn_unlock(prgrp_kn);

/*
 * These are rdtgroups created under the root directory. They can be
 * used to allocate and monitor resources.
 */
static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
				   struct kernfs_node *prgrp_kn,
				   const char *name, umode_t mode)
{
	struct rdtgroup *rdtgrp;
	struct kernfs_node *kn;
	u32 closid;
	int ret;

	ret = mkdir_rdt_prepare(parent_kn, prgrp_kn, name, mode, RDTCTRL_GROUP,
				&rdtgrp);
	if (ret)
		return ret;

	kn = rdtgrp->kn;
	ret = closid_alloc();
	if (ret < 0) {
		rdt_last_cmd_puts("out of CLOSIDs\n");
		goto out_common_fail;
	}
	closid = ret;
	ret = 0;

	rdtgrp->closid = closid;
	ret = rdtgroup_init_alloc(rdtgrp);
	if (ret < 0)
		goto out_id_free;

	list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);

	if (rdt_mon_capable) {
		/*
		 * Create an empty mon_groups directory to hold the subset
		 * of tasks and cpus to monitor.
		 */
		ret = mongroup_create_dir(kn, NULL, "mon_groups", NULL);
		if (ret) {
			rdt_last_cmd_puts("kernfs subdir error\n");
			goto out_del_list;
		}
	}

	goto out_unlock;

out_del_list:
	list_del(&rdtgrp->rdtgroup_list);
out_id_free:
	closid_free(closid);
out_common_fail:
	mkdir_rdt_prepare_clean(rdtgrp);
out_unlock:
	rdtgroup_kn_unlock(prgrp_kn);
	return ret;
}
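
/*
 * The error labels above unwind in the reverse order of setup: the group
 * is unlinked from rdt_all_groups, its CLOSID is released, and finally
 * mkdir_rdt_prepare_clean() undoes what mkdir_rdt_prepare() created.
 */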

/*
 * We allow creating mon groups only within a directory called "mon_groups"
 * which is present in every ctrl_mon group. Check if this is a valid
 * "mon_groups" directory.
 *
 * 1. The directory should be named "mon_groups".
 * 2. The mon group itself should "not" be named "mon_groups".
 *    This makes sure "mon_groups" directory always has a ctrl_mon group
 *    as parent.
 */
static bool is_mon_groups(struct kernfs_node *kn, const char *name)
{
	return (!strcmp(kn->name, "mon_groups") &&
		strcmp(name, "mon_groups"));
}
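
/*
 * For example (hypothetical paths): a mkdir of "m1" inside
 * /sys/fs/resctrl/p0/mon_groups/ passes both checks above, while a mkdir
 * of "mon_groups" in that same directory, or of "m1" directly under
 * /sys/fs/resctrl/p0/, does not.
 */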

static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
			  umode_t mode)
{
	/* Do not accept '\n' to avoid unparsable situation. */
	if (strchr(name, '\n'))
		return -EINVAL;

	/*
	 * If the parent directory is the root directory and RDT
	 * allocation is supported, add a control and monitoring
	 * group.
	 */
	if (rdt_alloc_capable && parent_kn == rdtgroup_default.kn)
		return rdtgroup_mkdir_ctrl_mon(parent_kn, parent_kn, name, mode);

	/*
	 * If RDT monitoring is supported and the parent directory is a valid
	 * "mon_groups" directory, add a monitoring subdirectory.
	 */
	if (rdt_mon_capable && is_mon_groups(parent_kn, name))
		return rdtgroup_mkdir_mon(parent_kn, parent_kn->parent, name, mode);

	return -EPERM;
}
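
/*
 * Example usage from user space, assuming resctrl is mounted at
 * /sys/fs/resctrl:
 *
 *   mkdir /sys/fs/resctrl/p0                  # new ctrl_mon group
 *   mkdir /sys/fs/resctrl/p0/mon_groups/m0    # new mon group under p0
 *
 * A mkdir anywhere else fails with -EPERM.
 */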

static int rdtgroup_rmdir_mon(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
			      cpumask_var_t tmpmask)
{
	struct rdtgroup *prdtgrp = rdtgrp->mon.parent;
	int cpu;

	/* Give any tasks back to the parent group */
	rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask);

	/* Update per cpu rmid of the moved CPUs first */
	for_each_cpu(cpu, &rdtgrp->cpu_mask)
		per_cpu(pqr_state.default_rmid, cpu) = prdtgrp->mon.rmid;
	/*
	 * Update the MSR on moved CPUs and CPUs which have moved
	 * task running on them.
	 */
	cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
	update_closid_rmid(tmpmask, NULL);

	rdtgrp->flags = RDT_DELETED;
	free_rmid(rdtgrp->mon.rmid);

	/*
	 * Remove the rdtgrp from the parent ctrl_mon group's list
	 */
	WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
	list_del(&rdtgrp->mon.crdtgrp_list);

	/*
	 * one extra hold on this, will drop when we kfree(rdtgrp)
	 * in rdtgroup_kn_unlock()
	 */
	kernfs_get(kn);
	kernfs_remove(rdtgrp->kn);

	return 0;
}

static int rdtgroup_ctrl_remove(struct kernfs_node *kn,
				struct rdtgroup *rdtgrp)
{
	rdtgrp->flags = RDT_DELETED;
	list_del(&rdtgrp->rdtgroup_list);

	/*
	 * one extra hold on this, will drop when we kfree(rdtgrp)
	 * in rdtgroup_kn_unlock()
	 */
	kernfs_get(kn);
	kernfs_remove(rdtgrp->kn);

	return 0;
}

static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
			       cpumask_var_t tmpmask)
{
	int cpu;

	/* Give any tasks back to the default group */
	rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);

	/* Give any CPUs back to the default group */
	cpumask_or(&rdtgroup_default.cpu_mask,
		   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);

	/* Update per cpu closid and rmid of the moved CPUs first */
	for_each_cpu(cpu, &rdtgrp->cpu_mask) {
		per_cpu(pqr_state.default_closid, cpu) = rdtgroup_default.closid;
		per_cpu(pqr_state.default_rmid, cpu) = rdtgroup_default.mon.rmid;
	}

	/*
	 * Update the MSR on moved CPUs and CPUs which have moved
	 * task running on them.
	 */
	cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
	update_closid_rmid(tmpmask, NULL);

	closid_free(rdtgrp->closid);
	free_rmid(rdtgrp->mon.rmid);

	/*
	 * Free all the child monitor group rmids.
	 */
	free_all_child_rdtgrp(rdtgrp);

	rdtgroup_ctrl_remove(kn, rdtgrp);

	return 0;
}

static int rdtgroup_rmdir(struct kernfs_node *kn)
{
	struct kernfs_node *parent_kn = kn->parent;
	struct rdtgroup *rdtgrp;
	cpumask_var_t tmpmask;
	int ret = 0;

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;

	rdtgrp = rdtgroup_kn_lock_live(kn);
	if (!rdtgrp) {
		ret = -EPERM;
		goto out;
	}

	/*
	 * If the rdtgroup is a ctrl_mon group and parent directory
	 * is the root directory, remove the ctrl_mon group.
	 *
	 * If the rdtgroup is a mon group and parent directory
	 * is a valid "mon_groups" directory, remove the mon group.
	 */
	if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			ret = rdtgroup_ctrl_remove(kn, rdtgrp);
		} else {
			ret = rdtgroup_rmdir_ctrl(kn, rdtgrp, tmpmask);
		}
	} else if (rdtgrp->type == RDTMON_GROUP &&
		   is_mon_groups(parent_kn, kn->name)) {
		ret = rdtgroup_rmdir_mon(kn, rdtgrp, tmpmask);
	} else {
		ret = -EPERM;
	}

out:
	rdtgroup_kn_unlock(kn);
	free_cpumask_var(tmpmask);

	return ret;
}
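
/*
 * For example, "rmdir /sys/fs/resctrl/p0" removes a ctrl_mon group,
 * moving its tasks and CPUs back to the default group and freeing its
 * CLOSID and RMIDs, while "rmdir /sys/fs/resctrl/p0/mon_groups/m0"
 * removes only the mon group. Any other rmdir fails with -EPERM.
 */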

static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
{
	if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
		seq_puts(seq, ",cdp");
	return 0;
}

static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {
	.mkdir		= rdtgroup_mkdir,
	.rmdir		= rdtgroup_rmdir,
	.show_options	= rdtgroup_show_options,
};

static int __init rdtgroup_setup_root(void)
{
	int ret;

	rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
				      KERNFS_ROOT_CREATE_DEACTIVATED |
				      KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK,
				      &rdtgroup_default);
	if (IS_ERR(rdt_root))
		return PTR_ERR(rdt_root);

	mutex_lock(&rdtgroup_mutex);

	rdtgroup_default.closid = 0;
	rdtgroup_default.mon.rmid = 0;
	rdtgroup_default.type = RDTCTRL_GROUP;
	INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list);

	list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups);

	ret = rdtgroup_add_files(rdt_root->kn, RF_CTRL_BASE);
	if (ret) {
		kernfs_destroy_root(rdt_root);
		goto out;
	}

	rdtgroup_default.kn = rdt_root->kn;
	kernfs_activate(rdtgroup_default.kn);

out:
	mutex_unlock(&rdtgroup_mutex);

	return ret;
}

/**
 * rdtgroup_init - rdtgroup initialization
 *
 * Sets up the resctrl file system: sets up the root, creates the mount
 * point, registers the rdtgroup filesystem, and initializes files under
 * the root directory.
 *
 * Return: 0 on success or -errno
 */
int __init rdtgroup_init(void)
{
	int ret = 0;

	seq_buf_init(&last_cmd_status, last_cmd_status_buf,
		     sizeof(last_cmd_status_buf));

	ret = rdtgroup_setup_root();
	if (ret)
		return ret;

	ret = sysfs_create_mount_point(fs_kobj, "resctrl");
	if (ret)
		goto cleanup_root;

	ret = register_filesystem(&rdt_fs_type);
	if (ret)
		goto cleanup_mountpoint;

	/*
	 * Adding the resctrl debugfs directory here may not be ideal since
	 * it would let the resctrl debugfs directory appear on the debugfs
	 * filesystem before the resctrl filesystem is mounted.
	 * It may also be ok since that would enable debugging of RDT before
	 * resctrl is mounted.
	 * The debugfs directory is created here rather than in rdt_mount()
	 * because rdt_mount() takes rdtgroup_mutex, and the debugfs
	 * directory creation also takes &sb->s_type->i_mutex_key (the
	 * lockdep class of inode->i_rwsem). Other filesystem interactions
	 * (e.g. SyS_getdents) have the lock ordering:
	 * &sb->s_type->i_mutex_key --> &mm->mmap_sem
	 * During mmap(), called with &mm->mmap_sem held, rdtgroup_mutex
	 * is taken, thus creating the dependency:
	 * &mm->mmap_sem --> rdtgroup_mutex, which can cause deadlock when
	 * combined with the other two lock dependencies.
	 * By creating the debugfs directory here we avoid that dependency
	 * (file operations cannot actually occur until the filesystem is
	 * mounted, but there is no way to tell lockdep that).
	 */
	debugfs_resctrl = debugfs_create_dir("resctrl", NULL);

	return 0;

cleanup_mountpoint:
	sysfs_remove_mount_point(fs_kobj, "resctrl");
cleanup_root:
	kernfs_destroy_root(rdt_root);

	return ret;
}
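
/*
 * Once rdtgroup_init() has run, the filesystem can be mounted from user
 * space, e.g. "mount -t resctrl resctrl /sys/fs/resctrl" (optionally
 * with the "cdp" mount option on CPUs supporting code/data
 * prioritization).
 */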

void __exit rdtgroup_exit(void)
{
	debugfs_remove_recursive(debugfs_resctrl);
	unregister_filesystem(&rdt_fs_type);
	sysfs_remove_mount_point(fs_kobj, "resctrl");
	kernfs_destroy_root(rdt_root);
}