asedeno.scripts.mit.edu Git - linux.git/blobdiff - arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
Merge branch 'x86/urgent' into x86/cache, to pick up dependent fix
[linux.git] / arch / x86 / kernel / cpu / intel_rdt_pseudo_lock.c
index 30e6c9f5a0ada75f9a0b7f7d85c9e05607b4368c..41aeb431e83444d0360ac83be9a31d9c838a83eb 100644 (file)
@@ -789,25 +789,27 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
 /**
  * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked
  * @d: RDT domain
- * @_cbm: CBM to test
+ * @cbm: CBM to test
  *
- * @d represents a cache instance and @_cbm a capacity bitmask that is
- * considered for it. Determine if @_cbm overlaps with any existing
+ * @d represents a cache instance and @cbm a capacity bitmask that is
+ * considered for it. Determine if @cbm overlaps with any existing
  * pseudo-locked region on @d.
  *
- * Return: true if @_cbm overlaps with pseudo-locked region on @d, false
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
+ *
+ * Return: true if @cbm overlaps with pseudo-locked region on @d, false
  * otherwise.
  */
-bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm)
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm)
 {
-       unsigned long *cbm = (unsigned long *)&_cbm;
-       unsigned long *cbm_b;
        unsigned int cbm_len;
+       unsigned long cbm_b;
 
        if (d->plr) {
                cbm_len = d->plr->r->cache.cbm_len;
-               cbm_b = (unsigned long *)&d->plr->cbm;
-               if (bitmap_intersects(cbm, cbm_b, cbm_len))
+               cbm_b = d->plr->cbm;
+               if (bitmap_intersects(&cbm, &cbm_b, cbm_len))
                        return true;
        }
        return false;