asedeno.scripts.mit.edu Git - linux.git/commitdiff
drm/mm: Convert drm_mm_node booleans to bitops
author: Chris Wilson <chris@chris-wilson.co.uk>
Thu, 3 Oct 2019 21:00:59 +0000 (22:00 +0100)
committer: Chris Wilson <chris@chris-wilson.co.uk>
Fri, 4 Oct 2019 12:43:13 +0000 (13:43 +0100)
A straightforward conversion of assignment and checking of the boolean
state flags (allocated, scanned) into non-atomic bitops. The caller
remains responsible for all locking around the drm_mm and its nodes.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191003210100.22250-4-chris@chris-wilson.co.uk
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem.c
include/drm/drm_mm.h

index 99312bdc627332a9c3ddb69823f378b943392094..a9cab5e53731cfef16a3890a2990cfd14c13067e 100644 (file)
@@ -426,7 +426,7 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 
        list_add(&node->node_list, &hole->node_list);
        drm_mm_interval_tree_add_node(hole, node);
-       node->allocated = true;
+       __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
        node->hole_size = 0;
 
        rm_hole(hole);
@@ -545,7 +545,7 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
 
                list_add(&node->node_list, &hole->node_list);
                drm_mm_interval_tree_add_node(hole, node);
-               node->allocated = true;
+               __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
 
                rm_hole(hole);
                if (adj_start > hole_start)
@@ -563,7 +563,7 @@ EXPORT_SYMBOL(drm_mm_insert_node_in_range);
 
 static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
 {
-       return node->scanned_block;
+       return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
 }
 
 /**
@@ -589,7 +589,7 @@ void drm_mm_remove_node(struct drm_mm_node *node)
 
        drm_mm_interval_tree_remove(node, &mm->interval_tree);
        list_del(&node->node_list);
-       node->allocated = false;
+       __clear_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
 
        if (drm_mm_hole_follows(prev_node))
                rm_hole(prev_node);
@@ -627,8 +627,8 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
                                &mm->holes_addr);
        }
 
-       old->allocated = false;
-       new->allocated = true;
+       __clear_bit(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
+       __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
 }
 EXPORT_SYMBOL(drm_mm_replace_node);
 
@@ -738,7 +738,7 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
        DRM_MM_BUG_ON(node->mm != mm);
        DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
        DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
-       node->scanned_block = true;
+       __set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
        mm->scan_active++;
 
        /* Remove this block from the node_list so that we enlarge the hole
@@ -824,7 +824,7 @@ bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
 
        DRM_MM_BUG_ON(node->mm != scan->mm);
        DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
-       node->scanned_block = false;
+       __clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
 
        DRM_MM_BUG_ON(!node->mm->scan_active);
        node->mm->scan_active--;
@@ -922,7 +922,7 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
 
        /* Clever trick to avoid a special case in the free hole tracking. */
        INIT_LIST_HEAD(&mm->head_node.node_list);
-       mm->head_node.allocated = false;
+       mm->head_node.flags = 0;
        mm->head_node.mm = mm;
        mm->head_node.start = start + size;
        mm->head_node.size = -size;
index 1cc93022f8e0bb5f5deb55983542f44a4769bed9..493f07806b08761afde8203e1a3f7eca9d02067e 100644 (file)
@@ -902,7 +902,7 @@ static void reloc_cache_init(struct reloc_cache *cache,
        cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
        cache->has_fence = cache->gen < 4;
        cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
-       cache->node.allocated = false;
+       cache->node.flags = 0;
        cache->rq = NULL;
        cache->rq_size = 0;
 }
index 1c44fccfe718b6162155cf8b6e7e264c7350b626..814f62fca72798bb812bf009f4436ae96889c80b 100644 (file)
@@ -351,7 +351,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
                                               PIN_NOEVICT);
        if (!IS_ERR(vma)) {
                node.start = i915_ggtt_offset(vma);
-               node.allocated = false;
+               node.flags = 0;
        } else {
                ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
                if (ret)
@@ -561,7 +561,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                                               PIN_NOEVICT);
        if (!IS_ERR(vma)) {
                node.start = i915_ggtt_offset(vma);
-               node.allocated = false;
+               node.flags = 0;
        } else {
                ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
                if (ret)
index 2c3bbb43c7d1a55600c688e08b3abb1a2cdacc83..d7939c0542599bc42711fa6773f450189e83aa7f 100644 (file)
@@ -168,8 +168,9 @@ struct drm_mm_node {
        struct rb_node rb_hole_addr;
        u64 __subtree_last;
        u64 hole_size;
-       bool allocated : 1;
-       bool scanned_block : 1;
+       unsigned long flags;
+#define DRM_MM_NODE_ALLOCATED_BIT      0
+#define DRM_MM_NODE_SCANNED_BIT                1
 #ifdef CONFIG_DRM_DEBUG_MM
        depot_stack_handle_t stack;
 #endif
@@ -253,7 +254,7 @@ struct drm_mm_scan {
  */
 static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
 {
-       return node->allocated;
+       return test_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
 }
 
 /**