diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 9194479a2fa7807e3eb4d3436944d6f36357ceb9..a1468e3f5af24e33acbfaa392db9770cea249668 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -56,6 +56,7 @@ struct htab_elem {
                        union {
                                struct bpf_htab *htab;
                                struct pcpu_freelist_node fnode;
+                               struct htab_elem *batch_flink;
                        };
                };
        };
@@ -126,6 +127,17 @@ static void htab_free_elems(struct bpf_htab *htab)
        bpf_map_area_free(htab->elems);
 }
 
+/* The LRU list has a lock (lru_lock). Each htab bucket has a lock
+ * (bucket_lock). If both locks need to be acquired together, the lock
+ * order is always lru_lock -> bucket_lock, and this only happens in
+ * the bpf_lru_list.c logic. For example, certain code paths of
+ * bpf_lru_pop_free(), which is called by prealloc_lru_pop(),
+ * acquire lru_lock first and then bucket_lock.
+ *
+ * In hashtab.c, to avoid deadlock, acquiring bucket_lock followed by
+ * lru_lock is not allowed. In such cases, bucket_lock must be
+ * released before lru_lock is acquired.
+ */
 static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
                                          u32 hash)
 {
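
For readers following the locking rule outside the kernel tree, the comment above can be pictured with ordinary pthread mutexes standing in for lru_lock and bucket_lock. The sketch below is an illustrative userspace approximation, not kernel code, and every function name in it is hypothetical. It shows the only two permitted patterns: take lru_lock before bucket_lock when both are needed (the bpf_lru_list.c side), or drop bucket_lock entirely before touching lru_lock (the hashtab.c side).

/* Illustrative userspace sketch of the ordering rule above.
 * pthread mutexes stand in for the kernel's raw spinlocks; all
 * names here are hypothetical and exist only for this example.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;

/* Allowed: the bpf_lru_list.c-style path. Both locks may be held
 * together only in the order lru_lock -> bucket_lock.
 */
static void lru_side_path(void)
{
        pthread_mutex_lock(&lru_lock);
        pthread_mutex_lock(&bucket_lock);       /* lru_lock -> bucket_lock */
        puts("lru path: shrink the LRU, steal an element from a bucket");
        pthread_mutex_unlock(&bucket_lock);
        pthread_mutex_unlock(&lru_lock);
}

/* Allowed: the hashtab.c-style path. The bucket lock is dropped
 * before anything that needs lru_lock runs; taking lru_lock while
 * still holding bucket_lock would invert the order and can deadlock
 * against lru_side_path().
 */
static void htab_side_path(void)
{
        pthread_mutex_lock(&bucket_lock);
        puts("htab path: unlink element under bucket_lock");
        pthread_mutex_unlock(&bucket_lock);

        pthread_mutex_lock(&lru_lock);
        puts("htab path: return element to the LRU after unlocking");
        pthread_mutex_unlock(&lru_lock);
}

int main(void)
{
        lru_side_path();
        htab_side_path();
        return 0;
}

The batch lookup-and-delete hunks further down follow the second pattern: elements are unlinked under the bucket lock and only handed back to the LRU once that lock has been released.
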
@@ -1256,6 +1268,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
        void __user *ukeys = u64_to_user_ptr(attr->batch.keys);
        void *ubatch = u64_to_user_ptr(attr->batch.in_batch);
        u32 batch, max_count, size, bucket_size;
+       struct htab_elem *node_to_free = NULL;
        u64 elem_map_flags, map_flags;
        struct hlist_nulls_head *head;
        struct hlist_nulls_node *n;
@@ -1388,10 +1401,18 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
                }
                if (do_delete) {
                        hlist_nulls_del_rcu(&l->hash_node);
-                       if (is_lru_map)
-                               bpf_lru_push_free(&htab->lru, &l->lru_node);
-                       else
+
+                       /* bpf_lru_push_free() will acquire lru_lock, which
+                        * may deadlock while bucket_lock is held. See the
+                        * comment above prealloc_lru_pop(). Defer
+                        * bpf_lru_push_free() until the bucket lock is released.
+                        */
+                       if (is_lru_map) {
+                               l->batch_flink = node_to_free;
+                               node_to_free = l;
+                       } else {
                                free_htab_elem(htab, l);
+                       }
                }
                dst_key += key_size;
                dst_val += value_size;
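
The next hunk drains the node_to_free list once the bucket lock has been dropped. As a self-contained illustration of the same defer-and-drain idea, here is a userspace sketch with hypothetical names and pthread mutexes in place of raw_spin_lock_irqsave(); it approximates the pattern and is not the kernel implementation.

/* Minimal userspace sketch of the defer-and-drain pattern: elements
 * removed while the bucket lock is held are chained through a pointer
 * embedded in the element (the role batch_flink plays in this patch)
 * and only handed to the "LRU" after the lock is released.
 * All names are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct elem {
        int key;
        struct elem *batch_flink;       /* next element to free later */
        struct elem *next;              /* bucket chain */
};

static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
static struct elem *bucket;             /* single bucket for brevity */

static void lru_push_free(struct elem *e)
{
        /* Stand-in for bpf_lru_push_free(): needs lru_lock. */
        pthread_mutex_lock(&lru_lock);
        printf("returning key %d to the LRU\n", e->key);
        pthread_mutex_unlock(&lru_lock);
        free(e);
}

static void delete_batch(void)
{
        struct elem *node_to_free = NULL;
        struct elem *e;

        pthread_mutex_lock(&bucket_lock);
        while ((e = bucket) != NULL) {
                bucket = e->next;               /* unlink under bucket_lock */
                e->batch_flink = node_to_free;  /* defer the LRU work */
                node_to_free = e;
        }
        pthread_mutex_unlock(&bucket_lock);

        /* Safe to take lru_lock now that bucket_lock is released. */
        while (node_to_free) {
                e = node_to_free;
                node_to_free = node_to_free->batch_flink;
                lru_push_free(e);
        }
}

int main(void)
{
        for (int key = 0; key < 3; key++) {
                struct elem *e = calloc(1, sizeof(*e));

                if (!e)
                        return 1;
                e->key = key;
                e->next = bucket;
                bucket = e;
        }
        delete_batch();
        return 0;
}

Threading the deferred list through a pointer embedded in the element itself means no memory has to be allocated while the bucket lock is held.
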
@@ -1399,6 +1420,13 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 
        raw_spin_unlock_irqrestore(&b->lock, flags);
        locked = false;
+
+       while (node_to_free) {
+               l = node_to_free;
+               node_to_free = node_to_free->batch_flink;
+               bpf_lru_push_free(&htab->lru, &l->lru_node);
+       }
+
 next_batch:
        /* If we are not copying data, we can go to next bucket and avoid
         * unlocking the rcu.