asedeno.scripts.mit.edu Git - linux.git/commitdiff
bpf: group memory related fields in struct bpf_map_memory
Author: Roman Gushchin <guro@fb.com>
Thu, 30 May 2019 01:03:57 +0000 (18:03 -0700)
Committer: Alexei Starovoitov <ast@kernel.org>
Fri, 31 May 2019 23:52:56 +0000 (16:52 -0700)
Group "user" and "pages" fields of bpf_map into the bpf_map_memory
structure. Later it can be extended with "memcg" and other related
information.

The main reason for such a change (besides cosmetics) is to pass
bpf_map_memory structure to charging functions before the actual
allocation of bpf_map.

Signed-off-by: Roman Gushchin <guro@fb.com>
Acked-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
14 files changed:
include/linux/bpf.h
kernel/bpf/arraymap.c
kernel/bpf/cpumap.c
kernel/bpf/devmap.c
kernel/bpf/hashtab.c
kernel/bpf/local_storage.c
kernel/bpf/lpm_trie.c
kernel/bpf/queue_stack_maps.c
kernel/bpf/reuseport_array.c
kernel/bpf/stackmap.c
kernel/bpf/syscall.c
kernel/bpf/xskmap.c
net/core/bpf_sk_storage.c
net/core/sock_map.c

index 2cc58fc0f41302beda9d3455733304a6b74b5271..2e7c1c40d949f1ea96ecaa13de10947f2380f8f5 100644 (file)
@@ -66,6 +66,11 @@ struct bpf_map_ops {
                                     u64 imm, u32 *off);
 };
 
+struct bpf_map_memory {
+       u32 pages;
+       struct user_struct *user;
+};
+
 struct bpf_map {
        /* The first two cachelines with read-mostly members of which some
         * are also accessed in fast-path (e.g. ops, max_entries).
@@ -86,7 +91,7 @@ struct bpf_map {
        u32 btf_key_type_id;
        u32 btf_value_type_id;
        struct btf *btf;
-       u32 pages;
+       struct bpf_map_memory memory;
        bool unpriv_array;
        bool frozen; /* write-once */
        /* 48 bytes hole */
@@ -94,8 +99,7 @@ struct bpf_map {
        /* The 3rd and 4th cacheline with misc members to avoid false sharing
         * particularly with refcounting.
         */
-       struct user_struct *user ____cacheline_aligned;
-       atomic_t refcnt;
+       atomic_t refcnt ____cacheline_aligned;
        atomic_t usercnt;
        struct work_struct work;
        char name[BPF_OBJ_NAME_LEN];
index 584636c9e2eb0e27f235990f5e1306877bb7aad1..8fda24e78193a342674c6c53176dccf90f69e6ee 100644 (file)
@@ -138,7 +138,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 
        /* copy mandatory map attributes */
        bpf_map_init_from_attr(&array->map, attr);
-       array->map.pages = cost;
+       array->map.memory.pages = cost;
        array->elem_size = elem_size;
 
        if (percpu && bpf_array_alloc_percpu(array)) {
index cf727d77c6c6be3cfb3ae656c7061aa0709ac207..035268add72459fab8c8a8f4e0436ec817c00e23 100644 (file)
@@ -108,10 +108,10 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
        cost += cpu_map_bitmap_size(attr) * num_possible_cpus();
        if (cost >= U32_MAX - PAGE_SIZE)
                goto free_cmap;
-       cmap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+       cmap->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
        /* Notice returns -EPERM on if map size is larger than memlock limit */
-       ret = bpf_map_precharge_memlock(cmap->map.pages);
+       ret = bpf_map_precharge_memlock(cmap->map.memory.pages);
        if (ret) {
                err = ret;
                goto free_cmap;
index 1e525d70f83354e451b738ffb8e42d83b5fa932f..f6c57efb1d0dda697e00a00474dcc0f618d01fe2 100644 (file)
@@ -111,10 +111,10 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
        if (cost >= U32_MAX - PAGE_SIZE)
                goto free_dtab;
 
-       dtab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+       dtab->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
        /* if map size is larger than memlock limit, reject it early */
-       err = bpf_map_precharge_memlock(dtab->map.pages);
+       err = bpf_map_precharge_memlock(dtab->map.memory.pages);
        if (err)
                goto free_dtab;
 
index 0f2708fde5f7b162ca5bf7b1da2945415a4ba67b..15bf228d2e98e2261c51784ec92012b95232f1b9 100644 (file)
@@ -364,10 +364,10 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
                /* make sure page count doesn't overflow */
                goto free_htab;
 
-       htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+       htab->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
        /* if map size is larger than memlock limit, reject it early */
-       err = bpf_map_precharge_memlock(htab->map.pages);
+       err = bpf_map_precharge_memlock(htab->map.memory.pages);
        if (err)
                goto free_htab;
 
index e48302ecb3893a5e93385dc57f0e5bf3e355d0a3..574325276650c0d46051ccaa0fb44eb46acfc755 100644 (file)
@@ -303,7 +303,7 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
        if (!map)
                return ERR_PTR(-ENOMEM);
 
-       map->map.pages = pages;
+       map->map.memory.pages = pages;
 
        /* copy mandatory map attributes */
        bpf_map_init_from_attr(&map->map, attr);
index e61630c2e50b28a3e342c4684b9be8e8141784a9..8e423a582760eaa928a75c2e44d42e764355ecf1 100644 (file)
@@ -578,9 +578,9 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
                goto out_err;
        }
 
-       trie->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+       trie->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
-       ret = bpf_map_precharge_memlock(trie->map.pages);
+       ret = bpf_map_precharge_memlock(trie->map.memory.pages);
        if (ret)
                goto out_err;
 
index 0b140d2368896d2d154bc0d0fa90b82aa238061c..8a510e71d486733bfda687ccd2c233a70065c857 100644 (file)
@@ -89,7 +89,7 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
 
        bpf_map_init_from_attr(&qs->map, attr);
 
-       qs->map.pages = cost;
+       qs->map.memory.pages = cost;
        qs->size = size;
 
        raw_spin_lock_init(&qs->lock);
index 18e225de80ff0edf1b12845b32a0f7098f2457be..819515242739e60ec9e17b6da972a5edbfee0a13 100644 (file)
@@ -176,7 +176,7 @@ static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)
 
        /* copy mandatory map attributes */
        bpf_map_init_from_attr(&array->map, attr);
-       array->map.pages = cost;
+       array->map.memory.pages = cost;
 
        return &array->map;
 }
index 950ab2f28922e3cbc341700f6d67d645d5185d73..08d4efff73ac8dd8b62fb5a3ad91287f16e1e6d6 100644 (file)
@@ -131,9 +131,9 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
        bpf_map_init_from_attr(&smap->map, attr);
        smap->map.value_size = value_size;
        smap->n_buckets = n_buckets;
-       smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+       smap->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
-       err = bpf_map_precharge_memlock(smap->map.pages);
+       err = bpf_map_precharge_memlock(smap->map.memory.pages);
        if (err)
                goto free_smap;
 
index 1539774d78c7f9857c5451e8a0c8f6f04a943e47..8289a2ce14fc3ebb3fad69ae9fc45de8907034bf 100644 (file)
@@ -222,19 +222,20 @@ static int bpf_map_init_memlock(struct bpf_map *map)
        struct user_struct *user = get_current_user();
        int ret;
 
-       ret = bpf_charge_memlock(user, map->pages);
+       ret = bpf_charge_memlock(user, map->memory.pages);
        if (ret) {
                free_uid(user);
                return ret;
        }
-       map->user = user;
+       map->memory.user = user;
        return ret;
 }
 
 static void bpf_map_release_memlock(struct bpf_map *map)
 {
-       struct user_struct *user = map->user;
-       bpf_uncharge_memlock(user, map->pages);
+       struct user_struct *user = map->memory.user;
+
+       bpf_uncharge_memlock(user, map->memory.pages);
        free_uid(user);
 }
 
@@ -242,17 +243,17 @@ int bpf_map_charge_memlock(struct bpf_map *map, u32 pages)
 {
        int ret;
 
-       ret = bpf_charge_memlock(map->user, pages);
+       ret = bpf_charge_memlock(map->memory.user, pages);
        if (ret)
                return ret;
-       map->pages += pages;
+       map->memory.pages += pages;
        return ret;
 }
 
 void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages)
 {
-       bpf_uncharge_memlock(map->user, pages);
-       map->pages -= pages;
+       bpf_uncharge_memlock(map->memory.user, pages);
+       map->memory.pages -= pages;
 }
 
 static int bpf_map_alloc_id(struct bpf_map *map)
@@ -395,7 +396,7 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
                   map->value_size,
                   map->max_entries,
                   map->map_flags,
-                  map->pages * 1ULL << PAGE_SHIFT,
+                  map->memory.pages * 1ULL << PAGE_SHIFT,
                   map->id,
                   READ_ONCE(map->frozen));
 
index 686d244e798dfcc06bfd145a688facf080d62f3e..f816ee1a0fa0bc6f9e19a7ab9fb7244d967c21d3 100644 (file)
@@ -40,10 +40,10 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
        if (cost >= U32_MAX - PAGE_SIZE)
                goto free_m;
 
-       m->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+       m->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
        /* Notice returns -EPERM on if map size is larger than memlock limit */
-       err = bpf_map_precharge_memlock(m->map.pages);
+       err = bpf_map_precharge_memlock(m->map.memory.pages);
        if (err)
                goto free_m;
 
index 9a8aaf8e235d3cd5021c381e41c2ce27ea6e9591..92581c3ff220921bdba1e44292d92ac4b182507b 100644 (file)
@@ -659,7 +659,7 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
        smap->elem_size = sizeof(struct bpf_sk_storage_elem) + attr->value_size;
        smap->cache_idx = (unsigned int)atomic_inc_return(&cache_idx) %
                BPF_SK_STORAGE_CACHE_SIZE;
-       smap->map.pages = pages;
+       smap->map.memory.pages = pages;
 
        return &smap->map;
 }
index be6092ac69f8ac2c69144defd80f766dbff6df22..4eb5b6a1b29f802114f75b9ec8a1c5fff10ee901 100644 (file)
@@ -49,8 +49,8 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
                goto free_stab;
        }
 
-       stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
-       err = bpf_map_precharge_memlock(stab->map.pages);
+       stab->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+       err = bpf_map_precharge_memlock(stab->map.memory.pages);
        if (err)
                goto free_stab;