Group "user" and "pages" fields of bpf_map into the bpf_map_memory
structure. Later it can be extended with "memcg" and other related
information.
The main reason for such a change (besides cosmetics) is to be able
to pass the bpf_map_memory structure to charging functions before the
actual allocation of bpf_map.
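
For illustration, a rough sketch of the kind of flow this enables;
the helpers bpf_map_charge_init/_finish/_move and struct example_map
are illustrative only, not part of this patch:

	struct example_map {
		struct bpf_map map;
		/* map-specific state would follow */
	};

	static struct bpf_map *example_map_alloc(union bpf_attr *attr,
						 u64 cost)
	{
		struct bpf_map_memory mem;
		struct example_map *emap;
		int err;

		/* charge the memlock budget before anything is allocated */
		err = bpf_map_charge_init(&mem, cost);
		if (err)
			return ERR_PTR(err);

		emap = bpf_map_area_alloc(sizeof(*emap),
					  bpf_map_attr_numa_node(attr));
		if (!emap) {
			/* undo the charge if the allocation fails */
			bpf_map_charge_finish(&mem);
			return ERR_PTR(-ENOMEM);
		}

		/* on success, hand the charged state over to the map */
		bpf_map_charge_move(&emap->map.memory, &mem);
		return &emap->map;
	}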
Signed-off-by: Roman Gushchin <guro@fb.com>
Acked-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
14 files changed:
+struct bpf_map_memory {
+ u32 pages;
+ struct user_struct *user;
+};
+
struct bpf_map {
/* The first two cachelines with read-mostly members of which some
* are also accessed in fast-path (e.g. ops, max_entries).
*/
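As the commit message notes, grouping these fields leaves room for
growth; a hypothetical later extension (not part of this patch) might
look like:

	struct bpf_map_memory {
		u32 pages;
		struct user_struct *user;
		struct mem_cgroup *memcg;	/* hypothetical future field */
	};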
u32 btf_key_type_id;
u32 btf_value_type_id;
struct btf *btf;
+ struct bpf_map_memory memory;
bool unpriv_array;
bool frozen; /* write-once */
/* 48 bytes hole */
/* The 3rd and 4th cacheline with misc members to avoid false sharing
* particularly with refcounting.
*/
- struct user_struct *user ____cacheline_aligned;
- atomic_t refcnt;
+ atomic_t refcnt ____cacheline_aligned;
atomic_t usercnt;
struct work_struct work;
char name[BPF_OBJ_NAME_LEN];
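Since "user" used to be the ____cacheline_aligned member opening the
misc group, the attribute moves to "refcnt", which now starts that
cacheline. A minimal sketch of a compile-time check for this layout
(hypothetical, not part of the patch):

	static void __maybe_unused bpf_map_layout_check(void)
	{
		/* refcnt must begin a fresh cacheline */
		BUILD_BUG_ON(offsetof(struct bpf_map, refcnt) %
			     SMP_CACHE_BYTES);
	}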
/* copy mandatory map attributes */
bpf_map_init_from_attr(&array->map, attr);
- array->map.pages = cost;
+ array->map.memory.pages = cost;
array->elem_size = elem_size;
if (percpu && bpf_array_alloc_percpu(array)) {
cost += cpu_map_bitmap_size(attr) * num_possible_cpus();
if (cost >= U32_MAX - PAGE_SIZE)
goto free_cmap;
- cmap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+ cmap->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
/* Notice returns -EPERM on if map size is larger than memlock limit */
- ret = bpf_map_precharge_memlock(cmap->map.pages);
+ ret = bpf_map_precharge_memlock(cmap->map.memory.pages);
if (ret) {
err = ret;
goto free_cmap;
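The same accounting idiom repeats across the map types touched below:
the byte cost is computed in u64, rejected if it cannot fit the u32
page counter, and then rounded up to whole pages before precharging.
A condensed sketch (example_set_pages is hypothetical; the exact
error code varies per map type):

	static int example_set_pages(struct bpf_map *map, u64 cost)
	{
		/* memory.pages is u32, so cap the byte cost first */
		if (cost >= U32_MAX - PAGE_SIZE)
			return -E2BIG;

		map->memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

		/* -EPERM if this exceeds the RLIMIT_MEMLOCK budget */
		return bpf_map_precharge_memlock(map->memory.pages);
	}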
if (cost >= U32_MAX - PAGE_SIZE)
goto free_dtab;
- dtab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+ dtab->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
/* if map size is larger than memlock limit, reject it early */
- err = bpf_map_precharge_memlock(dtab->map.pages);
+ err = bpf_map_precharge_memlock(dtab->map.memory.pages);
/* make sure page count doesn't overflow */
goto free_htab;
- htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+ htab->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
/* if map size is larger than memlock limit, reject it early */
- err = bpf_map_precharge_memlock(htab->map.pages);
+ err = bpf_map_precharge_memlock(htab->map.memory.pages);
if (!map)
return ERR_PTR(-ENOMEM);
- map->map.pages = pages;
+ map->map.memory.pages = pages;
/* copy mandatory map attributes */
bpf_map_init_from_attr(&map->map, attr);
- trie->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+ trie->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
- ret = bpf_map_precharge_memlock(trie->map.pages);
+ ret = bpf_map_precharge_memlock(trie->map.memory.pages);
bpf_map_init_from_attr(&qs->map, attr);
- qs->map.pages = cost;
+ qs->map.memory.pages = cost;
qs->size = size;
raw_spin_lock_init(&qs->lock);
/* copy mandatory map attributes */
bpf_map_init_from_attr(&array->map, attr);
- array->map.pages = cost;
+ array->map.memory.pages = cost;
bpf_map_init_from_attr(&smap->map, attr);
smap->map.value_size = value_size;
smap->n_buckets = n_buckets;
- smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+ smap->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
- err = bpf_map_precharge_memlock(smap->map.pages);
+ err = bpf_map_precharge_memlock(smap->map.memory.pages);
struct user_struct *user = get_current_user();
int ret;
- ret = bpf_charge_memlock(user, map->pages);
+ ret = bpf_charge_memlock(user, map->memory.pages);
if (ret) {
free_uid(user);
return ret;
}
- map->user = user;
+ map->memory.user = user;
return ret;
}
static void bpf_map_release_memlock(struct bpf_map *map)
{
- struct user_struct *user = map->user;
- bpf_uncharge_memlock(user, map->pages);
+ struct user_struct *user = map->memory.user;
+
+ bpf_uncharge_memlock(user, map->memory.pages);
- ret = bpf_charge_memlock(map->user, pages);
+ ret = bpf_charge_memlock(map->memory.user, pages);
if (ret)
return ret;
- map->pages += pages;
+ map->memory.pages += pages;
return ret;
}
void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages)
{
- bpf_uncharge_memlock(map->user, pages);
- map->pages -= pages;
+ bpf_uncharge_memlock(map->memory.user, pages);
+ map->memory.pages -= pages;
}
static int bpf_map_alloc_id(struct bpf_map *map)
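bpf_map_charge_memlock() and bpf_map_uncharge_memlock() remain the
pair used when a map grows or shrinks after creation; both now work
on map->memory. A usage sketch (example_map_grow is hypothetical):

	static int example_map_grow(struct bpf_map *map, u32 extra_pages)
	{
		void *buf;
		int err;

		/* may fail with -EPERM against RLIMIT_MEMLOCK */
		err = bpf_map_charge_memlock(map, extra_pages);
		if (err)
			return err;

		buf = bpf_map_area_alloc((size_t)extra_pages << PAGE_SHIFT,
					 NUMA_NO_NODE);
		if (!buf) {
			bpf_map_uncharge_memlock(map, extra_pages);
			return -ENOMEM;
		}

		/* ... publish buf into the map's data structures ... */
		return 0;
	}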
map->value_size,
map->max_entries,
map->map_flags,
- map->pages * 1ULL << PAGE_SHIFT,
+ map->memory.pages * 1ULL << PAGE_SHIFT,
map->id,
READ_ONCE(map->frozen));
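Note that the fdinfo expression relies on C precedence: "*" binds
tighter than "<<", so the u32 page count is widened to u64 by the
multiplication before the shift, avoiding 32-bit overflow for large
maps. An equivalent, more explicit form:

	u64 memlock_bytes = (u64)map->memory.pages << PAGE_SHIFT;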
if (cost >= U32_MAX - PAGE_SIZE)
goto free_m;
- m->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+ m->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
/* Notice returns -EPERM on if map size is larger than memlock limit */
- err = bpf_map_precharge_memlock(m->map.pages);
+ err = bpf_map_precharge_memlock(m->map.memory.pages);
smap->elem_size = sizeof(struct bpf_sk_storage_elem) + attr->value_size;
smap->cache_idx = (unsigned int)atomic_inc_return(&cache_idx) %
BPF_SK_STORAGE_CACHE_SIZE;
- smap->map.pages = pages;
+ smap->map.memory.pages = pages;
- stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
- err = bpf_map_precharge_memlock(stab->map.pages);
+ stab->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+ err = bpf_map_precharge_memlock(stab->map.memory.pages);