2 * Copyright (c) 2016 Facebook
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
19 #include <sys/resource.h>
24 #define LOCAL_FREE_TARGET (128)
25 #define PERCPU_FREE_TARGET (16)
/* Create a BPF map of the given type and flags, with u64 keys and u64
 * values and @size max entries.  Returns the map fd, or -1 on failure
 * (error reported via perror).  NOTE(review): this chunk is a partial
 * extraction — interior lines of the function are elided.
 */
29 static int create_map(int map_type, int map_flags, unsigned int size)
33 map_fd = bpf_map_create(map_type, sizeof(unsigned long long),
34 sizeof(unsigned long long), size, map_flags);
37 perror("bpf_map_create");
/* Check that every key present in map1 is also present in map0 with a
 * matching value (only slot 0 of the per-cpu value array is compared).
 * Prints a diagnostic on the first mismatch.  (Partial excerpt: the
 * return paths are elided from this view.)
 */
42 static int map_subset(int map0, int map1)
44 unsigned long long next_key = 0;
45 unsigned long long value0[nr_cpus], value1[nr_cpus];
/* Iterate all keys of map1 and look each one up in map0. */
48 while (!bpf_map_next_key(map1, &next_key, &next_key)) {
49 assert(!bpf_map_lookup(map1, &next_key, value1));
50 ret = bpf_map_lookup(map0, &next_key, value0);
52 printf("key:%llu not found from map. %s(%d)\n",
53 next_key, strerror(errno), errno);
/* Only the first per-cpu slot is compared. */
56 if (value0[0] != value1[0]) {
57 printf("key:%llu value0:%llu != value1:%llu\n",
58 next_key, value0[0], value1[0]);
/* Two maps are equal iff each one is a subset of the other. */
65 static int map_equal(int lru_map, int expected)
67 return map_subset(lru_map, expected) && map_subset(expected, lru_map);
/* Pin @pid to the next schedulable CPU at or after *next_to_try,
 * advancing *next_to_try past the CPU tried.  Returns -1 when no
 * further CPU could be scheduled — presumably so callers can walk all
 * online CPUs; the success-path lines are elided from this view.
 */
70 static int sched_next_online(int pid, int *next_to_try)
73 int next = *next_to_try;
76 while (next < nr_cpus) {
78 CPU_SET(next++, &cpuset);
79 if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset)) {
89 /* Size of the LRU map is 2
94 * => Key=2 will be removed by LRU
95 * Iterate map. Only found key=1 and key=3
/* Basic LRU eviction on a 2-entry map: insert key=1 and key=2, look up
 * key=1 (which sets its ref bit), then insert key=3.  The insertion of
 * key=3 evicts the unreferenced key=2, so the map ends up holding
 * exactly {1, 3}.  Also exercises the BPF_NOEXIST/BPF_EXIST update
 * flag semantics and an invalid flag value (-1).
 * (Partial excerpt: several interior lines are elided.)
 */
97 static void test_lru_sanity0(int map_type, int map_flags)
99 unsigned long long key, value[nr_cpus];
100 int lru_map_fd, expected_map_fd;
103 printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
106 assert(sched_next_online(0, &next_cpu) != -1);
/* Per-cpu LRU needs 2 entries per possible CPU to hold the same
 * logical content as a 2-entry common-LRU map.
 */
108 if (map_flags & BPF_F_NO_COMMON_LRU)
109 lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);
111 lru_map_fd = create_map(map_type, map_flags, 2);
112 assert(lru_map_fd != -1);
/* A plain hash map records the keys we expect to survive eviction. */
114 expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, 2);
115 assert(expected_map_fd != -1);
119 /* insert key=1 element */
122 assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
123 assert(!bpf_map_update(expected_map_fd, &key, value, BPF_NOEXIST));
125 /* BPF_NOEXIST means: add new element if it doesn't exist */
126 assert(bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST) == -1 &&
127 /* key=1 already exists */
/* An unknown flags value must be rejected by the kernel. */
130 assert(bpf_map_update(lru_map_fd, &key, value, -1) == -1 &&
133 /* insert key=2 element */
135 /* check that key=2 is not found */
137 assert(bpf_map_lookup(lru_map_fd, &key, value) == -1 &&
140 /* BPF_EXIST means: update existing element */
141 assert(bpf_map_update(lru_map_fd, &key, value, BPF_EXIST) == -1 &&
142 /* key=2 is not there */
145 assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
147 /* insert key=3 element */
149 /* check that key=3 is not found */
151 assert(bpf_map_lookup(lru_map_fd, &key, value) == -1 &&
154 /* check that key=1 can be found and mark the ref bit to
155 * stop LRU from removing key=1
158 assert(!bpf_map_lookup(lru_map_fd, &key, value));
159 assert(value[0] == 1234);
/* Inserting key=3 must force an eviction of the unreferenced key=2. */
162 assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
163 assert(!bpf_map_update(expected_map_fd, &key, value, BPF_NOEXIST));
165 /* key=2 has been removed from the LRU */
167 assert(bpf_map_lookup(lru_map_fd, &key, value) == -1);
/* The surviving LRU content must match the expected hash map. */
169 assert(map_equal(lru_map_fd, expected_map_fd));
171 close(expected_map_fd);
177 /* Size of the LRU map is 1.5*tgt_free
178 * Insert 1 to tgt_free (+tgt_free keys)
179 * Lookup 1 to tgt_free/2
180 * Insert 1+tgt_free to 2*tgt_free (+tgt_free keys)
181 * => 1+tgt_free/2 to LOCAL_FREE_TARGET will be removed by LRU
/* LRU map sized 1.5*tgt_free: insert keys 1..tgt_free, reference the
 * first half via lookup, then insert tgt_free more keys.  The
 * unreferenced second half of the first batch is evicted by the LRU
 * shrink; the referenced first half and the new batch must survive.
 * Not applicable to BPF_F_NO_COMMON_LRU (see comment below).
 * (Partial excerpt: several interior lines are elided.)
 */
183 static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
185 unsigned long long key, end_key, value[nr_cpus];
186 int lru_map_fd, expected_map_fd;
187 unsigned int batch_size;
188 unsigned int map_size;
191 if (map_flags & BPF_F_NO_COMMON_LRU)
192 /* The per-cpu LRU list (i.e. each cpu has its own LRU
193 * list) does not have a local free list. Hence,
194 * it will only free old nodes till there is no free
195 * from the LRU list. Hence, this test does not apply
196 * to BPF_F_NO_COMMON_LRU
200 printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
203 assert(sched_next_online(0, &next_cpu) != -1);
/* tgt_free must be even so the two half-batches partition it. */
205 batch_size = tgt_free / 2;
206 assert(batch_size * 2 == tgt_free);
208 map_size = tgt_free + batch_size;
209 lru_map_fd = create_map(map_type, map_flags, map_size);
210 assert(lru_map_fd != -1);
212 expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
213 assert(expected_map_fd != -1);
217 /* Insert 1 to tgt_free (+tgt_free keys) */
218 end_key = 1 + tgt_free;
219 for (key = 1; key < end_key; key++)
220 assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
222 /* Lookup 1 to tgt_free/2 */
/* Looking these keys up sets their ref bit, protecting them from the
 * upcoming shrink; they are recorded as expected survivors.
 */
223 end_key = 1 + batch_size;
224 for (key = 1; key < end_key; key++) {
225 assert(!bpf_map_lookup(lru_map_fd, &key, value));
226 assert(!bpf_map_update(expected_map_fd, &key, value,
230 /* Insert 1+tgt_free to 2*tgt_free
231 * => 1+tgt_free/2 to LOCAL_FREE_TARGET will be
235 end_key = key + tgt_free;
236 for (; key < end_key; key++) {
237 assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
238 assert(!bpf_map_update(expected_map_fd, &key, value,
242 assert(map_equal(lru_map_fd, expected_map_fd));
244 close(expected_map_fd);
250 /* Size of the LRU map 1.5 * tgt_free
251 * Insert 1 to tgt_free (+tgt_free keys)
252 * Update 1 to tgt_free/2
253 * => The original 1 to tgt_free/2 will be removed due to
254 * the LRU shrink process
255 * Re-insert 1 to tgt_free/2 again and do a lookup immediately
256 * Insert 1+tgt_free to tgt_free*3/2
257 * Insert 1+tgt_free*3/2 to tgt_free*5/2
258 * => Key 1+tgt_free to tgt_free*3/2
259 * will be removed from LRU because it has never
260 * been looked up and the ref bit is not set
/* LRU map sized 1.5*tgt_free: fill with keys 1..tgt_free, trigger a
 * shrink (which evicts the oldest 1..tgt_free/2), re-insert and
 * immediately look up those keys, then insert two more batches.  The
 * never-referenced middle batch is evicted; everything else survives.
 * Not applicable to BPF_F_NO_COMMON_LRU (see comment below).
 * (Partial excerpt: several interior lines are elided.)
 */
262 static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
264 unsigned long long key, value[nr_cpus];
265 unsigned long long end_key;
266 int lru_map_fd, expected_map_fd;
267 unsigned int batch_size;
268 unsigned int map_size;
271 if (map_flags & BPF_F_NO_COMMON_LRU)
272 /* The per-cpu LRU list (i.e. each cpu has its own LRU
273 * list) does not have a local free list. Hence,
274 * it will only free old nodes till there is no free
275 * from the LRU list. Hence, this test does not apply
276 * to BPF_F_NO_COMMON_LRU
280 printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
283 assert(sched_next_online(0, &next_cpu) != -1);
/* tgt_free must be even so the half-batches partition it. */
285 batch_size = tgt_free / 2;
286 assert(batch_size * 2 == tgt_free);
288 map_size = tgt_free + batch_size;
289 if (map_flags & BPF_F_NO_COMMON_LRU)
290 lru_map_fd = create_map(map_type, map_flags,
293 lru_map_fd = create_map(map_type, map_flags, map_size);
294 assert(lru_map_fd != -1);
296 expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
297 assert(expected_map_fd != -1);
301 /* Insert 1 to tgt_free (+tgt_free keys) */
302 end_key = 1 + tgt_free;
303 for (key = 1; key < end_key; key++)
304 assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
306 /* Any bpf_map_update will require to acquire a new node
309 * The local list is running out of free nodes.
310 * It gets from the global LRU list which tries to
311 * shrink the inactive list to get tgt_free
312 * number of free nodes.
314 * Hence, the oldest key 1 to tgt_free/2
315 * are removed from the LRU list.
/* The per-cpu flavor needs an insert+delete to trigger the shrink;
 * the common flavor uses a failing BPF_EXIST update instead.
 */
318 if (map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
319 assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
320 assert(!bpf_map_delete(lru_map_fd, &key));
322 assert(bpf_map_update(lru_map_fd, &key, value, BPF_EXIST));
325 /* Re-insert 1 to tgt_free/2 again and do a lookup
328 end_key = 1 + batch_size;
/* Each key must be gone (first lookup fails), then re-inserted and
 * found with the new value, marking its ref bit.
 */
330 for (key = 1; key < end_key; key++) {
331 assert(bpf_map_lookup(lru_map_fd, &key, value));
332 assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
333 assert(!bpf_map_lookup(lru_map_fd, &key, value));
334 assert(value[0] == 4321);
335 assert(!bpf_map_update(expected_map_fd, &key, value,
341 /* Insert 1+tgt_free to tgt_free*3/2 */
342 end_key = 1 + tgt_free + batch_size;
343 for (key = 1 + tgt_free; key < end_key; key++)
344 /* These newly added but not referenced keys will be
345 * gone during the next LRU shrink.
347 assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
349 /* Insert 1+tgt_free*3/2 to tgt_free*5/2 */
350 end_key = key + tgt_free;
351 for (; key < end_key; key++) {
352 assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
353 assert(!bpf_map_update(expected_map_fd, &key, value,
357 assert(map_equal(lru_map_fd, expected_map_fd));
359 close(expected_map_fd);
365 /* Size of the LRU map is 2*tgt_free
366 * It is to test the active/inactive list rotation
367 * Insert 1 to 2*tgt_free (+2*tgt_free keys)
368 * Lookup key 1 to tgt_free*3/2
369 * Add 1+2*tgt_free to tgt_free*5/2 (+tgt_free/2 keys)
370 * => key 1+tgt_free*3/2 to 2*tgt_free are removed from LRU
/* Active/inactive list rotation test on a 2*tgt_free map: insert
 * 2*tgt_free keys, look up the first 3/2*tgt_free (setting their ref
 * bits), then insert tgt_free/2 more keys.  The unreferenced tail of
 * the first batch is evicted; everything referenced or new survives.
 * (Partial excerpt: several interior lines are elided.)
 */
372 static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
374 unsigned long long key, end_key, value[nr_cpus];
375 int lru_map_fd, expected_map_fd;
376 unsigned int batch_size;
377 unsigned int map_size;
380 printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
383 assert(sched_next_online(0, &next_cpu) != -1);
/* tgt_free must be even so the half-batch partitions it. */
385 batch_size = tgt_free / 2;
386 assert(batch_size * 2 == tgt_free);
388 map_size = tgt_free * 2;
389 if (map_flags & BPF_F_NO_COMMON_LRU)
390 lru_map_fd = create_map(map_type, map_flags,
393 lru_map_fd = create_map(map_type, map_flags, map_size);
394 assert(lru_map_fd != -1);
/* Plain hash map tracking the keys expected to survive. */
396 expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size);
397 assert(expected_map_fd != -1);
401 /* Insert 1 to 2*tgt_free (+2*tgt_free keys) */
402 end_key = 1 + (2 * tgt_free);
403 for (key = 1; key < end_key; key++)
404 assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
406 /* Lookup key 1 to tgt_free*3/2 */
/* The lookups set ref bits; these keys are expected survivors. */
407 end_key = tgt_free + batch_size;
408 for (key = 1; key < end_key; key++) {
409 assert(!bpf_map_lookup(lru_map_fd, &key, value));
410 assert(!bpf_map_update(expected_map_fd, &key, value,
414 /* Add 1+2*tgt_free to tgt_free*5/2
417 key = 2 * tgt_free + 1;
418 end_key = key + batch_size;
419 for (; key < end_key; key++) {
420 assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
421 assert(!bpf_map_update(expected_map_fd, &key, value,
425 assert(map_equal(lru_map_fd, expected_map_fd));
427 close(expected_map_fd);
/* Exercise deletion interacting with the LRU free list on a map sized
 * 3*tgt_free: fill 2*tgt_free keys, look up the first tgt_free
 * (expected survivors), delete the second tgt_free (double-delete must
 * fail), then insert 2*tgt_free fresh keys into the freed space.
 * NOTE(review): several interior lines are elided from this view —
 * the expected-failure update on line 463 presumably probes map-full
 * or eviction behavior; confirm against the full source.
 */
434 static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free)
436 int lru_map_fd, expected_map_fd;
437 unsigned long long key, value[nr_cpus];
438 unsigned long long end_key;
441 printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
444 assert(sched_next_online(0, &next_cpu) != -1);
/* Per-cpu LRU scales the size by the number of possible CPUs. */
446 if (map_flags & BPF_F_NO_COMMON_LRU)
447 lru_map_fd = create_map(map_type, map_flags,
448 3 * tgt_free * nr_cpus);
450 lru_map_fd = create_map(map_type, map_flags, 3 * tgt_free);
451 assert(lru_map_fd != -1);
453 expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0,
455 assert(expected_map_fd != -1);
459 for (key = 1; key <= 2 * tgt_free; key++)
460 assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
/* This update is expected to FAIL (note: no '!'). */
463 assert(bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
/* Reference keys 1..tgt_free and record them as survivors. */
465 for (key = 1; key <= tgt_free; key++) {
466 assert(!bpf_map_lookup(lru_map_fd, &key, value));
467 assert(!bpf_map_update(expected_map_fd, &key, value,
/* Delete tgt_free+1..2*tgt_free; a second delete of the same key
 * must fail (note: no '!' on the second call).
 */
471 for (; key <= 2 * tgt_free; key++) {
472 assert(!bpf_map_delete(lru_map_fd, &key));
473 assert(bpf_map_delete(lru_map_fd, &key));
476 end_key = key + 2 * tgt_free;
477 for (; key < end_key; key++) {
478 assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
479 assert(!bpf_map_update(expected_map_fd, &key, value,
483 assert(map_equal(lru_map_fd, expected_map_fd));
485 close(expected_map_fd);
/* Per-CPU worker for test_lru_sanity5, run in a forked child pinned to
 * one CPU: verify @last_key (inserted by the previous CPU) is present,
 * insert a new key into the 1-entry map, and verify the insertion
 * evicted @last_key.  (Partial excerpt: the lines choosing the new
 * key are elided.)
 */
491 static void do_test_lru_sanity5(unsigned long long last_key, int map_fd)
493 unsigned long long key, value[nr_cpus];
495 /* Ensure the last key inserted by previous CPU can be found */
496 assert(!bpf_map_lookup(map_fd, &last_key, value));
/* The map holds only one element, so this insert evicts last_key. */
501 assert(!bpf_map_update(map_fd, &key, value, BPF_NOEXIST));
502 assert(!bpf_map_lookup(map_fd, &key, value));
504 /* Cannot find the last key because it was removed by LRU */
505 assert(bpf_map_lookup(map_fd, &last_key, value));
/* Test map with only one element: walk the online CPUs, forking a
 * child pinned to each; every child checks the previous CPU's key is
 * visible, replaces it, and the parent waits for the child.  Skipped
 * for BPF_F_NO_COMMON_LRU since the map is shared across CPUs here.
 * (Partial excerpt: several interior lines are elided.)
 */
509 static void test_lru_sanity5(int map_type, int map_flags)
511 unsigned long long key, value[nr_cpus];
515 if (map_flags & BPF_F_NO_COMMON_LRU)
518 printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
/* Single-entry common LRU map shared by all forked children. */
521 map_fd = create_map(map_type, map_flags, 1);
522 assert(map_fd != -1);
526 assert(!bpf_map_update(map_fd, &key, value, BPF_NOEXIST));
/* One child per schedulable CPU; sched_next_online() advances the
 * CPU cursor until no CPU is left.
 */
528 while (sched_next_online(0, &next_cpu) != -1) {
533 do_test_lru_sanity5(key, map_fd);
535 } else if (pid == -1) {
536 printf("couldn't spawn process to test key:%llu\n",
/* Parent: reap the child before moving to the next CPU. */
542 assert(waitpid(pid, &status, 0) == pid);
549 /* At least one key should be tested */
555 int main(int argc, char **argv)
557 struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
558 int map_types[] = {BPF_MAP_TYPE_LRU_HASH,
559 BPF_MAP_TYPE_LRU_PERCPU_HASH};
560 int map_flags[] = {0, BPF_F_NO_COMMON_LRU};
563 setbuf(stdout, NULL);
565 assert(!setrlimit(RLIMIT_MEMLOCK, &r));
567 nr_cpus = bpf_num_possible_cpus();
568 assert(nr_cpus != -1);
569 printf("nr_cpus:%d\n\n", nr_cpus);
571 for (f = 0; f < sizeof(map_flags) / sizeof(*map_flags); f++) {
572 unsigned int tgt_free = (map_flags[f] & BPF_F_NO_COMMON_LRU) ?
573 PERCPU_FREE_TARGET : LOCAL_FREE_TARGET;
575 for (t = 0; t < sizeof(map_types) / sizeof(*map_types); t++) {
576 test_lru_sanity0(map_types[t], map_flags[f]);
577 test_lru_sanity1(map_types[t], map_flags[f], tgt_free);
578 test_lru_sanity2(map_types[t], map_flags[f], tgt_free);
579 test_lru_sanity3(map_types[t], map_flags[f], tgt_free);
580 test_lru_sanity4(map_types[t], map_flags[f], tgt_free);
581 test_lru_sanity5(map_types[t], map_flags[f]);