aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMartin KaFai Lau2017-04-14 12:30:28 -0500
committerDavid S. Miller2017-04-17 12:55:52 -0500
commit9fd63d05f3e8476282cd8c484eb34d3f6be54f40 (patch)
treef3fb9ca5e7f8e8be6114a29ee23b17357522000d /samples
parentbf8db5d243a103ccd3f6d82a110e2302608e248c (diff)
downloadkernel-9fd63d05f3e8476282cd8c484eb34d3f6be54f40.tar.gz
kernel-9fd63d05f3e8476282cd8c484eb34d3f6be54f40.tar.xz
kernel-9fd63d05f3e8476282cd8c484eb34d3f6be54f40.zip
bpf: Allow bpf sample programs (*_user.c) to change bpf_map_def
The current bpf_map_def is statically defined during compile time. This patch allows the *_user.c program to change it during runtime. It is done by adding load_bpf_file_fixup_map() which takes a callback. The callback will be called before creating each map so that it has a chance to modify the bpf_map_def. The current use case is to change max_entries in map_perf_test. It is interesting to test with a much bigger map size in some cases (e.g. the following patch on bpf_lru_map.c). However, it is hard to find one size to fit all testing environments. Hence, it is handy to take the max_entries as a cmdline arg and then configure the bpf_map_def during runtime. This patch adds two cmdline args. One is to configure the map's max_entries. Another is to configure the max_cnt which controls how many times a syscall is called. Signed-off-by: Martin KaFai Lau <kafai@fb.com> Acked-by: Alexei Starovoitov <ast@kernel.org> Acked-by: Daniel Borkmann <daniel@iogearbox.net> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'samples')
-rw-r--r--samples/bpf/bpf_load.c114
-rw-r--r--samples/bpf/bpf_load.h13
-rw-r--r--samples/bpf/map_perf_test_user.c148
3 files changed, 201 insertions, 74 deletions
diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
index dcdce1270d38..0d449d8032d1 100644
--- a/samples/bpf/bpf_load.c
+++ b/samples/bpf/bpf_load.c
@@ -21,6 +21,7 @@
21#include <sys/mman.h> 21#include <sys/mman.h>
22#include <poll.h> 22#include <poll.h>
23#include <ctype.h> 23#include <ctype.h>
24#include <assert.h>
24#include "libbpf.h" 25#include "libbpf.h"
25#include "bpf_load.h" 26#include "bpf_load.h"
26#include "perf-sys.h" 27#include "perf-sys.h"
@@ -37,15 +38,6 @@ int event_fd[MAX_PROGS];
37int prog_cnt; 38int prog_cnt;
38int prog_array_fd = -1; 39int prog_array_fd = -1;
39 40
40struct bpf_map_def {
41 unsigned int type;
42 unsigned int key_size;
43 unsigned int value_size;
44 unsigned int max_entries;
45 unsigned int map_flags;
46 unsigned int inner_map_idx;
47};
48
49static int populate_prog_array(const char *event, int prog_fd) 41static int populate_prog_array(const char *event, int prog_fd)
50{ 42{
51 int ind = atoi(event), err; 43 int ind = atoi(event), err;
@@ -193,11 +185,14 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
193 return 0; 185 return 0;
194} 186}
195 187
196static int load_maps(struct bpf_map_def *maps, int len) 188static int load_maps(struct bpf_map_def *maps, int len,
189 const char **map_names, fixup_map_cb fixup_map)
197{ 190{
198 int i; 191 int i;
199 192
200 for (i = 0; i < len / sizeof(struct bpf_map_def); i++) { 193 for (i = 0; i < len / sizeof(struct bpf_map_def); i++) {
194 if (fixup_map)
195 fixup_map(&maps[i], map_names[i], i);
201 196
202 if (maps[i].type == BPF_MAP_TYPE_ARRAY_OF_MAPS || 197 if (maps[i].type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
203 maps[i].type == BPF_MAP_TYPE_HASH_OF_MAPS) { 198 maps[i].type == BPF_MAP_TYPE_HASH_OF_MAPS) {
@@ -280,14 +275,64 @@ static int parse_relo_and_apply(Elf_Data *data, Elf_Data *symbols,
280 return 0; 275 return 0;
281} 276}
282 277
283int load_bpf_file(char *path) 278static int cmp_symbols(const void *l, const void *r)
279{
280 const GElf_Sym *lsym = (const GElf_Sym *)l;
281 const GElf_Sym *rsym = (const GElf_Sym *)r;
282
283 if (lsym->st_value < rsym->st_value)
284 return -1;
285 else if (lsym->st_value > rsym->st_value)
286 return 1;
287 else
288 return 0;
289}
290
291static int get_sorted_map_names(Elf *elf, Elf_Data *symbols, int maps_shndx,
292 int strtabidx, char **map_names)
293{
294 GElf_Sym map_symbols[MAX_MAPS];
295 int i, nr_maps = 0;
296
297 for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
298 assert(nr_maps < MAX_MAPS);
299 if (!gelf_getsym(symbols, i, &map_symbols[nr_maps]))
300 continue;
301 if (map_symbols[nr_maps].st_shndx != maps_shndx)
302 continue;
303 nr_maps++;
304 }
305
306 qsort(map_symbols, nr_maps, sizeof(GElf_Sym), cmp_symbols);
307
308 for (i = 0; i < nr_maps; i++) {
309 char *map_name;
310
311 map_name = elf_strptr(elf, strtabidx, map_symbols[i].st_name);
312 if (!map_name) {
313 printf("cannot get map symbol\n");
314 return 1;
315 }
316
317 map_names[i] = strdup(map_name);
318 if (!map_names[i]) {
319 printf("strdup(%s): %s(%d)\n", map_name,
320 strerror(errno), errno);
321 return 1;
322 }
323 }
324
325 return 0;
326}
327
328static int do_load_bpf_file(const char *path, fixup_map_cb fixup_map)
284{ 329{
285 int fd, i; 330 int fd, i, ret, maps_shndx = -1, strtabidx = -1;
286 Elf *elf; 331 Elf *elf;
287 GElf_Ehdr ehdr; 332 GElf_Ehdr ehdr;
288 GElf_Shdr shdr, shdr_prog; 333 GElf_Shdr shdr, shdr_prog;
289 Elf_Data *data, *data_prog, *symbols = NULL; 334 Elf_Data *data, *data_prog, *data_maps = NULL, *symbols = NULL;
290 char *shname, *shname_prog; 335 char *shname, *shname_prog, *map_names[MAX_MAPS] = { NULL };
291 336
292 /* reset global variables */ 337 /* reset global variables */
293 kern_version = 0; 338 kern_version = 0;
@@ -335,14 +380,33 @@ int load_bpf_file(char *path)
335 } 380 }
336 memcpy(&kern_version, data->d_buf, sizeof(int)); 381 memcpy(&kern_version, data->d_buf, sizeof(int));
337 } else if (strcmp(shname, "maps") == 0) { 382 } else if (strcmp(shname, "maps") == 0) {
338 processed_sec[i] = true; 383 maps_shndx = i;
339 if (load_maps(data->d_buf, data->d_size)) 384 data_maps = data;
340 return 1;
341 } else if (shdr.sh_type == SHT_SYMTAB) { 385 } else if (shdr.sh_type == SHT_SYMTAB) {
386 strtabidx = shdr.sh_link;
342 symbols = data; 387 symbols = data;
343 } 388 }
344 } 389 }
345 390
391 ret = 1;
392
393 if (!symbols) {
394 printf("missing SHT_SYMTAB section\n");
395 goto done;
396 }
397
398 if (data_maps) {
399 if (get_sorted_map_names(elf, symbols, maps_shndx, strtabidx,
400 map_names))
401 goto done;
402
403 if (load_maps(data_maps->d_buf, data_maps->d_size,
404 (const char **)map_names, fixup_map))
405 goto done;
406
407 processed_sec[maps_shndx] = true;
408 }
409
346 /* load programs that need map fixup (relocations) */ 410 /* load programs that need map fixup (relocations) */
347 for (i = 1; i < ehdr.e_shnum; i++) { 411 for (i = 1; i < ehdr.e_shnum; i++) {
348 if (processed_sec[i]) 412 if (processed_sec[i])
@@ -399,8 +463,22 @@ int load_bpf_file(char *path)
399 load_and_attach(shname, data->d_buf, data->d_size); 463 load_and_attach(shname, data->d_buf, data->d_size);
400 } 464 }
401 465
466 ret = 0;
467done:
468 for (i = 0; i < MAX_MAPS; i++)
469 free(map_names[i]);
402 close(fd); 470 close(fd);
403 return 0; 471 return ret;
472}
473
474int load_bpf_file(char *path)
475{
476 return do_load_bpf_file(path, NULL);
477}
478
479int load_bpf_file_fixup_map(const char *path, fixup_map_cb fixup_map)
480{
481 return do_load_bpf_file(path, fixup_map);
404} 482}
405 483
406void read_trace_pipe(void) 484void read_trace_pipe(void)
diff --git a/samples/bpf/bpf_load.h b/samples/bpf/bpf_load.h
index c827827299b3..68f6b2d22507 100644
--- a/samples/bpf/bpf_load.h
+++ b/samples/bpf/bpf_load.h
@@ -6,6 +6,18 @@
6#define MAX_MAPS 32 6#define MAX_MAPS 32
7#define MAX_PROGS 32 7#define MAX_PROGS 32
8 8
9struct bpf_map_def {
10 unsigned int type;
11 unsigned int key_size;
12 unsigned int value_size;
13 unsigned int max_entries;
14 unsigned int map_flags;
15 unsigned int inner_map_idx;
16};
17
18typedef void (*fixup_map_cb)(struct bpf_map_def *map, const char *map_name,
19 int idx);
20
9extern int map_fd[MAX_MAPS]; 21extern int map_fd[MAX_MAPS];
10extern int prog_fd[MAX_PROGS]; 22extern int prog_fd[MAX_PROGS];
11extern int event_fd[MAX_PROGS]; 23extern int event_fd[MAX_PROGS];
@@ -25,6 +37,7 @@ extern int prog_cnt;
25 * returns zero on success 37 * returns zero on success
26 */ 38 */
27int load_bpf_file(char *path); 39int load_bpf_file(char *path);
40int load_bpf_file_fixup_map(const char *path, fixup_map_cb fixup_map);
28 41
29void read_trace_pipe(void); 42void read_trace_pipe(void);
30struct ksym { 43struct ksym {
diff --git a/samples/bpf/map_perf_test_user.c b/samples/bpf/map_perf_test_user.c
index 51cb8f238aa2..2a12f48b5c6d 100644
--- a/samples/bpf/map_perf_test_user.c
+++ b/samples/bpf/map_perf_test_user.c
@@ -24,7 +24,7 @@
24#include "libbpf.h" 24#include "libbpf.h"
25#include "bpf_load.h" 25#include "bpf_load.h"
26 26
27#define MAX_CNT 1000000 27#define TEST_BIT(t) (1U << (t))
28 28
29static __u64 time_get_ns(void) 29static __u64 time_get_ns(void)
30{ 30{
@@ -34,17 +34,39 @@ static __u64 time_get_ns(void)
34 return ts.tv_sec * 1000000000ull + ts.tv_nsec; 34 return ts.tv_sec * 1000000000ull + ts.tv_nsec;
35} 35}
36 36
37#define HASH_PREALLOC (1 << 0) 37enum test_type {
38#define PERCPU_HASH_PREALLOC (1 << 1) 38 HASH_PREALLOC,
39#define HASH_KMALLOC (1 << 2) 39 PERCPU_HASH_PREALLOC,
40#define PERCPU_HASH_KMALLOC (1 << 3) 40 HASH_KMALLOC,
41#define LRU_HASH_PREALLOC (1 << 4) 41 PERCPU_HASH_KMALLOC,
42#define NOCOMMON_LRU_HASH_PREALLOC (1 << 5) 42 LRU_HASH_PREALLOC,
43#define LPM_KMALLOC (1 << 6) 43 NOCOMMON_LRU_HASH_PREALLOC,
44#define HASH_LOOKUP (1 << 7) 44 LPM_KMALLOC,
45#define ARRAY_LOOKUP (1 << 8) 45 HASH_LOOKUP,
46 ARRAY_LOOKUP,
47 NR_TESTS,
48};
49
50const char *test_map_names[NR_TESTS] = {
51 [HASH_PREALLOC] = "hash_map",
52 [PERCPU_HASH_PREALLOC] = "percpu_hash_map",
53 [HASH_KMALLOC] = "hash_map_alloc",
54 [PERCPU_HASH_KMALLOC] = "percpu_hash_map_alloc",
55 [LRU_HASH_PREALLOC] = "lru_hash_map",
56 [NOCOMMON_LRU_HASH_PREALLOC] = "nocommon_lru_hash_map",
57 [LPM_KMALLOC] = "lpm_trie_map_alloc",
58 [HASH_LOOKUP] = "hash_map",
59 [ARRAY_LOOKUP] = "array_map",
60};
46 61
47static int test_flags = ~0; 62static int test_flags = ~0;
63static uint32_t num_map_entries;
64static uint32_t max_cnt = 1000000;
65
66static int check_test_flags(enum test_type t)
67{
68 return test_flags & TEST_BIT(t);
69}
48 70
49static void test_hash_prealloc(int cpu) 71static void test_hash_prealloc(int cpu)
50{ 72{
@@ -52,13 +74,13 @@ static void test_hash_prealloc(int cpu)
52 int i; 74 int i;
53 75
54 start_time = time_get_ns(); 76 start_time = time_get_ns();
55 for (i = 0; i < MAX_CNT; i++) 77 for (i = 0; i < max_cnt; i++)
56 syscall(__NR_getuid); 78 syscall(__NR_getuid);
57 printf("%d:hash_map_perf pre-alloc %lld events per sec\n", 79 printf("%d:hash_map_perf pre-alloc %lld events per sec\n",
58 cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); 80 cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time));
59} 81}
60 82
61static void do_test_lru(int lru_test_flag, int cpu) 83static void do_test_lru(enum test_type test, int cpu)
62{ 84{
63 struct sockaddr_in6 in6 = { .sin6_family = AF_INET6 }; 85 struct sockaddr_in6 in6 = { .sin6_family = AF_INET6 };
64 const char *test_name; 86 const char *test_name;
@@ -68,10 +90,10 @@ static void do_test_lru(int lru_test_flag, int cpu)
68 in6.sin6_addr.s6_addr16[0] = 0xdead; 90 in6.sin6_addr.s6_addr16[0] = 0xdead;
69 in6.sin6_addr.s6_addr16[1] = 0xbeef; 91 in6.sin6_addr.s6_addr16[1] = 0xbeef;
70 92
71 if (lru_test_flag & LRU_HASH_PREALLOC) { 93 if (test == LRU_HASH_PREALLOC) {
72 test_name = "lru_hash_map_perf"; 94 test_name = "lru_hash_map_perf";
73 in6.sin6_addr.s6_addr16[7] = 0; 95 in6.sin6_addr.s6_addr16[7] = 0;
74 } else if (lru_test_flag & NOCOMMON_LRU_HASH_PREALLOC) { 96 } else if (test == NOCOMMON_LRU_HASH_PREALLOC) {
75 test_name = "nocommon_lru_hash_map_perf"; 97 test_name = "nocommon_lru_hash_map_perf";
76 in6.sin6_addr.s6_addr16[7] = 1; 98 in6.sin6_addr.s6_addr16[7] = 1;
77 } else { 99 } else {
@@ -79,13 +101,13 @@ static void do_test_lru(int lru_test_flag, int cpu)
79 } 101 }
80 102
81 start_time = time_get_ns(); 103 start_time = time_get_ns();
82 for (i = 0; i < MAX_CNT; i++) { 104 for (i = 0; i < max_cnt; i++) {
83 ret = connect(-1, (const struct sockaddr *)&in6, sizeof(in6)); 105 ret = connect(-1, (const struct sockaddr *)&in6, sizeof(in6));
84 assert(ret == -1 && errno == EBADF); 106 assert(ret == -1 && errno == EBADF);
85 } 107 }
86 printf("%d:%s pre-alloc %lld events per sec\n", 108 printf("%d:%s pre-alloc %lld events per sec\n",
87 cpu, test_name, 109 cpu, test_name,
88 MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); 110 max_cnt * 1000000000ll / (time_get_ns() - start_time));
89} 111}
90 112
91static void test_lru_hash_prealloc(int cpu) 113static void test_lru_hash_prealloc(int cpu)
@@ -104,10 +126,10 @@ static void test_percpu_hash_prealloc(int cpu)
104 int i; 126 int i;
105 127
106 start_time = time_get_ns(); 128 start_time = time_get_ns();
107 for (i = 0; i < MAX_CNT; i++) 129 for (i = 0; i < max_cnt; i++)
108 syscall(__NR_geteuid); 130 syscall(__NR_geteuid);
109 printf("%d:percpu_hash_map_perf pre-alloc %lld events per sec\n", 131 printf("%d:percpu_hash_map_perf pre-alloc %lld events per sec\n",
110 cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); 132 cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time));
111} 133}
112 134
113static void test_hash_kmalloc(int cpu) 135static void test_hash_kmalloc(int cpu)
@@ -116,10 +138,10 @@ static void test_hash_kmalloc(int cpu)
116 int i; 138 int i;
117 139
118 start_time = time_get_ns(); 140 start_time = time_get_ns();
119 for (i = 0; i < MAX_CNT; i++) 141 for (i = 0; i < max_cnt; i++)
120 syscall(__NR_getgid); 142 syscall(__NR_getgid);
121 printf("%d:hash_map_perf kmalloc %lld events per sec\n", 143 printf("%d:hash_map_perf kmalloc %lld events per sec\n",
122 cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); 144 cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time));
123} 145}
124 146
125static void test_percpu_hash_kmalloc(int cpu) 147static void test_percpu_hash_kmalloc(int cpu)
@@ -128,10 +150,10 @@ static void test_percpu_hash_kmalloc(int cpu)
128 int i; 150 int i;
129 151
130 start_time = time_get_ns(); 152 start_time = time_get_ns();
131 for (i = 0; i < MAX_CNT; i++) 153 for (i = 0; i < max_cnt; i++)
132 syscall(__NR_getegid); 154 syscall(__NR_getegid);
133 printf("%d:percpu_hash_map_perf kmalloc %lld events per sec\n", 155 printf("%d:percpu_hash_map_perf kmalloc %lld events per sec\n",
134 cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); 156 cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time));
135} 157}
136 158
137static void test_lpm_kmalloc(int cpu) 159static void test_lpm_kmalloc(int cpu)
@@ -140,10 +162,10 @@ static void test_lpm_kmalloc(int cpu)
140 int i; 162 int i;
141 163
142 start_time = time_get_ns(); 164 start_time = time_get_ns();
143 for (i = 0; i < MAX_CNT; i++) 165 for (i = 0; i < max_cnt; i++)
144 syscall(__NR_gettid); 166 syscall(__NR_gettid);
145 printf("%d:lpm_perf kmalloc %lld events per sec\n", 167 printf("%d:lpm_perf kmalloc %lld events per sec\n",
146 cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time)); 168 cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time));
147} 169}
148 170
149static void test_hash_lookup(int cpu) 171static void test_hash_lookup(int cpu)
@@ -152,10 +174,10 @@ static void test_hash_lookup(int cpu)
152 int i; 174 int i;
153 175
154 start_time = time_get_ns(); 176 start_time = time_get_ns();
155 for (i = 0; i < MAX_CNT; i++) 177 for (i = 0; i < max_cnt; i++)
156 syscall(__NR_getpgid, 0); 178 syscall(__NR_getpgid, 0);
157 printf("%d:hash_lookup %lld lookups per sec\n", 179 printf("%d:hash_lookup %lld lookups per sec\n",
158 cpu, MAX_CNT * 1000000000ll * 64 / (time_get_ns() - start_time)); 180 cpu, max_cnt * 1000000000ll * 64 / (time_get_ns() - start_time));
159} 181}
160 182
161static void test_array_lookup(int cpu) 183static void test_array_lookup(int cpu)
@@ -164,46 +186,38 @@ static void test_array_lookup(int cpu)
164 int i; 186 int i;
165 187
166 start_time = time_get_ns(); 188 start_time = time_get_ns();
167 for (i = 0; i < MAX_CNT; i++) 189 for (i = 0; i < max_cnt; i++)
168 syscall(__NR_getpgrp, 0); 190 syscall(__NR_getpgrp, 0);
169 printf("%d:array_lookup %lld lookups per sec\n", 191 printf("%d:array_lookup %lld lookups per sec\n",
170 cpu, MAX_CNT * 1000000000ll * 64 / (time_get_ns() - start_time)); 192 cpu, max_cnt * 1000000000ll * 64 / (time_get_ns() - start_time));
171} 193}
172 194
195typedef void (*test_func)(int cpu);
196const test_func test_funcs[] = {
197 [HASH_PREALLOC] = test_hash_prealloc,
198 [PERCPU_HASH_PREALLOC] = test_percpu_hash_prealloc,
199 [HASH_KMALLOC] = test_hash_kmalloc,
200 [PERCPU_HASH_KMALLOC] = test_percpu_hash_kmalloc,
201 [LRU_HASH_PREALLOC] = test_lru_hash_prealloc,
202 [NOCOMMON_LRU_HASH_PREALLOC] = test_nocommon_lru_hash_prealloc,
203 [LPM_KMALLOC] = test_lpm_kmalloc,
204 [HASH_LOOKUP] = test_hash_lookup,
205 [ARRAY_LOOKUP] = test_array_lookup,
206};
207
173static void loop(int cpu) 208static void loop(int cpu)
174{ 209{
175 cpu_set_t cpuset; 210 cpu_set_t cpuset;
211 int i;
176 212
177 CPU_ZERO(&cpuset); 213 CPU_ZERO(&cpuset);
178 CPU_SET(cpu, &cpuset); 214 CPU_SET(cpu, &cpuset);
179 sched_setaffinity(0, sizeof(cpuset), &cpuset); 215 sched_setaffinity(0, sizeof(cpuset), &cpuset);
180 216
181 if (test_flags & HASH_PREALLOC) 217 for (i = 0; i < NR_TESTS; i++) {
182 test_hash_prealloc(cpu); 218 if (check_test_flags(i))
183 219 test_funcs[i](cpu);
184 if (test_flags & PERCPU_HASH_PREALLOC) 220 }
185 test_percpu_hash_prealloc(cpu);
186
187 if (test_flags & HASH_KMALLOC)
188 test_hash_kmalloc(cpu);
189
190 if (test_flags & PERCPU_HASH_KMALLOC)
191 test_percpu_hash_kmalloc(cpu);
192
193 if (test_flags & LRU_HASH_PREALLOC)
194 test_lru_hash_prealloc(cpu);
195
196 if (test_flags & NOCOMMON_LRU_HASH_PREALLOC)
197 test_nocommon_lru_hash_prealloc(cpu);
198
199 if (test_flags & LPM_KMALLOC)
200 test_lpm_kmalloc(cpu);
201
202 if (test_flags & HASH_LOOKUP)
203 test_hash_lookup(cpu);
204
205 if (test_flags & ARRAY_LOOKUP)
206 test_array_lookup(cpu);
207} 221}
208 222
209static void run_perf_test(int tasks) 223static void run_perf_test(int tasks)
@@ -260,6 +274,22 @@ static void fill_lpm_trie(void)
260 assert(!r); 274 assert(!r);
261} 275}
262 276
277static void fixup_map(struct bpf_map_def *map, const char *name, int idx)
278{
279 int i;
280
281 if (num_map_entries <= 0)
282 return;
283
284 /* Only change the max_entries for the enabled test(s) */
285 for (i = 0; i < NR_TESTS; i++) {
286 if (!strcmp(test_map_names[i], name) &&
287 (check_test_flags(i))) {
288 map->max_entries = num_map_entries;
289 }
290 }
291}
292
263int main(int argc, char **argv) 293int main(int argc, char **argv)
264{ 294{
265 struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; 295 struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
@@ -275,7 +305,13 @@ int main(int argc, char **argv)
275 if (argc > 2) 305 if (argc > 2)
276 num_cpu = atoi(argv[2]) ? : num_cpu; 306 num_cpu = atoi(argv[2]) ? : num_cpu;
277 307
278 if (load_bpf_file(filename)) { 308 if (argc > 3)
309 num_map_entries = atoi(argv[3]);
310
311 if (argc > 4)
312 max_cnt = atoi(argv[4]);
313
314 if (load_bpf_file_fixup_map(filename, fixup_map)) {
279 printf("%s", bpf_log_buf); 315 printf("%s", bpf_log_buf);
280 return 1; 316 return 1;
281 } 317 }