Line data Source code
1 : /* SPDX-License-Identifier: GPL-2.0-only */
2 : /*
3 : * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
4 : */
5 :
6 : #include "bpfilter/cgen/program.h"
7 :
8 : #include <linux/bpf.h>
9 : #include <linux/bpf_common.h>
10 : #include <linux/limits.h>
11 :
12 : #include <errno.h>
13 : #include <fcntl.h>
14 : #include <limits.h>
15 : #include <stddef.h>
16 : #include <stdint.h>
17 : #include <stdio.h>
18 : #include <stdlib.h>
19 : #include <string.h>
20 : #include <unistd.h>
21 :
22 : #include "bpfilter/cgen/cgroup.h"
23 : #include "bpfilter/cgen/dump.h"
24 : #include "bpfilter/cgen/fixup.h"
25 : #include "bpfilter/cgen/jmp.h"
26 : #include "bpfilter/cgen/matcher/ip4.h"
27 : #include "bpfilter/cgen/matcher/ip6.h"
28 : #include "bpfilter/cgen/matcher/meta.h"
29 : #include "bpfilter/cgen/matcher/set.h"
30 : #include "bpfilter/cgen/matcher/tcp.h"
31 : #include "bpfilter/cgen/matcher/udp.h"
32 : #include "bpfilter/cgen/nf.h"
33 : #include "bpfilter/cgen/printer.h"
34 : #include "bpfilter/cgen/prog/link.h"
35 : #include "bpfilter/cgen/prog/map.h"
36 : #include "bpfilter/cgen/stub.h"
37 : #include "bpfilter/cgen/tc.h"
38 : #include "bpfilter/cgen/xdp.h"
39 : #include "bpfilter/ctx.h"
40 : #include "bpfilter/opts.h"
41 : #include "core/bpf.h"
42 : #include "core/btf.h"
43 : #include "core/chain.h"
44 : #include "core/counter.h"
45 : #include "core/dump.h"
46 : #include "core/flavor.h"
47 : #include "core/helper.h"
48 : #include "core/hook.h"
49 : #include "core/io.h"
50 : #include "core/list.h"
51 : #include "core/logger.h"
52 : #include "core/marsh.h"
53 : #include "core/matcher.h"
54 : #include "core/rule.h"
55 : #include "core/set.h"
56 : #include "core/verdict.h"
57 :
58 : #include "external/filter.h"
59 :
60 : #define _BF_LOG_BUF_SIZE \
61 : (UINT32_MAX >> 8) /* verifier maximum in kernels <= 5.1 */
62 : #define _BF_PROGRAM_DEFAULT_IMG_SIZE (1 << 6)
63 :
64 8 : static const struct bf_flavor_ops *bf_flavor_ops_get(enum bf_flavor flavor)
65 : {
66 : static const struct bf_flavor_ops *flavor_ops[] = {
67 : [BF_FLAVOR_TC] = &bf_flavor_ops_tc,
68 : [BF_FLAVOR_NF] = &bf_flavor_ops_nf,
69 : [BF_FLAVOR_XDP] = &bf_flavor_ops_xdp,
70 : [BF_FLAVOR_CGROUP] = &bf_flavor_ops_cgroup,
71 : };
72 :
73 : static_assert(ARRAY_SIZE(flavor_ops) == _BF_FLAVOR_MAX,
74 : "missing entries in bf_flavor_ops array");
75 :
76 8 : return flavor_ops[flavor];
77 : }
78 :
79 4 : int bf_program_new(struct bf_program **program, const struct bf_chain *chain)
80 : {
81 4 : _free_bf_program_ struct bf_program *_program = NULL;
82 : char name[BPF_OBJ_NAME_LEN];
83 : uint32_t set_idx = 0;
84 : int r;
85 :
86 4 : bf_assert(program && chain);
87 :
88 4 : _program = calloc(1, sizeof(*_program));
89 4 : if (!_program)
90 : return -ENOMEM;
91 :
92 4 : _program->flavor = bf_hook_to_flavor(chain->hook);
93 4 : _program->runtime.prog_fd = -1;
94 4 : _program->runtime.ops = bf_flavor_ops_get(_program->flavor);
95 4 : _program->runtime.chain = chain;
96 :
97 4 : (void)snprintf(_program->prog_name, BPF_OBJ_NAME_LEN, "%s", "bf_prog");
98 :
99 4 : r = bf_map_new(&_program->cmap, "counters_map", BF_MAP_TYPE_COUNTERS,
100 : BF_MAP_BPF_TYPE_ARRAY, sizeof(uint32_t),
101 : sizeof(struct bf_counter), 1);
102 4 : if (r < 0)
103 0 : return bf_err_r(r, "failed to create the counters bf_map object");
104 :
105 4 : r = bf_map_new(&_program->pmap, "printer_map", BF_MAP_TYPE_PRINTER,
106 : BF_MAP_BPF_TYPE_ARRAY, sizeof(uint32_t),
107 : BF_MAP_VALUE_SIZE_UNKNOWN, 1);
108 4 : if (r < 0)
109 0 : return bf_err_r(r, "failed to create the printer bf_map object");
110 :
111 4 : _program->sets = bf_map_list();
112 8 : bf_list_foreach (&chain->sets, set_node) {
113 0 : struct bf_set *set = bf_list_node_get_data(set_node);
114 0 : _free_bf_map_ struct bf_map *map = NULL;
115 :
116 0 : (void)snprintf(name, BPF_OBJ_NAME_LEN, "set_%04x", (uint8_t)set_idx++);
117 0 : r = bf_map_new(&map, name, BF_MAP_TYPE_SET, BF_MAP_BPF_TYPE_HASH,
118 0 : set->elem_size, 1, bf_list_size(&set->elems));
119 0 : if (r < 0)
120 : return r;
121 :
122 0 : r = bf_list_add_tail(&_program->sets, map);
123 0 : if (r < 0)
124 : return r;
125 0 : TAKE_PTR(map);
126 : };
127 :
128 4 : r = bf_link_new(&_program->link, "bf_link");
129 4 : if (r)
130 : return r;
131 :
132 4 : r = bf_printer_new(&_program->printer);
133 4 : if (r)
134 : return r;
135 :
136 4 : bf_list_init(&_program->fixups,
137 4 : (bf_list_ops[]) {{.free = (bf_list_ops_free)bf_fixup_free}});
138 :
139 4 : *program = TAKE_PTR(_program);
140 :
141 4 : return 0;
142 : }
143 :
144 17 : void bf_program_free(struct bf_program **program)
145 : {
146 17 : if (!*program)
147 : return;
148 :
149 4 : bf_list_clean(&(*program)->fixups);
150 4 : free((*program)->img);
151 :
152 : /* Close the file descriptors if they are still open. If --transient is
153 : * used, then the file descriptors are already closed (as
154 : * bf_program_unload() has been called). Otherwise, bf_program_unload()
155 : * won't be called, but the programs are pinned, so they can be closed
156 : * safely. */
157 4 : closep(&(*program)->runtime.prog_fd);
158 :
159 4 : bf_map_free(&(*program)->cmap);
160 4 : bf_map_free(&(*program)->pmap);
161 4 : bf_list_clean(&(*program)->sets);
162 4 : bf_link_free(&(*program)->link);
163 4 : bf_printer_free(&(*program)->printer);
164 :
165 4 : free(*program);
166 4 : *program = NULL;
167 : }
168 :
169 0 : int bf_program_marsh(const struct bf_program *program, struct bf_marsh **marsh)
170 : {
171 0 : _free_bf_marsh_ struct bf_marsh *_marsh = NULL;
172 : int r;
173 :
174 0 : bf_assert(program);
175 0 : bf_assert(marsh);
176 :
177 0 : r = bf_marsh_new(&_marsh, NULL, 0);
178 0 : if (r < 0)
179 : return r;
180 :
181 : {
182 : // Serialize bf_program.counters
183 0 : _free_bf_marsh_ struct bf_marsh *counters_elem = NULL;
184 :
185 0 : r = bf_map_marsh(program->cmap, &counters_elem);
186 0 : if (r < 0)
187 : return r;
188 :
189 0 : r = bf_marsh_add_child_obj(&_marsh, counters_elem);
190 0 : if (r < 0)
191 : return r;
192 : }
193 :
194 : {
195 : // Serialize bf_program.pmap
196 0 : _free_bf_marsh_ struct bf_marsh *pmap_elem = NULL;
197 :
198 0 : r = bf_map_marsh(program->pmap, &pmap_elem);
199 0 : if (r < 0)
200 : return r;
201 :
202 0 : r = bf_marsh_add_child_obj(&_marsh, pmap_elem);
203 0 : if (r < 0)
204 : return r;
205 : }
206 :
207 : {
208 : // Serialize bf_program.sets
209 0 : _free_bf_marsh_ struct bf_marsh *sets_elem = NULL;
210 :
211 0 : r = bf_list_marsh(&program->sets, &sets_elem);
212 0 : if (r < 0)
213 : return r;
214 :
215 0 : r = bf_marsh_add_child_obj(&_marsh, sets_elem);
216 0 : if (r < 0) {
217 0 : return bf_err_r(
218 : r,
219 : "failed to insert serialized sets into bf_program serialized data");
220 : }
221 : }
222 :
223 : {
224 : // Serialize bf_program.links
225 0 : _free_bf_marsh_ struct bf_marsh *links_elem = NULL;
226 :
227 0 : r = bf_link_marsh(program->link, &links_elem);
228 0 : if (r)
229 0 : return bf_err_r(r, "failed to serialize bf_program.link");
230 :
231 0 : r = bf_marsh_add_child_obj(&_marsh, links_elem);
232 0 : if (r) {
233 0 : return bf_err_r(
234 : r,
235 : "failed to insert serialized link into bf_program serialized data");
236 : }
237 : }
238 :
239 : {
240 : // Serialize bf_program.printer
241 0 : _free_bf_marsh_ struct bf_marsh *child = NULL;
242 :
243 0 : r = bf_printer_marsh(program->printer, &child);
244 0 : if (r)
245 0 : return bf_err_r(r, "failed to marsh bf_printer object");
246 :
247 0 : r = bf_marsh_add_child_obj(&_marsh, child);
248 0 : if (r)
249 0 : return bf_err_r(r, "failed to append object to marsh");
250 : }
251 :
252 0 : r |= bf_marsh_add_child_raw(&_marsh, program->img,
253 0 : program->img_size * sizeof(struct bpf_insn));
254 0 : if (r)
255 0 : return bf_err_r(r, "Failed to serialize program");
256 :
257 0 : *marsh = TAKE_PTR(_marsh);
258 :
259 0 : return 0;
260 : }
261 :
262 0 : int bf_program_unmarsh(const struct bf_marsh *marsh,
263 : struct bf_program **program,
264 : const struct bf_chain *chain, int dir_fd)
265 : {
266 0 : _free_bf_program_ struct bf_program *_program = NULL;
267 0 : _free_bf_link_ struct bf_link *link = NULL;
268 : struct bf_marsh *child = NULL;
269 : int r;
270 :
271 0 : bf_assert(marsh && program);
272 :
273 0 : r = bf_program_new(&_program, chain);
274 0 : if (r < 0)
275 : return r;
276 :
277 0 : if (!(child = bf_marsh_next_child(marsh, child)))
278 : return -EINVAL;
279 0 : bf_map_free(&_program->cmap);
280 0 : r = bf_map_new_from_marsh(&_program->cmap, dir_fd, child);
281 0 : if (r < 0)
282 : return r;
283 :
284 0 : if (!(child = bf_marsh_next_child(marsh, child)))
285 : return -EINVAL;
286 0 : bf_map_free(&_program->pmap);
287 0 : r = bf_map_new_from_marsh(&_program->pmap, dir_fd, child);
288 0 : if (r < 0)
289 : return r;
290 :
291 : /** @todo Avoid creating and filling the list in @ref bf_program_new before
292 : * trashing it all here. Eventually, this function will be replaced with
293 : * @c bf_program_new_from_marsh and this issue could be solved by **not**
294 : * relying on @ref bf_program_new to allocate and initialize @p _program . */
295 0 : bf_list_clean(&_program->sets);
296 0 : _program->sets = bf_map_list();
297 :
298 0 : if (!(child = bf_marsh_next_child(marsh, child)))
299 : return -EINVAL;
300 : {
301 : // Unmarsh bf_program.sets
302 : struct bf_marsh *set_elem = NULL;
303 :
304 0 : while ((set_elem = bf_marsh_next_child(child, set_elem))) {
305 0 : _free_bf_map_ struct bf_map *map = NULL;
306 :
307 0 : r = bf_map_new_from_marsh(&map, dir_fd, set_elem);
308 0 : if (r < 0)
309 : return r;
310 :
311 0 : r = bf_list_add_tail(&_program->sets, map);
312 0 : if (r < 0)
313 : return r;
314 :
315 0 : TAKE_PTR(map);
316 : }
317 : }
318 :
319 : // Unmarsh bf_program.links
320 0 : if (!(child = bf_marsh_next_child(marsh, child)))
321 : return -EINVAL;
322 :
323 : /* Try to restore the link: on success, replace the program's link with the
324 : * restored one. If -ENOENT is returned, the link doesn't exist, meaning the
325 : * program is not attached. Otherwise, return an error. */
326 0 : r = bf_link_new_from_marsh(&link, dir_fd, child);
327 0 : if (!r)
328 0 : bf_swap(_program->link, link);
329 0 : else if (r != -ENOENT)
330 0 : return bf_err_r(r, "failed to restore bf_program.link");
331 :
332 : // Unmarsh bf_program.printer
333 0 : child = bf_marsh_next_child(marsh, child);
334 0 : if (!child)
335 0 : return bf_err_r(-EINVAL, "failed to find valid child");
336 :
337 0 : bf_printer_free(&_program->printer);
338 0 : r = bf_printer_new_from_marsh(&_program->printer, child);
339 0 : if (r)
340 0 : return bf_err_r(r, "failed to restore bf_printer object");
341 :
342 0 : if (!(child = bf_marsh_next_child(marsh, child)))
343 : return -EINVAL;
344 0 : _program->img = bf_memdup(child->data, child->data_len);
345 0 : _program->img_size = child->data_len / sizeof(struct bpf_insn);
346 0 : _program->img_cap = child->data_len / sizeof(struct bpf_insn);
347 :
348 0 : if (bf_marsh_next_child(marsh, child))
349 0 : bf_warn("codegen marsh has more children than expected");
350 :
351 0 : r = bf_bpf_obj_get(_program->prog_name, dir_fd, &_program->runtime.prog_fd);
352 0 : if (r < 0)
353 0 : return bf_err_r(r, "failed to get prog fd");
354 :
355 0 : *program = TAKE_PTR(_program);
356 :
357 0 : return 0;
358 : }
359 :
360 0 : void bf_program_dump(const struct bf_program *program, prefix_t *prefix)
361 : {
362 0 : bf_assert(program);
363 0 : bf_assert(prefix);
364 :
365 0 : DUMP(prefix, "struct bf_program at %p", program);
366 :
367 0 : bf_dump_prefix_push(prefix);
368 :
369 0 : DUMP(prefix, "prog_name: %s", program->prog_name);
370 :
371 0 : DUMP(prefix, "cmap: struct bf_map *");
372 0 : bf_dump_prefix_push(prefix);
373 0 : bf_map_dump(program->cmap, bf_dump_prefix_last(prefix));
374 0 : bf_dump_prefix_pop(prefix);
375 :
376 0 : DUMP(prefix, "pmap: struct bf_map *");
377 0 : bf_dump_prefix_push(prefix);
378 0 : bf_map_dump(program->pmap, bf_dump_prefix_last(prefix));
379 0 : bf_dump_prefix_pop(prefix);
380 :
381 0 : DUMP(prefix, "sets: bf_list<bf_map>[%lu]", bf_list_size(&program->sets));
382 0 : bf_dump_prefix_push(prefix);
383 0 : bf_list_foreach (&program->sets, map_node) {
384 0 : struct bf_map *map = bf_list_node_get_data(map_node);
385 :
386 0 : if (bf_list_is_tail(&program->sets, map_node))
387 0 : bf_dump_prefix_last(prefix);
388 :
389 0 : bf_map_dump(map, prefix);
390 : }
391 0 : bf_dump_prefix_pop(prefix);
392 :
393 0 : DUMP(prefix, "link: struct bf_link *");
394 0 : bf_dump_prefix_push(prefix);
395 0 : bf_link_dump(program->link, prefix);
396 0 : bf_dump_prefix_pop(prefix);
397 :
398 0 : DUMP(prefix, "printer: struct bf_printer *");
399 0 : bf_dump_prefix_push(prefix);
400 0 : bf_printer_dump(program->printer, prefix);
401 0 : bf_dump_prefix_pop(prefix);
402 :
403 0 : DUMP(prefix, "img: %p", program->img);
404 0 : DUMP(prefix, "img_size: %lu", program->img_size);
405 0 : DUMP(prefix, "img_cap: %lu", program->img_cap);
406 :
407 0 : DUMP(prefix, "fixups: bf_list<struct bf_fixup>[%lu]",
408 : bf_list_size(&program->fixups));
409 0 : bf_dump_prefix_push(prefix);
410 0 : bf_list_foreach (&program->fixups, fixup_node) {
411 0 : struct bf_fixup *fixup = bf_list_node_get_data(fixup_node);
412 :
413 0 : if (bf_list_is_tail(&program->fixups, fixup_node))
414 0 : bf_dump_prefix_last(prefix);
415 :
416 0 : bf_fixup_dump(fixup, prefix);
417 : }
418 0 : bf_dump_prefix_pop(prefix);
419 :
420 0 : DUMP(bf_dump_prefix_last(prefix), "runtime: <anonymous>");
421 0 : bf_dump_prefix_push(prefix);
422 0 : DUMP(prefix, "prog_fd: %d", program->runtime.prog_fd);
423 0 : DUMP(bf_dump_prefix_last(prefix), "ops: %p", program->runtime.ops);
424 0 : bf_dump_prefix_pop(prefix);
425 :
426 0 : bf_dump_prefix_pop(prefix);
427 0 : }
428 :
429 0 : static inline size_t _bf_round_next_power_of_2(size_t value)
430 : {
431 0 : value--;
432 0 : value |= value >> 1;
433 0 : value |= value >> 2;
434 0 : value |= value >> 4;
435 0 : value |= value >> 8;
436 0 : value |= value >> 16;
437 :
438 0 : return ++value;
439 : }
440 :
441 4 : int bf_program_grow_img(struct bf_program *program)
442 : {
443 : size_t new_cap = _BF_PROGRAM_DEFAULT_IMG_SIZE;
444 : int r;
445 :
446 4 : bf_assert(program);
447 :
448 4 : if (program->img)
449 0 : new_cap = _bf_round_next_power_of_2(program->img_cap << 1);
450 :
451 4 : r = bf_realloc((void **)&program->img, new_cap * sizeof(struct bpf_insn));
452 4 : if (r < 0) {
453 0 : return bf_err_r(r, "failed to grow program img from %lu to %lu insn",
454 : program->img_cap, new_cap);
455 : }
456 :
457 4 : program->img_cap = new_cap;
458 :
459 4 : return 0;
460 : }
461 :
462 0 : static void _bf_program_fixup_insn(struct bpf_insn *insn,
463 : enum bf_fixup_insn type, int32_t value)
464 : {
465 0 : switch (type) {
466 0 : case BF_FIXUP_INSN_OFF:
467 0 : bf_assert(!insn->off);
468 0 : bf_assert(value < SHRT_MAX);
469 0 : insn->off = (int16_t)value;
470 0 : break;
471 0 : case BF_FIXUP_INSN_IMM:
472 0 : bf_assert(!insn->imm);
473 0 : insn->imm = value;
474 0 : break;
475 0 : default:
476 0 : bf_abort(
477 : "unsupported fixup instruction type, this should not happen: %d",
478 : type);
479 : break;
480 : }
481 0 : }
482 :
483 0 : static int _bf_program_fixup(struct bf_program *program,
484 : enum bf_fixup_type type)
485 : {
486 0 : bf_assert(program);
487 0 : bf_assert(type >= 0 && type < _BF_FIXUP_TYPE_MAX);
488 :
489 0 : bf_list_foreach (&program->fixups, fixup_node) {
490 : enum bf_fixup_insn insn_type = _BF_FIXUP_INSN_MAX;
491 : int32_t value;
492 : size_t offset;
493 0 : struct bf_fixup *fixup = bf_list_node_get_data(fixup_node);
494 0 : struct bpf_insn *insn = &program->img[fixup->insn];
495 : struct bf_map *map;
496 :
497 0 : if (type != fixup->type)
498 0 : continue;
499 :
500 0 : switch (type) {
501 0 : case BF_FIXUP_TYPE_JMP_NEXT_RULE:
502 : insn_type = BF_FIXUP_INSN_OFF;
503 0 : value = (int)(program->img_size - fixup->insn - 1U);
504 0 : break;
505 0 : case BF_FIXUP_TYPE_COUNTERS_MAP_FD:
506 : insn_type = BF_FIXUP_INSN_IMM;
507 0 : value = program->cmap->fd;
508 0 : break;
509 0 : case BF_FIXUP_TYPE_PRINTER_MAP_FD:
510 : insn_type = BF_FIXUP_INSN_IMM;
511 0 : value = program->pmap->fd;
512 0 : break;
513 0 : case BF_FIXUP_TYPE_SET_MAP_FD:
514 0 : map = bf_list_get_at(&program->sets, fixup->attr.set_index);
515 0 : if (!map) {
516 0 : return bf_err_r(-ENOENT, "can't find set map at index %lu",
517 : fixup->attr.set_index);
518 : }
519 : insn_type = BF_FIXUP_INSN_IMM;
520 0 : value = map->fd;
521 0 : break;
522 0 : case BF_FIXUP_TYPE_FUNC_CALL:
523 : insn_type = BF_FIXUP_INSN_IMM;
524 0 : offset = program->functions_location[fixup->attr.function] -
525 : fixup->insn - 1;
526 0 : bf_assert(offset < INT_MAX);
527 0 : value = (int32_t)offset;
528 0 : break;
529 0 : default:
530 0 : bf_abort("unsupported fixup type, this should not happen: %d",
531 : type);
532 : break;
533 : }
534 :
535 0 : _bf_program_fixup_insn(insn, insn_type, value);
536 0 : bf_list_delete(&program->fixups, fixup_node);
537 : }
538 :
539 : return 0;
540 : }
541 :
542 0 : static int _bf_program_generate_rule(struct bf_program *program,
543 : struct bf_rule *rule)
544 : {
545 : int r;
546 :
547 0 : bf_assert(program);
548 0 : bf_assert(rule);
549 :
550 0 : bf_list_foreach (&rule->matchers, matcher_node) {
551 0 : struct bf_matcher *matcher = bf_list_node_get_data(matcher_node);
552 :
553 0 : switch (matcher->type) {
554 0 : case BF_MATCHER_META_IFINDEX:
555 : case BF_MATCHER_META_L3_PROTO:
556 : case BF_MATCHER_META_L4_PROTO:
557 : case BF_MATCHER_META_SPORT:
558 : case BF_MATCHER_META_DPORT:
559 0 : r = bf_matcher_generate_meta(program, matcher);
560 0 : if (r)
561 : return r;
562 : break;
563 0 : case BF_MATCHER_IP4_SADDR:
564 : case BF_MATCHER_IP4_DADDR:
565 : case BF_MATCHER_IP4_PROTO:
566 0 : r = bf_matcher_generate_ip4(program, matcher);
567 0 : if (r)
568 : return r;
569 : break;
570 0 : case BF_MATCHER_IP6_SADDR:
571 : case BF_MATCHER_IP6_DADDR:
572 0 : r = bf_matcher_generate_ip6(program, matcher);
573 0 : if (r)
574 : return r;
575 : break;
576 0 : case BF_MATCHER_TCP_SPORT:
577 : case BF_MATCHER_TCP_DPORT:
578 : case BF_MATCHER_TCP_FLAGS:
579 0 : r = bf_matcher_generate_tcp(program, matcher);
580 0 : if (r)
581 : return r;
582 : break;
583 0 : case BF_MATCHER_UDP_SPORT:
584 : case BF_MATCHER_UDP_DPORT:
585 0 : r = bf_matcher_generate_udp(program, matcher);
586 0 : if (r)
587 : return r;
588 : break;
589 0 : case BF_MATCHER_SET_SRCIP6PORT:
590 : case BF_MATCHER_SET_SRCIP6:
591 0 : r = bf_matcher_generate_set(program, matcher);
592 0 : if (r)
593 : return r;
594 : break;
595 0 : default:
596 0 : return bf_err_r(-EINVAL, "unknown matcher type %d", matcher->type);
597 : };
598 : }
599 :
600 0 : if (rule->counters) {
601 0 : EMIT(program, BPF_MOV32_IMM(BPF_REG_1, rule->index));
602 0 : EMIT(program, BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10,
603 : BF_PROG_CTX_OFF(pkt_size)));
604 0 : EMIT_FIXUP_CALL(program, BF_FIXUP_FUNC_UPDATE_COUNTERS);
605 : }
606 :
607 0 : switch (rule->verdict) {
608 0 : case BF_VERDICT_ACCEPT:
609 : case BF_VERDICT_DROP:
610 0 : EMIT(program,
611 : BPF_MOV64_IMM(BPF_REG_0,
612 : program->runtime.ops->get_verdict(rule->verdict)));
613 0 : EMIT(program, BPF_EXIT_INSN());
614 0 : break;
615 : case BF_VERDICT_CONTINUE:
616 : // Fall through to next rule or default chain policy.
617 : break;
618 0 : default:
619 0 : bf_abort("unsupported verdict, this should not happen: %d",
620 : rule->verdict);
621 : break;
622 : }
623 :
624 0 : r = _bf_program_fixup(program, BF_FIXUP_TYPE_JMP_NEXT_RULE);
625 0 : if (r)
626 0 : return bf_err_r(r, "failed to generate next rule fixups");
627 :
628 : return 0;
629 : }
630 :
631 : /**
632 : * Generate the BPF function to update a rule's counters.
633 : *
634 : * This function defines a new function **in** the generated BPF program to
635 : * be called during packet processing.
636 : *
637 : * Parameters:
638 : * - @c r1 : index of the rule to update the counters for.
639 : * - @c r2 : size of the packet.
640 : * Returns:
641 : * 0 on success, non-zero on error.
642 : *
643 : * @param program Program to emit the function into. Cannot be NULL.
644 : * @return 0 on success, or negative errno value on error.
645 : */
646 0 : static int _bf_program_generate_update_counters(struct bf_program *program)
647 : {
648 : // Move the counters key into scratch[0..3] and the packet size into scratch[8..15]
649 0 : EMIT(program,
650 : BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_1, BF_PROG_SCR_OFF(0)));
651 0 : EMIT(program,
652 : BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, BF_PROG_SCR_OFF(8)));
653 :
654 : // Call bpf_map_lookup_elem()
655 0 : EMIT_LOAD_COUNTERS_FD_FIXUP(program, BPF_REG_1);
656 0 : EMIT(program, BPF_MOV64_REG(BPF_REG_2, BPF_REG_10));
657 0 : EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, BF_PROG_SCR_OFF(0)));
658 0 : EMIT(program, BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem));
659 :
660 : // If the counters entry doesn't exist, return from the function
661 : {
662 0 : _clean_bf_jmpctx_ struct bf_jmpctx _ =
663 0 : bf_jmpctx_get(program, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0));
664 :
665 0 : if (bf_opts_is_verbose(BF_VERBOSE_BPF))
666 0 : EMIT_PRINT(program, "failed to fetch the rule's counters");
667 :
668 0 : EMIT(program, BPF_MOV32_IMM(BPF_REG_0, 1));
669 0 : EMIT(program, BPF_EXIT_INSN());
670 : }
671 :
672 : // Increment the packets count by 1.
673 0 : EMIT(program, BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
674 : offsetof(struct bf_counter, packets)));
675 0 : EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1));
676 0 : EMIT(program, BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
677 : offsetof(struct bf_counter, packets)));
678 :
679 : // Increase the bytes counter by the size of the packet.
680 0 : EMIT(program, BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
681 : offsetof(struct bf_counter, bytes)));
682 0 : EMIT(program,
683 : BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, BF_PROG_SCR_OFF(8)));
684 0 : EMIT(program, BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2));
685 0 : EMIT(program, BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
686 : offsetof(struct bf_counter, bytes)));
687 :
688 : // On success, return 0
689 0 : EMIT(program, BPF_MOV32_IMM(BPF_REG_0, 0));
690 0 : EMIT(program, BPF_EXIT_INSN());
691 :
692 0 : return 0;
693 : }
694 :
695 0 : static int _bf_program_generate_functions(struct bf_program *program)
696 : {
697 : int r;
698 :
699 0 : bf_assert(program);
700 :
701 0 : bf_list_foreach (&program->fixups, fixup_node) {
702 0 : struct bf_fixup *fixup = bf_list_node_get_data(fixup_node);
703 0 : size_t off = program->img_size;
704 :
705 0 : if (fixup->type != BF_FIXUP_TYPE_FUNC_CALL)
706 0 : continue;
707 :
708 0 : bf_assert(fixup->attr.function >= 0 &&
709 : fixup->attr.function < _BF_FIXUP_FUNC_MAX);
710 :
711 : // Only generate each function once
712 0 : if (program->functions_location[fixup->attr.function])
713 0 : continue;
714 :
715 0 : switch (fixup->attr.function) {
716 0 : case BF_FIXUP_FUNC_UPDATE_COUNTERS:
717 0 : r = _bf_program_generate_update_counters(program);
718 0 : if (r)
719 : return r;
720 : break;
721 0 : default:
722 0 : bf_abort("unsupported fixup function, this should not happen: %d",
723 : fixup->attr.function);
724 : break;
725 : }
726 :
727 0 : program->functions_location[fixup->attr.function] = off;
728 : }
729 :
730 : return 0;
731 : }
732 :
733 41 : int bf_program_emit(struct bf_program *program, struct bpf_insn insn)
734 : {
735 : int r;
736 :
737 41 : bf_assert(program);
738 :
739 41 : if (program->img_size == program->img_cap) {
740 3 : r = bf_program_grow_img(program);
741 3 : if (r)
742 : return r;
743 : }
744 :
745 41 : program->img[program->img_size++] = insn;
746 :
747 41 : return 0;
748 : }
749 :
750 0 : int bf_program_emit_kfunc_call(struct bf_program *program, const char *name)
751 : {
752 : int r;
753 :
754 0 : bf_assert(program);
755 0 : bf_assert(name);
756 :
757 0 : r = bf_btf_get_id(name);
758 0 : if (r < 0)
759 : return r;
760 :
761 0 : EMIT(program, ((struct bpf_insn) {.code = BPF_JMP | BPF_CALL,
762 : .dst_reg = 0,
763 : .src_reg = BPF_PSEUDO_KFUNC_CALL,
764 : .off = 0,
765 : .imm = r}));
766 :
767 0 : return 0;
768 : }
769 :
770 0 : int bf_program_emit_fixup(struct bf_program *program, enum bf_fixup_type type,
771 : struct bpf_insn insn, const union bf_fixup_attr *attr)
772 : {
773 0 : _free_bf_fixup_ struct bf_fixup *fixup = NULL;
774 : int r;
775 :
776 0 : bf_assert(program);
777 :
778 0 : if (program->img_size == program->img_cap) {
779 0 : r = bf_program_grow_img(program);
780 0 : if (r)
781 : return r;
782 : }
783 :
784 0 : r = bf_fixup_new(&fixup, type, program->img_size, attr);
785 0 : if (r)
786 : return r;
787 :
788 0 : r = bf_list_add_tail(&program->fixups, fixup);
789 0 : if (r)
790 : return r;
791 :
792 0 : TAKE_PTR(fixup);
793 :
794 : /* This call could fail and return an error, in which case it is not
795 : * properly handled. However, this shouldn't be an issue as we previously
796 : * test whether enough room is available in cgen.img, which is currently
797 : * the only reason for EMIT() to fail. */
798 0 : EMIT(program, insn);
799 :
800 : return 0;
801 : }
802 :
803 2 : int bf_program_emit_fixup_call(struct bf_program *program,
804 : enum bf_fixup_func function)
805 : {
806 1 : _free_bf_fixup_ struct bf_fixup *fixup = NULL;
807 : int r;
808 :
809 2 : bf_assert(program);
810 :
811 1 : if (program->img_size == program->img_cap) {
812 1 : r = bf_program_grow_img(program);
813 1 : if (r)
814 : return r;
815 : }
816 :
817 1 : r = bf_fixup_new(&fixup, BF_FIXUP_TYPE_FUNC_CALL, program->img_size, NULL);
818 1 : if (r)
819 : return r;
820 :
821 1 : fixup->attr.function = function;
822 :
823 1 : r = bf_list_add_tail(&program->fixups, fixup);
824 1 : if (r)
825 : return r;
826 :
827 1 : TAKE_PTR(fixup);
828 :
829 : /* This call could fail and return an error, in which case it is not
830 : * properly handled. However, this shouldn't be an issue as we previously
831 : * test whether enough room is available in cgen.img, which is currently
832 : * the only reason for EMIT() to fail. */
833 1 : EMIT(program, BPF_CALL_REL(0));
834 :
835 1 : return 0;
836 : }
837 :
838 0 : int bf_program_generate(struct bf_program *program)
839 : {
840 0 : const struct bf_chain *chain = program->runtime.chain;
841 : int r;
842 :
843 : // Save the program's argument into the context.
844 0 : EMIT(program,
845 : BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, BF_PROG_CTX_OFF(arg)));
846 :
847 : // Reset the protocol ID registers
848 0 : EMIT(program, BPF_MOV64_IMM(BPF_REG_7, 0));
849 0 : EMIT(program, BPF_MOV64_IMM(BPF_REG_8, 0));
850 :
851 0 : r = program->runtime.ops->gen_inline_prologue(program);
852 0 : if (r)
853 : return r;
854 :
855 0 : bf_list_foreach (&chain->rules, rule_node) {
856 0 : r = _bf_program_generate_rule(program,
857 0 : bf_list_node_get_data(rule_node));
858 0 : if (r)
859 : return r;
860 : }
861 :
862 0 : r = program->runtime.ops->gen_inline_epilogue(program);
863 0 : if (r)
864 : return r;
865 :
866 : // Call the update counters function
867 0 : EMIT(program, BPF_MOV32_IMM(BPF_REG_1, bf_list_size(&chain->rules)));
868 0 : EMIT(program,
869 : BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, BF_PROG_CTX_OFF(pkt_size)));
870 0 : EMIT_FIXUP_CALL(program, BF_FIXUP_FUNC_UPDATE_COUNTERS);
871 :
872 0 : EMIT(program, BPF_MOV64_IMM(BPF_REG_0, program->runtime.ops->get_verdict(
873 : chain->policy)));
874 0 : EMIT(program, BPF_EXIT_INSN());
875 :
876 0 : r = _bf_program_generate_functions(program);
877 0 : if (r)
878 : return r;
879 :
880 0 : r = _bf_program_fixup(program, BF_FIXUP_TYPE_FUNC_CALL);
881 0 : if (r)
882 0 : return bf_err_r(r, "failed to generate function call fixups");
883 :
884 : return 0;
885 : }
886 :
887 0 : static int _bf_program_load_printer_map(struct bf_program *program)
888 : {
889 0 : _cleanup_free_ void *pstr = NULL;
890 : size_t pstr_len;
891 0 : uint32_t key = 0;
892 : int r;
893 :
894 0 : bf_assert(program);
895 :
896 0 : r = bf_printer_assemble(program->printer, &pstr, &pstr_len);
897 0 : if (r)
898 0 : return bf_err_r(r, "failed to assemble printer map string");
899 :
900 0 : r = bf_map_set_value_size(program->pmap, pstr_len);
901 0 : if (r < 0)
902 : return r;
903 :
904 0 : r = bf_map_create(program->pmap, 0);
905 0 : if (r < 0)
906 : return r;
907 :
908 0 : r = bf_map_set_elem(program->pmap, &key, pstr);
909 0 : if (r)
910 : return r;
911 :
912 0 : r = _bf_program_fixup(program, BF_FIXUP_TYPE_PRINTER_MAP_FD);
913 0 : if (r) {
914 0 : bf_map_destroy(program->pmap);
915 0 : return bf_err_r(r, "failed to fixup printer map FD");
916 : }
917 :
918 : return 0;
919 : }
920 :
921 0 : static int _bf_program_load_counters_map(struct bf_program *program)
922 : {
923 0 : _cleanup_close_ int _fd = -1;
924 : int r;
925 :
926 0 : bf_assert(program);
927 :
928 0 : r = bf_map_set_n_elems(program->cmap,
929 0 : bf_list_size(&program->runtime.chain->rules) + 2);
930 0 : if (r < 0)
931 : return r;
932 :
933 0 : r = bf_map_create(program->cmap, 0);
934 0 : if (r < 0)
935 : return r;
936 :
937 0 : r = _bf_program_fixup(program, BF_FIXUP_TYPE_COUNTERS_MAP_FD);
938 0 : if (r < 0) {
939 0 : bf_map_destroy(program->cmap);
940 0 : return bf_err_r(r, "failed to fixup counters map FD");
941 : }
942 :
943 : return 0;
944 : }
945 :
946 0 : static int _bf_program_load_sets_maps(struct bf_program *new_prog)
947 : {
948 : const bf_list_node *set_node;
949 : const bf_list_node *map_node;
950 : int r;
951 :
952 0 : bf_assert(new_prog);
953 :
954 0 : set_node = bf_list_get_head(&new_prog->runtime.chain->sets);
955 0 : map_node = bf_list_get_head(&new_prog->sets);
956 :
957 : // Fill each bf_map with its set's content
958 0 : while (set_node && map_node) {
959 : _cleanup_free_ uint8_t *values = NULL;
960 : _cleanup_free_ uint8_t *keys = NULL;
961 0 : struct bf_set *set = bf_list_node_get_data(set_node);
962 0 : struct bf_map *map = bf_list_node_get_data(map_node);
963 0 : size_t nelems = bf_list_size(&set->elems);
964 0 : union bpf_attr attr = {};
965 : size_t idx = 0;
966 :
967 0 : r = bf_map_create(map, 0);
968 0 : if (r < 0) {
969 0 : r = bf_err_r(r, "failed to create BPF map for set");
970 0 : goto err_destroy_maps;
971 : }
972 :
973 0 : values = malloc(nelems);
974 0 : if (!values) {
975 0 : r = bf_err_r(errno, "failed to allocate map values");
976 0 : goto err_destroy_maps;
977 : }
978 :
979 0 : keys = malloc(set->elem_size * nelems);
980 0 : if (!keys) {
981 0 : r = bf_err_r(errno, "failed to allocate map keys");
982 0 : goto err_destroy_maps;
983 : }
984 :
985 0 : bf_list_foreach (&set->elems, elem_node) {
986 0 : void *elem = bf_list_node_get_data(elem_node);
987 :
988 0 : memcpy(keys + (idx * set->elem_size), elem, set->elem_size);
989 0 : values[idx] = 1;
990 0 : ++idx;
991 : }
992 :
993 0 : attr.batch.map_fd = map->fd;
994 0 : attr.batch.keys = (unsigned long long)keys;
995 0 : attr.batch.values = (unsigned long long)values;
996 0 : attr.batch.count = nelems;
997 0 : attr.batch.flags = BPF_ANY;
998 :
999 0 : r = bf_bpf(BPF_MAP_UPDATE_BATCH, &attr);
1000 0 : if (r < 0) {
1001 0 : bf_err_r(r, "failed to add set elements to the map");
1002 0 : goto err_destroy_maps;
1003 : }
1004 :
1005 0 : set_node = bf_list_node_next(set_node);
1006 0 : map_node = bf_list_node_next(map_node);
1007 : }
1008 :
1009 0 : r = _bf_program_fixup(new_prog, BF_FIXUP_TYPE_SET_MAP_FD);
1010 0 : if (r < 0)
1011 0 : goto err_destroy_maps;
1012 :
1013 : return 0;
1014 :
1015 0 : err_destroy_maps:
1016 0 : bf_list_foreach (&new_prog->sets, map_node)
1017 0 : bf_map_destroy(bf_list_node_get_data(map_node));
1018 : return r;
1019 : }
1020 :
1021 0 : int bf_program_load(struct bf_program *prog)
1022 : {
1023 : _cleanup_free_ char *log_buf = NULL;
1024 : int r;
1025 :
1026 0 : bf_assert(prog && prog->img);
1027 :
1028 0 : r = _bf_program_load_sets_maps(prog);
1029 0 : if (r)
1030 : return r;
1031 :
1032 0 : r = _bf_program_load_counters_map(prog);
1033 0 : if (r)
1034 : return r;
1035 :
1036 0 : r = _bf_program_load_printer_map(prog);
1037 0 : if (r)
1038 : return r;
1039 :
1040 0 : if (bf_opts_is_verbose(BF_VERBOSE_DEBUG)) {
1041 0 : log_buf = malloc(_BF_LOG_BUF_SIZE);
1042 0 : if (!log_buf) {
1043 0 : return bf_err_r(-ENOMEM,
1044 : "failed to allocate BPF_PROG_LOAD logs buffer");
1045 : }
1046 : }
1047 :
1048 0 : r = bf_bpf_prog_load(
1049 0 : prog->prog_name, bf_hook_to_bpf_prog_type(prog->runtime.chain->hook),
1050 0 : prog->img, prog->img_size,
1051 0 : bf_hook_to_bpf_attach_type(prog->runtime.chain->hook), log_buf,
1052 : log_buf ? _BF_LOG_BUF_SIZE : 0, bf_ctx_token(), &prog->runtime.prog_fd);
1053 0 : if (r) {
1054 0 : return bf_err_r(r, "failed to load bf_program (%lu bytes):\n%s\nerrno:",
1055 : prog->img_size, log_buf ? log_buf : "<NO LOG BUFFER>");
1056 : }
1057 :
1058 0 : if (bf_opts_is_verbose(BF_VERBOSE_BYTECODE))
1059 0 : bf_program_dump_bytecode(prog);
1060 :
1061 : return r;
1062 : }
1063 :
1064 0 : int bf_program_attach(struct bf_program *prog, struct bf_hookopts **hookopts)
1065 : {
1066 : int r;
1067 :
1068 0 : bf_assert(prog && hookopts);
1069 :
1070 0 : r = bf_link_attach(prog->link, prog->runtime.chain->hook, hookopts,
1071 : prog->runtime.prog_fd);
1072 0 : if (r) {
1073 0 : return bf_err_r(r, "failed to attach bf_link for %s program",
1074 : bf_flavor_to_str(prog->flavor));
1075 : }
1076 :
1077 : return r;
1078 : }
1079 :
1080 0 : void bf_program_detach(struct bf_program *prog)
1081 : {
1082 0 : bf_assert(prog);
1083 :
1084 0 : bf_link_detach(prog->link);
1085 0 : }
1086 :
1087 0 : void bf_program_unload(struct bf_program *prog)
1088 : {
1089 0 : bf_assert(prog);
1090 :
1091 0 : closep(&prog->runtime.prog_fd);
1092 0 : bf_link_detach(prog->link);
1093 0 : bf_map_destroy(prog->cmap);
1094 0 : bf_map_destroy(prog->pmap);
1095 0 : bf_list_foreach (&prog->sets, map_node)
1096 0 : bf_map_destroy(bf_list_node_get_data(map_node));
1097 0 : }
1098 :
1099 0 : int bf_program_get_counter(const struct bf_program *program,
1100 : uint32_t counter_idx, struct bf_counter *counter)
1101 : {
1102 0 : bf_assert(program);
1103 0 : bf_assert(counter);
1104 :
1105 : int r;
1106 :
1107 0 : r = bf_bpf_map_lookup_elem(program->cmap->fd, &counter_idx, counter);
1108 0 : if (r < 0)
1109 0 : return bf_err_r(errno, "failed to lookup counters map");
1110 :
1111 : return 0;
1112 : }
1113 :
1114 0 : int bf_cgen_set_counters(struct bf_program *program,
1115 : const struct bf_counter *counters)
1116 : {
1117 : UNUSED(program);
1118 : UNUSED(counters);
1119 :
1120 0 : return -ENOTSUP;
1121 : }
1122 :
1123 0 : int bf_program_pin(struct bf_program *prog, int dir_fd)
1124 : {
1125 : const char *name;
1126 : int r;
1127 :
1128 0 : bf_assert(prog);
1129 :
1130 0 : name = prog->runtime.chain->name;
1131 :
1132 0 : r = bf_bpf_obj_pin(prog->prog_name, prog->runtime.prog_fd, dir_fd);
1133 0 : if (r) {
1134 0 : bf_err_r(r, "failed to pin BPF program for '%s'", name);
1135 0 : goto err_unpin_all;
1136 : }
1137 :
1138 0 : r = bf_map_pin(prog->cmap, dir_fd);
1139 0 : if (r) {
1140 0 : bf_err_r(r, "failed to pin BPF counters map for '%s'", name);
1141 0 : goto err_unpin_all;
1142 : }
1143 :
1144 0 : r = bf_map_pin(prog->pmap, dir_fd);
1145 0 : if (r) {
1146 0 : bf_err_r(r, "failed to pin BPF printer map for '%s'", name);
1147 0 : goto err_unpin_all;
1148 : }
1149 :
1150 0 : bf_list_foreach (&prog->sets, set_node) {
1151 0 : r = bf_map_pin(bf_list_node_get_data(set_node), dir_fd);
1152 0 : if (r) {
1153 0 : bf_err_r(r, "failed to pin BPF set map for '%s'", name);
1154 0 : goto err_unpin_all;
1155 : }
1156 : }
1157 :
1158 : // If a link exists, pin it too.
1159 0 : if (prog->link->hookopts) {
1160 0 : r = bf_link_pin(prog->link, dir_fd);
1161 0 : if (r) {
1162 0 : bf_err_r(r, "failed to pin BPF link for '%s'", name);
1163 0 : goto err_unpin_all;
1164 : }
1165 : }
1166 :
1167 : return 0;
1168 :
1169 0 : err_unpin_all:
1170 0 : bf_program_unpin(prog, dir_fd);
1171 0 : return r;
1172 : }
1173 :
1174 0 : void bf_program_unpin(struct bf_program *prog, int dir_fd)
1175 : {
1176 0 : bf_assert(prog);
1177 :
1178 0 : bf_map_unpin(prog->cmap, dir_fd);
1179 0 : bf_map_unpin(prog->pmap, dir_fd);
1180 :
1181 0 : bf_list_foreach (&prog->sets, set_node)
1182 0 : bf_map_unpin(bf_list_node_get_data(set_node), dir_fd);
1183 :
1184 0 : bf_link_unpin(prog->link, dir_fd);
1185 :
1186 0 : unlinkat(dir_fd, prog->prog_name, 0);
1187 0 : }
1188 :
1189 0 : size_t bf_program_chain_counter_idx(const struct bf_program *program)
1190 : {
1191 0 : return bf_list_size(&program->runtime.chain->rules);
1192 : }
1193 :
1194 0 : size_t bf_program_error_counter_idx(const struct bf_program *program)
1195 : {
1196 0 : return bf_list_size(&program->runtime.chain->rules) + 1;
1197 : }
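A minimal usage sketch of the lifecycle exposed by this file, assuming a struct bf_chain and struct bf_hookopts built elsewhere in bpfilter; the caller shown here is hypothetical, and the ownership and cleanup details of the real bf_cgen driver are abbreviated:

    #include "bpfilter/cgen/program.h"

    /* Hypothetical helper: generate, load, and attach a program for a chain.
     * On success the caller receives the bf_program; in bpfilter the bf_cgen
     * object plays this role and frees the program later. */
    static int hypothetical_attach_chain(const struct bf_chain *chain,
                                         struct bf_hookopts **hookopts,
                                         struct bf_program **out)
    {
        struct bf_program *prog = NULL;
        int r;

        r = bf_program_new(&prog, chain);          /* allocates maps, link, printer */
        if (r < 0)
            return r;

        r = bf_program_generate(prog);             /* prologue, rules, epilogue, fixups */
        if (!r)
            r = bf_program_load(prog);             /* creates the BPF maps, BPF_PROG_LOAD */
        if (!r)
            r = bf_program_attach(prog, hookopts); /* attaches through the bf_link */

        if (r) {
            bf_program_free(&prog);                /* releases maps, link, printer, prog_fd */
            return r;
        }

        *out = prog;
        return 0;
    }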