Line data Source code
1 : /* SPDX-License-Identifier: GPL-2.0-only */
2 : /*
3 : * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
4 : */
5 :
6 : #include "bpfilter/cgen/program.h"
7 :
8 : #include <linux/bpf.h>
9 : #include <linux/bpf_common.h>
10 : #include <linux/limits.h>
11 :
12 : #include <errno.h>
13 : #include <fcntl.h>
14 : #include <limits.h>
15 : #include <stddef.h>
16 : #include <stdint.h>
17 : #include <stdio.h>
18 : #include <stdlib.h>
19 : #include <string.h>
20 : #include <unistd.h>
21 :
22 : #include "bpfilter/cgen/cgroup.h"
23 : #include "bpfilter/cgen/dump.h"
24 : #include "bpfilter/cgen/fixup.h"
25 : #include "bpfilter/cgen/jmp.h"
26 : #include "bpfilter/cgen/matcher/ip4.h"
27 : #include "bpfilter/cgen/matcher/ip6.h"
28 : #include "bpfilter/cgen/matcher/meta.h"
29 : #include "bpfilter/cgen/matcher/set.h"
30 : #include "bpfilter/cgen/matcher/tcp.h"
31 : #include "bpfilter/cgen/matcher/udp.h"
32 : #include "bpfilter/cgen/nf.h"
33 : #include "bpfilter/cgen/printer.h"
34 : #include "bpfilter/cgen/prog/link.h"
35 : #include "bpfilter/cgen/prog/map.h"
36 : #include "bpfilter/cgen/stub.h"
37 : #include "bpfilter/cgen/tc.h"
38 : #include "bpfilter/cgen/xdp.h"
39 : #include "bpfilter/ctx.h"
40 : #include "bpfilter/opts.h"
41 : #include "core/bpf.h"
42 : #include "core/btf.h"
43 : #include "core/chain.h"
44 : #include "core/counter.h"
45 : #include "core/dump.h"
46 : #include "core/flavor.h"
47 : #include "core/helper.h"
48 : #include "core/hook.h"
49 : #include "core/io.h"
50 : #include "core/list.h"
51 : #include "core/logger.h"
52 : #include "core/marsh.h"
53 : #include "core/matcher.h"
54 : #include "core/rule.h"
55 : #include "core/set.h"
56 : #include "core/verdict.h"
57 :
58 : #include "external/filter.h"
59 :
60 : #define _BF_LOG_BUF_SIZE \
61 : (UINT32_MAX >> 8) /* verifier maximum in kernels <= 5.1 */
62 : #define _BF_PROGRAM_DEFAULT_IMG_SIZE (1 << 6)
63 :
64 : static const struct bf_flavor_ops *bf_flavor_ops_get(enum bf_flavor flavor)
65 : {
66 : static const struct bf_flavor_ops *flavor_ops[] = {
67 : [BF_FLAVOR_TC] = &bf_flavor_ops_tc,
68 : [BF_FLAVOR_NF] = &bf_flavor_ops_nf,
69 : [BF_FLAVOR_XDP] = &bf_flavor_ops_xdp,
70 : [BF_FLAVOR_CGROUP] = &bf_flavor_ops_cgroup,
71 : };
72 :
73 : static_assert(ARRAY_SIZE(flavor_ops) == _BF_FLAVOR_MAX,
74 : "missing entries in bf_flavor_ops array");
75 :
76 4 : return flavor_ops[flavor];
77 : }
78 :
79 4 : int bf_program_new(struct bf_program **program, const struct bf_chain *chain)
80 : {
81 4 : _cleanup_bf_program_ struct bf_program *_program = NULL;
82 : char name[BPF_OBJ_NAME_LEN];
83 : uint32_t set_idx = 0;
84 : int r;
85 :
86 4 : bf_assert(program && chain);
87 :
88 4 : _program = calloc(1, sizeof(*_program));
89 4 : if (!_program)
90 : return -ENOMEM;
91 :
92 4 : _program->flavor = bf_hook_to_flavor(chain->hook);
93 4 : _program->runtime.prog_fd = -1;
94 4 : _program->runtime.ops = bf_flavor_ops_get(_program->flavor);
95 4 : _program->runtime.chain = chain;
96 :
97 4 : (void)snprintf(_program->prog_name, BPF_OBJ_NAME_LEN, "%s", "bf_prog");
98 :
99 4 : r = bf_map_new(&_program->cmap, "counters_map", BF_MAP_TYPE_COUNTERS,
100 : BF_MAP_BPF_TYPE_ARRAY, sizeof(uint32_t),
101 : sizeof(struct bf_counter), 1);
102 4 : if (r < 0)
103 0 : return bf_err_r(r, "failed to create the counters bf_map object");
104 :
105 4 : r = bf_map_new(&_program->pmap, "printer_map", BF_MAP_TYPE_PRINTER,
106 : BF_MAP_BPF_TYPE_ARRAY, sizeof(uint32_t),
107 : BF_MAP_VALUE_SIZE_UNKNOWN, 1);
108 4 : if (r < 0)
109 0 : return bf_err_r(r, "failed to create the printer bf_map object");
110 :
111 4 : _program->sets = bf_map_list();
112 8 : bf_list_foreach (&chain->sets, set_node) {
113 0 : struct bf_set *set = bf_list_node_get_data(set_node);
114 0 : _cleanup_bf_map_ struct bf_map *map = NULL;
115 :
116 0 : (void)snprintf(name, BPF_OBJ_NAME_LEN, "set_%04x", (uint8_t)set_idx++);
117 0 : r = bf_map_new(&map, name, BF_MAP_TYPE_SET, BF_MAP_BPF_TYPE_HASH,
118 0 : set->elem_size, 1, bf_list_size(&set->elems));
119 0 : if (r < 0)
120 : return r;
121 :
122 0 : r = bf_list_add_tail(&_program->sets, map);
123 0 : if (r < 0)
124 : return r;
125 0 : TAKE_PTR(map);
126 : };
127 :
128 4 : r = bf_link_new(&_program->link, "bf_link");
129 4 : if (r)
130 : return r;
131 :
132 4 : r = bf_printer_new(&_program->printer);
133 4 : if (r)
134 : return r;
135 :
136 4 : bf_list_init(&_program->fixups,
137 4 : (bf_list_ops[]) {{.free = (bf_list_ops_free)bf_fixup_free}});
138 :
139 4 : *program = TAKE_PTR(_program);
140 :
141 4 : return 0;
142 : }
143 :
144 17 : void bf_program_free(struct bf_program **program)
145 : {
146 17 : if (!*program)
147 : return;
148 :
149 4 : bf_list_clean(&(*program)->fixups);
150 4 : free((*program)->img);
151 :
152 : /* Close the file descriptors if they are still open. If --transient is
153 : * used, then the file descriptors are already closed (as
154 : * bf_program_unload() has been called). Otherwise, bf_program_unload()
155 : * won't be called, but the programs are pinned, so they can be closed
156 : * safely. */
157 4 : closep(&(*program)->runtime.prog_fd);
158 :
159 4 : bf_map_free(&(*program)->cmap);
160 4 : bf_map_free(&(*program)->pmap);
161 4 : bf_list_clean(&(*program)->sets);
162 4 : bf_link_free(&(*program)->link);
163 4 : bf_printer_free(&(*program)->printer);
164 :
165 4 : free(*program);
166 4 : *program = NULL;
167 : }
168 :
169 0 : int bf_program_marsh(const struct bf_program *program, struct bf_marsh **marsh)
170 : {
171 0 : _cleanup_bf_marsh_ struct bf_marsh *_marsh = NULL;
172 : int r;
173 :
174 0 : bf_assert(program);
175 0 : bf_assert(marsh);
176 :
177 0 : r = bf_marsh_new(&_marsh, NULL, 0);
178 0 : if (r < 0)
179 : return r;
180 :
181 : {
182 : // Serialize bf_program.counters
183 0 : _cleanup_bf_marsh_ struct bf_marsh *counters_elem = NULL;
184 :
185 0 : r = bf_map_marsh(program->cmap, &counters_elem);
186 0 : if (r < 0)
187 : return r;
188 :
189 0 : r = bf_marsh_add_child_obj(&_marsh, counters_elem);
190 0 : if (r < 0)
191 : return r;
192 : }
193 :
194 : {
195 : // Serialize bf_program.pmap
196 0 : _cleanup_bf_marsh_ struct bf_marsh *pmap_elem = NULL;
197 :
198 0 : r = bf_map_marsh(program->pmap, &pmap_elem);
199 0 : if (r < 0)
200 : return r;
201 :
202 0 : r = bf_marsh_add_child_obj(&_marsh, pmap_elem);
203 0 : if (r < 0)
204 : return r;
205 : }
206 :
207 : {
208 : // Serialize bf_program.sets
209 0 : _cleanup_bf_marsh_ struct bf_marsh *sets_elem = NULL;
210 :
211 0 : r = bf_list_marsh(&program->sets, &sets_elem);
212 0 : if (r < 0)
213 : return r;
214 :
215 0 : r = bf_marsh_add_child_obj(&_marsh, sets_elem);
216 0 : if (r < 0) {
217 0 : return bf_err_r(
218 : r,
219 : "failed to insert serialized sets into bf_program serialized data");
220 : }
221 : }
222 :
223 : {
224 : // Serialize bf_program.links
225 0 : _cleanup_bf_marsh_ struct bf_marsh *links_elem = NULL;
226 :
227 0 : r = bf_link_marsh(program->link, &links_elem);
228 0 : if (r)
229 0 : return bf_err_r(r, "failed to serialize bf_program.link");
230 :
231 0 : r = bf_marsh_add_child_obj(&_marsh, links_elem);
232 0 : if (r) {
233 0 : return bf_err_r(
234 : r,
235 : "failed to insert serialized link into bf_program serialized data");
236 : }
237 : }
238 :
239 : {
240 : // Serialise bf_program.printer
241 0 : _cleanup_bf_marsh_ struct bf_marsh *child = NULL;
242 :
243 0 : r = bf_printer_marsh(program->printer, &child);
244 0 : if (r)
245 0 : return bf_err_r(r, "failed to marsh bf_printer object");
246 :
247 0 : r = bf_marsh_add_child_obj(&_marsh, child);
248 0 : if (r)
249 0 : return bf_err_r(r, "failed to append object to marsh");
250 : }
251 :
252 0 : r |= bf_marsh_add_child_raw(&_marsh, &program->num_counters,
253 : sizeof(program->num_counters));
254 0 : r |= bf_marsh_add_child_raw(&_marsh, program->img,
255 0 : program->img_size * sizeof(struct bpf_insn));
256 0 : if (r)
257 0 : return bf_err_r(r, "Failed to serialize program");
258 :
259 0 : *marsh = TAKE_PTR(_marsh);
260 :
261 0 : return 0;
262 : }
263 :
264 0 : int bf_program_unmarsh(const struct bf_marsh *marsh,
265 : struct bf_program **program,
266 : const struct bf_chain *chain, int dir_fd)
267 : {
268 0 : _cleanup_bf_program_ struct bf_program *_program = NULL;
269 : struct bf_marsh *child = NULL;
270 : int r;
271 :
272 0 : bf_assert(marsh && program);
273 :
274 0 : r = bf_program_new(&_program, chain);
275 0 : if (r < 0)
276 : return r;
277 :
278 0 : if (!(child = bf_marsh_next_child(marsh, child)))
279 : return -EINVAL;
280 0 : bf_map_free(&_program->cmap);
281 0 : r = bf_map_new_from_marsh(&_program->cmap, dir_fd, child);
282 0 : if (r < 0)
283 : return r;
284 :
285 0 : if (!(child = bf_marsh_next_child(marsh, child)))
286 : return -EINVAL;
287 0 : bf_map_free(&_program->pmap);
288 0 : r = bf_map_new_from_marsh(&_program->pmap, dir_fd, child);
289 0 : if (r < 0)
290 : return r;
291 :
292 : /** @todo Avoid creating and filling the list in @ref bf_program_new before
293 : * trashing it all here. Eventually, this function will be replaced with
294 : * @c bf_program_new_from_marsh and this issue could be solved by **not**
295 : * relying on @ref bf_program_new to allocate an initialize @p _program . */
296 0 : bf_list_clean(&_program->sets);
297 0 : _program->sets = bf_map_list();
298 :
299 0 : if (!(child = bf_marsh_next_child(marsh, child)))
300 : return -EINVAL;
301 : {
302 : // Unmarsh bf_program.sets
303 : struct bf_marsh *set_elem = NULL;
304 :
305 0 : while ((set_elem = bf_marsh_next_child(child, set_elem))) {
306 0 : _cleanup_bf_map_ struct bf_map *map = NULL;
307 :
308 0 : r = bf_map_new_from_marsh(&map, dir_fd, set_elem);
309 0 : if (r < 0)
310 : return r;
311 :
312 0 : r = bf_list_add_tail(&_program->sets, map);
313 0 : if (r < 0)
314 : return r;
315 :
316 0 : TAKE_PTR(map);
317 : }
318 : }
319 :
320 : // Unmarsh bf_program.links
321 0 : if (!(child = bf_marsh_next_child(marsh, child)))
322 : return -EINVAL;
323 :
324 0 : bf_link_free(&_program->link);
325 0 : r = bf_link_new_from_marsh(&_program->link, dir_fd, child);
326 0 : if (r)
327 0 : return bf_err_r(r, "failed to restore bf_program.link");
328 :
329 : // Unmarsh bf_program.printer
330 0 : child = bf_marsh_next_child(marsh, child);
331 0 : if (!child)
332 0 : return bf_err_r(-EINVAL, "failed to find valid child");
333 :
334 0 : bf_printer_free(&_program->printer);
335 0 : r = bf_printer_new_from_marsh(&_program->printer, child);
336 0 : if (r)
337 0 : return bf_err_r(r, "failed to restore bf_printer object");
338 :
339 0 : if (!(child = bf_marsh_next_child(marsh, child)))
340 : return -EINVAL;
341 0 : memcpy(&_program->num_counters, child->data,
342 : sizeof(_program->num_counters));
343 :
344 0 : if (!(child = bf_marsh_next_child(marsh, child)))
345 : return -EINVAL;
346 0 : _program->img = bf_memdup(child->data, child->data_len);
347 0 : _program->img_size = child->data_len / sizeof(struct bpf_insn);
348 0 : _program->img_cap = child->data_len / sizeof(struct bpf_insn);
349 :
350 0 : if (bf_marsh_next_child(marsh, child))
351 0 : bf_warn("codegen marsh has more children than expected");
352 :
353 0 : r = bf_bpf_obj_get(_program->prog_name, dir_fd, &_program->runtime.prog_fd);
354 0 : if (r < 0)
355 0 : return bf_err_r(r, "failed to get prog fd");
356 :
357 0 : *program = TAKE_PTR(_program);
358 :
359 0 : return 0;
360 : }
361 :
362 0 : void bf_program_dump(const struct bf_program *program, prefix_t *prefix)
363 : {
364 0 : bf_assert(program);
365 0 : bf_assert(prefix);
366 :
367 0 : DUMP(prefix, "struct bf_program at %p", program);
368 :
369 0 : bf_dump_prefix_push(prefix);
370 :
371 0 : DUMP(prefix, "num_counters: %lu", program->num_counters);
372 0 : DUMP(prefix, "prog_name: %s", program->prog_name);
373 :
374 0 : DUMP(prefix, "cmap: struct bf_map *");
375 0 : bf_dump_prefix_push(prefix);
376 0 : bf_map_dump(program->cmap, bf_dump_prefix_last(prefix));
377 0 : bf_dump_prefix_pop(prefix);
378 :
379 0 : DUMP(prefix, "pmap: struct bf_map *");
380 0 : bf_dump_prefix_push(prefix);
381 0 : bf_map_dump(program->pmap, bf_dump_prefix_last(prefix));
382 0 : bf_dump_prefix_pop(prefix);
383 :
384 0 : DUMP(prefix, "sets: bf_list<bf_map>[%lu]", bf_list_size(&program->sets));
385 0 : bf_dump_prefix_push(prefix);
386 0 : bf_list_foreach (&program->sets, map_node) {
387 0 : struct bf_map *map = bf_list_node_get_data(map_node);
388 :
389 0 : if (bf_list_is_tail(&program->sets, map_node))
390 0 : bf_dump_prefix_last(prefix);
391 :
392 0 : bf_map_dump(map, prefix);
393 : }
394 0 : bf_dump_prefix_pop(prefix);
395 :
396 0 : DUMP(prefix, "link: struct bf_link *");
397 0 : bf_dump_prefix_push(prefix);
398 0 : bf_link_dump(program->link, prefix);
399 0 : bf_dump_prefix_pop(prefix);
400 :
401 0 : DUMP(prefix, "printer: struct bf_printer *");
402 0 : bf_dump_prefix_push(prefix);
403 0 : bf_printer_dump(program->printer, prefix);
404 0 : bf_dump_prefix_pop(prefix);
405 :
406 0 : DUMP(prefix, "img: %p", program->img);
407 0 : DUMP(prefix, "img_size: %lu", program->img_size);
408 0 : DUMP(prefix, "img_cap: %lu", program->img_cap);
409 :
410 0 : DUMP(prefix, "fixups: bf_list<struct bf_fixup>[%lu]",
411 : bf_list_size(&program->fixups));
412 0 : bf_dump_prefix_push(prefix);
413 0 : bf_list_foreach (&program->fixups, fixup_node) {
414 0 : struct bf_fixup *fixup = bf_list_node_get_data(fixup_node);
415 :
416 0 : if (bf_list_is_tail(&program->fixups, fixup_node))
417 0 : bf_dump_prefix_last(prefix);
418 :
419 0 : bf_fixup_dump(fixup, prefix);
420 : }
421 0 : bf_dump_prefix_pop(prefix);
422 :
423 0 : DUMP(bf_dump_prefix_last(prefix), "runtime: <anonymous>");
424 0 : bf_dump_prefix_push(prefix);
425 0 : DUMP(prefix, "prog_fd: %d", program->runtime.prog_fd);
426 0 : DUMP(bf_dump_prefix_last(prefix), "ops: %p", program->runtime.ops);
427 0 : bf_dump_prefix_pop(prefix);
428 :
429 0 : bf_dump_prefix_pop(prefix);
430 0 : }
431 :
432 0 : static inline size_t _bf_round_next_power_of_2(size_t value)
433 : {
434 0 : value--;
435 0 : value |= value >> 1;
436 0 : value |= value >> 2;
437 0 : value |= value >> 4;
438 0 : value |= value >> 8;
439 0 : value |= value >> 16;
440 :
441 0 : return ++value;
442 : }
443 :
444 4 : int bf_program_grow_img(struct bf_program *program)
445 : {
446 : size_t new_cap = _BF_PROGRAM_DEFAULT_IMG_SIZE;
447 : int r;
448 :
449 4 : bf_assert(program);
450 :
451 4 : if (program->img)
452 0 : new_cap = _bf_round_next_power_of_2(program->img_cap << 1);
453 :
454 4 : r = bf_realloc((void **)&program->img, new_cap * sizeof(struct bpf_insn));
455 4 : if (r < 0) {
456 0 : return bf_err_r(r, "failed to grow program img from %lu to %lu insn",
457 : program->img_cap, new_cap);
458 : }
459 :
460 4 : program->img_cap = new_cap;
461 :
462 4 : return 0;
463 : }
464 :
465 0 : static void _bf_program_fixup_insn(struct bpf_insn *insn,
466 : enum bf_fixup_insn type, int32_t value)
467 : {
468 0 : switch (type) {
469 0 : case BF_FIXUP_INSN_OFF:
470 0 : bf_assert(!insn->off);
471 0 : bf_assert(value < SHRT_MAX);
472 0 : insn->off = (int16_t)value;
473 0 : break;
474 0 : case BF_FIXUP_INSN_IMM:
475 0 : bf_assert(!insn->imm);
476 0 : insn->imm = value;
477 0 : break;
478 0 : default:
479 0 : bf_abort(
480 : "unsupported fixup instruction type, this should not happen: %d",
481 : type);
482 : break;
483 : }
484 0 : }
485 :
486 0 : static int _bf_program_fixup(struct bf_program *program,
487 : enum bf_fixup_type type)
488 : {
489 0 : bf_assert(program);
490 0 : bf_assert(type >= 0 && type < _BF_FIXUP_TYPE_MAX);
491 :
492 0 : bf_list_foreach (&program->fixups, fixup_node) {
493 : enum bf_fixup_insn insn_type = _BF_FIXUP_INSN_MAX;
494 : int32_t value;
495 : size_t offset;
496 0 : struct bf_fixup *fixup = bf_list_node_get_data(fixup_node);
497 0 : struct bpf_insn *insn = &program->img[fixup->insn];
498 : struct bf_map *map;
499 :
500 0 : if (type != fixup->type)
501 0 : continue;
502 :
503 0 : switch (type) {
504 0 : case BF_FIXUP_TYPE_JMP_NEXT_RULE:
505 : insn_type = BF_FIXUP_INSN_OFF;
506 0 : value = (int)(program->img_size - fixup->insn - 1U);
507 0 : break;
508 0 : case BF_FIXUP_TYPE_COUNTERS_MAP_FD:
509 : insn_type = BF_FIXUP_INSN_IMM;
510 0 : value = program->cmap->fd;
511 0 : break;
512 0 : case BF_FIXUP_TYPE_PRINTER_MAP_FD:
513 : insn_type = BF_FIXUP_INSN_IMM;
514 0 : value = program->pmap->fd;
515 0 : break;
516 0 : case BF_FIXUP_TYPE_SET_MAP_FD:
517 0 : map = bf_list_get_at(&program->sets, fixup->attr.set_index);
518 0 : if (!map) {
519 0 : return bf_err_r(-ENOENT, "can't find set map at index %lu",
520 : fixup->attr.set_index);
521 : }
522 : insn_type = BF_FIXUP_INSN_IMM;
523 0 : value = map->fd;
524 0 : break;
525 0 : case BF_FIXUP_TYPE_FUNC_CALL:
526 : insn_type = BF_FIXUP_INSN_IMM;
527 0 : offset = program->functions_location[fixup->attr.function] -
528 : fixup->insn - 1;
529 0 : bf_assert(offset < INT_MAX);
530 0 : value = (int32_t)offset;
531 0 : break;
532 0 : default:
533 0 : bf_abort("unsupported fixup type, this should not happen: %d",
534 : type);
535 : break;
536 : }
537 :
538 0 : _bf_program_fixup_insn(insn, insn_type, value);
539 0 : bf_list_delete(&program->fixups, fixup_node);
540 : }
541 :
542 : return 0;
543 : }
544 :
545 0 : static int _bf_program_generate_rule(struct bf_program *program,
546 : struct bf_rule *rule)
547 : {
548 : int r;
549 :
550 0 : bf_assert(program);
551 0 : bf_assert(rule);
552 :
553 0 : bf_list_foreach (&rule->matchers, matcher_node) {
554 0 : struct bf_matcher *matcher = bf_list_node_get_data(matcher_node);
555 :
556 0 : switch (matcher->type) {
557 0 : case BF_MATCHER_META_IFINDEX:
558 : case BF_MATCHER_META_L3_PROTO:
559 : case BF_MATCHER_META_L4_PROTO:
560 : case BF_MATCHER_META_SPORT:
561 : case BF_MATCHER_META_DPORT:
562 0 : r = bf_matcher_generate_meta(program, matcher);
563 0 : if (r)
564 : return r;
565 : break;
566 0 : case BF_MATCHER_IP4_SRC_ADDR:
567 : case BF_MATCHER_IP4_DST_ADDR:
568 : case BF_MATCHER_IP4_PROTO:
569 0 : r = bf_matcher_generate_ip4(program, matcher);
570 0 : if (r)
571 : return r;
572 : break;
573 0 : case BF_MATCHER_IP6_SADDR:
574 : case BF_MATCHER_IP6_DADDR:
575 0 : r = bf_matcher_generate_ip6(program, matcher);
576 0 : if (r)
577 : return r;
578 : break;
579 0 : case BF_MATCHER_TCP_SPORT:
580 : case BF_MATCHER_TCP_DPORT:
581 : case BF_MATCHER_TCP_FLAGS:
582 0 : r = bf_matcher_generate_tcp(program, matcher);
583 0 : if (r)
584 : return r;
585 : break;
586 0 : case BF_MATCHER_UDP_SPORT:
587 : case BF_MATCHER_UDP_DPORT:
588 0 : r = bf_matcher_generate_udp(program, matcher);
589 0 : if (r)
590 : return r;
591 : break;
592 0 : case BF_MATCHER_SET_SRCIP6PORT:
593 : case BF_MATCHER_SET_SRCIP6:
594 0 : r = bf_matcher_generate_set(program, matcher);
595 0 : if (r)
596 : return r;
597 : break;
598 0 : default:
599 0 : return bf_err_r(-EINVAL, "unknown matcher type %d", matcher->type);
600 : };
601 : }
602 :
603 0 : if (rule->counters) {
604 0 : EMIT(program, BPF_MOV32_IMM(BPF_REG_1, rule->index));
605 0 : EMIT(program, BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10,
606 : BF_PROG_CTX_OFF(pkt_size)));
607 0 : EMIT_FIXUP_CALL(program, BF_FIXUP_FUNC_UPDATE_COUNTERS);
608 : }
609 :
610 0 : switch (rule->verdict) {
611 0 : case BF_VERDICT_ACCEPT:
612 : case BF_VERDICT_DROP:
613 0 : EMIT(program,
614 : BPF_MOV64_IMM(BPF_REG_0,
615 : program->runtime.ops->get_verdict(rule->verdict)));
616 0 : EMIT(program, BPF_EXIT_INSN());
617 0 : break;
618 : case BF_VERDICT_CONTINUE:
619 : // Fall through to next rule or default chain policy.
620 : break;
621 0 : default:
622 0 : bf_abort("unsupported verdict, this should not happen: %d",
623 : rule->verdict);
624 : break;
625 : }
626 :
627 0 : r = _bf_program_fixup(program, BF_FIXUP_TYPE_JMP_NEXT_RULE);
628 0 : if (r)
629 0 : return bf_err_r(r, "failed to generate next rule fixups");
630 :
631 : return 0;
632 : }
633 :
634 : /**
635 : * Generate the BPF function to update a rule's counters.
636 : *
637 : * This function defines a new function **in** the generated BPF program to
638 : * be called during packet processing.
639 : *
640 : * Parameters:
641 : * - @c r1 : index of the rule to update the counters for.
642 : * - @c r2 : size of the packet.
643 : * Returns:
644 : * 0 on success, non-zero on error.
645 : *
646 : * @param program Program to emit the function into. Can not be NULL.
647 : * @return 0 on success, or negative errno value on error.
648 : */
649 0 : static int _bf_program_generate_update_counters(struct bf_program *program)
650 : {
651 : // Move the counters key in scratch[0..4] and the packet size in scratch[8..15]
652 0 : EMIT(program,
653 : BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_1, BF_PROG_SCR_OFF(0)));
654 0 : EMIT(program,
655 : BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, BF_PROG_SCR_OFF(8)));
656 :
657 : // Call bpf_map_lookup_elem()
658 0 : EMIT_LOAD_COUNTERS_FD_FIXUP(program, BPF_REG_1);
659 0 : EMIT(program, BPF_MOV64_REG(BPF_REG_2, BPF_REG_10));
660 0 : EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, BF_PROG_SCR_OFF(0)));
661 0 : EMIT(program, BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem));
662 :
663 : // If the counters doesn't exist, return from the function
664 : {
665 0 : _cleanup_bf_jmpctx_ struct bf_jmpctx _ =
666 0 : bf_jmpctx_get(program, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0));
667 :
668 0 : if (bf_opts_is_verbose(BF_VERBOSE_BPF))
669 0 : EMIT_PRINT(program, "failed to fetch the rule's counters");
670 :
671 0 : EMIT(program, BPF_MOV32_IMM(BPF_REG_0, 1));
672 0 : EMIT(program, BPF_EXIT_INSN());
673 : }
674 :
675 : // Increment the packets count by 1.
676 0 : EMIT(program, BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
677 : offsetof(struct bf_counter, packets)));
678 0 : EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1));
679 0 : EMIT(program, BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
680 : offsetof(struct bf_counter, packets)));
681 :
682 : // Increase the total byte by the size of the packet.
683 0 : EMIT(program, BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
684 : offsetof(struct bf_counter, bytes)));
685 0 : EMIT(program,
686 : BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, BF_PROG_SCR_OFF(8)));
687 0 : EMIT(program, BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2));
688 0 : EMIT(program, BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
689 : offsetof(struct bf_counter, bytes)));
690 :
691 : // On success, return 0
692 0 : EMIT(program, BPF_MOV32_IMM(BPF_REG_0, 0));
693 0 : EMIT(program, BPF_EXIT_INSN());
694 :
695 0 : return 0;
696 : }
697 :
698 0 : static int _bf_program_generate_functions(struct bf_program *program)
699 : {
700 : int r;
701 :
702 0 : bf_assert(program);
703 :
704 0 : bf_list_foreach (&program->fixups, fixup_node) {
705 0 : struct bf_fixup *fixup = bf_list_node_get_data(fixup_node);
706 0 : size_t off = program->img_size;
707 :
708 0 : if (fixup->type != BF_FIXUP_TYPE_FUNC_CALL)
709 0 : continue;
710 :
711 0 : bf_assert(fixup->attr.function >= 0 &&
712 : fixup->attr.function < _BF_FIXUP_FUNC_MAX);
713 :
714 : // Only generate each function once
715 0 : if (program->functions_location[fixup->attr.function])
716 0 : continue;
717 :
718 0 : switch (fixup->attr.function) {
719 0 : case BF_FIXUP_FUNC_UPDATE_COUNTERS:
720 0 : r = _bf_program_generate_update_counters(program);
721 0 : if (r)
722 : return r;
723 : break;
724 0 : default:
725 0 : bf_abort("unsupported fixup function, this should not happen: %d",
726 : fixup->attr.function);
727 : break;
728 : }
729 :
730 0 : program->functions_location[fixup->attr.function] = off;
731 : }
732 :
733 : return 0;
734 : }
735 :
736 41 : int bf_program_emit(struct bf_program *program, struct bpf_insn insn)
737 : {
738 : int r;
739 :
740 41 : bf_assert(program);
741 :
742 41 : if (program->img_size == program->img_cap) {
743 3 : r = bf_program_grow_img(program);
744 3 : if (r)
745 : return r;
746 : }
747 :
748 41 : program->img[program->img_size++] = insn;
749 :
750 41 : return 0;
751 : }
752 :
753 0 : int bf_program_emit_kfunc_call(struct bf_program *program, const char *name)
754 : {
755 : int r;
756 :
757 0 : bf_assert(program);
758 0 : bf_assert(name);
759 :
760 0 : r = bf_btf_get_id(name);
761 0 : if (r < 0)
762 : return r;
763 :
764 0 : EMIT(program, ((struct bpf_insn) {.code = BPF_JMP | BPF_CALL,
765 : .dst_reg = 0,
766 : .src_reg = BPF_PSEUDO_KFUNC_CALL,
767 : .off = 0,
768 : .imm = r}));
769 :
770 0 : return 0;
771 : }
772 :
773 0 : int bf_program_emit_fixup(struct bf_program *program, enum bf_fixup_type type,
774 : struct bpf_insn insn, const union bf_fixup_attr *attr)
775 : {
776 0 : _cleanup_bf_fixup_ struct bf_fixup *fixup = NULL;
777 : int r;
778 :
779 0 : bf_assert(program);
780 :
781 0 : if (program->img_size == program->img_cap) {
782 0 : r = bf_program_grow_img(program);
783 0 : if (r)
784 : return r;
785 : }
786 :
787 0 : r = bf_fixup_new(&fixup, type, program->img_size, attr);
788 0 : if (r)
789 : return r;
790 :
791 0 : r = bf_list_add_tail(&program->fixups, fixup);
792 0 : if (r)
793 : return r;
794 :
795 0 : TAKE_PTR(fixup);
796 :
797 : /* This call could fail and return an error, in which case it is not
798 : * properly handled. However, this shouldn't be an issue as we previously
799 : * test whether enough room is available in cgen.img, which is currently
800 : * the only reason for EMIT() to fail. */
801 0 : EMIT(program, insn);
802 :
803 : return 0;
804 : }
805 :
806 2 : int bf_program_emit_fixup_call(struct bf_program *program,
807 : enum bf_fixup_func function)
808 : {
809 1 : _cleanup_bf_fixup_ struct bf_fixup *fixup = NULL;
810 : int r;
811 :
812 2 : bf_assert(program);
813 :
814 1 : if (program->img_size == program->img_cap) {
815 1 : r = bf_program_grow_img(program);
816 1 : if (r)
817 : return r;
818 : }
819 :
820 1 : r = bf_fixup_new(&fixup, BF_FIXUP_TYPE_FUNC_CALL, program->img_size, NULL);
821 1 : if (r)
822 : return r;
823 :
824 1 : fixup->attr.function = function;
825 :
826 1 : r = bf_list_add_tail(&program->fixups, fixup);
827 1 : if (r)
828 : return r;
829 :
830 1 : TAKE_PTR(fixup);
831 :
832 : /* This call could fail and return an error, in which case it is not
833 : * properly handled. However, this shouldn't be an issue as we previously
834 : * test whether enough room is available in cgen.img, which is currently
835 : * the only reason for EMIT() to fail. */
836 1 : EMIT(program, BPF_CALL_REL(0));
837 :
838 1 : return 0;
839 : }
840 :
841 0 : int bf_program_generate(struct bf_program *program)
842 : {
843 0 : const struct bf_chain *chain = program->runtime.chain;
844 : int r;
845 :
846 : /* Add 1 to the number of counters for the policy counter, and 1
847 : * for the first reserved error slot. This must be done ahead of
848 : * generation, as we will index into the error counters. */
849 0 : program->num_counters = bf_list_size(&chain->rules) + 2;
850 :
851 : // Save the program's argument into the context.
852 0 : EMIT(program,
853 : BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, BF_PROG_CTX_OFF(arg)));
854 :
855 : // Reset the protocol ID registers
856 0 : EMIT(program, BPF_MOV64_IMM(BPF_REG_7, 0));
857 0 : EMIT(program, BPF_MOV64_IMM(BPF_REG_8, 0));
858 :
859 0 : r = program->runtime.ops->gen_inline_prologue(program);
860 0 : if (r)
861 : return r;
862 :
863 0 : bf_list_foreach (&chain->rules, rule_node) {
864 0 : r = _bf_program_generate_rule(program,
865 0 : bf_list_node_get_data(rule_node));
866 0 : if (r)
867 : return r;
868 : }
869 :
870 0 : r = program->runtime.ops->gen_inline_epilogue(program);
871 0 : if (r)
872 : return r;
873 :
874 : // Call the update counters function
875 0 : EMIT(program, BPF_MOV32_IMM(BPF_REG_1, bf_list_size(&chain->rules)));
876 0 : EMIT(program,
877 : BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, BF_PROG_CTX_OFF(pkt_size)));
878 0 : EMIT_FIXUP_CALL(program, BF_FIXUP_FUNC_UPDATE_COUNTERS);
879 :
880 0 : EMIT(program, BPF_MOV64_IMM(BPF_REG_0, program->runtime.ops->get_verdict(
881 : chain->policy)));
882 0 : EMIT(program, BPF_EXIT_INSN());
883 :
884 0 : r = _bf_program_generate_functions(program);
885 0 : if (r)
886 : return r;
887 :
888 0 : r = _bf_program_fixup(program, BF_FIXUP_TYPE_FUNC_CALL);
889 0 : if (r)
890 0 : return bf_err_r(r, "failed to generate function call fixups");
891 :
892 : return 0;
893 : }
894 :
895 0 : static int _bf_program_load_printer_map(struct bf_program *program)
896 : {
897 0 : _cleanup_free_ void *pstr = NULL;
898 : size_t pstr_len;
899 0 : uint32_t key = 0;
900 : int r;
901 :
902 0 : bf_assert(program);
903 :
904 0 : r = bf_printer_assemble(program->printer, &pstr, &pstr_len);
905 0 : if (r)
906 0 : return bf_err_r(r, "failed to assemble printer map string");
907 :
908 0 : r = bf_map_set_value_size(program->pmap, pstr_len);
909 0 : if (r < 0)
910 : return r;
911 :
912 0 : r = bf_map_create(program->pmap, 0);
913 0 : if (r < 0)
914 : return r;
915 :
916 0 : r = bf_map_set_elem(program->pmap, &key, pstr);
917 0 : if (r)
918 : return r;
919 :
920 0 : r = _bf_program_fixup(program, BF_FIXUP_TYPE_PRINTER_MAP_FD);
921 0 : if (r) {
922 0 : bf_map_destroy(program->pmap);
923 0 : return bf_err_r(r, "failed to fixup printer map FD");
924 : }
925 :
926 : return 0;
927 : }
928 :
929 0 : static int _bf_program_load_counters_map(struct bf_program *program)
930 : {
931 0 : _cleanup_close_ int _fd = -1;
932 : int r;
933 :
934 0 : bf_assert(program);
935 :
936 0 : r = bf_map_set_n_elems(program->cmap, program->num_counters);
937 0 : if (r < 0)
938 : return r;
939 :
940 0 : r = bf_map_create(program->cmap, 0);
941 0 : if (r < 0)
942 : return r;
943 :
944 0 : r = _bf_program_fixup(program, BF_FIXUP_TYPE_COUNTERS_MAP_FD);
945 0 : if (r < 0) {
946 0 : bf_map_destroy(program->cmap);
947 0 : return bf_err_r(r, "failed to fixup counters map FD");
948 : }
949 :
950 : return 0;
951 : }
952 :
953 0 : static int _bf_program_load_sets_maps(struct bf_program *new_prog)
954 : {
955 : const bf_list_node *set_node;
956 : const bf_list_node *map_node;
957 : int r;
958 :
959 0 : bf_assert(new_prog);
960 :
961 0 : set_node = bf_list_get_head(&new_prog->runtime.chain->sets);
962 0 : map_node = bf_list_get_head(&new_prog->sets);
963 :
964 : // Fill the bf_map with the sets content
965 0 : while (set_node && map_node) {
966 : _cleanup_free_ uint8_t *values = NULL;
967 : _cleanup_free_ uint8_t *keys = NULL;
968 0 : struct bf_set *set = bf_list_node_get_data(set_node);
969 0 : struct bf_map *map = bf_list_node_get_data(map_node);
970 0 : size_t nelems = bf_list_size(&set->elems);
971 0 : union bpf_attr attr = {};
972 : size_t idx = 0;
973 :
974 0 : r = bf_map_create(map, 0);
975 0 : if (r < 0) {
976 0 : r = bf_err_r(r, "failed to create BPF map for set");
977 0 : goto err_destroy_maps;
978 : }
979 :
980 0 : values = malloc(nelems);
981 0 : if (!values) {
982 0 : r = bf_err_r(errno, "failed to allocate map values");
983 0 : goto err_destroy_maps;
984 : }
985 :
986 0 : keys = malloc(set->elem_size * nelems);
987 0 : if (!keys) {
988 0 : r = bf_err_r(errno, "failed to allocate map keys");
989 0 : goto err_destroy_maps;
990 : }
991 :
992 0 : bf_list_foreach (&set->elems, elem_node) {
993 0 : void *elem = bf_list_node_get_data(elem_node);
994 :
995 0 : memcpy(keys + (idx * set->elem_size), elem, set->elem_size);
996 0 : values[idx] = 1;
997 0 : ++idx;
998 : }
999 :
1000 0 : attr.batch.map_fd = map->fd;
1001 0 : attr.batch.keys = (unsigned long long)keys;
1002 0 : attr.batch.values = (unsigned long long)values;
1003 0 : attr.batch.count = nelems;
1004 0 : attr.batch.flags = BPF_ANY;
1005 :
1006 0 : r = bf_bpf(BPF_MAP_UPDATE_BATCH, &attr);
1007 0 : if (r < 0) {
1008 0 : bf_err_r(r, "failed to add set elements to the map");
1009 0 : goto err_destroy_maps;
1010 : }
1011 :
1012 0 : set_node = bf_list_node_next(set_node);
1013 0 : map_node = bf_list_node_next(map_node);
1014 : }
1015 :
1016 0 : r = _bf_program_fixup(new_prog, BF_FIXUP_TYPE_SET_MAP_FD);
1017 0 : if (r < 0)
1018 0 : goto err_destroy_maps;
1019 :
1020 : return 0;
1021 :
1022 0 : err_destroy_maps:
1023 0 : bf_list_foreach (&new_prog->sets, map_node)
1024 0 : bf_map_destroy(bf_list_node_get_data(map_node));
1025 : return r;
1026 : }
1027 :
1028 0 : int bf_program_load(struct bf_program *prog)
1029 : {
1030 : _cleanup_free_ char *log_buf = NULL;
1031 : int r;
1032 :
1033 0 : bf_assert(prog && prog->img);
1034 :
1035 0 : r = _bf_program_load_sets_maps(prog);
1036 0 : if (r)
1037 : return r;
1038 :
1039 0 : r = _bf_program_load_counters_map(prog);
1040 0 : if (r)
1041 : return r;
1042 :
1043 0 : r = _bf_program_load_printer_map(prog);
1044 0 : if (r)
1045 : return r;
1046 :
1047 0 : if (bf_opts_is_verbose(BF_VERBOSE_DEBUG)) {
1048 0 : log_buf = malloc(_BF_LOG_BUF_SIZE);
1049 0 : if (!log_buf) {
1050 0 : return bf_err_r(-ENOMEM,
1051 : "failed to allocate BPF_PROG_LOAD logs buffer");
1052 : }
1053 : }
1054 :
1055 0 : r = bf_bpf_prog_load(
1056 0 : prog->prog_name, bf_hook_to_bpf_prog_type(prog->runtime.chain->hook),
1057 0 : prog->img, prog->img_size,
1058 0 : bf_hook_to_bpf_attach_type(prog->runtime.chain->hook), log_buf,
1059 : log_buf ? _BF_LOG_BUF_SIZE : 0, bf_ctx_token(), &prog->runtime.prog_fd);
1060 0 : if (r) {
1061 0 : return bf_err_r(r, "failed to load bf_program (%lu bytes):\n%s\nerrno:",
1062 : prog->img_size, log_buf ? log_buf : "<NO LOG BUFFER>");
1063 : }
1064 :
1065 0 : if (bf_opts_is_verbose(BF_VERBOSE_BYTECODE))
1066 0 : bf_program_dump_bytecode(prog);
1067 :
1068 : return r;
1069 : }
1070 :
1071 0 : int bf_program_attach(struct bf_program *prog, struct bf_hookopts **hookopts)
1072 : {
1073 : int r;
1074 :
1075 0 : bf_assert(prog && hookopts);
1076 :
1077 0 : r = bf_link_attach(prog->link, prog->runtime.chain->hook, hookopts,
1078 : prog->runtime.prog_fd);
1079 0 : if (r) {
1080 0 : return bf_err_r(r, "failed to attach bf_link for %s program",
1081 : bf_flavor_to_str(prog->flavor));
1082 : }
1083 :
1084 : return r;
1085 : }
1086 :
1087 0 : void bf_program_detach(struct bf_program *prog)
1088 : {
1089 0 : bf_assert(prog);
1090 :
1091 0 : bf_link_detach(prog->link);
1092 0 : }
1093 :
1094 0 : void bf_program_unload(struct bf_program *prog)
1095 : {
1096 0 : bf_assert(prog);
1097 :
1098 0 : closep(&prog->runtime.prog_fd);
1099 0 : bf_link_detach(prog->link);
1100 0 : bf_map_destroy(prog->cmap);
1101 0 : bf_map_destroy(prog->pmap);
1102 0 : bf_list_foreach (&prog->sets, map_node)
1103 0 : bf_map_destroy(bf_list_node_get_data(map_node));
1104 0 : }
1105 :
1106 0 : int bf_program_get_counter(const struct bf_program *program,
1107 : uint32_t counter_idx, struct bf_counter *counter)
1108 : {
1109 0 : bf_assert(program);
1110 0 : bf_assert(counter);
1111 :
1112 : int r;
1113 :
1114 0 : r = bf_bpf_map_lookup_elem(program->cmap->fd, &counter_idx, counter);
1115 0 : if (r < 0)
1116 0 : return bf_err_r(errno, "failed to lookup counters map");
1117 :
1118 : return 0;
1119 : }
1120 :
1121 0 : int bf_cgen_set_counters(struct bf_program *program,
1122 : const struct bf_counter *counters)
1123 : {
1124 : UNUSED(program);
1125 : UNUSED(counters);
1126 :
1127 0 : return -ENOTSUP;
1128 : }
1129 :
1130 0 : int bf_program_pin(struct bf_program *prog, int dir_fd)
1131 : {
1132 : const char *name;
1133 : int r;
1134 :
1135 0 : bf_assert(prog);
1136 :
1137 0 : name = prog->runtime.chain->name;
1138 :
1139 0 : r = bf_bpf_obj_pin(prog->prog_name, prog->runtime.prog_fd, dir_fd);
1140 0 : if (r) {
1141 0 : bf_err_r(r, "failed to pin BPF program for '%s'", name);
1142 0 : goto err_unpin_all;
1143 : }
1144 :
1145 0 : r = bf_map_pin(prog->cmap, dir_fd);
1146 0 : if (r) {
1147 0 : bf_err_r(r, "failed to pin BPF counters map for '%s'", name);
1148 0 : goto err_unpin_all;
1149 : }
1150 :
1151 0 : r = bf_map_pin(prog->pmap, dir_fd);
1152 0 : if (r) {
1153 0 : bf_err_r(r, "failed to pin BPF printer map for '%s'", name);
1154 0 : goto err_unpin_all;
1155 : }
1156 :
1157 0 : bf_list_foreach (&prog->sets, set_node) {
1158 0 : r = bf_map_pin(bf_list_node_get_data(set_node), dir_fd);
1159 0 : if (r) {
1160 0 : bf_err_r(r, "failed to pin BPF set map for '%s'", name);
1161 0 : goto err_unpin_all;
1162 : }
1163 : }
1164 :
1165 : // If a link exists, pin it too.
1166 0 : if (prog->link->hookopts) {
1167 0 : r = bf_link_pin(prog->link, dir_fd);
1168 0 : if (r) {
1169 0 : bf_err_r(r, "failed to pin BPF link for '%s'", name);
1170 0 : goto err_unpin_all;
1171 : }
1172 : }
1173 :
1174 : return 0;
1175 :
1176 0 : err_unpin_all:
1177 0 : bf_program_unpin(prog, dir_fd);
1178 0 : return r;
1179 : }
1180 :
1181 0 : void bf_program_unpin(struct bf_program *prog, int dir_fd)
1182 : {
1183 0 : bf_assert(prog);
1184 :
1185 0 : bf_map_unpin(prog->cmap, dir_fd);
1186 0 : bf_map_unpin(prog->pmap, dir_fd);
1187 :
1188 0 : bf_list_foreach (&prog->sets, set_node)
1189 0 : bf_map_unpin(bf_list_node_get_data(set_node), dir_fd);
1190 :
1191 0 : bf_link_unpin(prog->link, dir_fd);
1192 :
1193 0 : unlinkat(dir_fd, prog->prog_name, 0);
1194 0 : }
|