LCOV - code coverage report
Current view: top level - bpfilter/cgen - stub.c (source / functions)
Test:         lcov.out
Test Date:    2025-08-19 17:27:08
Coverage:     Lines: 0.0 % (0 of 187)    Functions: 0.0 % (0 of 9)

            Line data    Source code
       1              : /* SPDX-License-Identifier: GPL-2.0-only */
       2              : /*
       3              :  * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
       4              :  */
       5              : 
       6              : #include "bpfilter/cgen/stub.h"
       7              : 
       8              : #include <linux/bpf.h>
       9              : #include <linux/bpf_common.h>
      10              : #include <linux/icmp.h>
      11              : #include <linux/icmpv6.h>
      12              : #include <linux/if_ether.h>
      13              : #include <linux/in.h> // NOLINT
      14              : #include <linux/in6.h>
      15              : #include <linux/ip.h>
      16              : #include <linux/ipv6.h>
      17              : #include <linux/tcp.h>
      18              : #include <linux/udp.h>
      19              : 
      20              : #include <endian.h>
      21              : #include <stddef.h>
      22              : 
      23              : #include "bpfilter/cgen/elfstub.h"
      24              : #include "bpfilter/cgen/jmp.h"
      25              : #include "bpfilter/cgen/printer.h"
      26              : #include "bpfilter/cgen/program.h"
      27              : #include "bpfilter/cgen/swich.h"
      28              : #include "bpfilter/opts.h"
      29              : #include "core/flavor.h"
      30              : #include "core/helper.h"
      31              : #include "core/matcher.h"
      32              : #include "core/verdict.h"
      33              : 
      34              : #include "external/filter.h"
      35              : 
       36              : #define _BF_LOW_EH_BITMASK 0x100C180000000001ULL
      37              : 
      38              : /**
       39              :  * Generate a stub to create a dynptr.
      40              :  *
      41              :  * @param program Program to generate the stub for. Must not be NULL.
      42              :  * @param arg_reg Register where the first argument to the dynptr creation
      43              :  *        function is located (SKB or xdp_md structure).
      44              :  * @param kfunc Name of the kfunc to use to create the dynamic pointer.
       45              :  * @return 0 on success, or a negative errno value on error.
      46              :  */
      47            0 : static int _bf_stub_make_ctx_dynptr(struct bf_program *program, int arg_reg,
      48              :                                     const char *kfunc)
      49              : {
      50            0 :     bf_assert(program && kfunc);
      51              : 
      52              :     // Call bpf_dynptr_from_xxx()
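                       :     // kfunc arguments: r1 = context (skb or xdp_md), r2 = flags (0),
                       :     // r3 = address of the dynptr in the runtime context (r10-relative).
                       :     // On success the kfunc returns 0 in r0.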
      53            0 :     if (arg_reg != BPF_REG_1)
       54            0 :         EMIT(program, BPF_MOV64_REG(BPF_REG_1, arg_reg));
      55            0 :     EMIT(program, BPF_MOV64_IMM(BPF_REG_2, 0));
      56            0 :     EMIT(program, BPF_MOV64_REG(BPF_REG_3, BPF_REG_10));
      57            0 :     EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, BF_PROG_CTX_OFF(dynptr)));
      58            0 :     EMIT_KFUNC_CALL(program, kfunc);
      59              : 
      60              :     // If the function call failed, quit the program
      61              :     {
      62            0 :         _clean_bf_jmpctx_ struct bf_jmpctx _ =
      63            0 :             bf_jmpctx_get(program, BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 0));
      64              : 
      65              :         // Update the error counter
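                       :         // Arguments for the update-counters stub: r1 = address of the
                       :         // runtime context argument, r2 = counters map fd (resolved through
                       :         // a fixup), r3 = index of this program's error counter.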
      66            0 :         EMIT(program, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
      67            0 :         EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, BF_PROG_CTX_OFF(arg)));
      68            0 :         EMIT_LOAD_COUNTERS_FD_FIXUP(program, BPF_REG_2);
      69            0 :         EMIT(program,
      70              :              BPF_MOV32_IMM(BPF_REG_3, bf_program_error_counter_idx(program)));
      71            0 :         EMIT_FIXUP_ELFSTUB(program, BF_ELFSTUB_UPDATE_COUNTERS);
      72              : 
      73            0 :         if (bf_opts_is_verbose(BF_VERBOSE_BPF))
      74            0 :             EMIT_PRINT(program, "failed to create a new dynamic pointer");
      75              : 
      76            0 :         EMIT(program,
      77              :              BPF_MOV64_IMM(BPF_REG_0, program->runtime.ops->get_verdict(
      78              :                                           BF_VERDICT_ACCEPT)));
      79            0 :         EMIT(program, BPF_EXIT_INSN());
      80              :     }
      81              : 
      82            0 :     return 0;
      83              : }
      84              : 
      85            0 : int bf_stub_make_ctx_xdp_dynptr(struct bf_program *program, int md_reg)
      86              : {
      87            0 :     bf_assert(program);
      88              : 
      89            0 :     return _bf_stub_make_ctx_dynptr(program, md_reg, "bpf_dynptr_from_xdp");
      90              : }
      91              : 
      92            0 : int bf_stub_make_ctx_skb_dynptr(struct bf_program *program, int skb_reg)
      93              : {
      94            0 :     bf_assert(program);
      95              : 
      96            0 :     return _bf_stub_make_ctx_dynptr(program, skb_reg, "bpf_dynptr_from_skb");
      97              : }
      98              : 
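                       : /**
                       :  * Generate a stub to parse the L2 Ethernet header.
                       :  *
                       :  * Slices the Ethernet header out of the packet dynptr, stores its address
                       :  * and size in the runtime context, loads the L3 protocol ID into r7, and
                       :  * sets the L3 offset to sizeof(struct ethhdr).
                       :  *
                       :  * @param program Program to generate the stub for. Must not be NULL.
                       :  * @return 0 on success, or a negative errno value on error.
                       :  */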
      99            0 : int bf_stub_parse_l2_ethhdr(struct bf_program *program)
     100              : {
     101            0 :     bf_assert(program);
     102              : 
     103              :     // Call bpf_dynptr_slice()
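                       :     // kfunc arguments: r1 = &dynptr, r2 = offset in the packet (0),
                       :     // r3 = fallback buffer (ctx->l2) used if the slice is not contiguous,
                       :     // r4 = slice length (sizeof(struct ethhdr)). r0 is the slice address,
                       :     // or NULL on failure.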
     104            0 :     EMIT(program, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
     105            0 :     EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, BF_PROG_CTX_OFF(dynptr)));
     106            0 :     EMIT(program, BPF_MOV64_IMM(BPF_REG_2, 0));
     107            0 :     EMIT(program, BPF_MOV64_REG(BPF_REG_3, BPF_REG_10));
     108            0 :     EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, BF_PROG_CTX_OFF(l2)));
     109            0 :     EMIT(program, BPF_MOV64_IMM(BPF_REG_4, sizeof(struct ethhdr)));
     110              : 
     111            0 :     EMIT(program,
     112              :          BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_4, BF_PROG_CTX_OFF(l2_size)));
     113              : 
     114            0 :     EMIT_KFUNC_CALL(program, "bpf_dynptr_slice");
     115              : 
     116              :     // If the function call failed, quit the program
     117              :     {
     118            0 :         _clean_bf_jmpctx_ struct bf_jmpctx _ =
     119            0 :             bf_jmpctx_get(program, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0));
     120              : 
     121              :         // Update the error counter
     122            0 :         EMIT(program, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
     123            0 :         EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, BF_PROG_CTX_OFF(arg)));
     124            0 :         EMIT_LOAD_COUNTERS_FD_FIXUP(program, BPF_REG_2);
     125            0 :         EMIT(program,
     126              :              BPF_MOV32_IMM(BPF_REG_3, bf_program_error_counter_idx(program)));
     127            0 :         EMIT_FIXUP_ELFSTUB(program, BF_ELFSTUB_UPDATE_COUNTERS);
     128              : 
     129            0 :         if (bf_opts_is_verbose(BF_VERBOSE_BPF))
     130            0 :             EMIT_PRINT(program, "failed to create L2 dynamic pointer slice");
     131              : 
     132            0 :         EMIT(program,
     133              :              BPF_MOV64_IMM(BPF_REG_0, program->runtime.ops->get_verdict(
     134              :                                           BF_VERDICT_ACCEPT)));
     135            0 :         EMIT(program, BPF_EXIT_INSN());
     136              :     }
     137              : 
     138              :     // Store the L2 header address into the runtime context
     139            0 :     EMIT(program,
     140              :          BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, BF_PROG_CTX_OFF(l2_hdr)));
     141              : 
     142              :     // Store the L3 protocol ID in r7
     143            0 :     EMIT(program, BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_0,
     144              :                               offsetof(struct ethhdr, h_proto)));
     145              : 
     146              :     // Set bf_runtime.l3_offset
     147            0 :     EMIT(program, BPF_ST_MEM(BPF_W, BPF_REG_10, BF_PROG_CTX_OFF(l3_offset),
     148              :                              sizeof(struct ethhdr)));
     149              : 
     150            0 :     return 0;
     151              : }
     152              : 
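                       : /**
                       :  * Generate a stub to parse the L3 (IPv4 or IPv6) header.
                       :  *
                       :  * Slices the L3 header selected by the protocol ID in r7, stores its
                       :  * address in the runtime context, computes the L4 offset (walking IPv6
                       :  * extension headers if needed), and loads the L4 protocol number into r8.
                       :  * Unsupported L3 protocols skip the parsing entirely.
                       :  *
                       :  * @param program Program to generate the stub for. Must not be NULL.
                       :  * @return 0 on success, or a negative errno value on error.
                       :  */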
     153            0 : int bf_stub_parse_l3_hdr(struct bf_program *program)
     154              : {
     155            0 :     _clean_bf_jmpctx_ struct bf_jmpctx _ = bf_jmpctx_default();
     156              :     int r;
     157              : 
     158            0 :     bf_assert(program);
     159              : 
     160              :     /* Store the size of the L3 protocol header in r4, depending on the protocol
     161              :      * ID stored in r7. If the protocol is not supported, we store 0 into r7
     162              :      * and we skip the instructions below. */
     163              :     {
     164            0 :         _clean_bf_swich_ struct bf_swich swich =
     165            0 :             bf_swich_get(program, BPF_REG_7);
     166              : 
     167            0 :         EMIT_SWICH_OPTION(&swich, htobe16(ETH_P_IP),
     168              :                           BPF_MOV64_IMM(BPF_REG_4, sizeof(struct iphdr)));
     169            0 :         EMIT_SWICH_OPTION(&swich, htobe16(ETH_P_IPV6),
     170              :                           BPF_MOV64_IMM(BPF_REG_4, sizeof(struct ipv6hdr)));
     171            0 :         EMIT_SWICH_DEFAULT(&swich, BPF_MOV64_IMM(BPF_REG_7, 0));
     172              : 
     173            0 :         r = bf_swich_generate(&swich);
     174            0 :         if (r)
     175              :             return r;
     176              :     }
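                       :     /* r7 == 0 means the L3 protocol is unsupported: the jump below skips
                       :      * the rest of this function, its target being resolved when the
                       :      * function-scope jmpctx is cleaned up on return. */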
     177            0 :     _ = bf_jmpctx_get(program, BPF_JMP_IMM(BPF_JEQ, BPF_REG_7, 0, 0));
     178              : 
     179            0 :     EMIT(program,
     180              :          BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_4, BF_PROG_CTX_OFF(l3_size)));
     181              : 
     182              :     // Call bpf_dynptr_slice()
     183            0 :     EMIT(program, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
     184            0 :     EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, BF_PROG_CTX_OFF(dynptr)));
     185            0 :     EMIT(program,
     186              :          BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, BF_PROG_CTX_OFF(l3_offset)));
     187            0 :     EMIT(program, BPF_MOV64_REG(BPF_REG_3, BPF_REG_10));
     188            0 :     EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, BF_PROG_CTX_OFF(l2)));
     189            0 :     EMIT_KFUNC_CALL(program, "bpf_dynptr_slice");
     190              : 
     191              :     // If the function call failed, quit the program
     192              :     {
     193            0 :         _clean_bf_jmpctx_ struct bf_jmpctx _ =
     194            0 :             bf_jmpctx_get(program, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0));
     195              : 
     196            0 :         EMIT(program, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
     197            0 :         EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, BF_PROG_CTX_OFF(arg)));
     198            0 :         EMIT_LOAD_COUNTERS_FD_FIXUP(program, BPF_REG_2);
     199            0 :         EMIT(program,
     200              :              BPF_MOV32_IMM(BPF_REG_3, bf_program_error_counter_idx(program)));
     201            0 :         EMIT_FIXUP_ELFSTUB(program, BF_ELFSTUB_UPDATE_COUNTERS);
     202              : 
     203            0 :         if (bf_opts_is_verbose(BF_VERBOSE_BPF))
     204            0 :             EMIT_PRINT(program, "failed to create L3 dynamic pointer slice");
     205              : 
     206            0 :         EMIT(program,
     207              :              BPF_MOV64_IMM(BPF_REG_0, program->runtime.ops->get_verdict(
     208              :                                           BF_VERDICT_ACCEPT)));
     209            0 :         EMIT(program, BPF_EXIT_INSN());
     210              :     }
     211              : 
     212              :     // Store the L3 header address into the runtime context
     213            0 :     EMIT(program,
     214              :          BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, BF_PROG_CTX_OFF(l3_hdr)));
     215              : 
     216              :     /* Unsupported L3 protocols have been filtered out at the beginning of this
     217              :      * function and would jump over the block below, so there is no need to
     218              :      * worry about them here. */
     219              :     {
     220              :         // IPv4
     221            0 :         _clean_bf_jmpctx_ struct bf_jmpctx _ = bf_jmpctx_get(
     222              :             program, BPF_JMP_IMM(BPF_JNE, BPF_REG_7, htobe16(ETH_P_IP), 0));
     223              : 
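                       :         // Compute the IPv4 header length from the IHL field: low nibble of
                       :         // the first byte, expressed in 32-bit words, hence the left shift
                       :         // by 2 to convert to bytes. l4_offset = l3_offset + (ihl << 2).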
     224            0 :         EMIT(program, BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0));
     225            0 :         EMIT(program, BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x0f));
     226            0 :         EMIT(program, BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2));
     227            0 :         EMIT(program, BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10,
     228              :                                   BF_PROG_CTX_OFF(l3_offset)));
     229            0 :         EMIT(program, BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2));
     230            0 :         EMIT(program, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_1,
     231              :                                   BF_PROG_CTX_OFF(l4_offset)));
     232            0 :         EMIT(program, BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_0,
     233              :                                   offsetof(struct iphdr, protocol)));
     234              :     }
     235              : 
     236              :     {
     237              :         // IPv6
     238              :         struct bf_jmpctx tcpjmp, udpjmp, noehjmp, ehjmp;
     239            0 :         struct bpf_insn ld64[2] = {BPF_LD_IMM64(BPF_REG_2, _BF_LOW_EH_BITMASK)};
     240            0 :         _clean_bf_jmpctx_ struct bf_jmpctx _ = bf_jmpctx_get(
     241              :             program, BPF_JMP_IMM(BPF_JNE, BPF_REG_7, htobe16(ETH_P_IPV6), 0));
     242              : 
     243            0 :         EMIT(program, BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_0,
     244              :                                   offsetof(struct ipv6hdr, nexthdr)));
     245              : 
      246              :         /* Fast path for TCP and UDP: recognize the most common protocols
      247              :          * first, so they are processed as fast as possible. */
     248            0 :         tcpjmp = bf_jmpctx_get(program,
     249              :                                BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, IPPROTO_TCP, 0));
     250            0 :         udpjmp = bf_jmpctx_get(program,
     251              :                                BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, IPPROTO_UDP, 0));
     252              : 
     253              :         /* For all the EH protocol numbers <64, use a bitmask:
     254              :          * mask = (1<<0) | (1<<43) | (1<<44) | (1<<50) | (1<<51) | (1<<60)
     255              :          *
     256              :          * Pseudo-code:
     257              :          * - r3 = 1 << r8 (nexthdr)
     258              :          * - r3 = r3 & mask
     259              :          * - if r3 != 0: go to slow path (EH present) */
     260            0 :         EMIT(program, ld64[0]);
     261            0 :         EMIT(program, ld64[1]);
     262            0 :         EMIT(program, BPF_JMP_IMM(BPF_JGE, BPF_REG_8, 64, 4));
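                       :         // nexthdr >= 64: skip the 4-instruction bitmask test below and fall
                       :         // through to the individual checks for the high EH numbers.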
     263            0 :         EMIT(program, BPF_MOV64_IMM(BPF_REG_3, 1));
     264            0 :         EMIT(program, BPF_ALU64_REG(BPF_LSH, BPF_REG_3, BPF_REG_8));
     265            0 :         EMIT(program, BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2));
     266            0 :         EMIT(program, BPF_JMP_IMM(BPF_JNE, BPF_REG_3, 0, 4));
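                       :         // A low EH number matched: skip the three high-EH checks and the
                       :         // no-EH jump (4 instructions) to reach the EH processing block.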
     267              : 
      268              :         // EHs with protocol numbers >= 64 (135, 139, 140) are checked individually
     269            0 :         EMIT(program, BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 135, 3));
     270            0 :         EMIT(program, BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 139, 2));
     271            0 :         EMIT(program, BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 140, 1));
     272              : 
      273              :         // If no EH matched, nexthdr is an L4 protocol: skip EH processing
     274            0 :         noehjmp = bf_jmpctx_get(program, BPF_JMP_A(0));
     275              : 
     276              :         // Process EH
     277            0 :         EMIT(program, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
     278            0 :         EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, BF_PROG_CTX_OFF(arg)));
      279              :         // If any rule filters on ipv6.nexthdr, store the EHs in the runtime
      280              :         // context while processing them, so they won't have to be parsed again.
     281            0 :         if (program->runtime.chain->flags & BF_FLAG(BF_CHAIN_STORE_NEXTHDR))
     282            0 :             EMIT_FIXUP_ELFSTUB(program, BF_ELFSTUB_PARSE_IPV6_NH);
     283              :         else
     284            0 :             EMIT_FIXUP_ELFSTUB(program, BF_ELFSTUB_PARSE_IPV6_EH);
     285            0 :         EMIT(program, BPF_MOV64_REG(BPF_REG_8, BPF_REG_0));
     286              : 
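                       :         // Skip the no-EH fallback below: it assumes the L4 header
                       :         // immediately follows the fixed-size IPv6 header, which no longer
                       :         // holds once EHs are present.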
     287            0 :         ehjmp = bf_jmpctx_get(program, BPF_JMP_A(0));
     288              : 
      289              :         // If no EH was found, all the jumps above land here
     290            0 :         bf_jmpctx_cleanup(&tcpjmp);
     291            0 :         bf_jmpctx_cleanup(&udpjmp);
     292            0 :         bf_jmpctx_cleanup(&noehjmp);
     293              : 
     294              :         // Process IPv6 header, no EH (BPF_REG_8 already contains nexthdr)
     295            0 :         EMIT(program, BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10,
     296              :                                   BF_PROG_CTX_OFF(l3_offset)));
     297            0 :         EMIT(program,
     298              :              BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, sizeof(struct ipv6hdr)));
     299            0 :         EMIT(program, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2,
     300              :                                   BF_PROG_CTX_OFF(l4_offset)));
     301              : 
     302            0 :         bf_jmpctx_cleanup(&ehjmp);
     303              :     }
     304              : 
     305            0 :     return 0;
     306              : }
     307              : 
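                       : /**
                       :  * Generate a stub to parse the L4 header (TCP, UDP, ICMP, or ICMPv6).
                       :  *
                       :  * Slices the L4 header selected by the protocol number in r8 and stores
                       :  * its address and size in the runtime context. Unsupported L4 protocols
                       :  * skip the parsing entirely.
                       :  *
                       :  * @param program Program to generate the stub for. Must not be NULL.
                       :  * @return 0 on success, or a negative errno value on error.
                       :  */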
     308            0 : int bf_stub_parse_l4_hdr(struct bf_program *program)
     309              : {
     310            0 :     _clean_bf_jmpctx_ struct bf_jmpctx _ = bf_jmpctx_default();
     311              :     int r;
     312              : 
     313            0 :     bf_assert(program);
     314              : 
      315              :     /* Parse the L4 protocol and handle unsupported protocols, similarly to
     316              :      * bf_stub_parse_l3_hdr() above. */
     317              :     {
     318            0 :         _clean_bf_swich_ struct bf_swich swich =
     319            0 :             bf_swich_get(program, BPF_REG_8);
     320              : 
     321            0 :         EMIT_SWICH_OPTION(&swich, IPPROTO_TCP,
     322              :                           BPF_MOV64_IMM(BPF_REG_4, sizeof(struct tcphdr)));
     323            0 :         EMIT_SWICH_OPTION(&swich, IPPROTO_UDP,
     324              :                           BPF_MOV64_IMM(BPF_REG_4, sizeof(struct udphdr)));
     325            0 :         EMIT_SWICH_OPTION(&swich, IPPROTO_ICMP,
     326              :                           BPF_MOV64_IMM(BPF_REG_4, sizeof(struct icmphdr)));
     327            0 :         EMIT_SWICH_OPTION(&swich, IPPROTO_ICMPV6,
     328              :                           BPF_MOV64_IMM(BPF_REG_4, sizeof(struct icmp6hdr)));
     329            0 :         EMIT_SWICH_DEFAULT(&swich, BPF_MOV64_IMM(BPF_REG_8, 0));
     330              : 
     331            0 :         r = bf_swich_generate(&swich);
     332            0 :         if (r)
     333              :             return r;
     334              :     }
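                       :     /* r8 == 0 means the L4 protocol is unsupported: skip the rest of the
                       :      * parsing, as for L3 above. */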
     335            0 :     _ = bf_jmpctx_get(program, BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 0, 0));
     336              : 
     337            0 :     EMIT(program,
     338              :          BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_4, BF_PROG_CTX_OFF(l4_size)));
     339              : 
     340              :     // Call bpf_dynptr_slice()
     341            0 :     EMIT(program, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
     342            0 :     EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, BF_PROG_CTX_OFF(dynptr)));
     343            0 :     EMIT(program,
     344              :          BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, BF_PROG_CTX_OFF(l4_offset)));
     345            0 :     EMIT(program, BPF_MOV64_REG(BPF_REG_3, BPF_REG_10));
     346            0 :     EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, BF_PROG_CTX_OFF(l4)));
     347            0 :     EMIT_KFUNC_CALL(program, "bpf_dynptr_slice");
     348              : 
     349              :     // If the function call failed, quit the program
     350              :     {
     351            0 :         _clean_bf_jmpctx_ struct bf_jmpctx _ =
     352            0 :             bf_jmpctx_get(program, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0));
     353              : 
     354            0 :         EMIT(program, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
     355            0 :         EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, BF_PROG_CTX_OFF(arg)));
     356            0 :         EMIT_LOAD_COUNTERS_FD_FIXUP(program, BPF_REG_2);
     357            0 :         EMIT(program,
     358              :              BPF_MOV32_IMM(BPF_REG_3, bf_program_error_counter_idx(program)));
     359            0 :         EMIT_FIXUP_ELFSTUB(program, BF_ELFSTUB_UPDATE_COUNTERS);
     360              : 
     361            0 :         if (bf_opts_is_verbose(BF_VERBOSE_BPF))
     362            0 :             EMIT_PRINT(program, "failed to create L4 dynamic pointer slice");
     363              : 
     364            0 :         EMIT(program,
     365              :              BPF_MOV64_IMM(BPF_REG_0, program->runtime.ops->get_verdict(
     366              :                                           BF_VERDICT_ACCEPT)));
     367            0 :         EMIT(program, BPF_EXIT_INSN());
     368              :     }
     369              : 
     370              :     // Store the L4 header address into the runtime context
     371            0 :     EMIT(program,
     372              :          BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, BF_PROG_CTX_OFF(l4_hdr)));
     373              : 
     374            0 :     return 0;
     375              : }
     376              : 
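                       : /**
                       :  * Generate a check jumping to the next rule if the packet's protocol does
                       :  * not match the matcher's header ID (r7 for L3, r8 for L4).
                       :  *
                       :  * @param program Program to generate the check for. Must not be NULL.
                       :  * @param meta Matcher metadata providing the layer and header ID. Must not
                       :  *        be NULL.
                       :  * @return 0 on success, or a negative errno value on error.
                       :  */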
     377            0 : int bf_stub_rule_check_protocol(struct bf_program *program,
     378              :                                 const struct bf_matcher_meta *meta)
     379              : {
     380            0 :     bf_assert(program && meta);
     381              : 
     382            0 :     switch (meta->layer) {
     383            0 :     case BF_MATCHER_LAYER_3:
     384            0 :         EMIT_FIXUP_JMP_NEXT_RULE(
     385              :             program, BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
     386              :                                  htobe16((uint16_t)meta->hdr_id), 0));
     387            0 :         break;
     388            0 :     case BF_MATCHER_LAYER_4:
     389            0 :         EMIT_FIXUP_JMP_NEXT_RULE(
     390              :             program, BPF_JMP_IMM(BPF_JNE, BPF_REG_8, (uint8_t)meta->hdr_id, 0));
     391            0 :         break;
     392            0 :     default:
     393            0 :         return bf_err_r(-EINVAL, "rule can't check for layer ID %d",
     394              :                         meta->layer);
     395              :     }
     396              : 
     397              :     return 0;
     398              : }
     399              : 
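                       : /**
                       :  * Load the address of the L3 or L4 header from the runtime context into
                       :  * the given register.
                       :  *
                       :  * @param program Program to generate the instructions for. Must not be NULL.
                       :  * @param meta Matcher metadata selecting the layer. Must not be NULL.
                       :  * @param reg Register to load the header address into.
                       :  * @return 0 on success, or a negative errno value on error.
                       :  */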
     400            0 : int bf_stub_load_header(struct bf_program *program,
     401              :                         const struct bf_matcher_meta *meta, int reg)
     402              : {
     403            0 :     bf_assert(program && meta);
     404              : 
     405            0 :     switch (meta->layer) {
     406            0 :     case BF_MATCHER_LAYER_3:
     407            0 :         EMIT(program,
     408              :              BPF_LDX_MEM(BPF_DW, reg, BPF_REG_10, BF_PROG_CTX_OFF(l3_hdr)));
     409            0 :         break;
     410            0 :     case BF_MATCHER_LAYER_4:
     411            0 :         EMIT(program,
     412              :              BPF_LDX_MEM(BPF_DW, reg, BPF_REG_10, BF_PROG_CTX_OFF(l4_hdr)));
     413            0 :         break;
     414            0 :     default:
     415            0 :         return bf_err_r(-EINVAL,
     416              :                         "layer ID %d is not a valid layer to load header for",
     417              :                         meta->layer);
     418              :     }
     419              : 
     420              :     return 0;
     421              : }
     422              : 
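                       : /**
                       :  * Copy the matcher's header payload into the program's scratch space.
                       :  *
                       :  * @param program Program to generate the instructions for. Must not be NULL.
                       :  * @param meta Matcher metadata providing the payload offset and size. Must
                       :  *        not be NULL.
                       :  * @param offset Offset in the scratch space to copy the payload to.
                       :  * @return 0 on success, or a negative errno value on error.
                       :  */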
     423            0 : int bf_stub_stx_payload(struct bf_program *program,
     424              :                         const struct bf_matcher_meta *meta, size_t offset)
     425              : {
     426            0 :     bf_assert(program && meta);
     427              : 
     428            0 :     size_t remaining_size = meta->hdr_payload_size;
     429              :     size_t src_offset = 0;
     430              :     size_t dst_offset = offset;
     431              : 
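                       :     /* Copy the payload from the header pointed to by r6 into the
                       :      * program's scratch space, using the widest load/store the offset
                       :      * alignment and remaining size allow: 8, 4, 2, then 1 byte. */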
     432            0 :     while (remaining_size) {
     433              :         int bpf_size = BPF_B;
     434              :         size_t copy_bytes = 1;
     435              : 
     436            0 :         if (BF_ALIGNED_64(offset) && remaining_size >= 8) {
     437              :             bpf_size = BPF_DW;
     438              :             copy_bytes = 8;
     439            0 :         } else if (BF_ALIGNED_32(offset) && remaining_size >= 4) {
     440              :             bpf_size = BPF_W;
     441              :             copy_bytes = 4;
     442            0 :         } else if (BF_ALIGNED_16(offset) && remaining_size >= 2) {
     443              :             bpf_size = BPF_H;
     444              :             copy_bytes = 2;
     445              :         }
     446              : 
     447            0 :         EMIT(program, BPF_LDX_MEM(bpf_size, BPF_REG_1, BPF_REG_6,
     448              :                                   meta->hdr_payload_offset + src_offset));
     449            0 :         EMIT(program, BPF_STX_MEM(bpf_size, BPF_REG_10, BPF_REG_1,
     450              :                                   BF_PROG_SCR_OFF(dst_offset)));
     451              : 
     452            0 :         remaining_size -= copy_bytes;
     453            0 :         src_offset += copy_bytes;
     454            0 :         dst_offset += copy_bytes;
     455              :     }
     456              : 
     457              :     return 0;
     458              : }
        

Generated by: LCOV version 2.0-1