Branch data Line data Source code
1 : : /* SPDX-License-Identifier: GPL-2.0-only */
2 : : /*
3 : : * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
4 : : */
5 : :
6 : : #include "cgen/stub.h"
7 : :
8 : : #include <linux/bpf.h>
9 : : #include <linux/bpf_common.h>
10 : : #include <linux/icmp.h>
11 : : #include <linux/icmpv6.h>
12 : : #include <linux/if_ether.h>
13 : : #include <linux/in.h> // NOLINT
14 : : #include <linux/in6.h>
15 : : #include <linux/ip.h>
16 : : #include <linux/ipv6.h>
17 : : #include <linux/tcp.h>
18 : : #include <linux/udp.h>
19 : :
20 : : #include <endian.h>
21 : : #include <stddef.h>
22 : :
23 : : #include <bpfilter/flavor.h>
24 : : #include <bpfilter/helper.h>
25 : : #include <bpfilter/matcher.h>
26 : : #include <bpfilter/verdict.h>
27 : :
28 : : #include "cgen/elfstub.h"
29 : : #include "cgen/jmp.h"
30 : : #include "cgen/printer.h"
31 : : #include "cgen/program.h"
32 : : #include "cgen/swich.h"
33 : : #include "filter.h"
34 : : #include "opts.h"
35 : :
/* Bitmask of the IPv6 extension header protocol numbers < 64, as documented
 * in bf_stub_parse_l3_hdr(): HOPOPTS (0), ROUTING (43), FRAGMENT (44),
 * ESP (50), AH (51) and DSTOPTS (60). Spelled out as shifts so the mask can
 * be checked against the list above; equals 0x100C180000000001ULL. The
 * previous literal (0x1801800000000801ULL) set bits 0, 11, 47, 48, 59 and 60,
 * which does not match that list. */
#define _BF_LOW_EH_BITMASK                                                     \
    ((1ULL << 0) | (1ULL << 43) | (1ULL << 44) | (1ULL << 50) |                \
     (1ULL << 51) | (1ULL << 60))
37 : :
38 : : /**
39 : : * Generate stub to create a dynptr.
40 : : *
41 : : * @param program Program to generate the stub for. Must not be NULL.
42 : : * @param arg_reg Register where the first argument to the dynptr creation
43 : : * function is located (SKB or xdp_md structure).
44 : : * @param kfunc Name of the kfunc to use to create the dynamic pointer.
45 : : * @return 0 on success, or negative errno value on error.
46 : : */
47 : 60 : static int _bf_stub_make_ctx_dynptr(struct bf_program *program, int arg_reg,
48 : : const char *kfunc)
49 : : {
50 : : bf_assert(program && kfunc);
51 : :
52 : : // Call bpf_dynptr_from_xxx()
53 [ - + ]: 60 : if (arg_reg != BPF_REG_1)
54 [ # # ]: 0 : EMIT(program, BPF_MOV64_IMM(BPF_REG_1, arg_reg));
55 [ - + ]: 60 : EMIT(program, BPF_MOV64_IMM(BPF_REG_2, 0));
56 [ - + ]: 60 : EMIT(program, BPF_MOV64_REG(BPF_REG_3, BPF_REG_10));
57 [ - + ]: 60 : EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, BF_PROG_CTX_OFF(dynptr)));
58 [ + - ]: 60 : EMIT_KFUNC_CALL(program, kfunc);
59 : :
60 : : // If the function call failed, quit the program
61 : : {
62 : 60 : _clean_bf_jmpctx_ struct bf_jmpctx _ =
63 [ - + ]: 60 : bf_jmpctx_get(program, BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 0));
64 : :
65 : : // Update the error counter
66 [ - + ]: 60 : EMIT(program, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
67 [ - + ]: 60 : EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, BF_PROG_CTX_OFF(arg)));
68 [ + - + - ]: 60 : EMIT_LOAD_COUNTERS_FD_FIXUP(program, BPF_REG_2);
69 [ - + ]: 60 : EMIT(program,
70 : : BPF_MOV32_IMM(BPF_REG_3, bf_program_error_counter_idx(program)));
71 [ + - ]: 60 : EMIT_FIXUP_ELFSTUB(program, BF_ELFSTUB_UPDATE_COUNTERS);
72 : :
73 [ - + ]: 60 : if (bf_opts_is_verbose(BF_VERBOSE_BPF))
74 [ # # # # : 0 : EMIT_PRINT(program, "failed to create a new dynamic pointer");
# # # # ]
75 : :
76 [ - + ]: 60 : EMIT(program,
77 : : BPF_MOV64_IMM(BPF_REG_0, program->runtime.ops->get_verdict(
78 : : BF_VERDICT_ACCEPT)));
79 [ - + ]: 60 : EMIT(program, BPF_EXIT_INSN());
80 : : }
81 : :
82 : 60 : return 0;
83 : : }
84 : :
/**
 * Generate the stub creating a dynptr from an XDP context.
 *
 * Thin wrapper: delegates to the shared dynptr stub generator with the
 * XDP-specific kfunc name.
 *
 * @param program Program to generate the stub for. Must not be NULL.
 * @param md_reg Register holding the xdp_md pointer.
 * @return 0 on success, or a negative errno value on error.
 */
int bf_stub_make_ctx_xdp_dynptr(struct bf_program *program, int md_reg)
{
    bf_assert(program);

    const char *xdp_kfunc = "bpf_dynptr_from_xdp";

    return _bf_stub_make_ctx_dynptr(program, md_reg, xdp_kfunc);
}
91 : :
/**
 * Generate the stub creating a dynptr from an SKB context.
 *
 * Thin wrapper: delegates to the shared dynptr stub generator with the
 * SKB-specific kfunc name.
 *
 * @param program Program to generate the stub for. Must not be NULL.
 * @param skb_reg Register holding the sk_buff pointer.
 * @return 0 on success, or a negative errno value on error.
 */
int bf_stub_make_ctx_skb_dynptr(struct bf_program *program, int skb_reg)
{
    bf_assert(program);

    const char *skb_kfunc = "bpf_dynptr_from_skb";

    return _bf_stub_make_ctx_dynptr(program, skb_reg, skb_kfunc);
}
98 : :
/**
 * Generate the stub parsing the L2 (Ethernet) header.
 *
 * Slices the packet at offset 0 through bpf_dynptr_slice(), stores the L2
 * header address and size in the runtime context, loads the L3 protocol ID
 * (big-endian h_proto) into r7, and records the L3 header offset.
 *
 * @param program Program to generate the stub for. Must not be NULL.
 * @return 0 on success, or a negative errno value on error.
 */
int bf_stub_parse_l2_ethhdr(struct bf_program *program)
{
    bf_assert(program);

    // Call bpf_dynptr_slice()
    // r1 = &runtime_context.dynptr
    EMIT(program, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
    EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, BF_PROG_CTX_OFF(dynptr)));
    // r2 = 0: the Ethernet header starts the packet
    EMIT(program, BPF_MOV64_IMM(BPF_REG_2, 0));
    // r3 = L2 scratch buffer the kfunc may copy non-contiguous data into
    EMIT(program, BPF_MOV64_REG(BPF_REG_3, BPF_REG_10));
    EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, BF_PROG_CTX_OFF(l2)));
    // r4 = slice/buffer size
    EMIT(program, BPF_MOV64_IMM(BPF_REG_4, sizeof(struct ethhdr)));

    // Remember the L2 header size in the runtime context.
    EMIT(program,
         BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_4, BF_PROG_CTX_OFF(l2_size)));

    EMIT_KFUNC_CALL(program, "bpf_dynptr_slice");

    // If the function call failed, quit the program
    {
        // bpf_dynptr_slice() returns NULL on failure: skip when r0 != 0.
        _clean_bf_jmpctx_ struct bf_jmpctx _ =
            bf_jmpctx_get(program, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0));

        // Update the error counter
        EMIT(program, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
        EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, BF_PROG_CTX_OFF(arg)));
        EMIT_LOAD_COUNTERS_FD_FIXUP(program, BPF_REG_2);
        EMIT(program,
             BPF_MOV32_IMM(BPF_REG_3, bf_program_error_counter_idx(program)));
        EMIT_FIXUP_ELFSTUB(program, BF_ELFSTUB_UPDATE_COUNTERS);

        if (bf_opts_is_verbose(BF_VERBOSE_BPF))
            EMIT_PRINT(program, "failed to create L2 dynamic pointer slice");

        // Accept the packet we couldn't parse, then exit.
        EMIT(program,
             BPF_MOV64_IMM(BPF_REG_0, program->runtime.ops->get_verdict(
                                          BF_VERDICT_ACCEPT)));
        EMIT(program, BPF_EXIT_INSN());
    }

    // Store the L2 header address into the runtime context
    EMIT(program,
         BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, BF_PROG_CTX_OFF(l2_hdr)));

    // Store the L3 protocol ID in r7 (big-endian, as on the wire)
    EMIT(program, BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_0,
                              offsetof(struct ethhdr, h_proto)));

    // Set bf_runtime.l3_offset: L3 starts right after the Ethernet header
    EMIT(program, BPF_ST_MEM(BPF_W, BPF_REG_10, BF_PROG_CTX_OFF(l3_offset),
                             sizeof(struct ethhdr)));

    return 0;
}
152 : :
153 : 60 : int bf_stub_parse_l3_hdr(struct bf_program *program)
154 : : {
155 : 60 : _clean_bf_jmpctx_ struct bf_jmpctx _ = bf_jmpctx_default();
156 : : int r;
157 : :
158 : : bf_assert(program);
159 : :
160 : : /* Store the size of the L3 protocol header in r4, depending on the protocol
161 : : * ID stored in r7. If the protocol is not supported, we store 0 into r7
162 : : * and we skip the instructions below. */
163 : : {
164 : 60 : _clean_bf_swich_ struct bf_swich swich =
165 [ - + ]: 60 : bf_swich_get(program, BPF_REG_7);
166 : :
167 [ - + ]: 60 : EMIT_SWICH_OPTION(&swich, htobe16(ETH_P_IP),
168 : : BPF_MOV64_IMM(BPF_REG_4, sizeof(struct iphdr)));
169 [ - + ]: 60 : EMIT_SWICH_OPTION(&swich, htobe16(ETH_P_IPV6),
170 : : BPF_MOV64_IMM(BPF_REG_4, sizeof(struct ipv6hdr)));
171 [ - + ]: 60 : EMIT_SWICH_DEFAULT(&swich, BPF_MOV64_IMM(BPF_REG_7, 0));
172 : :
173 : 60 : r = bf_swich_generate(&swich);
174 [ + - ]: 60 : if (r)
175 : : return r;
176 : : }
177 [ - + ]: 60 : _ = bf_jmpctx_get(program, BPF_JMP_IMM(BPF_JEQ, BPF_REG_7, 0, 0));
178 : :
179 [ - + ]: 60 : EMIT(program,
180 : : BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_4, BF_PROG_CTX_OFF(l3_size)));
181 : :
182 : : // Call bpf_dynptr_slice()
183 [ - + ]: 60 : EMIT(program, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
184 [ - + ]: 60 : EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, BF_PROG_CTX_OFF(dynptr)));
185 [ - + ]: 60 : EMIT(program,
186 : : BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, BF_PROG_CTX_OFF(l3_offset)));
187 [ - + ]: 60 : EMIT(program, BPF_MOV64_REG(BPF_REG_3, BPF_REG_10));
188 [ - + ]: 60 : EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, BF_PROG_CTX_OFF(l2)));
189 [ + - ]: 60 : EMIT_KFUNC_CALL(program, "bpf_dynptr_slice");
190 : :
191 : : // If the function call failed, quit the program
192 : : {
193 : 60 : _clean_bf_jmpctx_ struct bf_jmpctx _ =
194 [ - + ]: 60 : bf_jmpctx_get(program, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0));
195 : :
196 [ - + ]: 60 : EMIT(program, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
197 [ - + ]: 60 : EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, BF_PROG_CTX_OFF(arg)));
198 [ + - + - ]: 60 : EMIT_LOAD_COUNTERS_FD_FIXUP(program, BPF_REG_2);
199 [ - + ]: 60 : EMIT(program,
200 : : BPF_MOV32_IMM(BPF_REG_3, bf_program_error_counter_idx(program)));
201 [ + - ]: 60 : EMIT_FIXUP_ELFSTUB(program, BF_ELFSTUB_UPDATE_COUNTERS);
202 : :
203 [ - + ]: 60 : if (bf_opts_is_verbose(BF_VERBOSE_BPF))
204 [ # # # # : 0 : EMIT_PRINT(program, "failed to create L3 dynamic pointer slice");
# # # # ]
205 : :
206 [ - + ]: 60 : EMIT(program,
207 : : BPF_MOV64_IMM(BPF_REG_0, program->runtime.ops->get_verdict(
208 : : BF_VERDICT_ACCEPT)));
209 [ - + ]: 60 : EMIT(program, BPF_EXIT_INSN());
210 : : }
211 : :
212 : : // Store the L3 header address into the runtime context
213 [ - + ]: 60 : EMIT(program,
214 : : BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, BF_PROG_CTX_OFF(l3_hdr)));
215 : :
216 : : /* Unsupported L3 protocols have been filtered out at the beginning of this
217 : : * function and would jump over the block below, so there is no need to
218 : : * worry about them here. */
219 : : {
220 : : // IPv4
221 [ - + ]: 120 : _clean_bf_jmpctx_ struct bf_jmpctx _ = bf_jmpctx_get(
222 : : program, BPF_JMP_IMM(BPF_JNE, BPF_REG_7, htobe16(ETH_P_IP), 0));
223 : :
224 [ - + ]: 60 : EMIT(program, BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0));
225 [ - + ]: 60 : EMIT(program, BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x0f));
226 [ - + ]: 60 : EMIT(program, BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2));
227 [ - + ]: 60 : EMIT(program, BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10,
228 : : BF_PROG_CTX_OFF(l3_offset)));
229 [ - + ]: 60 : EMIT(program, BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2));
230 [ - + ]: 60 : EMIT(program, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_1,
231 : : BF_PROG_CTX_OFF(l4_offset)));
232 [ - + ]: 60 : EMIT(program, BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_0,
233 : : offsetof(struct iphdr, protocol)));
234 : : }
235 : :
236 : : {
237 : : // IPv6
238 : : struct bf_jmpctx tcpjmp, udpjmp, noehjmp, ehjmp;
239 : 60 : struct bpf_insn ld64[2] = {BPF_LD_IMM64(BPF_REG_2, _BF_LOW_EH_BITMASK)};
240 [ - + ]: 120 : _clean_bf_jmpctx_ struct bf_jmpctx _ = bf_jmpctx_get(
241 : : program, BPF_JMP_IMM(BPF_JNE, BPF_REG_7, htobe16(ETH_P_IPV6), 0));
242 : :
243 [ - + ]: 60 : EMIT(program, BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_0,
244 : : offsetof(struct ipv6hdr, nexthdr)));
245 : :
246 : : /* Fast path for TCP and UDP: quickly recognize the most used protocol
247 : : * to process them as fast as possible. */
248 [ - + ]: 60 : tcpjmp = bf_jmpctx_get(program,
249 : : BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, IPPROTO_TCP, 0));
250 [ - + ]: 60 : udpjmp = bf_jmpctx_get(program,
251 : : BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, IPPROTO_UDP, 0));
252 : :
253 : : /* For all the EH protocol numbers <64, use a bitmask:
254 : : * mask = (1<<0) | (1<<43) | (1<<44) | (1<<50) | (1<<51) | (1<<60)
255 : : *
256 : : * Pseudo-code:
257 : : * - r3 = 1 << r8 (nexthdr)
258 : : * - r3 = r3 & mask
259 : : * - if r3 != 0: go to slow path (EH present) */
260 [ + - ]: 60 : EMIT(program, ld64[0]);
261 [ + - ]: 60 : EMIT(program, ld64[1]);
262 [ - + ]: 60 : EMIT(program, BPF_JMP_IMM(BPF_JGE, BPF_REG_8, 64, 4));
263 [ - + ]: 60 : EMIT(program, BPF_MOV64_IMM(BPF_REG_3, 1));
264 [ - + ]: 60 : EMIT(program, BPF_ALU64_REG(BPF_LSH, BPF_REG_3, BPF_REG_8));
265 [ - + ]: 60 : EMIT(program, BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2));
266 [ - + ]: 60 : EMIT(program, BPF_JMP_IMM(BPF_JNE, BPF_REG_3, 0, 4));
267 : :
268 : : // EH with protocol numbers >64 are processed individually
269 [ - + ]: 60 : EMIT(program, BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 135, 3));
270 [ - + ]: 60 : EMIT(program, BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 139, 2));
271 [ - + ]: 60 : EMIT(program, BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 140, 1));
272 : :
273 : : // If no EH matched, nexthdr is L4, skip EH processing
274 [ - + ]: 60 : noehjmp = bf_jmpctx_get(program, BPF_JMP_A(0));
275 : :
276 : : // Process EH
277 [ - + ]: 60 : EMIT(program, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
278 [ - + ]: 60 : EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, BF_PROG_CTX_OFF(arg)));
279 : : // If any rule filters on ipv6.nexthdr, store the EH in the runtime context
280 : : // during process, so we won't have to process the EH again.
281 [ + + ]: 60 : if (program->runtime.chain->flags & BF_FLAG(BF_CHAIN_STORE_NEXTHDR))
282 [ + - ]: 10 : EMIT_FIXUP_ELFSTUB(program, BF_ELFSTUB_PARSE_IPV6_NH);
283 : : else
284 [ + - ]: 50 : EMIT_FIXUP_ELFSTUB(program, BF_ELFSTUB_PARSE_IPV6_EH);
285 [ - + ]: 60 : EMIT(program, BPF_MOV64_REG(BPF_REG_8, BPF_REG_0));
286 : :
287 [ - + ]: 60 : ehjmp = bf_jmpctx_get(program, BPF_JMP_A(0));
288 : :
289 : : // If no EH found, all the jmp will end up here
290 : 60 : bf_jmpctx_cleanup(&tcpjmp);
291 : 60 : bf_jmpctx_cleanup(&udpjmp);
292 : 60 : bf_jmpctx_cleanup(&noehjmp);
293 : :
294 : : // Process IPv6 header, no EH (BPF_REG_8 already contains nexthdr)
295 [ - + ]: 60 : EMIT(program, BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10,
296 : : BF_PROG_CTX_OFF(l3_offset)));
297 [ - + ]: 60 : EMIT(program,
298 : : BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, sizeof(struct ipv6hdr)));
299 [ - + ]: 60 : EMIT(program, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2,
300 : : BF_PROG_CTX_OFF(l4_offset)));
301 : :
302 : 60 : bf_jmpctx_cleanup(&ehjmp);
303 : : }
304 : :
305 : 60 : return 0;
306 : : }
307 : :
/**
 * Generate the stub parsing the L4 (transport) header.
 *
 * Expects the L4 protocol ID in r8 and bf_runtime.l4_offset to be set (both
 * done by bf_stub_parse_l3_hdr()). Slices the packet at the L4 offset and
 * stores the header address and size in the runtime context. Unsupported
 * protocols reset r8 to 0 and skip the slice creation.
 *
 * @param program Program to generate the stub for. Must not be NULL.
 * @return 0 on success, or a negative errno value on error.
 */
int bf_stub_parse_l4_hdr(struct bf_program *program)
{
    _clean_bf_jmpctx_ struct bf_jmpctx _ = bf_jmpctx_default();
    int r;

    bf_assert(program);

    /* Parse the L4 protocol and handle unsupported protocol, similarly to
     * bf_stub_parse_l3_hdr() above. */
    {
        _clean_bf_swich_ struct bf_swich swich =
            bf_swich_get(program, BPF_REG_8);

        EMIT_SWICH_OPTION(&swich, IPPROTO_TCP,
                          BPF_MOV64_IMM(BPF_REG_4, sizeof(struct tcphdr)));
        EMIT_SWICH_OPTION(&swich, IPPROTO_UDP,
                          BPF_MOV64_IMM(BPF_REG_4, sizeof(struct udphdr)));
        EMIT_SWICH_OPTION(&swich, IPPROTO_ICMP,
                          BPF_MOV64_IMM(BPF_REG_4, sizeof(struct icmphdr)));
        EMIT_SWICH_OPTION(&swich, IPPROTO_ICMPV6,
                          BPF_MOV64_IMM(BPF_REG_4, sizeof(struct icmp6hdr)));
        EMIT_SWICH_DEFAULT(&swich, BPF_MOV64_IMM(BPF_REG_8, 0));

        r = bf_swich_generate(&swich);
        if (r)
            return r;
    }
    // Unsupported L4 protocol (r8 == 0): jump over the slice creation.
    _ = bf_jmpctx_get(program, BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 0, 0));

    // Remember the L4 header size (set in r4 by the swich above).
    EMIT(program,
         BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_4, BF_PROG_CTX_OFF(l4_size)));

    // Call bpf_dynptr_slice()
    // r1 = &runtime_context.dynptr, r2 = L4 offset, r3 = L4 scratch buffer
    EMIT(program, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
    EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, BF_PROG_CTX_OFF(dynptr)));
    EMIT(program,
         BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, BF_PROG_CTX_OFF(l4_offset)));
    EMIT(program, BPF_MOV64_REG(BPF_REG_3, BPF_REG_10));
    EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, BF_PROG_CTX_OFF(l4)));
    EMIT_KFUNC_CALL(program, "bpf_dynptr_slice");

    // If the function call failed, quit the program
    {
        // bpf_dynptr_slice() returns NULL on failure: skip when r0 != 0.
        _clean_bf_jmpctx_ struct bf_jmpctx _ =
            bf_jmpctx_get(program, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0));

        // Update the error counter
        EMIT(program, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
        EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, BF_PROG_CTX_OFF(arg)));
        EMIT_LOAD_COUNTERS_FD_FIXUP(program, BPF_REG_2);
        EMIT(program,
             BPF_MOV32_IMM(BPF_REG_3, bf_program_error_counter_idx(program)));
        EMIT_FIXUP_ELFSTUB(program, BF_ELFSTUB_UPDATE_COUNTERS);

        if (bf_opts_is_verbose(BF_VERBOSE_BPF))
            EMIT_PRINT(program, "failed to create L4 dynamic pointer slice");

        // Accept the packet we couldn't parse, then exit.
        EMIT(program,
             BPF_MOV64_IMM(BPF_REG_0, program->runtime.ops->get_verdict(
                                          BF_VERDICT_ACCEPT)));
        EMIT(program, BPF_EXIT_INSN());
    }

    // Store the L4 header address into the runtime context
    EMIT(program,
         BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, BF_PROG_CTX_OFF(l4_hdr)));

    return 0;
}
376 : :
377 : 100 : int bf_stub_rule_check_protocol(struct bf_program *program,
378 : : const struct bf_matcher_meta *meta)
379 : : {
380 : : bf_assert(program && meta);
381 : :
382 [ + + - ]: 100 : switch (meta->layer) {
383 : 60 : case BF_MATCHER_LAYER_3:
384 [ - + ]: 60 : EMIT_FIXUP_JMP_NEXT_RULE(
385 : : program, BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
386 : : htobe16((uint16_t)meta->hdr_id), 0));
387 : 60 : break;
388 : 40 : case BF_MATCHER_LAYER_4:
389 [ - + ]: 40 : EMIT_FIXUP_JMP_NEXT_RULE(
390 : : program, BPF_JMP_IMM(BPF_JNE, BPF_REG_8, (uint8_t)meta->hdr_id, 0));
391 : 40 : break;
392 : 0 : default:
393 [ # # ]: 0 : return bf_err_r(-EINVAL, "rule can't check for layer ID %d",
394 : : meta->layer);
395 : : }
396 : :
397 : : return 0;
398 : : }
399 : :
400 : 100 : int bf_stub_load_header(struct bf_program *program,
401 : : const struct bf_matcher_meta *meta, int reg)
402 : : {
403 : : bf_assert(program && meta);
404 : :
405 [ + + - ]: 100 : switch (meta->layer) {
406 : 60 : case BF_MATCHER_LAYER_3:
407 [ - + ]: 60 : EMIT(program,
408 : : BPF_LDX_MEM(BPF_DW, reg, BPF_REG_10, BF_PROG_CTX_OFF(l3_hdr)));
409 : 60 : break;
410 : 40 : case BF_MATCHER_LAYER_4:
411 [ - + ]: 40 : EMIT(program,
412 : : BPF_LDX_MEM(BPF_DW, reg, BPF_REG_10, BF_PROG_CTX_OFF(l4_hdr)));
413 : 40 : break;
414 : 0 : default:
415 [ # # ]: 0 : return bf_err_r(-EINVAL,
416 : : "layer ID %d is not a valid layer to load header for",
417 : : meta->layer);
418 : : }
419 : :
420 : : return 0;
421 : : }
422 : :
423 : 80 : int bf_stub_stx_payload(struct bf_program *program,
424 : : const struct bf_matcher_meta *meta, size_t offset)
425 : : {
426 : : bf_assert(program && meta);
427 : :
428 : 80 : size_t remaining_size = meta->hdr_payload_size;
429 : : size_t src_offset = 0;
430 : : size_t dst_offset = offset;
431 : :
432 [ + + ]: 160 : while (remaining_size) {
433 : : int bpf_size = BPF_B;
434 : : size_t copy_bytes = 1;
435 : :
436 [ + + + - ]: 80 : if (BF_ALIGNED_64(offset) && remaining_size >= 8) {
437 : : bpf_size = BPF_DW;
438 : : copy_bytes = 8;
439 [ + - + + ]: 80 : } else if (BF_ALIGNED_32(offset) && remaining_size >= 4) {
440 : : bpf_size = BPF_W;
441 : : copy_bytes = 4;
442 [ - + + + ]: 60 : } else if (BF_ALIGNED_16(offset) && remaining_size >= 2) {
443 : : bpf_size = BPF_H;
444 : : copy_bytes = 2;
445 : : }
446 : :
447 [ - + ]: 80 : EMIT(program, BPF_LDX_MEM(bpf_size, BPF_REG_1, BPF_REG_6,
448 : : meta->hdr_payload_offset + src_offset));
449 [ - + ]: 80 : EMIT(program, BPF_STX_MEM(bpf_size, BPF_REG_10, BPF_REG_1,
450 : : BF_PROG_SCR_OFF(dst_offset)));
451 : :
452 : 80 : remaining_size -= copy_bytes;
453 : 80 : src_offset += copy_bytes;
454 : 80 : dst_offset += copy_bytes;
455 : : }
456 : :
457 : : return 0;
458 : : }
|