/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
 */

#include "bpfilter/cgen/stub.h"

#include <linux/bpf.h>
#include <linux/bpf_common.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/if_ether.h>
#include <linux/in.h> // NOLINT
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>

#include <endian.h>
#include <stddef.h>

#include "bpfilter/cgen/elfstub.h"
#include "bpfilter/cgen/fixup.h"
#include "bpfilter/cgen/jmp.h"
#include "bpfilter/cgen/printer.h"
#include "bpfilter/cgen/program.h"
#include "bpfilter/cgen/swich.h"
#include "bpfilter/opts.h"
#include "core/btf.h"
#include "core/flavor.h"
#include "core/helper.h"
#include "core/verdict.h"

#include "external/filter.h"

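/**
 * Bitmask used by bf_stub_parse_l3_hdr() to detect IPv6 extension headers
 * with a protocol number below 64 in a single shift-and-mask check.
 */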
#define _BF_LOW_EH_BITMASK 0x1801800000000801ULL

/**
 * Generate a stub to create a dynptr.
 *
 * @param program Program to generate the stub for. Must not be NULL.
 * @param arg_reg Register where the first argument to the dynptr creation
 *                function is located (SKB or xdp_md structure).
 * @param kfunc Name of the kfunc to use to create the dynamic pointer.
 * @return 0 on success, or negative errno value on error.
 */
static int _bf_stub_make_ctx_dynptr(struct bf_program *program, int arg_reg,
                                    const char *kfunc)
{
    bf_assert(program && kfunc);

    // Call bpf_dynptr_from_xxx()
    if (arg_reg != BPF_REG_1)
        EMIT(program, BPF_MOV64_REG(BPF_REG_1, arg_reg));
    EMIT(program, BPF_MOV64_IMM(BPF_REG_2, 0));
    EMIT(program, BPF_MOV64_REG(BPF_REG_3, BPF_REG_10));
    EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, BF_PROG_CTX_OFF(dynptr)));
    EMIT_KFUNC_CALL(program, kfunc);

    // If the function call failed, quit the program
    {
        _clean_bf_jmpctx_ struct bf_jmpctx _ =
            bf_jmpctx_get(program, BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 0));

        // Update the error counter
        EMIT(program, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
        EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, BF_PROG_CTX_OFF(arg)));
        EMIT_LOAD_COUNTERS_FD_FIXUP(program, BPF_REG_2);
        EMIT(program,
             BPF_MOV32_IMM(BPF_REG_3, bf_program_error_counter_idx(program)));
        EMIT_FIXUP_ELFSTUB(program, BF_ELFSTUB_UPDATE_COUNTERS);

        if (bf_opts_is_verbose(BF_VERBOSE_BPF))
            EMIT_PRINT(program, "failed to create a new dynamic pointer");

        EMIT(program,
             BPF_MOV64_IMM(BPF_REG_0, program->runtime.ops->get_verdict(
                                          BF_VERDICT_ACCEPT)));
        EMIT(program, BPF_EXIT_INSN());
    }

    return 0;
}

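/**
 * Generate a stub to create a dynptr from an XDP context.
 *
 * @param program Program to generate the stub for. Must not be NULL.
 * @param md_reg Register containing the pointer to the xdp_md structure.
 * @return 0 on success, or negative errno value on error.
 */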
int bf_stub_make_ctx_xdp_dynptr(struct bf_program *program, int md_reg)
{
    bf_assert(program);

    return _bf_stub_make_ctx_dynptr(program, md_reg, "bpf_dynptr_from_xdp");
}

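/**
 * Generate a stub to create a dynptr from an skb context.
 *
 * @param program Program to generate the stub for. Must not be NULL.
 * @param skb_reg Register containing the pointer to the sk_buff structure.
 * @return 0 on success, or negative errno value on error.
 */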
int bf_stub_make_ctx_skb_dynptr(struct bf_program *program, int skb_reg)
{
    bf_assert(program);

    return _bf_stub_make_ctx_dynptr(program, skb_reg, "bpf_dynptr_from_skb");
}

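/**
 * Generate a stub to parse the L2 Ethernet header.
 *
 * Creates a dynptr slice for the Ethernet header, stores its address and the
 * L3 offset in the runtime context, and loads the L3 protocol ID into r7.
 *
 * @param program Program to generate the stub for. Must not be NULL.
 * @return 0 on success, or negative errno value on error.
 */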
int bf_stub_parse_l2_ethhdr(struct bf_program *program)
{
    bf_assert(program);

    // Call bpf_dynptr_slice()
    EMIT(program, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
    EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, BF_PROG_CTX_OFF(dynptr)));
    EMIT(program, BPF_MOV64_IMM(BPF_REG_2, 0));
    EMIT(program, BPF_MOV64_REG(BPF_REG_3, BPF_REG_10));
    EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, BF_PROG_CTX_OFF(l2)));
    EMIT(program, BPF_MOV64_IMM(BPF_REG_4, sizeof(struct ethhdr)));
    EMIT_KFUNC_CALL(program, "bpf_dynptr_slice");

    // If the function call failed, quit the program
    {
        _clean_bf_jmpctx_ struct bf_jmpctx _ =
            bf_jmpctx_get(program, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0));

        // Update the error counter
        EMIT(program, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
        EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, BF_PROG_CTX_OFF(arg)));
        EMIT_LOAD_COUNTERS_FD_FIXUP(program, BPF_REG_2);
        EMIT(program,
             BPF_MOV32_IMM(BPF_REG_3, bf_program_error_counter_idx(program)));
        EMIT_FIXUP_ELFSTUB(program, BF_ELFSTUB_UPDATE_COUNTERS);

        if (bf_opts_is_verbose(BF_VERBOSE_BPF))
            EMIT_PRINT(program, "failed to create L2 dynamic pointer slice");

        EMIT(program,
             BPF_MOV64_IMM(BPF_REG_0, program->runtime.ops->get_verdict(
                                          BF_VERDICT_ACCEPT)));
        EMIT(program, BPF_EXIT_INSN());
    }

    // Store the L2 header address into the runtime context
    EMIT(program,
         BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, BF_PROG_CTX_OFF(l2_hdr)));

    // Store the L3 protocol ID in r7
    EMIT(program, BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_0,
                              offsetof(struct ethhdr, h_proto)));

    // Set bf_runtime.l3_offset
    EMIT(program, BPF_ST_MEM(BPF_W, BPF_REG_10, BF_PROG_CTX_OFF(l3_offset),
                             sizeof(struct ethhdr)));

    return 0;
}

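/**
 * Generate a stub to parse the L3 (IPv4 or IPv6) header.
 *
 * Creates a dynptr slice for the L3 header based on the protocol ID stored
 * in r7, stores its address and the L4 offset in the runtime context, and
 * loads the L4 protocol ID into r8. For IPv6, extension headers are skipped
 * to reach the actual L4 protocol.
 *
 * @param program Program to generate the stub for. Must not be NULL.
 * @return 0 on success, or negative errno value on error.
 */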
int bf_stub_parse_l3_hdr(struct bf_program *program)
{
    _clean_bf_jmpctx_ struct bf_jmpctx _ = bf_jmpctx_default();
    int r;

    bf_assert(program);

    /* Store the size of the L3 protocol header in r4, depending on the
     * protocol ID stored in r7. If the protocol is not supported, store 0
     * into r7 and skip the instructions below. */
    {
        _clean_bf_swich_ struct bf_swich swich =
            bf_swich_get(program, BPF_REG_7);

        EMIT_SWICH_OPTION(&swich, htobe16(ETH_P_IP),
                          BPF_MOV64_IMM(BPF_REG_4, sizeof(struct iphdr)));
        EMIT_SWICH_OPTION(&swich, htobe16(ETH_P_IPV6),
                          BPF_MOV64_IMM(BPF_REG_4, sizeof(struct ipv6hdr)));
        EMIT_SWICH_DEFAULT(&swich, BPF_MOV64_IMM(BPF_REG_7, 0));

        r = bf_swich_generate(&swich);
        if (r)
            return r;
    }
    _ = bf_jmpctx_get(program, BPF_JMP_IMM(BPF_JEQ, BPF_REG_7, 0, 0));

    // Call bpf_dynptr_slice()
    EMIT(program, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
    EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, BF_PROG_CTX_OFF(dynptr)));
    EMIT(program,
         BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, BF_PROG_CTX_OFF(l3_offset)));
    EMIT(program, BPF_MOV64_REG(BPF_REG_3, BPF_REG_10));
    EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, BF_PROG_CTX_OFF(l2)));
    EMIT_KFUNC_CALL(program, "bpf_dynptr_slice");

    // If the function call failed, quit the program
    {
        _clean_bf_jmpctx_ struct bf_jmpctx _ =
            bf_jmpctx_get(program, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0));

        // Update the error counter
        EMIT(program, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
        EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, BF_PROG_CTX_OFF(arg)));
        EMIT_LOAD_COUNTERS_FD_FIXUP(program, BPF_REG_2);
        EMIT(program,
             BPF_MOV32_IMM(BPF_REG_3, bf_program_error_counter_idx(program)));
        EMIT_FIXUP_ELFSTUB(program, BF_ELFSTUB_UPDATE_COUNTERS);

        if (bf_opts_is_verbose(BF_VERBOSE_BPF))
            EMIT_PRINT(program, "failed to create L3 dynamic pointer slice");

        EMIT(program,
             BPF_MOV64_IMM(BPF_REG_0, program->runtime.ops->get_verdict(
                                          BF_VERDICT_ACCEPT)));
        EMIT(program, BPF_EXIT_INSN());
    }

    // Store the L3 header address into the runtime context
    EMIT(program,
         BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, BF_PROG_CTX_OFF(l3_hdr)));

    /* Unsupported L3 protocols have been filtered out at the beginning of
     * this function and jump over the blocks below, so there is no need to
     * worry about them here. */
    {
        // IPv4

        _clean_bf_jmpctx_ struct bf_jmpctx _ = bf_jmpctx_get(
            program, BPF_JMP_IMM(BPF_JNE, BPF_REG_7, htobe16(ETH_P_IP), 0));

        // l4_offset = l3_offset + ihl * 4
        EMIT(program, BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0));
        EMIT(program, BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x0f));
        EMIT(program, BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2));
        EMIT(program, BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10,
                                  BF_PROG_CTX_OFF(l3_offset)));
        EMIT(program, BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2));
        EMIT(program, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_1,
                                  BF_PROG_CTX_OFF(l4_offset)));
        EMIT(program, BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_0,
                                  offsetof(struct iphdr, protocol)));
    }

    {
        // IPv6
        struct bf_jmpctx tcpjmp, udpjmp, noehjmp, ehjmp;
        struct bpf_insn ld64[2] = {BPF_LD_IMM64(BPF_REG_2, _BF_LOW_EH_BITMASK)};
        _clean_bf_jmpctx_ struct bf_jmpctx _ = bf_jmpctx_get(
            program, BPF_JMP_IMM(BPF_JNE, BPF_REG_7, htobe16(ETH_P_IPV6), 0));

        EMIT(program, BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_0,
                                  offsetof(struct ipv6hdr, nexthdr)));

        /* Fast path for TCP and UDP: quickly recognize the most common
         * protocols to process them as fast as possible. */
        tcpjmp = bf_jmpctx_get(program,
                               BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, IPPROTO_TCP, 0));
        udpjmp = bf_jmpctx_get(program,
                               BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, IPPROTO_UDP, 0));

        /* For all the EH protocol numbers <64, use a bitmask:
         * mask = (1<<0) | (1<<43) | (1<<44) | (1<<50) | (1<<51) | (1<<60)
         *
         * Pseudo-code:
         * - r3 = 1 << r8 (nexthdr)
         * - r3 = r3 & mask
         * - if r3 != 0: go to slow path (EH present) */
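        /* BPF_LD_IMM64() expands to two 8-byte instructions, hence the
         * two-element ld64 array: both halves must be emitted. */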
        EMIT(program, ld64[0]);
        EMIT(program, ld64[1]);
        EMIT(program, BPF_JMP_IMM(BPF_JGE, BPF_REG_8, 64, 4));
        EMIT(program, BPF_MOV64_IMM(BPF_REG_3, 1));
        EMIT(program, BPF_ALU64_REG(BPF_LSH, BPF_REG_3, BPF_REG_8));
        EMIT(program, BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2));
        EMIT(program, BPF_JMP_IMM(BPF_JNE, BPF_REG_3, 0, 4));

        // EHs with protocol numbers >64 (MH, HIP, Shim6) are checked individually
        EMIT(program, BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 135, 3));
        EMIT(program, BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 139, 2));
        EMIT(program, BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 140, 1));

        // If no EH matched, nexthdr is the L4 protocol: skip EH processing
        noehjmp = bf_jmpctx_get(program, BPF_JMP_A(0));

        // Process EH
        EMIT(program, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
        EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, BF_PROG_CTX_OFF(arg)));
        EMIT_FIXUP_ELFSTUB(program, BF_ELFSTUB_PARSE_IPV6_EH);
        EMIT(program, BPF_MOV64_REG(BPF_REG_8, BPF_REG_0));

        ehjmp = bf_jmpctx_get(program, BPF_JMP_A(0));

        // If no EH was found, all the jumps above land here
        bf_jmpctx_cleanup(&tcpjmp);
        bf_jmpctx_cleanup(&udpjmp);
        bf_jmpctx_cleanup(&noehjmp);

        // Process the IPv6 header, no EH (BPF_REG_8 already contains nexthdr)
        EMIT(program, BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10,
                                  BF_PROG_CTX_OFF(l3_offset)));
        EMIT(program,
             BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, sizeof(struct ipv6hdr)));
        EMIT(program, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2,
                                  BF_PROG_CTX_OFF(l4_offset)));

        bf_jmpctx_cleanup(&ehjmp);
    }

    return 0;
}

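/**
 * Generate a stub to parse the L4 (TCP, UDP, ICMP, or ICMPv6) header.
 *
 * Creates a dynptr slice for the L4 header based on the protocol ID stored
 * in r8 and stores its address in the runtime context.
 *
 * @param program Program to generate the stub for. Must not be NULL.
 * @return 0 on success, or negative errno value on error.
 */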
int bf_stub_parse_l4_hdr(struct bf_program *program)
{
    _clean_bf_jmpctx_ struct bf_jmpctx _ = bf_jmpctx_default();
    int r;

    bf_assert(program);

    /* Parse the L4 protocol and handle unsupported protocols, similarly to
     * bf_stub_parse_l3_hdr() above. */
    {
        _clean_bf_swich_ struct bf_swich swich =
            bf_swich_get(program, BPF_REG_8);

        EMIT_SWICH_OPTION(&swich, IPPROTO_TCP,
                          BPF_MOV64_IMM(BPF_REG_4, sizeof(struct tcphdr)));
        EMIT_SWICH_OPTION(&swich, IPPROTO_UDP,
                          BPF_MOV64_IMM(BPF_REG_4, sizeof(struct udphdr)));
        EMIT_SWICH_OPTION(&swich, IPPROTO_ICMP,
                          BPF_MOV64_IMM(BPF_REG_4, sizeof(struct icmphdr)));
        EMIT_SWICH_OPTION(&swich, IPPROTO_ICMPV6,
                          BPF_MOV64_IMM(BPF_REG_4, sizeof(struct icmp6hdr)));
        EMIT_SWICH_DEFAULT(&swich, BPF_MOV64_IMM(BPF_REG_8, 0));

        r = bf_swich_generate(&swich);
        if (r)
            return r;
    }
    _ = bf_jmpctx_get(program, BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 0, 0));

    // Call bpf_dynptr_slice()
    EMIT(program, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
    EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, BF_PROG_CTX_OFF(dynptr)));
    EMIT(program,
         BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, BF_PROG_CTX_OFF(l4_offset)));
    EMIT(program, BPF_MOV64_REG(BPF_REG_3, BPF_REG_10));
    EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, BF_PROG_CTX_OFF(l4)));
    EMIT_KFUNC_CALL(program, "bpf_dynptr_slice");

    // If the function call failed, quit the program
    {
        _clean_bf_jmpctx_ struct bf_jmpctx _ =
            bf_jmpctx_get(program, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0));

        // Update the error counter
        EMIT(program, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
        EMIT(program, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, BF_PROG_CTX_OFF(arg)));
        EMIT_LOAD_COUNTERS_FD_FIXUP(program, BPF_REG_2);
        EMIT(program,
             BPF_MOV32_IMM(BPF_REG_3, bf_program_error_counter_idx(program)));
        EMIT_FIXUP_ELFSTUB(program, BF_ELFSTUB_UPDATE_COUNTERS);

        if (bf_opts_is_verbose(BF_VERBOSE_BPF))
            EMIT_PRINT(program, "failed to create L4 dynamic pointer slice");

        EMIT(program,
             BPF_MOV64_IMM(BPF_REG_0, program->runtime.ops->get_verdict(
                                          BF_VERDICT_ACCEPT)));
        EMIT(program, BPF_EXIT_INSN());
    }

    // Store the L4 header address into the runtime context
    EMIT(program,
         BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, BF_PROG_CTX_OFF(l4_hdr)));

    return 0;
}