// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */

#include <linux/types.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <net/tcp.h>

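/* Hooks that a BPF CA may leave unset.  For every other func ptr,
 * bpf_tcp_ca_init_member() requires a bpf_prog fd to be provided.
 */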
static u32 optional_ops[] = {
	offsetof(struct tcp_congestion_ops, init),
	offsetof(struct tcp_congestion_ops, release),
	offsetof(struct tcp_congestion_ops, set_state),
	offsetof(struct tcp_congestion_ops, cwnd_event),
	offsetof(struct tcp_congestion_ops, in_ack_event),
	offsetof(struct tcp_congestion_ops, pkts_acked),
	offsetof(struct tcp_congestion_ops, min_tso_segs),
	offsetof(struct tcp_congestion_ops, sndbuf_expand),
	offsetof(struct tcp_congestion_ops, cong_control),
};

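/* Hooks that cannot be implemented from BPF at all;
 * bpf_tcp_ca_check_member() rejects programs attaching to them.
 */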
static u32 unsupported_ops[] = {
	offsetof(struct tcp_congestion_ops, get_info),
};

static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;

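/* Resolve the BTF ids of "sock" and "tcp_sock" once, when the vmlinux BTF
 * is parsed.  They are later used to promote a PTR_TO_BTF_ID "sock"
 * argument to "tcp_sock" in bpf_tcp_ca_is_valid_access().
 */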
static int bpf_tcp_ca_init(struct btf *btf)
{
	s32 type_id;

	type_id = btf_find_by_name_kind(btf, "sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	sock_id = type_id;

	type_id = btf_find_by_name_kind(btf, "tcp_sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	tcp_sock_id = type_id;
	tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);

	return 0;
}

static bool is_optional(u32 member_offset)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(optional_ops); i++) {
		if (member_offset == optional_ops[i])
			return true;
	}

	return false;
}

static bool is_unsupported(u32 member_offset)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(unsupported_ops); i++) {
		if (member_offset == unsupported_ops[i])
			return true;
	}

	return false;
}

extern struct btf *btf_vmlinux;

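/* The context of a struct_ops prog is an array of u64 arguments.  Reject
 * out-of-range, write and misaligned accesses, then let btf_ctx_access()
 * check the access against the BTF of the hook's prototype.
 */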
static bool bpf_tcp_ca_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	if (!btf_ctx_access(off, size, type, prog, info))
		return false;

	if (info->reg_type == PTR_TO_BTF_ID && info->btf_id == sock_id)
		/* promote it to tcp_sock */
		info->btf_id = tcp_sock_id;

	return true;
}

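/* Reads are delegated to the generic btf_struct_access().  Writes are only
 * allowed into a whitelist of tcp_sock/inet_connection_sock members, and a
 * write must not run past the end of the member it starts in.
 */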
static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
					const struct btf_type *t, int off,
					int size, enum bpf_access_type atype,
					u32 *next_btf_id)
{
	size_t end;

	if (atype == BPF_READ)
		return btf_struct_access(log, t, off, size, atype, next_btf_id);

	if (t != tcp_sock_type) {
		bpf_log(log, "only read is supported\n");
		return -EACCES;
	}

	switch (off) {
	case bpf_ctx_range(struct inet_connection_sock, icsk_ca_priv):
		end = offsetofend(struct inet_connection_sock, icsk_ca_priv);
		break;
	case offsetof(struct inet_connection_sock, icsk_ack.pending):
		end = offsetofend(struct inet_connection_sock,
				  icsk_ack.pending);
		break;
	case offsetof(struct tcp_sock, snd_cwnd):
		end = offsetofend(struct tcp_sock, snd_cwnd);
		break;
	case offsetof(struct tcp_sock, snd_cwnd_cnt):
		end = offsetofend(struct tcp_sock, snd_cwnd_cnt);
		break;
	case offsetof(struct tcp_sock, snd_ssthresh):
		end = offsetofend(struct tcp_sock, snd_ssthresh);
		break;
	case offsetof(struct tcp_sock, ecn_flags):
		end = offsetofend(struct tcp_sock, ecn_flags);
		break;
	default:
		bpf_log(log, "no write support to tcp_sock at off %d\n", off);
		return -EACCES;
	}

	if (off + size > end) {
		bpf_log(log,
			"write access at off %d with size %d beyond the member of tcp_sock ended at %zu\n",
			off, size, end);
		return -EACCES;
	}

	return NOT_INIT;
}

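/* Expose __tcp_send_ack() so a BPF CA can send an ACK carrying a given
 * rcv_nxt, as DCTCP-style CAs do on CE state changes.
 */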
BPF_CALL_2(bpf_tcp_send_ack, struct tcp_sock *, tp, u32, rcv_nxt)
{
	/* bpf_tcp_ca prog cannot have NULL tp */
	__tcp_send_ack((struct sock *)tp, rcv_nxt);
	return 0;
}

static const struct bpf_func_proto bpf_tcp_send_ack_proto = {
	.func		= bpf_tcp_send_ack,
	.gpl_only	= false,
	/* In case we want to report error later */
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg2_type	= ARG_ANYTHING,
	.btf_id		= &tcp_sock_id,
};

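/* Only tcp_send_ack() is specific to this prog type; every other helper
 * comes from the common base set.
 */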
static const struct bpf_func_proto *
bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
			  const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_tcp_send_ack:
		return &bpf_tcp_send_ack_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_verifier_ops bpf_tcp_ca_verifier_ops = {
	.get_func_proto		= bpf_tcp_ca_get_func_proto,
	.is_valid_access	= bpf_tcp_ca_is_valid_access,
	.btf_struct_access	= bpf_tcp_ca_btf_struct_access,
};

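/* Copy one member from the user-supplied image (udata) into the kernel
 * copy (kdata).  Returning 1 means the member was fully handled here,
 * 0 defers to the generic struct_ops handling (e.g. func ptr members),
 * and a negative errno rejects the map update.
 */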
static int bpf_tcp_ca_init_member(const struct btf_type *t,
				  const struct btf_member *member,
				  void *kdata, const void *udata)
{
	const struct tcp_congestion_ops *utcp_ca;
	struct tcp_congestion_ops *tcp_ca;
	size_t tcp_ca_name_len;
	int prog_fd;
	u32 moff;

	utcp_ca = (const struct tcp_congestion_ops *)udata;
	tcp_ca = (struct tcp_congestion_ops *)kdata;

	moff = btf_member_bit_offset(t, member) / 8;
	switch (moff) {
	case offsetof(struct tcp_congestion_ops, flags):
		if (utcp_ca->flags & ~TCP_CONG_MASK)
			return -EINVAL;
		tcp_ca->flags = utcp_ca->flags;
		return 1;
	case offsetof(struct tcp_congestion_ops, name):
		tcp_ca_name_len = strnlen(utcp_ca->name, sizeof(utcp_ca->name));
		if (!tcp_ca_name_len ||
		    tcp_ca_name_len == sizeof(utcp_ca->name))
			return -EINVAL;
		if (tcp_ca_find(utcp_ca->name))
			return -EEXIST;
		memcpy(tcp_ca->name, utcp_ca->name, sizeof(tcp_ca->name));
		return 1;
	}

	if (!btf_type_resolve_func_ptr(btf_vmlinux, member->type, NULL))
		return 0;

	/* Ensure bpf_prog is provided for compulsory func ptr */
	prog_fd = (int)(*(unsigned long *)(udata + moff));
	if (!prog_fd && !is_optional(moff) && !is_unsupported(moff))
		return -EINVAL;

	return 0;
}

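/* Called by the verifier for the op a struct_ops prog attaches to;
 * programs for unsupported ops (get_info) are rejected outright.
 */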
static int bpf_tcp_ca_check_member(const struct btf_type *t,
				   const struct btf_member *member)
{
	if (is_unsupported(btf_member_bit_offset(t, member) / 8))
		return -ENOTSUPP;
	return 0;
}

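/* Registration goes through the regular TCP CA framework, so a BPF CA is
 * selectable via setsockopt(TCP_CONGESTION) like any module CA.
 */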
static int bpf_tcp_ca_reg(void *kdata)
{
	return tcp_register_congestion_control(kdata);
}

static void bpf_tcp_ca_unreg(void *kdata)
{
	tcp_unregister_congestion_control(kdata);
}

/* Avoid sparse warning.  It is only used in bpf_struct_ops.c. */
extern struct bpf_struct_ops bpf_tcp_congestion_ops;

struct bpf_struct_ops bpf_tcp_congestion_ops = {
	.verifier_ops = &bpf_tcp_ca_verifier_ops,
	.reg = bpf_tcp_ca_reg,
	.unreg = bpf_tcp_ca_unreg,
	.check_member = bpf_tcp_ca_check_member,
	.init_member = bpf_tcp_ca_init_member,
	.init = bpf_tcp_ca_init,
	.name = "tcp_congestion_ops",
};
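
/* Illustrative sketch, not part of this file: on the BPF side, a CA is a
 * set of SEC("struct_ops/...") programs plus a SEC(".struct_ops") map
 * value, following the libbpf conventions used by the bpf_dctcp/bpf_cubic
 * selftests.  All names below are hypothetical:
 *
 *	SEC("struct_ops/my_ca_ssthresh")
 *	__u32 BPF_PROG(my_ca_ssthresh, struct sock *sk)
 *	{
 *		// the ctx arg "sk" is promoted to tcp_sock by
 *		// bpf_tcp_ca_is_valid_access() above
 *		struct tcp_sock *tp = (struct tcp_sock *)sk;
 *		__u32 cwnd = tp->snd_cwnd >> 1;
 *
 *		return cwnd < 2 ? 2 : cwnd;
 *	}
 *
 *	SEC(".struct_ops")
 *	struct tcp_congestion_ops my_ca = {
 *		.ssthresh	= (void *)my_ca_ssthresh,
 *		.cong_avoid	= (void *)my_ca_cong_avoid,
 *		.name		= "bpf_my_ca",
 *	};
 *
 * tcp_register_congestion_control() still requires ssthresh plus one of
 * cong_avoid/cong_control, exactly as it does for a kernel module CA.
 */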