Skip to content

Commit

Permalink
Fix map deletes which may be affected by race conditions (#34826)
Browse files Browse the repository at this point in the history
  • Loading branch information
usamasaqib authored Mar 6, 2025
1 parent a6dee66 commit a2091d7
Show file tree
Hide file tree
Showing 2 changed files with 11 additions and 14 deletions.
6 changes: 4 additions & 2 deletions pkg/network/ebpf/c/protocols/tls/https.h
Original file line number Diff line number Diff line change
Expand Up @@ -277,15 +277,17 @@ static __always_inline void map_ssl_ctx_to_sock(struct sock *skp) {
if (ssl_ctx_map_val == NULL) {
return;
}

// copy map value to stack. required for older kernels
void *ssl_ctx = *ssl_ctx_map_val;

bpf_map_delete_elem(&ssl_ctx_by_pid_tgid, &pid_tgid);

ssl_sock_t ssl_sock = {};
if (!read_conn_tuple(&ssl_sock.tup, skp, pid_tgid, CONN_TYPE_TCP)) {
return;
}

// copy map value to stack. required for older kernels
void *ssl_ctx = *ssl_ctx_map_val;
bpf_map_update_with_telemetry(ssl_sock_by_ctx, &ssl_ctx, &ssl_sock, BPF_ANY);
}

Expand Down
19 changes: 7 additions & 12 deletions pkg/network/ebpf/c/protocols/tls/native-tls.h
Original file line number Diff line number Diff line change
Expand Up @@ -122,7 +122,6 @@ static __always_inline int SSL_read_ret(struct pt_regs *ctx, __u64 tags) {
}

char *buffer_ptr = args->buf;
bpf_map_delete_elem(&ssl_read_args, &pid_tgid);
// The read tuple should be flipped (compared to the write tuple).
// tls_process and the appropriate parsers will flip it back if needed.
conn_tuple_t copy = {0};
Expand All @@ -131,7 +130,7 @@ static __always_inline int SSL_read_ret(struct pt_regs *ctx, __u64 tags) {
// the inverse direction, thus we're normalizing the tuples into a client <-> server direction.
normalize_tuple(&copy);
tls_process(ctx, &copy, buffer_ptr, len, tags);
return 0;

cleanup:
bpf_map_delete_elem(&ssl_read_args, &pid_tgid);
return 0;
Expand Down Expand Up @@ -182,7 +181,7 @@ static __always_inline int SSL_write_ret(struct pt_regs* ctx, __u64 flags) {
}

char *buffer_ptr = args->buf;
bpf_map_delete_elem(&ssl_write_args, &pid_tgid);

conn_tuple_t copy = {0};
bpf_memcpy(&copy, t, sizeof(conn_tuple_t));
// We want to guarantee write-TLS hooks generate the same connection tuple, while read-TLS hooks generate
Expand All @@ -191,7 +190,7 @@ static __always_inline int SSL_write_ret(struct pt_regs* ctx, __u64 flags) {
normalize_tuple(&copy);
flip_tuple(&copy);
tls_process(ctx, &copy, buffer_ptr, write_len, flags);
return 0;

cleanup:
bpf_map_delete_elem(&ssl_write_args, &pid_tgid);
return 0;
Expand Down Expand Up @@ -261,7 +260,6 @@ static __always_inline int SSL_read_ex_ret(struct pt_regs* ctx, __u64 tags) {
}

char *buffer_ptr = args->buf;
bpf_map_delete_elem(&ssl_read_ex_args, &pid_tgid);
// The read tuple should be flipped (compared to the write tuple).
// tls_process and the appropriate parsers will flip it back if needed.
conn_tuple_t copy = {0};
Expand All @@ -270,7 +268,7 @@ static __always_inline int SSL_read_ex_ret(struct pt_regs* ctx, __u64 tags) {
// the inverse direction, thus we're normalizing the tuples into a client <-> server direction.
normalize_tuple(&copy);
tls_process(ctx, &copy, buffer_ptr, bytes_count, tags);
return 0;

cleanup:
bpf_map_delete_elem(&ssl_read_ex_args, &pid_tgid);
return 0;
Expand Down Expand Up @@ -331,7 +329,6 @@ static __always_inline int SSL_write_ex_ret(struct pt_regs* ctx, __u64 tags) {
}

char *buffer_ptr = args->buf;
bpf_map_delete_elem(&ssl_write_ex_args, &pid_tgid);
conn_tuple_t copy = {0};
bpf_memcpy(&copy, conn_tuple, sizeof(conn_tuple_t));
// We want to guarantee write-TLS hooks generate the same connection tuple, while read-TLS hooks generate
Expand All @@ -340,7 +337,7 @@ static __always_inline int SSL_write_ex_ret(struct pt_regs* ctx, __u64 tags) {
normalize_tuple(&copy);
flip_tuple(&copy);
tls_process(ctx, &copy, buffer_ptr, bytes_count, tags);
return 0;

cleanup:
bpf_map_delete_elem(&ssl_write_ex_args, &pid_tgid);
return 0;
Expand Down Expand Up @@ -461,7 +458,6 @@ int BPF_BYPASSABLE_URETPROBE(uretprobe__gnutls_record_recv, ssize_t read_len) {
}

char *buffer_ptr = args->buf;
bpf_map_delete_elem(&ssl_read_args, &pid_tgid);
// The read tuple should be flipped (compared to the write tuple).
// tls_process and the appropriate parsers will flip it back if needed.
conn_tuple_t copy = {0};
Expand All @@ -470,7 +466,7 @@ int BPF_BYPASSABLE_URETPROBE(uretprobe__gnutls_record_recv, ssize_t read_len) {
// the inverse direction, thus we're normalizing the tuples into a client <-> server direction.
normalize_tuple(&copy);
tls_process(ctx, &copy, buffer_ptr, read_len, LIBGNUTLS);
return 0;

cleanup:
bpf_map_delete_elem(&ssl_read_args, &pid_tgid);
return 0;
Expand Down Expand Up @@ -507,7 +503,6 @@ int BPF_BYPASSABLE_URETPROBE(uretprobe__gnutls_record_send, ssize_t write_len) {
}

char *buffer_ptr = args->buf;
bpf_map_delete_elem(&ssl_write_args, &pid_tgid);
conn_tuple_t copy = {0};
bpf_memcpy(&copy, t, sizeof(conn_tuple_t));
// We want to guarantee write-TLS hooks generate the same connection tuple, while read-TLS hooks generate
Expand All @@ -516,7 +511,7 @@ int BPF_BYPASSABLE_URETPROBE(uretprobe__gnutls_record_send, ssize_t write_len) {
normalize_tuple(&copy);
flip_tuple(&copy);
tls_process(ctx, &copy, buffer_ptr, write_len, LIBGNUTLS);
return 0;

cleanup:
bpf_map_delete_elem(&ssl_write_args, &pid_tgid);
return 0;
Expand Down

0 comments on commit a2091d7

Please sign in to comment.