diff --git a/pkg/network/ebpf/c/protocols/tls/https.h b/pkg/network/ebpf/c/protocols/tls/https.h
index fb10d6496e6af1..00fcc8b13da77a 100644
--- a/pkg/network/ebpf/c/protocols/tls/https.h
+++ b/pkg/network/ebpf/c/protocols/tls/https.h
@@ -277,6 +277,10 @@ static __always_inline void map_ssl_ctx_to_sock(struct sock *skp) {
     if (ssl_ctx_map_val == NULL) {
         return;
     }
+
+    // copy map value to stack. required for older kernels
+    void *ssl_ctx = *ssl_ctx_map_val;
+
     bpf_map_delete_elem(&ssl_ctx_by_pid_tgid, &pid_tgid);
 
     ssl_sock_t ssl_sock = {};
@@ -284,8 +288,6 @@ static __always_inline void map_ssl_ctx_to_sock(struct sock *skp) {
         return;
     }
 
-    // copy map value to stack. required for older kernels
-    void *ssl_ctx = *ssl_ctx_map_val;
     bpf_map_update_with_telemetry(ssl_sock_by_ctx, &ssl_ctx, &ssl_sock, BPF_ANY);
 }
 
diff --git a/pkg/network/ebpf/c/protocols/tls/native-tls.h b/pkg/network/ebpf/c/protocols/tls/native-tls.h
index 719b8a0069b752..be2d1f4a50428f 100644
--- a/pkg/network/ebpf/c/protocols/tls/native-tls.h
+++ b/pkg/network/ebpf/c/protocols/tls/native-tls.h
@@ -122,7 +122,6 @@ static __always_inline int SSL_read_ret(struct pt_regs *ctx, __u64 tags) {
     }
 
     char *buffer_ptr = args->buf;
-    bpf_map_delete_elem(&ssl_read_args, &pid_tgid);
     // The read tuple should be flipped (compared to the write tuple).
     // tls_process and the appropriate parsers will flip it back if needed.
     conn_tuple_t copy = {0};
@@ -131,7 +130,7 @@ static __always_inline int SSL_read_ret(struct pt_regs *ctx, __u64 tags) {
     // the inverse direction, thus we're normalizing the tuples into a client <-> server direction.
     normalize_tuple(&copy);
     tls_process(ctx, &copy, buffer_ptr, len, tags);
-    return 0;
+
 cleanup:
     bpf_map_delete_elem(&ssl_read_args, &pid_tgid);
     return 0;
@@ -182,7 +181,7 @@ static __always_inline int SSL_write_ret(struct pt_regs* ctx, __u64 flags) {
     }
 
     char *buffer_ptr = args->buf;
-    bpf_map_delete_elem(&ssl_write_args, &pid_tgid);
+
     conn_tuple_t copy = {0};
     bpf_memcpy(&copy, t, sizeof(conn_tuple_t));
     // We want to guarantee write-TLS hooks generates the same connection tuple, while read-TLS hooks generate
@@ -191,7 +190,7 @@ static __always_inline int SSL_write_ret(struct pt_regs* ctx, __u64 flags) {
     normalize_tuple(&copy);
     flip_tuple(&copy);
     tls_process(ctx, &copy, buffer_ptr, write_len, flags);
-    return 0;
+
 cleanup:
     bpf_map_delete_elem(&ssl_write_args, &pid_tgid);
     return 0;
@@ -261,7 +260,6 @@ static __always_inline int SSL_read_ex_ret(struct pt_regs* ctx, __u64 tags) {
     }
 
     char *buffer_ptr = args->buf;
-    bpf_map_delete_elem(&ssl_read_ex_args, &pid_tgid);
     // The read tuple should be flipped (compared to the write tuple).
     // tls_process and the appropriate parsers will flip it back if needed.
     conn_tuple_t copy = {0};
@@ -270,7 +268,7 @@ static __always_inline int SSL_read_ex_ret(struct pt_regs* ctx, __u64 tags) {
     // the inverse direction, thus we're normalizing the tuples into a client <-> server direction.
     normalize_tuple(&copy);
     tls_process(ctx, &copy, buffer_ptr, bytes_count, tags);
-    return 0;
+
 cleanup:
     bpf_map_delete_elem(&ssl_read_ex_args, &pid_tgid);
     return 0;
@@ -331,7 +329,6 @@ static __always_inline int SSL_write_ex_ret(struct pt_regs* ctx, __u64 tags) {
     }
 
     char *buffer_ptr = args->buf;
-    bpf_map_delete_elem(&ssl_write_ex_args, &pid_tgid);
     conn_tuple_t copy = {0};
     bpf_memcpy(&copy, conn_tuple, sizeof(conn_tuple_t));
     // We want to guarantee write-TLS hooks generates the same connection tuple, while read-TLS hooks generate
@@ -340,7 +337,7 @@ static __always_inline int SSL_write_ex_ret(struct pt_regs* ctx, __u64 tags) {
     normalize_tuple(&copy);
     flip_tuple(&copy);
     tls_process(ctx, &copy, buffer_ptr, bytes_count, tags);
-    return 0;
+
 cleanup:
     bpf_map_delete_elem(&ssl_write_ex_args, &pid_tgid);
     return 0;
@@ -461,7 +458,6 @@ int BPF_BYPASSABLE_URETPROBE(uretprobe__gnutls_record_recv, ssize_t read_len) {
     }
 
     char *buffer_ptr = args->buf;
-    bpf_map_delete_elem(&ssl_read_args, &pid_tgid);
     // The read tuple should be flipped (compared to the write tuple).
     // tls_process and the appropriate parsers will flip it back if needed.
     conn_tuple_t copy = {0};
@@ -470,7 +466,7 @@ int BPF_BYPASSABLE_URETPROBE(uretprobe__gnutls_record_recv, ssize_t read_len) {
     // the inverse direction, thus we're normalizing the tuples into a client <-> server direction.
     normalize_tuple(&copy);
     tls_process(ctx, &copy, buffer_ptr, read_len, LIBGNUTLS);
-    return 0;
+
 cleanup:
     bpf_map_delete_elem(&ssl_read_args, &pid_tgid);
     return 0;
@@ -507,7 +503,6 @@ int BPF_BYPASSABLE_URETPROBE(uretprobe__gnutls_record_send, ssize_t write_len) {
     }
 
     char *buffer_ptr = args->buf;
-    bpf_map_delete_elem(&ssl_write_args, &pid_tgid);
     conn_tuple_t copy = {0};
     bpf_memcpy(&copy, t, sizeof(conn_tuple_t));
     // We want to guarantee write-TLS hooks generates the same connection tuple, while read-TLS hooks generate
@@ -516,7 +511,7 @@ int BPF_BYPASSABLE_URETPROBE(uretprobe__gnutls_record_send, ssize_t write_len) {
     normalize_tuple(&copy);
     flip_tuple(&copy);
     tls_process(ctx, &copy, buffer_ptr, write_len, LIBGNUTLS);
-    return 0;
+
 cleanup:
     bpf_map_delete_elem(&ssl_write_args, &pid_tgid);
     return 0;
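Note for reviewers: the native-tls.h hunks all apply the same fix. Previously each uretprobe deleted its per-call args map entry as soon as buffer_ptr had been read, even though pointers derived from that entry (and, in the write paths, the saved tuple) could still be dereferenced afterwards, and the plain `return 0;` after tls_process bypassed the cleanup label entirely. The change deletes the entry exactly once, at the cleanup label that every exit path now falls through to. A minimal sketch of the resulting shape, with hypothetical names (args_t, args_map, example_ret are stand-ins, not the repo's API):

// Sketch only: the goto-cleanup pattern these hunks converge on.
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

typedef struct {
    const char *buf; // state saved by the matching entry probe
} args_t;

struct {
    __uint(type, BPF_MAP_TYPE_HASH);
    __uint(max_entries, 1024);
    __type(key, __u64);
    __type(value, args_t);
} args_map SEC(".maps");

SEC("uretprobe/example")
int example_ret(struct pt_regs *ctx) {
    __u64 pid_tgid = bpf_get_current_pid_tgid();
    args_t *args = bpf_map_lookup_elem(&args_map, &pid_tgid);
    if (args == NULL) {
        return 0; // entry probe stored nothing: no state to release
    }
    if (args->buf == NULL) {
        goto cleanup; // bail out, but still release the map entry
    }
    // ... processing runs here while `args` is still live, so pointers
    // into the map value stay valid until cleanup ...
cleanup:
    bpf_map_delete_elem(&args_map, &pid_tgid);
    return 0;
}

char _license[] SEC("license") = "GPL";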
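The https.h hunk is a related ordering fix: the stack copy of the SSL context pointer now happens before bpf_map_delete_elem removes the entry it is read from, so the map value is no longer dereferenced after its slot may have been reclaimed. As the in-file comment notes, the stack copy itself is what older kernels require; to my understanding, older verifiers only accept a pointer to the stack as a map key, so a value still living in one map cannot be passed directly as the key of another. A rough illustration under those assumptions, with hypothetical map names (ctx_by_pid, sock_by_ctx) and __u64 standing in for the stored pointer:

// Sketch only: copy the looked-up value to the stack *before* deleting
// its entry, then use the stack copy as the next map's key.
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
    __uint(type, BPF_MAP_TYPE_HASH);
    __uint(max_entries, 1024);
    __type(key, __u64);   // pid_tgid
    __type(value, __u64); // SSL context pointer, stored as u64
} ctx_by_pid SEC(".maps");

struct {
    __uint(type, BPF_MAP_TYPE_HASH);
    __uint(max_entries, 1024);
    __type(key, __u64);   // SSL context pointer
    __type(value, __u32); // some per-context record
} sock_by_ctx SEC(".maps");

static __always_inline void remap(__u64 pid_tgid, __u32 record) {
    __u64 *val = bpf_map_lookup_elem(&ctx_by_pid, &pid_tgid);
    if (val == NULL) {
        return;
    }
    __u64 ssl_ctx = *val; // stack copy, taken while the entry is alive
    bpf_map_delete_elem(&ctx_by_pid, &pid_tgid);
    // the key is now a stack pointer, which old verifiers also accept
    bpf_map_update_elem(&sock_by_ctx, &ssl_ctx, &record, BPF_ANY);
}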