/* ==========================================================================
* rdma.c - support RDMA protocol for transport layer.
* --------------------------------------------------------------------------
* Copyright (C) 2021-2024 zhenwei pi <pizhenwei@bytedance.com>
*
* This work is licensed under BSD 3-Clause, License 1 of the COPYING file in
* the top-level directory.
* ==========================================================================
*/
/*
* Copyright (c) Valkey Contributors
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*/
#define VALKEYMODULE_CORE_MODULE
#include "server.h"
#include "connection.h"
#if defined __linux__ /* currently RDMA is only supported on Linux */
#if (USE_RDMA == 1 /* BUILD_YES */) || ((USE_RDMA == 2 /* BUILD_MODULE */) && (BUILD_RDMA_MODULE == 2))
#include "connhelpers.h"
#include <assert.h>
#include <arpa/inet.h>
#include <rdma/rdma_cma.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>
#include <sys/mman.h>
#define CONN_TYPE_RDMA "rdma"
typedef struct ValkeyRdmaFeature {
/* one of the ValkeyRdmaOpcode values defined below */
uint16_t opcode;
/* select features */
uint16_t select;
uint8_t rsvd[20];
/* feature bits */
uint64_t features;
} ValkeyRdmaFeature;
typedef struct ValkeyRdmaKeepalive {
/* one of the ValkeyRdmaOpcode values defined below */
uint16_t opcode;
uint8_t rsvd[30];
} ValkeyRdmaKeepalive;
typedef struct ValkeyRdmaMemory {
/* one of the ValkeyRdmaOpcode values defined below */
uint16_t opcode;
uint8_t rsvd[14];
/* address of a transfer buffer which is used to receive remote streaming data,
* aka 'RX buffer address'. The remote side should use this as 'TX buffer address' */
uint64_t addr;
/* length of the 'RX buffer' */
uint32_t length;
/* the RDMA remote key of 'RX buffer' */
uint32_t key;
} ValkeyRdmaMemory;
typedef union ValkeyRdmaCmd {
ValkeyRdmaFeature feature;
ValkeyRdmaKeepalive keepalive;
ValkeyRdmaMemory memory;
} ValkeyRdmaCmd;
typedef enum ValkeyRdmaOpcode {
GetServerFeature = 0,
SetClientFeature = 1,
Keepalive = 2,
RegisterXferMemory = 3,
} ValkeyRdmaOpcode;
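/* Wire-format sketch, derived from the hton*()/ntoh*() conversions used later in this file:
* every multi-byte field of a ValkeyRdmaCmd travels big-endian on the wire, and the rsvd[]
* padding keeps each command variant at a fixed 32 bytes. Building a RegisterXferMemory
* command, as connRdmaRegisterRx() does below, looks like:
*
*   ValkeyRdmaCmd cmd = {0};
*   cmd.memory.opcode = htons(RegisterXferMemory);  // 16-bit opcode in network order
*   cmd.memory.addr = htonu64((uint64_t)rx_addr);   // local RX buffer address
*   cmd.memory.length = htonl(rx_length);           // RX buffer length in bytes
*   cmd.memory.key = htonl(rx_rkey);                // rkey obtained from ibv_reg_mr()
*
* rx_addr, rx_length and rx_rkey are placeholder names for this sketch. */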
#define VALKEY_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))
#define VALKEY_RDMA_MAX_WQE 1024
#define VALKEY_RDMA_DEFAULT_RX_SIZE (1024 * 1024)
#define VALKEY_RDMA_MIN_RX_SIZE (64 * 1024)
#define VALKEY_RDMA_MAX_RX_SIZE (16 * 1024 * 1024)
#define VALKEY_RDMA_SYNCIO_RES 10
#define VALKEY_RDMA_INVALID_OPCODE 0xffff
#define VALKEY_RDMA_KEEPALIVE_MS 3000
#define RDMA_CONN_FLAG_POSTPONE_UPDATE_STATE (1 << 0)
typedef struct rdma_connection {
connection c;
struct rdma_cm_id *cm_id;
int flags;
int last_errno;
listNode *pending_list_node;
} rdma_connection;
typedef struct RdmaXfer {
struct ibv_mr *mr; /* memory region of the transfer buffer */
char *addr; /* address of transfer buffer in local memory */
uint32_t length; /* bytes of transfer buffer */
uint32_t offset; /* the offset of consumed transfer buffer */
uint32_t pos; /* the position in use of the transfer buffer */
} RdmaXfer;
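/* RX cursor semantics, illustrated (see connRdmaHandleRecvImm and connRdmaEventHandler below):
* 'offset' advances as the peer's RDMA WRITE-with-immediate completions arrive, and 'pos'
* advances as the upper layer reads, so unread data lives in [pos, offset):
*
*   addr                 pos           offset             length
*    |---- consumed -----|--- unread ---|---- writable ----|
*
* Once pos reaches length the buffer is exhausted and a fresh one is advertised to the peer
* via connRdmaRegisterRx(). */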
typedef struct RdmaContext {
connection *conn;
char *ip;
int port;
long long keepalive_te; /* keepalive timer event id; RDMA itself has no transport-layer keepalive */
struct ibv_pd *pd;
struct rdma_event_channel *cm_channel;
struct ibv_comp_channel *comp_channel;
struct ibv_cq *cq;
/* TX */
RdmaXfer tx;
char *tx_addr; /* remote transfer buffer address */
uint32_t tx_key; /* remote transfer buffer key */
uint32_t tx_length; /* remote transfer buffer length */
uint32_t tx_offset; /* remote transfer buffer offset */
uint32_t tx_ops; /* operations on remote transfer */
/* RX */
RdmaXfer rx;
/* cmd_buf slots [0, VALKEY_RDMA_MAX_WQE) hold recv commands,
* slots [VALKEY_RDMA_MAX_WQE, 2 * VALKEY_RDMA_MAX_WQE) hold send commands */
ValkeyRdmaCmd *cmd_buf;
struct ibv_mr *cmd_mr;
} RdmaContext;
typedef struct rdma_listener {
struct rdma_cm_id *cm_id;
struct rdma_event_channel *cm_channel;
} rdma_listener;
/* An RDMA connection is always writable, but there is no POLLOUT event to drive the write
* handler, so connections with an installed write handler are recorded in a pending list */
static list *pending_list;
static rdma_listener *rdma_listeners;
static serverRdmaContextConfig *rdma_config;
static size_t page_size;
static ConnectionType CT_RDMA;
static void serverRdmaError(char *err, const char *fmt, ...) {
va_list ap;
if (!err) return;
va_start(ap, fmt);
vsnprintf(err, ANET_ERR_LEN, fmt, ap);
va_end(ap);
}
static inline int connRdmaAllowCommand(void) {
/* An RDMA MR is not accessible from a child process; refuse the command to avoid a
* segmentation fault on invalid MR access, rather than letting the server crash at random */
if (server.in_fork_child != CHILD_TYPE_NONE) {
return C_ERR;
}
return C_OK;
}
static inline int connRdmaAllowRW(connection *conn) {
if (conn->state == CONN_STATE_ERROR || conn->state == CONN_STATE_CLOSED) {
return C_ERR;
}
return connRdmaAllowCommand();
}
static int rdmaPostRecv(RdmaContext *ctx, struct rdma_cm_id *cm_id, ValkeyRdmaCmd *cmd) {
struct ibv_sge sge;
size_t length = sizeof(ValkeyRdmaCmd);
struct ibv_recv_wr recv_wr, *bad_wr;
int ret;
if (connRdmaAllowCommand()) {
return C_ERR;
}
sge.addr = (uint64_t)cmd;
sge.length = length;
sge.lkey = ctx->cmd_mr->lkey;
recv_wr.wr_id = (uint64_t)cmd;
recv_wr.sg_list = &sge;
recv_wr.num_sge = 1;
recv_wr.next = NULL;
ret = ibv_post_recv(cm_id->qp, &recv_wr, &bad_wr);
if (ret && (ret != EAGAIN)) {
serverLog(LL_WARNING, "RDMA: post recv failed: %d", ret);
return C_ERR;
}
return C_OK;
}
/* To keep Valkey forkable, a buffer registered as an RDMA memory region must be aligned to the
* page size, and its length must be page aligned as well.
* Otherwise a random segment fault can happen like this:
* 0x7f2764ac5000 - 0x7f2764ac7000
* |ptr0 128| ... |ptr1 4096| ... |ptr2 512|
*
* After ibv_reg_mr(pd, ptr1, 4096, access), the full 8K range becomes DONTFORK, and the child
* process hits a segment fault when accessing ptr0/ptr2.
*
* The portable posix_memalign(&tmp, page_size, aligned_size) would work too. However, RDMA is
* supported on Linux only, so using the raw mmap syscall breaks nothing, and allocating a
* separate virtual memory area (VMA) lets us protect it with two guard pages (one leading,
* one trailing).
*/
static void *rdmaMemoryAlloc(size_t size) {
size_t real_size, aligned_size = (size + page_size - 1) & (~(page_size - 1));
uint8_t *ptr;
real_size = aligned_size + 2 * page_size;
ptr = mmap(NULL, real_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (ptr == MAP_FAILED) {
serverPanic("failed to allocate memory for RDMA region");
}
madvise(ptr, real_size, MADV_DONTDUMP); /* no need to dump this VMA on coredump */
mprotect(ptr, page_size, PROT_NONE);                            /* leading guard page of this VMA */
mprotect(ptr + aligned_size + page_size, page_size, PROT_NONE); /* trailing guard page of this VMA */
return ptr + page_size;
}
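/* Resulting layout of rdmaMemoryAlloc(size), with both guard pages mapped PROT_NONE:
*
*   mmap'ed base               returned pointer
*    | guard page | aligned_size usable bytes | guard page |
*
* A stray access just before or after the registered region faults immediately instead of
* silently touching a neighboring allocation. */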
static void rdmaMemoryFree(void *ptr, size_t size) {
uint8_t *real_ptr;
size_t real_size, aligned_size;
if (!ptr) {
return;
}
if ((unsigned long)ptr & (page_size - 1)) {
serverPanic("unaligned memory in use for RDMA region");
}
aligned_size = (size + page_size - 1) & (~(page_size - 1));
real_size = aligned_size + 2 * page_size;
real_ptr = (uint8_t *)ptr - page_size;
if (munmap(real_ptr, real_size)) {
serverPanic("failed to free memory for RDMA region");
}
}
static void rdmaDestroyIoBuf(RdmaContext *ctx) {
if (ctx->rx.mr) {
ibv_dereg_mr(ctx->rx.mr);
ctx->rx.mr = NULL;
}
rdmaMemoryFree(ctx->rx.addr, ctx->rx.length);
ctx->rx.addr = NULL;
if (ctx->tx.mr) {
ibv_dereg_mr(ctx->tx.mr);
ctx->tx.mr = NULL;
}
rdmaMemoryFree(ctx->tx.addr, ctx->tx.length);
ctx->tx.addr = NULL;
if (ctx->cmd_mr) {
ibv_dereg_mr(ctx->cmd_mr);
ctx->cmd_mr = NULL;
}
rdmaMemoryFree(ctx->cmd_buf, sizeof(ValkeyRdmaCmd) * VALKEY_RDMA_MAX_WQE * 2);
ctx->cmd_buf = NULL;
}
static int rdmaSetupIoBuf(RdmaContext *ctx, struct rdma_cm_id *cm_id) {
int access = IBV_ACCESS_LOCAL_WRITE;
size_t length = sizeof(ValkeyRdmaCmd) * VALKEY_RDMA_MAX_WQE * 2;
ValkeyRdmaCmd *cmd;
int i;
/* setup CMD buf & MR */
ctx->cmd_buf = rdmaMemoryAlloc(length);
ctx->cmd_mr = ibv_reg_mr(ctx->pd, ctx->cmd_buf, length, access);
if (!ctx->cmd_mr) {
serverLog(LL_WARNING, "RDMA: reg mr for CMD failed");
goto destroy_iobuf;
}
for (i = 0; i < VALKEY_RDMA_MAX_WQE; i++) {
cmd = ctx->cmd_buf + i;
if (rdmaPostRecv(ctx, cm_id, cmd) == C_ERR) {
serverLog(LL_WARNING, "RDMA: post recv failed");
goto destroy_iobuf;
}
}
for (i = VALKEY_RDMA_MAX_WQE; i < VALKEY_RDMA_MAX_WQE * 2; i++) {
cmd = ctx->cmd_buf + i;
cmd->keepalive.opcode = VALKEY_RDMA_INVALID_OPCODE;
}
/* setup recv buf & MR */
access = IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE;
length = rdma_config->rx_size;
ctx->rx.addr = rdmaMemoryAlloc(length);
ctx->rx.length = length;
ctx->rx.mr = ibv_reg_mr(ctx->pd, ctx->rx.addr, length, access);
if (!ctx->rx.mr) {
serverLog(LL_WARNING, "RDMA: reg mr for recv buffer failed");
goto destroy_iobuf;
}
return C_OK;
destroy_iobuf:
rdmaDestroyIoBuf(ctx);
return C_ERR;
}
static int rdmaCreateResource(RdmaContext *ctx, struct rdma_cm_id *cm_id) {
int ret = C_OK;
struct ibv_device_attr device_attr;
struct ibv_qp_init_attr init_attr;
struct ibv_comp_channel *comp_channel = NULL;
struct ibv_cq *cq = NULL;
struct ibv_pd *pd = NULL;
int comp_vector = rdma_config->completion_vector;
if (ibv_query_device(cm_id->verbs, &device_attr)) {
serverLog(LL_WARNING, "RDMA: ibv ibv query device failed");
return C_ERR;
}
pd = ibv_alloc_pd(cm_id->verbs);
if (!pd) {
serverLog(LL_WARNING, "RDMA: ibv alloc pd failed");
return C_ERR;
}
ctx->pd = pd;
comp_channel = ibv_create_comp_channel(cm_id->verbs);
if (!comp_channel) {
serverLog(LL_WARNING, "RDMA: ibv create comp channel failed");
return C_ERR;
}
ctx->comp_channel = comp_channel;
/* negative number means a random one */
if (comp_vector < 0) {
comp_vector = abs((int)random());
}
cq = ibv_create_cq(cm_id->verbs, VALKEY_RDMA_MAX_WQE * 2, NULL, comp_channel,
comp_vector % cm_id->verbs->num_comp_vectors);
if (!cq) {
serverLog(LL_WARNING, "RDMA: ibv create cq failed");
return C_ERR;
}
ctx->cq = cq;
ibv_req_notify_cq(cq, 0);
memset(&init_attr, 0, sizeof(init_attr));
init_attr.cap.max_send_wr = VALKEY_RDMA_MAX_WQE;
init_attr.cap.max_recv_wr = VALKEY_RDMA_MAX_WQE;
init_attr.cap.max_send_sge = device_attr.max_sge;
init_attr.cap.max_recv_sge = 1;
init_attr.qp_type = IBV_QPT_RC;
init_attr.send_cq = cq;
init_attr.recv_cq = cq;
ret = rdma_create_qp(cm_id, pd, &init_attr);
if (ret) {
serverLog(LL_WARNING, "RDMA: create qp failed");
return C_ERR;
}
if (rdmaSetupIoBuf(ctx, cm_id)) {
return C_ERR;
}
return C_OK;
}
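/* Note on teardown ordering: rdmaReleaseResource() below releases these objects in reverse
* (I/O buffers and their MRs first, then the CQ, the completion channel, and finally the PD),
* since ibverbs refuses to deallocate a PD while MRs or QPs created from it still exist. */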
static void rdmaReleaseResource(RdmaContext *ctx) {
rdmaDestroyIoBuf(ctx);
if (ctx->cq) {
ibv_destroy_cq(ctx->cq);
}
if (ctx->comp_channel) {
ibv_destroy_comp_channel(ctx->comp_channel);
}
if (ctx->pd) {
ibv_dealloc_pd(ctx->pd);
}
}
static int rdmaAdjustSendbuf(RdmaContext *ctx, unsigned int length) {
int access = IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_READ | IBV_ACCESS_REMOTE_WRITE;
if (length == ctx->tx_length) {
return C_OK;
}
/* try to free old MR & buffer */
if (ctx->tx_length) {
ibv_dereg_mr(ctx->tx.mr);
rdmaMemoryFree(ctx->tx.addr, ctx->tx_length);
ctx->tx_length = 0;
}
/* create a new buffer & MR */
ctx->tx.addr = rdmaMemoryAlloc(length);
ctx->tx_length = length;
ctx->tx.mr = ibv_reg_mr(ctx->pd, ctx->tx.addr, length, access);
if (!ctx->tx.mr) {
serverRdmaError(server.neterr, "RDMA: reg send mr failed");
serverLog(LL_WARNING, "RDMA: FATAL error, recv corrupted cmd");
zlibc_free(ctx->tx.addr);
ctx->tx.addr = NULL;
ctx->tx_length = 0;
return C_ERR;
}
return C_OK;
}
static int rdmaSendCommand(RdmaContext *ctx, struct rdma_cm_id *cm_id, ValkeyRdmaCmd *cmd) {
struct ibv_send_wr send_wr, *bad_wr;
struct ibv_sge sge;
ValkeyRdmaCmd *_cmd;
int i, ret;
/* find an unused cmd buffer */
for (i = VALKEY_RDMA_MAX_WQE; i < 2 * VALKEY_RDMA_MAX_WQE; i++) {
_cmd = ctx->cmd_buf + i;
if (_cmd->keepalive.opcode == VALKEY_RDMA_INVALID_OPCODE) {
break;
}
}
assert(i < 2 * VALKEY_RDMA_MAX_WQE);
memcpy(_cmd, cmd, sizeof(ValkeyRdmaCmd));
sge.addr = (uint64_t)_cmd;
sge.length = sizeof(ValkeyRdmaCmd);
sge.lkey = ctx->cmd_mr->lkey;
send_wr.sg_list = &sge;
send_wr.num_sge = 1;
send_wr.wr_id = (uint64_t)_cmd;
send_wr.opcode = IBV_WR_SEND;
send_wr.send_flags = IBV_SEND_SIGNALED;
send_wr.next = NULL;
ret = ibv_post_send(cm_id->qp, &send_wr, &bad_wr);
if (ret) {
serverLog(LL_WARNING, "RDMA: post send failed: %d", ret);
return C_ERR;
}
return C_OK;
}
static int connRdmaRegisterRx(RdmaContext *ctx, struct rdma_cm_id *cm_id) {
ValkeyRdmaCmd cmd = {0};
cmd.memory.opcode = htons(RegisterXferMemory);
cmd.memory.addr = htonu64((uint64_t)ctx->rx.addr);
cmd.memory.length = htonl(ctx->rx.length);
cmd.memory.key = htonl(ctx->rx.mr->rkey);
ctx->rx.offset = 0;
ctx->rx.pos = 0;
return rdmaSendCommand(ctx, cm_id, &cmd);
}
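/* This is the buffer-advertising half of the transfer protocol: the local side registers its RX
* buffer and hands its address/length/rkey to the peer (the ValkeyRdmaMemory layout above), and
* the peer uses that triple as its TX target for RDMA WRITE with immediate data. Resetting
* offset/pos restarts the fill/consume cursors for the freshly advertised buffer. */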
static int connRdmaGetFeature(RdmaContext *ctx, struct rdma_cm_id *cm_id, ValkeyRdmaCmd *cmd) {
ValkeyRdmaCmd _cmd = {0};
_cmd.feature.opcode = htons(GetServerFeature);
_cmd.feature.select = cmd->feature.select;
_cmd.feature.features = htonu64(0); /* currently no feature support */
return rdmaSendCommand(ctx, cm_id, &_cmd);
}
static int connRdmaSetFeature(RdmaContext *ctx, struct rdma_cm_id *cm_id, ValkeyRdmaCmd *cmd) {
UNUSED(ctx);
UNUSED(cm_id);
/* currently no feature support */
if (ntohu64(cmd->feature.features)) return C_ERR;
return C_OK;
}
static int rdmaHandleEstablished(struct rdma_cm_event *ev) {
struct rdma_cm_id *cm_id = ev->id;
RdmaContext *ctx = cm_id->context;
connRdmaRegisterRx(ctx, cm_id);
return C_OK;
}
static inline void rdmaDelKeepalive(aeEventLoop *el, RdmaContext *ctx) {
if (ctx->keepalive_te == AE_ERR) {
return;
}
aeDeleteTimeEvent(el, ctx->keepalive_te);
ctx->keepalive_te = AE_ERR;
}
static int rdmaHandleDisconnect(aeEventLoop *el, struct rdma_cm_event *ev) {
struct rdma_cm_id *cm_id = ev->id;
RdmaContext *ctx = cm_id->context;
connection *conn = ctx->conn;
rdma_connection *rdma_conn = (rdma_connection *)conn;
rdmaDelKeepalive(el, ctx);
conn->state = CONN_STATE_CLOSED;
/* we can't close the connection right now, so mark it closed and defer cleanup via the pending list */
listAddNodeTail(pending_list, conn);
rdma_conn->pending_list_node = listLast(pending_list);
return C_OK;
}
static int connRdmaHandleRecv(RdmaContext *ctx, struct rdma_cm_id *cm_id, ValkeyRdmaCmd *cmd, uint32_t byte_len) {
if (unlikely(byte_len != sizeof(ValkeyRdmaCmd))) {
serverLog(LL_WARNING, "RDMA: FATAL error, recv corrupted cmd");
return C_ERR;
}
switch (ntohs(cmd->keepalive.opcode)) {
case GetServerFeature: connRdmaGetFeature(ctx, cm_id, cmd); break;
case SetClientFeature: connRdmaSetFeature(ctx, cm_id, cmd); break;
case Keepalive: break;
case RegisterXferMemory:
ctx->tx_addr = (char *)ntohu64(cmd->memory.addr);
ctx->tx.length = ntohl(cmd->memory.length);
ctx->tx_key = ntohl(cmd->memory.key);
ctx->tx.offset = 0;
rdmaAdjustSendbuf(ctx, ctx->tx.length);
break;
default: serverLog(LL_WARNING, "RDMA: FATAL error, unknown cmd"); return C_ERR;
}
return rdmaPostRecv(ctx, cm_id, cmd);
}
static int connRdmaHandleSend(ValkeyRdmaCmd *cmd) {
/* clear the cmd and mark it as already sent */
memset(cmd, 0x00, sizeof(*cmd));
cmd->keepalive.opcode = VALKEY_RDMA_INVALID_OPCODE;
return C_OK;
}
static int connRdmaHandleRecvImm(RdmaContext *ctx, struct rdma_cm_id *cm_id, ValkeyRdmaCmd *cmd, uint32_t byte_len) {
assert(byte_len + ctx->rx.offset <= ctx->rx.length);
ctx->rx.offset += byte_len;
return rdmaPostRecv(ctx, cm_id, cmd);
}
static int connRdmaHandleWrite(RdmaContext *ctx, uint32_t byte_len) {
UNUSED(ctx);
UNUSED(byte_len);
return C_OK;
}
static int connRdmaHandleCq(rdma_connection *rdma_conn) {
struct rdma_cm_id *cm_id = rdma_conn->cm_id;
RdmaContext *ctx = cm_id->context;
struct ibv_cq *ev_cq = NULL;
void *ev_ctx = NULL;
struct ibv_wc wc = {0};
ValkeyRdmaCmd *cmd;
int ret;
if (ibv_get_cq_event(ctx->comp_channel, &ev_cq, &ev_ctx) < 0) {
if (errno != EAGAIN) {
serverLog(LL_WARNING, "RDMA: get CQ event error");
return C_ERR;
}
} else if (ibv_req_notify_cq(ev_cq, 0)) {
serverLog(LL_WARNING, "RDMA: notify CQ error");
return C_ERR;
}
pollcq:
ret = ibv_poll_cq(ctx->cq, 1, &wc);
if (ret < 0) {
serverLog(LL_WARNING, "RDMA: poll recv CQ error");
return C_ERR;
} else if (ret == 0) {
return C_OK;
}
ibv_ack_cq_events(ctx->cq, 1);
if (wc.status != IBV_WC_SUCCESS) {
if (rdma_conn->c.state == CONN_STATE_CONNECTED) {
serverLog(LL_WARNING, "RDMA: CQ handle error status: %s[0x%x], opcode : 0x%x", ibv_wc_status_str(wc.status),
wc.status, wc.opcode);
}
return C_ERR;
}
switch (wc.opcode) {
case IBV_WC_RECV:
cmd = (ValkeyRdmaCmd *)wc.wr_id;
if (connRdmaHandleRecv(ctx, cm_id, cmd, wc.byte_len) == C_ERR) {
return C_ERR;
}
break;
case IBV_WC_RECV_RDMA_WITH_IMM:
cmd = (ValkeyRdmaCmd *)wc.wr_id;
if (connRdmaHandleRecvImm(ctx, cm_id, cmd, ntohl(wc.imm_data)) == C_ERR) {
rdma_conn->c.state = CONN_STATE_ERROR;
return C_ERR;
}
break;
case IBV_WC_RDMA_WRITE:
if (connRdmaHandleWrite(ctx, wc.byte_len) == C_ERR) {
return C_ERR;
}
break;
case IBV_WC_SEND:
cmd = (ValkeyRdmaCmd *)wc.wr_id;
if (connRdmaHandleSend(cmd) == C_ERR) {
return C_ERR;
}
break;
default: serverLog(LL_WARNING, "RDMA: unexpected opcode 0x%x", wc.opcode); return C_ERR;
}
goto pollcq;
}
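/* connRdmaHandleCq follows the standard ibverbs completion-channel flow: consume the channel
* event with ibv_get_cq_event() (the channel fd is kept non-blocking, so EAGAIN simply means no
* new event), re-arm notifications with ibv_req_notify_cq(), then ibv_poll_cq() in a loop until
* it returns 0 so that no completion is left unhandled between notifications. */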
static int connRdmaAccept(connection *conn, ConnectionCallbackFunc accept_handler) {
rdma_connection *rdma_conn = (rdma_connection *)conn;
struct rdma_cm_id *cm_id = rdma_conn->cm_id;
RdmaContext *ctx = cm_id->context;
struct ibv_device_attr device_attr;
int ret = C_OK;
if (conn->state != CONN_STATE_ACCEPTING) return C_ERR;
conn->state = CONN_STATE_CONNECTED;
connIncrRefs(conn);
if (!callHandler(conn, accept_handler)) ret = C_ERR;
connDecrRefs(conn);
if (ibv_query_device(cm_id->verbs, &device_attr)) {
serverLog(LL_WARNING, "RDMA: ibv ibv query device failed");
return C_ERR;
}
conn->iovcnt = min(device_attr.max_sge, IOV_MAX);
ctx->conn = conn; /* save conn into RdmaContext */
return ret;
}
static connection *connCreateRdma(void) {
rdma_connection *rdma_conn = zcalloc(sizeof(rdma_connection));
rdma_conn->c.type = &CT_RDMA;
rdma_conn->c.fd = -1;
rdma_conn->c.iovcnt = 1; /* at least 1, overwrite this on connect */
return (connection *)rdma_conn;
}
static connection *connCreateAcceptedRdma(int fd, void *priv) {
rdma_connection *rdma_conn = (rdma_connection *)connCreateRdma();
rdma_conn->c.fd = fd;
rdma_conn->c.state = CONN_STATE_ACCEPTING;
rdma_conn->cm_id = priv;
/* The comp channel fd should always be non-blocking */
connNonBlock(&rdma_conn->c);
return (connection *)rdma_conn;
}
static void connRdmaEventHandler(struct aeEventLoop *el, int fd, void *clientData, int mask) {
rdma_connection *rdma_conn = (rdma_connection *)clientData;
connection *conn = &rdma_conn->c;
struct rdma_cm_id *cm_id = rdma_conn->cm_id;
RdmaContext *ctx = cm_id->context;
int ret = 0;
UNUSED(el);
UNUSED(fd);
UNUSED(mask);
ret = connRdmaHandleCq(rdma_conn);
if (ret == C_ERR) {
conn->state = CONN_STATE_ERROR;
return;
}
/* the upper layer should read all available data */
while (!(rdma_conn->flags & RDMA_CONN_FLAG_POSTPONE_UPDATE_STATE) && ctx->rx.pos < ctx->rx.offset) {
if (conn->read_handler && (callHandler(conn, conn->read_handler) == C_ERR)) {
return;
}
}
/* recv buf is full, register a new RX buffer */
if (ctx->rx.pos == ctx->rx.length) {
connRdmaRegisterRx(ctx, cm_id);
}
/* RDMA comp channel has no POLLOUT event, try to send remaining buffer */
if (!(rdma_conn->flags & RDMA_CONN_FLAG_POSTPONE_UPDATE_STATE) && ctx->tx.offset < ctx->tx.length && conn->write_handler) {
callHandler(conn, conn->write_handler);
}
}
static long long rdmaKeepaliveTimeProc(struct aeEventLoop *el, long long id, void *clientData) {
struct rdma_cm_id *cm_id = clientData;
RdmaContext *ctx = cm_id->context;
connection *conn = ctx->conn;
ValkeyRdmaCmd cmd = {0};
UNUSED(el);
UNUSED(id);
if (conn->state != CONN_STATE_CONNECTED) {
return AE_NOMORE;
}
cmd.keepalive.opcode = htons(Keepalive);
if (rdmaSendCommand(ctx, cm_id, &cmd) != C_OK) {
return AE_NOMORE;
}
return VALKEY_RDMA_KEEPALIVE_MS;
}
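/* ae time-event semantics: a time proc that returns a positive value is re-armed to fire that
* many milliseconds later, while returning AE_NOMORE removes the timer. This proc therefore
* keeps sending a Keepalive command every VALKEY_RDMA_KEEPALIVE_MS until the connection leaves
* the CONNECTED state or a send fails. */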
static int rdmaHandleConnect(aeEventLoop *el, char *err, struct rdma_cm_event *ev, char *ip, size_t ip_len, int *port) {
int ret = C_OK;
struct rdma_cm_id *cm_id = ev->id;
struct sockaddr_storage caddr;
RdmaContext *ctx = NULL;
struct rdma_conn_param conn_param = {
.responder_resources = 1,
.initiator_depth = 1,
.retry_count = 5,
};
memcpy(&caddr, &cm_id->route.addr.dst_addr, sizeof(caddr));
if (caddr.ss_family == AF_INET) {
struct sockaddr_in *s = (struct sockaddr_in *)&caddr;
if (ip) inet_ntop(AF_INET, (void *)&(s->sin_addr), ip, ip_len);
if (port) *port = ntohs(s->sin_port);
} else {
struct sockaddr_in6 *s = (struct sockaddr_in6 *)&caddr;
if (ip) inet_ntop(AF_INET6, (void *)&(s->sin6_addr), ip, ip_len);
if (port) *port = ntohs(s->sin6_port);
}
ctx = zcalloc(sizeof(RdmaContext));
ctx->ip = zstrdup(ip);
ctx->port = *port;
ctx->keepalive_te = aeCreateTimeEvent(el, VALKEY_RDMA_KEEPALIVE_MS, rdmaKeepaliveTimeProc, cm_id, NULL);
if (ctx->keepalive_te == AE_ERR) {
goto reject;
}
cm_id->context = ctx;
if (rdmaCreateResource(ctx, cm_id) == C_ERR) {
goto reject;
}
ret = rdma_accept(cm_id, &conn_param);
if (ret) {
serverRdmaError(err, "RDMA: accept failed");
goto free_rdma;
}
return C_OK;
free_rdma:
rdmaReleaseResource(ctx);
reject:
/* reject the connect request on error */
rdma_reject(cm_id, NULL, 0);
return C_ERR;
}
static rdma_listener *rdmaFdToListener(connListener *listener, int fd) {
for (int i = 0; i < listener->count; i++) {
if (listener->fd[i] != fd) continue;
return &rdma_listeners[i];
}
return NULL;
}
/*
* rdmaAccept actually works as the cm-event handler for the listening cm_id.
* Accepting a connection works in two steps:
* 1. handle RDMA_CM_EVENT_CONNECT_REQUEST and return the comp channel fd on success
* 2. handle RDMA_CM_EVENT_ESTABLISHED and return C_OK on success
*/
static int
rdmaAccept(aeEventLoop *el, connListener *listener, char *err, int fd, char *ip, size_t ip_len, int *port, void **priv) {
struct rdma_cm_event *ev;
enum rdma_cm_event_type ev_type;
int ret = C_OK;
rdma_listener *rdma_listener;
rdma_listener = rdmaFdToListener(listener, fd);
if (!rdma_listener) {
serverPanic("RDMA: unexpected listen file descriptor");
}
ret = rdma_get_cm_event(rdma_listener->cm_channel, &ev);
if (ret) {
if (errno != EAGAIN) {
serverLog(LL_WARNING, "RDMA: listen channel rdma_get_cm_event failed, %s", strerror(errno));
return ANET_ERR;
}
return ANET_OK;
}
ev_type = ev->event;
switch (ev_type) {
case RDMA_CM_EVENT_CONNECT_REQUEST:
ret = rdmaHandleConnect(el, err, ev, ip, ip_len, port);
if (ret == C_OK) {
RdmaContext *ctx = (RdmaContext *)ev->id->context;
*priv = ev->id;
ret = ctx->comp_channel->fd;
}
break;
case RDMA_CM_EVENT_ESTABLISHED: ret = rdmaHandleEstablished(ev); break;
case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_ADDR_ERROR:
case RDMA_CM_EVENT_ROUTE_ERROR:
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_REJECTED:
case RDMA_CM_EVENT_ADDR_CHANGE:
case RDMA_CM_EVENT_DISCONNECTED:
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
rdmaHandleDisconnect(el, ev);
ret = C_OK;
break;
case RDMA_CM_EVENT_MULTICAST_JOIN:
case RDMA_CM_EVENT_MULTICAST_ERROR:
case RDMA_CM_EVENT_DEVICE_REMOVAL:
case RDMA_CM_EVENT_ADDR_RESOLVED:
case RDMA_CM_EVENT_ROUTE_RESOLVED:
case RDMA_CM_EVENT_CONNECT_RESPONSE:
default: serverLog(LL_NOTICE, "RDMA: listen channel ignore event: %s", rdma_event_str(ev_type)); break;
}
if (rdma_ack_cm_event(ev)) {
serverLog(LL_WARNING, "ack cm event failed\n");
return ANET_ERR;
}
return ret;
}
static void connRdmaAcceptHandler(aeEventLoop *el, int fd, void *privdata, int mask) {
int cport = 0, cfd, max = server.max_new_conns_per_cycle;
struct ClientFlags flags = {0};
char cip[NET_IP_STR_LEN];
void *connpriv = NULL;
connListener *listener = (connListener *)privdata;
UNUSED(el);
UNUSED(mask);
while (max--) {
cfd = rdmaAccept(el, listener, server.neterr, fd, cip, sizeof(cip), &cport, &connpriv);
if (cfd == ANET_ERR) {
if (errno != EWOULDBLOCK) serverLog(LL_WARNING, "RDMA Accepting client connection: %s", server.neterr);
return;
} else if (cfd == ANET_OK)
continue;
serverLog(LL_VERBOSE, "RDMA Accepted %s:%d", cip, cport);
acceptCommonHandler(connCreateAcceptedRdma(cfd, connpriv), flags, cip);
}
}
static int connRdmaSetRwHandler(connection *conn) {
rdma_connection *rdma_conn = (rdma_connection *)conn;
if (rdma_conn->flags & RDMA_CONN_FLAG_POSTPONE_UPDATE_STATE) return C_OK;
/* IB channel only has POLLIN event */
if (conn->read_handler || conn->write_handler) {
if (aeCreateFileEvent(server.el, conn->fd, AE_READABLE, conn->type->ae_handler, conn) == AE_ERR) {
return C_ERR;
}
} else {
aeDeleteFileEvent(server.el, conn->fd, AE_READABLE);
}
return C_OK;
}
static int connRdmaSetWriteHandler(connection *conn, ConnectionCallbackFunc func, int barrier) {
rdma_connection *rdma_conn = (rdma_connection *)conn;
if (conn->state != CONN_STATE_CONNECTED) {
return C_OK;
}
conn->write_handler = func;
if (barrier) {
conn->flags |= CONN_FLAG_WRITE_BARRIER;
} else {
conn->flags &= ~CONN_FLAG_WRITE_BARRIER;
}
/* Does this connection have pending write data? */
if (func) {
listAddNodeTail(pending_list, conn);
rdma_conn->pending_list_node = listLast(pending_list);
} else if (rdma_conn->pending_list_node) {
listDelNode(pending_list, rdma_conn->pending_list_node);
rdma_conn->pending_list_node = NULL;
}
return connRdmaSetRwHandler(conn);
}
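/* Since RDMA never reports POLLOUT, parking the connection on pending_list is what guarantees
* the write handler eventually runs: the list is drained outside the event-loop poll, and
* connRdmaEventHandler also retries the write handler whenever the completion channel becomes
* readable. */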
static int connRdmaSetReadHandler(connection *conn, ConnectionCallbackFunc func) {
conn->read_handler = func;
return connRdmaSetRwHandler(conn);
}
static const char *connRdmaGetLastError(connection *conn) {
return strerror(conn->last_errno);
}
static inline void rdmaConnectFailed(rdma_connection *rdma_conn) {
connection *conn = &rdma_conn->c;
conn->state = CONN_STATE_ERROR;
conn->last_errno = ENETUNREACH;
}
static int rdmaConnect(RdmaContext *ctx, struct rdma_cm_id *cm_id) {
struct rdma_conn_param conn_param = {0};
if (rdmaCreateResource(ctx, cm_id) == C_ERR) {
return C_ERR;
}
/* rdma connect with param */
conn_param.responder_resources = 1;
conn_param.initiator_depth = 1;
conn_param.retry_count = 7;
conn_param.rnr_retry_count = 7;
if (rdma_connect(cm_id, &conn_param)) {
return C_ERR;
}
anetNonBlock(NULL, ctx->comp_channel->fd);
anetCloexec(ctx->comp_channel->fd);
return C_OK;