diff --git a/prov/tcp/src/xnet.h b/prov/tcp/src/xnet.h index 7c88608b5bf..37b5e276824 100644 --- a/prov/tcp/src/xnet.h +++ b/prov/tcp/src/xnet.h @@ -482,21 +482,6 @@ struct xnet_domain { size_t rx_size; }; -static inline struct xnet_progress *xnet_ep2_progress(struct xnet_ep *ep) -{ - return ep->progress; -} - -static inline struct xnet_progress *xnet_rdm2_progress(struct xnet_rdm *rdm) -{ - return &rdm->srx->progress; -} - -static inline struct xnet_progress *xnet_srx2_progress(struct xnet_srx *srx) -{ - return &srx->progress; -} - struct xnet_cq { struct util_cq util_cq; }; @@ -516,11 +501,6 @@ struct xnet_eq { struct dlist_entry fabric_entry; }; -static inline struct xnet_progress *xnet_eq2_progress(struct xnet_eq *eq) -{ - return &eq->progress; -} - int xnet_eq_write(struct util_eq *eq, uint32_t event, const void *buf, size_t len, uint64_t flags); @@ -666,8 +646,8 @@ xnet_alloc_rx(struct xnet_ep *ep) { struct xnet_xfer_entry *xfer; - assert(xnet_progress_locked(xnet_ep2_progress(ep))); - xfer = xnet_alloc_xfer(xnet_ep2_progress(ep)); + assert(xnet_progress_locked(ep->progress)); + xfer = xnet_alloc_xfer(ep->progress); if (xfer) { xfer->cntr = ep->util_ep.cntrs[CNTR_RX]; xfer->cq = xnet_ep_rx_cq(ep); @@ -681,8 +661,8 @@ xnet_alloc_tx(struct xnet_ep *ep) { struct xnet_xfer_entry *xfer; - assert(xnet_progress_locked(xnet_ep2_progress(ep))); - xfer = xnet_alloc_xfer(xnet_ep2_progress(ep)); + assert(xnet_progress_locked(ep->progress)); + xfer = xnet_alloc_xfer(ep->progress); if (xfer) { xfer->hdr.base_hdr.version = XNET_HDR_VERSION; xfer->hdr.base_hdr.op_data = 0; @@ -714,7 +694,7 @@ xnet_alloc_xfer_buf(struct xnet_xfer_entry *xfer, size_t len) */ static inline bool xnet_has_unexp(struct xnet_ep *ep) { - assert(xnet_progress_locked(xnet_ep2_progress(ep))); + assert(xnet_progress_locked(ep->progress)); return ep->cur_rx.handler && !ep->cur_rx.entry; } diff --git a/prov/tcp/src/xnet_cm.c b/prov/tcp/src/xnet_cm.c index 2eee7f57b53..0ecc338f5b7 100644 --- 
a/prov/tcp/src/xnet_cm.c +++ b/prov/tcp/src/xnet_cm.c @@ -184,7 +184,7 @@ void xnet_req_done(struct xnet_ep *ep) ssize_t ret; FI_DBG(&xnet_prov, FI_LOG_EP_CTRL, "connect request done\n"); - assert(xnet_progress_locked(xnet_ep2_progress(ep))); + assert(xnet_progress_locked(ep->progress)); ret = xnet_recv_cm_msg(ep->bsock.sock, ep->cm_msg); if (ret == 0) @@ -216,7 +216,7 @@ void xnet_uring_req_done(struct xnet_ep *ep, int res) ssize_t ret; FI_DBG(&xnet_prov, FI_LOG_EP_CTRL, "connect request done\n"); - assert(xnet_progress_locked(xnet_ep2_progress(ep))); + assert(xnet_progress_locked(ep->progress)); len = sizeof(ep->cm_msg->hdr); if (res < 0) @@ -238,7 +238,7 @@ void xnet_uring_req_done(struct xnet_ep *ep, int res) } ep->pollflags = POLLIN; - ret = xnet_uring_pollin_add(xnet_ep2_progress(ep), ep->bsock.sock, + ret = xnet_uring_pollin_add(ep->progress, ep->bsock.sock, false, &ep->bsock.pollin_sockctx); if (ret) goto disable; @@ -330,7 +330,7 @@ void xnet_connect_done(struct xnet_ep *ep) int status, ret; FI_DBG(&xnet_prov, FI_LOG_EP_CTRL, "socket connected, sending req\n"); - progress = xnet_ep2_progress(ep); + progress = ep->progress; assert(xnet_progress_locked(progress)); len = sizeof(status); diff --git a/prov/tcp/src/xnet_ep.c b/prov/tcp/src/xnet_ep.c index 41e4434b83e..55428772e75 100644 --- a/prov/tcp/src/xnet_ep.c +++ b/prov/tcp/src/xnet_ep.c @@ -249,7 +249,7 @@ static int xnet_ep_connect(struct fid_ep *ep_fid, const void *addr, } } - progress = xnet_ep2_progress(ep); + progress = ep->progress; ofi_genlock_lock(&progress->ep_lock); ep->pollflags = POLLOUT; ret = xnet_monitor_ep(progress, ep); @@ -303,7 +303,7 @@ xnet_ep_accept(struct fid_ep *ep_fid, const void *param, size_t paramlen) ep->state = XNET_CONNECTED; assert(!ofi_bsock_readable(&ep->bsock) && !ep->cur_rx.handler); - progress = xnet_ep2_progress(ep); + progress = ep->progress; ofi_genlock_lock(&progress->ep_lock); ep->pollflags = POLLIN; ret = xnet_monitor_ep(progress, ep); @@ -369,7 +369,7 @@ static 
void xnet_ep_flush_all_queues(struct xnet_ep *ep) struct xnet_progress *progress; int ret; - progress = xnet_ep2_progress(ep); + progress = ep->progress; assert(xnet_progress_locked(progress)); ret = xnet_uring_cancel(progress, &progress->tx_uring, @@ -398,7 +398,7 @@ static void xnet_ep_flush_all_queues(struct xnet_ep *ep) ep->cur_tx.entry->hdr.base_hdr.op_data); } xnet_report_error(ep->cur_tx.entry, FI_ECANCELED); - xnet_free_xfer(xnet_ep2_progress(ep), ep->cur_tx.entry); + xnet_free_xfer(ep->progress, ep->cur_tx.entry); ep->cur_tx.entry = NULL; } @@ -414,7 +414,7 @@ static void xnet_ep_flush_all_queues(struct xnet_ep *ep) if (ep->cur_rx.entry && !(ep->cur_rx.entry->ctrl_flags & XNET_SAVED_XFER)) { xnet_report_error(ep->cur_rx.entry, FI_ECANCELED); - xnet_free_xfer(xnet_ep2_progress(ep), ep->cur_rx.entry); + xnet_free_xfer(ep->progress, ep->cur_rx.entry); } xnet_reset_rx(ep); xnet_flush_xfer_queue(progress, &ep->rx_queue, NULL); @@ -429,7 +429,7 @@ void xnet_ep_disable(struct xnet_ep *ep, int cm_err, void* err_data, struct fi_eq_err_entry err_entry = {0}; int ret; - assert(xnet_progress_locked(xnet_ep2_progress(ep))); + assert(xnet_progress_locked(ep->progress)); switch (ep->state) { case XNET_CONNECTING: case XNET_REQ_SENT: @@ -442,7 +442,7 @@ void xnet_ep_disable(struct xnet_ep *ep, int cm_err, void* err_data, ep->state = XNET_DISCONNECTED; dlist_remove_init(&ep->unexp_entry); if (!xnet_io_uring) - xnet_halt_sock(xnet_ep2_progress(ep), ep->bsock.sock); + xnet_halt_sock(ep->progress, ep->bsock.sock); ret = ofi_shutdown(ep->bsock.sock, SHUT_RDWR); if (ret && ofi_sockerr() != ENOTCONN) @@ -475,10 +475,10 @@ static int xnet_ep_shutdown(struct fid_ep *ep_fid, uint64_t flags) ep = container_of(ep_fid, struct xnet_ep, util_ep.ep_fid); - ofi_genlock_lock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_lock(&ep->progress->ep_lock); (void) ofi_bsock_flush_sync(&ep->bsock); xnet_ep_disable(ep, 0, NULL, 0); - ofi_genlock_unlock(&xnet_ep2_progress(ep)->ep_lock); + 
ofi_genlock_unlock(&ep->progress->ep_lock); return FI_SUCCESS; } @@ -539,7 +539,7 @@ static void xnet_ep_cancel_rx(struct xnet_ep *ep, void *context) struct slist_entry *cur, *prev; struct xnet_xfer_entry *xfer_entry; - assert(xnet_progress_locked(xnet_ep2_progress(ep))); + assert(xnet_progress_locked(ep->progress)); /* To cancel an active receive, we would need to flush the socket of * all data associated with that message. Since some of that data @@ -562,7 +562,7 @@ static void xnet_ep_cancel_rx(struct xnet_ep *ep, void *context) slist_remove(&ep->rx_queue, cur, prev); ep->rx_avail++; xnet_report_error(xfer_entry, FI_ECANCELED); - xnet_free_xfer(xnet_ep2_progress(ep), xfer_entry); + xnet_free_xfer(ep->progress, xfer_entry); } /* We currently only support canceling receives, which is the common case. @@ -575,9 +575,9 @@ static ssize_t xnet_ep_cancel(fid_t fid, void *context) ep = container_of(fid, struct xnet_ep, util_ep.ep_fid.fid); - ofi_genlock_lock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_lock(&ep->progress->ep_lock); xnet_ep_cancel_rx(ep, context); - ofi_genlock_unlock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_unlock(&ep->progress->ep_lock); return 0; } diff --git a/prov/tcp/src/xnet_eq.c b/prov/tcp/src/xnet_eq.c index 3fdeb284d51..dfe0fd95d9e 100644 --- a/prov/tcp/src/xnet_eq.c +++ b/prov/tcp/src/xnet_eq.c @@ -58,7 +58,7 @@ int xnet_eq_write(struct util_eq *eq, uint32_t event, } assert(rdm->util_ep.ep_fid.fid.fclass == FI_CLASS_EP); - assert(xnet_progress_locked(xnet_rdm2_progress(rdm))); + assert(xnet_progress_locked(&rdm->srx->progress)); entry = malloc(sizeof(*entry) + len); if (!entry) return -FI_ENOMEM; @@ -67,7 +67,7 @@ int xnet_eq_write(struct util_eq *eq, uint32_t event, entry->event = event; memcpy(&entry->cm_entry, buf, len); slist_insert_tail(&entry->list_entry, - &xnet_rdm2_progress(rdm)->event_list); + &rdm->srx->progress.event_list); return 0; } diff --git a/prov/tcp/src/xnet_msg.c b/prov/tcp/src/xnet_msg.c index 
747e3eb5f28..204fe0c86af 100644 --- a/prov/tcp/src/xnet_msg.c +++ b/prov/tcp/src/xnet_msg.c @@ -54,7 +54,7 @@ xnet_alloc_send(struct xnet_ep *ep) { struct xnet_xfer_entry *send_entry; - assert(xnet_progress_locked(xnet_ep2_progress(ep))); + assert(xnet_progress_locked(ep->progress)); send_entry = xnet_alloc_tx(ep); if (send_entry) { send_entry->hdr.base_hdr.op = xnet_op_msg; @@ -69,7 +69,7 @@ xnet_alloc_tsend(struct xnet_ep *ep) { struct xnet_xfer_entry *send_entry; - assert(xnet_progress_locked(xnet_ep2_progress(ep))); + assert(xnet_progress_locked(ep->progress)); send_entry = xnet_alloc_tx(ep); if (send_entry) { assert(ep->srx); @@ -157,7 +157,7 @@ xnet_rts_check(struct xnet_ep *ep, struct xnet_xfer_entry *tx_entry) uint64_t msg_len, hdr_size; uint8_t rts_ctx; - assert(xnet_progress_locked(xnet_ep2_progress(ep))); + assert(xnet_progress_locked(ep->progress)); assert(tx_entry->hdr.base_hdr.op == xnet_op_tag); if ((tx_entry->hdr.base_hdr.size <= xnet_max_saved_size) || @@ -193,7 +193,7 @@ xnet_queue_recv(struct xnet_ep *ep, struct xnet_xfer_entry *recv_entry) { bool ret; - assert(xnet_progress_locked(xnet_ep2_progress(ep))); + assert(xnet_progress_locked(ep->progress)); ret = ep->rx_avail; if (ret) { slist_insert_tail(&recv_entry->entry, &ep->rx_queue); @@ -218,7 +218,7 @@ xnet_recvmsg(struct fid_ep *ep_fid, const struct fi_msg *msg, uint64_t flags) assert(msg->iov_count <= XNET_IOV_LIMIT); - ofi_genlock_lock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_lock(&ep->progress->ep_lock); recv_entry = xnet_alloc_rx(ep); if (!recv_entry) { ret = -FI_EAGAIN; @@ -236,11 +236,11 @@ xnet_recvmsg(struct fid_ep *ep_fid, const struct fi_msg *msg, uint64_t flags) recv_entry->context = msg->context; if (!xnet_queue_recv(ep, recv_entry)) { - xnet_free_xfer(xnet_ep2_progress(ep), recv_entry); + xnet_free_xfer(ep->progress, recv_entry); ret = -FI_EAGAIN; } unlock: - ofi_genlock_unlock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_unlock(&ep->progress->ep_lock); return ret; } @@ 
-254,7 +254,7 @@ xnet_recv(struct fid_ep *ep_fid, void *buf, size_t len, void *desc, ep = container_of(ep_fid, struct xnet_ep, util_ep.ep_fid); - ofi_genlock_lock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_lock(&ep->progress->ep_lock); recv_entry = xnet_alloc_rx(ep); if (!recv_entry) { ret = -FI_EAGAIN; @@ -270,11 +270,11 @@ xnet_recv(struct fid_ep *ep_fid, void *buf, size_t len, void *desc, recv_entry->context = context; if (!xnet_queue_recv(ep, recv_entry)) { - xnet_free_xfer(xnet_ep2_progress(ep), recv_entry); + xnet_free_xfer(ep->progress, recv_entry); ret = -FI_EAGAIN; } unlock: - ofi_genlock_unlock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_unlock(&ep->progress->ep_lock); return ret; } @@ -290,7 +290,7 @@ xnet_recvv(struct fid_ep *ep_fid, const struct iovec *iov, void **desc, assert(count <= XNET_IOV_LIMIT); - ofi_genlock_lock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_lock(&ep->progress->ep_lock); recv_entry = xnet_alloc_rx(ep); if (!recv_entry) { ret = -FI_EAGAIN; @@ -306,11 +306,11 @@ xnet_recvv(struct fid_ep *ep_fid, const struct iovec *iov, void **desc, recv_entry->context = context; if (!xnet_queue_recv(ep, recv_entry)) { - xnet_free_xfer(xnet_ep2_progress(ep), recv_entry); + xnet_free_xfer(ep->progress, recv_entry); ret = -FI_EAGAIN; } unlock: - ofi_genlock_unlock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_unlock(&ep->progress->ep_lock); return ret; } @@ -324,7 +324,7 @@ xnet_sendmsg(struct fid_ep *ep_fid, const struct fi_msg *msg, uint64_t flags) ep = container_of(ep_fid, struct xnet_ep, util_ep.ep_fid); - ofi_genlock_lock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_lock(&ep->progress->ep_lock); tx_entry = xnet_alloc_send(ep); if (!tx_entry) { ret = -FI_EAGAIN; @@ -347,7 +347,7 @@ xnet_sendmsg(struct fid_ep *ep_fid, const struct fi_msg *msg, uint64_t flags) xnet_tx_queue_insert(ep, tx_entry); unlock: - ofi_genlock_unlock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_unlock(&ep->progress->ep_lock); return ret; } @@ -361,7 
+361,7 @@ xnet_send(struct fid_ep *ep_fid, const void *buf, size_t len, ep = container_of(ep_fid, struct xnet_ep, util_ep.ep_fid); - ofi_genlock_lock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_lock(&ep->progress->ep_lock); tx_entry = xnet_alloc_send(ep); if (!tx_entry) { ret = -FI_EAGAIN; @@ -376,7 +376,7 @@ xnet_send(struct fid_ep *ep_fid, const void *buf, size_t len, xnet_tx_queue_insert(ep, tx_entry); unlock: - ofi_genlock_unlock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_unlock(&ep->progress->ep_lock); return ret; } @@ -390,7 +390,7 @@ xnet_sendv(struct fid_ep *ep_fid, const struct iovec *iov, ep = container_of(ep_fid, struct xnet_ep, util_ep.ep_fid); - ofi_genlock_lock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_lock(&ep->progress->ep_lock); tx_entry = xnet_alloc_send(ep); if (!tx_entry) { ret = -FI_EAGAIN; @@ -405,7 +405,7 @@ xnet_sendv(struct fid_ep *ep_fid, const struct iovec *iov, xnet_tx_queue_insert(ep, tx_entry); unlock: - ofi_genlock_unlock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_unlock(&ep->progress->ep_lock); return ret; } @@ -420,7 +420,7 @@ xnet_inject(struct fid_ep *ep_fid, const void *buf, size_t len, ep = container_of(ep_fid, struct xnet_ep, util_ep.ep_fid); - ofi_genlock_lock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_lock(&ep->progress->ep_lock); tx_entry = xnet_alloc_send(ep); if (!tx_entry) { ret = -FI_EAGAIN; @@ -433,7 +433,7 @@ xnet_inject(struct fid_ep *ep_fid, const void *buf, size_t len, xnet_tx_queue_insert(ep, tx_entry); unlock: - ofi_genlock_unlock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_unlock(&ep->progress->ep_lock); return ret; } @@ -447,7 +447,7 @@ xnet_senddata(struct fid_ep *ep_fid, const void *buf, size_t len, ep = container_of(ep_fid, struct xnet_ep, util_ep.ep_fid); - ofi_genlock_lock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_lock(&ep->progress->ep_lock); tx_entry = xnet_alloc_send(ep); if (!tx_entry) { ret = -FI_EAGAIN; @@ -468,7 +468,7 @@ xnet_senddata(struct fid_ep *ep_fid, const 
void *buf, size_t len, xnet_tx_queue_insert(ep, tx_entry); unlock: - ofi_genlock_unlock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_unlock(&ep->progress->ep_lock); return ret; } @@ -482,7 +482,7 @@ xnet_injectdata(struct fid_ep *ep_fid, const void *buf, size_t len, ep = container_of(ep_fid, struct xnet_ep, util_ep.ep_fid); - ofi_genlock_lock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_lock(&ep->progress->ep_lock); tx_entry = xnet_alloc_send(ep); if (!tx_entry) { ret = -FI_EAGAIN; @@ -499,7 +499,7 @@ xnet_injectdata(struct fid_ep *ep_fid, const void *buf, size_t len, xnet_tx_queue_insert(ep, tx_entry); unlock: - ofi_genlock_unlock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_unlock(&ep->progress->ep_lock); return ret; } @@ -527,7 +527,7 @@ xnet_tsendmsg(struct fid_ep *fid_ep, const struct fi_msg_tagged *msg, ep = container_of(fid_ep, struct xnet_ep, util_ep.ep_fid); - ofi_genlock_lock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_lock(&ep->progress->ep_lock); tx_entry = xnet_alloc_tsend(ep); if (!tx_entry) { ret = -FI_EAGAIN; @@ -554,7 +554,7 @@ xnet_tsendmsg(struct fid_ep *fid_ep, const struct fi_msg_tagged *msg, if (!ret) xnet_tx_queue_insert(ep, tx_entry); unlock: - ofi_genlock_unlock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_unlock(&ep->progress->ep_lock); return ret; } @@ -568,7 +568,7 @@ xnet_tsend(struct fid_ep *fid_ep, const void *buf, size_t len, ep = container_of(fid_ep, struct xnet_ep, util_ep.ep_fid); - ofi_genlock_lock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_lock(&ep->progress->ep_lock); tx_entry = xnet_alloc_tsend(ep); if (!tx_entry) { ret = -FI_EAGAIN; @@ -587,7 +587,7 @@ xnet_tsend(struct fid_ep *fid_ep, const void *buf, size_t len, if (!ret) xnet_tx_queue_insert(ep, tx_entry); unlock: - ofi_genlock_unlock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_unlock(&ep->progress->ep_lock); return ret; } @@ -601,7 +601,7 @@ xnet_tsendv(struct fid_ep *fid_ep, const struct iovec *iov, void **desc, ep = container_of(fid_ep, struct 
xnet_ep, util_ep.ep_fid); - ofi_genlock_lock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_lock(&ep->progress->ep_lock); tx_entry = xnet_alloc_tsend(ep); if (!tx_entry) { ret = -FI_EAGAIN; @@ -620,7 +620,7 @@ xnet_tsendv(struct fid_ep *fid_ep, const struct iovec *iov, void **desc, if (!ret) xnet_tx_queue_insert(ep, tx_entry); unlock: - ofi_genlock_unlock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_unlock(&ep->progress->ep_lock); return ret; } @@ -635,7 +635,7 @@ xnet_tinject(struct fid_ep *fid_ep, const void *buf, size_t len, ep = container_of(fid_ep, struct xnet_ep, util_ep.ep_fid); - ofi_genlock_lock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_lock(&ep->progress->ep_lock); tx_entry = xnet_alloc_tsend(ep); if (!tx_entry) { ret = -FI_EAGAIN; @@ -650,7 +650,7 @@ xnet_tinject(struct fid_ep *fid_ep, const void *buf, size_t len, xnet_tx_queue_insert(ep, tx_entry); unlock: - ofi_genlock_unlock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_unlock(&ep->progress->ep_lock); return ret; } @@ -664,7 +664,7 @@ xnet_tsenddata(struct fid_ep *fid_ep, const void *buf, size_t len, void *desc, ep = container_of(fid_ep, struct xnet_ep, util_ep.ep_fid); - ofi_genlock_lock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_lock(&ep->progress->ep_lock); tx_entry = xnet_alloc_tsend(ep); if (!tx_entry) { ret = -FI_EAGAIN; @@ -686,7 +686,7 @@ xnet_tsenddata(struct fid_ep *fid_ep, const void *buf, size_t len, void *desc, if (!ret) xnet_tx_queue_insert(ep, tx_entry); unlock: - ofi_genlock_unlock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_unlock(&ep->progress->ep_lock); return ret; } @@ -700,7 +700,7 @@ xnet_tinjectdata(struct fid_ep *fid_ep, const void *buf, size_t len, ep = container_of(fid_ep, struct xnet_ep, util_ep.ep_fid); - ofi_genlock_lock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_lock(&ep->progress->ep_lock); tx_entry = xnet_alloc_tsend(ep); if (!tx_entry) { ret = -FI_EAGAIN; @@ -718,7 +718,7 @@ xnet_tinjectdata(struct fid_ep *fid_ep, const void *buf, size_t len, 
xnet_tx_queue_insert(ep, tx_entry); unlock: - ofi_genlock_unlock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_unlock(&ep->progress->ep_lock); return ret; } diff --git a/prov/tcp/src/xnet_progress.c b/prov/tcp/src/xnet_progress.c index 1062bb379b9..d023156040c 100644 --- a/prov/tcp/src/xnet_progress.c +++ b/prov/tcp/src/xnet_progress.c @@ -64,7 +64,7 @@ static void xnet_submit_uring(struct xnet_uring *uring) static bool xnet_save_and_cont(struct xnet_ep *ep) { - assert(xnet_progress_locked(xnet_ep2_progress(ep))); + assert(xnet_progress_locked(ep->progress)); assert(ep->cur_rx.hdr.base_hdr.op == xnet_op_tag || ep->cur_rx.hdr.base_hdr.op == xnet_op_tag_rts); assert(ep->srx); @@ -89,7 +89,7 @@ xnet_get_save_rx(struct xnet_ep *ep, uint64_t tag) struct xnet_progress *progress; struct xnet_xfer_entry *rx_entry; - progress = xnet_ep2_progress(ep); + progress = ep->progress; assert(xnet_progress_locked(progress)); assert(xnet_save_and_cont(ep)); assert(ep->cur_rx.hdr_done == ep->cur_rx.hdr_len && @@ -97,7 +97,7 @@ xnet_get_save_rx(struct xnet_ep *ep, uint64_t tag) FI_DBG(&xnet_prov, FI_LOG_EP_DATA, "Saving msg tag 0x%zx src %zu\n", tag, ep->peer->fi_addr); - rx_entry = xnet_alloc_xfer(xnet_srx2_progress(ep->srx)); + rx_entry = xnet_alloc_xfer(&ep->srx->progress); if (!rx_entry) return NULL; @@ -154,7 +154,7 @@ static int xnet_handle_truncate(struct xnet_ep *ep) rx_entry->ctrl_flags = XNET_CLAIM_RECV | XNET_INTERNAL_XFER; ret = xnet_alloc_xfer_buf(rx_entry, ep->cur_rx.data_left); if (ret) { - xnet_free_xfer(xnet_ep2_progress(ep), rx_entry); + xnet_free_xfer(ep->progress, rx_entry); xnet_reset_rx(ep); return ret; } @@ -166,9 +166,9 @@ static int xnet_queue_ack(struct xnet_ep *ep, uint8_t op, uint8_t op_data) { struct xnet_xfer_entry *resp; - assert(xnet_progress_locked(xnet_ep2_progress(ep))); + assert(xnet_progress_locked(ep->progress)); assert(op == xnet_op_msg || op == xnet_op_cts); - resp = xnet_alloc_xfer(xnet_ep2_progress(ep)); + resp = 
xnet_alloc_xfer(ep->progress); if (!resp) return -FI_ENOMEM; @@ -196,7 +196,7 @@ xnet_rts_matched(struct xnet_rdm *rdm, struct xnet_ep *ep, uint8_t cts_ctx; int ret; - assert(xnet_progress_locked(xnet_rdm2_progress(rdm))); + assert(xnet_progress_locked(&rdm->srx->progress)); if (!ep) { ep = xnet_get_rx_ep(rdm, rx_entry->src_addr); if (!ep) { @@ -221,7 +221,7 @@ xnet_rts_matched(struct xnet_rdm *rdm, struct xnet_ep *ep, err_comp: xnet_cntr_incerr(rx_entry); xnet_report_error(rx_entry, -ret); - xnet_free_xfer(xnet_rdm2_progress(rdm), rx_entry); + xnet_free_xfer(&rdm->srx->progress, rx_entry); return ret; } @@ -265,7 +265,7 @@ void xnet_recv_saved(struct xnet_rdm *rdm, struct xnet_xfer_entry *saved_entry, struct xnet_ep *ep; void *buf2free, *msg_data; - progress = xnet_rdm2_progress(rdm); + progress = &rdm->srx->progress; assert(xnet_progress_locked(progress)); FI_DBG(&xnet_prov, FI_LOG_EP_DATA, "recv matched saved msg " "tag 0x%zx src %zu\n", saved_entry->tag, saved_entry->src_addr); @@ -363,7 +363,7 @@ static int xnet_update_pollflag(struct xnet_ep *ep, short pollflag, bool set) struct xnet_progress *progress; int ret; - progress = xnet_ep2_progress(ep); + progress = ep->progress; assert(xnet_progress_locked(progress)); if (set) { if (ep->pollflags & pollflag) @@ -414,7 +414,7 @@ static int xnet_send_msg(struct xnet_ep *ep) int ret; size_t len; - assert(xnet_progress_locked(xnet_ep2_progress(ep))); + assert(xnet_progress_locked(ep->progress)); assert(ep->cur_tx.entry); tx_entry = ep->cur_tx.entry; ret = ofi_bsock_sendv(&ep->bsock, tx_entry->iov, tx_entry->iov_cnt, @@ -446,7 +446,7 @@ static int xnet_recv_msg_data(struct xnet_ep *ep) size_t len; start: - assert(xnet_progress_locked(xnet_ep2_progress(ep))); + assert(xnet_progress_locked(ep->progress)); if (!ep->cur_rx.data_left) return FI_SUCCESS; @@ -480,7 +480,7 @@ static void xnet_need_cts(struct xnet_ep *ep, struct xnet_xfer_entry *tx_entry) { uint64_t msg_len; - 
assert(xnet_progress_locked(xnet_ep2_progress(ep))); + assert(xnet_progress_locked(ep->progress)); assert(tx_entry->hdr.base_hdr.op == xnet_op_tag_rts); msg_len = xnet_msg_len(&tx_entry->hdr); @@ -504,7 +504,7 @@ static void xnet_complete_tx(struct xnet_ep *ep, int ret) FI_WARN(&xnet_prov, FI_LOG_DOMAIN, "msg send failed\n"); xnet_cntr_incerr(tx_entry); xnet_report_error(tx_entry, -ret); - xnet_free_xfer(xnet_ep2_progress(ep), tx_entry); + xnet_free_xfer(ep->progress, tx_entry); } else if (tx_entry->ctrl_flags & XNET_NEED_CTS) { /* Will get SW CTS ack, async completion not needed */ xnet_need_cts(ep, tx_entry); @@ -518,14 +518,14 @@ static void xnet_complete_tx(struct xnet_ep *ep, int ret) /* discard send but enable receive for completion */ assert(tx_entry->resp_entry); tx_entry->resp_entry->ctrl_flags &= ~XNET_INTERNAL_XFER; - xnet_free_xfer(xnet_ep2_progress(ep), tx_entry); + xnet_free_xfer(ep->progress, tx_entry); } else if ((tx_entry->ctrl_flags & XNET_ASYNC) && (ofi_val32_gt(tx_entry->async_index, ep->bsock.done_index))) { slist_insert_tail(&tx_entry->entry, &ep->async_queue); } else { xnet_report_success(tx_entry); - xnet_free_xfer(xnet_ep2_progress(ep), tx_entry); + xnet_free_xfer(ep->progress, tx_entry); } if (!slist_empty(&ep->priority_queue)) { @@ -552,7 +552,7 @@ static void xnet_progress_tx(struct xnet_ep *ep) { int ret; - assert(xnet_progress_locked(xnet_ep2_progress(ep))); + assert(xnet_progress_locked(ep->progress)); while (ep->cur_tx.entry) { ret = xnet_send_msg(ep); if (OFI_SOCK_TRY_SND_RCV_AGAIN(-ret)) { @@ -589,7 +589,7 @@ static void xnet_pmem_commit(struct xnet_ep *ep, struct xnet_xfer_entry *rx_entr size_t offset; int i; - assert(xnet_progress_locked(xnet_ep2_progress(ep))); + assert(xnet_progress_locked(ep->progress)); if (!ofi_pmem_commit) return ; @@ -615,7 +615,7 @@ static int xnet_alter_mrecv(struct xnet_ep *ep, struct xnet_xfer_entry *xfer, int ret = FI_SUCCESS; assert(ep->srx); - assert(xnet_progress_locked(xnet_ep2_progress(ep))); + 
assert(xnet_progress_locked(ep->progress)); if ((msg_len && !xfer->iov_cnt) || (msg_len > xfer->iov[0].iov_len)) { ret = -FI_ETRUNC; @@ -627,7 +627,7 @@ static int xnet_alter_mrecv(struct xnet_ep *ep, struct xnet_xfer_entry *xfer, goto complete; /* If we can't repost the remaining buffer, return it to the user. */ - recv_entry = xnet_alloc_xfer(xnet_ep2_progress(ep)); + recv_entry = xnet_alloc_xfer(ep->progress); if (!recv_entry) goto complete; @@ -667,7 +667,7 @@ static struct xnet_xfer_entry *xnet_get_rx_entry(struct xnet_ep *ep) struct xnet_xfer_entry *xfer; struct xnet_srx *srx; - assert(xnet_progress_locked(xnet_ep2_progress(ep))); + assert(xnet_progress_locked(ep->progress)); if (ep->srx) { srx = ep->srx; if (!slist_empty(&srx->rx_queue)) { @@ -693,7 +693,7 @@ static int xnet_handle_ack(struct xnet_ep *ep) { struct xnet_xfer_entry *tx_entry; - assert(xnet_progress_locked(xnet_ep2_progress(ep))); + assert(xnet_progress_locked(ep->progress)); if (ep->cur_rx.hdr.base_hdr.size != sizeof(ep->cur_rx.hdr.base_hdr)) return -FI_EIO; @@ -702,7 +702,7 @@ static int xnet_handle_ack(struct xnet_ep *ep) struct xnet_xfer_entry, entry); xnet_report_success(tx_entry); - xnet_free_xfer(xnet_ep2_progress(ep), tx_entry); + xnet_free_xfer(ep->progress, tx_entry); xnet_reset_rx(ep); return FI_SUCCESS; } @@ -713,7 +713,7 @@ int xnet_start_recv(struct xnet_ep *ep, struct xnet_xfer_entry *rx_entry) size_t recv_len; int ret; - assert(xnet_progress_locked(xnet_ep2_progress(ep))); + assert(xnet_progress_locked(ep->progress)); if (!dlist_empty(&ep->unexp_entry)) { dlist_remove_init(&ep->unexp_entry); ret = xnet_update_pollflag(ep, POLLIN, true); @@ -752,7 +752,7 @@ int xnet_start_recv(struct xnet_ep *ep, struct xnet_xfer_entry *rx_entry) poll_err: xnet_cntr_incerr(rx_entry); xnet_report_error(rx_entry, -ret); - xnet_free_xfer(xnet_ep2_progress(ep), rx_entry); + xnet_free_xfer(ep->progress, rx_entry); return ret; } @@ -762,7 +762,7 @@ static int xnet_handle_msg(struct xnet_ep *ep) struct 
xnet_active_rx *msg = &ep->cur_rx; int ret; - assert(xnet_progress_locked(xnet_ep2_progress(ep))); + assert(xnet_progress_locked(ep->progress)); if (msg->hdr.base_hdr.op_data == XNET_OP_ACK) return xnet_handle_ack(ep); @@ -770,7 +770,7 @@ static int xnet_handle_msg(struct xnet_ep *ep) if (!rx_entry) { if (dlist_empty(&ep->unexp_entry)) { dlist_insert_tail(&ep->unexp_entry, - &xnet_ep2_progress(ep)->unexp_msg_list); + &ep->progress->unexp_msg_list); ret = xnet_update_pollflag(ep, POLLIN, false); if (ret) return ret; @@ -788,7 +788,7 @@ static int xnet_handle_tag(struct xnet_ep *ep) uint64_t tag; int ret; - assert(xnet_progress_locked(xnet_ep2_progress(ep))); + assert(xnet_progress_locked(ep->progress)); assert(ep->srx); tag = (msg->hdr.base_hdr.flags & XNET_REMOTE_CQ_DATA) ? @@ -805,7 +805,7 @@ static int xnet_handle_tag(struct xnet_ep *ep) } if (dlist_empty(&ep->unexp_entry)) { dlist_insert_tail(&ep->unexp_entry, - &xnet_ep2_progress(ep)->unexp_tag_list); + &ep->progress->unexp_tag_list); ret = xnet_update_pollflag(ep, POLLIN, false); if (ret) return ret; @@ -817,7 +817,7 @@ static int xnet_handle_cts(struct xnet_ep *ep) { struct xnet_xfer_entry *tx_entry; - assert(xnet_progress_locked(xnet_ep2_progress(ep))); + assert(xnet_progress_locked(ep->progress)); tx_entry = ofi_byte_idx_remove(&ep->rts_queue, ep->cur_rx.hdr.base_hdr.op_data); if (!tx_entry) { @@ -839,7 +839,7 @@ static int xnet_handle_data(struct xnet_ep *ep) size_t msg_len; uint8_t cts_ctx; - assert(xnet_progress_locked(xnet_ep2_progress(ep))); + assert(xnet_progress_locked(ep->progress)); cts_ctx = ep->cur_rx.hdr.base_hdr.op_data; rx_entry = ofi_byte_idx_clear(&ep->cts_queue, cts_ctx); if (!rx_entry) { @@ -869,8 +869,8 @@ static int xnet_handle_read_req(struct xnet_ep *ep) ssize_t i; int ret; - assert(xnet_progress_locked(xnet_ep2_progress(ep))); - resp = xnet_alloc_xfer(xnet_ep2_progress(ep)); + assert(xnet_progress_locked(ep->progress)); + resp = xnet_alloc_xfer(ep->progress); if (!resp) return 
-FI_ENOMEM; @@ -896,7 +896,7 @@ static int xnet_handle_read_req(struct xnet_ep *ep) if (ret) { FI_WARN(&xnet_prov, FI_LOG_EP_DATA, "invalid rma iov received\n"); - xnet_free_xfer(xnet_ep2_progress(ep), resp); + xnet_free_xfer(ep->progress, resp); return ret; } @@ -924,8 +924,8 @@ static int xnet_handle_write(struct xnet_ep *ep) ssize_t i; int ret; - assert(xnet_progress_locked(xnet_ep2_progress(ep))); - rx_entry = xnet_alloc_xfer(xnet_ep2_progress(ep)); + assert(xnet_progress_locked(ep->progress)); + rx_entry = xnet_alloc_xfer(ep->progress); if (!rx_entry) return -FI_ENOMEM; @@ -956,7 +956,7 @@ static int xnet_handle_write(struct xnet_ep *ep) if (ret) { FI_WARN(&xnet_prov, FI_LOG_EP_DATA, "invalid rma iov received\n"); - xnet_free_xfer(xnet_ep2_progress(ep), rx_entry); + xnet_free_xfer(ep->progress, rx_entry); return ret; } rx_entry->iov[i].iov_base = (void *) (uintptr_t) @@ -973,7 +973,7 @@ static int xnet_handle_read_rsp(struct xnet_ep *ep) { struct xnet_xfer_entry *rx_entry; - assert(xnet_progress_locked(xnet_ep2_progress(ep))); + assert(xnet_progress_locked(ep->progress)); if (slist_empty(&ep->rma_read_queue)) return -FI_EINVAL; @@ -1033,7 +1033,7 @@ static int xnet_recv_hdr(struct xnet_ep *ep) void *buf; int ret; - assert(xnet_progress_locked(xnet_ep2_progress(ep))); + assert(xnet_progress_locked(ep->progress)); assert(ep->cur_rx.hdr_done < ep->cur_rx.hdr_len); next_hdr: @@ -1085,7 +1085,7 @@ static void xnet_complete_rx(struct xnet_ep *ep, ssize_t ret) if (!(rx_entry->ctrl_flags & XNET_SAVED_XFER)) { xnet_report_success(rx_entry); - xnet_free_xfer(xnet_ep2_progress(ep), rx_entry); + xnet_free_xfer(ep->progress, rx_entry); } else { rx_entry->saving_ep = NULL; } @@ -1097,7 +1097,7 @@ static void xnet_complete_rx(struct xnet_ep *ep, ssize_t ret) "msg recv failed ret = %zd (%s)\n", ret, fi_strerror((int)-ret)); xnet_cntr_incerr(rx_entry); xnet_report_error(rx_entry, (int) -ret); - xnet_free_xfer(xnet_ep2_progress(ep), rx_entry); + xnet_free_xfer(ep->progress, 
rx_entry); xnet_reset_rx(ep); xnet_ep_disable(ep, 0, NULL, 0); } @@ -1106,7 +1106,7 @@ void xnet_progress_rx(struct xnet_ep *ep) { int ret; - assert(xnet_progress_locked(xnet_ep2_progress(ep))); + assert(xnet_progress_locked(ep->progress)); do { assert(ep->state == XNET_CONNECTED); if (ep->cur_rx.hdr_done < ep->cur_rx.hdr_len) { @@ -1142,7 +1142,7 @@ void xnet_progress_async(struct xnet_ep *ep) struct xnet_xfer_entry *xfer; int ret; - assert(xnet_progress_locked(xnet_ep2_progress(ep))); + assert(xnet_progress_locked(ep->progress)); ret = ofi_bsock_async_done(&xnet_prov, &ep->bsock); if (ret) { xnet_ep_disable(ep, 0, NULL, 0); @@ -1157,7 +1157,7 @@ void xnet_progress_async(struct xnet_ep *ep) slist_remove_head(&ep->async_queue); xnet_report_success(xfer); - xnet_free_xfer(xnet_ep2_progress(ep), xfer); + xnet_free_xfer(ep->progress, xfer); } } @@ -1230,7 +1230,7 @@ static void xnet_uring_connect_done(struct xnet_ep *ep, int res) int ret; FI_DBG(&xnet_prov, FI_LOG_EP_CTRL, "socket connected, sending req\n"); - progress = xnet_ep2_progress(ep); + progress = ep->progress; assert(xnet_progress_locked(progress)); if (res < 0) { @@ -1401,7 +1401,7 @@ void xnet_tx_queue_insert(struct xnet_ep *ep, { struct xnet_progress *progress; - progress = xnet_ep2_progress(ep); + progress = ep->progress; assert(xnet_progress_locked(progress)); if (!ep->cur_tx.entry) { @@ -1432,7 +1432,7 @@ static int (*xnet_start_op[xnet_op_max])(struct xnet_ep *ep) = { static void xnet_run_ep(struct xnet_ep *ep, bool pin, bool pout, bool perr) { - assert(xnet_progress_locked(xnet_ep2_progress(ep))); + assert(xnet_progress_locked(ep->progress)); switch (ep->state) { case XNET_CONNECTED: if (perr) diff --git a/prov/tcp/src/xnet_rdm.c b/prov/tcp/src/xnet_rdm.c index 737b526c5da..79f04d78f7c 100644 --- a/prov/tcp/src/xnet_rdm.c +++ b/prov/tcp/src/xnet_rdm.c @@ -76,14 +76,14 @@ xnet_rdm_send(struct fid_ep *ep_fid, const void *buf, size_t len, ssize_t ret; rdm = container_of(ep_fid, struct xnet_rdm, 
util_ep.ep_fid); - ofi_genlock_lock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_lock(&rdm->srx->progress.rdm_lock); ret = xnet_get_conn(rdm, dest_addr, &conn); if (ret) goto unlock; ret = fi_send(&conn->ep->util_ep.ep_fid, buf, len, desc, 0, context); unlock: - ofi_genlock_unlock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_unlock(&rdm->srx->progress.rdm_lock); return ret; } @@ -96,14 +96,14 @@ xnet_rdm_sendv(struct fid_ep *ep_fid, const struct iovec *iov, ssize_t ret; rdm = container_of(ep_fid, struct xnet_rdm, util_ep.ep_fid); - ofi_genlock_lock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_lock(&rdm->srx->progress.rdm_lock); ret = xnet_get_conn(rdm, dest_addr, &conn); if (ret) goto unlock; ret = fi_sendv(&conn->ep->util_ep.ep_fid, iov, desc, count, 0, context); unlock: - ofi_genlock_unlock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_unlock(&rdm->srx->progress.rdm_lock); return ret; } @@ -116,14 +116,14 @@ xnet_rdm_sendmsg(struct fid_ep *ep_fid, const struct fi_msg *msg, ssize_t ret; rdm = container_of(ep_fid, struct xnet_rdm, util_ep.ep_fid); - ofi_genlock_lock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_lock(&rdm->srx->progress.rdm_lock); ret = xnet_get_conn(rdm, msg->addr, &conn); if (ret) goto unlock; ret = fi_sendmsg(&conn->ep->util_ep.ep_fid, msg, flags); unlock: - ofi_genlock_unlock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_unlock(&rdm->srx->progress.rdm_lock); return ret; } @@ -136,14 +136,14 @@ xnet_rdm_inject(struct fid_ep *ep_fid, const void *buf, ssize_t ret; rdm = container_of(ep_fid, struct xnet_rdm, util_ep.ep_fid); - ofi_genlock_lock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_lock(&rdm->srx->progress.rdm_lock); ret = xnet_get_conn(rdm, dest_addr, &conn); if (ret) goto unlock; ret = fi_inject(&conn->ep->util_ep.ep_fid, buf, len, 0); unlock: - ofi_genlock_unlock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_unlock(&rdm->srx->progress.rdm_lock); return ret; } @@ -156,7 +156,7 @@ 
xnet_rdm_senddata(struct fid_ep *ep_fid, const void *buf, size_t len, ssize_t ret; rdm = container_of(ep_fid, struct xnet_rdm, util_ep.ep_fid); - ofi_genlock_lock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_lock(&rdm->srx->progress.rdm_lock); ret = xnet_get_conn(rdm, dest_addr, &conn); if (ret) goto unlock; @@ -164,7 +164,7 @@ xnet_rdm_senddata(struct fid_ep *ep_fid, const void *buf, size_t len, ret = fi_senddata(&conn->ep->util_ep.ep_fid, buf, len, desc, data, 0, context); unlock: - ofi_genlock_unlock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_unlock(&rdm->srx->progress.rdm_lock); return ret; } @@ -177,14 +177,14 @@ xnet_rdm_injectdata(struct fid_ep *ep_fid, const void *buf, size_t len, ssize_t ret; rdm = container_of(ep_fid, struct xnet_rdm, util_ep.ep_fid); - ofi_genlock_lock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_lock(&rdm->srx->progress.rdm_lock); ret = xnet_get_conn(rdm, dest_addr, &conn); if (ret) goto unlock; ret = fi_injectdata(&conn->ep->util_ep.ep_fid, buf, len, data, 0); unlock: - ofi_genlock_unlock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_unlock(&rdm->srx->progress.rdm_lock); return ret; } @@ -244,7 +244,7 @@ xnet_rdm_tsend(struct fid_ep *ep_fid, const void *buf, size_t len, ssize_t ret; rdm = container_of(ep_fid, struct xnet_rdm, util_ep.ep_fid); - ofi_genlock_lock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_lock(&rdm->srx->progress.rdm_lock); ret = xnet_get_conn(rdm, dest_addr, &conn); if (ret) goto unlock; @@ -252,7 +252,7 @@ xnet_rdm_tsend(struct fid_ep *ep_fid, const void *buf, size_t len, ret = fi_tsend(&conn->ep->util_ep.ep_fid, buf, len, desc, 0, tag, context); unlock: - ofi_genlock_unlock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_unlock(&rdm->srx->progress.rdm_lock); return ret; } @@ -266,7 +266,7 @@ xnet_rdm_tsendv(struct fid_ep *ep_fid, const struct iovec *iov, ssize_t ret; rdm = container_of(ep_fid, struct xnet_rdm, util_ep.ep_fid); - ofi_genlock_lock(&xnet_rdm2_progress(rdm)->rdm_lock); 
+ ofi_genlock_lock(&rdm->srx->progress.rdm_lock); ret = xnet_get_conn(rdm, dest_addr, &conn); if (ret) goto unlock; @@ -274,7 +274,7 @@ xnet_rdm_tsendv(struct fid_ep *ep_fid, const struct iovec *iov, ret = fi_tsendv(&conn->ep->util_ep.ep_fid, iov, desc, count, 0, tag, context); unlock: - ofi_genlock_unlock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_unlock(&rdm->srx->progress.rdm_lock); return ret; } @@ -287,14 +287,14 @@ xnet_rdm_tsendmsg(struct fid_ep *ep_fid, const struct fi_msg_tagged *msg, ssize_t ret; rdm = container_of(ep_fid, struct xnet_rdm, util_ep.ep_fid); - ofi_genlock_lock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_lock(&rdm->srx->progress.rdm_lock); ret = xnet_get_conn(rdm, msg->addr, &conn); if (ret) goto unlock; ret = fi_tsendmsg(&conn->ep->util_ep.ep_fid, msg, flags); unlock: - ofi_genlock_unlock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_unlock(&rdm->srx->progress.rdm_lock); return ret; } @@ -307,14 +307,14 @@ xnet_rdm_tinject(struct fid_ep *ep_fid, const void *buf, ssize_t ret; rdm = container_of(ep_fid, struct xnet_rdm, util_ep.ep_fid); - ofi_genlock_lock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_lock(&rdm->srx->progress.rdm_lock); ret = xnet_get_conn(rdm, dest_addr, &conn); if (ret) goto unlock; ret = fi_tinject(&conn->ep->util_ep.ep_fid, buf, len, 0, tag); unlock: - ofi_genlock_unlock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_unlock(&rdm->srx->progress.rdm_lock); return ret; } @@ -328,7 +328,7 @@ xnet_rdm_tsenddata(struct fid_ep *ep_fid, const void *buf, size_t len, ssize_t ret; rdm = container_of(ep_fid, struct xnet_rdm, util_ep.ep_fid); - ofi_genlock_lock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_lock(&rdm->srx->progress.rdm_lock); ret = xnet_get_conn(rdm, dest_addr, &conn); if (ret) goto unlock; @@ -336,7 +336,7 @@ xnet_rdm_tsenddata(struct fid_ep *ep_fid, const void *buf, size_t len, ret = fi_tsenddata(&conn->ep->util_ep.ep_fid, buf, len, desc, data, 0, tag, context); unlock: - 
ofi_genlock_unlock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_unlock(&rdm->srx->progress.rdm_lock); return ret; } @@ -349,14 +349,14 @@ xnet_rdm_tinjectdata(struct fid_ep *ep_fid, const void *buf, size_t len, ssize_t ret; rdm = container_of(ep_fid, struct xnet_rdm, util_ep.ep_fid); - ofi_genlock_lock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_lock(&rdm->srx->progress.rdm_lock); ret = xnet_get_conn(rdm, dest_addr, &conn); if (ret) goto unlock; ret = fi_tinjectdata(&conn->ep->util_ep.ep_fid, buf, len, data, 0, tag); unlock: - ofi_genlock_unlock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_unlock(&rdm->srx->progress.rdm_lock); return ret; } @@ -383,7 +383,7 @@ xnet_rdm_read(struct fid_ep *ep_fid, void *buf, size_t len, ssize_t ret; rdm = container_of(ep_fid, struct xnet_rdm, util_ep.ep_fid); - ofi_genlock_lock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_lock(&rdm->srx->progress.rdm_lock); ret = xnet_get_conn(rdm, src_addr, &conn); if (ret) goto unlock; @@ -391,7 +391,7 @@ xnet_rdm_read(struct fid_ep *ep_fid, void *buf, size_t len, ret = fi_read(&conn->ep->util_ep.ep_fid, buf, len, desc, src_addr, addr, key, context); unlock: - ofi_genlock_unlock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_unlock(&rdm->srx->progress.rdm_lock); return ret; } @@ -405,7 +405,7 @@ xnet_rdm_readv(struct fid_ep *ep_fid, const struct iovec *iov, ssize_t ret; rdm = container_of(ep_fid, struct xnet_rdm, util_ep.ep_fid); - ofi_genlock_lock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_lock(&rdm->srx->progress.rdm_lock); ret = xnet_get_conn(rdm, src_addr, &conn); if (ret) goto unlock; @@ -413,7 +413,7 @@ xnet_rdm_readv(struct fid_ep *ep_fid, const struct iovec *iov, ret = fi_readv(&conn->ep->util_ep.ep_fid, iov, desc, count, src_addr, addr, key, context); unlock: - ofi_genlock_unlock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_unlock(&rdm->srx->progress.rdm_lock); return ret; } @@ -426,14 +426,14 @@ xnet_rdm_readmsg(struct fid_ep *ep_fid, const 
struct fi_msg_rma *msg, ssize_t ret; rdm = container_of(ep_fid, struct xnet_rdm, util_ep.ep_fid); - ofi_genlock_lock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_lock(&rdm->srx->progress.rdm_lock); ret = xnet_get_conn(rdm, msg->addr, &conn); if (ret) goto unlock; ret = fi_readmsg(&conn->ep->util_ep.ep_fid, msg, flags); unlock: - ofi_genlock_unlock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_unlock(&rdm->srx->progress.rdm_lock); return ret; } @@ -447,7 +447,7 @@ xnet_rdm_write(struct fid_ep *ep_fid, const void *buf, ssize_t ret; rdm = container_of(ep_fid, struct xnet_rdm, util_ep.ep_fid); - ofi_genlock_lock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_lock(&rdm->srx->progress.rdm_lock); ret = xnet_get_conn(rdm, dest_addr, &conn); if (ret) goto unlock; @@ -455,7 +455,7 @@ xnet_rdm_write(struct fid_ep *ep_fid, const void *buf, ret = fi_write(&conn->ep->util_ep.ep_fid, buf, len, desc, dest_addr, addr, key, context); unlock: - ofi_genlock_unlock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_unlock(&rdm->srx->progress.rdm_lock); return ret; } @@ -469,7 +469,7 @@ xnet_rdm_writev(struct fid_ep *ep_fid, const struct iovec *iov, ssize_t ret; rdm = container_of(ep_fid, struct xnet_rdm, util_ep.ep_fid); - ofi_genlock_lock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_lock(&rdm->srx->progress.rdm_lock); ret = xnet_get_conn(rdm, dest_addr, &conn); if (ret) goto unlock; @@ -477,7 +477,7 @@ xnet_rdm_writev(struct fid_ep *ep_fid, const struct iovec *iov, ret = fi_writev(&conn->ep->util_ep.ep_fid, iov, desc, count, dest_addr, addr, key, context); unlock: - ofi_genlock_unlock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_unlock(&rdm->srx->progress.rdm_lock); return ret; } @@ -490,14 +490,14 @@ xnet_rdm_writemsg(struct fid_ep *ep_fid, const struct fi_msg_rma *msg, ssize_t ret; rdm = container_of(ep_fid, struct xnet_rdm, util_ep.ep_fid); - ofi_genlock_lock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_lock(&rdm->srx->progress.rdm_lock); ret = 
xnet_get_conn(rdm, msg->addr, &conn); if (ret) goto unlock; ret = fi_writemsg(&conn->ep->util_ep.ep_fid, msg, flags); unlock: - ofi_genlock_unlock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_unlock(&rdm->srx->progress.rdm_lock); return ret; } @@ -511,7 +511,7 @@ xnet_rdm_inject_write(struct fid_ep *ep_fid, const void *buf, ssize_t ret; rdm = container_of(ep_fid, struct xnet_rdm, util_ep.ep_fid); - ofi_genlock_lock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_lock(&rdm->srx->progress.rdm_lock); ret = xnet_get_conn(rdm, dest_addr, &conn); if (ret) goto unlock; @@ -519,7 +519,7 @@ xnet_rdm_inject_write(struct fid_ep *ep_fid, const void *buf, ret = fi_inject_write(&conn->ep->util_ep.ep_fid, buf, len, dest_addr, addr, key); unlock: - ofi_genlock_unlock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_unlock(&rdm->srx->progress.rdm_lock); return ret; } @@ -534,7 +534,7 @@ xnet_rdm_writedata(struct fid_ep *ep_fid, const void *buf, ssize_t ret; rdm = container_of(ep_fid, struct xnet_rdm, util_ep.ep_fid); - ofi_genlock_lock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_lock(&rdm->srx->progress.rdm_lock); ret = xnet_get_conn(rdm, dest_addr, &conn); if (ret) goto unlock; @@ -542,7 +542,7 @@ xnet_rdm_writedata(struct fid_ep *ep_fid, const void *buf, ret = fi_writedata(&conn->ep->util_ep.ep_fid, buf, len, desc, data, dest_addr, addr, key, context); unlock: - ofi_genlock_unlock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_unlock(&rdm->srx->progress.rdm_lock); return ret; } @@ -556,7 +556,7 @@ xnet_rdm_inject_writedata(struct fid_ep *ep_fid, const void *buf, ssize_t ret; rdm = container_of(ep_fid, struct xnet_rdm, util_ep.ep_fid); - ofi_genlock_lock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_lock(&rdm->srx->progress.rdm_lock); ret = xnet_get_conn(rdm, dest_addr, &conn); if (ret) goto unlock; @@ -564,7 +564,7 @@ xnet_rdm_inject_writedata(struct fid_ep *ep_fid, const void *buf, ret = fi_inject_writedata(&conn->ep->util_ep.ep_fid, buf, len, data, 
dest_addr, addr, key); unlock: - ofi_genlock_unlock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_unlock(&rdm->srx->progress.rdm_lock); return ret; } @@ -772,17 +772,17 @@ static int xnet_rdm_close(struct fid *fid) int ret; rdm = container_of(fid, struct xnet_rdm, util_ep.ep_fid.fid); - ofi_genlock_lock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_lock(&rdm->srx->progress.rdm_lock); ret = fi_close(&rdm->pep->util_pep.pep_fid.fid); if (ret) { FI_WARN(&xnet_prov, FI_LOG_EP_CTRL, "Unable to close passive endpoint\n"); - ofi_genlock_unlock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_unlock(&rdm->srx->progress.rdm_lock); return ret; } xnet_freeall_conns(rdm); - ofi_genlock_unlock(&xnet_rdm2_progress(rdm)->rdm_lock); + ofi_genlock_unlock(&rdm->srx->progress.rdm_lock); ret = fi_close(&rdm->srx->rx_fid.fid); if (ret) { diff --git a/prov/tcp/src/xnet_rdm_cm.c b/prov/tcp/src/xnet_rdm_cm.c index 82afbf9d785..e612be31059 100644 --- a/prov/tcp/src/xnet_rdm_cm.c +++ b/prov/tcp/src/xnet_rdm_cm.c @@ -148,7 +148,7 @@ static int xnet_bind_conn(struct xnet_rdm *rdm, struct xnet_ep *ep) { int ret; - assert(xnet_progress_locked(xnet_rdm2_progress(rdm))); + assert(xnet_progress_locked(&rdm->srx->progress)); ret = fi_ep_bind(&ep->util_ep.ep_fid, &rdm->srx->rx_fid.fid, 0); if (ret) @@ -219,7 +219,7 @@ static int xnet_open_conn(struct xnet_conn *conn, struct fi_info *info) struct fid_ep *ep_fid; int ret; - assert(xnet_progress_locked(xnet_rdm2_progress(conn->rdm))); + assert(xnet_progress_locked(&conn->rdm->srx->progress)); ret = xnet_endpoint(&conn->rdm->util_ep.domain->domain_fid, info, &ep_fid, conn); if (ret) { @@ -255,7 +255,7 @@ static int xnet_rdm_connect(struct xnet_conn *conn) int ret; FI_DBG(&xnet_prov, FI_LOG_EP_CTRL, "connecting %p\n", conn); - assert(xnet_progress_locked(xnet_rdm2_progress(conn->rdm))); + assert(xnet_progress_locked(&conn->rdm->srx->progress)); info = conn->rdm->pep->info; info->dest_addrlen = info->src_addrlen; @@ -295,7 +295,7 @@ static
void xnet_free_conn(struct xnet_conn *conn) struct rxm_av *av; FI_DBG(&xnet_prov, FI_LOG_EP_CTRL, "free conn %p\n", conn); - assert(xnet_progress_locked(xnet_rdm2_progress(conn->rdm))); + assert(xnet_progress_locked(&conn->rdm->srx->progress)); if (conn->flags & XNET_CONN_INDEXED) ofi_idm_clear(&conn->rdm->conn_idx_map, conn->peer->index); @@ -315,7 +315,7 @@ void xnet_freeall_conns(struct xnet_rdm *rdm) return; av = container_of(rdm->util_ep.av, struct rxm_av, util_av); - assert(xnet_progress_locked(xnet_rdm2_progress(rdm))); + assert(xnet_progress_locked(&rdm->srx->progress)); /* We can't have more connections than the current number of * possible peers. @@ -344,7 +344,7 @@ xnet_alloc_conn(struct xnet_rdm *rdm, struct util_peer_addr *peer) struct xnet_conn *conn; struct rxm_av *av; - assert(xnet_progress_locked(xnet_rdm2_progress(rdm))); + assert(xnet_progress_locked(&rdm->srx->progress)); av = container_of(rdm->util_ep.av, struct rxm_av, util_av); conn = rxm_av_alloc_conn(av); if (!conn) { @@ -366,7 +366,7 @@ xnet_add_conn(struct xnet_rdm *rdm, struct util_peer_addr *peer) { struct xnet_conn *conn; - assert(xnet_progress_locked(xnet_rdm2_progress(rdm))); + assert(xnet_progress_locked(&rdm->srx->progress)); conn = ofi_idm_lookup(&rdm->conn_idx_map, peer->index); if (conn) return conn; @@ -395,7 +395,7 @@ ssize_t xnet_get_conn(struct xnet_rdm *rdm, fi_addr_t addr, struct util_peer_addr **peer; ssize_t ret; - assert(xnet_progress_locked(xnet_rdm2_progress(rdm))); + assert(xnet_progress_locked(&rdm->srx->progress)); peer = ofi_av_addr_context(rdm->util_ep.av, addr); *conn = xnet_add_conn(rdm, *peer); if (!*conn) @@ -411,7 +411,7 @@ ssize_t xnet_get_conn(struct xnet_rdm *rdm, fi_addr_t addr, /* Force progress for apps that simply retry sending without * trying to drive progress in between. 
*/ - xnet_run_progress(xnet_rdm2_progress(rdm), false); + xnet_run_progress(&rdm->srx->progress, false); return -FI_EAGAIN; } @@ -423,7 +423,7 @@ struct xnet_ep *xnet_get_rx_ep(struct xnet_rdm *rdm, fi_addr_t addr) struct util_peer_addr **peer; struct xnet_conn *conn; - assert(xnet_progress_locked(xnet_rdm2_progress(rdm))); + assert(xnet_progress_locked(&rdm->srx->progress)); peer = ofi_av_addr_context(rdm->util_ep.av, addr); conn = ofi_idm_lookup(&rdm->conn_idx_map, (*peer)->index); if (conn) { diff --git a/prov/tcp/src/xnet_rma.c b/prov/tcp/src/xnet_rma.c index 83225f663ac..70714104ae6 100644 --- a/prov/tcp/src/xnet_rma.c +++ b/prov/tcp/src/xnet_rma.c @@ -120,16 +120,16 @@ xnet_rma_readmsg(struct fid_ep *ep_fid, const struct fi_msg_rma *msg, assert(ofi_total_iov_len(msg->msg_iov, msg->iov_count) == ofi_total_rma_iov_len(msg->rma_iov, msg->rma_iov_count)); - ofi_genlock_lock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_lock(&ep->progress->ep_lock); send_entry = xnet_alloc_tx(ep); if (!send_entry) { ret = -FI_EAGAIN; goto unlock; } - recv_entry = xnet_alloc_xfer(xnet_ep2_progress(ep)); + recv_entry = xnet_alloc_xfer(ep->progress); if (!recv_entry) { - xnet_free_xfer(xnet_ep2_progress(ep), send_entry); + xnet_free_xfer(ep->progress, send_entry); ret = -FI_EAGAIN; goto unlock; } @@ -139,7 +139,7 @@ xnet_rma_readmsg(struct fid_ep *ep_fid, const struct fi_msg_rma *msg, slist_insert_tail(&recv_entry->entry, &ep->rma_read_queue); xnet_tx_queue_insert(ep, send_entry); unlock: - ofi_genlock_unlock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_unlock(&ep->progress->ep_lock); return ret; } @@ -211,7 +211,7 @@ xnet_rma_writemsg(struct fid_ep *ep_fid, const struct fi_msg_rma *msg, ep = container_of(ep_fid, struct xnet_ep, util_ep.ep_fid); - ofi_genlock_lock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_lock(&ep->progress->ep_lock); send_entry = xnet_alloc_tx(ep); if (!send_entry) { ret = -FI_EAGAIN; @@ -270,7 +270,7 @@ xnet_rma_writemsg(struct fid_ep *ep_fid, const 
struct fi_msg_rma *msg, xnet_tx_queue_insert(ep, send_entry); unlock: - ofi_genlock_unlock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_unlock(&ep->progress->ep_lock); return ret; } @@ -374,7 +374,7 @@ xnet_rma_inject_common(struct fid_ep *ep_fid, const void *buf, size_t len, ep = container_of(ep_fid, struct xnet_ep, util_ep.ep_fid); - ofi_genlock_lock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_lock(&ep->progress->ep_lock); send_entry = xnet_alloc_tx(ep); if (!send_entry) { ret = -FI_EAGAIN; @@ -413,7 +413,7 @@ xnet_rma_inject_common(struct fid_ep *ep_fid, const void *buf, size_t len, send_entry->cntr = ep->util_ep.cntrs[CNTR_WR]; xnet_tx_queue_insert(ep, send_entry); unlock: - ofi_genlock_unlock(&xnet_ep2_progress(ep)->ep_lock); + ofi_genlock_unlock(&ep->progress->ep_lock); return ret; } diff --git a/prov/tcp/src/xnet_srx.c b/prov/tcp/src/xnet_srx.c index 2b4ff5aed27..e9a20aaeb6b 100644 --- a/prov/tcp/src/xnet_srx.c +++ b/prov/tcp/src/xnet_srx.c @@ -53,7 +53,7 @@ xnet_alloc_srx_xfer(struct xnet_srx *srx) struct xnet_xfer_entry *xfer; struct xnet_progress *progress; - progress = xnet_srx2_progress(srx); + progress = &srx->progress; assert(xnet_progress_locked(progress)); xfer = xnet_alloc_xfer(progress); if (xfer) { @@ -71,7 +71,7 @@ xnet_srx_msg(struct xnet_srx *srx, struct xnet_xfer_entry *recv_entry) struct xnet_progress *progress; struct xnet_ep *ep; - progress = xnet_srx2_progress(srx); + progress = &srx->progress; assert(xnet_progress_locked(progress)); /* See comment with xnet_srx_tag(). 
*/ slist_insert_tail(&recv_entry->entry, &srx->rx_queue); @@ -99,7 +99,7 @@ xnet_srx_recvmsg(struct fid_ep *ep_fid, const struct fi_msg *msg, assert(msg->iov_count <= XNET_IOV_LIMIT); assert(!(flags & FI_MULTI_RECV) || msg->iov_count == 1); - ofi_genlock_lock(xnet_srx2_progress(srx)->active_lock); + ofi_genlock_lock(srx->progress.active_lock); recv_entry = xnet_alloc_srx_xfer(srx); if (!recv_entry) { ret = -FI_EAGAIN; @@ -118,7 +118,7 @@ xnet_srx_recvmsg(struct fid_ep *ep_fid, const struct fi_msg *msg, xnet_srx_msg(srx, recv_entry); unlock: - ofi_genlock_unlock(xnet_srx2_progress(srx)->active_lock); + ofi_genlock_unlock(srx->progress.active_lock); return ret; } @@ -133,7 +133,7 @@ xnet_srx_recv(struct fid_ep *ep_fid, void *buf, size_t len, void *desc, srx = container_of(ep_fid, struct xnet_srx, rx_fid); - ofi_genlock_lock(xnet_srx2_progress(srx)->active_lock); + ofi_genlock_lock(srx->progress.active_lock); recv_entry = xnet_alloc_srx_xfer(srx); if (!recv_entry) { ret = -FI_EAGAIN; @@ -150,7 +150,7 @@ xnet_srx_recv(struct fid_ep *ep_fid, void *buf, size_t len, void *desc, xnet_srx_msg(srx, recv_entry); unlock: - ofi_genlock_unlock(xnet_srx2_progress(srx)->active_lock); + ofi_genlock_unlock(srx->progress.active_lock); return ret; } @@ -165,7 +165,7 @@ xnet_srx_recvv(struct fid_ep *ep_fid, const struct iovec *iov, void **desc, srx = container_of(ep_fid, struct xnet_srx, rx_fid); assert(count <= XNET_IOV_LIMIT); - ofi_genlock_lock(xnet_srx2_progress(srx)->active_lock); + ofi_genlock_lock(srx->progress.active_lock); recv_entry = xnet_alloc_srx_xfer(srx); if (!recv_entry) { ret = -FI_EAGAIN; @@ -183,7 +183,7 @@ xnet_srx_recvv(struct fid_ep *ep_fid, const struct iovec *iov, void **desc, xnet_srx_msg(srx, recv_entry); unlock: - ofi_genlock_unlock(xnet_srx2_progress(srx)->active_lock); + ofi_genlock_unlock(srx->progress.active_lock); return ret; } @@ -289,7 +289,7 @@ xnet_find_msg(struct xnet_srx *srx, struct xnet_xfer_entry *recv_entry, struct xnet_saved_msg *saved_msg; 
struct dlist_entry *entry; - progress = xnet_srx2_progress(srx); + progress = &srx->progress; assert(xnet_progress_locked(progress)); *ep = NULL; @@ -347,7 +347,7 @@ xnet_srx_claim(struct xnet_srx *srx, struct xnet_xfer_entry *recv_entry, ssize_t ret; size_t msg_len; - assert(xnet_progress_locked(xnet_srx2_progress(srx))); + assert(xnet_progress_locked(&srx->progress)); assert(srx->rdm); recv_entry->ctrl_flags |= XNET_CLAIM_RECV; @@ -393,7 +393,7 @@ xnet_srx_peek(struct xnet_srx *srx, struct xnet_xfer_entry *recv_entry, struct fi_cq_err_entry err_entry; ssize_t ret = FI_ENOMSG; - assert(xnet_progress_locked(xnet_srx2_progress(srx))); + assert(xnet_progress_locked(&srx->progress)); assert(srx->rdm); if (!xnet_find_msg(srx, recv_entry, &ep, &saved_entry, false)) @@ -424,7 +424,7 @@ xnet_srx_peek(struct xnet_srx *srx, struct xnet_xfer_entry *recv_entry, } xnet_report_success(recv_entry); - xnet_free_xfer(xnet_srx2_progress(srx), recv_entry); + xnet_free_xfer(&srx->progress, recv_entry); return FI_SUCCESS; nomatch: @@ -434,7 +434,7 @@ xnet_srx_peek(struct xnet_srx *srx, struct xnet_xfer_entry *recv_entry, err_entry.tag = recv_entry->tag; err_entry.err = ret; ofi_cq_write_error(&srx->cq->util_cq, &err_entry); - xnet_free_xfer(xnet_srx2_progress(srx), recv_entry); + xnet_free_xfer(&srx->progress, recv_entry); return FI_SUCCESS; } @@ -456,7 +456,7 @@ xnet_srx_tag(struct xnet_srx *srx, struct xnet_xfer_entry *recv_entry) struct xnet_ep *ep; struct slist *queue; - progress = xnet_srx2_progress(srx); + progress = &srx->progress; assert(xnet_progress_locked(progress)); assert(srx->rdm); @@ -522,8 +522,8 @@ xnet_srx_trecvmsg(struct fid_ep *ep_fid, const struct fi_msg_tagged *msg, srx = container_of(ep_fid, struct xnet_srx, rx_fid); assert(msg->iov_count <= XNET_IOV_LIMIT); - ofi_genlock_lock(xnet_srx2_progress(srx)->active_lock); - recv_entry = xnet_alloc_xfer(xnet_srx2_progress(srx)); + ofi_genlock_lock(srx->progress.active_lock); + recv_entry = 
xnet_alloc_xfer(&srx->progress); if (!recv_entry) { ret = -FI_EAGAIN; goto unlock; @@ -552,9 +552,9 @@ xnet_srx_trecvmsg(struct fid_ep *ep_fid, const struct fi_msg_tagged *msg, ret = (flags & FI_CLAIM) ? xnet_srx_claim(srx, recv_entry, flags) : xnet_srx_tag(srx, recv_entry); if (ret) - xnet_free_xfer(xnet_srx2_progress(srx), recv_entry); + xnet_free_xfer(&srx->progress, recv_entry); unlock: - ofi_genlock_unlock(xnet_srx2_progress(srx)->active_lock); + ofi_genlock_unlock(srx->progress.active_lock); return ret; } @@ -568,7 +568,7 @@ xnet_srx_trecv(struct fid_ep *ep_fid, void *buf, size_t len, void *desc, srx = container_of(ep_fid, struct xnet_srx, rx_fid); - ofi_genlock_lock(xnet_srx2_progress(srx)->active_lock); + ofi_genlock_lock(srx->progress.active_lock); recv_entry = xnet_alloc_srx_xfer(srx); if (!recv_entry) { ret = -FI_EAGAIN; @@ -587,9 +587,9 @@ xnet_srx_trecv(struct fid_ep *ep_fid, void *buf, size_t len, void *desc, ret = xnet_srx_tag(srx, recv_entry); if (ret) - xnet_free_xfer(xnet_srx2_progress(srx), recv_entry); + xnet_free_xfer(&srx->progress, recv_entry); unlock: - ofi_genlock_unlock(xnet_srx2_progress(srx)->active_lock); + ofi_genlock_unlock(srx->progress.active_lock); return ret; } @@ -605,7 +605,7 @@ xnet_srx_trecvv(struct fid_ep *ep_fid, const struct iovec *iov, void **desc, srx = container_of(ep_fid, struct xnet_srx, rx_fid); assert(count <= XNET_IOV_LIMIT); - ofi_genlock_lock(xnet_srx2_progress(srx)->active_lock); + ofi_genlock_lock(srx->progress.active_lock); recv_entry = xnet_alloc_srx_xfer(srx); if (!recv_entry) { ret = -FI_EAGAIN; @@ -626,9 +626,9 @@ xnet_srx_trecvv(struct fid_ep *ep_fid, const struct iovec *iov, void **desc, ret = xnet_srx_tag(srx, recv_entry); if (ret) - xnet_free_xfer(xnet_srx2_progress(srx), recv_entry); + xnet_free_xfer(&srx->progress, recv_entry); unlock: - ofi_genlock_unlock(xnet_srx2_progress(srx)->active_lock); + ofi_genlock_unlock(srx->progress.active_lock); return ret; } @@ -651,7 +651,7 @@ xnet_match_tag(struct 
xnet_srx *srx, struct xnet_ep *ep, uint64_t tag) struct xnet_xfer_entry *rx_entry; struct slist_entry *item, *prev; - assert(xnet_progress_locked(xnet_srx2_progress(srx))); + assert(xnet_progress_locked(&srx->progress)); slist_foreach(&srx->tag_queue, item, prev) { rx_entry = container_of(item, struct xnet_xfer_entry, entry); if (ofi_match_tag(rx_entry->tag, rx_entry->ignore, tag)) { @@ -675,7 +675,7 @@ xnet_match_tag_addr(struct xnet_srx *srx, struct xnet_ep *ep, uint64_t tag) struct slist_entry *any_item, *any_prev; struct slist_entry *item, *prev; - assert(xnet_progress_locked(xnet_srx2_progress(srx))); + assert(xnet_progress_locked(&srx->progress)); queue = (ep->peer && ep->peer->fi_addr != FI_ADDR_NOTAVAIL) ? ofi_array_at(&srx->src_tag_queues, ep->peer->fi_addr) : NULL; @@ -718,13 +718,13 @@ xnet_srx_cancel_rx(struct xnet_srx *srx, struct slist *queue, void *context) struct slist_entry *cur, *prev; struct xnet_xfer_entry *xfer_entry; - assert(xnet_progress_locked(xnet_srx2_progress(srx))); + assert(xnet_progress_locked(&srx->progress)); slist_foreach(queue, cur, prev) { xfer_entry = container_of(cur, struct xnet_xfer_entry, entry); if (xfer_entry->context == context) { slist_remove(queue, cur, prev); xnet_report_error(xfer_entry, FI_ECANCELED); - xnet_free_xfer(xnet_srx2_progress(srx), xfer_entry); + xnet_free_xfer(&srx->progress, xfer_entry); return true; } } @@ -748,7 +748,7 @@ static ssize_t xnet_srx_cancel(fid_t fid, void *context) srx = container_of(fid, struct xnet_srx, rx_fid.fid); - ofi_genlock_lock(xnet_srx2_progress(srx)->active_lock); + ofi_genlock_lock(srx->progress.active_lock); if (xnet_srx_cancel_rx(srx, &srx->tag_queue, context)) goto unlock; @@ -757,7 +757,7 @@ static ssize_t xnet_srx_cancel(fid_t fid, void *context) ofi_array_iter(&srx->src_tag_queues, context, xnet_srx_cancel_src); unlock: - ofi_genlock_unlock(xnet_srx2_progress(srx)->active_lock); + ofi_genlock_unlock(srx->progress.active_lock); return 0; } @@ -811,13 +811,13 @@ static void 
xnet_srx_cleanup(struct xnet_srx *srx, struct slist *queue) struct slist_entry *entry; struct xnet_xfer_entry *xfer_entry; - assert(xnet_progress_locked(xnet_srx2_progress(srx))); + assert(xnet_progress_locked(&srx->progress)); while (!slist_empty(queue)) { entry = slist_remove_head(queue); xfer_entry = container_of(entry, struct xnet_xfer_entry, entry); if (xfer_entry->cq) xnet_report_error(xfer_entry, FI_ECANCELED); - xnet_free_xfer(xnet_srx2_progress(srx), xfer_entry); + xnet_free_xfer(&srx->progress, xfer_entry); } } @@ -860,12 +860,12 @@ static int xnet_srx_close(struct fid *fid) srx = container_of(fid, struct xnet_srx, rx_fid.fid); - ofi_genlock_lock(xnet_srx2_progress(srx)->active_lock); + ofi_genlock_lock(srx->progress.active_lock); xnet_srx_cleanup(srx, &srx->rx_queue); xnet_srx_cleanup(srx, &srx->tag_queue); ofi_array_iter(&srx->src_tag_queues, srx, xnet_srx_cleanup_queues); ofi_array_iter(&srx->saved_msgs, srx, xnet_srx_cleanup_saved); - ofi_genlock_unlock(xnet_srx2_progress(srx)->active_lock); + ofi_genlock_unlock(srx->progress.active_lock); ofi_array_destroy(&srx->src_tag_queues); ofi_array_destroy(&srx->saved_msgs);