From b60eaa54023b772dec1acbef97aadddf9f09333b Mon Sep 17 00:00:00 2001 From: chhy2009 Date: Wed, 29 Nov 2023 10:19:14 +0800 Subject: [PATCH] BugFix: fix the following bugs - unable to view fiber stack using gdb in python3 - possible timeout in low qps situation when using connection reuse mode - compatibility with the threadmodel_type field --- trpc/client/service_proxy_manager.cc | 1 + trpc/client/service_proxy_option_setter.cc | 4 ++++ trpc/common/config/client_conf_parser.h | 2 ++ trpc/common/config/server_conf_parser.h | 4 ++++ trpc/server/trpc_server.cc | 1 + trpc/tools/gdb_plugin/README.md | 1 + trpc/tools/gdb_plugin/gdb_fiber_plugin.py | 5 +++++ .../future/future_tcp_connector_group_manager.cc | 11 ++++------- trpc/transport/client/future/future_transport.cc | 2 +- 9 files changed, 23 insertions(+), 8 deletions(-) diff --git a/trpc/client/service_proxy_manager.cc b/trpc/client/service_proxy_manager.cc index b2f62e8a..a1f66725 100644 --- a/trpc/client/service_proxy_manager.cc +++ b/trpc/client/service_proxy_manager.cc @@ -41,6 +41,7 @@ void ServiceProxyManager::SetOptionFromConfig(const ServiceProxyConfig& proxy_co option->is_reconnection = proxy_conf.is_reconnection; option->connect_timeout = proxy_conf.connect_timeout; option->allow_reconnect = proxy_conf.allow_reconnect; + option->threadmodel_type_name = proxy_conf.threadmodel_type; option->threadmodel_instance_name = proxy_conf.threadmodel_instance_name; option->service_filters = proxy_conf.service_filters; diff --git a/trpc/client/service_proxy_option_setter.cc b/trpc/client/service_proxy_option_setter.cc index 2b7d7915..d36c16b2 100644 --- a/trpc/client/service_proxy_option_setter.cc +++ b/trpc/client/service_proxy_option_setter.cc @@ -99,6 +99,7 @@ void SetDefaultOption(const std::shared_ptr& option) { option->is_reconnection = kDefaultIsReconnection; option->connect_timeout = kDefaultConnectTimeout; option->allow_reconnect = kDefaultAllowReconnect; + option->threadmodel_type_name = 
kDefaultThreadmodelType; option->threadmodel_instance_name = ""; option->support_pipeline = kDefaultSupportPipeline; } @@ -186,6 +187,9 @@ void SetSpecifiedOption(const ServiceProxyOption* option_ptr, const std::shared_ auto allow_reconnect = GetValidInput(option_ptr->allow_reconnect, kDefaultAllowReconnect); SetOutputByValidInput(allow_reconnect, option->allow_reconnect); + auto threadmodel_type_name = GetValidInput(option_ptr->threadmodel_type_name, kDefaultThreadmodelType); + SetOutputByValidInput(threadmodel_type_name, option->threadmodel_type_name); + auto threadmodel_instance_name = GetValidInput(option_ptr->threadmodel_instance_name, ""); SetOutputByValidInput(threadmodel_instance_name, option->threadmodel_instance_name); diff --git a/trpc/common/config/client_conf_parser.h b/trpc/common/config/client_conf_parser.h index e1f2bbc8..c15e89e6 100644 --- a/trpc/common/config/client_conf_parser.h +++ b/trpc/common/config/client_conf_parser.h @@ -46,6 +46,7 @@ struct convert { node["recv_buffer_size"] = proxy_config.recv_buffer_size; node["send_queue_capacity"] = proxy_config.send_queue_capacity; node["send_queue_timeout"] = proxy_config.send_queue_timeout; + node["threadmodel_type"] = proxy_config.threadmodel_type; node["threadmodel_instance_name"] = proxy_config.threadmodel_instance_name; node["selector_name"] = proxy_config.selector_name; node["namespace"] = proxy_config.namespace_; @@ -101,6 +102,7 @@ struct convert { if (node["recv_buffer_size"]) proxy_config.recv_buffer_size = node["recv_buffer_size"].as(); if (node["send_queue_capacity"]) proxy_config.send_queue_capacity = node["send_queue_capacity"].as(); if (node["send_queue_timeout"]) proxy_config.send_queue_timeout = node["send_queue_timeout"].as(); + if (node["threadmodel_type"]) proxy_config.threadmodel_type = node["threadmodel_type"].as(); if (node["threadmodel_instance_name"]) proxy_config.threadmodel_instance_name = node["threadmodel_instance_name"].as(); if (node["selector_name"]) 
proxy_config.selector_name = node["selector_name"].as(); diff --git a/trpc/common/config/server_conf_parser.h b/trpc/common/config/server_conf_parser.h index 1200d2ea..a6106e0f 100644 --- a/trpc/common/config/server_conf_parser.h +++ b/trpc/common/config/server_conf_parser.h @@ -49,6 +49,7 @@ struct convert { node["recv_buffer_size"] = service_config.recv_buffer_size; node["send_queue_capacity"] = service_config.send_queue_capacity; node["send_queue_timeout"] = service_config.send_queue_timeout; + node["threadmodel_type"] = service_config.threadmodel_type; node["threadmodel_instance_name"] = service_config.threadmodel_instance_name; node["accept_thread_num"] = service_config.accept_thread_num; node["stream_read_timeout"] = service_config.stream_read_timeout; @@ -122,6 +123,9 @@ struct convert { if (node["send_queue_timeout"]) { service_config.send_queue_timeout = node["send_queue_timeout"].as(); } + if (node["threadmodel_type"]) { + service_config.threadmodel_type = node["threadmodel_type"].as(); + } if (node["threadmodel_instance_name"]) { service_config.threadmodel_instance_name = node["threadmodel_instance_name"].as(); } diff --git a/trpc/server/trpc_server.cc b/trpc/server/trpc_server.cc index b13c196e..cef00458 100644 --- a/trpc/server/trpc_server.cc +++ b/trpc/server/trpc_server.cc @@ -71,6 +71,7 @@ void TrpcServer::BuildServiceAdapterOption(const ServiceConfig& config, ServiceA option.send_queue_capacity = config.send_queue_capacity; option.send_queue_timeout = config.send_queue_timeout; option.accept_thread_num = config.accept_thread_num; + option.threadmodel_type = config.threadmodel_type; option.threadmodel_instance_name = config.threadmodel_instance_name; option.stream_read_timeout = config.stream_read_timeout; option.stream_max_window_size = config.stream_max_window_size; diff --git a/trpc/tools/gdb_plugin/README.md b/trpc/tools/gdb_plugin/README.md index 3a045331..e776b561 100644 --- a/trpc/tools/gdb_plugin/README.md +++ 
b/trpc/tools/gdb_plugin/README.md @@ -56,3 +56,4 @@ RIP 0x00000000009249d4 RBP 0x00007f598e5fe8d0 RSP 0x00007f598e5fe880 Found 2 fiber(s) in total. ``` +Note: for the production environment, you can use the following command to minimize the impact of debugging on the running service (although this command can still cause a second-level service suspension): `gdb --pid --eval-command='source gdb_fiber_plugin.py' --eval-command='set pagination off' --eval-command='list-fibers' --batch`. diff --git a/trpc/tools/gdb_plugin/gdb_fiber_plugin.py b/trpc/tools/gdb_plugin/gdb_fiber_plugin.py index 3f79723c..4469fc06 100644 --- a/trpc/tools/gdb_plugin/gdb_fiber_plugin.py +++ b/trpc/tools/gdb_plugin/gdb_fiber_plugin.py @@ -136,6 +136,7 @@ def __init__(self, inferior, fiber_entity_ptr): saved_state = inferior.read_memory(state_save_area, 0x40) except GdbMemoryError: saved_state = '\x00' * 0x40 + return None # @sa: fiber/detail/x86_64/jump_context.S # # +---------------------------------------------------------------+ @@ -327,6 +328,8 @@ def try_extract_fiber(inferior, active_frames, offset): try: # It's a fiber stack. Otherwise we shouldn't have been called. fiber = Fiber(inferior, offset - FIBER_STACK_RESERVED_SIZE) + if fiber is None: + return None if fiber.stack_top == fiber.stack_bottom: # Master fiber. @@ -430,6 +433,8 @@ def extract_call_stack(inferior, rip, rbp): yield rip # Hmmm... 
try: while True: + if rbp == 0: + return ip = struct.unpack('Q', inferior.read_memory(rbp + 8, 8))[0] if ip == 0: return diff --git a/trpc/transport/client/future/future_tcp_connector_group_manager.cc b/trpc/transport/client/future/future_tcp_connector_group_manager.cc index 5dd05c34..c9665b15 100644 --- a/trpc/transport/client/future/future_tcp_connector_group_manager.cc +++ b/trpc/transport/client/future/future_tcp_connector_group_manager.cc @@ -24,13 +24,7 @@ namespace trpc { FutureTcpConnectorGroupManager::FutureTcpConnectorGroupManager(const Options& options) - : FutureConnectorGroupManager(options) { - if (options_.trans_info->is_complex_conn) { - shared_msg_timeout_handler_ = - std::make_unique(options_.trans_info->rsp_dispatch_function); - TRPC_ASSERT(shared_msg_timeout_handler_ != nullptr); - } -} + : FutureConnectorGroupManager(options) {} void FutureTcpConnectorGroupManager::Stop() { if (options_.trans_info->is_complex_conn && timer_id_ != kInvalidTimerId) { @@ -52,6 +46,9 @@ void FutureTcpConnectorGroupManager::Destroy() { bool FutureTcpConnectorGroupManager::CreateTimer() { if (!is_create_timer_) { if (options_.trans_info->is_complex_conn) { + shared_msg_timeout_handler_ = + std::make_unique(options_.trans_info->rsp_dispatch_function); + TRPC_ASSERT(shared_msg_timeout_handler_ != nullptr); auto timeout_check_interval = options_.trans_info->request_timeout_check_interval; timer_id_ = options_.reactor->AddTimerAfter( 0, timeout_check_interval, [this]() { shared_msg_timeout_handler_->DoTimeout(); }); diff --git a/trpc/transport/client/future/future_transport.cc b/trpc/transport/client/future/future_transport.cc index eb150ab2..aafa4c55 100644 --- a/trpc/transport/client/future/future_transport.cc +++ b/trpc/transport/client/future/future_transport.cc @@ -295,7 +295,7 @@ uint16_t FutureTransport::SelectTransportAdapter(CTransportReqMsg* msg, uint16_t bool FutureTransport::IsSameIOThread(uint16_t index) { auto* current_thread = 
WorkerThread::GetCurrentWorkerThread(); if (current_thread && current_thread->Role() != kHandle) { - if (index == GetLogicId(current_thread)) { + if ((index == GetLogicId(current_thread)) && (options_.thread_model->GroupId() == current_thread->GroupId())) { return true; } }