
Commit

gracefully handle new messages without a selected chat model
dlqqq committed Dec 30, 2024
1 parent e84d411 commit 0720ffa
Showing 1 changed file with 17 additions and 7 deletions.
packages/jupyter-ai/jupyter_ai/chat_handlers/base.py (17 additions, 7 deletions)
@@ -171,27 +171,37 @@ async def on_message(self, message: Message):
         """
         Method which receives a human message, calls `self.get_llm_chain()`, and
         processes the message via `self.process_message()`, calling
-        `self.handle_exc()` when an exception is raised. This method is called
-        by RootChatHandler when it routes a human message to this chat handler.
+        `self.handle_exc()` when an exception is raised.
+        This is the method called directly in response to new chat messages.
         """
-        lm_provider_klass = self.config_manager.lm_provider
+        ChatModelProvider = self.config_manager.lm_provider
 
+        # first, ensure a chat model is configured
+        if not ChatModelProvider:
+            # TODO: update this message to be more useful once we improve
+            # ease-of-access to the Jupyter AI settings.
+            self.reply(
+                "To use Jupyter AI, please select a chat model first in the Jupyter AI settings."
+            )
+            return
+
+        # ensure the current slash command is supported
         if self.routing_type.routing_method == "slash_command":
             routing_type = cast(SlashCommandRoutingType, self.routing_type)
             slash_command = "/" + routing_type.slash_id if routing_type.slash_id else ""
-            if slash_command in lm_provider_klass.unsupported_slash_commands:
+            if slash_command in ChatModelProvider.unsupported_slash_commands:
                 self.reply(
                     "Sorry, the selected language model does not support this slash command.",
                 )
                 return
 
         # check whether the configured LLM can support a request at this time.
         if self.uses_llm and BaseChatHandler._requests_count > 0:
-            lm_provider_params = self.config_manager.lm_provider_params
-            lm_provider = lm_provider_klass(**lm_provider_params)
+            chat_model_args = self.config_manager.lm_provider_params
+            chat_model = ChatModelProvider(**chat_model_args)
 
-            if not lm_provider.allows_concurrency:
+            if not chat_model.allows_concurrency:
                 self.reply(
                     "The currently selected language model can process only one request at a time. Please wait for me to reply before sending another question.",
                     message,
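In plain terms, the fix is an early-return guard at the top of `on_message`: when `self.config_manager.lm_provider` is `None` (no chat model selected yet), the handler now replies with a pointer to the Jupyter AI settings and returns, instead of failing later when the provider class would be instantiated. Below is a minimal, self-contained sketch of that control flow; `FakeChatModel`, `FakeConfigManager`, and `Handler` are hypothetical stand-ins used only for illustration and are not Jupyter AI classes.

    # Minimal sketch of the guard pattern introduced by this commit.
    # The classes below are hypothetical stand-ins, not Jupyter AI APIs.
    from typing import Optional, Type


    class FakeChatModel:
        """Stand-in for a chat model class selected in the settings."""


    class FakeConfigManager:
        def __init__(self, lm_provider: Optional[Type[FakeChatModel]] = None):
            # Mirrors `config_manager.lm_provider`: the *class* of the selected
            # chat model, or None when no model has been chosen yet.
            self.lm_provider = lm_provider
            self.lm_provider_params: dict = {}


    class Handler:
        def __init__(self, config_manager: FakeConfigManager):
            self.config_manager = config_manager

        def reply(self, text: str) -> None:
            print(text)

        def on_message(self, message: str) -> None:
            ChatModelProvider = self.config_manager.lm_provider

            # The guard: bail out early with a friendly message instead of
            # raising later when the provider class is instantiated.
            if not ChatModelProvider:
                self.reply(
                    "To use Jupyter AI, please select a chat model first in the "
                    "Jupyter AI settings."
                )
                return

            chat_model = ChatModelProvider(**self.config_manager.lm_provider_params)
            self.reply(f"Processing {message!r} with {type(chat_model).__name__}")


    # With no model configured, the handler replies with guidance and returns:
    Handler(FakeConfigManager()).on_message("hello")
    # With a model configured, processing proceeds as before:
    Handler(FakeConfigManager(FakeChatModel)).on_message("hello")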
