Skip to content

Commit

Permalink
Merge pull request #66 from aitomatic/experimental
Browse files Browse the repository at this point in the history
enable built-in problem-solving agents
  • Loading branch information
TheVinhLuong102 authored Dec 13, 2023
2 parents e950ef9 + 7fd20ad commit b136568
Show file tree
Hide file tree
Showing 6 changed files with 344 additions and 40 deletions.
8 changes: 5 additions & 3 deletions openssa/contrib/custom_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,9 +20,11 @@ def use_custom_ssm():

def use_ooda():
task_heuristics = TaskDecompositionHeuristic({})
highest_priority_heuristic = ('The Purge Time must be at least as long as the Precursor Pulse Time '
'to ensure that all excess precursor and reaction byproducts are removed '
'from the chamber before the next cycle begins.')
highest_priority_heuristic = (
"The Purge Time must be at least as long as the Precursor Pulse Time "
"to ensure that all excess precursor and reaction byproducts are removed "
"from the chamber before the next cycle begins."
)
ooda_ssa = OodaSSA(
task_heuristics=task_heuristics,
highest_priority_heuristic=highest_priority_heuristic,
Expand Down
108 changes: 108 additions & 0 deletions openssa/core/ooda_rag/builtin_agents.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,108 @@
from abc import ABC, abstractmethod
from typing import List, Optional
import json
from openai import OpenAI
from loguru import logger
from openssa.utils.aitomatic_llm_config import AitomaticLLMConfig
from openssa.core.ooda_rag.prompts import BuiltInAgentPrompt
from openssa.utils.utils import Utils


class AgentRole:
    """OpenAI-style chat-completion role names used when building message dicts."""

    USER = "user"  # messages authored by the end user
    SYSTEM = "system"  # instruction / prompt messages
    ASSISTANT = "assistant"  # messages produced by the model


class TaskAgent(ABC):
    """
    Abstract base class for all task agents.

    Subclasses implement :meth:`execute` to perform a single task and
    return the agent's textual result.
    """

    @abstractmethod
    def execute(self, task: str) -> str:
        """
        Execute the task agent with the given task.

        :param task: the task or problem statement to act on
        :return: the agent's response as a string
        """
        # NOTE: the redundant `pass` after the docstring was removed; a
        # docstring alone is a complete abstract-method body.


class AskUserAgent(TaskAgent):
    """
    AskUserAgent helps to determine if user wants to provide additional information.

    Given a problem statement and a heuristic guideline, it asks the LLM to
    formulate (at most) one clarifying question for the user, returned as a
    plain string ("" when no question is warranted).
    """

    def __init__(
        self,
        llm: Optional[OpenAI] = None,
        model: str = "aitomatic-model",
        ask_user_heuristic: str = "",
        conversation: Optional[List] = None,
    ) -> None:
        # Create the default client lazily: evaluating
        # `AitomaticLLMConfig.get_aitomatic_llm()` in the signature would run
        # once at import time and share a single client across every instance.
        self.llm = llm if llm is not None else AitomaticLLMConfig.get_aitomatic_llm()
        self.model = model
        self.ask_user_heuristic = ask_user_heuristic.strip()
        # Keep only the 10 most recent messages to bound prompt size.
        self.conversation = conversation[-10:] if conversation else []

    @Utils.timeit
    def execute(self, task: str = "") -> str:
        """
        Ask the LLM whether a clarifying question is needed for `task`.

        :param task: the current problem statement
        :return: the clarifying question, or "" when the heuristic is empty,
            does not apply, or the LLM response cannot be parsed as JSON
        """
        if not self.ask_user_heuristic:
            return ""
        system_message = {
            "role": "system",
            "content": BuiltInAgentPrompt.ASK_USER.format(
                problem_statement=task,
                heuristic=self.ask_user_heuristic,
            ),
        }
        conversation = self.conversation + [system_message]
        # The prompt instructs the model to answer as a JSON object with a
        # "question" key; request JSON mode so parsing below is reliable.
        response = self.llm.chat.completions.create(
            model=self.model,
            messages=conversation,
            response_format={"type": "json_object"},
        )
        json_str = response.choices[0].message.content
        logger.debug(f"ask user response is: {json_str}")
        try:
            jobject = json.loads(json_str)
            return jobject.get("question", "")
        except json.JSONDecodeError:
            # Best-effort: a malformed response means "no question to ask".
            logger.error("Failed to decode the response as JSON for ask user agent.")
            return ""


class GoalAgent(TaskAgent):
    """
    GoalAgent helps to determine problem statement from the conversation between user and SSA.

    It asks the LLM to distill the (possibly multi-turn) conversation into a
    single problem statement, returned as a plain string ("" when the
    conversation is too ambiguous to extract one).
    """

    def __init__(
        self,
        llm: Optional[OpenAI] = None,
        model: str = "aitomatic-model",
        conversation: Optional[List] = None,
    ) -> None:
        # Create the default client lazily: evaluating
        # `AitomaticLLMConfig.get_aitomatic_llm()` in the signature would run
        # once at import time and share a single client across every instance.
        self.llm = llm if llm is not None else AitomaticLLMConfig.get_aitomatic_llm()
        self.model = model
        # Keep only the 10 most recent messages to bound prompt size.
        self.conversation = conversation[-10:] if conversation else []

    @Utils.timeit
    def execute(self, task: str = "") -> str:
        """
        Extract the problem statement from the stored conversation.

        :param task: unused; present to satisfy the TaskAgent interface
        :return: the extracted problem statement, or a fallback (the most
            recent conversation message's content) when the LLM response
            cannot be parsed as JSON
        """
        system_message = {
            "role": "system",
            "content": BuiltInAgentPrompt.PROBLEM_STATEMENT,
        }
        conversation = self.conversation + [system_message]
        response = self.llm.chat.completions.create(
            model=self.model,
            messages=conversation,
            response_format={"type": "json_object"},
        )
        json_str = response.choices[0].message.content
        logger.debug(f"problem statement response is: {json_str}")
        try:
            jobject = json.loads(json_str)
            return jobject.get("problem statement", "")
        except json.JSONDecodeError:
            logger.error("Failed to decode the response as JSON for goal agent.")
            # BUGFIX: fall back to the last *conversation* message. The old
            # code read `conversation[-1]`, which is always the system prompt
            # appended above — leaking the prompt to the caller. Also guard
            # against an empty history to avoid IndexError.
            if self.conversation:
                return self.conversation[-1].get("content", "")
            return ""
68 changes: 68 additions & 0 deletions openssa/core/ooda_rag/prompts.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,3 +75,71 @@ class OODAPrompts:
"{heuristic} "
"Exercise discernment in selecting the appropriate messages to construct a logical and step-by-step reasoning process."
)


class BuiltInAgentPrompt:
    """System prompts used by the built-in agents (GoalAgent, AskUserAgent).

    Both prompts instruct the model to reply in JSON mode; the callers parse
    the keys "problem statement" and "question" respectively.
    """

    # Used by GoalAgent: distill the conversation into one problem statement.
    # Few-shot examples cover the normal case, ambiguity (empty value), and
    # multiple simultaneous issues.
    PROBLEM_STATEMENT = (
        "You are tasked with identifying the problem statement from a conversation "
        "between a user and an AI chatbot. Your focus should be on the entire context "
        "of the conversation, especially the most recent message from the user, "
        "to understand the issue comprehensively. Extract specific details "
        "that define the current concern or question posed by the user, "
        "which the assistant is expected to address. The problem statement should be "
        "concise, clear, and presented as a question, command, or task, reflecting "
        "the conversation's context and in the user's voice. In cases where "
        "the conversation is ambiguous return empty value for problem statement. "
        'Output the response in JSON format with the keyword "problem statement".\n'
        "Example 1:\n"
        "Assistant: Hello, what can I help you with today?\n"
        "User: My boiler is not functioning, please help to troubleshoot.\n"
        "Assistant: Can you check and provide the temperature, pressure, and on-off status?\n"
        "User: The temperature is 120°C.\n\n"
        "Response:\n"
        "{\n"
        ' "problem statement": "Can you help to troubleshoot a non-functioning '
        'boiler, given the temperature is 120°C?"\n'
        "}\n\n"
        "Example 2:\n"
        "Assistant: Hi, what can I help you with?\n"
        "User: I don't know how to go to the airport\n"
        "Assistant: Where are you and which airport do you want to go to?\n"
        "User: I'm in New York\n"
        "Response:\n"
        "{\n"
        ' "problem statement": "How do I get to the airport from my current '
        'location in New York?"\n'
        "}\n\n"
        "Example 3 (Ambiguity):\n"
        "Assistant: How can I assist you today?\n"
        "User: I'm not sure what's wrong, but my computer is acting weird.\n"
        "Assistant: Can you describe the issues you are experiencing?\n"
        "User: Hey I am good, the sky is blue.\n\n"
        "Response:\n"
        "{\n"
        ' "problem statement": ""\n'
        "}\n\n"
        "Example 4 (Multiple Issues):\n"
        "Assistant: What do you need help with?\n"
        "User: My internet is down, and I can't access my email either.\n"
        "Assistant: Are both issues related, or did they start separately?\n"
        "User: They started at the same time, I think.\n\n"
        "Response:\n"
        "{\n"
        ' "problem statement": "Can you help with my internet being down and also '
        'accessing my email?"\n'
        "}"
    )

    # Used by AskUserAgent: format with {heuristic} and {problem_statement};
    # the model returns {"question": "..."} or an empty question when the
    # heuristic does not apply.
    ASK_USER = (
        "Your task is to assist an AI assistant in formulating a question for the user. "
        "This should be based on the ongoing conversation, the presented problem statement, "
        "and a specific heuristic guideline. "
        "The assistant should formulate the question strictly based on the heuristic. "
        "If the heuristic does not apply or is irrelevant to the problem statement, "
        "return empty string for the question. "
        "Below is the heuristic guideline:\n"
        "###{heuristic}###\n\n"
        "Here is the problem statement or the user's current question:\n"
        "###{problem_statement}###\n\n"
        'Output the response in JSON format with the keyword "question".'
    )
44 changes: 32 additions & 12 deletions openssa/core/ooda_rag/solver.py
Original file line number Diff line number Diff line change
@@ -1,20 +1,24 @@
from openssa.core.ooda_rag.custom import CustomSSM
from openssa.core.ooda_rag.ooda_rag import Solver
from openssa.core.ooda_rag.ooda_rag import Solver, History
from openssa.core.ooda_rag.heuristic import DefaultOODAHeuristic
from openssa.core.ooda_rag.tools import ReasearchAgentTool
from openssa.utils.llm_config import LLMConfig
from openssa.utils.aitomatic_llm_config import AitomaticLLMConfig
from openssa.core.ooda_rag.builtin_agents import GoalAgent, AgentRole, AskUserAgent


class OodaSSA:
def __init__(self,
task_heuristics,
highest_priority_heuristic: str = "",
agent_service_context=LLMConfig.get_service_context_llama_2_70b(),
llm=AitomaticLLMConfig.get_aitomatic_llm(),
rag_llm=LLMConfig.get_llm_llama_2_70b(),
embed_model=LLMConfig.get_aito_embeddings(),
model='gpt-4-1106-preview'):
def __init__(
self,
task_heuristics,
highest_priority_heuristic: str = "",
ask_user_heuristic: str = "",
agent_service_context=LLMConfig.get_service_context_llama_2_70b(),
llm=AitomaticLLMConfig.get_aitomatic_llm(),
rag_llm=LLMConfig.get_llm_llama_2_70b(),
embed_model=LLMConfig.get_aito_embeddings(),
model="aitomatic-model",
):
# pylint: disable=too-many-arguments
self.llm = llm
self.rag_llm = rag_llm
Expand All @@ -27,18 +31,34 @@ def __init__(self,
model=model,
highest_priority_heuristic=highest_priority_heuristic,
)
self.ask_user_heuristic = ask_user_heuristic
self.conversation = History()
self.conversation.add_message("Hi, what can I help you?", AgentRole.ASSISTANT)

def activate_resources(self, folder_path: str) -> None:
agent = CustomSSM(llm=self.rag_llm, embed_model=self.embed_model)

if folder_path.startswith('s3://'):
if folder_path.startswith("s3://"):
agent.read_s3(folder_path)
else:
agent.read_directory(folder_path)

self.research_documents_tool = ReasearchAgentTool(agent=agent)

def solve(self, message: str) -> str:
return self.solver.run(
message, {"research_documents": self.research_documents_tool}
self.conversation.add_message(message, AgentRole.USER)
goal_agent = GoalAgent(conversation=self.conversation.get_history())
problem_statement = goal_agent.execute()
if not problem_statement:
return "Sorry, I don't understand your problem."
ask_user_response = AskUserAgent(
ask_user_heuristic=self.ask_user_heuristic,
conversation=self.conversation.get_history(),
).execute(problem_statement)
if ask_user_response:
return ask_user_response
assistant_response = self.solver.run(
problem_statement, {"research_documents": self.research_documents_tool}
)
self.conversation.add_message(assistant_response, AgentRole.ASSISTANT)
return assistant_response
Loading

0 comments on commit b136568

Please sign in to comment.