Stepan improvements #12

Merged · 9 commits · Oct 23, 2023

25 changes: 25 additions & 0 deletions .github/workflows/style.yml
@@ -0,0 +1,25 @@
name: style

on: [push]

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.11"]
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip' # caching pip dependencies
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
      - name: Check formatting with black and isort
        run: |
          black --check --diff .
          isort --check --diff .
35 changes: 22 additions & 13 deletions agents/agent.py
@@ -1,22 +1,31 @@
from textwrap import dedent

from gpt.gpt_client import GPTClient
from clients.base import AIClient
from constants import NO_COMMENT

DEFAULT_SYSTEM_PROMPT = dedent(
    f"""\
    Provide succinct, fact-based answers. Eliminate filler words and politeness.
    Concentrate on delivering actionable insights and concrete solutions.
    Avoid vague or generic statements. Stick to the topic at hand.
    If your response doesn't meet these standards, reply with the exact words '{NO_COMMENT}'
    """
)


class Agent:
    def __init__(self, name: str, user_prompt: str):
    def __init__(
        self,
        client: AIClient,
        name: str,
        user_prompt: str,
        system_prompt: str = DEFAULT_SYSTEM_PROMPT,
    ):
        self.name = name

        self.common_instructions = dedent(
            """\
            Provide succinct, fact-based answers. Eliminate filler words and politeness.
            Concentrate on delivering actionable insights and concrete solutions.
            Avoid vague or generic statements. Stick to the topic at hand.
            If the query doesn't meet these standards, reply with 'no comment.'
            """
        )

        self.gpt_client = GPTClient(self.common_instructions, user_prompt)
        self.client = client
        self.client.system_instructions = system_prompt
        self.client.user_prompt = user_prompt

    def query_gpt(self, transcript: str) -> str:
        return self.gpt_client.query(transcript)
        return self.client.query(transcript)
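
With this change an Agent no longer constructs its own GPTClient; it accepts any AIClient and writes the system and user prompts onto it. A minimal sketch of the new wiring, assuming the get_ai_client factory added later in this PR and a placeholder API key:

from agents.agent import Agent
from clients import AIClientConfig, AIClientType, get_ai_client

# The key is a placeholder; in practice it would come from the environment or a config file.
client = get_ai_client(AIClientType.ChatGPT, AIClientConfig(api_key="sk-...", model=None))

# The injected client receives the default system prompt plus this user prompt.
agent = Agent(client, name="Analyst", user_prompt="Summarise the key decisions in the transcript.")
print(agent.query_gpt("Alice: we ship on Friday. Bob: QA is not finished."))
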
39 changes: 23 additions & 16 deletions agents/chairman.py
@@ -2,35 +2,39 @@

from agents.agent import Agent
from agents.sme import SME

logger.disable(__name__)
from clients.base import AIClient


class Chairman(Agent):
    def __init__(self, name: str, executives: list):
    def __init__(self, client: AIClient, executives: list[SME], name: str = "Chairman"):
        # Construct the user_prompt string with details of the executives
        exec_details = ""
        for executive_agent in executives:
            exec_details += (
                f"{executive_agent.name}: expert in {executive_agent.expertise} "
                f"and concerned about {', '.join(executive_agent.concerns)}.\n"
            )
        self.user_prompt = self.update_user_prompt(executives)

        user_prompt = (
            f"Your task is to decide who should speak next among meeting participates. "
            f"Answer with only the name and nothing else. "
            f"Do not call on the same person too often.\nParticipants: {exec_details}"
        )
        system_prompt = f"Answer with only the name and nothing else."

        # Call the superclass constructor with the constructed user_prompt
        super().__init__(name, user_prompt)
        super().__init__(client, name, self.user_prompt, system_prompt)

        self.executives = executives

    def update_user_prompt(self, SMEs: list[SME]) -> str:
        frequency_info_list = []
        for sme in SMEs:
            frequency_info_list.append(
                f"{sme.name}: expertise: {sme.expertise}. "
                f"concerns: {', '.join(sme.concerns)}. spoken count: {sme.spoken_count}.\n"
            )

        return (
            f"Your task is to read the transcript and decide who should speak next. "
            f"Do not choose the same person all of the time.\n"
            f"Participants:\n{''.join(frequency_info_list)} "
        )

    def decide_if_meeting_over(self, transcript: list) -> bool:
        return False

    def decide_next_speaker(self, transcript_list: list) -> SME:
    def decide_next_speaker(self, transcript_list: list[str]) -> SME:
        transcript = " ".join(transcript_list)

        while True:
@@ -42,6 +46,9 @@ def decide_next_speaker(self, transcript_list: list) -> SME:
            )

            if next_executive is not None:
                next_executive.spoken_count += 1  # Update the frequency count
                self.user_prompt = self.update_user_prompt(self.executives)
                self.client.user_prompt = self.user_prompt
                return next_executive

            logger.info(f"{next_speaker} is not a valid exec...")
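
Taken together, the Chairman tracks a spoken_count per SME and rebuilds its user prompt after every pick so the model sees up-to-date frequencies. A rough sketch of a meeting round built from these classes (the topic string and environment variable are placeholders, and each agent gets its own client because Agent.__init__ writes its prompts onto the client it receives):

import os

from agents.chairman import Chairman
from agents.sme import SME
from clients import AIClientConfig, AIClientType, get_ai_client


def new_client():
    # One client per agent so their system/user prompts do not overwrite each other.
    config = AIClientConfig(api_key=os.environ["OPENAI_API_KEY"], model=None)
    return get_ai_client(AIClientType.ChatGPT, config)


ceo = SME(new_client(), "CEO", "Corporate Strategy", ["Market Entry", "Competitive Positioning"])
cto = SME(new_client(), "CTO", "Technical Infrastructure", ["Data Security", "System Integration"])
chairman = Chairman(new_client(), [ceo, cto])

transcript = ["Topic: should we build or buy the payments stack?"]
for _ in range(4):  # a few fixed turns; decide_if_meeting_over() is still a stub
    speaker = chairman.decide_next_speaker(transcript)
    transcript.append(f"{speaker.name}: {speaker.opinion(transcript)}")
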
22 changes: 22 additions & 0 deletions agents/idea_refiner.py
@@ -0,0 +1,22 @@
from textwrap import dedent

from agents.agent import Agent
from clients import AIClient

REFINER_PROMPT = dedent(
    """\
    You are going to be presented with a topic for discussion at a meeting.
    Your task is to think deeply about the topic presented, refine it, and note obvious
    high level constraints and considerations.
    Your output will serve as an introduction to the meeting participants.
    """
)


class IdeaRefiner(Agent):
    def __init__(self, client: AIClient, name: str = "Refiner"):
        # Call the superclass constructor with the constructed user_prompt
        super().__init__(client, name, REFINER_PROMPT)

    def refine_idea(self, idea: str) -> str:
        return self.query_gpt(idea)
36 changes: 25 additions & 11 deletions agents/sme.py
@@ -1,23 +1,37 @@
from textwrap import dedent

from agents.agent import Agent
from clients.base import AIClient

USER_PROMPT_TEMPLATE = dedent(
    """\
    Adopt the persona of the {name}.\n
    Your expertise is {expertise}.\n
    Your concerns are {concerns}.\n
    You will be shown a transcript of a meeting.
    You have been asked to speak by the meeting chairman.
    Specifically, provide insights on {concerns}
    based on the meeting transcript.\n
    Do not repeat points that have already been made
    """
)


class SME(Agent):
    def __init__(self, name: str, expertise: str, concerns: list[str]):
    def __init__(
        self, client: AIClient, name: str, expertise: str, concerns: list[str]
    ):
        # Construct the user_prompt string
        user_prompt_list = [
            f"Adopt the persona of the {name}.",
            f"Your expertise is {expertise}.",
            f"Your concerns are {', '.join(concerns)}.",
            "You should aim to provide original technical insights that align with these areas "
            "of expertise and concerns. Do not repeat points that have already been made.",
        ]
        user_prompt = " ".join(user_prompt_list)
        user_prompt = USER_PROMPT_TEMPLATE.format(
            name=name, expertise=expertise, concerns=", ".join(concerns)
        )

        # Call the superclass constructor with the constructed user_prompt
        super().__init__(name, user_prompt)
        super().__init__(client, name, user_prompt)
        self.expertise = expertise
        self.concerns = concerns
        self.spoken_count = 0

    def opinion(self, transcript_list: list) -> str:
    def opinion(self, transcript_list: list[str]) -> str:
        transcript = " ".join(transcript_list)
        return self.query_gpt(transcript)
3 changes: 3 additions & 0 deletions clients/__init__.py
@@ -0,0 +1,3 @@
from .base import AIClient
from .config import AIClientConfig
from .get_client import AIClientType, GPTClient, get_ai_client
7 changes: 7 additions & 0 deletions clients/base.py
@@ -0,0 +1,7 @@
from abc import ABC, abstractmethod


class AIClient(ABC):
    @abstractmethod
    def query(self, transcript: str):
        pass
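
The ABC pins down a single query(transcript) method; Agent additionally assigns the system and user prompts as attributes on whatever client it receives. A hypothetical stub client, not part of this PR, showing the minimal surface a new backend (or a test double) would need:

from clients.base import AIClient


class StubClient(AIClient):
    """Echoes its prompts back instead of calling a real model; useful in unit tests."""

    def __init__(self):
        self.system_instructions = None  # populated by Agent.__init__
        self.user_prompt = None          # populated by Agent.__init__

    def query(self, transcript: str) -> str:
        return f"[{self.user_prompt}] saw {len(transcript)} characters of transcript"
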
7 changes: 7 additions & 0 deletions clients/config.py
@@ -0,0 +1,7 @@
from dataclasses import dataclass


@dataclass
class AIClientConfig:
    api_key: str
    model: str | None
16 changes: 16 additions & 0 deletions clients/get_client.py
@@ -0,0 +1,16 @@
from enum import Enum

from clients.base import AIClient
from clients.config import AIClientConfig
from clients.gpt_client import GPTClient


class AIClientType(str, Enum):
    ChatGPT = "ChatGPT"


def get_ai_client(client_type: AIClientType, config: AIClientConfig) -> AIClient:
    if client_type == AIClientType.ChatGPT:
        return GPTClient(config.api_key)
    else:
        raise ValueError(f"Unknown AI client type: {client_type}")
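
The factory keeps call sites independent of the concrete backend; a sketch of the intended usage, with the environment variable name being an assumption:

import os

from clients import AIClientConfig, AIClientType, get_ai_client

config = AIClientConfig(api_key=os.environ["OPENAI_API_KEY"], model=None)
client = get_ai_client(AIClientType.ChatGPT, config)
# Note: get_ai_client builds GPTClient with its default model, so config.model is not forwarded yet.
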
83 changes: 83 additions & 0 deletions clients/gpt_client.py
@@ -0,0 +1,83 @@
import json
import time
from enum import Enum

import openai
from loguru import logger

from .base import AIClient


class Models(str, Enum):
    GPT3 = "gpt-3.5-turbo"
    GPT4 = "gpt-4"


class GPTClient(AIClient):
    def __init__(self, api_key: str, model: str = Models.GPT4.value):
        openai.api_key = api_key
        self._system_instructions = None
        self._user_prompt = None
        self.model = model
        self.max_tokens = 100
        self.temperature: float = 0.1
        # Log initial configuration on startup
        logger.info(f"Initializing GPTClient with the following configuration:")
        logger.info(f"Model: {self.model}")
        logger.info(f"Max Tokens: {self.max_tokens}")
        logger.info(f"Temperature: {self.temperature}")

    @property
    def system_instructions(self):
        return self._system_instructions

    @system_instructions.setter
    def system_instructions(self, value):
        logger.debug(f"Setting system instructions: {value}")
        self._system_instructions = value

    @property
    def user_prompt(self):
        return self._user_prompt

    @user_prompt.setter
    def user_prompt(self, value):
        logger.debug(f"Setting user prompt: {value}")
        self._user_prompt = value

    def query(self, transcript: str) -> str:
        max_retries = 6  # Number of retries
        retry_delay = 10  # Delay between retries in seconds

        # TODO: use backoff decorator
        for i in range(max_retries):
            try:
                start_time = time.time()
                messages = [
                    {"role": "system", "content": self._system_instructions},
                    {"role": "user", "content": self._user_prompt},
                    {"role": "assistant", "content": transcript},
                ]
                logger.info(json.dumps(messages, indent=4).replace("\\n", "\n"))

                response = openai.ChatCompletion.create(
                    model=self.model,
                    temperature=self.temperature,
                    messages=messages,
                )

                elapsed_time = time.time() - start_time

                # Log the time taken and token usage
                logger.info(f"GPT query took {elapsed_time:.2f} seconds")
                logger.info(f"Tokens used in the request: {response['usage']}")

                return response.choices[0].message.content.strip()
            except openai.error.RateLimitError as e:
                logger.warning(
                    f"Rate limit reached. Retrying in {retry_delay} seconds. Details: {e}"
                )
                time.sleep(retry_delay)

        logger.error(f"Max retries reached. Could not complete the GPT query.")
        return "Error in GPT client that could not be resolved by retrying."
2 changes: 2 additions & 0 deletions constants/__init__.py
@@ -0,0 +1,2 @@
from .default_sme import DEFAULT_SME_DICT
from .strings import NO_COMMENT
42 changes: 42 additions & 0 deletions constants/default_sme.py
@@ -0,0 +1,42 @@
DEFAULT_SME_DICT = (
    {
        "name": "CEO",
        "expertise": "Corporate Strategy",
        "concerns": ["Market Entry", "Competitive Positioning"],
    },
    {
        "name": "CFO",
        "expertise": "Financial Products",
        "concerns": ["Rate Management", "Regulatory Compliance"],
    },
    {
        "name": "COO",
        "expertise": "Operational Efficiency",
        "concerns": ["Scalability", "Cost Optimization"],
    },
    {
        "name": "CMO",
        "expertise": "Customer Acquisition",
        "concerns": ["Target Market", "Onboarding Experience"],
    },
    {
        "name": "CTO",
        "expertise": "Technical Infrastructure",
        "concerns": ["Data Security", "System Integration"],
    },
    {
        "name": "CRO",
        "expertise": "Risk Management",
        "concerns": ["Fraud Detection", "Compliance"],
    },
    {
        "name": "CCO",
        "expertise": "Customer Experience",
        "concerns": ["UX/UI Design", "Customer Support"],
    },
    {
        "name": "CPO",
        "expertise": "Product Management",
        "concerns": ["Feature Rollout", "Customer Feedback"],
    },
)
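
For completeness, a sketch of how this roster might be turned into SME instances for the Chairman (one client per SME, since Agent.__init__ stores its prompts on the client it is given; the environment variable is an assumption):

import os

from agents.sme import SME
from clients import AIClientConfig, AIClientType, get_ai_client
from constants import DEFAULT_SME_DICT

config = AIClientConfig(api_key=os.environ["OPENAI_API_KEY"], model=None)
executives = [
    SME(get_ai_client(AIClientType.ChatGPT, config), sme["name"], sme["expertise"], sme["concerns"])
    for sme in DEFAULT_SME_DICT
]
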
1 change: 1 addition & 0 deletions constants/strings.py
@@ -0,0 +1 @@
NO_COMMENT = "NO COMMENT"
Empty file removed gpt/__init__.py
Empty file.