
Commit

Merge pull request #476 from rmusser01/dev
Wew, i'm an idiot.
rmusser01 authored Jan 6, 2025
2 parents 8c2484a + da1261b commit 2395271
Showing 17 changed files with 228 additions and 201 deletions.
10 changes: 4 additions & 6 deletions App_Function_Libraries/Chat/Chat_Functions.py
@@ -49,11 +49,9 @@ def chat_api_call(api_endpoint, api_key, input_data, prompt, temp, system_messag
logging.info(f"Debug - Chat API Call - API Endpoint: {api_endpoint}")
log_counter("chat_api_call_attempt", labels={"api_endpoint": api_endpoint})
start_time = time.time()
- if not api_key:
-     api_key = None
- model = None
try:
logging.info(f"Debug - Chat API Call - API Endpoint: {api_endpoint}")
logging.info(f"Debug - Chat API Call - API Key: {api_key[:4]}...{api_key[-4:]}")
logging.info(f"Debug - Chat chat_api_call - API Endpoint: {api_endpoint}")
if api_endpoint.lower() == 'openai':
response = chat_with_openai(api_key, input_data, prompt, temp, system_message, streaming, minp, maxp, model)
@@ -142,7 +140,7 @@ def chat_api_call(api_endpoint, api_key, input_data, prompt, temp, system_messag
return f"An error occurred: {str(e)}"


- def chat(message, history, media_content, selected_parts, api_endpoint, api_key, prompt, temperature,
+ def chat(message, history, media_content, selected_parts, api_endpoint, api_key, custom_prompt, temperature,
system_message=None, streaming=False, minp=None, maxp=None, model=None):
log_counter("chat_attempt", labels={"api_endpoint": api_endpoint})
start_time = time.time()
@@ -179,10 +177,10 @@ def chat(message, history, media_content, selected_parts, api_endpoint, api_key,

logging.debug(f"Debug - Chat Function - Temperature: {temperature}")
logging.debug(f"Debug - Chat Function - API Key: {api_key[:10]}")
logging.debug(f"Debug - Chat Function - Prompt: {prompt}")
logging.debug(f"Debug - Chat Function - Prompt: {custom_prompt}")

# Use the existing API request code based on the selected endpoint
- response = chat_api_call(api_endpoint, api_key, input_data, prompt, temp, system_message, streaming, minp=None, maxp=None, model=None)
+ response = chat_api_call(api_endpoint, api_key, input_data, custom_prompt, temp, system_message, streaming, minp, maxp, model)

if streaming:
return response
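Taken together, these hunks rename the prompt parameter of chat() to custom_prompt and forward the caller's minp, maxp, and model to chat_api_call() instead of hard-coding them to None (the first hunk also drops the code that silently reset api_key and model). A minimal sketch of a call against the new signature follows; the keyword names come from the diff above, but the import path, argument values, and the shape of the media dict are illustrative assumptions, not part of this commit.

# Sketch only: assumes the repo root is on sys.path and the package imports as shown.
from App_Function_Libraries.Chat.Chat_Functions import chat

response = chat(
    message="Summarize the key points of the transcript.",
    history=[],
    media_content={"id": 42, "title": "Example Episode"},  # placeholder media dict
    selected_parts=["content"],
    api_endpoint="openai",
    api_key="sk-...",                                       # placeholder key
    custom_prompt="Answer in three bullet points.",
    temperature=0.7,
    system_message="You are a helpful assistant.",
    streaming=True,
    minp=0.05,                                              # now forwarded instead of forced to None
    maxp=0.95,
    model="gpt-4o-mini",                                    # now reaches chat_api_call unchanged
)

# With streaming=True the return value is the provider stream, which callers such as
# chat_wrapper() below consume chunk by chunk.
for chunk in response:
    print(chunk, end="")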
11 changes: 7 additions & 4 deletions App_Function_Libraries/Gradio_UI/Chat_ui.py
@@ -93,7 +93,9 @@ def chat_wrapper(message, history, media_content, selected_parts, api_endpoint,
presence_penalty=None, stop_sequence=None):
try:
if save_conversation:
logging.info("chat_wrapper(): Saving conversation")
if conversation_id is None:
logging.info("chat_wrapper(): Creating a new conversation")
# Create a new conversation
media_id = media_content.get('id', None)
conversation_name = f"Chat about {media_content.get('title', 'Unknown Media')} - {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
@@ -111,11 +113,12 @@ def chat_wrapper(message, history, media_content, selected_parts, api_endpoint,
full_message = message

# Generate bot response
logging.debug("chat_wrapper(): Generating bot response")
bot_message = ""
for chunk in chat(full_message, history, media_content, selected_parts, api_endpoint, api_key, custom_prompt,
- temperature, system_prompt, streaming):
+ temperature, system_prompt, streaming, minp=None, maxp=None, model=None):
bot_message += chunk # Accumulate the streamed response
logging.debug(f"Bot message being returned: {bot_message}")
logging.debug(f"chat_wrapper(): Bot message being returned: {bot_message}")
# Yield the incremental response and updated history
yield bot_message, history + [(message, bot_message)], conversation_id

@@ -124,8 +127,8 @@ def chat_wrapper(message, history, media_content, selected_parts, api_endpoint,
save_message(conversation_id, role="assistant", content=bot_message)

except Exception as e:
logging.error(f"Error in chat wrapper: {str(e)}")
yield "An error occurred.", history, conversation_id
logging.error(f"chat_wrapper(): Error in chat wrapper: {str(e)}")
yield "chat_wrapper(): An error occurred.", history, conversation_id


def search_conversations(query):
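With these changes chat_wrapper() is consumed strictly as a generator: for every streamed chunk it yields a (bot_message, updated_history, conversation_id) tuple, where bot_message is the reply accumulated so far. A hedged consumption sketch follows; the positional order mirrors the call in Workflows_tab.py below, and every argument value here is a placeholder rather than something taken from this commit.

# Sketch only: assumes chat_wrapper is importable from the Gradio_UI package.
from App_Function_Libraries.Gradio_UI.Chat_ui import chat_wrapper

final_text = ""
for bot_message, new_history, conversation_id in chat_wrapper(
        "Hello there",                    # message
        [],                               # history
        {},                               # media_content
        [],                               # selected_parts
        "openai",                         # api_endpoint
        "sk-...",                         # api_key (placeholder)
        "",                               # custom_prompt
        None,                             # conversation_id (None -> create one when saving)
        False,                            # save_conversation
        0.7,                              # temperature
        "You are a helpful assistant."):  # system prompt
    final_text = bot_message              # each yield carries the accumulated reply so far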
28 changes: 17 additions & 11 deletions App_Function_Libraries/Gradio_UI/Workflows_tab.py
@@ -47,7 +47,6 @@ def chat_workflows_tab():
with gr.Row():
with gr.Column():
workflow_selector = gr.Dropdown(label="Select Workflow", choices=[wf['name'] for wf in workflows])
- # Refactored API selection dropdown
api_selector = gr.Dropdown(
choices=["None"] + [format_api_name(api) for api in global_api_endpoints],
value=default_value,
@@ -91,8 +90,7 @@ def update_workflow_ui(workflow_name):
logging.error(f"Selected workflow not found: {workflow_name}")
return {"current_step": 0, "max_steps": 0, "conversation_id": None}, "", []

- def process_workflow_step(message, history, context, workflow_name, api_endpoint, api_key, workflow_state,
- save_conv, temp):
+ def process_workflow_step(message, history, context, workflow_name, api_endpoint, api_key, workflow_state, save_conv, temp):
logging.info(f"Process workflow step called with message: {message}")
logging.info(f"Current workflow state: {workflow_state}")
try:
@@ -114,13 +112,20 @@ def process_workflow_step(message, history, context, workflow_name, api_endpoint
full_message = f"{context}\n\nStep {current_step + 1}: {prompt}\nUser: {message}"

logging.info(f"Calling chat_wrapper with full_message: {full_message[:100]}...")
- bot_message, new_history, new_conversation_id = chat_wrapper(
+
+ # Initialize bot message to accumulate streamed response
+ bot_message = ""
+
+ # Call chat_wrapper
+ for chunk, new_history, new_conversation_id in chat_wrapper(
full_message, history, media_content.value, selected_parts.value,
api_endpoint, api_key, "", workflow_state["conversation_id"],
save_conv, temp, "You are a helpful assistant guiding through a workflow."
- )
+ ):
+     bot_message = chunk # Update bot message with the latest chunk
+     yield new_history, workflow_state, gr.update(interactive=True)
logging.info(f"Received bot_message: {bot_message[:50]}...")

logging.info(f"Received bot_message: {bot_message[:100]}...")

next_step = current_step + 1
new_workflow_state = {
@@ -131,15 +136,17 @@ def process_workflow_step(message, history, context, workflow_name, api_endpoint

if next_step >= max_steps:
logging.info("Workflow completed after this step")
- return new_history, new_workflow_state, gr.update(interactive=False)
+ yield history + [(message, bot_message)], new_workflow_state, gr.update(interactive=False)
else:
next_prompt = selected_workflow['prompts'][next_step]
+ new_history = history + [(message, bot_message)]
new_history.append((None, f"Step {next_step + 1}: {next_prompt}"))
logging.info(f"Moving to next step: {next_step}")
- return new_history, new_workflow_state, gr.update(interactive=True)
+ yield new_history, new_workflow_state, gr.update(interactive=True)

except Exception as e:
logging.error(f"Error in process_workflow_step: {str(e)}")
- return history, workflow_state, gr.update(interactive=True)
+ yield history, workflow_state, gr.update(interactive=True)

workflow_selector.change(
update_workflow_ui,
Expand All @@ -149,8 +156,7 @@ def process_workflow_step(message, history, context, workflow_name, api_endpoint

submit_btn.click(
process_workflow_step,
- inputs=[msg, chatbot, context_input, workflow_selector, api_selector, api_key_input, workflow_state,
- save_conversation, temperature],
+ inputs=[msg, chatbot, context_input, workflow_selector, api_selector, api_key_input, workflow_state, save_conversation, temperature],
outputs=[chatbot, workflow_state, msg]
).then(
lambda: gr.update(value=""),
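The substantive change in this file is that process_workflow_step() no longer returns a single result; it drains the chat_wrapper() generator and yields intermediate states, so Gradio can re-render the chatbot while chunks arrive. A minimal, self-contained sketch of that pattern follows; the component names and the fake streaming source are illustrative stand-ins, not code from this repository.

import time
import gradio as gr

def fake_stream(message):
    # Stand-in for a streaming backend such as chat_wrapper(); purely illustrative.
    for word in f"Echoing: {message}".split():
        time.sleep(0.1)
        yield word + " "

def respond(message, history):
    partial = ""
    for chunk in fake_stream(message):
        partial += chunk
        # Each yield re-renders the Chatbot with the text accumulated so far.
        yield history + [(message, partial)]

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    # Binding a generator function to an event streams every yielded value to the outputs.
    msg.submit(respond, inputs=[msg, chatbot], outputs=[chatbot])

demo.launch()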
1 change: 1 addition & 0 deletions Docs/Design/Audio_Pipeline.md
@@ -26,6 +26,7 @@ https://pubs.aip.org/asa/jel/article/4/2/025206/3267247/Evaluating-OpenAI-s-Whis
Transcription:
https://github.com/AugmendTech/treeseg
https://www.arxiv.org/abs/2407.12028
+ https://github.com/Purfview/whisper-standalone-win
https://huggingface.co/spaces/aadnk/faster-whisper-webui
https://huggingface.co/spaces/zhang082799/openai-whisper-large-v3-turbo
https://petewarden.com/2024/10/21/introducing-moonshine-the-new-state-of-the-art-for-speech-to-text/
1 change: 1 addition & 0 deletions Docs/Design/DB_Design.md
@@ -19,6 +19,7 @@ Migrating to sqlite-vec
https://docs.google.com/document/d/1sJ_S2ggfFmtPJupxIO3C1EZAFuDMUfNYcAytissbFMs/edit?tab=t.0#heading=h.xyau1jyb6vyx
https://github.com/Mozilla-Ocho/llamafile/pull/644

+ https://ai.plainenglish.io/top-interview-questions-on-data-modeling-concepts-3d1587c86214
https://briandouglas.ie/sqlite-defaults/
https://phiresky.github.io/blog/2020/sqlite-performance-tuning/
https://kerkour.com/sqlite-for-servers
2 changes: 1 addition & 1 deletion Docs/Design/Diagram_Generation.md
@@ -5,7 +5,7 @@
https://excalidraw.com/
https://www.napkin.ai/
https://github.com/southbridgeai/diagen

+ https://levelup.gitconnected.com/uml-diagrams-a-guide-for-software-engineers-71220ffb775f?source=home_following---------57-1--------------------dd5db0ec_9e4b_478a_951e_a16e50e4d723-------3



2 changes: 1 addition & 1 deletion Docs/Design/Education.md
@@ -13,7 +13,7 @@ https://news.ycombinator.com/item?id=42534931
https://ankiweb.net/shared/info/1531888719
https://bbycroft.net/llm
https://github.com/met4citizen/TalkingHead

+ https://github.com/Rolandjg/skool4free


one2manny
21 changes: 21 additions & 0 deletions Docs/Design/Podcast.md
@@ -0,0 +1,21 @@
# Podcasts


## Introduction


### Link Dump:
https://github.com/FanaHOVA/smol-podcaster
https://huggingface.co/spaces/saq1b/podcastgen/blob/main/app.py
https://huggingface.co/spaces/mozilla-ai/document-to-podcast/blob/main/app.py
https://github.com/aedocw/epub2tts
https://github.com/lamm-mit/PDF2Audio
https://huggingface.co/spaces/bencser/episodegen
https://huggingface.co/spaces/lamm-mit/PDF2Audio
https://github.com/souzatharsis/podcastfy
https://github.com/agituts/gemini-2-podcast
https://github.com/meta-llama/llama-recipes/tree/main/recipes%2Fquickstart%2FNotebookLlama
https://github.com/JarodMica/audiobook_maker



2 changes: 1 addition & 1 deletion Docs/Design/Prompts.md
@@ -9,7 +9,7 @@ https://github.com/microsoft/PromptWizard


https://medium.com/@camauger/crafting-effective-chatgpt-prompts-for-tabletop-roleplaying-games-a-step-by-step-guide-part-1-b81a791d278d

+ https://towardsdatascience.com/how-i-won-singapores-gpt-4-prompt-engineering-competition-34c195a93d41



2 changes: 1 addition & 1 deletion Docs/Design/Researcher.md
@@ -144,7 +144,7 @@ https://www.researchrabbit.ai/
https://github.com/faraz18001/Sales-Llama
https://github.com/memgraph/memgraph
https://github.com/rashadphz/farfalle/tree/main/src/backend

+ https://github.com/SakanaAI/AI-Scientist
https://github.com/rashadphz/farfalle/blob/main/src/backend/agent_search.py
https://github.com/rashadphz/farfalle/blob/main/src/backend/prompts.py
https://github.com/stanford-oval/storm/
2 changes: 1 addition & 1 deletion Docs/Design/Structured_Outputs.md
@@ -2,7 +2,7 @@


https://towardsdatascience.com/diving-deeper-with-structured-outputs-b4a5d280c208

+ https://generativeai.pub/building-multi-agent-llm-systems-with-pydanticai-framework-a-step-by-step-guide-to-create-ai-5e41fbba2608

## Introduction
This page serves as documentation regarding the structured outputs within tldw and provides context/justification for the decisions made within the module.

0 comments on commit 2395271
