Create simpler example open-deep-research #534

Merged
merged 1 commit on Feb 7, 2025
examples/open_deep_research/run.py (192 changes: 19 additions & 173 deletions)
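In short, this PR strips the GAIA evaluation harness out of run.py (dataset loading, the thread pool, JSONL answer logging) and leaves a single-question entry point: one --question argument, one manager-agent run, one printed answer. A consolidated sketch of the resulting script follows the diff below.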
@@ -1,21 +1,9 @@
import argparse
import json
import os
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime
from pathlib import Path
from typing import List

import datasets
import pandas as pd
from dotenv import load_dotenv
from huggingface_hub import login
from scripts.reformulator import prepare_response
from scripts.run_agents import (
get_single_file_description,
get_zip_description,
)
from scripts.text_inspector_tool import TextInspectorTool
from scripts.text_web_browser import (
ArchiveSearchTool,
@@ -28,14 +16,11 @@
VisitTool,
)
from scripts.visual_qa import visualizer
from tqdm import tqdm

from smolagents import (
MANAGED_AGENT_PROMPT,
CodeAgent,
# HfApiModel,
LiteLLMModel,
Model,
ToolCallingAgent,
)

@@ -74,40 +59,16 @@

def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--concurrency", type=int, default=8)
parser.add_argument("--model-id", type=str, default="o1")
parser.add_argument("--api-base", type=str, default=None)
parser.add_argument("--run-name", type=str, required=True)
parser.add_argument(
"--question", type=str, default="How many studio albums did Mercedes Sosa release before 2007?"
)
return parser.parse_args()


### IMPORTANT: EVALUATION SWITCHES

print("Make sure you deactivated Tailscale VPN, else some URLs will be blocked!")

USE_OPEN_MODELS = False

SET = "validation"

custom_role_conversions = {"tool-call": "assistant", "tool-response": "user"}

### LOAD EVALUATION DATASET

eval_ds = datasets.load_dataset("gaia-benchmark/GAIA", "2023_all")[SET]
eval_ds = eval_ds.rename_columns({"Question": "question", "Final answer": "true_answer", "Level": "task"})


def preprocess_file_paths(row):
if len(row["file_name"]) > 0:
row["file_name"] = f"data/gaia/{SET}/" + row["file_name"]
return row


eval_ds = eval_ds.map(preprocess_file_paths)
eval_df = pd.DataFrame(eval_ds)
print("Loaded evaluation dataset:")
print(eval_df["task"].value_counts())

user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.0.0"

BROWSER_CONFIG = {
@@ -123,9 +84,17 @@ def preprocess_file_paths(row):
os.makedirs(f"./{BROWSER_CONFIG['downloads_folder']}", exist_ok=True)


def create_agent_hierarchy(model: Model):
def main():
args = parse_args()
text_limit = 100000
ti_tool = TextInspectorTool(model, text_limit)

model = LiteLLMModel(
args.model_id,
custom_role_conversions=custom_role_conversions,
max_completion_tokens=8192,
reasoning_effort="high",
)
document_inspection_tool = TextInspectorTool(model, text_limit)

browser = SimpleTextBrowser(**BROWSER_CONFIG)

@@ -153,147 +122,24 @@ def create_agent_hierarchy(model: Model):
Your request must be a real sentence, not a google search! Like "Find me this information (...)" rather than a few keywords.
""",
provide_run_summary=True,
managed_agent_prompt=MANAGED_AGENT_PROMPT
+ """You can navigate to .txt online files.
If a non-html page is in another format, especially .pdf or a Youtube video, use tool 'inspect_file_as_text' to inspect it.
Additionally, if after some searching you find out that you need more information to answer the question, you can use `final_answer` with your request for clarification as argument to request for more information.""",
)
text_webbrowser_agent.prompt_templates["managed_agent"]["task"] += """You can navigate to .txt online files.
If a non-html page is in another format, especially .pdf or a Youtube video, use tool 'inspect_file_as_text' to inspect it.
Additionally, if after some searching you find out that you need more information to answer the question, you can use `final_answer` with your request for clarification as argument to request for more information."""

manager_agent = CodeAgent(
model=model,
tools=[visualizer, ti_tool],
tools=[visualizer, document_inspection_tool],
max_steps=12,
verbosity_level=2,
additional_authorized_imports=AUTHORIZED_IMPORTS,
planning_interval=4,
managed_agents=[text_webbrowser_agent],
)
return manager_agent


def append_answer(entry: dict, jsonl_file: str) -> None:
jsonl_file = Path(jsonl_file)
jsonl_file.parent.mkdir(parents=True, exist_ok=True)
with append_answer_lock, open(jsonl_file, "a", encoding="utf-8") as fp:
fp.write(json.dumps(entry) + "\n")
assert os.path.exists(jsonl_file), "File not found!"
print("Answer exported to file:", jsonl_file.resolve())


def answer_single_question(example, model_id, answers_file, visual_inspection_tool):
model = LiteLLMModel(
model_id,
custom_role_conversions=custom_role_conversions,
max_completion_tokens=8192,
reasoning_effort="high",
)
# model = HfApiModel("Qwen/Qwen2.5-72B-Instruct", provider="together")
# "https://lnxyuvj02bpe6mam.us-east-1.aws.endpoints.huggingface.cloud",
# custom_role_conversions=custom_role_conversions,
# # provider="sambanova",
# max_tokens=8096,
# )
document_inspection_tool = TextInspectorTool(model, 100000)

agent = create_agent_hierarchy(model)

augmented_question = """You have one question to answer. It is paramount that you provide a correct answer.
Give it all you can: I know for a fact that you have access to all the relevant tools to solve it and find the correct answer (the answer does exist). Failure or 'I cannot answer' or 'None found' will not be tolerated, success will be rewarded.
Run verification steps if that's needed, you must make sure you find the correct answer!
Here is the task:
""" + example["question"]

if example["file_name"]:
if ".zip" in example["file_name"]:
prompt_use_files = "\n\nTo solve the task above, you will have to use these attached files:\n"
prompt_use_files += get_zip_description(
example["file_name"], example["question"], visual_inspection_tool, document_inspection_tool
)
else:
prompt_use_files = "\n\nTo solve the task above, you will have to use this attached file:"
prompt_use_files += get_single_file_description(
example["file_name"], example["question"], visual_inspection_tool, document_inspection_tool
)
augmented_question += prompt_use_files

start_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
try:
# Run agent 🚀
final_result = agent.run(augmented_question)

agent_memory = agent.write_memory_to_messages(summary_mode=True)

final_result = prepare_response(augmented_question, agent_memory, reformulation_model=model)

output = str(final_result)
for memory_step in agent.memory.steps:
memory_step.model_input_messages = None
intermediate_steps = [str(step) for step in agent.memory.steps]

# Check for parsing errors which indicate the LLM failed to follow the required format
parsing_error = True if any(["AgentParsingError" in step for step in intermediate_steps]) else False

# check if iteration limit exceeded
iteration_limit_exceeded = True if "Agent stopped due to iteration limit or time limit." in output else False
raised_exception = False

except Exception as e:
print("Error on ", augmented_question, e)
output = None
intermediate_steps = []
parsing_error = False
iteration_limit_exceeded = False
exception = e
raised_exception = True
end_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
annotated_example = {
"agent_name": model.model_id,
"question": example["question"],
"augmented_question": augmented_question,
"prediction": output,
"intermediate_steps": intermediate_steps,
"parsing_error": parsing_error,
"iteration_limit_exceeded": iteration_limit_exceeded,
"agent_error": str(exception) if raised_exception else None,
"start_time": start_time,
"end_time": end_time,
"task": example["task"],
"task_id": example["task_id"],
"true_answer": example["true_answer"],
}
append_answer(annotated_example, answers_file)


def get_examples_to_answer(answers_file, eval_ds) -> List[dict]:
print(f"Loading answers from {answers_file}...")
try:
done_questions = pd.read_json(answers_file, lines=True)["question"].tolist()
print(f"Found {len(done_questions)} previous results!")
except Exception as e:
print("Error when loading records: ", e)
print("No usable records! ▶️ Starting new.")
done_questions = []
return [line for line in eval_ds.to_list() if line["question"] not in done_questions]


def main():
args = parse_args()
print(f"Starting run with arguments: {args}")

answers_file = f"output/{SET}/{args.run_name}.jsonl"
tasks_to_run = get_examples_to_answer(answers_file, eval_ds)

with ThreadPoolExecutor(max_workers=args.concurrency) as exe:
futures = [
exe.submit(answer_single_question, example, args.model_id, answers_file, visualizer)
for example in tasks_to_run
]
for f in tqdm(as_completed(futures), total=len(tasks_to_run), desc="Processing tasks"):
f.result()
answer = manager_agent.run(args.question)

# for example in tasks_to_run:
# answer_single_question(example, args.model_id, answers_file, visualizer)
print("All tasks processed.")
print(f"Got this answer: {answer}")


if __name__ == "__main__":
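
For orientation, the whole simplified script reduces to roughly the following. This is a sketch reconstructed from the added and surviving lines above, not the verbatim file: the full web-tool list, the long prompts, AUTHORIZED_IMPORTS, and BROWSER_CONFIG sit in collapsed hunks, so they are abbreviated, stubbed, or omitted here, and the browsing agent's name and description are illustrative placeholders.

# Sketch of the post-PR run.py, reconstructed from the diff above.
# Anything taken from a collapsed hunk is marked as an assumption.
import argparse
import os

from dotenv import load_dotenv
from huggingface_hub import login
from scripts.text_inspector_tool import TextInspectorTool
from scripts.text_web_browser import ArchiveSearchTool, SimpleTextBrowser, VisitTool
from scripts.visual_qa import visualizer

from smolagents import CodeAgent, LiteLLMModel, ToolCallingAgent

load_dotenv(override=True)
login(os.getenv("HF_TOKEN"))

# Map tool messages onto assistant/user turns for providers without a tool
# role (taken verbatim from the diff).
custom_role_conversions = {"tool-call": "assistant", "tool-response": "user"}

# BROWSER_CONFIG is collapsed in the diff; only the downloads_folder key is
# visible above, and its value here is an assumption.
BROWSER_CONFIG = {"downloads_folder": "downloads"}
os.makedirs(f"./{BROWSER_CONFIG['downloads_folder']}", exist_ok=True)


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model-id", type=str, default="o1")
    parser.add_argument(
        "--question", type=str, default="How many studio albums did Mercedes Sosa release before 2007?"
    )
    return parser.parse_args()


def main():
    args = parse_args()
    text_limit = 100000

    # One LiteLLM-backed model is shared by both agents and the inspector tool.
    model = LiteLLMModel(
        args.model_id,
        custom_role_conversions=custom_role_conversions,
        max_completion_tokens=8192,
        reasoning_effort="high",
    )
    document_inspection_tool = TextInspectorTool(model, text_limit)
    browser = SimpleTextBrowser(**BROWSER_CONFIG)

    # Browsing worker agent. The full tool list, name, and description are in
    # collapsed hunks, so the ones given here are illustrative.
    text_webbrowser_agent = ToolCallingAgent(
        model=model,
        tools=[VisitTool(browser), ArchiveSearchTool(browser)],
        name="search_agent",
        description="Browses the web to answer sub-questions.",
        provide_run_summary=True,
    )
    text_webbrowser_agent.prompt_templates["managed_agent"]["task"] += (
        "\nYou can navigate to .txt online files. If a non-html page is in another "
        "format, especially .pdf or a Youtube video, use tool 'inspect_file_as_text' to inspect it."
    )

    # Manager agent that writes code, inspects documents, and delegates browsing.
    manager_agent = CodeAgent(
        model=model,
        tools=[visualizer, document_inspection_tool],
        max_steps=12,
        verbosity_level=2,
        planning_interval=4,
        managed_agents=[text_webbrowser_agent],
    )

    answer = manager_agent.run(args.question)
    print(f"Got this answer: {answer}")


if __name__ == "__main__":
    main()

Under these assumptions, running the example is just: python run.py --model-id o1 --question "...". Instead of fanning GAIA tasks out over a thread pool and appending predictions to a JSONL file, the script runs the manager agent once on the given question and prints the answer.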