Skip to content

Commit

Permalink
Add folder for logs and improve async processing
Browse files Browse the repository at this point in the history
  • Loading branch information
XuhuiZhou committed Jan 4, 2024
1 parent 924e336 commit 7c7cd38
Show file tree
Hide file tree
Showing 2 changed files with 32 additions and 25 deletions.
5 changes: 5 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,11 @@ A redis-stack server is required to run the code. Please follow the [instruction
conda env config vars set REDIS_OM_URL="redis://user:password@host:port"
```

Create a folder to store the logs:
```bash
mkdir logs
```

## Easy Sample Server
You can view an episode demo with default parameters using the following command:
```python
Expand Down
52 changes: 27 additions & 25 deletions examples/evaluate_existing_episode.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,10 @@
import subprocess
from datetime import datetime
from logging import FileHandler

from tqdm.asyncio import tqdm_asyncio
import gin
import typer
import typing
from experiment_eval import _iterate_env_agent_combo_not_in_db
from rich import print
from rich.logging import RichHandler
Expand Down Expand Up @@ -76,49 +77,50 @@ def run_async_server_in_batch_aevaluate(
logging.info(
f"Running batch of {batch_size} episodes: {episode_batch}"
)
for episode in episode_batch:
asyncio.run(
aevaluate_one_episode(
episode=episode,
model=model,
tag=tag,
push_to_db=push_to_db,
)
episode_futures = [
aevaluate_one_episode(
episode=episode,
model=model,
tag=tag,
push_to_db=push_to_db,
)
for episode in episode_batch
]
asyncio.run(tqdm_asyncio.gather(*episode_futures, desc="Running one batch"))

episode_batch = []
else:
if episode_batch:
logging.info(
f"Running batch of {batch_size} episodes: {episode_batch}"
)
for episode in episode_batch:
asyncio.run(
episode_futures = [
aevaluate_one_episode(
episode=episode,
model=model,
tag=tag,
push_to_db=push_to_db,
)
)
for episode in episode_batch
]
asyncio.run(tqdm_asyncio.gather(*episode_futures, desc="Running one batch"))
return


annotated_episodes_pks = [
AnnotationForEpisode.get(anno).episode
for anno in AnnotationForEpisode.all_pks()
]
annotated_episodes_pks = list(set(annotated_episodes_pks))


@app.command()
def run_server(
tag: str = typer.Option("reeval_llama2"),
model: LLM_Name = typer.Option("togethercomputer/llama-2-70b-chat"),
batch_size: int = typer.Option(5),
push_to_db: bool = typer.Option(True),
verbose: bool = typer.Option(False),
tag: str ="reeval_llama2",
model: str = "togethercomputer/llama-2-70b-chat", # Why typer does not accept LLM_Name?
batch_size: int = 10,
push_to_db: bool = True,
verbose: bool = False,
) -> None:

annotated_episodes_pks = [
AnnotationForEpisode.get(anno).episode
for anno in AnnotationForEpisode.all_pks()
]
annotated_episodes_pks = list(set(annotated_episodes_pks))
model = typing.cast(LLM_Name, model)
# Call the function with the specified parameters
run_async_server_in_batch_aevaluate(
tag=tag,
Expand Down

0 comments on commit 7c7cd38

Please sign in to comment.