Merge pull request #1495 from puffo/litellm_ollama_chat_fix
fix(ollama_chat.py): use tiktoken as backup for prompt token counting
krrishdholakia authored Jan 18, 2024
2 parents 143e225 + becff36 commit 658fd4d
Showing 1 changed file with 2 additions and 2 deletions.
4 changes: 2 additions & 2 deletions litellm/llms/ollama_chat.py
@@ -220,7 +220,7 @@ def get_ollama_response(
model_response["choices"][0]["message"] = response_json["message"]
model_response["created"] = int(time.time())
model_response["model"] = "ollama/" + model
prompt_tokens = response_json["prompt_eval_count"] # type: ignore
prompt_tokens = response_json.get("prompt_eval_count", len(encoding.encode(prompt))) # type: ignore
completion_tokens = response_json["eval_count"]
model_response["usage"] = litellm.Usage(
prompt_tokens=prompt_tokens,
@@ -320,7 +320,7 @@ async def ollama_acompletion(url, data, model_response, encoding, logging_obj):
model_response["choices"][0]["message"] = response_json["message"]
model_response["created"] = int(time.time())
model_response["model"] = "ollama/" + data["model"]
prompt_tokens = response_json["prompt_eval_count"] # type: ignore
prompt_tokens = response_json.get("prompt_eval_count", len(encoding.encode(prompt))) # type: ignore
completion_tokens = response_json["eval_count"]
model_response["usage"] = litellm.Usage(
prompt_tokens=prompt_tokens,
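
Both hunks make the same change: instead of indexing response_json["prompt_eval_count"] directly, which raises a KeyError when Ollama omits that field from its response, the code falls back to counting the prompt tokens locally with the encoding object. Below is a minimal standalone sketch of that fallback, assuming encoding is a tiktoken encoding (as the commit title suggests) and using an illustrative response dict rather than a real Ollama reply; the names here are for demonstration, not litellm's exact internals.

import tiktoken

# Stand-in for the `encoding` argument; the commit title says tiktoken
# is used as the backup tokenizer.
encoding = tiktoken.get_encoding("cl100k_base")

prompt = "Why is the sky blue?"

# Illustrative Ollama response that omits "prompt_eval_count".
response_json = {
    "message": {"role": "assistant", "content": "Rayleigh scattering."},
    "eval_count": 42,
}

# Old behavior: response_json["prompt_eval_count"] -> KeyError.
# New behavior: fall back to the locally computed token count.
prompt_tokens = response_json.get("prompt_eval_count", len(encoding.encode(prompt)))
completion_tokens = response_json["eval_count"]
print(prompt_tokens, completion_tokens)

The dict.get fallback only runs the local token count when Ollama does not report prompt_eval_count, so the reported usage stays identical to the old behavior whenever the field is present.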
