(fix) 'utf-8' codec can't encode characters error on OpenAI (BerriAI#7018)

* test_openai_multilingual

* pin httpx

* fix openai pyproject

* test_multilingual_requests

* TestOpenAIChatCompletion

* fix test anthropic completion
ishaan-jaff authored and rajatvig committed Jan 15, 2025
1 parent 5d9e0b4 commit 06f1586
Showing 6 changed files with 39 additions and 5 deletions.
8 changes: 4 additions & 4 deletions poetry.lock

Some generated files are not rendered by default.

3 changes: 2 additions & 1 deletion pyproject.toml
@@ -17,7 +17,8 @@ documentation = "https://docs.litellm.ai"

[tool.poetry.dependencies]
python = ">=3.8.1,<4.0, !=3.9.7"
=1.54.0"">
- openai = ">=1.54.0"
+ httpx = ">=0.23.0,<0.28.0"
+ openai = ">=1.55.3"
python-dotenv = ">=0.2.0"
tiktoken = ">=0.7.0"
importlib-metadata = ">=6.8.0"
1 change: 1 addition & 0 deletions requirements.txt
@@ -1,5 +1,6 @@
# LITELLM PROXY DEPENDENCIES #
anyio==4.4.0 # openai + http req.
+ httpx==0.27.0 # Pin Httpx dependency
openai==1.55.3 # openai req.
fastapi==0.111.0 # server dep
backoff==2.2.1 # server dep
14 changes: 14 additions & 0 deletions tests/llm_translation/base_llm_unit_tests.py
@@ -63,6 +63,20 @@ def test_message_with_name(self):
        response = litellm.completion(**base_completion_call_args, messages=messages)
        assert response is not None

    def test_multilingual_requests(self):
        """
        Tests that the provider can handle multilingual requests and invalid utf-8 sequences
        Context: https://github.com/openai/openai-python/issues/1921
        """
        base_completion_call_args = self.get_base_completion_call_args()
        response = litellm.completion(
            **base_completion_call_args,
            messages=[{"role": "user", "content": "你好世界!\ud83e, ö"}],
        )
        print("multilingual response: ", response)
        assert response is not None

    @pytest.mark.parametrize(
        "response_format",
        [
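A note on the test content above: "\ud83e" is an unpaired UTF-16 surrogate, which CPython refuses to encode to UTF-8, producing exactly the error in the commit title. A minimal standalone sketch of that failure mode and one possible sanitization, not part of this commit's diff:

text = "你好世界!\ud83e, ö"  # same content as the test message above

try:
    text.encode("utf-8")
except UnicodeEncodeError as err:
    # 'utf-8' codec can't encode character '\ud83e' ... surrogates not allowed
    print(err)

# Round-tripping with errors="replace" swaps the lone surrogate for "?" while
# leaving the valid multilingual characters intact.
safe_text = text.encode("utf-8", errors="replace").decode("utf-8")
print(safe_text)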
8 changes: 8 additions & 0 deletions tests/llm_translation/test_anthropic_completion.py
@@ -706,6 +706,14 @@ def test_tool_call_no_arguments(self, tool_call_no_arguments):
        result = convert_to_anthropic_tool_invoke([tool_call_no_arguments])
        print(result)

    def test_multilingual_requests(self):
        """
        Anthropic API raises a 400 BadRequest error when the request contains invalid utf-8 sequences.
        Todo: if litellm.modify_params is True ensure it's a valid utf-8 sequence
        """
        pass


def test_convert_tool_response_to_message_with_values():
    """Test converting a tool response with 'values' key to a message"""
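The Todo in the new Anthropic test above is left unimplemented in this commit. A hedged sketch of one way it could be handled, assuming the behavior is gated on litellm.modify_params; the helper names are hypothetical and not existing litellm functions:

import litellm


def _ensure_valid_utf8(text: str) -> str:
    # Hypothetical helper: drop unpaired surrogates (e.g. the \ud83e escape used
    # in the base test) by round-tripping through UTF-8 with errors="replace".
    return text.encode("utf-8", errors="replace").decode("utf-8")


def _maybe_sanitize_message_content(content: str) -> str:
    # Only rewrite user input when the user has opted into parameter
    # modification; otherwise let the Anthropic API return its 400 BadRequest.
    if litellm.modify_params:
        return _ensure_valid_utf8(content)
    return content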
10 changes: 10 additions & 0 deletions tests/llm_translation/test_openai.py
@@ -15,6 +15,7 @@

import litellm
from litellm import Choices, Message, ModelResponse
from base_llm_unit_tests import BaseLLMChatTest


def test_openai_prediction_param():
@@ -268,3 +269,12 @@ async def test_vision_with_custom_model():
    ]
    assert request_body["model"] == "my-custom-model"
    assert request_body["max_tokens"] == 10


class TestOpenAIChatCompletion(BaseLLMChatTest):
    def get_base_completion_call_args(self) -> dict:
        return {"model": "gpt-4o-mini"}

    def test_tool_call_no_arguments(self, tool_call_no_arguments):
        """Test that tool calls with no arguments is translated correctly. Relevant issue: https://github.com/BerriAI/litellm/issues/6833"""
        pass
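As a usage note, the new TestOpenAIChatCompletion class inherits test_multilingual_requests (and the rest of the BaseLLMChatTest suite), so pytest collects those cases automatically. A minimal sketch of reusing the same base class for another provider; the class and model names below are illustrative, not part of this commit:

from base_llm_unit_tests import BaseLLMChatTest


class TestExampleProviderChatCompletion(BaseLLMChatTest):
    # Hypothetical subclass: implementing get_base_completion_call_args is all
    # that is needed for the shared tests, including test_multilingual_requests,
    # to run against the configured provider.
    def get_base_completion_call_args(self) -> dict:
        return {"model": "example-provider/example-model"}  # illustrative model id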
