diff --git a/config.py b/config.py
index f22cf20cf..f654054e2 100644
--- a/config.py
+++ b/config.py
@@ -33,7 +33,7 @@
 # [step 3]>> Model selection (note: LLM_MODEL is the default model; it *must* be included in the AVAIL_LLM_MODELS list)
 LLM_MODEL = "gpt-3.5-turbo-16k" # options ↓↓↓
 AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-preview", "gpt-4-turbo", "gpt-4-turbo-2024-04-09",
-                    "gpt-4o", "gpt-4o-2024-05-13",
+                    "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-mini",
                     "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
                     "gpt-4", "gpt-4-32k", "azure-gpt-4", "glm-4", "glm-4v", "glm-3-turbo",
                     "gemini-pro", "chatglm3"
diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py
index 72031f298..dc542791d 100644
--- a/request_llms/bridge_all.py
+++ b/request_llms/bridge_all.py
@@ -247,7 +247,7 @@ def decode(self, *args, **kwargs):
         "token_cnt": get_token_num_gpt35,
     },
 
-    "gpt-3.5-turbo-1106": { #16k
+    "gpt-3.5-turbo-1106": { # 16k
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
         "endpoint": openai_endpoint,
@@ -256,7 +256,7 @@ def decode(self, *args, **kwargs):
         "token_cnt": get_token_num_gpt35,
     },
 
-    "gpt-3.5-turbo-0125": { #16k
+    "gpt-3.5-turbo-0125": { # 16k
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
         "endpoint": openai_endpoint,
@@ -283,7 +283,7 @@ def decode(self, *args, **kwargs):
         "token_cnt": get_token_num_gpt4,
     },
 
-    "gpt-4-turbo-preview": {
+    "gpt-4o": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
         "endpoint": openai_endpoint,
@@ -293,7 +293,17 @@ def decode(self, *args, **kwargs):
         "token_cnt": get_token_num_gpt4,
     },
 
-    "gpt-4-1106-preview": {
+    "gpt-4o-mini": {
+        "fn_with_ui": chatgpt_ui,
+        "fn_without_ui": chatgpt_noui,
+        "endpoint": openai_endpoint,
+        "has_multimodal_capacity": True,
+        "max_token": 128000,
+        "tokenizer": tokenizer_gpt4,
+        "token_cnt": get_token_num_gpt4,
+    },
+
+    "gpt-4o-2024-05-13": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
         "has_multimodal_capacity": True,
@@ -303,7 +313,7 @@ def decode(self, *args, **kwargs):
         "token_cnt": get_token_num_gpt4,
     },
 
-    "gpt-4-0125-preview": {
+    "gpt-4-turbo-preview": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
         "endpoint": openai_endpoint,
@@ -312,7 +322,7 @@ def decode(self, *args, **kwargs):
         "token_cnt": get_token_num_gpt4,
     },
 
-    "gpt-4-turbo": {
+    "gpt-4-1106-preview": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
         "endpoint": openai_endpoint,
@@ -321,7 +331,7 @@ def decode(self, *args, **kwargs):
         "token_cnt": get_token_num_gpt4,
     },
 
-    "gpt-4-turbo-2024-04-09": {
+    "gpt-4-0125-preview": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
         "endpoint": openai_endpoint,
@@ -330,24 +340,24 @@ def decode(self, *args, **kwargs):
         "token_cnt": get_token_num_gpt4,
     },
 
-    "gpt-4o": {
+    "gpt-4-turbo": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
         "has_multimodal_capacity": True,
         "endpoint": openai_endpoint,
         "max_token": 128000,
-        "tokenizer": tokenizer_gpt4o,
-        "token_cnt": get_token_num_gpt4o,
+        "tokenizer": tokenizer_gpt4,
+        "token_cnt": get_token_num_gpt4,
     },
-
-    "gpt-4o-2024-05-13": {
+
+    "gpt-4-turbo-2024-04-09": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
         "has_multimodal_capacity": True,
         "endpoint": openai_endpoint,
         "max_token": 128000,
-        "tokenizer": tokenizer_gpt4o,
-        "token_cnt": get_token_num_gpt4o,
+        "tokenizer": tokenizer_gpt4,
+        "token_cnt": get_token_num_gpt4,
     },
 
     "gpt-3.5-random": {
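
For context, here is a minimal, self-contained sketch (not part of the patch) of how a registry entry like the new `"gpt-4o-mini"` record might be consumed. The dict name `model_info`, the stub token counter, the literal endpoint URL, and `select_model` are illustrative assumptions introduced here; only the entry keys (`"endpoint"`, `"has_multimodal_capacity"`, `"max_token"`, `"token_cnt"`) and the 128k limit mirror the diff above.

```python
# Illustrative sketch only: `model_info`, `select_model`, and the stub counter
# are assumptions for demonstration, not code from the repository.

def get_token_num_gpt4(txt: str) -> int:
    # Stub standing in for the repo's tiktoken-based counter.
    return len(txt.split())

model_info = {
    "gpt-4o-mini": {
        "endpoint": "https://api.openai.com/v1/chat/completions",  # assumed value of openai_endpoint
        "has_multimodal_capacity": True,
        "max_token": 128000,
        "token_cnt": get_token_num_gpt4,
    },
}

def select_model(name: str) -> dict:
    # Mirrors the config.py rule: a usable LLM_MODEL must be registered.
    if name not in model_info:
        raise KeyError(f"{name} is not registered in model_info")
    return model_info[name]

if __name__ == "__main__":
    info = select_model("gpt-4o-mini")
    prompt = "hello world"
    assert info["token_cnt"](prompt) <= info["max_token"]
    print(info["endpoint"], info["max_token"])
```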