support chatglm3
binary-husky committed Nov 7, 2023
1 parent 5192d31 commit 8e375b0
Showing 4 changed files with 18 additions and 13 deletions.
6 changes: 5 additions & 1 deletion config.py
```diff
@@ -90,11 +90,15 @@
 AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
                     "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k',
                     "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4",
-                    "chatglm", "moss", "newbing", "claude-2"]
+                    "chatglm3", "moss", "newbing", "claude-2"]
 # P.S. other available models include ["zhipuai", "qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random"
 # "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"]
 
 
+# Which models the "query multiple GPT models" plugin in the UI should use; pick from AVAIL_LLM_MODELS and separate models with `&`, e.g. "gpt-3.5-turbo&chatglm3&azure-gpt-4"
+MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3"
+
+
 # Baidu Qianfan (LLM_MODEL="qianfan")
 BAIDU_CLOUD_API_KEY = ''
 BAIDU_CLOUD_SECRET_KEY = ''
```
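The new `MULTI_QUERY_LLM_MODELS` key is a plain `&`-separated string, so downstream code only needs to split it to get one model name per parallel request. A minimal sketch of that convention (not part of the commit; the helper name is hypothetical):

```python
def split_multi_query_models(models: str) -> list:
    """Split an `&`-separated model string into individual model names."""
    return [m.strip() for m in models.split('&') if m.strip()]

# e.g. the default value from config.py above:
assert split_multi_query_models("gpt-3.5-turbo&chatglm3") == ["gpt-3.5-turbo", "chatglm3"]
```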
7 changes: 4 additions & 3 deletions crazy_functions/询问多个大语言模型.py
```diff
@@ -1,4 +1,4 @@
-from toolbox import CatchException, update_ui
+from toolbox import CatchException, update_ui, get_conf
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 import datetime
 @CatchException
@@ -13,11 +13,12 @@ def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
         web_port        the port the application is currently running on
     """
     history = []    # clear the history to avoid input overflow
-    chatbot.append((txt, "正在同时咨询ChatGPT和ChatGLM……"))
+    MULTI_QUERY_LLM_MODELS = get_conf('MULTI_QUERY_LLM_MODELS')
+    chatbot.append((txt, "正在同时咨询" + MULTI_QUERY_LLM_MODELS))
     yield from update_ui(chatbot=chatbot, history=history) # refresh the UI; requesting the models takes a while, so update the page promptly first
 
     # llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo&api2d-gpt-3.5-turbo' # any number of LLM backends, separated by `&`
-    llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo' # any number of LLM backends, separated by `&`
+    llm_kwargs['llm_model'] = MULTI_QUERY_LLM_MODELS # any number of LLM backends, separated by `&`
     gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
         inputs=txt, inputs_show_user=txt,
         llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
```
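Because the model list now comes from `get_conf` rather than a hardcoded string, changing which models answer in parallel is a pure configuration edit. For example, assuming the project's usual pattern of overriding `config.py` with a `config_private.py` (both files appear in `blocked_paths` in main.py below), a user could write:

```python
# config_private.py -- hypothetical user override; values are illustrative only.
# Every model named here must also appear in AVAIL_LLM_MODELS.
MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3&azure-gpt-4"
```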
4 changes: 2 additions & 2 deletions main.py
```diff
@@ -433,7 +433,7 @@ def warm_up_mods(): time.sleep(4); warm_up_modules()
         server_port=PORT,
         favicon_path=os.path.join(os.path.dirname(__file__), "docs/logo.png"),
         auth=AUTHENTICATION if len(AUTHENTICATION) != 0 else None,
-        blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile","gpt_log/admin"])
+        blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile",f"{PATH_LOGGING}/admin"])
 
     # To run the app under a sub-path
     # CUSTOM_PATH = get_conf('CUSTOM_PATH')
@@ -442,7 +442,7 @@ def warm_up_mods(): time.sleep(4); warm_up_modules()
     #     run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH)
     # else:
     #     demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png",
-    #                 blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile"])
+    #                 blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile",f"{PATH_LOGGING}/admin"])
 
 if __name__ == "__main__":
     main()
```
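The hardcoded `gpt_log/admin` entry becomes an f-string over `PATH_LOGGING`, so the blocked admin directory follows the configured log location instead of assuming the default. A minimal sketch of the resulting value, assuming `PATH_LOGGING` is read via `get_conf` as in the plugin above and defaults to `gpt_log`:

```python
from toolbox import get_conf  # same accessor the plugin uses; import path assumed

PATH_LOGGING = get_conf('PATH_LOGGING')  # assumed default: "gpt_log"
# Gradio refuses to serve files under blocked_paths, keeping API keys
# (config_private.py) and the admin log directory away from the web UI.
blocked_paths = ["config.py", "config_private.py", "docker-compose.yml",
                 "Dockerfile", f"{PATH_LOGGING}/admin"]
```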
14 changes: 7 additions & 7 deletions request_llms/local_llm_class.py
```diff
@@ -5,18 +5,18 @@
 from contextlib import redirect_stdout
 from request_llms.queued_pipe import create_queue_pipe
 
-class DebugLock(object):
+class ThreadLock(object):
     def __init__(self):
         self._lock = threading.Lock()
 
     def acquire(self):
-        print("acquiring", self)
+        # print("acquiring", self)
         #traceback.print_tb
         self._lock.acquire()
-        print("acquired", self)
+        # print("acquired", self)
 
     def release(self):
-        print("released", self)
+        # print("released", self)
         #traceback.print_tb
         self._lock.release()
 
@@ -85,7 +85,7 @@ def __init__(self):
         self.is_main_process = False # state wrap for child process
         self.start()
         self.is_main_process = True # state wrap for child process
-        self.threadLock = DebugLock()
+        self.threadLock = ThreadLock()
 
     def get_state(self):
         # ⭐run in main process
@@ -159,7 +159,7 @@ def run(self):
             try:
                 for response_full in self.llm_stream_generator(**kwargs):
                     self.child.send(response_full)
-                    print('debug' + response_full)
+                    # print('debug' + response_full)
                 self.child.send('[Finish]')
                 # request finished; start the next loop
             except:
@@ -200,7 +200,7 @@ def stream_chat(self, **kwargs):
             if res.startswith(self.std_tag):
                 new_output = res[len(self.std_tag):]
                 std_out = std_out[:std_out_clip_len]
-                print(new_output, end='')
+                # print(new_output, end='')
                 std_out = new_output + std_out
                 yield self.std_tag + '\n```\n' + std_out + '\n```\n'
             elif res == '[Finish]':
```
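With its trace prints commented out, the renamed `ThreadLock` is now a plain mutex wrapper around `threading.Lock` (the prints can be uncommented to restore the old `DebugLock` tracing). A sketch of the class as it now stands, plus a hypothetical context-manager extension that is not in the commit but would let callers replace paired `acquire()`/`release()` calls with a `with` block:

```python
import threading

class ThreadLock(object):
    """Plain mutex wrapper; this commit silences its former debug prints."""
    def __init__(self):
        self._lock = threading.Lock()

    def acquire(self):
        self._lock.acquire()

    def release(self):
        self._lock.release()

    # Hypothetical addition (not in the commit): context-manager protocol.
    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.release()
```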
