Commit

Merge remote-tracking branch 'upstream/master'
awwaawwa committed Jul 4, 2024
2 parents 32e41aa + 0c6c357 commit 762b532
Showing 14 changed files with 71 additions and 14 deletions.
2 changes: 1 addition & 1 deletion config.py
@@ -43,7 +43,7 @@
 # AVAIL_LLM_MODELS = [
 # "glm-4-0520", "glm-4-air", "glm-4-airx", "glm-4-flash",
 # "qianfan", "deepseekcoder",
-# "spark", "sparkv2", "sparkv3", "sparkv3.5",
+# "spark", "sparkv2", "sparkv3", "sparkv3.5", "sparkv4",
 # "qwen-turbo", "qwen-plus", "qwen-max", "qwen-local",
 # "moonshot-v1-128k", "moonshot-v1-32k", "moonshot-v1-8k",
 # "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-turbo-0125", "gpt-4o-2024-05-13"
8 changes: 6 additions & 2 deletions crazy_functions/latex_fns/latex_pickle_io.py
@@ -4,12 +4,14 @@
 class SafeUnpickler(pickle.Unpickler):

     def get_safe_classes(self):
-        from .latex_actions import LatexPaperFileGroup, LatexPaperSplit
+        from crazy_functions.latex_fns.latex_actions import LatexPaperFileGroup, LatexPaperSplit
+        from crazy_functions.latex_fns.latex_toolbox import LinkedListNode
         # Define the classes that are allowed to be unpickled
         safe_classes = {
             # Add other safe classes here
             'LatexPaperFileGroup': LatexPaperFileGroup,
-            'LatexPaperSplit' : LatexPaperSplit,
+            'LatexPaperSplit': LatexPaperSplit,
+            'LinkedListNode': LinkedListNode,
         }
         return safe_classes

@@ -20,6 +22,8 @@ def find_class(self, module, name):
         for class_name in self.safe_classes.keys():
             if (class_name in f'{module}.{name}'):
                 match_class_name = class_name
+        if module == 'numpy' or module.startswith('numpy.'):
+            return super().find_class(module, name)
         if match_class_name is not None:
             return self.safe_classes[match_class_name]
         # Raise an exception if an unauthorized class is requested
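
Aside: this file implements a standard pickle allowlist. find_class is consulted for every global the payload references, so rejecting unknown names there prevents arbitrary classes from being instantiated. A minimal, self-contained sketch of the pattern (the Point and AllowlistUnpickler names below are illustrative, not the project's):

    import io
    import pickle

    class Point:
        # A hypothetical class we choose to trust during unpickling.
        def __init__(self, x, y):
            self.x, self.y = x, y

    class AllowlistUnpickler(pickle.Unpickler):
        # find_class is called for every global the payload references;
        # anything not registered in SAFE is rejected before instantiation.
        SAFE = {("__main__", "Point"): Point}

        def find_class(self, module, name):
            if (module, name) in self.SAFE:
                return self.SAFE[(module, name)]
            raise pickle.UnpicklingError(f"blocked class: {module}.{name}")

    payload = pickle.dumps(Point(1, 2))
    restored = AllowlistUnpickler(io.BytesIO(payload)).load()
    assert (restored.x, restored.y) == (1, 2)

Note that the commit also forwards anything under the numpy package to the default resolver, which trades some strictness for compatibility with pickled numpy arrays.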
5 changes: 3 additions & 2 deletions docs/GithubAction+AllCapacity
@@ -3,6 +3,9 @@
 # Build on an NVIDIA base image for GPU support (the CUDA version reported by the host's nvidia-smi must be >= 11.3)
 FROM fuqingxu/11.3.1-runtime-ubuntu20.04-with-texlive:latest

+# Dependencies needed by edge-tts and by building some pip packages
+RUN apt update && apt install ffmpeg build-essential -y
+
 # use python3 as the system default python
 WORKDIR /gpt
 RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8
@@ -28,8 +31,6 @@ RUN python3 -m pip install -r request_llms/requirements_chatglm.txt
 RUN python3 -m pip install -r request_llms/requirements_newbing.txt
 RUN python3 -m pip install nougat-ocr

-# Dependencies needed by edge-tts
-RUN apt update && apt install ffmpeg -y

 # Warm up the Tiktoken module
 RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
5 changes: 3 additions & 2 deletions docs/GithubAction+AllCapacityBeta
@@ -5,6 +5,9 @@
 # Build on an NVIDIA base image for GPU support (the CUDA version reported by the host's nvidia-smi must be >= 11.3)
 FROM fuqingxu/11.3.1-runtime-ubuntu20.04-with-texlive:latest

+# Dependencies needed by edge-tts and by building some pip packages
+RUN apt update && apt install ffmpeg build-essential -y
+
 # use python3 as the system default python
 WORKDIR /gpt
 RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8
@@ -36,8 +39,6 @@ RUN python3 -m pip install -r request_llms/requirements_chatglm.txt
 RUN python3 -m pip install -r request_llms/requirements_newbing.txt
 RUN python3 -m pip install nougat-ocr

-# Dependencies needed by edge-tts
-RUN apt update && apt install ffmpeg -y

 # Warm up the Tiktoken module
 RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
4 changes: 2 additions & 2 deletions docs/GithubAction+ChatGLM+Moss
@@ -5,6 +5,8 @@ RUN apt-get update
 RUN apt-get install -y curl proxychains curl gcc
 RUN apt-get install -y git python python3 python-dev python3-dev --fix-missing

+# Dependencies needed by edge-tts and by building some pip packages
+RUN apt update && apt install ffmpeg build-essential -y

 # use python3 as the system default python
 RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8
@@ -21,8 +23,6 @@ RUN python3 -m pip install -r request_llms/requirements_qwen.txt
 RUN python3 -m pip install -r request_llms/requirements_chatglm.txt
 RUN python3 -m pip install -r request_llms/requirements_newbing.txt

-# Dependencies needed by edge-tts
-RUN apt update && apt install ffmpeg -y

 # Warm up the Tiktoken module
 RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
2 changes: 1 addition & 1 deletion main.py
@@ -26,7 +26,7 @@ def enable_log(PATH_LOGGING):

 def main():
     import gradio as gr
-    if gr.__version__ not in ['3.32.9', '3.32.10']:
+    if gr.__version__ not in ['3.32.9', '3.32.10', '3.32.11']:
         raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.")
     from request_llms.bridge_all import predict
     from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
9 changes: 9 additions & 0 deletions request_llms/bridge_all.py
@@ -951,6 +951,15 @@ def decode(self, *args, **kwargs):
             "max_token": 4096,
             "tokenizer": tokenizer_gpt35,
             "token_cnt": get_token_num_gpt35,
         },
+        "sparkv4": {
+            "fn_with_ui": spark_ui,
+            "fn_without_ui": spark_noui,
+            "can_multi_thread": True,
+            "endpoint": None,
+            "max_token": 4096,
+            "tokenizer": tokenizer_gpt35,
+            "token_cnt": get_token_num_gpt35,
+        }
     })
 except:
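
Aside: registering a model here means adding one descriptor to a dispatch table, so callers never branch on the model name. A rough sketch of that lookup pattern (the handler functions are stand-ins, not the project's real spark_ui and spark_noui):

    # Hypothetical handlers standing in for the project's spark_ui / spark_noui.
    def spark_ui(inputs, **kwargs):
        return f"streamed: {inputs}"

    def spark_noui(inputs, **kwargs):
        return f"blocking: {inputs}"

    model_info = {
        "sparkv4": {
            "fn_with_ui": spark_ui,        # generator-style handler used by the chat UI
            "fn_without_ui": spark_noui,   # blocking handler used by plugins
            "can_multi_thread": True,
            "endpoint": None,              # Spark resolves a websocket URL instead
            "max_token": 4096,
        },
    }

    # Dispatch by table lookup rather than an if/elif chain per model name.
    handler = model_info["sparkv4"]["fn_without_ui"]
    print(handler("hello"))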
3 changes: 2 additions & 1 deletion request_llms/bridge_qwen.py
@@ -1,7 +1,7 @@
 import time
 import os
 from toolbox import update_ui, get_conf, update_ui_lastest_msg
-from toolbox import check_packages, report_exception
+from toolbox import check_packages, report_exception, log_chat

 model_name = 'Qwen'

@@ -59,6 +59,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt
         chatbot[-1] = (inputs, response)
         yield from update_ui(chatbot=chatbot, history=history)

+    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response)
     # Summarize the output
     if response == f"[Local Message] 等待{model_name}响应中 ...":
         response = f"[Local Message] {model_name}响应异常 ..."
8 changes: 6 additions & 2 deletions request_llms/com_qwenapi.py
@@ -65,8 +65,12 @@ def generate(self, inputs, llm_kwargs, history, system_prompt):
                 self.result_buf += f"[Local Message] 请求错误:状态码:{response.status_code},错误码:{response.code},消息:{response.message}"
                 yield self.result_buf
                 break
-        logging.info(f'[raw_input] {inputs}')
-        logging.info(f'[response] {self.result_buf}')

+        # Exhaust the generator so the abandoned stream does not raise later
+        while True:
+            try: next(responses)
+            except: break

         return self.result_buf
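
Aside: the while/next loop added above drains the streaming response before returning. The same effect, sketched with a plain for loop (assuming the SDK expects its stream to be fully consumed before the connection is reused or closed):

    def stream():
        # Stand-in for a streaming SDK response iterator.
        for i in range(5):
            yield i

    responses = stream()
    for chunk in responses:
        if chunk == 2:
            break  # stop consuming early, e.g. after an error chunk

    # Drain the remainder so the underlying transport can shut down cleanly.
    for _ in responses:
        pass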
4 changes: 4 additions & 0 deletions request_llms/com_sparkapi.py
@@ -67,6 +67,7 @@ def __init__(self):
         self.gpt_url_v3 = "ws://spark-api.xf-yun.com/v3.1/chat"
         self.gpt_url_v35 = "wss://spark-api.xf-yun.com/v3.5/chat"
         self.gpt_url_img = "wss://spark-api.cn-huabei-1.xf-yun.com/v2.1/image"
+        self.gpt_url_v4 = "wss://spark-api.xf-yun.com/v4.0/chat"

         self.time_to_yield_event = threading.Event()
         self.time_to_exit_event = threading.Event()
@@ -94,6 +95,8 @@ def create_blocking_request(self, inputs, llm_kwargs, history, system_prompt, us
             gpt_url = self.gpt_url_v3
         elif llm_kwargs['llm_model'] == 'sparkv3.5':
             gpt_url = self.gpt_url_v35
+        elif llm_kwargs['llm_model'] == 'sparkv4':
+            gpt_url = self.gpt_url_v4
         else:
             gpt_url = self.gpt_url
         file_manifest = []
@@ -194,6 +197,7 @@ def gen_params(appid, inputs, llm_kwargs, history, system_prompt, file_manifest)
         "sparkv2": "generalv2",
         "sparkv3": "generalv3",
         "sparkv3.5": "generalv3.5",
+        "sparkv4": "4.0Ultra"
     }
     domains_select = domains[llm_kwargs['llm_model']]
     if file_manifest: domains_select = 'image'
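
Aside: adding a Spark version touches two lookup points, the websocket URL and the request's domain field. Sketched as explicit tables (the URL and domain values come from this diff; the resolve helper is illustrative):

    URLS = {
        "sparkv3.5": "wss://spark-api.xf-yun.com/v3.5/chat",
        "sparkv4": "wss://spark-api.xf-yun.com/v4.0/chat",
    }
    DOMAINS = {
        "sparkv3.5": "generalv3.5",
        "sparkv4": "4.0Ultra",
    }

    def resolve(model: str) -> tuple:
        # Fail loudly on an unknown alias instead of silently defaulting.
        if model not in URLS or model not in DOMAINS:
            raise KeyError(f"unknown Spark model: {model}")
        return URLS[model], DOMAINS[model]

    print(resolve("sparkv4"))  # ('wss://spark-api.xf-yun.com/v4.0/chat', '4.0Ultra')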
3 changes: 2 additions & 1 deletion request_llms/oai_std_model_template.py
@@ -44,7 +44,8 @@ def decode_chunk(chunk):
     try:
         chunk = json.loads(chunk[6:])
     except:
-        finish_reason = "JSON_ERROR"
+        respose = "API_ERROR"
+        finish_reason = chunk
     # Error handling
     if "error" in chunk:
         respose = "API_ERROR"
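
Aside: the change above keeps the raw chunk in finish_reason when JSON parsing fails, so the caller can see what the server actually sent rather than a bare "JSON_ERROR" marker. A simplified sketch of that decoding pattern (not the project's exact function or return convention):

    import json

    def decode_chunk(chunk: bytes):
        # Parse one "data: {...}" server-sent-events line into (text, finish_reason).
        payload = chunk.decode("utf-8", errors="replace")
        if not payload.startswith("data: "):
            return "", ""
        body = payload[len("data: "):].strip()
        if body == "[DONE]":
            return "", "stop"
        try:
            obj = json.loads(body)
        except json.JSONDecodeError:
            # Surface the raw payload so the failure is diagnosable upstream.
            return "API_ERROR", payload
        choice = (obj.get("choices") or [{}])[0]
        text = choice.get("delta", {}).get("content") or ""
        return text, choice.get("finish_reason") or ""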
10 changes: 10 additions & 0 deletions shared_utils/fastapi_server.py
@@ -159,6 +159,15 @@ async def file(path_or_url: str, request: fastapi.Request):
             return "越权访问!"
         return await endpoint(path_or_url, request)

+    from fastapi import Request, status
+    from fastapi.responses import FileResponse, RedirectResponse
+    @gradio_app.get("/academic_logout")
+    async def logout():
+        response = RedirectResponse(url=CUSTOM_PATH, status_code=status.HTTP_302_FOUND)
+        response.delete_cookie('access-token')
+        response.delete_cookie('access-token-unsecure')
+        return response
+
     # --- --- enable TTS (text-to-speech) functionality --- ---
     TTS_TYPE = get_conf("TTS_TYPE")
     if TTS_TYPE != "DISABLE":
@@ -236,6 +245,7 @@ async def middleware(request: Request, call_next):
         response = await call_next(request)
         return response

+
     # --- --- uvicorn.Config --- ---
     ssl_keyfile = None if SSL_KEYFILE == "" else SSL_KEYFILE
     ssl_certfile = None if SSL_CERTFILE == "" else SSL_CERTFILE
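
Aside: the new logout route works by redirecting to the app root while telling the browser to expire both auth cookies named in the diff. A hypothetical smoke test, assuming a local instance on the default Gradio port 7860 with the app mounted at the root path:

    import httpx  # third-party HTTP client, used here purely for illustration

    resp = httpx.get("http://localhost:7860/academic_logout", follow_redirects=False)
    assert resp.status_code == 302  # bounced back to CUSTOM_PATH
    set_cookies = resp.headers.get_list("set-cookie")
    # delete_cookie() appears as Set-Cookie headers with an immediate expiry.
    assert any("access-token" in c for c in set_cookies)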
22 changes: 22 additions & 0 deletions tests/test_latex_auto_correct.py
@@ -0,0 +1,22 @@
+"""
+Tests for the project's plugins. Usage: run python tests/test_latex_auto_correct.py directly.
+"""
+
+
+import os, sys, importlib
+
+
+def validate_path():
+    dir_name = os.path.dirname(__file__)
+    root_dir_assume = os.path.abspath(dir_name + "/..")
+    os.chdir(root_dir_assume)
+    sys.path.append(root_dir_assume)
+
+
+validate_path()  # change the working directory to the project root
+
+if __name__ == "__main__":
+    plugin_test = importlib.import_module('test_utils').plugin_test
+
+    plugin_test(plugin='crazy_functions.Latex_Function->Latex翻译中文并重新编译PDF', main_input="2203.01927")
File renamed without changes.
