Commit bd37a8e

Merge pull request #391 from LlmKira/main

Docker Build

sudoskys authored Apr 18, 2024
2 parents bbd2c1a + af73e3b commit bd37a8e

Showing 13 changed files with 267 additions and 219 deletions.
3 changes: 2 additions & 1 deletion Dockerfile
@@ -4,7 +4,8 @@ FROM python:3.9-buster as builder
RUN apt update && \
apt install -y build-essential && \
pip install -U pip setuptools wheel && \
pip install pdm
pip install pdm && \
apt install -y ffmpeg

COPY pyproject.toml pdm.lock README.md /project/
WORKDIR /project
15 changes: 8 additions & 7 deletions README.md
@@ -42,11 +42,12 @@ This project uses the ToolCall feature.
It integrates a message queuing and snapshot system, offering plugin mechanisms and authentication prior to plugin
execution.

The model adheres to the Openai Schema, other models are not supported. Please adapt using gateways independently.
The model adheres to the Openai Format Schema. Please adapt using [gateway](https://github.com/Portkey-AI/gateway)
or [one-api](https://github.com/songquanpeng/one-api) independently.

| Demo |
|-----------------------------------|
| ![sticker](./docs/chain_chat.gif) |
| Demo | Vision With Voice |
|-----------------------------------|------------------------------|
| ![sticker](./docs/chain_chat.gif) | ![vision](./docs/vision.gif) |

## 🔨 Roadmap

@@ -56,10 +57,10 @@ The model adheres to the Openai Schema, other models are not supported. Please a
- [x] Implementation of a more robust plugin system
- [x] Project structure simplification
- [x] Elimination of the Provider system
- [x] Hook support.
- [x] Access to TTS.
- [x] Hook support
- [x] Access to TTS
- [x] Add standalone support for gpt-4-turbo and vision
- [ ] Add LLM reference support to the plugin environment. (extract && search in text)
- [ ] Add standalone support for Openai's new Schema. (vision)

## 📦 Features

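The updated README delegates non-OpenAI providers to an OpenAI-compatible gateway rather than supporting them directly. A minimal sketch of what that adaptation can look like, using the official `openai` Python client pointed at a self-hosted one-api or gateway deployment; the base URL, API key, and model name below are placeholders, not values taken from this repository:

```python
from openai import OpenAI

# Route requests through a self-hosted OpenAI-compatible gateway (one-api, Portkey gateway, ...).
# The endpoint, key, and model name are placeholders for your own deployment.
client = OpenAI(
    base_url="http://localhost:3000/v1",  # gateway endpoint instead of api.openai.com
    api_key="sk-your-gateway-token",
)

response = client.chat.completions.create(
    model="gpt-4-turbo",  # the gateway maps this name to a backing provider
    messages=[{"role": "user", "content": "ping"}],
)
print(response.choices[0].message.content)
```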
26 changes: 14 additions & 12 deletions app/middleware/llm_task.py
@@ -22,6 +22,7 @@
SystemMessage,
ToolMessage,
AssistantMessage,
UserMessage,
)
from llmkira.openai.request import OpenAIResult, OpenAI, OpenAICredential
from llmkira.task import TaskHeader
@@ -174,7 +175,7 @@ async def build_task_messages(self, remember=True):
for i, message in enumerate(task_message):
message: EventMessage
# message format
user_message = message.format_user_message()
user_message = await message.format_user_message()
message_run.append(user_message)
if remember:
await self.message_history.append(messages=[user_message])
@@ -192,14 +193,7 @@ async def request_openai(
:param disable_tool: disable tool calls
:param credential: credential
:return: OpenaiResult result
:raise RuntimeError: # Feel time leave
time_feel = await TimeFeelManager(self.session_uid).get_leave()
if time_feel:
await self.remember(
message=SystemMessage(
content=f"statu:[After {time_feel} leave, user is back]"
)
) cannot process the message
:raise RuntimeError: the message list is empty
:raise AssertionError: the message cannot be processed
:raise OpenaiError: Openai error
"""
@@ -231,13 +225,21 @@ async def request_openai(
# TODO: implement time-sequence slicing of messages
# Logging
logger.info(
f"[x] Openai request" f"\n--message {messages} " f"\n--tools {tools}"
f"[x] Openai request" f"\n--message {len(messages)} " f"\n--tools {tools}"
)
for msg in messages:
if isinstance(msg, UserMessage):
if len(str(msg)) < 100:
logger.debug(f"Message: {msg}")
else:
logger.debug("Message: UserMessage")
else:
logger.debug(f"Message:{msg}")
# Validation is required
if disable_tool or not tools:
logger.debug("llm_task:Tool not enable")
logger.debug("llm_task:no tool loaded")
tools = None
# 根据模型选择不同的驱动a
# 根据模型选择不同的驱动
assert messages, RuntimeError("llm_task:message cant be none...")
messages = await validate_mock(messages)
endpoint: OpenAI = OpenAI(
2 changes: 1 addition & 1 deletion app/receiver/function.py
@@ -204,7 +204,7 @@ async def run_pending_task(task: TaskHeader, pending_task: ToolCall):
# Resign Chain
# Timing guard to prevent excessive re-registration
if len(task.task_sign.tool_calls_pending) == 1:
if has_been_called_recently(userid=task.receiver.uid, n_seconds=5):
if not has_been_called_recently(userid=task.receiver.uid, n_seconds=5):
logger.debug(
"ToolCall run out, resign a new request to request stop sign."
)
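The guard is inverted so a stop sign is only re-requested when the user has not triggered this path within the last few seconds, which prevents the chain from re-registering in a tight loop. `has_been_called_recently` itself is not part of this diff; the sketch below is a hypothetical illustration of such a per-user debounce, with its storage and names invented for the example:

```python
import time
from typing import Dict

_last_called: Dict[str, float] = {}  # hypothetical in-memory store, keyed by user id


def has_been_called_recently(userid: str, n_seconds: int = 5) -> bool:
    """Return True if `userid` hit this code path within the last `n_seconds`."""
    now = time.time()
    recent = (now - _last_called.get(userid, 0.0)) < n_seconds
    _last_called[userid] = now  # record this call for the next check
    return recent


print(has_been_called_recently("user-1"))  # False on the first call
print(has_been_called_recently("user-1"))  # True immediately afterwards
```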
8 changes: 6 additions & 2 deletions app/receiver/slack/__init__.py
@@ -6,7 +6,7 @@
import ssl
from typing import List

from loguru import logger, Message
from loguru import logger
from slack_sdk.web.async_client import AsyncWebClient

from app.middleware.llm_task import OpenaiMiddleware
@@ -16,6 +16,7 @@
from app.setting.slack import BotSetting
from llmkira.kv_manager.file import File
from llmkira.openai import OpenAIResult
from llmkira.openai.cell import AssistantMessage
from llmkira.task import Task, TaskHeader

__receiver__ = "slack"
@@ -86,7 +87,10 @@ async def forward(self, receiver: Location, message: List[EventMessage]):
await self.bot.chat_postMessage(**_message)

async def reply(
self, receiver: Location, messages: List[Message], reply_to_message: bool = True
self,
receiver: Location,
messages: List[AssistantMessage],
reply_to_message: bool = True,
):
"""
Direct forwarding of the model output; Message is an Openai type
37 changes: 30 additions & 7 deletions app/sender/telegram/__init__.py
@@ -21,6 +21,7 @@
auth_reloader,
uid_make,
login,
TimerObjectContainer,
)
from app.setting.telegram import BotSetting
from llmkira.kv_manager.env import EnvManager
@@ -38,7 +39,7 @@
from app.components.credential import split_setting_string, Credential, ProviderError

StepCache = StateMemoryStorage()

FileWindow = TimerObjectContainer()
TelegramTask = Task(queue=__sender__)


@@ -67,6 +68,7 @@ async def transcribe(
files = files if files else []
messages = messages if messages else []
event_messages = []
files = [file for file in files if file] # No None
for index, message in enumerate(messages):
message_text = getattr(message, "text", "empty")
event_messages.append(
@@ -144,6 +146,19 @@ async def create_task(message: types.Message, disable_tool_action: bool = True):
message.text = message.text
if not message.text:
return None
__used_file_id = []
photos: List[types.PhotoSize] = FileWindow.get_objects(
user_id=message.from_user.id
)
FileWindow.clear_objects(user_id=message.from_user.id)
for photo in photos:
__used_file_id.append(photo.file_id)
uploaded_file.append(
await self.upload(
file=photo,
uid=uid_make(__sender__, message.from_user.id),
)
)
if message.photo:
uploaded_file.append(
await self.upload(
@@ -161,12 +176,13 @@ async def create_task(message: types.Message, disable_tool_action: bool = True):
)
if message.reply_to_message:
if message.reply_to_message.photo:
uploaded_file.append(
await self.upload(
message.reply_to_message.photo[-1],
uid=uid_make(__sender__, message.from_user.id),
if message.reply_to_message.photo[-1].file_id not in __used_file_id:
uploaded_file.append(
await self.upload(
message.reply_to_message.photo[-1],
uid=uid_make(__sender__, message.from_user.id),
)
)
)
if message.reply_to_message.document:
if message.reply_to_message.document.file_size < 1024 * 1024 * 10:
uploaded_file.append(
@@ -367,9 +383,16 @@ async def handle_private_msg(message: types.Message):
Automatically respond to private messages
"""
message.text = message.text if message.text else message.caption

# Support for GPT Vision
if not message.text:
if message.photo:
logger.debug("Add a spc image")
FileWindow.add_object(
user_id=message.from_user.id, obj=message.photo[-1]
)
return None

# Trigger
trigger = await get_trigger_loop(
platform_name=__sender__,
message=message.text,
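Taken together, the Telegram changes hand caption-less photos over to the user's next text message: `handle_private_msg` parks them in `FileWindow`, and `create_task` drains the window, uploads the parked photos, and records their `file_id`s so a replied-to photo is not uploaded a second time. A small self-contained sketch of that deduplication step, with made-up file ids:

```python
# Photos drained from FileWindow for this user are queued first; a replied-to photo
# whose file_id was already drained is skipped instead of being uploaded twice.
pending_photo_ids = ["PHOTO_A"]  # made-up id drained from FileWindow
reply_photo_id = "PHOTO_A"       # made-up id of the photo on message.reply_to_message

upload_queue = list(pending_photo_ids)
if reply_photo_id is not None and reply_photo_id not in pending_photo_ids:
    upload_queue.append(reply_photo_id)

print(upload_queue)  # ['PHOTO_A'], uploaded exactly once
```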
38 changes: 38 additions & 0 deletions app/sender/util_func.py
@@ -3,6 +3,7 @@
# @Author : sudoskys
# @File : util_func.py
# @Software: PyCharm
import time
from typing import Tuple, Optional, Union
from urllib.parse import urlparse

@@ -111,3 +112,40 @@ async def auth_reloader(snapshot_credential: str, platform: str, user_id: str) -
queue_name=snap.channel,
task=snap.snapshot_data,
)


class TimerObjectContainer:
def __init__(self):
self.users = {}

def add_object(self, user_id, obj):
if user_id not in self.users:
self.users[user_id] = {}
self.users[user_id][obj] = time.time()

def get_objects(self, user_id, second=1200) -> list: # 20 minutes = 1200 seconds
"""
Get the objects stored for a given user, automatically dropping entries older than the given window
:param user_id: user ID
:param second: window length in seconds
"""
if user_id not in self.users:
return []

user_objs = self.users[user_id]
valid_objects = {
obj: add_time
for obj, add_time in user_objs.items()
if time.time() - add_time < second
}

self.users[user_id] = valid_objects
return list(valid_objects.keys())

def clear_objects(self, user_id):
"""
Clear all objects stored for a given user
:param user_id: user ID
"""
if user_id in self.users:
self.users[user_id] = {}
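`TimerObjectContainer` is a per-user, time-windowed in-memory cache: `add_object` timestamps an entry, `get_objects` returns only entries younger than `second` seconds and prunes the rest, and `clear_objects` empties a user's bucket. A short usage sketch against the class above; the user id and stored values are illustrative:

```python
import time

from app.sender.util_func import TimerObjectContainer

container = TimerObjectContainer()
container.add_object(user_id="user-1", obj="photo_file_id_1")
container.add_object(user_id="user-1", obj="photo_file_id_2")

# Within the default 1200-second window both entries are still returned.
print(container.get_objects(user_id="user-1"))            # ['photo_file_id_1', 'photo_file_id_2']

# With a one-second window, entries older than one second are pruned.
time.sleep(1.1)
print(container.get_objects(user_id="user-1", second=1))  # []

# clear_objects empties the bucket regardless of age.
container.add_object(user_id="user-1", obj="photo_file_id_3")
container.clear_objects(user_id="user-1")
print(container.get_objects(user_id="user-1"))             # []
```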
Binary file added docs/vision.gif
85 changes: 84 additions & 1 deletion llmkira/openai/cell.py
@@ -1,10 +1,13 @@
import base64
from abc import ABC
from typing import Optional, Union, List, Literal, Type, Any

from docstring_parser import parse
from json_repair import repair_json
from pydantic import ConfigDict, BaseModel, Field, field_validator, model_validator

from llmkira.openai.utils import resize_openai_image


class FunctionChoice(BaseModel):
name: str
@@ -170,11 +173,91 @@ class SystemMessage(Message):
name: Optional[str] = None


class ImageContent(BaseModel):
url: str
detail: Optional[str] = "auto"


class ContentPart(BaseModel):
type: Union[str, Literal["text", "image_url"]]
text: Optional[str] = None
image_url: Optional[ImageContent] = None

@model_validator(mode="after")
def check_model(self):
if self.type == "image_url":
if self.image_url is None:
raise ValueError("image_url cannot be None")
if self.type == "text":
if self.text is None:
raise ValueError("text cannot be None")
return self

@classmethod
def create_text(cls, text: str):
"""
Create a text content part
:param text: text
:return: ContentPart
"""
assert isinstance(text, str), ValueError("text must be a string")
return cls(type="text", text=text)

@classmethod
def create_image(
cls, url: Union[str, bytes], detail: Literal["low", "high", "auto"] = "auto"
):
"""
Create an image content part
:param url: image url or image bytes
:param detail: image detail
:return: ContentPart
"""
assert detail in ("low", "high", "auto"), ValueError(
"detail must be low, high or auto"
)
if isinstance(url, bytes):
url = resize_openai_image(url, mode=detail)
base64_image = base64.b64encode(url).decode("utf-8")
url = f"data:image/jpeg;base64,{base64_image}"
elif isinstance(url, str):
if not url.startswith("http") and not url.startswith(
"data:image/jpeg;base64,"
):
raise ValueError(
"url must be a http url or `data:image/jpeg;base64,` as base64 image"
)
else:
raise ValueError("url must be a http url or bytes")
return cls(type="image_url", image_url=ImageContent(url=url, detail=detail))


class UserMessage(Message):
role: Literal["user"] = "user"
content: str
content: Union[str, List[ContentPart]]
name: Optional[str] = None

@field_validator("content")
def check_content(cls, v):
if isinstance(v, str):
return [ContentPart.create_text(text=v)]
elif isinstance(v, list):
return v
else:
raise ValueError("content must be a string or a list of ContentPart")

def add_text(self, text: str):
self.content.append(ContentPart.create_text(text=text))
return self

def add_image(
self,
image_url: Union[str, bytes],
detail: Literal["low", "high", "auto"] = "auto",
):
self.content.append(ContentPart.create_image(url=image_url, detail=detail))
return self


class ToolMessage(Message):
role: Literal["tool"] = "tool"
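The new `ContentPart` and `UserMessage` API turns a plain string into OpenAI's multimodal content-part format: a string `content` is normalised into a single text part, and `add_image` appends an `image_url` part (raw bytes are resized via `resize_openai_image` and inlined as a base64 data URI, while string URLs must already be http(s) or a `data:image/jpeg;base64,` URI). A usage sketch against the classes above; the image URL is a placeholder:

```python
from llmkira.openai.cell import UserMessage

# A plain string is validated into a single text ContentPart.
message = UserMessage(content="What is shown in this picture?")

# add_image appends an image_url part; an http(s) URL is passed through unchanged,
# raw bytes would be resized and embedded as a base64 data URI instead.
message = message.add_image(
    image_url="https://example.com/cat.jpg",  # placeholder URL
    detail="low",
)

# The content is now a list of parts, ready for a vision-capable chat model.
print(message.model_dump(exclude_none=True))
```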