chore: bump version #155

Merged: 3 commits, Dec 9, 2024
3 changes: 3 additions & 0 deletions .github/workflows/CI.yml
@@ -4,6 +4,9 @@ on:
   workflow_dispatch:
   pull_request:
     branches: ["main"]
+  push:
+    branches:
+      - main
 
 permissions:
   contents: read
2 changes: 1 addition & 1 deletion examples/langchain_variable.py
@@ -32,7 +32,7 @@
 cb = lai.langchain_callback()
 
 # Returns a langchain_openai.ChatOpenAI instance.
-gpt_4o = init_chat_model(
+gpt_4o = init_chat_model(  # type: ignore
     model_provider=prompt.provider,
     **prompt.settings,
 )
33 changes: 15 additions & 18 deletions literalai/instrumentation/llamaindex/event_handler.py
@@ -100,7 +100,7 @@ def extract_document_info(nodes: List[NodeWithScore]):
 
 
 def build_message_dict(message: ChatMessage):
-    message_dict = {
+    message_dict: GenerationMessage = {
         "role": convert_message_role(message.role),
         "content": message.content,
     }
@@ -144,8 +144,8 @@ def extract_query(x: Union[str, QueryBundle]):
 class LiteralEventHandler(BaseEventHandler):
     """This class handles events coming from LlamaIndex."""
 
-    _client: "LiteralClient" = PrivateAttr(...)
-    _span_handler: "LiteralSpanHandler" = PrivateAttr(...)
+    _client: "LiteralClient" = PrivateAttr()
+    _span_handler: "LiteralSpanHandler" = PrivateAttr()
     runs: Dict[str, List[Step]] = {}
     streaming_run_ids: List[str] = []
     _standalone_step_id: Optional[str] = None
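
The `PrivateAttr(...)` to `PrivateAttr()` change is more than cosmetic: in pydantic, passing `...` stores the Ellipsis object as the private attribute's default, while a bare `PrivateAttr()` leaves the attribute unset until something assigns it. A minimal sketch of the difference, assuming pydantic v2 semantics and illustrative class names:

```python
from pydantic import BaseModel, PrivateAttr

class WithEllipsis(BaseModel):
    # Passing ... makes Ellipsis the actual default value.
    _client: object = PrivateAttr(...)

class WithoutDefault(BaseModel):
    # No default: the attribute stays unset until explicitly assigned.
    _client: object = PrivateAttr()

print(WithEllipsis()._client)  # Ellipsis, a silently wrong value

try:
    WithoutDefault()._client
except AttributeError:
    print("unset; access before assignment fails loudly")
```

Failing loudly is the safer behavior here, since `LiteralEventHandler.__init__` assigns both attributes via `object.__setattr__` anyway.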
@@ -163,21 +163,18 @@ def __init__(
         object.__setattr__(self, "_client", literal_client)
         object.__setattr__(self, "_span_handler", llama_index_span_handler)
 
-    def _convert_message(
-        self,
-        message: ChatMessage,
-    ):
+    def _convert_message(self, message: ChatMessage):
         tool_calls = message.additional_kwargs.get("tool_calls")
-        msg = GenerationMessage(
-            name=getattr(message, "name", None),
-            role=convert_message_role(message.role),
-            content="",
-        )
-
-        msg["content"] = message.content
-
-        if tool_calls:
-            msg["tool_calls"] = [tool_call.to_dict() for tool_call in tool_calls]
+        msg: GenerationMessage = {
+            "name": getattr(message, "name", None),
+            "role": convert_message_role(message.role),
+            "content": message.content,
+            "tool_calls": (
+                [tool_call.to_dict() for tool_call in tool_calls]
+                if tool_calls
+                else None
+            ),
+        }
 
         return msg
 
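The `_convert_message` rewrite folds four separate mutations into one annotated dict literal. The annotation works because `GenerationMessage` is, as the diff implies, a `TypedDict`, so mypy can validate every key and value type at the point of construction. A self-contained sketch with a stand-in definition (the field set is illustrative, not necessarily the SDK's exact one):

```python
from typing import Any, List, Optional
from typing_extensions import TypedDict

# Stand-in for literalai's GenerationMessage.
class GenerationMessage(TypedDict, total=False):
    name: Optional[str]
    role: str
    content: Optional[str]
    tool_calls: Optional[List[Any]]

def convert(role: str, content: Optional[str], tool_calls: Optional[list]) -> GenerationMessage:
    # One literal, checked as a whole, instead of item assignments
    # scattered across the function body.
    msg: GenerationMessage = {
        "role": role,
        "content": content,
        "tool_calls": list(tool_calls) if tool_calls else None,
    }
    return msg

print(convert("assistant", "hello", None))
```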
@@ -238,7 +235,7 @@ def handle(self, event: BaseEvent, **kwargs) -> None:
                 thread_id=thread_id,
                 content=query,
             )
-
+        # Retrieval wraps the Embedding step in LlamaIndex
         if isinstance(event, RetrievalStartEvent):
             run = self._client.start_step(
14 changes: 8 additions & 6 deletions literalai/observability/generation.py
@@ -67,15 +67,16 @@ class BaseGeneration(Utils):
     to_dict(self) -> Dict:
         Converts the generation object to a dictionary.
     """
+
     id: Optional[str] = None
     prompt_id: Optional[str] = None
     provider: Optional[str] = None
     model: Optional[str] = None
     error: Optional[str] = None
-    settings: Optional[Dict] = Field(default_factory=dict)
-    variables: Optional[Dict] = Field(default_factory=dict)
-    tags: Optional[List[str]] = Field(default_factory=list)
-    metadata: Optional[Dict] = Field(default_factory=dict)
+    settings: Optional[Dict] = Field(default_factory=lambda: {})
+    variables: Optional[Dict] = Field(default_factory=lambda: {})
+    tags: Optional[List[str]] = Field(default_factory=lambda: [])
+    metadata: Optional[Dict] = Field(default_factory=lambda: {})
     tools: Optional[List[Dict]] = None
     token_count: Optional[int] = None
     input_token_count: Optional[int] = None
@@ -129,6 +130,7 @@ class CompletionGeneration(BaseGeneration, Utils):
     completion (Optional[str]): The generated completion text.
     type (GenerationType): The type of generation, which is set to GenerationType.COMPLETION.
     """
+
     prompt: Optional[str] = None
     completion: Optional[str] = None
     type = GenerationType.COMPLETION
@@ -177,8 +179,9 @@ class ChatGeneration(BaseGeneration, Utils):
     message_completion (Optional[GenerationMessage]): The completion message of the chat generation.
     type (GenerationType): The type of generation, which is set to GenerationType.CHAT.
     """
+
     type = GenerationType.CHAT
-    messages: Optional[List[GenerationMessage]] = Field(default_factory=list)
+    messages: Optional[List[GenerationMessage]] = Field(default_factory=lambda: [])
     message_completion: Optional[GenerationMessage] = None
 
     def to_dict(self):
def to_dict(self):
Expand Down Expand Up @@ -213,4 +216,3 @@ def from_dict(self, generation_dict: Dict):
messages=generation_dict.get("messages", []),
message_completion=generation_dict.get("messageCompletion"),
)

6 changes: 3 additions & 3 deletions literalai/prompt_engineering/prompt.py
@@ -67,7 +67,7 @@ class Prompt(Utils):
 
     Attributes
     ----------
-    template_messages : List[GenerationMessage]
+    template_messages : List[GenerationMessage]
         The messages that make up the prompt. Messages can be of type `text` or `image`.
         Messages can reference variables.
     variables : List[PromptVariable]
@@ -214,9 +214,9 @@ def to_langchain_chat_prompt_template(self, additional_messages=[]):
 
     class CustomChatPromptTemplate(ChatPromptTemplate):
         orig_messages: Optional[List[GenerationMessage]] = Field(
-            default_factory=list
A review thread is attached to this line:

Contributor: they both do the same thing; I would keep `list` though (it seems easier to read).

Contributor Author: No, they do not, as per the linter error:

literalai/observability/generation.py:75: error: Argument "default_factory" to "Field" has incompatible type "type[dict[Any, Any]]"; expected "Union[Callable[[], Never], Callable[[dict[str, Any]], Never]]"  [arg-type]

+            default_factory=lambda: []
         )
-        default_vars: Optional[Dict] = Field(default_factory=dict)
+        default_vars: Optional[Dict] = Field(default_factory=lambda: {})
         prompt_id: Optional[str] = None
 
         def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
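
The thread above explains every `default_factory` change in this PR: under the reviewer's mypy setup, passing the bare `dict` or `list` class trips the quoted arg-type error on these `Optional[...]` fields, while a lambda returning a fresh literal satisfies the expected callable type. A reproduction sketch (the model and field names are illustrative):

```python
from typing import Dict, Optional
from pydantic import BaseModel, Field

class Generation(BaseModel):
    # Rejected by the mypy configuration quoted above:
    # settings: Optional[Dict] = Field(default_factory=dict)

    # Accepted: a lambda returning a fresh empty dict per instance.
    settings: Optional[Dict] = Field(default_factory=lambda: {})

print(Generation().settings)  # {}
```

At runtime both spellings behave identically, creating a new container per instance; only the statically inferred type of the factory differs.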
2 changes: 1 addition & 1 deletion literalai/version.py
@@ -1 +1 @@
-__version__ = "0.1.102"
+__version__ = "0.1.103"
2 changes: 1 addition & 1 deletion setup.py
@@ -2,7 +2,7 @@
 
 setup(
     name="literalai",
-    version="0.1.102",  # update version in literalai/version.py
+    version="0.1.103",  # update version in literalai/version.py
     description="An SDK for observability in Python applications",
     long_description=open("README.md").read(),
     long_description_content_type="text/markdown",
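
The `# update version in literalai/version.py` comment flags that the release number is maintained in two files. A common way to single-source it, sketched here as a suggestion rather than what this repo does, is to have setup.py execute version.py and reuse its `__version__`:

```python
# setup.py sketch: read the version from literalai/version.py so that
# only one string has to change per release.
from setuptools import setup

version_ns: dict = {}
with open("literalai/version.py") as f:
    exec(f.read(), version_ns)

setup(
    name="literalai",
    version=version_ns["__version__"],
    description="An SDK for observability in Python applications",
)
```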