From 1d5f1eb7829d047709d4e0e4e0489c840126a53e Mon Sep 17 00:00:00 2001 From: emrgnt-cmplxty <68796651+emrgnt-cmplxty@users.noreply.github.com> Date: Thu, 5 Dec 2024 21:31:26 -0800 Subject: [PATCH] Dev minor (#1665) * Feature/add web search (#1662) * add web search to rag agent * add web search to rag agent * Fix agent bug, remove alias (#1661) * Log Streaming (#1663) * Fix agent bug, remove alias * Allow ws on logs * up (#1664) * up * add ingestion settings too * up * up * up * up --------- Co-authored-by: Nolan Tremelling <34580718+NolanTrem@users.noreply.github.com> --- js/sdk/package-lock.json | 2 +- js/sdk/package.json | 2 +- js/sdk/src/models.tsx | 75 ---- js/sdk/src/r2rClient.ts | 27 +- js/sdk/src/types.ts | 54 ++- js/sdk/src/v3/clients/documents.ts | 4 + js/sdk/src/v3/clients/retrieval.ts | 70 ++-- py/core/__init__.py | 2 + py/core/agent/rag.py | 58 ++- py/core/agent/serper.py | 104 ++++++ py/core/base/__init__.py | 3 + py/core/base/abstractions/__init__.py | 4 + py/core/base/providers/__init__.py | 8 +- py/core/base/providers/ingestion.py | 24 +- py/core/configs/full_local_llm.toml | 2 +- py/core/configs/local_llm.toml | 2 +- py/core/main/api/templates/log_viewer.html | 89 +++++ py/core/main/api/v3/documents_router.py | 66 +++- py/core/main/api/v3/logs_router.py | 99 ++++++ py/core/main/api/v3/retrieval_router.py | 209 +++++++++-- py/core/main/app.py | 42 ++- py/core/main/app_entry.py | 13 +- py/core/main/assembly/builder.py | 20 +- py/core/main/services/retrieval_service.py | 139 +++++--- py/core/pipes/retrieval/search_rag_pipe.py | 10 +- py/core/utils/logging_config.py | 131 +++++++ py/poetry.lock | 388 +++++++++++---------- py/pyproject.toml | 4 +- py/r2r.toml | 3 +- py/sdk/models.py | 3 + py/sdk/v3/documents.py | 11 +- py/sdk/v3/retrieval.py | 14 + py/shared/abstractions/__init__.py | 3 + py/shared/abstractions/document.py | 128 +++++++ py/shared/abstractions/graph.py | 2 - py/shared/abstractions/kg.py | 1 - py/shared/abstractions/llm.py | 4 - py/shared/abstractions/search.py | 109 +++++- py/shared/abstractions/user.py | 2 - py/shared/utils/base_utils.py | 30 +- 40 files changed, 1473 insertions(+), 488 deletions(-) create mode 100644 py/core/agent/serper.py create mode 100644 py/core/main/api/templates/log_viewer.html create mode 100644 py/core/main/api/v3/logs_router.py create mode 100644 py/core/utils/logging_config.py diff --git a/js/sdk/package-lock.json b/js/sdk/package-lock.json index 168df2af0..c7db76bbc 100644 --- a/js/sdk/package-lock.json +++ b/js/sdk/package-lock.json @@ -1,6 +1,6 @@ { "name": "r2r-js", - "version": "0.4.3", + "version": "0.4.5", "lockfileVersion": 3, "requires": true, "packages": { diff --git a/js/sdk/package.json b/js/sdk/package.json index 0f88a0bb3..eb8008feb 100644 --- a/js/sdk/package.json +++ b/js/sdk/package.json @@ -1,6 +1,6 @@ { "name": "r2r-js", - "version": "0.4.4", + "version": "0.4.5", "description": "", "main": "dist/index.js", "browser": "dist/index.browser.js", diff --git a/js/sdk/src/models.tsx b/js/sdk/src/models.tsx index f0d467a27..973af86bd 100644 --- a/js/sdk/src/models.tsx +++ b/js/sdk/src/models.tsx @@ -17,80 +17,11 @@ export interface RefreshTokenResponse { }; } -export interface GenerationConfig { - model?: string; - temperature?: number; - topP?: number; - maxTokensToSample?: number; - stream?: boolean; - functions?: Array>; - tools?: Array>; - addGenerationKwargs?: Record; - apiBase?: string; - responseFormat?: string; -} - -export interface HybridSearchSettings { - fullTextWeight: number; - semanticWeight: number; - 
fullTextLimit: number; - rrfK: number; -} - -export interface ChunkSearchSettings { - useVectorSearch?: boolean; - useHybridSearch?: boolean; - filters?: Record; - searchLimit?: number; - offset?: number; - selectedCollectionIds?: string[]; - indexMeasure: IndexMeasure; - includeScores?: boolean; - includeMetadatas?: boolean; - probes?: number; - efSearch?: number; - hybridSearchSettings?: HybridSearchSettings; - searchStrategy?: string; -} - -export interface KGSearchSettings { - useKgSearch?: boolean; - filters?: Record; - selectedCollectionIds?: string[]; - graphragMapSystemPrompt?: string; - kgSearchType?: "local"; - kgSearchLevel?: number | null; - generationConfig?: GenerationConfig; - maxCommunityDescriptionLength?: number; - maxLlmQueriesForGlobalSearch?: number; - localSearchLimits?: Record; -} - export enum KGRunType { ESTIMATE = "estimate", RUN = "run", } -export interface KGCreationSettings { - kgRelationshipsExtractionPrompt?: string; - kgEntityDescriptionPrompt?: string; - forceKgCreation?: boolean; - entityTypes?: string[]; - relationTypes?: string[]; - extractionsMergeCount?: number; - maxKnowledgeRelationships?: number; - maxDescriptionInputLength?: number; - generationConfig?: GenerationConfig; -} - -export interface KGEnrichmentSettings { - forceKgEnrichment?: boolean; - communityReportsPrompt?: string; - maxSummaryInputLength?: number; - generationConfig?: GenerationConfig; - leidenParams?: Record; -} - export interface KGEntityDeduplicationSettings { kgEntityDeduplicationType?: KGEntityDeduplicationType; } @@ -121,12 +52,6 @@ export interface R2RDocumentChunksRequest { documentId: string; } -export enum IndexMeasure { - COSINE_DISTANCE = "cosine_distance", - L2_DISTANCE = "l2_distance", - MAX_INNER_PRODUCT = "max_inner_product", -} - export interface RawChunk { text: string; } diff --git a/js/sdk/src/r2rClient.ts b/js/sdk/src/r2rClient.ts index 3f9ed4bb4..d363c62cb 100644 --- a/js/sdk/src/r2rClient.ts +++ b/js/sdk/src/r2rClient.ts @@ -27,13 +27,8 @@ import { TokenInfo, Message, RefreshTokenResponse, - ChunkSearchSettings, - KGSearchSettings, KGRunType, - KGCreationSettings, - KGEnrichmentSettings, KGEntityDeduplicationSettings, - GenerationConfig, RawChunk, } from "./models"; @@ -1615,7 +1610,7 @@ export class r2rClient extends BaseClient { async createGraph( collection_id?: string, run_type?: KGRunType, - graph_creation_settings?: KGCreationSettings | Record, + graph_creation_settings?: Record, ): Promise> { this._ensureAuthenticated(); @@ -1643,7 +1638,7 @@ export class r2rClient extends BaseClient { async enrichGraph( collection_id?: string, run_type?: KGRunType, - graph_enrichment_settings?: KGEnrichmentSettings | Record, + graph_enrichment_settings?: Record, ): Promise { this._ensureAuthenticated(); @@ -1866,7 +1861,7 @@ export class r2rClient extends BaseClient { @feature("searchDocuments") async searchDocuments( query: string, - vector_search_settings?: ChunkSearchSettings | Record, + vector_search_settings?: Record, ): Promise { this._ensureAuthenticated(); const json_data: Record = { @@ -1894,8 +1889,8 @@ export class r2rClient extends BaseClient { @feature("search") async search( query: string, - vector_search_settings?: ChunkSearchSettings | Record, - graph_search_settings?: KGSearchSettings | Record, + vector_search_settings?: Record, + graph_search_settings?: Record, ): Promise { this._ensureAuthenticated(); @@ -1926,9 +1921,9 @@ export class r2rClient extends BaseClient { @feature("rag") async rag( query: string, - vector_search_settings?: 
ChunkSearchSettings | Record, - graph_search_settings?: KGSearchSettings | Record, - rag_generation_config?: GenerationConfig | Record, + vector_search_settings?: Record, + graph_search_settings?: Record, + rag_generation_config?: Record, task_prompt_override?: string, include_title_if_available?: boolean, ): Promise> { @@ -1986,9 +1981,9 @@ export class r2rClient extends BaseClient { @feature("agent") async agent( messages: Message[], - rag_generation_config?: GenerationConfig | Record, - vector_search_settings?: ChunkSearchSettings | Record, - graph_search_settings?: KGSearchSettings | Record, + rag_generation_config?: Record, + vector_search_settings?: Record, + graph_search_settings?: Record, task_prompt_override?: string, include_title_if_available?: boolean, conversation_id?: string, diff --git a/js/sdk/src/types.ts b/js/sdk/src/types.ts index b1fda1a61..7dee4c40a 100644 --- a/js/sdk/src/types.ts +++ b/js/sdk/src/types.ts @@ -1,9 +1,3 @@ -import { - ChunkSearchSettings, - GenerationConfig, - HybridSearchSettings, -} from "./models"; - export interface UnprocessedChunk { id: string; document_id?: string; @@ -134,6 +128,13 @@ export interface GraphResponse { updated_at: string; } +// Index types +export enum IndexMeasure { + COSINE_DISTANCE = "cosine_distance", + L2_DISTANCE = "l2_distance", + MAX_INNER_PRODUCT = "max_inner_product", +} + // Ingestion types export interface IngestionResponse { message: string; @@ -184,8 +185,41 @@ export interface RelationshipResponse { } // Retrieval types +export interface ChunkSearchSettings { + index_measure?: IndexMeasure; + probes?: number; + ef_search?: number; + enabled?: boolean; +} + +export interface GenerationConfig { + model?: string; + temperature?: number; + top_p?: number; + max_tokens_to_sample?: number; + stream?: boolean; + functions?: Array>; + tools?: Array>; + add_generation_kwargs?: Record; + api_base?: string; + response_format?: string; +} + +export interface HybridSearchSettings { + full_text_weight?: number; + semantic_weight?: number; + full_text_limit?: number; + rrf_k?: number; +} + export interface GraphSearchSettings { generation_config?: GenerationConfig; + graphrag_map_system?: string; + graphrag_reduce_system?: string; + max_community_description_length?: number; + max_llm_queries_for_global_search?: number; + limits?: Record; + enabled?: boolean; } export interface SearchSettings { @@ -196,7 +230,7 @@ export interface SearchSettings { limit?: number; offset?: number; include_metadata?: boolean; - include_scores: boolean; + include_scores?: boolean; search_strategy?: string; hybrid_settings?: HybridSearchSettings; chunk_settings?: ChunkSearchSettings; @@ -212,7 +246,11 @@ export interface VectorSearchResult { metadata?: Record; } -export type KGSearchResultType = "entity" | "relationship" | "community" | "global"; +export type KGSearchResultType = + | "entity" + | "relationship" + | "community" + | "global"; export interface GraphSearchResult { content: any; diff --git a/js/sdk/src/v3/clients/documents.ts b/js/sdk/src/v3/clients/documents.ts index ffc1171e4..f9b5ebd34 100644 --- a/js/sdk/src/v3/clients/documents.ts +++ b/js/sdk/src/v3/clients/documents.ts @@ -46,6 +46,7 @@ export class DocumentsClient { ingestionConfig?: Record; collectionIds?: string[]; runWithOrchestration?: boolean; + ingestionMode?: "hi-res" | "fast" | "custom"; }): Promise { const inputCount = [options.file, options.raw_text, options.chunks].filter( (x) => x !== undefined, @@ -128,6 +129,9 @@ export class DocumentsClient { 
String(options.runWithOrchestration), ); } + if (options.ingestionMode) { + formData.append("ingestion_mode", options.ingestionMode); + } formData.append("file_names", JSON.stringify(processedFiles)); diff --git a/js/sdk/src/v3/clients/retrieval.ts b/js/sdk/src/v3/clients/retrieval.ts index a248bb541..f758bc5ea 100644 --- a/js/sdk/src/v3/clients/retrieval.ts +++ b/js/sdk/src/v3/clients/retrieval.ts @@ -1,13 +1,12 @@ import { r2rClient } from "../../r2rClient"; +import { Message } from "../../models"; +import { feature } from "../../feature"; import { - Message, - ChunkSearchSettings, - KGSearchSettings, + SearchSettings, + WrappedSearchResponse, GenerationConfig, -} from "../../models"; -import { feature } from "../../feature"; -import { SearchSettings, WrappedSearchResponse } from "../../types"; +} from "../../types"; export class RetrievalClient { constructor(private client: r2rClient) {} @@ -23,19 +22,22 @@ export class RetrievalClient { * Allowed operators include: `eq`, `neq`, `gt`, `gte`, `lt`, `lte`, * `like`, `ilike`, `in`, and `nin`. * @param query Search query to find relevant documents - * @param VectorSearchSettings Settings for vector-based search - * @param KGSearchSettings Settings for knowledge graph search + * @param searchSettings Settings for the search * @returns */ @feature("retrieval.search") async search(options: { query: string; + searchMode?: "advanced" | "basic" | "custom"; searchSettings?: SearchSettings | Record; }): Promise { const data = { query: options.query, ...(options.searchSettings && { - searchSettings: options.searchSettings, + search_settings: options.searchSettings, + }), + ...(options.searchMode && { + search_mode: options.searchMode, }), }; @@ -53,9 +55,8 @@ export class RetrievalClient { * * The generation process can be customized using the `rag_generation_config` parameter. 
* @param query + * @param searchSettings Settings for the search * @param ragGenerationConfig Configuration for RAG generation - * @param vectorSearchSettings Settings for vector-based search - * @param kgSearchSettings Settings for knowledge graph search * @param taskPromptOverride Optional custom prompt to override default * @param includeTitleIfAvailable Include document titles in responses when available * @returns @@ -63,28 +64,28 @@ export class RetrievalClient { @feature("retrieval.rag") async rag(options: { query: string; + searchMode?: "advanced" | "basic" | "custom"; + searchSettings?: SearchSettings | Record; ragGenerationConfig?: GenerationConfig | Record; - vectorSearchSettings?: ChunkSearchSettings | Record; - kgSearchSettings?: KGSearchSettings | Record; taskPromptOverride?: string; includeTitleIfAvailable?: boolean; }): Promise> { const data = { query: options.query, - ...(options.vectorSearchSettings && { - vectorSearchSettings: options.vectorSearchSettings, + ...(options.searchMode && { + search_mode: options.searchMode, }), - ...(options.ragGenerationConfig && { - ragGenerationConfig: options.ragGenerationConfig, + ...(options.searchSettings && { + search_settings: options.searchSettings, }), - ...(options.kgSearchSettings && { - kgSearchSettings: options.kgSearchSettings, + ...(options.ragGenerationConfig && { + rag_generation_config: options.ragGenerationConfig, }), ...(options.taskPromptOverride && { - taskPromptOverride: options.taskPromptOverride, + task_prompt_override: options.taskPromptOverride, }), ...(options.includeTitleIfAvailable && { - includeTitleIfAvailable: options.includeTitleIfAvailable, + include_title_if_available: options.includeTitleIfAvailable, }), }; @@ -151,9 +152,8 @@ export class RetrievalClient { * find and synthesize information, providing detailed, factual responses * with proper attribution to source documents. 
* @param message Current message to process + * @param searchSettings Settings for the search * @param ragGenerationConfig Configuration for RAG generation - * @param vectorSearchSettings Settings for vector-based search - * @param kgSearchSettings Settings for knowledge graph search * @param taskPromptOverride Optional custom prompt to override default * @param includeTitleIfAvailable Include document titles in responses when available * @param conversationId ID of the conversation @@ -163,9 +163,9 @@ export class RetrievalClient { @feature("retrieval.agent") async agent(options: { message: Message; + searchMode?: "advanced" | "basic" | "custom"; + searchSettings?: SearchSettings | Record; ragGenerationConfig?: GenerationConfig | Record; - vectorSearchSettings?: ChunkSearchSettings | Record; - kgSearchSettings?: KGSearchSettings | Record; taskPromptOverride?: string; includeTitleIfAvailable?: boolean; conversationId?: string; @@ -173,26 +173,26 @@ export class RetrievalClient { }): Promise> { const data: Record = { message: options.message, - ...(options.vectorSearchSettings && { - vectorSearchSettings: options.vectorSearchSettings, + ...(options.searchMode && { + search_mode: options.searchMode, }), - ...(options.kgSearchSettings && { - kgSearchSettings: options.kgSearchSettings, + ...(options.searchSettings && { + search_settings: options.searchSettings, }), ...(options.ragGenerationConfig && { - ragGenerationConfig: options.ragGenerationConfig, + rag_generation_config: options.ragGenerationConfig, }), ...(options.taskPromptOverride && { - taskPromptOverride: options.taskPromptOverride, + task_prompt_override: options.taskPromptOverride, }), ...(options.includeTitleIfAvailable && { - includeTitleIfAvailable: options.includeTitleIfAvailable, + include_title_if_available: options.includeTitleIfAvailable, }), ...(options.conversationId && { - conversationId: options.conversationId, + conversation_id: options.conversationId, }), ...(options.branchId && { - branchId: options.branchId, + branch_id: options.branchId, }), }; @@ -243,7 +243,7 @@ export class RetrievalClient { const data = { messages: options.messages, ...(options.generationConfig && { - generationConfig: options.generationConfig, + generation_config: options.generationConfig, }), }; diff --git a/py/core/__init__.py b/py/core/__init__.py index 616ebe0ac..18edeb3da 100644 --- a/py/core/__init__.py +++ b/py/core/__init__.py @@ -73,11 +73,13 @@ "Prompt", # Search abstractions "AggregateSearchResult", + "WebSearchResponse", "GraphSearchResult", "ChunkSearchSettings", "GraphSearchSettings", "ChunkSearchResult", "SearchSettings", + "SearchMode", "HybridSearchSettings", # User abstractions "Token", diff --git a/py/core/agent/rag.py b/py/core/agent/rag.py index 403f89aa4..67b8e4632 100644 --- a/py/core/agent/rag.py +++ b/py/core/agent/rag.py @@ -9,6 +9,7 @@ AggregateSearchResult, GraphSearchSettings, SearchSettings, + WebSearchResponse, ) from core.base.agent import AgentConfig, Tool from core.base.providers import CompletionProvider @@ -30,16 +31,18 @@ def _register_tools(self): if not self.config.tool_names: return for tool_name in self.config.tool_names: - if tool_name == "search": - self._tools.append(self.search_tool()) + if tool_name == "local_search": + self._tools.append(self.local_search()) + elif tool_name == "web_search": + self._tools.append(self.web_search()) else: raise ValueError(f"Unsupported tool name: {tool_name}") - def search_tool(self) -> Tool: + def web_search(self) -> Tool: return Tool( - name="search", - 
description="Search for information using the R2R framework", - results_function=self.search, + name="web_search", + description="Search for information on the web.", + results_function=self._web_search, llm_format_function=RAGAgentMixin.format_search_results_for_llm, stream_function=RAGAgentMixin.format_search_results_for_stream, parameters={ @@ -47,14 +50,53 @@ def search_tool(self) -> Tool: "properties": { "query": { "type": "string", - "description": "The query to search the local vector database with.", + "description": "The query to search Google with.", }, }, "required": ["query"], }, ) - async def search( + async def _web_search( + self, + query: str, + search_settings: SearchSettings, + *args, + **kwargs, + ) -> list[AggregateSearchResult]: + from .serper import SerperClient + + serper_client = SerperClient() + # TODO - make async! + # TODO - Move to search pipeline, make configurable. + raw_results = serper_client.get_raw(query) + web_response = WebSearchResponse.from_serper_results(raw_results) + return AggregateSearchResult( + chunk_search_results=None, + graph_search_results=None, + web_search_results=web_response.organic_results, # TODO - How do we feel about throwing away so much info? + ) + + def local_search(self) -> Tool: + return Tool( + name="local_search", + description="Search your local knowledgebase using the R2R AI system", + results_function=self._local_search, + llm_format_function=RAGAgentMixin.format_search_results_for_llm, + stream_function=RAGAgentMixin.format_search_results_for_stream, + parameters={ + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "The query to search the local knowledgebase with.", + }, + }, + "required": ["query"], + }, + ) + + async def _local_search( self, query: str, search_settings: SearchSettings, diff --git a/py/core/agent/serper.py b/py/core/agent/serper.py new file mode 100644 index 000000000..bfed0cad3 --- /dev/null +++ b/py/core/agent/serper.py @@ -0,0 +1,104 @@ +# TODO - relocate to a dedicated module +import http.client +import json +import os + + +# TODO - Move process json to dedicated data processing module +def process_json(json_object, indent=0): + """ + Recursively traverses the JSON object (dicts and lists) to create an unstructured text blob. + """ + text_blob = "" + if isinstance(json_object, dict): + for key, value in json_object.items(): + padding = " " * indent + if isinstance(value, (dict, list)): + text_blob += ( + f"{padding}{key}:\n{process_json(value, indent + 1)}" + ) + else: + text_blob += f"{padding}{key}: {value}\n" + elif isinstance(json_object, list): + for index, item in enumerate(json_object): + padding = " " * indent + if isinstance(item, (dict, list)): + text_blob += f"{padding}Item {index + 1}:\n{process_json(item, indent + 1)}" + else: + text_blob += f"{padding}Item {index + 1}: {item}\n" + return text_blob + + +# TODO - Introduce abstract "Integration" ABC. +class SerperClient: + def __init__(self, api_base: str = "google.serper.dev") -> None: + api_key = os.getenv("SERPER_API_KEY") + if not api_key: + raise ValueError( + "Please set the `SERPER_API_KEY` environment variable to use `SerperClient`." 
+ ) + + self.api_base = api_base + self.headers = { + "X-API-KEY": api_key, + "Content-Type": "application/json", + } + + @staticmethod + def _extract_results(result_data: dict) -> list: + formatted_results = [] + + for key, value in result_data.items(): + # Skip searchParameters as it's not a result entry + if key == "searchParameters": + continue + + # Handle 'answerBox' as a single item + if key == "answerBox": + value["type"] = key # Add the type key to the dictionary + formatted_results.append(value) + # Handle lists of results + elif isinstance(value, list): + for item in value: + item["type"] = key # Add the type key to the dictionary + formatted_results.append(item) + # Handle 'peopleAlsoAsk' and potentially other single item formats + elif isinstance(value, dict): + value["type"] = key # Add the type key to the dictionary + formatted_results.append(value) + + return formatted_results + + # TODO - Add explicit typing for the return value + def get_raw(self, query: str, limit: int = 10) -> list: + connection = http.client.HTTPSConnection(self.api_base) + payload = json.dumps({"q": query, "num_outputs": limit}) + connection.request("POST", "/search", payload, self.headers) + response = connection.getresponse() + data = response.read() + json_data = json.loads(data.decode("utf-8")) + return SerperClient._extract_results(json_data) + + @staticmethod + def construct_context(results: list) -> str: + # Organize results by type + organized_results = {} + for result in results: + result_type = result.metadata.pop( + "type", "Unknown" + ) # Pop the type and use as key + if result_type not in organized_results: + organized_results[result_type] = [result.metadata] + else: + organized_results[result_type].append(result.metadata) + + context = "" + # Iterate over each result type + for result_type, items in organized_results.items(): + context += f"# {result_type} Results:\n" + for index, item in enumerate(items, start=1): + # Process each item under the current type + context += f"Item {index}:\n" + context += process_json(item) + "\n" + + return context diff --git a/py/core/base/__init__.py b/py/core/base/__init__.py index be447e208..e9973bda4 100644 --- a/py/core/base/__init__.py +++ b/py/core/base/__init__.py @@ -46,11 +46,13 @@ "Prompt", # Search abstractions "AggregateSearchResult", + "WebSearchResponse", "GraphSearchResult", "GraphSearchSettings", "ChunkSearchSettings", "ChunkSearchResult", "SearchSettings", + "SearchMode", "HybridSearchSettings", # User abstractions "Token", @@ -116,6 +118,7 @@ "EmbeddingConfig", "EmbeddingProvider", # Ingestion provider + "IngestionMode", "IngestionConfig", "IngestionProvider", "ChunkingStrategy", diff --git a/py/core/base/abstractions/__init__.py b/py/core/base/abstractions/__init__.py index 479772452..d71c267db 100644 --- a/py/core/base/abstractions/__init__.py +++ b/py/core/base/abstractions/__init__.py @@ -62,7 +62,9 @@ KGGlobalResult, KGRelationshipResult, KGSearchResultType, + SearchMode, SearchSettings, + WebSearchResponse, ) from shared.abstractions.user import Token, TokenData, User from shared.abstractions.vector import ( @@ -120,6 +122,7 @@ # Prompt abstractions "Prompt", # Search abstractions + "WebSearchResponse", "AggregateSearchResult", "GraphSearchResult", "KGSearchResultType", @@ -131,6 +134,7 @@ "ChunkSearchSettings", "ChunkSearchResult", "SearchSettings", + "SearchMode", "HybridSearchSettings", # KG abstractions "KGCreationSettings", diff --git a/py/core/base/providers/__init__.py b/py/core/base/providers/__init__.py index 
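The `_web_search` tool defined in rag.py above wires these pieces together: it calls `SerperClient.get_raw` and normalizes the payload with `WebSearchResponse.from_serper_results`, keeping only the organic results. A minimal standalone sketch of that path (the query is illustrative, `SERPER_API_KEY` must be set, and enabling the tool on the agent presumably means listing `"web_search"` next to `"local_search"` in the config's `tool_names`):

```python
import os

from core.agent.serper import SerperClient
from core.base.abstractions import WebSearchResponse

# SerperClient refuses to construct without the API key in the environment.
assert os.getenv("SERPER_API_KEY"), "set SERPER_API_KEY first"

client = SerperClient()
# get_raw() is a blocking HTTP call; the TODOs above note it should become async
# and move into the search pipeline.
raw = client.get_raw("latest work on retrieval-augmented generation", limit=5)

web = WebSearchResponse.from_serper_results(raw)
print(f"{len(web.organic_results)} organic results")
```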
1825ebcc1..9b2e4cdad 100644 --- a/py/core/base/providers/__init__.py +++ b/py/core/base/providers/__init__.py @@ -18,7 +18,12 @@ ) from .email import EmailConfig, EmailProvider from .embedding import EmbeddingConfig, EmbeddingProvider -from .ingestion import ChunkingStrategy, IngestionConfig, IngestionProvider +from .ingestion import ( + ChunkingStrategy, + IngestionConfig, + IngestionMode, + IngestionProvider, +) from .llm import CompletionConfig, CompletionProvider from .orchestration import OrchestrationConfig, OrchestrationProvider, Workflow @@ -31,6 +36,7 @@ "Provider", "ProviderConfig", # Ingestion provider + "IngestionMode", "IngestionConfig", "IngestionProvider", "ChunkingStrategy", diff --git a/py/core/base/providers/ingestion.py b/py/core/base/providers/ingestion.py index 2d6d9947b..a4d27b2de 100644 --- a/py/core/base/providers/ingestion.py +++ b/py/core/base/providers/ingestion.py @@ -1,7 +1,6 @@ import logging from abc import ABC from enum import Enum -from typing import Optional from core.base.abstractions import ChunkEnrichmentSettings @@ -34,6 +33,8 @@ class IngestionConfig(ProviderConfig): chunks_for_document_summary: int = 128 document_summary_model: str = "openai/gpt-4o-mini" + parser_overrides: dict[str, str] = {} + @property def supported_providers(self) -> list[str]: return ["r2r", "unstructured_local", "unstructured_api"] @@ -42,6 +43,21 @@ def validate_config(self) -> None: if self.provider not in self.supported_providers: raise ValueError(f"Provider {self.provider} is not supported.") + @classmethod + def get_default(cls, mode: str, app) -> "IngestionConfig": + """Return default ingestion configuration for a given mode.""" + if mode == "hi-res": + # More thorough parsing, no skipping summaries, possibly larger `chunks_for_document_summary`. + return cls(app=app, parser_overrides={"pdf": "zerox"}) + # elif mode == "fast": + # # Skip summaries and other enrichment steps for speed. + # return cls( + # app=app, + # ) + else: + # For `custom` or any unrecognized mode, return a base config + return cls(app=app) + class IngestionProvider(Provider, ABC): @@ -66,3 +82,9 @@ class ChunkingStrategy(str, Enum): CHARACTER = "character" BASIC = "basic" BY_TITLE = "by_title" + + +class IngestionMode(str, Enum): + hi_res = "hi-res" + fast = "fast" + custom = "custom" diff --git a/py/core/configs/full_local_llm.toml b/py/core/configs/full_local_llm.toml index e85444744..b5329beb6 100644 --- a/py/core/configs/full_local_llm.toml +++ b/py/core/configs/full_local_llm.toml @@ -1,6 +1,6 @@ [agent] system_instruction_name = "rag_agent" -tool_names = ["search"] +tool_names = ["local_search"] [agent.generation_config] model = "ollama/llama3.1" diff --git a/py/core/configs/local_llm.toml b/py/core/configs/local_llm.toml index 9e2c52d3f..496d04ba6 100644 --- a/py/core/configs/local_llm.toml +++ b/py/core/configs/local_llm.toml @@ -1,6 +1,6 @@ [agent] system_instruction_name = "rag_agent" -tool_names = ["search"] +tool_names = ["local_search"] [agent.generation_config] model = "ollama/llama3.1" diff --git a/py/core/main/api/templates/log_viewer.html b/py/core/main/api/templates/log_viewer.html new file mode 100644 index 000000000..a3f9a6d5b --- /dev/null +++ b/py/core/main/api/templates/log_viewer.html @@ -0,0 +1,89 @@ + + + + R2R Log Viewer + + + +

[log_viewer.html template: a simple page titled "R2R Log Viewer" that shows "Connecting to log stream..." until log output starts arriving; the full markup is omitted here.]
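For reference, the stream this page consumes is the `/logs/stream` WebSocket registered further down in logs_router.py: the server sends the full log file once on connect, then only newly appended bytes every half second. Tailing it from outside the browser could look roughly like this (a sketch using the third-party `websockets` package, which this change does not add; the URL assumes the default `serve()` host and port and the `/v3` prefix applied in app.py):

```python
import asyncio

import websockets  # third-party client library, assumed to be installed separately


async def tail_logs() -> None:
    # Full backlog arrives first, then incremental appends roughly twice a second.
    async with websockets.connect("ws://localhost:7272/v3/logs/stream") as ws:
        while True:
            chunk = await ws.recv()
            print(chunk, end="", flush=True)


asyncio.run(tail_logs())
```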
+ + + + + + diff --git a/py/core/main/api/v3/documents_router.py b/py/core/main/api/v3/documents_router.py index 4c2d2c13d..c6ba01cd0 100644 --- a/py/core/main/api/v3/documents_router.py +++ b/py/core/main/api/v3/documents_router.py @@ -12,6 +12,8 @@ from pydantic import Json from core.base import ( + IngestionConfig, + IngestionMode, R2RException, RunType, UnprocessedChunk, @@ -44,6 +46,18 @@ MAX_CHUNKS_PER_REQUEST = 1024 * 100 +def merge_ingestion_config( + base: IngestionConfig, overrides: IngestionConfig +) -> IngestionConfig: + base_dict = base.model_dump() + overrides_dict = overrides.model_dump(exclude_unset=True) + + for k, v in overrides_dict.items(): + base_dict[k] = v + + return IngestionConfig(**base_dict) + + class DocumentsRouter(BaseRouterV3): def __init__( self, @@ -106,6 +120,29 @@ def _register_workflows(self): }, ) + def _prepare_ingestion_config( + self, + ingestion_mode: IngestionMode, + ingestion_config: Optional[IngestionConfig], + ) -> IngestionConfig: + # If not custom, start from defaults + if ingestion_mode != IngestionMode.custom: + effective_config = IngestionConfig.get_default( + ingestion_mode.value, app=self.providers.auth.config.app + ) + if ingestion_config: + effective_config = merge_ingestion_config( + effective_config, ingestion_config + ) + else: + # custom mode + effective_config = ingestion_config or IngestionConfig( + app=self.providers.auth.config.app + ) + + effective_config.validate_config() + return effective_config + def _setup_routes(self): @self.router.post( "/documents", @@ -199,7 +236,18 @@ async def create_document( None, description="Metadata to associate with the document, such as title, description, or custom fields.", ), - ingestion_config: Optional[Json[dict]] = Form( + ingestion_mode: IngestionMode = Form( + default=IngestionMode.custom, + description=( + "Ingestion modes:\n" + "- `hi-res`: Thorough ingestion with full summaries and enrichment.\n" + "- `fast`: Quick ingestion with minimal enrichment and no summaries.\n" + "- `custom`: Full control via `ingestion_config`.\n\n" + "If `filters` or `limit` (in `ingestion_config`) are provided alongside `hi-res` or `fast`, " + "they will override the default settings for that mode." + ), + ), + ingestion_config: Optional[Json[IngestionConfig]] = Form( None, description="An optional dictionary to override the default chunking configuration for the ingestion process. If not provided, the system will use the default server-side chunking configuration.", ), @@ -210,14 +258,23 @@ async def create_document( auth_user=Depends(self.providers.auth.auth_wrapper), ) -> WrappedIngestionResponse: """ - Creates a new Document object from an input file or text content. The document will be processed - to create chunks for vector indexing and search. + Creates a new Document object from an input file, text content, or chunks. The chosen `ingestion_mode` determines + how the ingestion process is configured: + + **Ingestion Modes:** + - `hi-res`: Comprehensive parsing and enrichment, including summaries and possibly more thorough parsing. + - `fast`: Speed-focused ingestion that skips certain enrichment steps like summaries. + - `custom`: Provide a full `ingestion_config` to customize the entire ingestion process. Either a file or text content must be provided, but not both. Documents are shared through `Collections` which allow for tightly specified cross-user interactions. The ingestion process runs asynchronously and its progress can be tracked using the returned task_id. 
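To make the mode/override interplay concrete, here is a sketch of what `_prepare_ingestion_config` does with the helpers defined above (only fields explicitly set on the override survive the merge; `app` stands in for the application config object the router takes from the auth provider):

```python
from core.base import IngestionConfig
from core.main.api.v3.documents_router import merge_ingestion_config

# `app` is assumed to be the already-constructed application config object.
base = IngestionConfig.get_default("hi-res", app=app)        # e.g. parser_overrides={"pdf": "zerox"}
overrides = IngestionConfig(app=app, chunks_for_document_summary=64)

effective = merge_ingestion_config(base, overrides)
# effective.parser_overrides            -> {"pdf": "zerox"}   (kept from the hi-res defaults)
# effective.chunks_for_document_summary -> 64                 (taken from the override)
effective.validate_config()
```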
""" + effective_ingestion_config = self._prepare_ingestion_config( + ingestion_mode=ingestion_mode, + ingestion_config=ingestion_config, + ) if not file and not raw_text and not chunks: raise R2RException( status_code=422, @@ -275,6 +332,7 @@ async def create_document( ], "metadata": metadata, # Base metadata for the document "user": auth_user.model_dump_json(), + "ingestion_config": effective_ingestion_config.model_dump(), } # TODO - Modify create_chunks so that we can add chunks to existing document @@ -347,7 +405,7 @@ async def create_document( "document_id": str(document_id), "collection_ids": collection_ids, "metadata": metadata, - "ingestion_config": ingestion_config, + "ingestion_config": effective_ingestion_config.model_dump(), "user": auth_user.model_dump_json(), "size_in_bytes": content_length, "is_update": False, diff --git a/py/core/main/api/v3/logs_router.py b/py/core/main/api/v3/logs_router.py new file mode 100644 index 000000000..7a6f1aa74 --- /dev/null +++ b/py/core/main/api/v3/logs_router.py @@ -0,0 +1,99 @@ +from fastapi import WebSocket +import contextlib +from fastapi.requests import Request +from fastapi.templating import Jinja2Templates +from pathlib import Path +import asyncio +import logging +import aiofiles + +from core.providers import ( + HatchetOrchestrationProvider, + SimpleOrchestrationProvider, +) +from core.base.logger.base import RunType +from .base_router import BaseRouterV3 + + +class LogsRouter(BaseRouterV3): + def __init__( + self, + providers, + services, + orchestration_provider: ( + HatchetOrchestrationProvider | SimpleOrchestrationProvider + ), + run_type: RunType = RunType.UNSPECIFIED, + ): + super().__init__(providers, services, orchestration_provider, run_type) + CURRENT_DIR = Path(__file__).resolve().parent + TEMPLATES_DIR = CURRENT_DIR.parent / "templates" + self.templates = Jinja2Templates(directory=str(TEMPLATES_DIR)) + self.services = services + self.log_file = Path.cwd() / "logs" / "app.log" + self.log_file.parent.mkdir(exist_ok=True) + if not self.log_file.exists(): + self.log_file.touch(mode=0o666) + + # Start from the beginning of the file + self.last_position = 0 + + async def read_full_file(self) -> str: + """Read the entire log file from the start.""" + if not self.log_file.exists(): + return "Initializing logging system..." + + try: + async with aiofiles.open(self.log_file, mode="r") as f: + # Start at beginning + await f.seek(0) + full_content = await f.read() + # Move last_position to end of file after reading full content + self.last_position = await f.tell() + return full_content + except Exception as e: + logging.error(f"Error reading full logs: {str(e)}") + return f"Error accessing full log file: {str(e)}" + + async def read_new_logs(self) -> str: + """Read new logs appended after last_position.""" + if not self.log_file.exists(): + return "Initializing logging system..." 
+ + try: + async with aiofiles.open(self.log_file, mode="r") as f: + await f.seek(self.last_position) + new_content = await f.read() + self.last_position = await f.tell() + return new_content or "" + except Exception as e: + logging.error(f"Error reading logs: {str(e)}") + return f"Error accessing log file: {str(e)}" + + def _setup_routes(self): + @self.router.websocket("/logs/stream") + async def stream_logs(websocket: WebSocket): + await websocket.accept() + try: + # Send the entire file content upon initial connection + full_logs = await self.read_full_file() + if full_logs: + await websocket.send_text(full_logs) + + # Now send incremental updates only + while True: + new_logs = await self.read_new_logs() + if new_logs: + await websocket.send_text(new_logs) + await asyncio.sleep(0.5) + except Exception as e: + logging.error(f"WebSocket error: {str(e)}") + finally: + with contextlib.suppress(Exception): + await websocket.close() + + @self.router.get("/logs/viewer") + async def get_log_viewer(request: Request): + return self.templates.TemplateResponse( + "log_viewer.html", {"request": request} + ) diff --git a/py/core/main/api/v3/retrieval_router.py b/py/core/main/api/v3/retrieval_router.py index 6b3c0f888..b35829116 100644 --- a/py/core/main/api/v3/retrieval_router.py +++ b/py/core/main/api/v3/retrieval_router.py @@ -11,6 +11,7 @@ GenerationConfig, Message, R2RException, + SearchMode, SearchSettings, ) from core.base.api.models import ( @@ -28,6 +29,22 @@ from .base_router import BaseRouterV3 +def merge_search_settings( + base: SearchSettings, overrides: SearchSettings +) -> SearchSettings: + # Convert both to dict + base_dict = base.model_dump() + overrides_dict = overrides.model_dump(exclude_unset=True) + + # Update base_dict with values from overrides_dict + # This ensures that any field set in overrides takes precedence + for k, v in overrides_dict.items(): + base_dict[k] = v + + # Construct a new SearchSettings from the merged dict + return SearchSettings(**base_dict) + + class RetrievalRouterV3(BaseRouterV3): def __init__( self, @@ -43,6 +60,36 @@ def __init__( def _register_workflows(self): pass + def _prepare_search_settings( + self, + auth_user: Any, + search_mode: SearchMode, + search_settings: Optional[SearchSettings], + ) -> SearchSettings: + """ + Prepare the effective search settings based on the provided search_mode, + optional user-overrides in search_settings, and applied filters. + """ + + if search_mode != SearchMode.custom: + # Start from mode defaults + effective_settings = SearchSettings.get_default(search_mode.value) + if search_settings: + # Merge user-provided overrides + effective_settings = merge_search_settings( + effective_settings, search_settings + ) + else: + # Custom mode: use provided settings or defaults + effective_settings = search_settings or SearchSettings() + + # Apply user-specific filters + effective_settings.filters = self._select_filters( + auth_user, effective_settings + ) + + return effective_settings + def _select_filters( self, auth_user: Any, @@ -96,20 +143,34 @@ def _setup_routes(self): from r2r import R2RClient client = R2RClient("http://localhost:7272") - # when using auth, do client.login(...) + # if using auth, do client.login(...) 
- response =client.retrieval.search( + # Basic mode, no overrides + response = client.retrieval.search( query="Who is Aristotle?", - search_settings: { - filters: {"document_id": {"$eq": "3e157b3a-8469-51db-90d9-52e7d896b49b"}}, - use_semantic_search: true, - chunk_settings: { - limit: 20, # separate limit for chunk vs. graph - enabled: true - }, - graph_settings: { - enabled: true, - }, + search_mode="basic" + ) + + # Advanced mode with overrides + response = client.retrieval.search( + query="Who is Aristotle?", + search_mode="advanced", + search_settings={ + "filters": {"document_id": {"$eq": "3e157b3a-..."}}, + "limit": 5 + } + ) + + # Custom mode with full control + response = client.retrieval.search( + query="Who is Aristotle?", + search_mode="custom", + search_settings={ + "use_semantic_search": True, + "filters": {"category": {"$like": "%philosophy%"}}, + "limit": 20, + "chunk_settings": {"limit": 20}, + "graph_settings": {"enabled": True} } ) """ @@ -185,27 +246,68 @@ async def search_app( ..., description="Search query to find relevant documents", ), - search_settings: SearchSettings = Body( - default_factory=SearchSettings, - description="Settings for vector-based search", + search_mode: SearchMode = Body( + default=SearchMode.custom, + description=( + "Default value of `custom` allows full control over search settings.\n\n" + "Pre-configured search modes:\n" + "`basic`: A simple semantic-based search.\n" + "`advanced`: A more powerful hybrid search combining semantic and full-text.\n" + "`custom`: Full control via `search_settings`.\n\n" + "If `filters` or `limit` are provided alongside `basic` or `advanced`, " + "they will override the default settings for that mode." + ), + ), + search_settings: Optional[SearchSettings] = Body( + None, + description=( + "The search configuration object. If `search_mode` is `custom`, " + "these settings are used as-is. For `basic` or `advanced`, these settings will override the default mode configuration.\n\n" + "Common overrides include `filters` to narrow results and `limit` to control how many results are returned." + ), ), auth_user=Depends(self.providers.auth.auth_wrapper), ) -> WrappedSearchResponse: """ - Perform a search query on the vector database and knowledge graph and any other configured search engines. - - This endpoint allows for complex filtering of search results using PostgreSQL-based queries. - Filters can be applied to various fields such as document_id, and internal metadata values. - - Allowed operators include `eq`, `neq`, `gt`, `gte`, `lt`, `lte`, `like`, `ilike`, `in`, and `nin`. + Perform a search query against vector and/or graph-based databases. + + **Search Modes:** + - `basic`: Defaults to semantic search. Simple and easy to use. + - `advanced`: Combines semantic search with full-text search for more comprehensive results. + - `custom`: Complete control over how search is performed. Provide a full `SearchSettings` object. + + **Filters:** + Apply filters directly inside `search_settings.filters`. For example: + ```json + { + "filters": {"document_id": {"$eq": "3e157b3a-..."}} + } + ``` + Supported operators: `$eq`, `$neq`, `$gt`, `$gte`, `$lt`, `$lte`, `$like`, `$ilike`, `$in`, `$nin`. + + **Limit:** + Control how many results you get by specifying `limit` inside `search_settings`. For example: + ```json + { + "limit": 20 + } + ``` + + **Examples:** + - Using `basic` mode and no overrides: + Just specify `search_mode="basic"`. 
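The remaining bullets below cover the `advanced` and `custom` cases. Server-side, the mode handling reduces to `_prepare_search_settings`, which layers any explicitly set override fields onto the mode defaults; a sketch of that merge in isolation (it assumes `SearchSettings.get_default("advanced")` returns the hybrid-enabled defaults that the mode description above implies):

```python
from core.base import SearchSettings
from core.main.api.v3.retrieval_router import merge_search_settings

base = SearchSettings.get_default("advanced")   # mode defaults, e.g. hybrid semantic + full-text search
overrides = SearchSettings(limit=5)             # only `limit` is explicitly set

effective = merge_search_settings(base, overrides)
# effective keeps every `advanced` default except the overridden field:
#   effective.limit -> 5
#   hybrid settings, strategy, etc. stay at the mode defaults
```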
+ - Using `advanced` mode and applying a filter: + Specify `search_mode="advanced"` and include `search_settings={"filters": {...}, "limit": 5}` to override defaults. + - Using `custom` mode: + Provide the entire `search_settings` to define your search exactly as you want it. """ - search_settings.filters = self._select_filters( - auth_user, search_settings - ) + effective_settings = self._prepare_search_settings( + auth_user, search_mode, search_settings + ) results = await self.services["retrieval"].search( query=query, - search_settings=search_settings, + search_settings=effective_settings, ) return results @@ -323,9 +425,25 @@ async def search_app( @self.base_endpoint async def rag_app( query: str = Body(...), - search_settings: SearchSettings = Body( - default_factory=SearchSettings, - description="Settings for vector-based search", + search_mode: SearchMode = Body( + default=SearchMode.custom, + description=( + "Default value of `custom` allows full control over search settings.\n\n" + "Pre-configured search modes:\n" + "`basic`: A simple semantic-based search.\n" + "`advanced`: A more powerful hybrid search combining semantic and full-text.\n" + "`custom`: Full control via `search_settings`.\n\n" + "If `filters` or `limit` are provided alongside `basic` or `advanced`, " + "they will override the default settings for that mode." + ), + ), + search_settings: Optional[SearchSettings] = Body( + None, + description=( + "The search configuration object. If `search_mode` is `custom`, " + "these settings are used as-is. For `basic` or `advanced`, these settings will override the default mode configuration.\n\n" + "Common overrides include `filters` to narrow results and `limit` to control how many results are returned." + ), ), rag_generation_config: GenerationConfig = Body( default_factory=GenerationConfig, @@ -351,13 +469,13 @@ async def rag_app( The generation process can be customized using the `rag_generation_config` parameter. """ - search_settings.filters = self._select_filters( - auth_user, search_settings + effective_settings = self._prepare_search_settings( + auth_user, search_mode, search_settings ) response = await self.services["retrieval"].rag( query=query, - search_settings=search_settings, + search_settings=effective_settings, rag_generation_config=rag_generation_config, task_prompt_override=task_prompt_override, include_title_if_available=include_title_if_available, @@ -499,9 +617,25 @@ async def agent_app( deprecated=True, description="List of messages (deprecated, use message instead)", ), - search_settings: SearchSettings = Body( - default_factory=SearchSettings, - description="Settings for vector-based search", + search_mode: SearchMode = Body( + default=SearchMode.custom, + description=( + "Default value of `custom` allows full control over search settings.\n\n" + "Pre-configured search modes:\n" + "`basic`: A simple semantic-based search.\n" + "`advanced`: A more powerful hybrid search combining semantic and full-text.\n" + "`custom`: Full control via `search_settings`.\n\n" + "If `filters` or `limit` are provided alongside `basic` or `advanced`, " + "they will override the default settings for that mode." + ), + ), + search_settings: Optional[SearchSettings] = Body( + None, + description=( + "The search configuration object. If `search_mode` is `custom`, " + "these settings are used as-is. 
For `basic` or `advanced`, these settings will override the default mode configuration.\n\n" + "Common overrides include `filters` to narrow results and `limit` to control how many results are returned." + ), ), rag_generation_config: GenerationConfig = Body( default_factory=GenerationConfig, @@ -557,16 +691,15 @@ async def agent_app( information, providing detailed, factual responses with proper attribution to source documents. """ - search_settings.filters = self._select_filters( - auth_user=auth_user, - search_settings=search_settings, + effective_settings = self._prepare_search_settings( + auth_user, search_mode, search_settings ) try: response = await self.services["retrieval"].agent( message=message, messages=messages, - search_settings=search_settings, + search_settings=effective_settings, rag_generation_config=rag_generation_config, task_prompt_override=task_prompt_override, include_title_if_available=include_title_if_available, diff --git a/py/core/main/app.py b/py/core/main/app.py index c86de9ce3..c58c43b1a 100644 --- a/py/core/main/app.py +++ b/py/core/main/app.py @@ -16,6 +16,7 @@ from .api.v3.documents_router import DocumentsRouter from .api.v3.graph_router import GraphRouter from .api.v3.indices_router import IndicesRouter +from .api.v3.logs_router import LogsRouter from .api.v3.prompts_router import PromptsRouter from .api.v3.retrieval_router import RetrievalRouterV3 from .api.v3.system_router import SystemRouter @@ -31,30 +32,32 @@ def __init__( HatchetOrchestrationProvider | SimpleOrchestrationProvider ), auth_router: AuthRouter, - documents_router: DocumentsRouter, chunks_router: ChunksRouter, - indices_router: IndicesRouter, - users_router: UsersRouter, collections_router: CollectionsRouter, conversations_router: ConversationsRouter, + documents_router: DocumentsRouter, + graph_router: GraphRouter, + indices_router: IndicesRouter, + logs_router: LogsRouter, prompts_router: PromptsRouter, retrieval_router_v3: RetrievalRouterV3, system_router: SystemRouter, - graph_router: GraphRouter, + users_router: UsersRouter, ): self.config = config self.auth_router = auth_router - self.orchestration_provider = orchestration_provider - self.documents_router = documents_router self.chunks_router = chunks_router - self.indices_router = indices_router - self.users_router = users_router self.collections_router = collections_router self.conversations_router = conversations_router + self.documents_router = documents_router + self.graph_router = graph_router + self.indices_router = indices_router + self.logs_router = logs_router + self.orchestration_provider = orchestration_provider self.prompts_router = prompts_router self.retrieval_router_v3 = retrieval_router_v3 self.system_router = system_router - self.graph_router = graph_router + self.users_router = users_router self.app = FastAPI() @@ -73,16 +76,17 @@ async def r2r_exception_handler(request: Request, exc: R2RException): def _setup_routes(self): - self.app.include_router(self.documents_router, prefix="/v3") self.app.include_router(self.chunks_router, prefix="/v3") - self.app.include_router(self.indices_router, prefix="/v3") - self.app.include_router(self.users_router, prefix="/v3") self.app.include_router(self.collections_router, prefix="/v3") self.app.include_router(self.conversations_router, prefix="/v3") + self.app.include_router(self.documents_router, prefix="/v3") + self.app.include_router(self.graph_router, prefix="/v3") + self.app.include_router(self.indices_router, prefix="/v3") + self.app.include_router(self.logs_router, 
prefix="/v3") self.app.include_router(self.prompts_router, prefix="/v3") self.app.include_router(self.retrieval_router_v3, prefix="/v3") - self.app.include_router(self.graph_router, prefix="/v3") self.app.include_router(self.system_router, prefix="/v3") + self.app.include_router(self.users_router, prefix="/v3") @self.app.get("/openapi_spec", include_in_schema=False) async def openapi_spec(): @@ -103,10 +107,16 @@ def _apply_cors(self): ) async def serve(self, host: str = "0.0.0.0", port: int = 7272): - # Start the Hatchet worker in a separate thread import uvicorn + from core.utils.logging_config import configure_logging + + configure_logging() - # Run the FastAPI app - config = uvicorn.Config(self.app, host=host, port=port) + config = uvicorn.Config( + self.app, + host=host, + port=port, + log_config=None, + ) server = uvicorn.Server(config) await server.serve() diff --git a/py/core/main/app_entry.py b/py/core/main/app_entry.py index 313f35cd2..380379407 100644 --- a/py/core/main/app_entry.py +++ b/py/core/main/app_entry.py @@ -1,4 +1,5 @@ -import logging +from core.utils.logging_config import configure_logging + import os from contextlib import asynccontextmanager from typing import Optional @@ -12,7 +13,8 @@ from .assembly import R2RBuilder, R2RConfig -logger = logging.getLogger() +logger, log_file = configure_logging() + # Global scheduler scheduler = AsyncIOScheduler() @@ -64,8 +66,6 @@ async def create_r2r_app( return await builder.build() -logging.basicConfig(level=logging.INFO) - config_name = os.getenv("R2R_CONFIG_NAME", None) config_path = os.getenv("R2R_CONFIG_PATH", None) @@ -95,7 +95,10 @@ async def create_r2r_app( ) # Create the FastAPI app -app = FastAPI(lifespan=lifespan) +app = FastAPI( + lifespan=lifespan, + log_config=None, +) @app.exception_handler(R2RException) diff --git a/py/core/main/assembly/builder.py b/py/core/main/assembly/builder.py index 6c2b71936..e50cf2880 100644 --- a/py/core/main/assembly/builder.py +++ b/py/core/main/assembly/builder.py @@ -24,6 +24,7 @@ from ..api.v3.documents_router import DocumentsRouter from ..api.v3.graph_router import GraphRouter from ..api.v3.indices_router import IndicesRouter +from ..api.v3.logs_router import LogsRouter from ..api.v3.prompts_router import PromptsRouter from ..api.v3.retrieval_router import RetrievalRouterV3 from ..api.v3.system_router import SystemRouter @@ -241,32 +242,37 @@ async def build(self, *args, **kwargs) -> R2RApp: services=services, orchestration_provider=orchestration_provider, ).get_router(), - "documents_router": DocumentsRouter( + "chunks_router": ChunksRouter( providers=providers, services=services, orchestration_provider=orchestration_provider, ).get_router(), - "chunks_router": ChunksRouter( + "collections_router": CollectionsRouter( providers=providers, services=services, orchestration_provider=orchestration_provider, ).get_router(), - "indices_router": IndicesRouter( + "conversations_router": ConversationsRouter( providers=providers, services=services, orchestration_provider=orchestration_provider, ).get_router(), - "users_router": UsersRouter( + "documents_router": DocumentsRouter( providers=providers, services=services, orchestration_provider=orchestration_provider, ).get_router(), - "collections_router": CollectionsRouter( + "graph_router": GraphRouter( providers=providers, services=services, orchestration_provider=orchestration_provider, ).get_router(), - "conversations_router": ConversationsRouter( + "indices_router": IndicesRouter( + providers=providers, + services=services, + 
orchestration_provider=orchestration_provider, + ).get_router(), + "logs_router": LogsRouter( providers=providers, services=services, orchestration_provider=orchestration_provider, @@ -286,7 +292,7 @@ async def build(self, *args, **kwargs) -> R2RApp: services=services, orchestration_provider=orchestration_provider, ).get_router(), - "graph_router": GraphRouter( + "users_router": UsersRouter( providers=providers, services=services, orchestration_provider=orchestration_provider, diff --git a/py/core/main/services/retrieval_service.py b/py/core/main/services/retrieval_service.py index 461b6d6ae..3f9ef458d 100644 --- a/py/core/main/services/retrieval_service.py +++ b/py/core/main/services/retrieval_service.py @@ -15,6 +15,7 @@ Message, R2RException, RunManager, + SearchMode, SearchSettings, manage_run, to_async_generator, @@ -270,67 +271,96 @@ async def agent( message="Either message or messages should be provided", ) + # Ensure 'message' is a Message instance + if message and not isinstance(message, Message): + if isinstance(message, dict): + message = Message.from_dict(message) + else: + raise R2RException( + status_code=400, + message="Invalid message format", + ) + + # Ensure 'messages' is a list of Message instances + if messages: + messages = [ + ( + msg + if isinstance(msg, Message) + else Message.from_dict(msg) + ) + for msg in messages + ] + else: + messages = [] + # Transform UUID filters to strings - for filter, value in search_settings.filters.items(): + for filter_key, value in search_settings.filters.items(): if isinstance(value, UUID): - search_settings.filters[filter] = str(value) + search_settings.filters[filter_key] = str(value) - ids = None + ids = [] - if not messages: - if not message: - raise R2RException( - status_code=400, - message="Message not provided", + if conversation_id: + # Fetch existing conversation + conversation = ( + await self.logging_connection.get_conversation( + conversation_id, branch_id ) - # Fetch or create conversation - if conversation_id: - conversation = ( - await self.logging_connection.get_conversation( - conversation_id, branch_id - ) + ) + if not conversation: + logger.error( + f"No conversation found for ID: {conversation_id}" + ) + raise R2RException( + status_code=404, + message=f"Conversation not found: {conversation_id}", ) - if not conversation: + # Assuming 'conversation' is a list of dicts with 'id' and 'message' keys + messages_from_conversation = [] + for resp in conversation: + if isinstance(resp, dict): + msg = Message.from_dict(resp["message"]) + messages_from_conversation.append(msg) + ids.append(resp["id"]) + else: logger.error( - f"No conversation found for ID: {conversation_id}" - ) - raise R2RException( - status_code=404, - message=f"Conversation not found: {conversation_id}", + f"Unexpected type in conversation: {type(resp)}" ) - messages = [resp.message for resp in conversation] + [ # type: ignore - message - ] - ids = [resp.id for resp in conversation] - else: - conversation = ( - await self.logging_connection.create_conversation() - ) - conversation_id = conversation["id"] + messages = messages_from_conversation + messages + else: + # Create new conversation + conversation_id = ( + await self.logging_connection.create_conversation() + ) + ids = [] + # messages already initialized earlier - parent_id = None - if conversation_id and messages: - for inner_message in messages[:-1]: - parent_id = await self.logging_connection.add_message( - conversation_id, # Use the stored conversation_id - inner_message, - parent_id, - ) - 
messages = messages or [] + # Append 'message' to 'messages' if provided + if message: + messages.append(message) - if message and not messages: - messages = [message] + if not messages: + raise R2RException( + status_code=400, + message="No messages to process", + ) - current_message = messages[-1] # type: ignore + current_message = messages[-1] # Save the new message to the conversation - message = await self.logging_connection.add_message( - conversation_id, # type: ignore - current_message, # type: ignore - parent_id=str(ids[-2]) if (ids and len(ids) > 1) else None, # type: ignore + parent_id = ids[-1] if ids else None + + message_response = await self.logging_connection.add_message( + conversation_id, + current_message, + parent_id=parent_id, ) - if message is not None: - message_id = message["id"] # type: ignore + + if message_response is not None: + message_id = message_response["id"] + else: + message_id = None if rag_generation_config.stream: t1 = time.time() @@ -372,9 +402,20 @@ async def stream_response(): *args, **kwargs, ) + + # Save the assistant's reply to the conversation + if isinstance(results[-1], dict): + assistant_message = Message(**results[-1]) + elif isinstance(results[-1], Message): + assistant_message = results[-1] + else: + assistant_message = Message( + role="assistant", content=str(results[-1]) + ) + await self.logging_connection.add_message( conversation_id=conversation_id, - content=Message(**results[-1]), + content=assistant_message, parent_id=message_id, ) @@ -387,7 +428,7 @@ async def stream_response(): value=latency, ) return { - "messages": results, + "messages": [msg.to_dict() for msg in results], "conversation_id": str( conversation_id ), # Ensure it's a string diff --git a/py/core/pipes/retrieval/search_rag_pipe.py b/py/core/pipes/retrieval/search_rag_pipe.py index dd9416cbf..d2b13c820 100644 --- a/py/core/pipes/retrieval/search_rag_pipe.py +++ b/py/core/pipes/retrieval/search_rag_pipe.py @@ -7,7 +7,7 @@ AsyncState, CompletionProvider, DatabaseProvider, - KGSearchResultType + KGSearchResultType, ) from core.base.abstractions import GenerationConfig, RAGCompletion @@ -111,11 +111,13 @@ async def _collect_context( # context += f"Results:\n" if search_result.result_type == KGSearchResultType.ENTITY: context += f"[{it}]: Entity Name - {search_result.content.name}\n\nDescription - {search_result.content.description}\n\n" - elif search_result.result_type == KGSearchResultType.RELATIONSHIP: + elif ( + search_result.result_type + == KGSearchResultType.RELATIONSHIP + ): context += f"[{it}]: Relationship - {search_result.content.subject} - {search_result.content.predicate} - {search_result.content.object}\n\n" else: - context += f"[{it}]: Community Name - {search_result.content.name}\n\nDescription - {search_result.content.summary}\n\n" - + context += f"[{it}]: Community Name - {search_result.content.name}\n\nDescription - {search_result.content.summary}\n\n" it += 1 total_results = ( diff --git a/py/core/utils/logging_config.py b/py/core/utils/logging_config.py new file mode 100644 index 000000000..681220f6d --- /dev/null +++ b/py/core/utils/logging_config.py @@ -0,0 +1,131 @@ +import logging +import sys +from pathlib import Path +import logging.config +import re + + +class HTTPStatusFilter(logging.Filter): + """ + This filter inspects uvicorn.access log records. It uses record.getMessage() to retrieve + the fully formatted log message. 
Then it searches for HTTP status codes and adjusts the + record's log level based on that status: + - 4xx: WARNING + - 5xx: ERROR + All other logs remain unchanged. + """ + + # A broad pattern to find any 3-digit number in the message. + # This should capture the HTTP status code from a line like: + # '127.0.0.1:54946 - "GET /v2/relationships HTTP/1.1" 404' + STATUS_CODE_PATTERN = re.compile(r"\b(\d{3})\b") + + LEVEL_TO_ANSI = { + logging.INFO: "\033[32m", # green + logging.WARNING: "\033[33m", # yellow + logging.ERROR: "\033[31m", # red + } + RESET = "\033[0m" + + def filter(self, record: logging.LogRecord) -> bool: + if record.name != "uvicorn.access": + return True + + message = record.getMessage() + if codes := self.STATUS_CODE_PATTERN.findall(message): + status_code = int(codes[-1]) + if 200 <= status_code < 300: + record.levelno = logging.INFO + record.levelname = "INFO" + color = self.LEVEL_TO_ANSI[logging.INFO] + elif 400 <= status_code < 500: + record.levelno = logging.WARNING + record.levelname = "WARNING" + color = self.LEVEL_TO_ANSI[logging.WARNING] + elif 500 <= status_code < 600: + record.levelno = logging.ERROR + record.levelname = "ERROR" + color = self.LEVEL_TO_ANSI[logging.ERROR] + else: + return True + + # Wrap the status code in ANSI codes + colored_code = f"{color}{status_code}{self.RESET}" + # Replace the status code in the message + new_msg = message.replace(str(status_code), colored_code) + + # Update record.msg and clear args to avoid formatting issues + record.msg = new_msg + record.args = () + + return True + + +def configure_logging(): + log_dir = Path.cwd() / "logs" + log_dir.mkdir(exist_ok=True) + + log_config = { + "version": 1, + "disable_existing_loggers": False, + "filters": {"http_status_filter": {"()": HTTPStatusFilter}}, + "formatters": { + "default": { + "format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s", + "datefmt": "%Y-%m-%d %H:%M:%S", + }, + "colored": { + "()": "colorlog.ColoredFormatter", + "format": "%(asctime)s - %(log_color)s%(levelname)s%(reset)s - %(message)s", + "datefmt": "%Y-%m-%d %H:%M:%S", + "log_colors": { + "DEBUG": "white", + "INFO": "green", + "WARNING": "yellow", + "ERROR": "red", + "CRITICAL": "bold_red", + }, + }, + }, + "handlers": { + "file": { + "class": "logging.handlers.RotatingFileHandler", + "formatter": "colored", + "filename": str(log_dir / "app.log"), + "maxBytes": 10485760, # 10MB + "backupCount": 5, + "filters": ["http_status_filter"], + }, + "console": { + "class": "logging.StreamHandler", + "formatter": "colored", + "stream": sys.stdout, + "filters": ["http_status_filter"], + }, + }, + "loggers": { + "": { # Root logger + "handlers": ["console", "file"], + "level": "INFO", + }, + "uvicorn": { + "handlers": ["console", "file"], + "level": "INFO", + "propagate": False, + }, + "uvicorn.error": { + "handlers": ["console", "file"], + "level": "INFO", + "propagate": False, + }, + "uvicorn.access": { + "handlers": ["console", "file"], + "level": "INFO", + "propagate": False, + }, + }, + } + + logging.config.dictConfig(log_config) + logger = logging.getLogger() + return logger, Path(log_config["handlers"]["file"]["filename"]) diff --git a/py/poetry.lock b/py/poetry.lock index 8ff6cbb25..4bf9ac3f6 100644 --- a/py/poetry.lock +++ b/py/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand. 
[[package]] name = "aiofiles" @@ -238,24 +238,24 @@ files = [ [[package]] name = "anyio" -version = "4.6.2.post1" +version = "4.7.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.9" files = [ - {file = "anyio-4.6.2.post1-py3-none-any.whl", hash = "sha256:6d170c36fba3bdd840c73d3868c1e777e33676a69c3a72cf0a0d5d6d8009b61d"}, - {file = "anyio-4.6.2.post1.tar.gz", hash = "sha256:4c8bc31ccdb51c7f7bd251f51c609e038d63e34219b44aa86e47576389880b4c"}, + {file = "anyio-4.7.0-py3-none-any.whl", hash = "sha256:ea60c3723ab42ba6fff7e8ccb0488c898ec538ff4df1f1d5e642c3601d07e352"}, + {file = "anyio-4.7.0.tar.gz", hash = "sha256:2f834749c602966b7d456a7567cafcb309f96482b5081d14ac93ccd457f9dd48"}, ] [package.dependencies] exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} idna = ">=2.8" sniffio = ">=1.1" -typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} +typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""} [package.extras] -doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21.0b1)"] +doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx_rtd_theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21)"] trio = ["trio (>=0.26.1)"] [[package]] @@ -554,17 +554,17 @@ uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "boto3" -version = "1.35.74" +version = "1.35.76" description = "The AWS SDK for Python" optional = true python-versions = ">=3.8" files = [ - {file = "boto3-1.35.74-py3-none-any.whl", hash = "sha256:dab5bddbbe57dc707b6f6a1f25dc2823b8e234b6fe99fafef7fc406ab73031b9"}, - {file = "boto3-1.35.74.tar.gz", hash = "sha256:88370c6845ba71a4dae7f6b357099df29b3965da584be040c8e72c9902bc9492"}, + {file = "boto3-1.35.76-py3-none-any.whl", hash = "sha256:69458399f41f57a50770c8974796d96978bcca44915c260319696bb43e47dffd"}, + {file = "boto3-1.35.76.tar.gz", hash = "sha256:31ddcdb6f15dace2b68f6a0f11bdb58dd3ae79b8a3ccb174ff811ef0bbf938e0"}, ] [package.dependencies] -botocore = ">=1.35.74,<1.36.0" +botocore = ">=1.35.76,<1.36.0" jmespath = ">=0.7.1,<2.0.0" s3transfer = ">=0.10.0,<0.11.0" @@ -573,13 +573,13 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.35.74" +version = "1.35.76" description = "Low-level, data-driven core of boto 3." 
optional = true python-versions = ">=3.8" files = [ - {file = "botocore-1.35.74-py3-none-any.whl", hash = "sha256:9ac9d33d84dd9f05b35085de081552342a2c9ae22e3c4ee105723c9e92c07bd9"}, - {file = "botocore-1.35.74.tar.gz", hash = "sha256:de5c4fa9a24cef3a758974857b5c5820a12fad345ebf33c052a5988e88f33634"}, + {file = "botocore-1.35.76-py3-none-any.whl", hash = "sha256:b4729d12d00267b3185628f83543917b6caae292385230ab464067621aa086af"}, + {file = "botocore-1.35.76.tar.gz", hash = "sha256:a75a42ae53395796b8300c5fefb2d65a8696dc40dc85e49cf3a769e0c0202b13"}, ] [package.dependencies] @@ -844,6 +844,23 @@ files = [ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] +[[package]] +name = "colorlog" +version = "6.9.0" +description = "Add colours to the output of Python's logging module." +optional = true +python-versions = ">=3.6" +files = [ + {file = "colorlog-6.9.0-py3-none-any.whl", hash = "sha256:5906e71acd67cb07a71e779c47c4bcb45fb8c2993eebe9e5adcd6a6f1b283eff"}, + {file = "colorlog-6.9.0.tar.gz", hash = "sha256:bfba54a1b93b94f54e1f4fe48395725a3d92fd2a4af702f6bd70946bdc0c6ac2"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} + +[package.extras] +development = ["black", "flake8", "mypy", "pytest", "types-colorama"] + [[package]] name = "contourpy" version = "1.3.1" @@ -1274,61 +1291,61 @@ dev = ["black", "mypy", "pre-commit", "pylint", "pytest", "pytest-benchmark", "p [[package]] name = "fonttools" -version = "4.55.1" +version = "4.55.2" description = "Tools to manipulate font files" optional = true python-versions = ">=3.8" files = [ - {file = "fonttools-4.55.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:c17a6f9814f83772cd6d9c9009928e1afa4ab66210a31ced721556651075a9a0"}, - {file = "fonttools-4.55.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c4d14eecc814826a01db87a40af3407c892ba49996bc6e49961e386cd78b537c"}, - {file = "fonttools-4.55.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8589f9a15dc005592b94ecdc45b4dfae9bbe9e73542e89af5a5e776e745db83b"}, - {file = "fonttools-4.55.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfee95bd9395bcd9e6c78955387554335109b6a613db71ef006020b42f761c58"}, - {file = "fonttools-4.55.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:34fa2ecc0bf1923d1a51bf2216a006de2c3c0db02c6aa1470ea50b62b8619bd5"}, - {file = "fonttools-4.55.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:9c1c48483148bfb1b9ad951133ceea957faa004f6cb475b67e7bc75d482b48f8"}, - {file = "fonttools-4.55.1-cp310-cp310-win32.whl", hash = "sha256:3e2fc388ca7d023b3c45badd71016fd4185f93e51a22cfe4bd65378af7fba759"}, - {file = "fonttools-4.55.1-cp310-cp310-win_amd64.whl", hash = "sha256:c4c36c71f69d2b3ee30394b0986e5f8b2c461e7eff48dde49b08a90ded9fcdbd"}, - {file = "fonttools-4.55.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5daab3a55d460577f45bb8f5a8eca01fa6cde43ef2ab943b527991f54b735c41"}, - {file = "fonttools-4.55.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:acf1e80cf96c2fbc79e46f669d8713a9a79faaebcc68e31a9fbe600cf8027992"}, - {file = "fonttools-4.55.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e88a0329f7f88a210f09f79c088fb64f8032fc3ab65e2390a40b7d3a11773026"}, - {file = "fonttools-4.55.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03105b42259a8a94b2f0cbf1bee45f7a8a34e7b26c946a8fb89b4967e44091a8"}, - {file = 
"fonttools-4.55.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9af3577e821649879ab5774ad0e060af34816af556c77c6d3820345d12bf415e"}, - {file = "fonttools-4.55.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:34bd5de3d0ad085359b79a96575cd6bd1bc2976320ef24a2aa152ead36dbf656"}, - {file = "fonttools-4.55.1-cp311-cp311-win32.whl", hash = "sha256:5da92c4b637f0155a41f345fa81143c8e17425260fcb21521cb2ad4d2cea2a95"}, - {file = "fonttools-4.55.1-cp311-cp311-win_amd64.whl", hash = "sha256:f70234253d15f844e6da1178f019a931f03181463ce0c7b19648b8c370527b07"}, - {file = "fonttools-4.55.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9c372e527d58ba64b695f15f8014e97bc8826cf64d3380fc89b4196edd3c0fa8"}, - {file = "fonttools-4.55.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:845a967d3bef3245ba81fb5582dc731f6c2c8417fa211f1068c56893504bc000"}, - {file = "fonttools-4.55.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f03be82bcd4ba4418adf10e6165743f824bb09d6594c2743d7f93ea50968805b"}, - {file = "fonttools-4.55.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c42e935cf146f826f556d977660dac88f2fa3fb2efa27d5636c0b89a60c16edf"}, - {file = "fonttools-4.55.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:96328bf91e05621d8e40d9f854af7a262cb0e8313e9b38e7f3a7f3c4c0caaa8b"}, - {file = "fonttools-4.55.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:291acec4d774e8cd2d8472d88c04643a77a3324a15247951bd6cfc969799b69e"}, - {file = "fonttools-4.55.1-cp312-cp312-win32.whl", hash = "sha256:6d768d6632809aec1c3fa8f195b173386d85602334701a6894a601a4d3c80368"}, - {file = "fonttools-4.55.1-cp312-cp312-win_amd64.whl", hash = "sha256:2a3850afdb0be1f79a1e95340a2059226511675c5b68098d4e49bfbeb48a8aab"}, - {file = "fonttools-4.55.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:0c88d427eaf8bd8497b9051f56e0f5f9fb96a311aa7c72cda35e03e18d59cd16"}, - {file = "fonttools-4.55.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f062c95a725a79fd908fe8407b6ad63e230e1c7d6dece2d5d6ecaf843d6927f6"}, - {file = "fonttools-4.55.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f298c5324c45cad073475146bf560f4110ce2dc2488ff12231a343ec489f77bc"}, - {file = "fonttools-4.55.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f06dbb71344ffd85a6cb7e27970a178952f0bdd8d319ed938e64ba4bcc41700"}, - {file = "fonttools-4.55.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4c46b3525166976f5855b1f039b02433dc51eb635fb54d6a111e0c5d6e6cdc4c"}, - {file = "fonttools-4.55.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:af46f52a21e086a2f89b87bd941c9f0f91e5f769e1a5eb3b37c912228814d3e5"}, - {file = "fonttools-4.55.1-cp313-cp313-win32.whl", hash = "sha256:cd7f36335c5725a3fd724cc667c10c3f5254e779bdc5bffefebb33cf5a75ecb1"}, - {file = "fonttools-4.55.1-cp313-cp313-win_amd64.whl", hash = "sha256:5d6394897710ccac7f74df48492d7f02b9586ff0588c66a2c218844e90534b22"}, - {file = "fonttools-4.55.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:52c4f4b383c56e1a4fe8dab1b63c2269ba9eab0695d2d8e033fa037e61e6f1ef"}, - {file = "fonttools-4.55.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d83892dafdbd62b56545c77b6bd4fa49eef6ec1d6b95e042ee2c930503d1831e"}, - {file = "fonttools-4.55.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:604d5bf16f811fcaaaec2dde139f7ce958462487565edcd54b6fadacb2942083"}, - {file = "fonttools-4.55.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a3324b92feb5fd084923a8e89a8248afd5b9f9d81ab9517d7b07cc84403bd448"}, - {file = "fonttools-4.55.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:30f8b1ca9b919c04850678d026fc330c19acaa9e3b282fcacc09a5eb3c8d20c3"}, - {file = "fonttools-4.55.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:1835c98df2cf28c86a66d234895c87df7b9325fd079a8019c5053a389ff55d23"}, - {file = "fonttools-4.55.1-cp38-cp38-win32.whl", hash = "sha256:9f202703720a7cc0049f2ed1a2047925e264384eb5cc4d34f80200d7b17f1b6a"}, - {file = "fonttools-4.55.1-cp38-cp38-win_amd64.whl", hash = "sha256:2efff20aed0338d37c2ff58766bd67f4b9607ded61cf3d6baf1b3e25ea74e119"}, - {file = "fonttools-4.55.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3032d9bf010c395e6eca2851666cafb1f4ecde85d420188555e928ad0144326e"}, - {file = "fonttools-4.55.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0794055588c30ffe25426048e8a7c0a5271942727cd61fc939391e37f4d580d5"}, - {file = "fonttools-4.55.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:13ba980e3ffd3206b8c63a365f90dc10eeec27da946d5ee5373c3a325a46d77c"}, - {file = "fonttools-4.55.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d7063babd7434a17a5e355e87de9b2306c85a5c19c7da0794be15c58aab0c39"}, - {file = "fonttools-4.55.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ed84c15144015a58ef550dd6312884c9fb31a2dbc31a6467bcdafd63be7db476"}, - {file = "fonttools-4.55.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e89419d88b0bbfdb55209e03a17afa2d20db3c2fa0d785543c9d0875668195d5"}, - {file = "fonttools-4.55.1-cp39-cp39-win32.whl", hash = "sha256:6eb781e401b93cda99356bc043ababead2a5096550984d8a4ecf3d5c9f859dc2"}, - {file = "fonttools-4.55.1-cp39-cp39-win_amd64.whl", hash = "sha256:db1031acf04523c5a51c3e1ae19c21a1c32bc5f820a477dd4659a02f9cb82002"}, - {file = "fonttools-4.55.1-py3-none-any.whl", hash = "sha256:4bcfb11f90f48b48c366dd638d773a52fca0d1b9e056dc01df766bf5835baa08"}, - {file = "fonttools-4.55.1.tar.gz", hash = "sha256:85bb2e985718b0df96afc659abfe194c171726054314b019dbbfed31581673c7"}, + {file = "fonttools-4.55.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:bef0f8603834643b1a6419d57902f18e7d950ec1a998fb70410635c598dc1a1e"}, + {file = "fonttools-4.55.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:944228b86d472612d3b48bcc83b31c25c2271e63fdc74539adfcfa7a96d487fb"}, + {file = "fonttools-4.55.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f0e55f5da594b85f269cfbecd2f6bd3e07d0abba68870bc3f34854de4fa4678"}, + {file = "fonttools-4.55.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b1a6e576db0c83c1b91925bf1363478c4bb968dbe8433147332fb5782ce6190"}, + {file = "fonttools-4.55.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:616368b15716781bc84df5c2191dc0540137aaef56c2771eb4b89b90933f347a"}, + {file = "fonttools-4.55.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7bbae4f3915225c2c37670da68e2bf18a21206060ad31dfb95fec91ef641caa7"}, + {file = "fonttools-4.55.2-cp310-cp310-win32.whl", hash = "sha256:8b02b10648d69d67a7eb055f4d3eedf4a85deb22fb7a19fbd9acbae7c7538199"}, + {file = "fonttools-4.55.2-cp310-cp310-win_amd64.whl", hash = "sha256:bbea0ab841113ac8e8edde067e099b7288ffc6ac2dded538b131c2c0595d5f77"}, + {file = 
"fonttools-4.55.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d34525e8141286fa976e14806639d32294bfb38d28bbdb5f6be9f46a1cd695a6"}, + {file = "fonttools-4.55.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0ecd1c2b1c2ec46bb73685bc5473c72e16ed0930ef79bc2919ccadc43a99fb16"}, + {file = "fonttools-4.55.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9008438ad59e5a8e403a62fbefef2b2ff377eb3857d90a3f2a5f4d674ff441b2"}, + {file = "fonttools-4.55.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:131591ac8d7a47043aaf29581aba755ae151d46e49d2bf49608601efd71e8b4d"}, + {file = "fonttools-4.55.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4c83381c3e3e3d9caa25527c4300543578341f21aae89e4fbbb4debdda8d82a2"}, + {file = "fonttools-4.55.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:42aca564b575252fd9954ed0d91d97a24de24289a16ce8ff74ed0bdf5ecebf11"}, + {file = "fonttools-4.55.2-cp311-cp311-win32.whl", hash = "sha256:c6457f650ebe15baa17fc06e256227f0a47f46f80f27ec5a0b00160de8dc2c13"}, + {file = "fonttools-4.55.2-cp311-cp311-win_amd64.whl", hash = "sha256:5cfa67414d7414442a5635ff634384101c54f53bb7b0e04aa6a61b013fcce194"}, + {file = "fonttools-4.55.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:18f082445b8fe5e91c53e6184f4c1c73f3f965c8bcc614c6cd6effd573ce6c1a"}, + {file = "fonttools-4.55.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:27c0f91adbbd706e8acd1db73e3e510118e62d0ffb651864567dccc5b2339f90"}, + {file = "fonttools-4.55.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d8ccce035320d63dba0c35f52499322f5531dbe85bba1514c7cea26297e4c54"}, + {file = "fonttools-4.55.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96e126df9615df214ec7f04bebcf60076297fbc10b75c777ce58b702d7708ffb"}, + {file = "fonttools-4.55.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:508ebb42956a7a931c4092dfa2d9b4ffd4f94cea09b8211199090d2bd082506b"}, + {file = "fonttools-4.55.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c1b9de46ef7b683d50400abf9f1578eaceee271ff51c36bf4b7366f2be29f498"}, + {file = "fonttools-4.55.2-cp312-cp312-win32.whl", hash = "sha256:2df61d9fc15199cc86dad29f64dd686874a3a52dda0c2d8597d21f509f95c332"}, + {file = "fonttools-4.55.2-cp312-cp312-win_amd64.whl", hash = "sha256:d337ec087da8216a828574aa0525d869df0a2ac217a2efc1890974ddd1fbc5b9"}, + {file = "fonttools-4.55.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:10aff204e2edee1d312fa595c06f201adf8d528a3b659cfb34cd47eceaaa6a26"}, + {file = "fonttools-4.55.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:09fe922a3eff181fd07dd724cdb441fb6b9fc355fd1c0f1aa79aca60faf1fbdd"}, + {file = "fonttools-4.55.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:487e1e8b524143a799bda0169c48b44a23a6027c1bb1957d5a172a7d3a1dd704"}, + {file = "fonttools-4.55.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b1726872e09268bbedb14dc02e58b7ea31ecdd1204c6073eda4911746b44797"}, + {file = "fonttools-4.55.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6fc88cfb58b0cd7b48718c3e61dd0d0a3ee8e2c86b973342967ce09fbf1db6d4"}, + {file = "fonttools-4.55.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e857fe1859901ad8c5cab32e0eebc920adb09f413d2d73b74b677cf47b28590c"}, + {file = "fonttools-4.55.2-cp313-cp313-win32.whl", hash = 
"sha256:81ccd2b3a420b8050c7d9db3be0555d71662973b3ef2a1d921a2880b58957db8"}, + {file = "fonttools-4.55.2-cp313-cp313-win_amd64.whl", hash = "sha256:d559eb1744c7dcfa90ae60cb1a4b3595e898e48f4198738c321468c01180cd83"}, + {file = "fonttools-4.55.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6b5917ef79cac8300b88fd6113003fd01bbbbea2ea060a27b95d8f77cb4c65c2"}, + {file = "fonttools-4.55.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:663eba5615d6abaaf616432354eb7ce951d518e43404371bcc2b0694ef21e8d6"}, + {file = "fonttools-4.55.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:803d5cef5fc47f44f5084d154aa3d6f069bb1b60e32390c225f897fa19b0f939"}, + {file = "fonttools-4.55.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8bc5f100de0173cc39102c0399bd6c3bd544bbdf224957933f10ee442d43cddd"}, + {file = "fonttools-4.55.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:3d9bbc1e380fdaf04ad9eabd8e3e6a4301eaf3487940893e9fd98537ea2e283b"}, + {file = "fonttools-4.55.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:42a9afedff07b6f75aa0f39b5e49922ac764580ef3efce035ca30284b2ee65c8"}, + {file = "fonttools-4.55.2-cp38-cp38-win32.whl", hash = "sha256:f1c76f423f1a241df08f87614364dff6e0b7ce23c962c1b74bd995ec7c0dad13"}, + {file = "fonttools-4.55.2-cp38-cp38-win_amd64.whl", hash = "sha256:25062b6ca03464dd5179fc2040fb19e03391b7cc49b9cc4f879312e638605c5c"}, + {file = "fonttools-4.55.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d1100d8e665fe386a79cab59446992de881ea74d0d6c191bb988642692aa2421"}, + {file = "fonttools-4.55.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dbdc251c5e472e5ae6bc816f9b82718b8e93ff7992e7331d6cf3562b96aa268e"}, + {file = "fonttools-4.55.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0bf24d2b02dbc9376d795a63062632ff73e3e9e60c0229373f500aed7e86dd7"}, + {file = "fonttools-4.55.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4ff250ed4ff05015dfd9cf2adf7570c7a383ca80f4d9732ac484a5ed0d8453c"}, + {file = "fonttools-4.55.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:44cf2a98aa661dbdeb8c03f5e405b074e2935196780bb729888639f5276067d9"}, + {file = "fonttools-4.55.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:22ef222740eb89d189bf0612eb98fbae592c61d7efeac51bfbc2a1592d469557"}, + {file = "fonttools-4.55.2-cp39-cp39-win32.whl", hash = "sha256:93f439ca27e55f585e7aaa04a74990acd983b5f2245e41d6b79f0a8b44e684d8"}, + {file = "fonttools-4.55.2-cp39-cp39-win_amd64.whl", hash = "sha256:627cf10d6f5af5bec6324c18a2670f134c29e1b7dce3fb62e8ef88baa6cba7a9"}, + {file = "fonttools-4.55.2-py3-none-any.whl", hash = "sha256:8e2d89fbe9b08d96e22c7a81ec04a4e8d8439c31223e2dc6f2f9fc8ff14bdf9f"}, + {file = "fonttools-4.55.2.tar.gz", hash = "sha256:45947e7b3f9673f91df125d375eb57b9a23f2a603f438a1aebf3171bffa7a205"}, ] [package.extras] @@ -2380,22 +2397,23 @@ files = [ [[package]] name = "litellm" -version = "1.53.3" +version = "1.53.7" description = "Library to easily interface with LLM API providers" optional = false python-versions = "!=2.7.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,!=3.7.*,>=3.8" files = [ - {file = "litellm-1.53.3-py3-none-any.whl", hash = "sha256:b4bda8efa1d12fe98086c8e84342b92fd499a808017e4d642c43aa784f451b13"}, - {file = "litellm-1.53.3.tar.gz", hash = "sha256:42feb755c8887522ac913a9c04ef2d4242efd821c2ee2d3ad59097047e80f3ca"}, + {file = "litellm-1.53.7-py3-none-any.whl", hash = 
"sha256:f6d58a6bebe8cb530d6e3d45ae6f2f648546687d5fd3eb2e064ac8292b50b9c1"}, + {file = "litellm-1.53.7.tar.gz", hash = "sha256:1b00bb3b7f8f35b0843abc1ced98e7bb0580430ca027f6710128dc1346fb1073"}, ] [package.dependencies] aiohttp = "*" click = "*" +httpx = ">=0.23.0,<0.28.0" importlib-metadata = ">=6.8.0" jinja2 = ">=3.1.2,<4.0.0" jsonschema = ">=4.22.0,<5.0.0" -openai = ">=1.54.0" +openai = ">=1.55.3" pydantic = ">=2.0.0,<3.0.0" python-dotenv = ">=0.2.0" requests = ">=2.31.0,<3.0.0" @@ -3110,13 +3128,13 @@ httpx = ">=0.27.0,<0.28.0" [[package]] name = "openai" -version = "1.56.1" +version = "1.57.0" description = "The official Python library for the openai API" optional = false python-versions = ">=3.8" files = [ - {file = "openai-1.56.1-py3-none-any.whl", hash = "sha256:38e61183c2a98fedebbbb04a909a052d9f897358b070483fc0caff17300a227c"}, - {file = "openai-1.56.1.tar.gz", hash = "sha256:8b0449f22a0c318441eae8a8a789753c3b2cac86542be51ca45df788e26aa180"}, + {file = "openai-1.57.0-py3-none-any.whl", hash = "sha256:972e36960b821797952da3dc4532f486c28e28a2a332d7d0c5407f242e9d9c39"}, + {file = "openai-1.57.0.tar.gz", hash = "sha256:76f91971c4bdbd78380c9970581075e0337b5d497c2fbf7b5255078f4b31abf9"}, ] [package.dependencies] @@ -4515,114 +4533,114 @@ requests = ">=2.0.1,<3.0.0" [[package]] name = "rpds-py" -version = "0.22.1" +version = "0.22.3" description = "Python bindings to Rust's persistent data structures (rpds)" optional = false python-versions = ">=3.9" files = [ - {file = "rpds_py-0.22.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:ab27dd4edd84b13309f268ffcdfc07aef8339135ffab7b6d43f16884307a2a48"}, - {file = "rpds_py-0.22.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9d5b925156a746dc1f5f52376fdd1fbdd3f6ffe1fcd6f5e06f77ca79abb940a3"}, - {file = "rpds_py-0.22.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:201650b309c419143775c15209c620627de3c09a27c7fb58375325aec5cce260"}, - {file = "rpds_py-0.22.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:31264187fc934ff1024a4f56775f33c9252d3f4f3e27ec07d1995a26b52702c3"}, - {file = "rpds_py-0.22.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97c5ffe47ccf92d8b17e10f8a5ce28d015aa1196edc3359684cf31504eae6a14"}, - {file = "rpds_py-0.22.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e9ac7280bd045f472b50306d7efeee051b69e3a2dd1b90f46bd7e86e63b1efa2"}, - {file = "rpds_py-0.22.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f941fb86195f97be7f6efe04a21b223f05dfe4d1dfb159999e2f8d101e44cc4"}, - {file = "rpds_py-0.22.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f91bfc39f7a64168e08ab831fa497ec5438c1d6c6e2f9e12848d95ad11ac8523"}, - {file = "rpds_py-0.22.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:effcae2152afe7937a28376dbabb25c770ef99ed4e16a4ffeb8e6a4f7c4f06aa"}, - {file = "rpds_py-0.22.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:2177e59c033bf0d1bf7de1ced561205963583caf3242c6c700a723034bfb5f8e"}, - {file = "rpds_py-0.22.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:66f4f48a89cdd30ab3a47335df81c76e9a63799d0d84b29c0618371c66fa37b0"}, - {file = "rpds_py-0.22.1-cp310-cp310-win32.whl", hash = "sha256:b07fa9e634234e84096adfa4be3828c8f26e238679c122824b2b3d7131bec578"}, - {file = "rpds_py-0.22.1-cp310-cp310-win_amd64.whl", hash = "sha256:ca4657e9fd0b1b5376942d403d634ce188f79064f0873aa853ab05b10185ceec"}, - {file = 
"rpds_py-0.22.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:608c84699b2db09c6a8743845b1a3dad36fae53eaaecb241d45b13dff74405fb"}, - {file = "rpds_py-0.22.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9dae4eb9b5534e09ba6c6ab496a757e5e394b7e7b08767d25ca37e8d36491114"}, - {file = "rpds_py-0.22.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09a1f000c5f6e08b298275bae00921e9fbbf2a35dae0a86db2821c058c2201a9"}, - {file = "rpds_py-0.22.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:580ccbf11f02f948add4cb641843030a89f1463d7c0740cbfc9aca91e9dc34b3"}, - {file = "rpds_py-0.22.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:96559e05bdf938b2048353e10a7920b98f853cefe4482c2064a718d7d0a50bd7"}, - {file = "rpds_py-0.22.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:128cbaed7ba26116820bcb992405d6a13ea18c8fca1b8c4f59906d858e91e979"}, - {file = "rpds_py-0.22.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:734783dd7da58f76222f458346ddebdb3621686a1a2a667db5049caf0c9956b9"}, - {file = "rpds_py-0.22.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c9ce6b83597d45bec44a2690857ede62fc98223772135f8a7fa90884eb726501"}, - {file = "rpds_py-0.22.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:bca4428c4a957b78ded3e6e62884ab03f029dce8fa8d34818da0f80f61332b49"}, - {file = "rpds_py-0.22.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1ded65691a1d3fd7d2aa89d2c91aa51f941601bb2ce099739909034d957fef4b"}, - {file = "rpds_py-0.22.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:72407065ad459db9f3d052ea8c51e02534f02533fc61e51cbab3bd94166f086c"}, - {file = "rpds_py-0.22.1-cp311-cp311-win32.whl", hash = "sha256:eb013aa01b404219f28dc973d9e6310fd4db216d7299253dd355629952e0564e"}, - {file = "rpds_py-0.22.1-cp311-cp311-win_amd64.whl", hash = "sha256:8bd9ec1db79a664f4cbb12878693b73416f4d2cb425d3e27eccc1bdfbdc826ef"}, - {file = "rpds_py-0.22.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:8ec41049c90d204a6561238a9ad6c7263ebb7009d9759c98b58078d9d2fec9ba"}, - {file = "rpds_py-0.22.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:102be79c4cc47a4aeb5912401185c404cd2601c15a7163bbecff7f1bfe20b669"}, - {file = "rpds_py-0.22.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a603155db408f773637f9e3a712c6e3cbc521aaa8fa2b99f9ba6106c59a2496"}, - {file = "rpds_py-0.22.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5dbff9402c2bdf00bf0df9905694b3c292a3847c725651938a72f554351a5fcb"}, - {file = "rpds_py-0.22.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:96b3759d8ab2323324e0a92b2f44834f9d88089b8d1ab6f533b61f4be3411cef"}, - {file = "rpds_py-0.22.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3029f481b31f329b1fdb4ec4b56935d82210ddd9c6f86ea5a87c06f1e97b161"}, - {file = "rpds_py-0.22.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d280b4bf09f719b89fd9aab3b71067acc0d0449b7d1eba99a2ade4939cef8296"}, - {file = "rpds_py-0.22.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6c8e97e19aa7b0b0d801a159f932ce4435f1049c8c38e2bb372bb5bee559ce50"}, - {file = "rpds_py-0.22.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:50e4b5d291105f7063259fe0125b1af902fb34499444d7c5c521dd8328b00939"}, - {file = "rpds_py-0.22.1-cp312-cp312-musllinux_1_2_i686.whl", hash = 
"sha256:d3777c446bb1c5fcd82dc3f8776e1a146cd91e80cc1892f8634575ace438d22f"}, - {file = "rpds_py-0.22.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:447ae1104fb32197b9262f772d565d38e834cc2e9edd89350b37b88fed636e70"}, - {file = "rpds_py-0.22.1-cp312-cp312-win32.whl", hash = "sha256:55d371b9d8b0c2a68a50413a8cb01c3c3ce1ea4f768bf77b66669a9a486e101e"}, - {file = "rpds_py-0.22.1-cp312-cp312-win_amd64.whl", hash = "sha256:413a30a99d8683dace3765885920ed27ab662efbb6c98d81db76c397ad1ffd71"}, - {file = "rpds_py-0.22.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:aa2ba0176037c915d8660a4e46581d645e2c22b5373e466bc8640a794d45861a"}, - {file = "rpds_py-0.22.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4ba6c66fbc6015b2f99e7176fec41793cecb00c4cc357cad038dff85e6ac42ab"}, - {file = "rpds_py-0.22.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15fa4ca658f8ad22645d3531682b17e5580832efbfa87304c3e62214c79c1e8a"}, - {file = "rpds_py-0.22.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d7833ef6f5d6cb634f296abfd93452fb3eb44c4e9a6ae95c1021eab704c1cee2"}, - {file = "rpds_py-0.22.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c0467838c90435b80793cde486a318fc916ee57f2af54e4b10c72b20cbdcbaa9"}, - {file = "rpds_py-0.22.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d962e2e89b3a95e3597a34b8c93ced1e98958502c5b8096c9fd69deff279f561"}, - {file = "rpds_py-0.22.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ce729f1dc8a4a190c34b69f75377bddc004079b2963ab722ab91fafe040be6d"}, - {file = "rpds_py-0.22.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8080467df22feca0fc9c46567001777c6fbc2b4a2683a7137420896051874ca1"}, - {file = "rpds_py-0.22.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0f9eb37d3a60b262a98ab51ee899cac039de9ca0ce68dcf1a6518a09719020b0"}, - {file = "rpds_py-0.22.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:153248f48d6f90a295a502f53ec544a3ffbd21b0bb32f5dca39c4b93a764d6a2"}, - {file = "rpds_py-0.22.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0a53592cdf98cec3dfcdb24ffec8a4797e7656b65700099af43ec7df023b6de4"}, - {file = "rpds_py-0.22.1-cp313-cp313-win32.whl", hash = "sha256:e8056adcefa2dcb67e8bc91ea5eee26df66e8b297a8cd6ff0903f85c70908fa0"}, - {file = "rpds_py-0.22.1-cp313-cp313-win_amd64.whl", hash = "sha256:a451dba533be77454ebcffc85189108fc05f279100835ac76e7989edacb89156"}, - {file = "rpds_py-0.22.1-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:2ea23f1525d4f64286dbe0947c929d45c3ffe963b2dbed1d3844a2e4938bda42"}, - {file = "rpds_py-0.22.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3aaa22487477de9618ce3b37f99fbe81219ba96f3c2ca84f576f0ab451b83aba"}, - {file = "rpds_py-0.22.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8954b9ffe60f479a0c0ba40987db2546c735ab02a725ea7fd89342152d4d821d"}, - {file = "rpds_py-0.22.1-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c8502a02ae3ae67084f5a0bf5a8253b19fa7a887f824e41e016cdb0ac532a06f"}, - {file = "rpds_py-0.22.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a083221b6a4ecdef38a60c95d8d3223d99449cb4da2544e9644958dc16664eb9"}, - {file = "rpds_py-0.22.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:542eb246d5be31b5e0a9c8ddb9539416f9b31f58f75bd4ee328bff2b5c58d6fd"}, - {file = 
"rpds_py-0.22.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffae97d28ea4f2c613a751d087b75a97fb78311b38cc2e9a2f4587e473ace167"}, - {file = "rpds_py-0.22.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0ff8d5b13ce2357fa8b33a0a2e3775aa71df5bf7c8ba060634c9d15ab12f357"}, - {file = "rpds_py-0.22.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:0f057a0c546c42964836b209d8de9ea1a4f4b0432006c6343cbe633d8ca14571"}, - {file = "rpds_py-0.22.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:48ee97c7c6027fd423058675b5a39d0b5f7a1648250b671563d5c9f74ff13ff0"}, - {file = "rpds_py-0.22.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:babec324e8654a59122aaa66936a9a483faa03276db9792f51332475c2dddc4a"}, - {file = "rpds_py-0.22.1-cp313-cp313t-win32.whl", hash = "sha256:e69acdbc132c9592c8dc393af85e38e206ca847c7019a953ff625191c3a12312"}, - {file = "rpds_py-0.22.1-cp313-cp313t-win_amd64.whl", hash = "sha256:c783e4ed68200f4e03c125690d23158b1c49c4b186d458a18debc109bbdc3c2e"}, - {file = "rpds_py-0.22.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:2143c3aed85992604d758bbe67da839fb4aab3dd2e1c6dddab5b3ca7162b34a2"}, - {file = "rpds_py-0.22.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f57e2d0f8022783426121b586d7c842ea40ea832a29e28ca36c881b54c74fb28"}, - {file = "rpds_py-0.22.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c0c324879d483504b07f7b18eb1b50567c434263bbe4866ecce33056162668a"}, - {file = "rpds_py-0.22.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1c40e02cc4f3e18fd39344edb10eebe04bd11cfd13119606b5771e5ea51630d3"}, - {file = "rpds_py-0.22.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f76c6f319e57007ad52e671ec741d801324760a377e3d4992c9bb8200333ebac"}, - {file = "rpds_py-0.22.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f5cae9b415ea8a6a563566dbf46650222eccc5971c7daa16fbee63aef92ae543"}, - {file = "rpds_py-0.22.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b09209cdfcacf5eba9cf80367130532e6c02e695252e1f64d3cfcc2356e6e19f"}, - {file = "rpds_py-0.22.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dbe428d0ac6eacaf05402adbaf137f59ad6063848182d1ff294f95ce0f24005b"}, - {file = "rpds_py-0.22.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:626b9feb01bff049a5aec4804f0c58db12585778b4902e5376a95b01f80a7a16"}, - {file = "rpds_py-0.22.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ec1ccc2a9f764cd632fb8ab28fdde166250df54fc8d97315a4a6948dc5367639"}, - {file = "rpds_py-0.22.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ef92b1fbe6aa2e7885eb90853cc016b1fc95439a8cc8da6d526880e9e2148695"}, - {file = "rpds_py-0.22.1-cp39-cp39-win32.whl", hash = "sha256:c88535f83f7391cf3a45af990237e3939a6fdfbedaed2571633bfdd0bceb36b0"}, - {file = "rpds_py-0.22.1-cp39-cp39-win_amd64.whl", hash = "sha256:7839b7528faa4d134c183b1f2dd1ee4dc2ca2f899f4f0cfdf00fc04c255262a7"}, - {file = "rpds_py-0.22.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a0ed14a4162c2c2b21a162c9fcf90057e3e7da18cd171ab344c1e1664f75090e"}, - {file = "rpds_py-0.22.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:05fdeae9010533e47715c37df83264df0122584e40d691d50cf3607c060952a3"}, - {file = "rpds_py-0.22.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4659b2e4a5008715099e216050f5c6976e5a4329482664411789968b82e3f17d"}, - {file = 
"rpds_py-0.22.1-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a18aedc032d6468b73ebbe4437129cb30d54fe543cde2f23671ecad76c3aea24"}, - {file = "rpds_py-0.22.1-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:149b4d875ef9b12a8f5e303e86a32a58f8ef627e57ec97a7d0e4be819069d141"}, - {file = "rpds_py-0.22.1-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fdaee3947eaaa52dae3ceb9d9f66329e13d8bae35682b1e5dd54612938693934"}, - {file = "rpds_py-0.22.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36ce951800ed2acc6772fd9f42150f29d567f0423989748052fdb39d9e2b5795"}, - {file = "rpds_py-0.22.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ab784621d3e2a41916e21f13a483602cc989fd45fff637634b9231ba43d4383b"}, - {file = "rpds_py-0.22.1-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:c2a214bf5b79bd39a9de1c991353aaaacafda83ba1374178309e92be8e67d411"}, - {file = "rpds_py-0.22.1-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:85060e96953647871957d41707adb8d7bff4e977042fd0deb4fc1881b98dd2fe"}, - {file = "rpds_py-0.22.1-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:c6f3fd617db422c9d4e12cb8d84c984fe07d6d9cb0950cbf117f3bccc6268d05"}, - {file = "rpds_py-0.22.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f2d1b58a0c3a73f0361759642e80260a6d28eee6501b40fe25b82af33ef83f21"}, - {file = "rpds_py-0.22.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:76eaa4c087a061a2c8a0a92536405069878a8f530c00e84a9eaf332e70f5561f"}, - {file = "rpds_py-0.22.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:959ae04ed30cde606f3a0320f0a1f4167a107e685ef5209cce28c5080590bd31"}, - {file = "rpds_py-0.22.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:198067aa6f3d942ff5d0d655bb1e91b59ae85279d47590682cba2834ac1b97d2"}, - {file = "rpds_py-0.22.1-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3e7e99e2af59c56c59b6c964d612511b8203480d39d1ef83edc56f2cb42a3f5d"}, - {file = "rpds_py-0.22.1-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0545928bdf53dfdfcab284468212efefb8a6608ca3b6910c7fb2e5ed8bdc2dc0"}, - {file = "rpds_py-0.22.1-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ef7282d8a14b60dd515e47060638687710b1d518f4b5e961caad43fb3a3606f9"}, - {file = "rpds_py-0.22.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe3f245c2f39a5692d9123c174bc48f6f9fe3e96407e67c6d04541a767d99e72"}, - {file = "rpds_py-0.22.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efb2ad60ca8637d5f9f653f9a9a8d73964059972b6b95036be77e028bffc68a3"}, - {file = "rpds_py-0.22.1-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:d8306f27418361b788e3fca9f47dec125457f80122e7e31ba7ff5cdba98343f8"}, - {file = "rpds_py-0.22.1-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:4c8dc7331e8cbb1c0ea2bcb550adb1777365944ffd125c69aa1117fdef4887f5"}, - {file = "rpds_py-0.22.1-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:776a06cb5720556a549829896a49acebb5bdd96c7bba100191a994053546975a"}, - {file = "rpds_py-0.22.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:e4f91d702b9ce1388660b3d4a28aa552614a1399e93f718ed0dacd68f23b3d32"}, - {file = "rpds_py-0.22.1.tar.gz", hash = "sha256:157a023bded0618a1eea54979fe2e0f9309e9ddc818ef4b8fc3b884ff38fedd5"}, + {file = 
"rpds_py-0.22.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:6c7b99ca52c2c1752b544e310101b98a659b720b21db00e65edca34483259967"}, + {file = "rpds_py-0.22.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:be2eb3f2495ba669d2a985f9b426c1797b7d48d6963899276d22f23e33d47e37"}, + {file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70eb60b3ae9245ddea20f8a4190bd79c705a22f8028aaf8bbdebe4716c3fab24"}, + {file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4041711832360a9b75cfb11b25a6a97c8fb49c07b8bd43d0d02b45d0b499a4ff"}, + {file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:64607d4cbf1b7e3c3c8a14948b99345eda0e161b852e122c6bb71aab6d1d798c"}, + {file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e69b0a0e2537f26d73b4e43ad7bc8c8efb39621639b4434b76a3de50c6966e"}, + {file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc27863442d388870c1809a87507727b799c8460573cfbb6dc0eeaef5a11b5ec"}, + {file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e79dd39f1e8c3504be0607e5fc6e86bb60fe3584bec8b782578c3b0fde8d932c"}, + {file = "rpds_py-0.22.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e0fa2d4ec53dc51cf7d3bb22e0aa0143966119f42a0c3e4998293a3dd2856b09"}, + {file = "rpds_py-0.22.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fda7cb070f442bf80b642cd56483b5548e43d366fe3f39b98e67cce780cded00"}, + {file = "rpds_py-0.22.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cff63a0272fcd259dcc3be1657b07c929c466b067ceb1c20060e8d10af56f5bf"}, + {file = "rpds_py-0.22.3-cp310-cp310-win32.whl", hash = "sha256:9bd7228827ec7bb817089e2eb301d907c0d9827a9e558f22f762bb690b131652"}, + {file = "rpds_py-0.22.3-cp310-cp310-win_amd64.whl", hash = "sha256:9beeb01d8c190d7581a4d59522cd3d4b6887040dcfc744af99aa59fef3e041a8"}, + {file = "rpds_py-0.22.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d20cfb4e099748ea39e6f7b16c91ab057989712d31761d3300d43134e26e165f"}, + {file = "rpds_py-0.22.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:68049202f67380ff9aa52f12e92b1c30115f32e6895cd7198fa2a7961621fc5a"}, + {file = "rpds_py-0.22.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb4f868f712b2dd4bcc538b0a0c1f63a2b1d584c925e69a224d759e7070a12d5"}, + {file = "rpds_py-0.22.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bc51abd01f08117283c5ebf64844a35144a0843ff7b2983e0648e4d3d9f10dbb"}, + {file = "rpds_py-0.22.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0f3cec041684de9a4684b1572fe28c7267410e02450f4561700ca5a3bc6695a2"}, + {file = "rpds_py-0.22.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7ef9d9da710be50ff6809fed8f1963fecdfecc8b86656cadfca3bc24289414b0"}, + {file = "rpds_py-0.22.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59f4a79c19232a5774aee369a0c296712ad0e77f24e62cad53160312b1c1eaa1"}, + {file = "rpds_py-0.22.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1a60bce91f81ddaac922a40bbb571a12c1070cb20ebd6d49c48e0b101d87300d"}, + {file = "rpds_py-0.22.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e89391e6d60251560f0a8f4bd32137b077a80d9b7dbe6d5cab1cd80d2746f648"}, + {file = "rpds_py-0.22.3-cp311-cp311-musllinux_1_2_i686.whl", hash = 
"sha256:e3fb866d9932a3d7d0c82da76d816996d1667c44891bd861a0f97ba27e84fc74"}, + {file = "rpds_py-0.22.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1352ae4f7c717ae8cba93421a63373e582d19d55d2ee2cbb184344c82d2ae55a"}, + {file = "rpds_py-0.22.3-cp311-cp311-win32.whl", hash = "sha256:b0b4136a252cadfa1adb705bb81524eee47d9f6aab4f2ee4fa1e9d3cd4581f64"}, + {file = "rpds_py-0.22.3-cp311-cp311-win_amd64.whl", hash = "sha256:8bd7c8cfc0b8247c8799080fbff54e0b9619e17cdfeb0478ba7295d43f635d7c"}, + {file = "rpds_py-0.22.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:27e98004595899949bd7a7b34e91fa7c44d7a97c40fcaf1d874168bb652ec67e"}, + {file = "rpds_py-0.22.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1978d0021e943aae58b9b0b196fb4895a25cc53d3956b8e35e0b7682eefb6d56"}, + {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:655ca44a831ecb238d124e0402d98f6212ac527a0ba6c55ca26f616604e60a45"}, + {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:feea821ee2a9273771bae61194004ee2fc33f8ec7db08117ef9147d4bbcbca8e"}, + {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:22bebe05a9ffc70ebfa127efbc429bc26ec9e9b4ee4d15a740033efda515cf3d"}, + {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3af6e48651c4e0d2d166dc1b033b7042ea3f871504b6805ba5f4fe31581d8d38"}, + {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e67ba3c290821343c192f7eae1d8fd5999ca2dc99994114643e2f2d3e6138b15"}, + {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:02fbb9c288ae08bcb34fb41d516d5eeb0455ac35b5512d03181d755d80810059"}, + {file = "rpds_py-0.22.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f56a6b404f74ab372da986d240e2e002769a7d7102cc73eb238a4f72eec5284e"}, + {file = "rpds_py-0.22.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0a0461200769ab3b9ab7e513f6013b7a97fdeee41c29b9db343f3c5a8e2b9e61"}, + {file = "rpds_py-0.22.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8633e471c6207a039eff6aa116e35f69f3156b3989ea3e2d755f7bc41754a4a7"}, + {file = "rpds_py-0.22.3-cp312-cp312-win32.whl", hash = "sha256:593eba61ba0c3baae5bc9be2f5232430453fb4432048de28399ca7376de9c627"}, + {file = "rpds_py-0.22.3-cp312-cp312-win_amd64.whl", hash = "sha256:d115bffdd417c6d806ea9069237a4ae02f513b778e3789a359bc5856e0404cc4"}, + {file = "rpds_py-0.22.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:ea7433ce7e4bfc3a85654aeb6747babe3f66eaf9a1d0c1e7a4435bbdf27fea84"}, + {file = "rpds_py-0.22.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6dd9412824c4ce1aca56c47b0991e65bebb7ac3f4edccfd3f156150c96a7bf25"}, + {file = "rpds_py-0.22.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20070c65396f7373f5df4005862fa162db5d25d56150bddd0b3e8214e8ef45b4"}, + {file = "rpds_py-0.22.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0b09865a9abc0ddff4e50b5ef65467cd94176bf1e0004184eb915cbc10fc05c5"}, + {file = "rpds_py-0.22.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3453e8d41fe5f17d1f8e9c383a7473cd46a63661628ec58e07777c2fff7196dc"}, + {file = "rpds_py-0.22.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f5d36399a1b96e1a5fdc91e0522544580dbebeb1f77f27b2b0ab25559e103b8b"}, + {file = 
"rpds_py-0.22.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:009de23c9c9ee54bf11303a966edf4d9087cd43a6003672e6aa7def643d06518"}, + {file = "rpds_py-0.22.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1aef18820ef3e4587ebe8b3bc9ba6e55892a6d7b93bac6d29d9f631a3b4befbd"}, + {file = "rpds_py-0.22.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f60bd8423be1d9d833f230fdbccf8f57af322d96bcad6599e5a771b151398eb2"}, + {file = "rpds_py-0.22.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:62d9cfcf4948683a18a9aff0ab7e1474d407b7bab2ca03116109f8464698ab16"}, + {file = "rpds_py-0.22.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9253fc214112405f0afa7db88739294295f0e08466987f1d70e29930262b4c8f"}, + {file = "rpds_py-0.22.3-cp313-cp313-win32.whl", hash = "sha256:fb0ba113b4983beac1a2eb16faffd76cb41e176bf58c4afe3e14b9c681f702de"}, + {file = "rpds_py-0.22.3-cp313-cp313-win_amd64.whl", hash = "sha256:c58e2339def52ef6b71b8f36d13c3688ea23fa093353f3a4fee2556e62086ec9"}, + {file = "rpds_py-0.22.3-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:f82a116a1d03628a8ace4859556fb39fd1424c933341a08ea3ed6de1edb0283b"}, + {file = "rpds_py-0.22.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3dfcbc95bd7992b16f3f7ba05af8a64ca694331bd24f9157b49dadeeb287493b"}, + {file = "rpds_py-0.22.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59259dc58e57b10e7e18ce02c311804c10c5a793e6568f8af4dead03264584d1"}, + {file = "rpds_py-0.22.3-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5725dd9cc02068996d4438d397e255dcb1df776b7ceea3b9cb972bdb11260a83"}, + {file = "rpds_py-0.22.3-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99b37292234e61325e7a5bb9689e55e48c3f5f603af88b1642666277a81f1fbd"}, + {file = "rpds_py-0.22.3-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:27b1d3b3915a99208fee9ab092b8184c420f2905b7d7feb4aeb5e4a9c509b8a1"}, + {file = "rpds_py-0.22.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f612463ac081803f243ff13cccc648578e2279295048f2a8d5eb430af2bae6e3"}, + {file = "rpds_py-0.22.3-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f73d3fef726b3243a811121de45193c0ca75f6407fe66f3f4e183c983573e130"}, + {file = "rpds_py-0.22.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:3f21f0495edea7fdbaaa87e633a8689cd285f8f4af5c869f27bc8074638ad69c"}, + {file = "rpds_py-0.22.3-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:1e9663daaf7a63ceccbbb8e3808fe90415b0757e2abddbfc2e06c857bf8c5e2b"}, + {file = "rpds_py-0.22.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:a76e42402542b1fae59798fab64432b2d015ab9d0c8c47ba7addddbaf7952333"}, + {file = "rpds_py-0.22.3-cp313-cp313t-win32.whl", hash = "sha256:69803198097467ee7282750acb507fba35ca22cc3b85f16cf45fb01cb9097730"}, + {file = "rpds_py-0.22.3-cp313-cp313t-win_amd64.whl", hash = "sha256:f5cf2a0c2bdadf3791b5c205d55a37a54025c6e18a71c71f82bb536cf9a454bf"}, + {file = "rpds_py-0.22.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:378753b4a4de2a7b34063d6f95ae81bfa7b15f2c1a04a9518e8644e81807ebea"}, + {file = "rpds_py-0.22.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3445e07bf2e8ecfeef6ef67ac83de670358abf2996916039b16a218e3d95e97e"}, + {file = "rpds_py-0.22.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b2513ba235829860b13faa931f3b6846548021846ac808455301c23a101689d"}, + {file = 
"rpds_py-0.22.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eaf16ae9ae519a0e237a0f528fd9f0197b9bb70f40263ee57ae53c2b8d48aeb3"}, + {file = "rpds_py-0.22.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:583f6a1993ca3369e0f80ba99d796d8e6b1a3a2a442dd4e1a79e652116413091"}, + {file = "rpds_py-0.22.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4617e1915a539a0d9a9567795023de41a87106522ff83fbfaf1f6baf8e85437e"}, + {file = "rpds_py-0.22.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c150c7a61ed4a4f4955a96626574e9baf1adf772c2fb61ef6a5027e52803543"}, + {file = "rpds_py-0.22.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2fa4331c200c2521512595253f5bb70858b90f750d39b8cbfd67465f8d1b596d"}, + {file = "rpds_py-0.22.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:214b7a953d73b5e87f0ebece4a32a5bd83c60a3ecc9d4ec8f1dca968a2d91e99"}, + {file = "rpds_py-0.22.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:f47ad3d5f3258bd7058d2d506852217865afefe6153a36eb4b6928758041d831"}, + {file = "rpds_py-0.22.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:f276b245347e6e36526cbd4a266a417796fc531ddf391e43574cf6466c492520"}, + {file = "rpds_py-0.22.3-cp39-cp39-win32.whl", hash = "sha256:bbb232860e3d03d544bc03ac57855cd82ddf19c7a07651a7c0fdb95e9efea8b9"}, + {file = "rpds_py-0.22.3-cp39-cp39-win_amd64.whl", hash = "sha256:cfbc454a2880389dbb9b5b398e50d439e2e58669160f27b60e5eca11f68ae17c"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:d48424e39c2611ee1b84ad0f44fb3b2b53d473e65de061e3f460fc0be5f1939d"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:24e8abb5878e250f2eb0d7859a8e561846f98910326d06c0d51381fed59357bd"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b232061ca880db21fa14defe219840ad9b74b6158adb52ddf0e87bead9e8493"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac0a03221cdb5058ce0167ecc92a8c89e8d0decdc9e99a2ec23380793c4dcb96"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb0c341fa71df5a4595f9501df4ac5abfb5a09580081dffbd1ddd4654e6e9123"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bf9db5488121b596dbfc6718c76092fda77b703c1f7533a226a5a9f65248f8ad"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b8db6b5b2d4491ad5b6bdc2bc7c017eec108acbf4e6785f42a9eb0ba234f4c9"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b3d504047aba448d70cf6fa22e06cb09f7cbd761939fdd47604f5e007675c24e"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:e61b02c3f7a1e0b75e20c3978f7135fd13cb6cf551bf4a6d29b999a88830a338"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:e35ba67d65d49080e8e5a1dd40101fccdd9798adb9b050ff670b7d74fa41c566"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:26fd7cac7dd51011a245f29a2cc6489c4608b5a8ce8d75661bb4a1066c52dfbe"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:177c7c0fce2855833819c98e43c262007f42ce86651ffbb84f37883308cb0e7d"}, + {file = 
"rpds_py-0.22.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:bb47271f60660803ad11f4c61b42242b8c1312a31c98c578f79ef9387bbde21c"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:70fb28128acbfd264eda9bf47015537ba3fe86e40d046eb2963d75024be4d055"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44d61b4b7d0c2c9ac019c314e52d7cbda0ae31078aabd0f22e583af3e0d79723"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f0e260eaf54380380ac3808aa4ebe2d8ca28b9087cf411649f96bad6900c728"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b25bc607423935079e05619d7de556c91fb6adeae9d5f80868dde3468657994b"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fb6116dfb8d1925cbdb52595560584db42a7f664617a1f7d7f6e32f138cdf37d"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a63cbdd98acef6570c62b92a1e43266f9e8b21e699c363c0fef13bd530799c11"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2b8f60e1b739a74bab7e01fcbe3dddd4657ec685caa04681df9d562ef15b625f"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:2e8b55d8517a2fda8d95cb45d62a5a8bbf9dd0ad39c5b25c8833efea07b880ca"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:2de29005e11637e7a2361fa151f780ff8eb2543a0da1413bb951e9f14b699ef3"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:666ecce376999bf619756a24ce15bb14c5bfaf04bf00abc7e663ce17c3f34fe7"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:5246b14ca64a8675e0a7161f7af68fe3e910e6b90542b4bfb5439ba752191df6"}, + {file = "rpds_py-0.22.3.tar.gz", hash = "sha256:e32fee8ab45d3c2db6da19a5323bc3362237c8b653c70194414b892fd06a080d"}, ] [[package]] @@ -4664,6 +4682,11 @@ files = [ {file = "scikit_learn-1.5.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f60021ec1574e56632be2a36b946f8143bf4e5e6af4a06d85281adc22938e0dd"}, {file = "scikit_learn-1.5.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:394397841449853c2290a32050382edaec3da89e35b3e03d6cc966aebc6a8ae6"}, {file = "scikit_learn-1.5.2-cp312-cp312-win_amd64.whl", hash = "sha256:57cc1786cfd6bd118220a92ede80270132aa353647684efa385a74244a41e3b1"}, + {file = "scikit_learn-1.5.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9a702e2de732bbb20d3bad29ebd77fc05a6b427dc49964300340e4c9328b3f5"}, + {file = "scikit_learn-1.5.2-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:b0768ad641981f5d3a198430a1d31c3e044ed2e8a6f22166b4d546a5116d7908"}, + {file = "scikit_learn-1.5.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:178ddd0a5cb0044464fc1bfc4cca5b1833bfc7bb022d70b05db8530da4bb3dd3"}, + {file = "scikit_learn-1.5.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7284ade780084d94505632241bf78c44ab3b6f1e8ccab3d2af58e0e950f9c12"}, + {file = "scikit_learn-1.5.2-cp313-cp313-win_amd64.whl", hash = "sha256:b7b0f9a0b1040830d38c39b91b3a44e1b643f4b36e36567b80b7c6bd2202a27f"}, {file = "scikit_learn-1.5.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:757c7d514ddb00ae249832fe87100d9c73c6ea91423802872d9e74970a0e40b9"}, {file = 
"scikit_learn-1.5.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:52788f48b5d8bca5c0736c175fa6bdaab2ef00a8f536cda698db61bd89c551c1"}, {file = "scikit_learn-1.5.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:643964678f4b5fbdc95cbf8aec638acc7aa70f5f79ee2cdad1eec3df4ba6ead8"}, @@ -4787,13 +4810,13 @@ type = ["importlib_metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (>=1.12 [[package]] name = "six" -version = "1.16.0" +version = "1.17.0" description = "Python 2 and 3 compatibility utilities" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, + {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, + {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, ] [[package]] @@ -5135,6 +5158,7 @@ files = [ {file = "tiktoken-0.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:d8c2d0e5ba6453a290b86cd65fc51fedf247e1ba170191715b049dac1f628005"}, {file = "tiktoken-0.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d622d8011e6d6f239297efa42a2657043aaed06c4f68833550cac9e9bc723ef1"}, {file = "tiktoken-0.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2efaf6199717b4485031b4d6edb94075e4d79177a172f38dd934d911b588d54a"}, + {file = "tiktoken-0.8.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5637e425ce1fc49cf716d88df3092048359a4b3bbb7da762840426e937ada06d"}, {file = "tiktoken-0.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fb0e352d1dbe15aba082883058b3cce9e48d33101bdaac1eccf66424feb5b47"}, {file = "tiktoken-0.8.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:56edfefe896c8f10aba372ab5706b9e3558e78db39dd497c940b47bf228bc419"}, {file = "tiktoken-0.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:326624128590def898775b722ccc327e90b073714227175ea8febbc920ac0a99"}, @@ -5894,10 +5918,10 @@ test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", type = ["pytest-mypy"] [extras] -core = ["aiosqlite", "apscheduler", "asyncpg", "bcrypt", "boto3", "deepdiff", "fire", "fsspec", "future", "graspologic", "gunicorn", "hatchet-sdk", "networkx", "ollama", "passlib", "psutil", "pydantic", "pyjwt", "python-multipart", "pyyaml", "sendgrid", "sqlalchemy", "supabase", "tokenizers", "unstructured-client", "uvicorn", "vecs"] +core = ["aiosqlite", "apscheduler", "asyncpg", "bcrypt", "boto3", "colorlog", "deepdiff", "fire", "fsspec", "future", "graspologic", "gunicorn", "hatchet-sdk", "networkx", "ollama", "passlib", "psutil", "pydantic", "pyjwt", "python-multipart", "pyyaml", "sendgrid", "sqlalchemy", "supabase", "tokenizers", "unstructured-client", "uvicorn", "vecs"] ingestion-bundle = ["aiofiles", "aioshutil", "beautifulsoup4", "bs4", "markdown", "numpy", "openpyxl", "pdf2image", "pypdf", "pypdf2", "python-docx", "python-pptx"] [metadata] lock-version = "2.0" python-versions = ">=3.10,<3.13" -content-hash = "27694176c4c0ec215120e9d7bbe04d426bf2a630d5238edf39772c4b4b11e351" +content-hash = "203c69b6aebc7aaf815be4708b33f41814fa49ae8feb37874280c185c1721ac7" diff --git a/py/pyproject.toml b/py/pyproject.toml index 097392ed3..c9006ce52 100644 --- 
a/py/pyproject.toml +++ b/py/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "r2r" readme = "README.md" -version = "3.3.2" +version = "3.3.3" description = "SciPhi R2R" authors = ["Owen Colegrove "] @@ -49,6 +49,7 @@ apscheduler = { version = "^3.10.4", optional = true } asyncpg = { version = "^0.29.0", optional = true } bcrypt = { version = "^4.1.3", optional = true } boto3 = { version = "^1.35.17", optional = true } # for AWS bedrock support +colorlog = { version = "^6.9.0", optional = true } deepdiff = { version = "^7.0.1", optional = true } fire = { version = "^0.5.0", optional = true } fsspec = { version = "^2024.6.0", optional = true } @@ -92,6 +93,7 @@ core = [ "asyncpg", "bcrypt", "boto3", + "colorlog", "deepdiff", "fire", "fsspec", diff --git a/py/r2r.toml b/py/r2r.toml index 55fa02bbe..9d1d5337d 100644 --- a/py/r2r.toml +++ b/py/r2r.toml @@ -4,7 +4,8 @@ [agent] system_instruction_name = "rag_agent" -tool_names = ["search"] +# tool_names = ["local_search", "web_search"] # uncomment to enable web search +tool_names = ["local_search"] [agent.generation_config] model = "openai/gpt-4o" diff --git a/py/sdk/models.py b/py/sdk/models.py index 277518b92..9ad98d6e7 100644 --- a/py/sdk/models.py +++ b/py/sdk/models.py @@ -4,6 +4,7 @@ GraphSearchResult, GraphSearchSettings, HybridSearchSettings, + IngestionMode, KGCommunityResult, KGCreationSettings, KGEnrichmentSettings, @@ -17,6 +18,7 @@ MessageType, R2RException, R2RSerializable, + SearchMode, SearchSettings, Token, User, @@ -43,6 +45,7 @@ "Token", "ChunkSearchResult", "SearchSettings", + "SearchMode", "KGEntityDeduplicationSettings", "RAGResponse", "CombinedSearchResponse", diff --git a/py/sdk/v3/documents.py b/py/sdk/v3/documents.py index a0b4dff6d..b9ece87af 100644 --- a/py/sdk/v3/documents.py +++ b/py/sdk/v3/documents.py @@ -12,6 +12,8 @@ WrappedDocumentsResponse, ) +from ..models import IngestionMode + class DocumentsSDK: """ @@ -27,9 +29,10 @@ async def create( raw_text: Optional[str] = None, chunks: Optional[list[str]] = None, id: Optional[str | UUID] = None, + ingestion_mode: Optional[str] = None, collection_ids: Optional[list[str | UUID]] = None, metadata: Optional[dict] = None, - ingestion_config: Optional[dict] = None, + ingestion_config: Optional[dict | IngestionMode] = None, run_with_orchestration: Optional[bool] = True, ) -> WrappedIngestionResponse: """ @@ -65,13 +68,17 @@ async def create( if metadata: data["metadata"] = json.dumps(metadata) if ingestion_config: + if not isinstance(ingestion_config, dict): + ingestion_config = ingestion_config.model_dump() + ingestion_config["app"] = {} data["ingestion_config"] = json.dumps(ingestion_config) if collection_ids: collection_ids = [str(collection_id) for collection_id in collection_ids] # type: ignore data["collection_ids"] = json.dumps(collection_ids) if run_with_orchestration is not None: data["run_with_orchestration"] = str(run_with_orchestration) - + if ingestion_mode is not None: + data["ingestion_mode"] = ingestion_mode if file_path: # Create a new file instance that will remain open during the request file_instance = open(file_path, "rb") diff --git a/py/sdk/v3/retrieval.py b/py/sdk/v3/retrieval.py index ed3a1438a..bead7a729 100644 --- a/py/sdk/v3/retrieval.py +++ b/py/sdk/v3/retrieval.py @@ -6,6 +6,7 @@ GraphSearchSettings, Message, RAGResponse, + SearchMode, SearchSettings, ) @@ -21,6 +22,7 @@ def __init__(self, client): async def search( self, query: str, + search_mode: Optional[str | SearchMode] = "custom", 
search_settings: Optional[dict | SearchSettings] = None, ) -> CombinedSearchResponse: """ @@ -33,6 +35,9 @@ async def search( Returns: CombinedSearchResponse: The search response. """ + if search_mode and not isinstance(search_mode, str): + search_mode = search_mode.value + if search_settings and not isinstance(search_settings, dict): search_settings = search_settings.model_dump() @@ -40,6 +45,9 @@ async def search( "query": query, "search_settings": search_settings, } + if search_mode: + data["search_mode"] = search_mode + return await self.client._make_request( "POST", "retrieval/search", @@ -91,6 +99,7 @@ async def rag( self, query: str, rag_generation_config: Optional[dict | GenerationConfig] = None, + search_mode: Optional[str | SearchMode] = "custom", search_settings: Optional[dict | SearchSettings] = None, task_prompt_override: Optional[str] = None, include_title_if_available: Optional[bool] = False, @@ -122,6 +131,8 @@ async def rag( "task_prompt_override": task_prompt_override, "include_title_if_available": include_title_if_available, } + if search_mode: + data["search_mode"] = search_mode if rag_generation_config and rag_generation_config.get( # type: ignore "stream", False @@ -144,6 +155,7 @@ async def agent( self, message: Optional[dict | Message] = None, rag_generation_config: Optional[dict | GenerationConfig] = None, + search_mode: Optional[str | SearchMode] = "custom", search_settings: Optional[dict | SearchSettings] = None, task_prompt_override: Optional[str] = None, include_title_if_available: Optional[bool] = False, @@ -177,6 +189,8 @@ async def agent( "conversation_id": conversation_id, "branch_id": branch_id, } + if search_mode: + data["search_mode"] = search_mode if message: cast_message: Message = ( diff --git a/py/shared/abstractions/__init__.py b/py/shared/abstractions/__init__.py index 3fcaf4037..d6f4c9a34 100644 --- a/py/shared/abstractions/__init__.py +++ b/py/shared/abstractions/__init__.py @@ -4,6 +4,7 @@ DocumentChunk, DocumentResponse, DocumentType, + IngestionMode, IngestionStatus, KGEnrichmentStatus, KGExtractionStatus, @@ -44,6 +45,7 @@ KGGlobalResult, KGRelationshipResult, KGSearchResultType, + SearchMode, SearchSettings, ) from .user import Token, TokenData, User @@ -110,6 +112,7 @@ "ChunkSearchResult", "SearchSettings", "HybridSearchSettings", + "SearchMode", # KG abstractions "KGCreationSettings", "KGEnrichmentSettings", diff --git a/py/shared/abstractions/document.py b/py/shared/abstractions/document.py index 225e9af3b..12bf02a68 100644 --- a/py/shared/abstractions/document.py +++ b/py/shared/abstractions/document.py @@ -255,3 +255,131 @@ class DocumentChunk(R2RSerializable): class RawChunk(R2RSerializable): text: str + + +class IngestionMode(str, Enum): + hi_res = "hi-res" + fast = "fast" + custom = "custom" + + +class ChunkEnrichmentStrategy(str, Enum): + SEMANTIC = "semantic" + NEIGHBORHOOD = "neighborhood" + + def __str__(self) -> str: + return self.value + + +from .llm import GenerationConfig + + +class ChunkEnrichmentSettings(R2RSerializable): + """ + Settings for chunk enrichment. + """ + + enable_chunk_enrichment: bool = Field( + default=False, + description="Whether to enable chunk enrichment or not", + ) + strategies: list[ChunkEnrichmentStrategy] = Field( + default=[], + description="The strategies to use for chunk enrichment. 
Union of chunks obtained from each strategy is used as context.", + ) + forward_chunks: int = Field( + default=3, + description="The number after the current chunk to include in the LLM context while enriching", + ) + backward_chunks: int = Field( + default=3, + description="The number of chunks before the current chunk in the LLM context while enriching", + ) + semantic_neighbors: int = Field( + default=10, description="The number of semantic neighbors to include" + ) + semantic_similarity_threshold: float = Field( + default=0.7, + description="The similarity threshold for semantic neighbors", + ) + generation_config: GenerationConfig = Field( + default=GenerationConfig(), + description="The generation config to use for chunk enrichment", + ) + + +## TODO - Move ingestion config + + +class IngestionConfig(R2RSerializable): + provider: str = "r2r" + excluded_parsers: list[str] = ["mp4"] + chunk_enrichment_settings: ChunkEnrichmentSettings = ( + ChunkEnrichmentSettings() + ) + extra_parsers: dict[str, str] = {} + + audio_transcription_model: str = "openai/whisper-1" + + vision_img_prompt_name: str = "vision_img" + vision_img_model: str = "openai/gpt-4o" + + vision_pdf_prompt_name: str = "vision_pdf" + vision_pdf_model: str = "openai/gpt-4o" + + skip_document_summary: bool = False + document_summary_system_prompt: str = "default_system" + document_summary_task_prompt: str = "default_summary" + chunks_for_document_summary: int = 128 + document_summary_model: str = "openai/gpt-4o-mini" + + @property + def supported_providers(self) -> list[str]: + return ["r2r", "unstructured_local", "unstructured_api"] + + def validate_config(self) -> None: + if self.provider not in self.supported_providers: + raise ValueError(f"Provider {self.provider} is not supported.") + + @classmethod + def get_default(cls, mode: str) -> "IngestionConfig": + """Return default ingestion configuration for a given mode.""" + if mode == "hi-res": + # More thorough parsing, no skipping summaries, possibly larger `chunks_for_document_summary`. + return cls( + provider="r2r", + excluded_parsers=["mp4"], + chunk_enrichment_settings=ChunkEnrichmentSettings(), # default + extra_parsers={}, + audio_transcription_model="openai/whisper-1", + vision_img_prompt_name="vision_img", + vision_img_model="openai/gpt-4o", + vision_pdf_prompt_name="vision_pdf", + vision_pdf_model="openai/gpt-4o", + skip_document_summary=False, + document_summary_system_prompt="default_system", + document_summary_task_prompt="default_summary", + chunks_for_document_summary=256, # larger for hi-res + document_summary_model="openai/gpt-4o-mini", + ) + elif mode == "fast": + # Skip summaries and other enrichment steps for speed. 
+ return cls( + provider="r2r", + excluded_parsers=["mp4"], + chunk_enrichment_settings=ChunkEnrichmentSettings(), # default + extra_parsers={}, + audio_transcription_model="openai/whisper-1", + vision_img_prompt_name="vision_img", + vision_img_model="openai/gpt-4o", + vision_pdf_prompt_name="vision_pdf", + vision_pdf_model="openai/gpt-4o", + skip_document_summary=True, # skip summaries + document_summary_system_prompt="default_system", + document_summary_task_prompt="default_summary", + chunks_for_document_summary=64, + document_summary_model="openai/gpt-4o-mini", + ) + else: + # For `custom` or any unrecognized mode, return a base config + return cls() diff --git a/py/shared/abstractions/graph.py b/py/shared/abstractions/graph.py index 4bdc9dd62..f14dba462 100644 --- a/py/shared/abstractions/graph.py +++ b/py/shared/abstractions/graph.py @@ -113,11 +113,9 @@ class Graph(R2RSerializable): name: str description: Optional[str] = None created_at: datetime = Field( - alias="createdAt", default_factory=datetime.utcnow, ) updated_at: datetime = Field( - alias="updatedAt", default_factory=datetime.utcnow, ) status: str = "pending" diff --git a/py/shared/abstractions/kg.py b/py/shared/abstractions/kg.py index bb9a06fcf..adfb46c57 100644 --- a/py/shared/abstractions/kg.py +++ b/py/shared/abstractions/kg.py @@ -171,7 +171,6 @@ class GraphCommunitySettings(R2RSerializable): graphrag_communities: str = Field( default="graphrag_communities", description="The prompt to use for knowledge graph enrichment.", - alias="graphrag_communities", # TODO - mark deprecated & remove ) max_summary_input_length: int = Field( diff --git a/py/shared/abstractions/llm.py b/py/shared/abstractions/llm.py index 2f747953e..677ef865a 100644 --- a/py/shared/abstractions/llm.py +++ b/py/shared/abstractions/llm.py @@ -52,13 +52,11 @@ class GenerationConfig(R2RSerializable): ) top_p: float = Field( default_factory=lambda: GenerationConfig._defaults["top_p"], - alias="topP", ) max_tokens_to_sample: int = Field( default_factory=lambda: GenerationConfig._defaults[ "max_tokens_to_sample" ], - alias="maxTokensToSample", ) stream: bool = Field( default_factory=lambda: GenerationConfig._defaults["stream"] @@ -73,11 +71,9 @@ class GenerationConfig(R2RSerializable): default_factory=lambda: GenerationConfig._defaults[ "add_generation_kwargs" ], - alias="addGenerationKwargs", ) api_base: Optional[str] = Field( default_factory=lambda: GenerationConfig._defaults["api_base"], - alias="apiBase", ) response_format: Optional[dict | BaseModel] = None diff --git a/py/shared/abstractions/search.py b/py/shared/abstractions/search.py index bffe90e2e..74fbac3ea 100644 --- a/py/shared/abstractions/search.py +++ b/py/shared/abstractions/search.py @@ -146,17 +146,67 @@ class Config: } +class WebSearchResult(R2RSerializable): + title: str + link: str + snippet: str + position: int + type: str = "organic" + date: Optional[str] = None + sitelinks: Optional[list[dict]] = None + + +class RelatedSearchResult(R2RSerializable): + query: str + type: str = "related" + + +class PeopleAlsoAskResult(R2RSerializable): + question: str + snippet: str + link: str + title: str + type: str = "peopleAlsoAsk" + + +class WebSearchResponse(R2RSerializable): + organic_results: list[WebSearchResult] = [] + related_searches: list[RelatedSearchResult] = [] + people_also_ask: list[PeopleAlsoAskResult] = [] + + @classmethod + def from_serper_results(cls, results: list[dict]) -> "WebSearchResponse": + organic = [] + related = [] + paa = [] + + for result in results: + if 
result["type"] == "organic": + organic.append(WebSearchResult(**result)) + elif result["type"] == "relatedSearches": + related.append(RelatedSearchResult(**result)) + elif result["type"] == "peopleAlsoAsk": + paa.append(PeopleAlsoAskResult(**result)) + + return cls( + organic_results=organic, + related_searches=related, + people_also_ask=paa, + ) + + class AggregateSearchResult(R2RSerializable): """Result of an aggregate search operation.""" chunk_search_results: Optional[list[ChunkSearchResult]] graph_search_results: Optional[list[GraphSearchResult]] = None + web_search_results: Optional[list[WebSearchResult]] = None def __str__(self) -> str: - return f"AggregateSearchResult(chunk_search_results={self.chunk_search_results}, graph_search_results={self.graph_search_results})" + return f"AggregateSearchResult(chunk_search_results={self.chunk_search_results}, graph_search_results={self.graph_search_results}, web_search_results={self.web_search_results})" def __repr__(self) -> str: - return f"AggregateSearchResult(chunk_search_results={self.chunk_search_results}, graph_search_results={self.graph_search_results})" + return f"AggregateSearchResult(chunk_search_results={self.chunk_search_results}, graph_search_results={self.graph_search_results}, web_search_results={self.web_search_results})" def as_dict(self) -> dict: return { @@ -165,7 +215,12 @@ def as_dict(self) -> dict: if self.chunk_search_results else [] ), - "graph_search_results": self.graph_search_results or None, + "graph_search_results": [ + result.to_dict() for result in self.graph_search_results + ], + "web_search_results": [ + result.to_dict() for result in self.web_search_results + ], } @@ -202,7 +257,6 @@ class ChunkSearchSettings(R2RSerializable): """Settings specific to chunk/vector search.""" index_measure: IndexMeasure = Field( - alias="indexMeasure", default=IndexMeasure.cosine_distance, description="The distance measure to use for indexing", ) @@ -211,7 +265,6 @@ class ChunkSearchSettings(R2RSerializable): description="Number of ivfflat index lists to query. Higher increases accuracy but decreases speed.", ) ef_search: int = Field( - alias="efSearch", default=40, description="Size of the dynamic candidate list for HNSW index search. Higher increases accuracy but decreases speed.", ) @@ -225,30 +278,24 @@ class GraphSearchSettings(R2RSerializable): """Settings specific to knowledge graph search.""" generation_config: GenerationConfig = Field( - alias="generationConfig", default_factory=GenerationConfig, description="Configuration for text generation during graph search.", ) graphrag_map_system: str = Field( - alias="graphragMapSystem", default="graphrag_map_system", description="The system prompt for the graphrag map prompt.", ) graphrag_reduce_system: str = Field( - alias="graphragReduceSystem", default="graphrag_reduce_system", description="The system prompt for the graphrag reduce prompt.", ) max_community_description_length: int = Field( - alias="maxCommunityDescriptionLength", default=65536, ) max_llm_queries_for_global_search: int = Field( - alias="maxLLMQueriesForGlobalSearch", default=250, ) limits: dict[str, int] = Field( - alias="localSearchLimits", default={}, ) enabled: bool = Field( @@ -264,17 +311,14 @@ class SearchSettings(R2RSerializable): use_hybrid_search: bool = Field( default=False, description="Whether to perform a hybrid search. This is equivalent to setting `use_semantic_search=True` and `use_fulltext_search=True`, e.g. 
combining vector and keyword search.", - alias="useHybridSearch", ) use_semantic_search: bool = Field( default=True, description="Whether to use semantic search", - alias="useSemanticSearch", ) use_fulltext_search: bool = Field( default=False, description="Whether to use full-text search", - alias="useFulltextSearch", ) # Common search parameters @@ -304,24 +348,20 @@ class SearchSettings(R2RSerializable): description="Offset to paginate search results", ) include_metadatas: bool = Field( - alias="includeMetadatas", default=True, description="Whether to include element metadata in the search results", ) include_scores: bool = Field( - alias="includeScores", default=True, description="Whether to include search score values in the search results", ) # Search strategy and settings search_strategy: str = Field( - alias="searchStrategy", default="vanilla", description="Search strategy to use (e.g., 'vanilla', 'query_fusion', 'hyde')", ) hybrid_settings: HybridSearchSettings = Field( - alias="hybridSearchSettings", default_factory=HybridSearchSettings, description="Settings for hybrid search (only used if `use_semantic_search` and `use_fulltext_search` are both true)", ) @@ -384,3 +424,36 @@ def __init__(self, **data): def model_dump(self, *args, **kwargs): dump = super().model_dump(*args, **kwargs) return dump + + @classmethod + def get_default(cls, mode: str) -> "SearchSettings": + """Return default search settings for a given mode.""" + if mode == "basic": + # A simpler search that relies primarily on semantic search. + return cls( + use_semantic_search=True, + use_fulltext_search=False, + use_hybrid_search=False, + search_strategy="vanilla", + # Other relevant defaults can be provided here as needed + ) + elif mode == "advanced": + # A more powerful, combined search that leverages both semantic and fulltext. + return cls( + use_semantic_search=True, + use_fulltext_search=True, + use_hybrid_search=True, + search_strategy="hyde", + # Other advanced defaults as needed + ) + else: + # For 'custom' or unrecognized modes, return a basic empty config. 
+            return cls()
+
+
+class SearchMode(str, Enum):
+    """Search modes for the search endpoint."""
+
+    basic = "basic"
+    advanced = "advanced"
+    custom = "custom"
diff --git a/py/shared/abstractions/user.py b/py/shared/abstractions/user.py
index d6ad8e2fc..31d68bf30 100644
--- a/py/shared/abstractions/user.py
+++ b/py/shared/abstractions/user.py
@@ -14,11 +14,9 @@ class Collection(BaseModel):
     name: str
     description: Optional[str] = None
     created_at: datetime = Field(
-        alias="createdAt",
         default_factory=datetime.utcnow,
     )
     updated_at: datetime = Field(
-        alias="updatedAt",
         default_factory=datetime.utcnow,
     )
diff --git a/py/shared/utils/base_utils.py b/py/shared/utils/base_utils.py
index 9680fcc9a..0db6e8ac0 100644
--- a/py/shared/utils/base_utils.py
+++ b/py/shared/utils/base_utils.py
@@ -81,17 +81,31 @@ def format_search_results_for_llm(results: AggregateSearchResult) -> str:
             )
             source_counter += 1
 
+    if results.web_search_results:
+        formatted_results.append("Web Search Results:")
+        for result in results.web_search_results:
+            formatted_results.extend(
+                (
+                    f"Source [{source_counter}]:",
+                    f"Title: {result.title}",
+                    f"Link: {result.link}",
+                    f"Snippet: {result.snippet}",
+                )
+            )
+            if result.date:
+                formatted_results.append(f"Date: {result.date}")
+            source_counter += 1
     return "\n".join(formatted_results)
 
 
-def format_search_results_for_stream(
-    result: AggregateSearchResult,
-) -> str:
-    CHUNK_SEARCH_STREAM_MARKER = "chunk_search"  # TODO - change this to vector_search in next major release
+def format_search_results_for_stream(result: AggregateSearchResult) -> str:
+    CHUNK_SEARCH_STREAM_MARKER = "chunk_search"
     GRAPH_SEARCH_STREAM_MARKER = "graph_search"
+    WEB_SEARCH_STREAM_MARKER = "web_search"
 
     context = ""
+
     if result.chunk_search_results:
         context += f"<{CHUNK_SEARCH_STREAM_MARKER}>"
         vector_results_list = [
@@ -108,6 +122,14 @@ def format_search_results_for_stream(
         context += json.dumps(kg_results_list, default=str)
         context += f"</{GRAPH_SEARCH_STREAM_MARKER}>"
 
+    if result.web_search_results:
+        context += f"<{WEB_SEARCH_STREAM_MARKER}>"
+        web_results_list = [
+            result.to_dict() for result in result.web_search_results
+        ]
+        context += json.dumps(web_results_list, default=str)
+        context += f"</{WEB_SEARCH_STREAM_MARKER}>"
+
     return context
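
The retrieval SDK changes in this patch thread a new `search_mode` parameter (`"basic"`, `"advanced"`, or the default `"custom"`) through `search`, `rag`, and `agent`. Below is a minimal sketch of how a caller might exercise it; it assumes the synchronous `R2RClient` wrapper exposes the v3 retrieval client as `client.retrieval` and that a server is reachable at a local placeholder URL (with the async client, `await` the same calls).

```python
# Hypothetical usage of the new search_mode parameter (sketch, not part of the patch).
from r2r import R2RClient  # assumes the r2r Python SDK is installed

client = R2RClient("http://localhost:7272")  # placeholder base URL

# "basic"/"advanced" resolve to SearchSettings.get_default(...) presets server-side;
# "custom" (the default) leaves search_settings entirely caller-defined.
results = client.retrieval.search(
    query="What is retrieval-augmented generation?",
    search_mode="advanced",
)

# Explicit generation settings can still be combined with a preset mode.
rag_response = client.retrieval.rag(
    query="Summarize the ingested documents about RAG.",
    search_mode="basic",
    rag_generation_config={"model": "openai/gpt-4o", "stream": False},
)
```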
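
Similarly, `DocumentsSDK.create` now accepts an `ingestion_mode` field alongside `ingestion_config`. A sketch of client code under the same assumptions about the synchronous wrapper and server URL; the file path and metadata below are placeholders.

```python
# Hypothetical document ingestion using the new ingestion_mode field (sketch).
from r2r import R2RClient

client = R2RClient("http://localhost:7272")  # placeholder base URL

response = client.documents.create(
    file_path="example.pdf",          # placeholder file
    ingestion_mode="fast",            # server maps this to IngestionConfig.get_default("fast")
    metadata={"source": "example"},
)

# Per the patch: "hi-res" keeps document summaries with a larger
# chunks_for_document_summary, "fast" skips summaries, and "custom" (or any
# unrecognized mode) falls back to the base IngestionConfig.
```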
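
Server-side, the `SearchMode` values map onto `SearchSettings.get_default`, as added in `py/shared/abstractions/search.py`. A small sketch of that mapping; the import path assumes the repo's `py/shared` package layout and may need adjusting in your checkout.

```python
# Sketch of how the SearchMode presets resolve to SearchSettings defaults.
from shared.abstractions.search import SearchMode, SearchSettings

basic = SearchSettings.get_default(SearchMode.basic.value)
advanced = SearchSettings.get_default(SearchMode.advanced.value)

assert basic.use_semantic_search and not basic.use_fulltext_search
assert advanced.use_hybrid_search and advanced.search_strategy == "hyde"

# Anything else ("custom" included) returns the plain SearchSettings() defaults.
custom = SearchSettings.get_default(SearchMode.custom.value)
assert custom.search_strategy == "vanilla"
```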
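
`WebSearchResponse.from_serper_results` buckets Serper-style result items by their `type` field. The payload below is hand-written illustrative data, not real Serper output, and the import path again assumes the `py/shared` layout.

```python
# Sketch of parsing Serper-style results into the new web search abstractions.
from shared.abstractions.search import WebSearchResponse

serper_items = [
    {
        "type": "organic",
        "title": "Retrieval-Augmented Generation",
        "link": "https://example.com/rag",
        "snippet": "An overview of RAG systems.",
        "position": 1,
    },
    {"type": "relatedSearches", "query": "rag vs fine-tuning"},
    {
        "type": "peopleAlsoAsk",
        "question": "What is RAG?",
        "snippet": "RAG combines retrieval with generation.",
        "link": "https://example.com/faq",
        "title": "RAG FAQ",
    },
]

web = WebSearchResponse.from_serper_results(serper_items)
assert len(web.organic_results) == 1
assert len(web.related_searches) == 1
assert len(web.people_also_ask) == 1
```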
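
Finally, `format_search_results_for_stream` wraps each result family in `<chunk_search>`, `<graph_search>`, and now `<web_search>` markers. A consumer of the streaming endpoint might split the emitted context as in the sketch below; the sample string and regex are illustrative only.

```python
# Sketch of extracting the web-search block from a streamed context string.
import json
import re

stream_context = (
    '<chunk_search>[]</chunk_search>'
    '<web_search>[{"title": "Example", "link": "https://example.com"}]</web_search>'
)

match = re.search(r"<web_search>(.*?)</web_search>", stream_context, re.DOTALL)
if match:
    web_results = json.loads(match.group(1))
    print(web_results[0]["title"])  # "Example"
```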