diff --git a/docs/api_reference/conf.py b/docs/api_reference/conf.py index 76f4dbb06f2b7..8c1c285710d15 100644 --- a/docs/api_reference/conf.py +++ b/docs/api_reference/conf.py @@ -101,7 +101,6 @@ def setup(app): autodoc_pydantic_model_show_json = False autodoc_pydantic_field_list_validators = False -autodoc_pydantic_config_members = False autodoc_pydantic_model_show_config_summary = False autodoc_pydantic_model_show_validator_members = False autodoc_pydantic_model_show_validator_summary = False diff --git a/docs/api_reference/requirements.txt b/docs/api_reference/requirements.txt index 994c8196e795d..3d284e2162bca 100644 --- a/docs/api_reference/requirements.txt +++ b/docs/api_reference/requirements.txt @@ -1,5 +1,5 @@ -e libs/langchain -autodoc_pydantic==1.8.0 +autodoc_pydantic==2.0.0 myst_parser nbsphinx==0.8.9 sphinx==4.5.0 diff --git a/libs/experimental/langchain_experimental/autonomous_agents/baby_agi/baby_agi.py b/libs/experimental/langchain_experimental/autonomous_agents/baby_agi/baby_agi.py index 72ed46d46e1c8..efa5501578df6 100644 --- a/libs/experimental/langchain_experimental/autonomous_agents/baby_agi/baby_agi.py +++ b/libs/experimental/langchain_experimental/autonomous_agents/baby_agi/baby_agi.py @@ -6,7 +6,7 @@ from langchain.chains.base import Chain from langchain.schema.language_model import BaseLanguageModel from langchain.vectorstores.base import VectorStore -from pydantic import BaseModel, Field +from pydantic import ConfigDict, BaseModel, Field from langchain_experimental.autonomous_agents.baby_agi.task_creation import ( TaskCreationChain, @@ -29,11 +29,7 @@ class BabyAGI(Chain, BaseModel): task_id_counter: int = Field(1) vectorstore: VectorStore = Field(init=False) max_iterations: Optional[int] = None - - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) def add_task(self, task: Dict) -> None: self.task_list.append(task) diff --git 
a/libs/experimental/langchain_experimental/cpal/models.py b/libs/experimental/langchain_experimental/cpal/models.py index e818fbdcfc170..b090c6a277163 100644 --- a/libs/experimental/langchain_experimental/cpal/models.py +++ b/libs/experimental/langchain_experimental/cpal/models.py @@ -6,7 +6,7 @@ import duckdb import pandas as pd from langchain.graphs.networkx_graph import NetworkxEntityGraph -from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator +from pydantic import field_validator, ConfigDict, BaseModel, Field, PrivateAttr, root_validator from langchain_experimental.cpal.constants import Constant @@ -20,7 +20,8 @@ class NarrativeModel(BaseModel): story_hypothetical: str story_plot: str # causal stack of operations - @validator("*", pre=True) + @field_validator("*", mode="before") + @classmethod def empty_str_to_none(cls, v: str) -> Union[str, None]: """Empty strings are not allowed""" if v == "": @@ -33,14 +34,10 @@ class EntityModel(BaseModel): code: str = Field(description="entity actions") value: float = Field(description="entity initial value") depends_on: list[str] = Field(default=[], description="ancestor entities") + model_config = ConfigDict(validate_assignment=True) - # TODO: generalize to multivariate math - # TODO: acyclic graph - - class Config: - validate_assignment = True - - @validator("name") + @field_validator("name") + @classmethod def lower_case_name(cls, v: str) -> str: v = v.lower() return v @@ -64,7 +61,8 @@ class EntitySettingModel(BaseModel): attribute: str = Field(description="name of the attribute to be calculated") value: float = Field(description="entity's attribute value (calculated)") - @validator("name") + @field_validator("name") + @classmethod def lower_case_transform(cls, v: str) -> str: v = v.lower() return v @@ -98,7 +96,8 @@ class InterventionModel(BaseModel): entity_settings: list[EntitySettingModel] system_settings: Optional[list[SystemSettingModel]] = None - @validator("system_settings") + 
@field_validator("system_settings") + @classmethod def lower_case_name(cls, v: str) -> Union[str, None]: if v is not None: raise NotImplementedError("system_setting is not implemented yet") diff --git a/libs/experimental/langchain_experimental/generative_agents/generative_agent.py b/libs/experimental/langchain_experimental/generative_agents/generative_agent.py index d497a91b93488..f1d0367cca3f6 100644 --- a/libs/experimental/langchain_experimental/generative_agents/generative_agent.py +++ b/libs/experimental/langchain_experimental/generative_agents/generative_agent.py @@ -5,7 +5,7 @@ from langchain.chains import LLMChain from langchain.prompts import PromptTemplate from langchain.schema.language_model import BaseLanguageModel -from pydantic import BaseModel, Field +from pydantic import ConfigDict, BaseModel, Field from langchain_experimental.generative_agents.memory import GenerativeAgentMemory @@ -38,11 +38,7 @@ class GenerativeAgent(BaseModel): daily_summaries: List[str] = Field(default_factory=list) # : :meta private: """Summary of the events in the plan that the agent took.""" - - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) # LLM-related methods @staticmethod diff --git a/libs/experimental/langchain_experimental/pal_chain/base.py b/libs/experimental/langchain_experimental/pal_chain/base.py index d29763d21d75e..6d53527c659b9 100644 --- a/libs/experimental/langchain_experimental/pal_chain/base.py +++ b/libs/experimental/langchain_experimental/pal_chain/base.py @@ -17,7 +17,7 @@ from langchain.schema import BasePromptTemplate from langchain.schema.language_model import BaseLanguageModel from langchain.utilities import PythonREPL -from pydantic import Extra, Field, root_validator +from pydantic import model_validator, ConfigDict, Field from langchain_experimental.pal_chain.colored_object_prompt import COLORED_OBJECT_PROMPT from 
langchain_experimental.pal_chain.math_prompt import MATH_PROMPT @@ -114,14 +114,10 @@ class PALChain(Chain): """Validations to perform on the generated code.""" timeout: Optional[int] = 10 """Timeout in seconds for the generated code to execute.""" + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def raise_deprecation(cls, values: Dict) -> Dict: if "llm" in values: warnings.warn( diff --git a/libs/experimental/langchain_experimental/sql/base.py b/libs/experimental/langchain_experimental/sql/base.py index 6360116bc0111..18a778a41ba4c 100644 --- a/libs/experimental/langchain_experimental/sql/base.py +++ b/libs/experimental/langchain_experimental/sql/base.py @@ -12,7 +12,7 @@ from langchain.schema.language_model import BaseLanguageModel from langchain.tools.sql_database.prompt import QUERY_CHECKER from langchain.utilities.sql_database import SQLDatabase -from pydantic import Extra, Field, root_validator +from pydantic import model_validator, ConfigDict, Field from langchain_experimental.sql.prompt import DECIDER_PROMPT, PROMPT, SQL_PROMPTS @@ -53,14 +53,10 @@ class SQLDatabaseChain(Chain): to fix the initial SQL from the LLM.""" query_checker_prompt: Optional[BasePromptTemplate] = None """The prompt template that should be used by the query checker""" + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def raise_deprecation(cls, values: Dict) -> Dict: if "llm" in values: warnings.warn( diff --git a/libs/langchain/langchain/agents/agent.py b/libs/langchain/langchain/agents/agent.py index f60646ad5258d..0030d8009663a 100644 --- 
a/libs/langchain/langchain/agents/agent.py +++ b/libs/langchain/langchain/agents/agent.py @@ -10,7 +10,7 @@ from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union import yaml -from pydantic import BaseModel, root_validator +from pydantic import BaseModel, model_validator from langchain.agents.agent_iterator import AgentExecutorIterator from langchain.agents.agent_types import AgentType @@ -494,7 +494,8 @@ def input_keys(self) -> List[str]: """ return list(set(self.llm_chain.input_keys) - {"agent_scratchpad"}) - @root_validator() + @model_validator() + @classmethod def validate_prompt(cls, values: Dict) -> Dict: """Validate that prompt matches format.""" prompt = values["llm_chain"].prompt @@ -693,7 +694,8 @@ def from_agent_and_tools( agent=agent, tools=tools, callback_manager=callback_manager, **kwargs ) - @root_validator() + @model_validator() + @classmethod def validate_tools(cls, values: Dict) -> Dict: """Validate that tools are compatible with agent.""" agent = values["agent"] @@ -707,7 +709,8 @@ def validate_tools(cls, values: Dict) -> Dict: ) return values - @root_validator() + @model_validator() + @classmethod def validate_return_direct_tool(cls, values: Dict) -> Dict: """Validate that tools are compatible with agent.""" agent = values["agent"] diff --git a/libs/langchain/langchain/agents/agent_toolkits/amadeus/toolkit.py b/libs/langchain/langchain/agents/agent_toolkits/amadeus/toolkit.py index 28db53bdcb96d..bcef5778e576a 100644 --- a/libs/langchain/langchain/agents/agent_toolkits/amadeus/toolkit.py +++ b/libs/langchain/langchain/agents/agent_toolkits/amadeus/toolkit.py @@ -2,7 +2,7 @@ from typing import TYPE_CHECKING, List -from pydantic import Field +from pydantic import ConfigDict, Field from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.tools import BaseTool @@ -18,11 +18,7 @@ class AmadeusToolkit(BaseToolkit): """Toolkit for interacting with Office365.""" client: Client = 
Field(default_factory=authenticate) - - class Config: - """Pydantic config.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" diff --git a/libs/langchain/langchain/agents/agent_toolkits/gmail/toolkit.py b/libs/langchain/langchain/agents/agent_toolkits/gmail/toolkit.py index e95f2e6862ca2..5b24bafdf8894 100644 --- a/libs/langchain/langchain/agents/agent_toolkits/gmail/toolkit.py +++ b/libs/langchain/langchain/agents/agent_toolkits/gmail/toolkit.py @@ -2,7 +2,7 @@ from typing import TYPE_CHECKING, List -from pydantic import Field +from pydantic import ConfigDict, Field from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.tools import BaseTool @@ -31,11 +31,7 @@ class GmailToolkit(BaseToolkit): """Toolkit for interacting with Gmail.""" api_resource: Resource = Field(default_factory=build_resource_service) - - class Config: - """Pydantic config.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" diff --git a/libs/langchain/langchain/agents/agent_toolkits/office365/toolkit.py b/libs/langchain/langchain/agents/agent_toolkits/office365/toolkit.py index 3e3151e780e90..d389ed03a1041 100644 --- a/libs/langchain/langchain/agents/agent_toolkits/office365/toolkit.py +++ b/libs/langchain/langchain/agents/agent_toolkits/office365/toolkit.py @@ -2,7 +2,7 @@ from typing import TYPE_CHECKING, List -from pydantic import Field +from pydantic import ConfigDict, Field from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.tools import BaseTool @@ -21,11 +21,7 @@ class O365Toolkit(BaseToolkit): """Toolkit for interacting with Office 365.""" account: Account = Field(default_factory=authenticate) - - class Config: - """Pydantic config.""" - - arbitrary_types_allowed = True + model_config = 
ConfigDict(arbitrary_types_allowed=True) def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" diff --git a/libs/langchain/langchain/agents/agent_toolkits/playwright/toolkit.py b/libs/langchain/langchain/agents/agent_toolkits/playwright/toolkit.py index af010e1e22ea8..6fac7f7086fce 100644 --- a/libs/langchain/langchain/agents/agent_toolkits/playwright/toolkit.py +++ b/libs/langchain/langchain/agents/agent_toolkits/playwright/toolkit.py @@ -3,7 +3,7 @@ from typing import TYPE_CHECKING, List, Optional, Type, cast -from pydantic import Extra, root_validator +from pydantic import ConfigDict, root_validator from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.tools.base import BaseTool @@ -36,12 +36,7 @@ class PlayWrightBrowserToolkit(BaseToolkit): sync_browser: Optional["SyncBrowser"] = None async_browser: Optional["AsyncBrowser"] = None - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) @root_validator def validate_imports_and_browser_provided(cls, values: dict) -> dict: diff --git a/libs/langchain/langchain/agents/agent_toolkits/powerbi/toolkit.py b/libs/langchain/langchain/agents/agent_toolkits/powerbi/toolkit.py index e7e66c6a41b3a..368c450d5e9b1 100644 --- a/libs/langchain/langchain/agents/agent_toolkits/powerbi/toolkit.py +++ b/libs/langchain/langchain/agents/agent_toolkits/powerbi/toolkit.py @@ -1,7 +1,7 @@ """Toolkit for interacting with a Power BI dataset.""" from typing import List, Optional, Union -from pydantic import Field +from pydantic import ConfigDict, Field from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.callbacks.base import BaseCallbackManager @@ -38,11 +38,7 @@ class PowerBIToolkit(BaseToolkit): callback_manager: Optional[BaseCallbackManager] = None output_token_limit: Optional[int] = None tiktoken_model_name: Optional[str] = None - 
- class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" diff --git a/libs/langchain/langchain/agents/agent_toolkits/spark_sql/toolkit.py b/libs/langchain/langchain/agents/agent_toolkits/spark_sql/toolkit.py index 7c89c7dc5f416..ec54011df01d0 100644 --- a/libs/langchain/langchain/agents/agent_toolkits/spark_sql/toolkit.py +++ b/libs/langchain/langchain/agents/agent_toolkits/spark_sql/toolkit.py @@ -1,7 +1,7 @@ """Toolkit for interacting with Spark SQL.""" from typing import List -from pydantic import Field +from pydantic import ConfigDict, Field from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.schema.language_model import BaseLanguageModel @@ -20,11 +20,7 @@ class SparkSQLToolkit(BaseToolkit): db: SparkSQL = Field(exclude=True) llm: BaseLanguageModel = Field(exclude=True) - - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" diff --git a/libs/langchain/langchain/agents/agent_toolkits/sql/toolkit.py b/libs/langchain/langchain/agents/agent_toolkits/sql/toolkit.py index 5fe6e078632cd..d5c99f09ee4b1 100644 --- a/libs/langchain/langchain/agents/agent_toolkits/sql/toolkit.py +++ b/libs/langchain/langchain/agents/agent_toolkits/sql/toolkit.py @@ -1,7 +1,7 @@ """Toolkit for interacting with an SQL database.""" from typing import List -from pydantic import Field +from pydantic import ConfigDict, Field from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.schema.language_model import BaseLanguageModel @@ -26,10 +26,7 @@ def dialect(self) -> str: """Return string representation of SQL dialect to use.""" return self.db.dialect - class Config: - """Configuration for this pydantic 
object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" diff --git a/libs/langchain/langchain/agents/agent_toolkits/vectorstore/toolkit.py b/libs/langchain/langchain/agents/agent_toolkits/vectorstore/toolkit.py index f706ed215b9e0..9e44f1f9309fa 100644 --- a/libs/langchain/langchain/agents/agent_toolkits/vectorstore/toolkit.py +++ b/libs/langchain/langchain/agents/agent_toolkits/vectorstore/toolkit.py @@ -1,7 +1,7 @@ """Toolkit for interacting with a vector store.""" from typing import List -from pydantic import BaseModel, Field +from pydantic import BaseModel, ConfigDict, Field from langchain.agents.agent_toolkits.base import BaseToolkit from langchain.llms.openai import OpenAI @@ -20,11 +20,7 @@ class VectorStoreInfo(BaseModel): vectorstore: VectorStore = Field(exclude=True) name: str description: str - - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) class VectorStoreToolkit(BaseToolkit): @@ -32,11 +28,7 @@ class VectorStoreToolkit(BaseToolkit): vectorstore_info: VectorStoreInfo = Field(exclude=True) llm: BaseLanguageModel = Field(default_factory=lambda: OpenAI(temperature=0)) - - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" @@ -66,11 +58,7 @@ class VectorStoreRouterToolkit(BaseToolkit): vectorstores: List[VectorStoreInfo] = Field(exclude=True) llm: BaseLanguageModel = Field(default_factory=lambda: OpenAI(temperature=0)) - - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" 
diff --git a/libs/langchain/langchain/callbacks/tracers/schemas.py b/libs/langchain/langchain/callbacks/tracers/schemas.py index c9de9e6ae9593..2f01073280715 100644 --- a/libs/langchain/langchain/callbacks/tracers/schemas.py +++ b/libs/langchain/langchain/callbacks/tracers/schemas.py @@ -7,7 +7,7 @@ from langsmith.schemas import RunBase as BaseRunV2 from langsmith.schemas import RunTypeEnum -from pydantic import BaseModel, Field, root_validator +from pydantic import BaseModel, Field, model_validator from langchain.schema import LLMResult @@ -96,7 +96,8 @@ class Run(BaseRunV2): child_runs: List[Run] = Field(default_factory=list) tags: Optional[List[str]] = Field(default_factory=list) - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def assign_name(cls, values: dict) -> dict: """Assign name to the run.""" if values.get("name") is None: diff --git a/libs/langchain/langchain/chains/api/base.py b/libs/langchain/langchain/chains/api/base.py index d3e548f395cab..5992472eca42b 100644 --- a/libs/langchain/langchain/chains/api/base.py +++ b/libs/langchain/langchain/chains/api/base.py @@ -3,7 +3,7 @@ from typing import Any, Dict, List, Optional -from pydantic import Field, root_validator +from pydantic import Field, model_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, @@ -43,7 +43,8 @@ def output_keys(self) -> List[str]: """ return [self.output_key] - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_api_request_prompt(cls, values: Dict) -> Dict: """Check that api request prompt expects the right variables.""" input_vars = values["api_request_chain"].prompt.input_variables @@ -54,7 +55,8 @@ def validate_api_request_prompt(cls, values: Dict) -> Dict: ) return values - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_api_answer_prompt(cls, values: Dict) -> Dict: """Check that api answer prompt expects the right variables.""" input_vars = 
values["api_answer_chain"].prompt.input_variables diff --git a/libs/langchain/langchain/chains/api/openapi/chain.py b/libs/langchain/langchain/chains/api/openapi/chain.py index d3bf442d5ce71..3809e2702f9ce 100644 --- a/libs/langchain/langchain/chains/api/openapi/chain.py +++ b/libs/langchain/langchain/chains/api/openapi/chain.py @@ -29,7 +29,7 @@ class OpenAPIEndpointChain(Chain, BaseModel): """Chain interacts with an OpenAPI endpoint using natural language.""" api_request_chain: LLMChain - api_response_chain: Optional[LLMChain] + api_response_chain: Optional[LLMChain] = None api_operation: APIOperation requests: Requests = Field(exclude=True, default_factory=Requests) param_mapping: _ParamMapping = Field(alias="param_mapping") diff --git a/libs/langchain/langchain/chains/base.py b/libs/langchain/langchain/chains/base.py index 7f79c4ffd9714..e7405420bbd5c 100644 --- a/libs/langchain/langchain/chains/base.py +++ b/libs/langchain/langchain/chains/base.py @@ -8,7 +8,8 @@ from typing import Any, Dict, List, Optional, Union import yaml -from pydantic import Field, root_validator, validator +from pydantic import ConfigDict, Field, model_validator +from pydantic.v1 import validator as v1_validator import langchain from langchain.callbacks.base import BaseCallbackManager @@ -98,17 +99,14 @@ async def ainvoke( and passed as arguments to the handlers defined in `callbacks`. You can use these to eg identify a specific instance of a chain with its use case. 
""" - - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) @property def _chain_type(self) -> str: raise NotImplementedError("Saving not supported for this chain type.") - @root_validator() + @model_validator() + @classmethod def raise_callback_manager_deprecation(cls, values: Dict) -> Dict: """Raise deprecation warning if callback_manager is used.""" if values.get("callback_manager") is not None: @@ -119,7 +117,9 @@ def raise_callback_manager_deprecation(cls, values: Dict) -> Dict: values["callbacks"] = values.pop("callback_manager", None) return values - @validator("verbose", pre=True, always=True) + # TODO[pydantic]: Replace with a Pydantic v2 `field_validator`. + # Check https://docs.pydantic.dev/dev-v2/migration/#changes-to-validators for more information. + @v1_validator("verbose", pre=True, always=True) def set_verbose(cls, verbose: Optional[bool]) -> bool: """Set the chain verbosity. 
diff --git a/libs/langchain/langchain/chains/combine_documents/map_reduce.py b/libs/langchain/langchain/chains/combine_documents/map_reduce.py index 9afed3e7e8027..cc25c0842beaa 100644 --- a/libs/langchain/langchain/chains/combine_documents/map_reduce.py +++ b/libs/langchain/langchain/chains/combine_documents/map_reduce.py @@ -4,7 +4,7 @@ from typing import Any, Dict, List, Optional, Tuple -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import Callbacks from langchain.chains.combine_documents.base import BaseCombineDocumentsChain @@ -110,13 +110,10 @@ def output_keys(self) -> List[str]: _output_keys = _output_keys + ["intermediate_steps"] return _output_keys - class Config: - """Configuration for this pydantic object.""" + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) - extra = Extra.forbid - arbitrary_types_allowed = True - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def get_reduce_chain(cls, values: Dict) -> Dict: """For backwards compatibility.""" if "combine_document_chain" in values: @@ -139,7 +136,8 @@ def get_reduce_chain(cls, values: Dict) -> Dict: return values - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def get_return_intermediate_steps(cls, values: Dict) -> Dict: """For backwards compatibility.""" if "return_map_steps" in values: @@ -147,7 +145,8 @@ def get_return_intermediate_steps(cls, values: Dict) -> Dict: del values["return_map_steps"] return values - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def get_default_document_variable_name(cls, values: Dict) -> Dict: """Get default document variable name, if not provided.""" if "document_variable_name" not in values: diff --git a/libs/langchain/langchain/chains/combine_documents/map_rerank.py b/libs/langchain/langchain/chains/combine_documents/map_rerank.py index e2d656d07e0fc..764e24b240c0d 100644 --- 
a/libs/langchain/langchain/chains/combine_documents/map_rerank.py +++ b/libs/langchain/langchain/chains/combine_documents/map_rerank.py @@ -4,7 +4,7 @@ from typing import Any, Dict, List, Optional, Sequence, Tuple, Union, cast -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import Callbacks from langchain.chains.combine_documents.base import BaseCombineDocumentsChain @@ -71,12 +71,7 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain): return_intermediate_steps: bool = False """Return intermediate steps. Intermediate steps include the results of calling llm_chain on each document.""" - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) @property def output_keys(self) -> List[str]: @@ -91,7 +86,8 @@ def output_keys(self) -> List[str]: _output_keys += self.metadata_keys return _output_keys - @root_validator() + @model_validator() + @classmethod def validate_llm_output(cls, values: Dict) -> Dict: """Validate that the combine chain outputs a dictionary.""" output_parser = values["llm_chain"].prompt.output_parser @@ -113,7 +109,8 @@ def validate_llm_output(cls, values: Dict) -> Dict: ) return values - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def get_default_document_variable_name(cls, values: Dict) -> Dict: """Get default document variable name, if not provided.""" if "document_variable_name" not in values: diff --git a/libs/langchain/langchain/chains/combine_documents/reduce.py b/libs/langchain/langchain/chains/combine_documents/reduce.py index b70f301ed20d0..52742f497c122 100644 --- a/libs/langchain/langchain/chains/combine_documents/reduce.py +++ b/libs/langchain/langchain/chains/combine_documents/reduce.py @@ -4,7 +4,7 @@ from typing import Any, Callable, List, Optional, Protocol, Tuple -from pydantic 
import Extra +from pydantic import ConfigDict from langchain.callbacks.manager import Callbacks from langchain.chains.combine_documents.base import BaseCombineDocumentsChain @@ -156,12 +156,7 @@ class ReduceDocumentsChain(BaseCombineDocumentsChain): """The maximum number of tokens to group documents into. For example, if set to 3000 then documents will be grouped into chunks of no greater than 3000 tokens before trying to combine them into a smaller chunk.""" - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) @property def _collapse_chain(self) -> BaseCombineDocumentsChain: diff --git a/libs/langchain/langchain/chains/combine_documents/refine.py b/libs/langchain/langchain/chains/combine_documents/refine.py index 76087a987712d..6852bd06dce31 100644 --- a/libs/langchain/langchain/chains/combine_documents/refine.py +++ b/libs/langchain/langchain/chains/combine_documents/refine.py @@ -4,7 +4,7 @@ from typing import Any, Dict, List, Tuple -from pydantic import Extra, Field, root_validator +from pydantic import ConfigDict, Field, model_validator from langchain.callbacks.manager import Callbacks from langchain.chains.combine_documents.base import ( @@ -98,13 +98,10 @@ def output_keys(self) -> List[str]: _output_keys = _output_keys + ["intermediate_steps"] return _output_keys - class Config: - """Configuration for this pydantic object.""" + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) - extra = Extra.forbid - arbitrary_types_allowed = True - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def get_return_intermediate_steps(cls, values: Dict) -> Dict: """For backwards compatibility.""" if "return_refine_steps" in values: @@ -112,7 +109,8 @@ def get_return_intermediate_steps(cls, values: Dict) -> Dict: del values["return_refine_steps"] return values - @root_validator(pre=True) 
+ @model_validator(mode="before") + @classmethod def get_default_document_variable_name(cls, values: Dict) -> Dict: """Get default document variable name, if not provided.""" if "document_variable_name" not in values: diff --git a/libs/langchain/langchain/chains/combine_documents/stuff.py b/libs/langchain/langchain/chains/combine_documents/stuff.py index 2b113c7ab466b..4718bcbea3f75 100644 --- a/libs/langchain/langchain/chains/combine_documents/stuff.py +++ b/libs/langchain/langchain/chains/combine_documents/stuff.py @@ -2,7 +2,7 @@ from typing import Any, Dict, List, Optional, Tuple -from pydantic import Extra, Field, root_validator +from pydantic import ConfigDict, Field, model_validator from langchain.callbacks.manager import Callbacks from langchain.chains.combine_documents.base import ( @@ -68,14 +68,10 @@ class StuffDocumentsChain(BaseCombineDocumentsChain): If only one variable in the llm_chain, this need not be provided.""" document_separator: str = "\n\n" """The string with which to join the formatted documents""" + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def get_default_document_variable_name(cls, values: Dict) -> Dict: """Get default document variable name, if not provided. 
diff --git a/libs/langchain/langchain/chains/conversation/base.py b/libs/langchain/langchain/chains/conversation/base.py index 43d72f91e7c14..fcf92575a3d73 100644 --- a/libs/langchain/langchain/chains/conversation/base.py +++ b/libs/langchain/langchain/chains/conversation/base.py @@ -1,7 +1,7 @@ """Chain that carries on a conversation and calls an LLM.""" from typing import Dict, List -from pydantic import Extra, Field, root_validator +from pydantic import ConfigDict, Field, model_validator from langchain.chains.conversation.prompt import PROMPT from langchain.chains.llm import LLMChain @@ -27,19 +27,15 @@ class ConversationChain(LLMChain): input_key: str = "input" #: :meta private: output_key: str = "response" #: :meta private: - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) @property def input_keys(self) -> List[str]: """Use this since so some prompt vars come from history.""" return [self.input_key] - @root_validator() + @model_validator() + @classmethod def validate_prompt_input_variables(cls, values: Dict) -> Dict: """Validate that prompt input variables are consistent.""" memory_keys = values["memory"].memory_variables diff --git a/libs/langchain/langchain/chains/conversational_retrieval/base.py b/libs/langchain/langchain/chains/conversational_retrieval/base.py index 7f6891ff22e75..41a67b64b2e59 100644 --- a/libs/langchain/langchain/chains/conversational_retrieval/base.py +++ b/libs/langchain/langchain/chains/conversational_retrieval/base.py @@ -7,7 +7,7 @@ from pathlib import Path from typing import Any, Callable, Dict, List, Optional, Tuple, Union -from pydantic import Extra, Field, root_validator +from pydantic import ConfigDict, Field, model_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, @@ -75,13 +75,9 @@ class BaseConversationalRetrievalChain(Chain): get_chat_history: 
Optional[Callable[[List[CHAT_TURN_TYPE]], str]] = None """An optional function to get a string of the chat history. If None is provided, will use a default.""" - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True - allow_population_by_field_name = True + model_config = ConfigDict( + extra="forbid", arbitrary_types_allowed=True, populate_by_name=True + ) @property def input_keys(self) -> List[str]: @@ -374,7 +370,8 @@ class ChatVectorDBChain(BaseConversationalRetrievalChain): def _chain_type(self) -> str: return "chat-vector-db" - @root_validator() + @model_validator() + @classmethod def raise_deprecation(cls, values: Dict) -> Dict: warnings.warn( "`ChatVectorDBChain` is deprecated - " diff --git a/libs/langchain/langchain/chains/elasticsearch_database/base.py b/libs/langchain/langchain/chains/elasticsearch_database/base.py index 17f8ddcdb7ba0..c2179a12c5faf 100644 --- a/libs/langchain/langchain/chains/elasticsearch_database/base.py +++ b/libs/langchain/langchain/chains/elasticsearch_database/base.py @@ -3,7 +3,7 @@ from typing import TYPE_CHECKING, Any, Dict, List, Optional -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain @@ -47,14 +47,10 @@ class ElasticsearchDatabaseChain(Chain): sample_documents_in_index_info: int = 3 return_intermediate_steps: bool = False """Whether or not to return the intermediate steps along with the final answer.""" + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True - - @root_validator() + @model_validator() + @classmethod def validate_indices(cls, values: dict) -> dict: if values["include_indices"] and values["ignore_indices"]: raise ValueError( diff --git 
a/libs/langchain/langchain/chains/hyde/base.py b/libs/langchain/langchain/chains/hyde/base.py index 1fb2f3ac4ce3a..64ee6c72d36c3 100644 --- a/libs/langchain/langchain/chains/hyde/base.py +++ b/libs/langchain/langchain/chains/hyde/base.py @@ -7,7 +7,7 @@ from typing import Any, Dict, List, Optional import numpy as np -from pydantic import Extra +from pydantic import ConfigDict from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain @@ -25,12 +25,7 @@ class HypotheticalDocumentEmbedder(Chain, Embeddings): base_embeddings: Embeddings llm_chain: LLMChain - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) @property def input_keys(self) -> List[str]: diff --git a/libs/langchain/langchain/chains/llm.py b/libs/langchain/langchain/chains/llm.py index bb24607a97c27..2884848f0a0a9 100644 --- a/libs/langchain/langchain/chains/llm.py +++ b/libs/langchain/langchain/chains/llm.py @@ -4,7 +4,7 @@ import warnings from typing import Any, Dict, List, Optional, Sequence, Tuple, Union -from pydantic import Extra, Field +from pydantic import ConfigDict, Field from langchain.callbacks.manager import ( AsyncCallbackManager, @@ -58,12 +58,7 @@ def lc_serializable(self) -> bool: """Whether to return only the final parsed result. Defaults to True. 
If false, will return a bunch of extra information about the generation.""" llm_kwargs: dict = Field(default_factory=dict) - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) @property def input_keys(self) -> List[str]: diff --git a/libs/langchain/langchain/chains/llm_bash/base.py b/libs/langchain/langchain/chains/llm_bash/base.py index 2c7cadf838937..e2f785cbe5028 100644 --- a/libs/langchain/langchain/chains/llm_bash/base.py +++ b/libs/langchain/langchain/chains/llm_bash/base.py @@ -5,7 +5,7 @@ import warnings from typing import Any, Dict, List, Optional -from pydantic import Extra, Field, root_validator +from pydantic import ConfigDict, Field, model_validator, root_validator from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain @@ -36,14 +36,10 @@ class LLMBashChain(Chain): prompt: BasePromptTemplate = PROMPT """[Deprecated]""" bash_process: BashProcess = Field(default_factory=BashProcess) #: :meta private: + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def raise_deprecation(cls, values: Dict) -> Dict: if "llm" in values: warnings.warn( diff --git a/libs/langchain/langchain/chains/llm_checker/base.py b/libs/langchain/langchain/chains/llm_checker/base.py index e32a93fc4ad46..d102b6689faed 100644 --- a/libs/langchain/langchain/chains/llm_checker/base.py +++ b/libs/langchain/langchain/chains/llm_checker/base.py @@ -4,7 +4,7 @@ import warnings from typing import Any, Dict, List, Optional -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import CallbackManagerForChainRun 
from langchain.chains.base import Chain @@ -87,14 +87,10 @@ class LLMCheckerChain(Chain): """[Deprecated] Prompt to use when questioning the documents.""" input_key: str = "query" #: :meta private: output_key: str = "result" #: :meta private: + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def raise_deprecation(cls, values: Dict) -> Dict: if "llm" in values: warnings.warn( diff --git a/libs/langchain/langchain/chains/llm_math/base.py b/libs/langchain/langchain/chains/llm_math/base.py index b5e92620d462b..3653404bfc127 100644 --- a/libs/langchain/langchain/chains/llm_math/base.py +++ b/libs/langchain/langchain/chains/llm_math/base.py @@ -7,7 +7,7 @@ from typing import Any, Dict, List, Optional import numexpr -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, @@ -37,14 +37,10 @@ class LLMMathChain(Chain): """[Deprecated] Prompt to use to translate to python if necessary.""" input_key: str = "question" #: :meta private: output_key: str = "answer" #: :meta private: + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def raise_deprecation(cls, values: Dict) -> Dict: if "llm" in values: warnings.warn( diff --git a/libs/langchain/langchain/chains/llm_requests.py b/libs/langchain/langchain/chains/llm_requests.py index 2e1a6dd430412..c7351534a708d 100644 --- a/libs/langchain/langchain/chains/llm_requests.py +++ b/libs/langchain/langchain/chains/llm_requests.py @@ -3,7 +3,7 @@ from typing import Any, Dict, List, 
Optional -from pydantic import Extra, Field, root_validator +from pydantic import ConfigDict, Field, model_validator from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains import LLMChain @@ -27,12 +27,7 @@ class LLMRequestsChain(Chain): requests_key: str = "requests_result" #: :meta private: input_key: str = "url" #: :meta private: output_key: str = "output" #: :meta private: - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) @property def input_keys(self) -> List[str]: @@ -50,7 +45,8 @@ def output_keys(self) -> List[str]: """ return [self.output_key] - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" try: diff --git a/libs/langchain/langchain/chains/llm_summarization_checker/base.py b/libs/langchain/langchain/chains/llm_summarization_checker/base.py index 30e7f8ea853a8..840881c2e4801 100644 --- a/libs/langchain/langchain/chains/llm_summarization_checker/base.py +++ b/libs/langchain/langchain/chains/llm_summarization_checker/base.py @@ -6,7 +6,7 @@ from pathlib import Path from typing import Any, Dict, List, Optional -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain @@ -101,14 +101,10 @@ class LLMSummarizationCheckerChain(Chain): output_key: str = "result" #: :meta private: max_checks: int = 2 """Maximum number of times to check the assertions. 
Default to double-checking.""" + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def raise_deprecation(cls, values: Dict) -> Dict: if "llm" in values: warnings.warn( diff --git a/libs/langchain/langchain/chains/llm_symbolic_math/base.py b/libs/langchain/langchain/chains/llm_symbolic_math/base.py index 8193085b4fa9e..6ddfa8bd45b2b 100644 --- a/libs/langchain/langchain/chains/llm_symbolic_math/base.py +++ b/libs/langchain/langchain/chains/llm_symbolic_math/base.py @@ -4,7 +4,7 @@ import re from typing import Any, Dict, List, Optional -from pydantic import Extra +from pydantic import ConfigDict from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import ( @@ -30,12 +30,7 @@ class LLMSymbolicMathChain(Chain): llm_chain: LLMChain input_key: str = "question" #: :meta private: output_key: str = "answer" #: :meta private: - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) @property def input_keys(self) -> List[str]: diff --git a/libs/langchain/langchain/chains/mapreduce.py b/libs/langchain/langchain/chains/mapreduce.py index e7ba46ba60596..e6331192ff4fe 100644 --- a/libs/langchain/langchain/chains/mapreduce.py +++ b/libs/langchain/langchain/chains/mapreduce.py @@ -7,7 +7,7 @@ from typing import Any, Dict, List, Mapping, Optional -from pydantic import Extra +from pydantic import ConfigDict from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks from langchain.chains import ReduceDocumentsChain @@ -66,11 +66,7 @@ def from_params( **kwargs, ) - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed 
= True + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) @property def input_keys(self) -> List[str]: diff --git a/libs/langchain/langchain/chains/moderation.py b/libs/langchain/langchain/chains/moderation.py index e59dfa0cf6464..8cea66c0ec438 100644 --- a/libs/langchain/langchain/chains/moderation.py +++ b/libs/langchain/langchain/chains/moderation.py @@ -1,7 +1,7 @@ """Pass input through a moderation endpoint.""" from typing import Any, Dict, List, Optional -from pydantic import root_validator +from pydantic import model_validator from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain @@ -34,7 +34,8 @@ class OpenAIModerationChain(Chain): openai_api_key: Optional[str] = None openai_organization: Optional[str] = None - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" openai_api_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/chains/natbot/base.py b/libs/langchain/langchain/chains/natbot/base.py index 2adcea4fd7967..acd686c5eb1ce 100644 --- a/libs/langchain/langchain/chains/natbot/base.py +++ b/libs/langchain/langchain/chains/natbot/base.py @@ -4,7 +4,7 @@ import warnings from typing import Any, Dict, List, Optional -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain @@ -33,14 +33,10 @@ class NatBotChain(Chain): input_browser_content_key: str = "browser_content" #: :meta private: previous_command: str = "" #: :meta private: output_key: str = "command" #: :meta private: + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True - - @root_validator(pre=True) + 
@model_validator(mode="before") + @classmethod def raise_deprecation(cls, values: Dict) -> Dict: if "llm" in values: warnings.warn( diff --git a/libs/langchain/langchain/chains/pal/base.py b/libs/langchain/langchain/chains/pal/base.py index 4df5180ed07d4..eae7507fb97c9 100644 --- a/libs/langchain/langchain/chains/pal/base.py +++ b/libs/langchain/langchain/chains/pal/base.py @@ -11,7 +11,7 @@ import warnings from typing import Any, Dict, List, Optional -from pydantic import Extra, Field, root_validator +from pydantic import ConfigDict, Field, model_validator from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain @@ -114,14 +114,10 @@ class PALChain(Chain): """Validations to perform on the generated code.""" timeout: Optional[int] = 10 """Timeout in seconds for the generated code to execute.""" + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def raise_deprecation(cls, values: Dict) -> Dict: if "llm" in values: warnings.warn( diff --git a/libs/langchain/langchain/chains/qa_with_sources/base.py b/libs/langchain/langchain/chains/qa_with_sources/base.py index d0f762afbd5b8..692c686f9c6d2 100644 --- a/libs/langchain/langchain/chains/qa_with_sources/base.py +++ b/libs/langchain/langchain/chains/qa_with_sources/base.py @@ -7,7 +7,7 @@ from abc import ABC, abstractmethod from typing import Any, Dict, List, Optional -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, @@ -87,11 +87,7 @@ def from_chain_type( ) return cls(combine_documents_chain=combine_documents_chain, **kwargs) - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - 
arbitrary_types_allowed = True + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) @property def input_keys(self) -> List[str]: @@ -112,7 +108,8 @@ def output_keys(self) -> List[str]: _output_keys = _output_keys + ["source_documents"] return _output_keys - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_naming(cls, values: Dict) -> Dict: """Fix backwards compatibility in naming.""" if "combine_document_chain" in values: diff --git a/libs/langchain/langchain/chains/qa_with_sources/vector_db.py b/libs/langchain/langchain/chains/qa_with_sources/vector_db.py index 5d1689a9b1a53..699cf6523a015 100644 --- a/libs/langchain/langchain/chains/qa_with_sources/vector_db.py +++ b/libs/langchain/langchain/chains/qa_with_sources/vector_db.py @@ -3,7 +3,7 @@ import warnings from typing import Any, Dict, List -from pydantic import Field, root_validator +from pydantic import Field, model_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, @@ -63,7 +63,8 @@ async def _aget_docs( ) -> List[Document]: raise NotImplementedError("VectorDBQAWithSourcesChain does not support async") - @root_validator() + @model_validator() + @classmethod def raise_deprecation(cls, values: Dict) -> Dict: warnings.warn( "`VectorDBQAWithSourcesChain` is deprecated - " diff --git a/libs/langchain/langchain/chains/query_constructor/ir.py b/libs/langchain/langchain/chains/query_constructor/ir.py index 73707e589f338..eef6df31ca163 100644 --- a/libs/langchain/langchain/chains/query_constructor/ir.py +++ b/libs/langchain/langchain/chains/query_constructor/ir.py @@ -98,7 +98,7 @@ class Comparison(FilterDirective): comparator: Comparator attribute: str - value: Any + value: Any = None class Operation(FilterDirective): @@ -113,7 +113,7 @@ class StructuredQuery(Expr): query: str """Query string.""" - filter: Optional[FilterDirective] + filter: Optional[FilterDirective] = None """Filtering expression.""" - limit: Optional[int] 
+ limit: Optional[int] = None """Limit on the number of results.""" diff --git a/libs/langchain/langchain/chains/query_constructor/schema.py b/libs/langchain/langchain/chains/query_constructor/schema.py index 557ad5ea2a172..5ca49b08feebe 100644 --- a/libs/langchain/langchain/chains/query_constructor/schema.py +++ b/libs/langchain/langchain/chains/query_constructor/schema.py @@ -1,4 +1,4 @@ -from pydantic import BaseModel +from pydantic import BaseModel, ConfigDict class AttributeInfo(BaseModel): @@ -7,9 +7,4 @@ class AttributeInfo(BaseModel): name: str description: str type: str - - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True - frozen = True + model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True) diff --git a/libs/langchain/langchain/chains/retrieval_qa/base.py b/libs/langchain/langchain/chains/retrieval_qa/base.py index 3d9ef22ce8211..800c9b98671e7 100644 --- a/libs/langchain/langchain/chains/retrieval_qa/base.py +++ b/libs/langchain/langchain/chains/retrieval_qa/base.py @@ -6,7 +6,7 @@ from abc import abstractmethod from typing import Any, Dict, List, Optional -from pydantic import Extra, Field, root_validator +from pydantic import ConfigDict, Field, model_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, @@ -33,13 +33,9 @@ class BaseRetrievalQA(Chain): output_key: str = "result" #: :meta private: return_source_documents: bool = False """Return the source documents or not.""" - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True - allow_population_by_field_name = True + model_config = ConfigDict( + extra="forbid", arbitrary_types_allowed=True, populate_by_name=True + ) @property def input_keys(self) -> List[str]: @@ -240,7 +236,8 @@ class VectorDBQA(BaseRetrievalQA): search_kwargs: Dict[str, Any] = Field(default_factory=dict) """Extra search args.""" - @root_validator() + @model_validator() + 
@classmethod def raise_deprecation(cls, values: Dict) -> Dict: warnings.warn( "`VectorDBQA` is deprecated - " @@ -248,7 +245,8 @@ def raise_deprecation(cls, values: Dict) -> Dict: ) return values - @root_validator() + @model_validator() + @classmethod def validate_search_type(cls, values: Dict) -> Dict: """Validate search type.""" if "search_type" in values: diff --git a/libs/langchain/langchain/chains/router/base.py b/libs/langchain/langchain/chains/router/base.py index 5ee6fb16a5c7e..aa318f365057f 100644 --- a/libs/langchain/langchain/chains/router/base.py +++ b/libs/langchain/langchain/chains/router/base.py @@ -4,7 +4,7 @@ from abc import ABC from typing import Any, Dict, List, Mapping, NamedTuple, Optional -from pydantic import Extra +from pydantic import ConfigDict from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, @@ -59,12 +59,7 @@ class MultiRouteChain(Chain): silent_errors: bool = False """If True, use default_chain when an invalid destination name is provided. 
Defaults to False.""" - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) @property def input_keys(self) -> List[str]: diff --git a/libs/langchain/langchain/chains/router/embedding_router.py b/libs/langchain/langchain/chains/router/embedding_router.py index 53704a987a0d9..fed9510f655dd 100644 --- a/libs/langchain/langchain/chains/router/embedding_router.py +++ b/libs/langchain/langchain/chains/router/embedding_router.py @@ -2,7 +2,7 @@ from typing import Any, Dict, List, Optional, Sequence, Tuple, Type -from pydantic import Extra +from pydantic import ConfigDict from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.router.base import RouterChain @@ -16,12 +16,7 @@ class EmbeddingRouterChain(RouterChain): vectorstore: VectorStore routing_keys: List[str] = ["query"] - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) @property def input_keys(self) -> List[str]: diff --git a/libs/langchain/langchain/chains/router/llm_router.py b/libs/langchain/langchain/chains/router/llm_router.py index b097864613381..b91719beff8f5 100644 --- a/libs/langchain/langchain/chains/router/llm_router.py +++ b/libs/langchain/langchain/chains/router/llm_router.py @@ -3,7 +3,7 @@ from typing import Any, Dict, List, Optional, Type, cast -from pydantic import root_validator +from pydantic import model_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, @@ -22,7 +22,8 @@ class LLMRouterChain(RouterChain): llm_chain: LLMChain """LLM chain used to perform routing""" - @root_validator() + @model_validator() + @classmethod def validate_prompt(cls, values: dict) -> dict: prompt = values["llm_chain"].prompt if prompt.output_parser is None: diff --git 
a/libs/langchain/langchain/chains/sequential.py b/libs/langchain/langchain/chains/sequential.py index 26cbaf7021d1b..0d2109be19269 100644 --- a/libs/langchain/langchain/chains/sequential.py +++ b/libs/langchain/langchain/chains/sequential.py @@ -1,7 +1,7 @@ """Chain pipeline where the outputs of one step feed directly into next.""" from typing import Any, Dict, List, Optional -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, @@ -18,12 +18,7 @@ class SequentialChain(Chain): input_variables: List[str] output_variables: List[str] #: :meta private: return_all: bool = False - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) @property def input_keys(self) -> List[str]: @@ -41,7 +36,8 @@ def output_keys(self) -> List[str]: """ return self.output_variables - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_chains(cls, values: Dict) -> Dict: """Validate that the correct inputs exist for all chains.""" chains = values["chains"] @@ -129,12 +125,7 @@ class SimpleSequentialChain(Chain): strip_outputs: bool = False input_key: str = "input" #: :meta private: output_key: str = "output" #: :meta private: - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) @property def input_keys(self) -> List[str]: @@ -152,7 +143,8 @@ def output_keys(self) -> List[str]: """ return [self.output_key] - @root_validator() + @model_validator() + @classmethod def validate_chains(cls, values: Dict) -> Dict: """Validate that chains are all single input/output.""" for chain in values["chains"]: diff --git a/libs/langchain/langchain/chains/sql_database/base.py 
b/libs/langchain/langchain/chains/sql_database/base.py index 726dd1bbdcf9a..fd373a6574219 100644 --- a/libs/langchain/langchain/chains/sql_database/base.py +++ b/libs/langchain/langchain/chains/sql_database/base.py @@ -4,7 +4,7 @@ import warnings from typing import Any, Dict, List, Optional -from pydantic import Extra, Field, root_validator +from pydantic import ConfigDict, Field, model_validator from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain @@ -53,14 +53,10 @@ class SQLDatabaseChain(Chain): to fix the initial SQL from the LLM.""" query_checker_prompt: Optional[BasePromptTemplate] = None """The prompt template that should be used by the query checker""" + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def raise_deprecation(cls, values: Dict) -> Dict: if "llm" in values: warnings.warn( diff --git a/libs/langchain/langchain/chat_models/azure_openai.py b/libs/langchain/langchain/chat_models/azure_openai.py index cf841d68e33d2..1ad7ec28694fd 100644 --- a/libs/langchain/langchain/chat_models/azure_openai.py +++ b/libs/langchain/langchain/chat_models/azure_openai.py @@ -4,7 +4,7 @@ import logging from typing import Any, Dict, Mapping -from pydantic import root_validator +from pydantic import model_validator from langchain.chat_models.openai import ChatOpenAI from langchain.schema import ChatResult @@ -52,7 +52,8 @@ class AzureChatOpenAI(ChatOpenAI): openai_organization: str = "" openai_proxy: str = "" - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["openai_api_key"] = get_from_dict_or_env( diff --git a/libs/langchain/langchain/chat_models/base.py 
b/libs/langchain/langchain/chat_models/base.py index fe9ed5c59f70e..1df2f490ae11d 100644 --- a/libs/langchain/langchain/chat_models/base.py +++ b/libs/langchain/langchain/chat_models/base.py @@ -14,7 +14,7 @@ cast, ) -from pydantic import Field, root_validator +from pydantic import ConfigDict, Field, model_validator import langchain from langchain.callbacks.base import BaseCallbackManager @@ -64,7 +64,8 @@ class BaseChatModel(BaseLanguageModel[BaseMessageChunk], ABC): metadata: Optional[Dict[str, Any]] = Field(default=None, exclude=True) """Metadata to add to the run trace.""" - @root_validator() + @model_validator() + @classmethod def raise_deprecation(cls, values: Dict) -> Dict: """Raise deprecation warning if callback_manager is used.""" if values.get("callback_manager") is not None: @@ -75,10 +76,7 @@ def raise_deprecation(cls, values: Dict) -> Dict: values["callbacks"] = values.pop("callback_manager", None) return values - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) # --- Runnable methods --- diff --git a/libs/langchain/langchain/chat_models/google_palm.py b/libs/langchain/langchain/chat_models/google_palm.py index e180030e74372..1a9e4a175d971 100644 --- a/libs/langchain/langchain/chat_models/google_palm.py +++ b/libs/langchain/langchain/chat_models/google_palm.py @@ -4,7 +4,7 @@ import logging from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional -from pydantic import BaseModel, root_validator +from pydantic import BaseModel, model_validator from tenacity import ( before_sleep_log, retry, @@ -231,7 +231,7 @@ class ChatGooglePalm(BaseChatModel, BaseModel): """ - client: Any #: :meta private: + client: Any = None #: :meta private: model_name: str = "models/chat-bison-001" """Model name to use.""" google_api_key: Optional[str] = None @@ -248,7 +248,8 @@ class ChatGooglePalm(BaseChatModel, BaseModel): """Number of chat completions to 
generate for each prompt. Note that the API may not return the full n completions if duplicates are generated.""" - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate api key, python package exists, temperature, top_p, and top_k.""" google_api_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/chat_models/jinachat.py b/libs/langchain/langchain/chat_models/jinachat.py index fd1225a9ffcb2..ab7b251540035 100644 --- a/libs/langchain/langchain/chat_models/jinachat.py +++ b/libs/langchain/langchain/chat_models/jinachat.py @@ -15,7 +15,7 @@ Union, ) -from pydantic import Field, root_validator +from pydantic import ConfigDict, Field, model_validator from tenacity import ( before_sleep_log, retry, @@ -174,13 +174,10 @@ def lc_serializable(self) -> bool: """Whether to stream the results or not.""" max_tokens: Optional[int] = None """Maximum number of tokens to generate.""" + model_config = ConfigDict(populate_by_name=True) - class Config: - """Configuration for this pydantic object.""" - - allow_population_by_field_name = True - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = get_pydantic_field_names(cls) @@ -206,7 +203,8 @@ def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: values["model_kwargs"] = extra return values - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["jinachat_api_key"] = get_from_dict_or_env( diff --git a/libs/langchain/langchain/chat_models/openai.py b/libs/langchain/langchain/chat_models/openai.py index 815e3011bbf8e..5d609cc0fdf26 100644 --- a/libs/langchain/langchain/chat_models/openai.py +++ 
b/libs/langchain/langchain/chat_models/openai.py @@ -17,7 +17,7 @@ Union, ) -from pydantic import Field, root_validator +from pydantic import ConfigDict, Field, model_validator from tenacity import ( before_sleep_log, retry, @@ -231,13 +231,10 @@ def lc_serializable(self) -> bool: when using one of the many model providers that expose an OpenAI-like API but with different models. In those cases, in order to avoid erroring when tiktoken is called, you can specify a model name to use here.""" + model_config = ConfigDict(populate_by_name=True) - class Config: - """Configuration for this pydantic object.""" - - allow_population_by_field_name = True - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = get_pydantic_field_names(cls) @@ -263,7 +260,8 @@ def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: values["model_kwargs"] = extra return values - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["openai_api_key"] = get_from_dict_or_env( diff --git a/libs/langchain/langchain/chat_models/vertexai.py b/libs/langchain/langchain/chat_models/vertexai.py index b50eac264a2c7..c8ea864329030 100644 --- a/libs/langchain/langchain/chat_models/vertexai.py +++ b/libs/langchain/langchain/chat_models/vertexai.py @@ -2,7 +2,7 @@ from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, Dict, List, Optional -from pydantic import root_validator +from pydantic import model_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.chat_models.base import BaseChatModel @@ -97,7 +97,8 @@ class ChatVertexAI(_VertexAICommon, BaseChatModel): model_name: str = "chat-bison" - @root_validator() + @model_validator() + 
@classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that the python package exists in environment.""" cls._try_init_vertexai(values) diff --git a/libs/langchain/langchain/document_loaders/apify_dataset.py b/libs/langchain/langchain/document_loaders/apify_dataset.py index ca3ae6f995ca1..5404080d5b884 100644 --- a/libs/langchain/langchain/document_loaders/apify_dataset.py +++ b/libs/langchain/langchain/document_loaders/apify_dataset.py @@ -1,6 +1,6 @@ from typing import Any, Callable, Dict, List -from pydantic import BaseModel, root_validator +from pydantic import BaseModel, model_validator from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader @@ -25,7 +25,7 @@ class ApifyDatasetLoader(BaseLoader, BaseModel): documents = loader.load() """ # noqa: E501 - apify_client: Any + apify_client: Any = None """An instance of the ApifyClient class from the apify-client Python package.""" dataset_id: str """The ID of the dataset on the Apify platform.""" @@ -48,7 +48,8 @@ def __init__( dataset_id=dataset_id, dataset_mapping_function=dataset_mapping_function ) - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate environment. 
diff --git a/libs/langchain/langchain/document_loaders/blob_loaders/schema.py b/libs/langchain/langchain/document_loaders/blob_loaders/schema.py index 6548352c4cadb..a689e4286156a 100644 --- a/libs/langchain/langchain/document_loaders/blob_loaders/schema.py +++ b/libs/langchain/langchain/document_loaders/blob_loaders/schema.py @@ -13,7 +13,7 @@ from pathlib import PurePath from typing import Any, Generator, Iterable, Mapping, Optional, Union -from pydantic import BaseModel, root_validator +from pydantic import BaseModel, ConfigDict, model_validator PathLike = Union[str, PurePath] @@ -28,7 +28,7 @@ class Blob(BaseModel): Inspired by: https://developer.mozilla.org/en-US/docs/Web/API/Blob """ - data: Union[bytes, str, None] # Raw data + data: Union[bytes, str, None] = None # Raw data mimetype: Optional[str] = None # Not to be confused with a file extension encoding: str = "utf-8" # Use utf-8 as default encoding, if decoding to string # Location where the original content was found @@ -36,17 +36,15 @@ class Blob(BaseModel): # Useful for situations where downstream code assumes it must work with file paths # rather than in-memory content. 
path: Optional[PathLike] = None - - class Config: - arbitrary_types_allowed = True - frozen = True + model_config = ConfigDict(arbitrary_types_allowed=True, frozen=True) @property def source(self) -> Optional[str]: """The source location of the blob as string if known otherwise none.""" return str(self.path) if self.path else None - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def check_blob_is_valid(cls, values: Mapping[str, Any]) -> Mapping[str, Any]: """Verify that either data or path is provided.""" if "data" not in values and "path" not in values: diff --git a/libs/langchain/langchain/document_loaders/docugami.py b/libs/langchain/langchain/document_loaders/docugami.py index b60326f24cd52..5fa4ca2fca34e 100644 --- a/libs/langchain/langchain/document_loaders/docugami.py +++ b/libs/langchain/langchain/document_loaders/docugami.py @@ -39,11 +39,11 @@ class DocugamiLoader(BaseLoader, BaseModel): access_token: Optional[str] = os.environ.get("DOCUGAMI_API_KEY") """The Docugami API access token to use.""" - docset_id: Optional[str] + docset_id: Optional[str] = None """The Docugami API docset ID to use.""" - document_ids: Optional[Sequence[str]] + document_ids: Optional[Sequence[str]] = None """The Docugami API document IDs to use.""" - file_paths: Optional[Sequence[Union[Path, str]]] + file_paths: Optional[Sequence[Union[Path, str]]] = None """The local file paths to use.""" min_chunk_size: int = 32 # appended to the next chunk to avoid over-chunking """The minimum chunk size to use when parsing DGML. 
Defaults to 32.""" diff --git a/libs/langchain/langchain/document_loaders/embaas.py b/libs/langchain/langchain/document_loaders/embaas.py index 89959f9a59d35..c8a618647883f 100644 --- a/libs/langchain/langchain/document_loaders/embaas.py +++ b/libs/langchain/langchain/document_loaders/embaas.py @@ -3,7 +3,8 @@ from typing import Any, Dict, Iterator, List, Optional import requests -from pydantic import BaseModel, root_validator, validator +from pydantic import BaseModel, model_validator +from pydantic.v1 import validator as v1_validator from typing_extensions import NotRequired, TypedDict from langchain.docstore.document import Document @@ -61,7 +62,8 @@ class BaseEmbaasLoader(BaseModel): params: EmbaasDocumentExtractionParameters = EmbaasDocumentExtractionParameters() """Additional parameters to pass to the embaas document extraction API.""" - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" embaas_api_key = get_from_dict_or_env( @@ -208,10 +210,12 @@ class EmbaasLoader(BaseEmbaasLoader, BaseLoader): file_path: str """The path to the file to load.""" - blob_loader: Optional[EmbaasBlobLoader] + blob_loader: Optional[EmbaasBlobLoader] = None """The blob loader to use. If not provided, a default one will be created.""" - @validator("blob_loader", always=True) + # TODO[pydantic]: Replace with a Pydantic v2 `field_validator`. + # Check https://docs.pydantic.dev/dev-v2/migration/#changes-to-validators for more information. 
+ @v1_validator("blob_loader", always=True) def validate_blob_loader( cls, v: EmbaasBlobLoader, values: Dict ) -> EmbaasBlobLoader: diff --git a/libs/langchain/langchain/document_loaders/github.py b/libs/langchain/langchain/document_loaders/github.py index 2a55e3d00222d..83813f48d6f79 100644 --- a/libs/langchain/langchain/document_loaders/github.py +++ b/libs/langchain/langchain/document_loaders/github.py @@ -3,7 +3,7 @@ from typing import Dict, Iterator, List, Literal, Optional, Union import requests -from pydantic import BaseModel, root_validator, validator +from pydantic import BaseModel, field_validator, model_validator from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader @@ -18,7 +18,8 @@ class BaseGitHubLoader(BaseLoader, BaseModel, ABC): access_token: str """Personal access token - see https://github.com/settings/tokens?type=beta""" - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that access token exists in environment.""" values["access_token"] = get_from_dict_or_env( @@ -63,7 +64,8 @@ class GitHubIssuesLoader(BaseGitHubLoader): """Only show notifications updated after the given time. 
This is a timestamp in ISO 8601 format: YYYY-MM-DDTHH:MM:SSZ.""" - @validator("since") + @field_validator("since") + @classmethod def validate_since(cls, v: Optional[str]) -> Optional[str]: if v: try: diff --git a/libs/langchain/langchain/document_loaders/googledrive.py b/libs/langchain/langchain/document_loaders/googledrive.py index 4538b469d02c2..2683eba4ae3f1 100644 --- a/libs/langchain/langchain/document_loaders/googledrive.py +++ b/libs/langchain/langchain/document_loaders/googledrive.py @@ -13,7 +13,7 @@ from pathlib import Path from typing import Any, Dict, List, Optional, Sequence, Union -from pydantic import BaseModel, root_validator, validator +from pydantic import BaseModel, field_validator, root_validator from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader @@ -96,7 +96,8 @@ def full_form(x: str) -> str: values["file_types"] = [full_form(file_type) for file_type in file_types] return values - @validator("credentials_path") + @field_validator("credentials_path") + @classmethod def validate_credentials_path(cls, v: Any, **kwargs: Any) -> Any: """Validate that credentials_path exists.""" if not v.exists(): diff --git a/libs/langchain/langchain/document_loaders/onedrive.py b/libs/langchain/langchain/document_loaders/onedrive.py index 77f75e2c07e89..8a81ae9eca2dd 100644 --- a/libs/langchain/langchain/document_loaders/onedrive.py +++ b/libs/langchain/langchain/document_loaders/onedrive.py @@ -8,7 +8,8 @@ from pathlib import Path from typing import TYPE_CHECKING, Dict, List, Optional, Type, Union -from pydantic import BaseModel, BaseSettings, Field, FilePath, SecretStr +from pydantic import BaseModel, Field, FilePath, SecretStr +from pydantic_settings import BaseSettings, SettingsConfigDict from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader @@ -23,13 +24,11 @@ class _OneDriveSettings(BaseSettings): - client_id: str = Field(..., env="O365_CLIENT_ID") - 
client_secret: SecretStr = Field(..., env="O365_CLIENT_SECRET") - - class Config: - env_prefix = "" - case_sentive = False - env_file = ".env" + client_id: str = Field(..., validation_alias="O365_CLIENT_ID") + client_secret: SecretStr = Field(..., validation_alias="O365_CLIENT_SECRET") + model_config = SettingsConfigDict( + env_prefix="", case_sentive=False, env_file=".env" + ) class _OneDriveTokenStorage(BaseSettings): diff --git a/libs/langchain/langchain/document_loaders/onedrive_file.py b/libs/langchain/langchain/document_loaders/onedrive_file.py index c83a216196129..1ccdf123c0e22 100644 --- a/libs/langchain/langchain/document_loaders/onedrive_file.py +++ b/libs/langchain/langchain/document_loaders/onedrive_file.py @@ -3,7 +3,7 @@ import tempfile from typing import TYPE_CHECKING, List -from pydantic import BaseModel, Field +from pydantic import BaseModel, ConfigDict, Field from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader @@ -20,11 +20,7 @@ class OneDriveFileLoader(BaseLoader, BaseModel): file: File = Field(...) """The file to load.""" - - class Config: - arbitrary_types_allowed = True - """Allow arbitrary types. This is needed for the File type. Default is True. 
- See https://pydantic-docs.helpmanual.io/usage/types/#arbitrary-types-allowed""" + model_config = ConfigDict(arbitrary_types_allowed=True) def load(self) -> List[Document]: """Load Documents""" diff --git a/libs/langchain/langchain/document_transformers/embeddings_redundant_filter.py b/libs/langchain/langchain/document_transformers/embeddings_redundant_filter.py index ba57f2ef724c9..66583038372fc 100644 --- a/libs/langchain/langchain/document_transformers/embeddings_redundant_filter.py +++ b/libs/langchain/langchain/document_transformers/embeddings_redundant_filter.py @@ -2,7 +2,7 @@ from typing import Any, Callable, List, Sequence import numpy as np -from pydantic import BaseModel, Field +from pydantic import BaseModel, ConfigDict, Field from langchain.embeddings.base import Embeddings from langchain.schema import BaseDocumentTransformer, Document @@ -133,11 +133,7 @@ class EmbeddingsRedundantFilter(BaseDocumentTransformer, BaseModel): similarity_threshold: float = 0.95 """Threshold for determining when two documents are similar enough to be considered redundant.""" - - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) def transform_documents( self, documents: Sequence[Document], **kwargs: Any @@ -188,11 +184,7 @@ class EmbeddingsClusteringFilter(BaseDocumentTransformer, BaseModel): This could dramatically reduce results when there is a lot of overlap between clusters. 
""" - - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) def transform_documents( self, documents: Sequence[Document], **kwargs: Any diff --git a/libs/langchain/langchain/document_transformers/long_context_reorder.py b/libs/langchain/langchain/document_transformers/long_context_reorder.py index 5debbed5ee816..04bed30e5950a 100644 --- a/libs/langchain/langchain/document_transformers/long_context_reorder.py +++ b/libs/langchain/langchain/document_transformers/long_context_reorder.py @@ -1,7 +1,7 @@ """Reorder documents""" from typing import Any, List, Sequence -from pydantic import BaseModel +from pydantic import BaseModel, ConfigDict from langchain.schema import BaseDocumentTransformer, Document @@ -27,10 +27,7 @@ class LongContextReorder(BaseDocumentTransformer, BaseModel): in the middle of long contexts. See: https://arxiv.org/abs//2307.03172""" - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) def transform_documents( self, documents: Sequence[Document], **kwargs: Any diff --git a/libs/langchain/langchain/embeddings/aleph_alpha.py b/libs/langchain/langchain/embeddings/aleph_alpha.py index f53d509a9cad8..3fec90b080df9 100644 --- a/libs/langchain/langchain/embeddings/aleph_alpha.py +++ b/libs/langchain/langchain/embeddings/aleph_alpha.py @@ -1,6 +1,6 @@ from typing import Any, Dict, List, Optional -from pydantic import BaseModel, root_validator +from pydantic import BaseModel, model_validator from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env @@ -29,7 +29,7 @@ class AlephAlphaAsymmetricSemanticEmbedding(BaseModel, Embeddings): """ - client: Any #: :meta private: + client: Any = None #: :meta private: """Aleph Alpha client.""" model: Optional[str] = "luminous-base" """Model name to use.""" @@ -49,7 +49,8 @@ 
class AlephAlphaAsymmetricSemanticEmbedding(BaseModel, Embeddings): aleph_alpha_api_key: Optional[str] = None """API key for Aleph Alpha API.""" - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" aleph_alpha_api_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/embeddings/bedrock.py b/libs/langchain/langchain/embeddings/bedrock.py index 93b3d69bbef15..ed5909df54bfa 100644 --- a/libs/langchain/langchain/embeddings/bedrock.py +++ b/libs/langchain/langchain/embeddings/bedrock.py @@ -2,7 +2,7 @@ import os from typing import Any, Dict, List, Optional -from pydantic import BaseModel, Extra, root_validator +from pydantic import BaseModel, ConfigDict, model_validator from langchain.embeddings.base import Embeddings @@ -38,7 +38,7 @@ class BedrockEmbeddings(BaseModel, Embeddings): ) """ - client: Any #: :meta private: + client: Any = None #: :meta private: """Bedrock client.""" region_name: Optional[str] = None """The aws region e.g., `us-west-2`. 
Fallsback to AWS_DEFAULT_REGION env variable @@ -62,13 +62,10 @@ class BedrockEmbeddings(BaseModel, Embeddings): endpoint_url: Optional[str] = None """Needed if you don't want to default to us-east-1 endpoint""" + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that AWS credentials to and python package exists in environment.""" diff --git a/libs/langchain/langchain/embeddings/clarifai.py b/libs/langchain/langchain/embeddings/clarifai.py index 6d97fc075ef26..1173ba3906438 100644 --- a/libs/langchain/langchain/embeddings/clarifai.py +++ b/libs/langchain/langchain/embeddings/clarifai.py @@ -1,7 +1,7 @@ import logging from typing import Any, Dict, List, Optional -from pydantic import BaseModel, Extra, root_validator +from pydantic import BaseModel, ConfigDict, model_validator from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env @@ -25,9 +25,9 @@ class ClarifaiEmbeddings(BaseModel, Embeddings): ) """ - stub: Any #: :meta private: + stub: Any = None #: :meta private: """Clarifai stub.""" - userDataObject: Any + userDataObject: Any = None """Clarifai user data object.""" model_id: Optional[str] = None """Model id to use.""" @@ -40,13 +40,10 @@ class ClarifaiEmbeddings(BaseModel, Embeddings): pat: Optional[str] = None """Clarifai personal access token to use.""" api_base: str = "https://api.clarifai.com" + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["pat"] = get_from_dict_or_env(values, "pat", "CLARIFAI_PAT") diff --git 
a/libs/langchain/langchain/embeddings/cohere.py b/libs/langchain/langchain/embeddings/cohere.py index 28a3b6cc2a1c3..712c32273755e 100644 --- a/libs/langchain/langchain/embeddings/cohere.py +++ b/libs/langchain/langchain/embeddings/cohere.py @@ -1,6 +1,6 @@ from typing import Any, Dict, List, Optional -from pydantic import BaseModel, Extra, root_validator +from pydantic import BaseModel, ConfigDict, model_validator from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env @@ -22,7 +22,7 @@ class CohereEmbeddings(BaseModel, Embeddings): ) """ - client: Any #: :meta private: + client: Any = None #: :meta private: """Cohere client.""" async_client: Any #: :meta private: """Cohere async client.""" @@ -33,13 +33,10 @@ class CohereEmbeddings(BaseModel, Embeddings): """Truncate embeddings that are too long from start or end ("NONE"|"START"|"END")""" cohere_api_key: Optional[str] = None + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" cohere_api_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/embeddings/dashscope.py b/libs/langchain/langchain/embeddings/dashscope.py index 475c7a5940a7f..615e7e99bbc29 100644 --- a/libs/langchain/langchain/embeddings/dashscope.py +++ b/libs/langchain/langchain/embeddings/dashscope.py @@ -9,7 +9,7 @@ Optional, ) -from pydantic import BaseModel, Extra, root_validator +from pydantic import BaseModel, ConfigDict, model_validator from requests.exceptions import HTTPError from tenacity import ( before_sleep_log, @@ -91,19 +91,16 @@ class DashScopeEmbeddings(BaseModel, Embeddings): """ - client: Any #: :meta private: + client: Any = None #: :meta private: """The DashScope client.""" model: str = "text-embedding-v1" 
dashscope_api_key: Optional[str] = None max_retries: int = 5 """Maximum number of retries to make when generating.""" + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: import dashscope diff --git a/libs/langchain/langchain/embeddings/deepinfra.py b/libs/langchain/langchain/embeddings/deepinfra.py index 2fb15ce188255..9add32d34dd5a 100644 --- a/libs/langchain/langchain/embeddings/deepinfra.py +++ b/libs/langchain/langchain/embeddings/deepinfra.py @@ -1,7 +1,7 @@ from typing import Any, Dict, List, Mapping, Optional import requests -from pydantic import BaseModel, Extra, root_validator +from pydantic import BaseModel, ConfigDict, model_validator from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env @@ -50,13 +50,10 @@ class DeepInfraEmbeddings(BaseModel, Embeddings): """Other model keyword args""" deepinfra_api_token: Optional[str] = None + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" deepinfra_api_token = get_from_dict_or_env( diff --git a/libs/langchain/langchain/embeddings/embaas.py b/libs/langchain/langchain/embeddings/embaas.py index 945861dc83228..321cb901ada1e 100644 --- a/libs/langchain/langchain/embeddings/embaas.py +++ b/libs/langchain/langchain/embeddings/embaas.py @@ -1,7 +1,7 @@ from typing import Any, Dict, List, Mapping, Optional import requests -from pydantic import BaseModel, Extra, root_validator +from pydantic import BaseModel, ConfigDict, model_validator from typing_extensions import NotRequired, TypedDict from langchain.embeddings.base import 
Embeddings @@ -51,13 +51,10 @@ class EmbaasEmbeddings(BaseModel, Embeddings): api_url: str = EMBAAS_API_URL """The URL for the embaas embeddings API.""" embaas_api_key: Optional[str] = None + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" embaas_api_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/embeddings/google_palm.py b/libs/langchain/langchain/embeddings/google_palm.py index 8633ce7a94522..1dac51076afb4 100644 --- a/libs/langchain/langchain/embeddings/google_palm.py +++ b/libs/langchain/langchain/embeddings/google_palm.py @@ -3,7 +3,7 @@ import logging from typing import Any, Callable, Dict, List, Optional -from pydantic import BaseModel, root_validator +from pydantic import BaseModel, model_validator from tenacity import ( before_sleep_log, retry, @@ -56,12 +56,13 @@ def _embed_with_retry(*args: Any, **kwargs: Any) -> Any: class GooglePalmEmbeddings(BaseModel, Embeddings): """Google's PaLM Embeddings APIs.""" - client: Any - google_api_key: Optional[str] + client: Any = None + google_api_key: Optional[str] = None model_name: str = "models/embedding-gecko-001" """Model name to use.""" - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate api key, python package exists.""" google_api_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/embeddings/gpt4all.py b/libs/langchain/langchain/embeddings/gpt4all.py index 0f109b8453c1e..256829a2e099c 100644 --- a/libs/langchain/langchain/embeddings/gpt4all.py +++ b/libs/langchain/langchain/embeddings/gpt4all.py @@ -1,6 +1,6 @@ from typing import Any, Dict, List -from pydantic import BaseModel, root_validator +from pydantic import BaseModel, model_validator from 
langchain.embeddings.base import Embeddings @@ -18,9 +18,10 @@ class GPT4AllEmbeddings(BaseModel, Embeddings): embeddings = GPT4AllEmbeddings() """ - client: Any #: :meta private: + client: Any = None #: :meta private: - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that GPT4All library is installed.""" diff --git a/libs/langchain/langchain/embeddings/huggingface.py b/libs/langchain/langchain/embeddings/huggingface.py index 3531c5537fa0e..e5bd583ce27b7 100644 --- a/libs/langchain/langchain/embeddings/huggingface.py +++ b/libs/langchain/langchain/embeddings/huggingface.py @@ -1,6 +1,6 @@ from typing import Any, Dict, List, Optional -from pydantic import BaseModel, Extra, Field +from pydantic import BaseModel, ConfigDict, Field from langchain.embeddings.base import Embeddings @@ -32,7 +32,7 @@ class HuggingFaceEmbeddings(BaseModel, Embeddings): ) """ - client: Any #: :meta private: + client: Any = None #: :meta private: model_name: str = DEFAULT_MODEL_NAME """Model name to use.""" cache_folder: Optional[str] = None @@ -59,10 +59,7 @@ def __init__(self, **kwargs: Any): self.model_name, cache_folder=self.cache_folder, **self.model_kwargs ) - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") def embed_documents(self, texts: List[str]) -> List[List[float]]: """Compute doc embeddings using a HuggingFace transformer model. 
@@ -112,7 +109,7 @@ class HuggingFaceInstructEmbeddings(BaseModel, Embeddings): ) """ - client: Any #: :meta private: + client: Any = None #: :meta private: model_name: str = DEFAULT_INSTRUCT_MODEL """Model name to use.""" cache_folder: Optional[str] = None @@ -139,10 +136,7 @@ def __init__(self, **kwargs: Any): except ImportError as e: raise ValueError("Dependencies for InstructorEmbedding not found.") from e - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") def embed_documents(self, texts: List[str]) -> List[List[float]]: """Compute doc embeddings using a HuggingFace instruct model. diff --git a/libs/langchain/langchain/embeddings/huggingface_hub.py b/libs/langchain/langchain/embeddings/huggingface_hub.py index 0ca9985a154f1..1e0b8b90838bf 100644 --- a/libs/langchain/langchain/embeddings/huggingface_hub.py +++ b/libs/langchain/langchain/embeddings/huggingface_hub.py @@ -1,6 +1,6 @@ from typing import Any, Dict, List, Optional -from pydantic import BaseModel, Extra, root_validator +from pydantic import BaseModel, ConfigDict, model_validator from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env @@ -28,7 +28,7 @@ class HuggingFaceHubEmbeddings(BaseModel, Embeddings): ) """ - client: Any #: :meta private: + client: Any = None #: :meta private: repo_id: str = DEFAULT_REPO_ID """Model name to use.""" task: Optional[str] = "feature-extraction" @@ -37,13 +37,10 @@ class HuggingFaceHubEmbeddings(BaseModel, Embeddings): """Key word arguments to pass to the model.""" huggingfacehub_api_token: Optional[str] = None + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" huggingfacehub_api_token = 
get_from_dict_or_env( diff --git a/libs/langchain/langchain/embeddings/jina.py b/libs/langchain/langchain/embeddings/jina.py index 1931a9c4ee408..b0c14ed43f82e 100644 --- a/libs/langchain/langchain/embeddings/jina.py +++ b/libs/langchain/langchain/embeddings/jina.py @@ -2,7 +2,7 @@ from typing import Any, Dict, List, Optional import requests -from pydantic import BaseModel, root_validator +from pydantic import BaseModel, model_validator from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env @@ -11,7 +11,7 @@ class JinaEmbeddings(BaseModel, Embeddings): """Jina embedding models.""" - client: Any #: :meta private: + client: Any = None #: :meta private: model_name: str = "ViT-B-32::openai" """Model name to use.""" @@ -20,7 +20,8 @@ class JinaEmbeddings(BaseModel, Embeddings): jina_api_url: str = "https://api.clip.jina.ai/api/v1/models/" request_headers: Optional[dict] = None - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that auth token exists in environment.""" # Set Auth diff --git a/libs/langchain/langchain/embeddings/llamacpp.py b/libs/langchain/langchain/embeddings/llamacpp.py index 5c31d53bf3223..7d238da75c213 100644 --- a/libs/langchain/langchain/embeddings/llamacpp.py +++ b/libs/langchain/langchain/embeddings/llamacpp.py @@ -1,6 +1,6 @@ from typing import Any, Dict, List, Optional -from pydantic import BaseModel, Extra, Field, root_validator +from pydantic import BaseModel, ConfigDict, Field, model_validator from langchain.embeddings.base import Embeddings @@ -19,7 +19,7 @@ class LlamaCppEmbeddings(BaseModel, Embeddings): llama = LlamaCppEmbeddings(model_path="/path/to/model.bin") """ - client: Any #: :meta private: + client: Any = None #: :meta private: model_path: str n_ctx: int = Field(512, alias="n_ctx") @@ -54,13 +54,10 @@ class LlamaCppEmbeddings(BaseModel, Embeddings): n_gpu_layers: Optional[int] = Field(None, alias="n_gpu_layers") 
"""Number of layers to be loaded into gpu memory. Default None.""" + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that llama-cpp-python library is installed.""" model_path = values["model_path"] diff --git a/libs/langchain/langchain/embeddings/localai.py b/libs/langchain/langchain/embeddings/localai.py index 532cd49eadb30..e8d6bc76474de 100644 --- a/libs/langchain/langchain/embeddings/localai.py +++ b/libs/langchain/langchain/embeddings/localai.py @@ -15,7 +15,7 @@ Union, ) -from pydantic import BaseModel, Extra, Field, root_validator +from pydantic import BaseModel, ConfigDict, Field, model_validator from tenacity import ( AsyncRetrying, before_sleep_log, @@ -135,7 +135,7 @@ class LocalAIEmbeddings(BaseModel, Embeddings): """ - client: Any #: :meta private: + client: Any = None #: :meta private: model: str = "text-embedding-ada-002" deployment: str = model openai_api_version: Optional[str] = None @@ -159,13 +159,10 @@ class LocalAIEmbeddings(BaseModel, Embeddings): """Whether to show a progress bar when embedding.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = get_pydantic_field_names(cls) @@ -191,7 +188,8 @@ def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: values["model_kwargs"] = extra return values - @root_validator() + @model_validator() + @classmethod def 
validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["openai_api_key"] = get_from_dict_or_env( @@ -262,11 +260,7 @@ def _embedding_func(self, text: str, *, engine: str) -> List[float]: # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500 # replace newlines, which can negatively affect performance. text = text.replace("\n", " ") - return embed_with_retry( - self, - input=[text], - **self._invocation_params, - )["data"][ + return embed_with_retry(self, input=[text], **self._invocation_params,)["data"][ 0 ]["embedding"] diff --git a/libs/langchain/langchain/embeddings/minimax.py b/libs/langchain/langchain/embeddings/minimax.py index 3ac9a7ef430f1..d0c51699c8c44 100644 --- a/libs/langchain/langchain/embeddings/minimax.py +++ b/libs/langchain/langchain/embeddings/minimax.py @@ -4,7 +4,7 @@ from typing import Any, Callable, Dict, List, Optional import requests -from pydantic import BaseModel, Extra, root_validator +from pydantic import BaseModel, ConfigDict, model_validator from tenacity import ( before_sleep_log, retry, @@ -79,13 +79,10 @@ class MiniMaxEmbeddings(BaseModel, Embeddings): """Group ID for MiniMax API.""" minimax_api_key: Optional[str] = None """API Key for MiniMax API.""" + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that group id and api key exists in environment.""" minimax_group_id = get_from_dict_or_env( diff --git a/libs/langchain/langchain/embeddings/modelscope_hub.py b/libs/langchain/langchain/embeddings/modelscope_hub.py index 6b6ebab42475c..a4000a8edcc36 100644 --- a/libs/langchain/langchain/embeddings/modelscope_hub.py +++ b/libs/langchain/langchain/embeddings/modelscope_hub.py @@ -1,6 +1,6 @@ from typing import Any, List -from pydantic 
import BaseModel, Extra +from pydantic import BaseModel, ConfigDict from langchain.embeddings.base import Embeddings @@ -18,7 +18,7 @@ class ModelScopeEmbeddings(BaseModel, Embeddings): embed = ModelScopeEmbeddings(model_id=model_id) """ - embed: Any + embed: Any = None model_id: str = "damo/nlp_corom_sentence-embedding_english-base" """Model name to use.""" @@ -37,10 +37,7 @@ def __init__(self, **kwargs: Any): "Please install it with `pip install modelscope`." ) from e - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") def embed_documents(self, texts: List[str]) -> List[List[float]]: """Compute doc embeddings using a modelscope embedding model. diff --git a/libs/langchain/langchain/embeddings/mosaicml.py b/libs/langchain/langchain/embeddings/mosaicml.py index d2e448da7c1d5..7b3581ed6d98c 100644 --- a/libs/langchain/langchain/embeddings/mosaicml.py +++ b/libs/langchain/langchain/embeddings/mosaicml.py @@ -1,7 +1,7 @@ from typing import Any, Dict, List, Mapping, Optional, Tuple import requests -from pydantic import BaseModel, Extra, root_validator +from pydantic import BaseModel, ConfigDict, model_validator from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env @@ -41,13 +41,10 @@ class MosaicMLInstructorEmbeddings(BaseModel, Embeddings): """How long to try sleeping for if a rate limit is encountered""" mosaicml_api_token: Optional[str] = None + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" mosaicml_api_token = get_from_dict_or_env( diff --git a/libs/langchain/langchain/embeddings/nlpcloud.py b/libs/langchain/langchain/embeddings/nlpcloud.py index 5a65768a9415e..e450ab0a92c13 100644 
--- a/libs/langchain/langchain/embeddings/nlpcloud.py +++ b/libs/langchain/langchain/embeddings/nlpcloud.py @@ -1,6 +1,6 @@ from typing import Any, Dict, List -from pydantic import BaseModel, root_validator +from pydantic import BaseModel, model_validator from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env @@ -21,7 +21,7 @@ class NLPCloudEmbeddings(BaseModel, Embeddings): model_name: str # Define model_name as a class attribute gpu: bool # Define gpu as a class attribute - client: Any #: :meta private: + client: Any = None #: :meta private: def __init__( self, @@ -31,7 +31,8 @@ def __init__( ) -> None: super().__init__(model_name=model_name, gpu=gpu, **kwargs) - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" nlpcloud_api_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/embeddings/octoai_embeddings.py b/libs/langchain/langchain/embeddings/octoai_embeddings.py index 4a9d0a5120f89..5d41721bab134 100644 --- a/libs/langchain/langchain/embeddings/octoai_embeddings.py +++ b/libs/langchain/langchain/embeddings/octoai_embeddings.py @@ -1,6 +1,6 @@ from typing import Any, Dict, List, Mapping, Optional -from pydantic import BaseModel, Extra, Field, root_validator +from pydantic import BaseModel, ConfigDict, Field, model_validator from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env @@ -29,13 +29,10 @@ class OctoAIEmbeddings(BaseModel, Embeddings): query_instruction: str = Field( DEFAULT_QUERY_INSTRUCTION, description="Instruction to use for embedding query." 
) + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator(allow_reuse=True) + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Ensure that the API key and python package exist in environment.""" values["octoai_api_token"] = get_from_dict_or_env( diff --git a/libs/langchain/langchain/embeddings/openai.py b/libs/langchain/langchain/embeddings/openai.py index 2234975f0a90c..69243e5822b93 100644 --- a/libs/langchain/langchain/embeddings/openai.py +++ b/libs/langchain/langchain/embeddings/openai.py @@ -16,7 +16,7 @@ ) import numpy as np -from pydantic import BaseModel, Extra, Field, root_validator +from pydantic import BaseModel, ConfigDict, Field, model_validator from tenacity import ( AsyncRetrying, before_sleep_log, @@ -159,7 +159,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings): """ - client: Any #: :meta private: + client: Any = None #: :meta private: model: str = "text-embedding-ada-002" deployment: str = model # to support Azure OpenAI Service custom deployment names openai_api_version: Optional[str] = None @@ -196,13 +196,10 @@ class OpenAIEmbeddings(BaseModel, Embeddings): """Whether to show a progress bar when embedding.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = get_pydantic_field_names(cls) @@ -228,7 +225,8 @@ def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: values["model_kwargs"] = extra return values - @root_validator() + 
@model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["openai_api_key"] = get_from_dict_or_env( @@ -371,11 +369,7 @@ def _get_len_safe_embeddings( for i in range(len(texts)): _result = results[i] if len(_result) == 0: - average = embed_with_retry( - self, - input="", - **self._invocation_params, - )[ + average = embed_with_retry(self, input="", **self._invocation_params,)[ "data" ][0]["embedding"] else: @@ -464,11 +458,7 @@ def _embedding_func(self, text: str, *, engine: str) -> List[float]: # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500 # replace newlines, which can negatively affect performance. text = text.replace("\n", " ") - return embed_with_retry( - self, - input=[text], - **self._invocation_params, - )[ + return embed_with_retry(self, input=[text], **self._invocation_params,)[ "data" ][0]["embedding"] diff --git a/libs/langchain/langchain/embeddings/sagemaker_endpoint.py b/libs/langchain/langchain/embeddings/sagemaker_endpoint.py index d0fd864a6bdda..97dd40c2bf338 100644 --- a/libs/langchain/langchain/embeddings/sagemaker_endpoint.py +++ b/libs/langchain/langchain/embeddings/sagemaker_endpoint.py @@ -1,6 +1,6 @@ from typing import Any, Dict, List, Optional -from pydantic import BaseModel, Extra, root_validator +from pydantic import BaseModel, ConfigDict, model_validator from langchain.embeddings.base import Embeddings from langchain.llms.sagemaker_endpoint import ContentHandlerBase @@ -48,7 +48,7 @@ class SagemakerEndpointEmbeddings(BaseModel, Embeddings): credentials_profile_name=credentials_profile_name ) """ - client: Any #: :meta private: + client: Any = None #: :meta private: endpoint_name: str = "" """The name of the endpoint from the deployed Sagemaker model. @@ -98,14 +98,10 @@ def transform_output(self, output: bytes) -> List[List[float]]: function. See `boto3`_. docs for more info. .. 
_boto3: """ + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True - - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that AWS credentials to and python package exists in environment.""" try: diff --git a/libs/langchain/langchain/embeddings/self_hosted.py b/libs/langchain/langchain/embeddings/self_hosted.py index 3ef3136692462..971c5cf9d2773 100644 --- a/libs/langchain/langchain/embeddings/self_hosted.py +++ b/libs/langchain/langchain/embeddings/self_hosted.py @@ -1,6 +1,6 @@ from typing import Any, Callable, List -from pydantic import Extra +from pydantic import ConfigDict from langchain.embeddings.base import Embeddings from langchain.llms import SelfHostedPipeline @@ -65,11 +65,7 @@ def get_pipeline(): """Inference function to extract the embeddings on the remote hardware.""" inference_kwargs: Any = None """Any kwargs to pass to the model's inference function.""" - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") def embed_documents(self, texts: List[str]) -> List[List[float]]: """Compute doc embeddings using a HuggingFace transformer model. 
diff --git a/libs/langchain/langchain/embeddings/spacy_embeddings.py b/libs/langchain/langchain/embeddings/spacy_embeddings.py index ded1fbbd8ac48..31303f993639a 100644 --- a/libs/langchain/langchain/embeddings/spacy_embeddings.py +++ b/libs/langchain/langchain/embeddings/spacy_embeddings.py @@ -1,7 +1,7 @@ import importlib.util from typing import Any, Dict, List -from pydantic import BaseModel, Extra, root_validator +from pydantic import BaseModel, ConfigDict, model_validator from langchain.embeddings.base import Embeddings @@ -21,14 +21,11 @@ class SpacyEmbeddings(BaseModel, Embeddings): Generates an embedding for a single piece of text. """ - nlp: Any # The Spacy model loaded into memory + nlp: Any = None # The Spacy model loaded into memory + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid # Forbid extra attributes during model initialization - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> Dict: """ Validates that the Spacy package and the 'en_core_web_sm' model are installed. 
diff --git a/libs/langchain/langchain/embeddings/tensorflow_hub.py b/libs/langchain/langchain/embeddings/tensorflow_hub.py index 514c7e1c1fd9c..1c06f44e2247a 100644 --- a/libs/langchain/langchain/embeddings/tensorflow_hub.py +++ b/libs/langchain/langchain/embeddings/tensorflow_hub.py @@ -1,6 +1,6 @@ from typing import Any, List -from pydantic import BaseModel, Extra +from pydantic import BaseModel, ConfigDict from langchain.embeddings.base import Embeddings @@ -20,7 +20,7 @@ class TensorflowHubEmbeddings(BaseModel, Embeddings): tf = TensorflowHubEmbeddings(model_url=url) """ - embed: Any #: :meta private: + embed: Any = None #: :meta private: model_url: str = DEFAULT_MODEL_URL """Model name to use.""" @@ -44,10 +44,7 @@ def __init__(self, **kwargs: Any): self.embed = tensorflow_hub.load(self.model_url) - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") def embed_documents(self, texts: List[str]) -> List[List[float]]: """Compute doc embeddings using a TensorflowHub embedding model. 
diff --git a/libs/langchain/langchain/embeddings/vertexai.py b/libs/langchain/langchain/embeddings/vertexai.py index 6ea6d02303324..c21480b818c00 100644 --- a/libs/langchain/langchain/embeddings/vertexai.py +++ b/libs/langchain/langchain/embeddings/vertexai.py @@ -1,6 +1,6 @@ from typing import Dict, List -from pydantic import root_validator +from pydantic import model_validator from langchain.embeddings.base import Embeddings from langchain.llms.vertexai import _VertexAICommon @@ -12,7 +12,8 @@ class VertexAIEmbeddings(_VertexAICommon, Embeddings): model_name: str = "textembedding-gecko" - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validates that the python package exists in environment.""" cls._try_init_vertexai(values) diff --git a/libs/langchain/langchain/evaluation/agents/trajectory_eval_chain.py b/libs/langchain/langchain/evaluation/agents/trajectory_eval_chain.py index 63b8fb617ef84..10c0ff52755dd 100644 --- a/libs/langchain/langchain/evaluation/agents/trajectory_eval_chain.py +++ b/libs/langchain/langchain/evaluation/agents/trajectory_eval_chain.py @@ -7,7 +7,7 @@ from typing import Any, Dict, List, NamedTuple, Optional, Sequence, Tuple, Union -from pydantic import Extra, Field +from pydantic import ConfigDict, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, @@ -131,11 +131,7 @@ def geography_answers(country: str, question: str) -> str: """The output parser used to parse the output.""" return_reasoning: bool = False """Whether to return the reasoning along with the score.""" - - class Config: - """Configuration for the QAEvalChain.""" - - extra = Extra.ignore + model_config = ConfigDict(extra="ignore") @property def requires_reference(self) -> bool: diff --git a/libs/langchain/langchain/evaluation/comparison/eval_chain.py b/libs/langchain/langchain/evaluation/comparison/eval_chain.py index 2eccdd186fdca..41fbdaf062f27 100644 --- 
a/libs/langchain/langchain/evaluation/comparison/eval_chain.py +++ b/libs/langchain/langchain/evaluation/comparison/eval_chain.py @@ -3,7 +3,7 @@ from typing import Any, Dict, List, Optional -from pydantic import Extra, Field +from pydantic import ConfigDict, Field from langchain.callbacks.manager import Callbacks from langchain.chains.llm import LLMChain @@ -106,11 +106,7 @@ class PairwiseStringEvalChain(PairwiseStringEvaluator, LLMEvalChain, LLMChain): output_parser: BaseOutputParser = Field( default_factory=PairwiseStringResultOutputParser ) - - class Config: - """Configuration for the PairwiseStringEvalChain.""" - - extra = Extra.ignore + model_config = ConfigDict(extra="ignore") @property def requires_reference(self) -> bool: diff --git a/libs/langchain/langchain/evaluation/criteria/eval_chain.py b/libs/langchain/langchain/evaluation/criteria/eval_chain.py index 838c08fd4372d..47422fae6a916 100644 --- a/libs/langchain/langchain/evaluation/criteria/eval_chain.py +++ b/libs/langchain/langchain/evaluation/criteria/eval_chain.py @@ -3,7 +3,7 @@ from enum import Enum from typing import Any, Dict, List, Mapping, Optional, Union -from pydantic import Extra, Field +from pydantic import ConfigDict, Field from langchain.callbacks.manager import Callbacks from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple @@ -156,11 +156,7 @@ class CriteriaEvalChain(StringEvaluator, LLMEvalChain, LLMChain): criterion_name: str """The name of the criterion being evaluated.""" output_key: str = "results" #: :meta private: - - class Config: - """Configuration for the QAEvalChain.""" - - extra = Extra.ignore + model_config = ConfigDict(extra="ignore") @property def requires_reference(self) -> bool: diff --git a/libs/langchain/langchain/evaluation/embedding_distance/base.py b/libs/langchain/langchain/evaluation/embedding_distance/base.py index 3591f45d8beaf..391d29022da3e 100644 --- a/libs/langchain/langchain/evaluation/embedding_distance/base.py +++ 
b/libs/langchain/langchain/evaluation/embedding_distance/base.py @@ -3,7 +3,7 @@ from typing import Any, Dict, List, Optional import numpy as np -from pydantic import Field, root_validator +from pydantic import ConfigDict, Field, model_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForChainRun, @@ -48,7 +48,8 @@ class _EmbeddingDistanceChainMixin(Chain): embeddings: Embeddings = Field(default_factory=OpenAIEmbeddings) distance_metric: EmbeddingDistance = Field(default=EmbeddingDistance.COSINE) - @root_validator(pre=False) + @model_validator() + @classmethod def _validate_tiktoken_installed(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Validate that the TikTok library is installed. @@ -71,10 +72,7 @@ def _validate_tiktoken_installed(cls, values: Dict[str, Any]) -> Dict[str, Any]: ) return values - class Config: - """Permit embeddings to go unvalidated.""" - - arbitrary_types_allowed: bool = True + model_config = ConfigDict() @property def output_keys(self) -> List[str]: diff --git a/libs/langchain/langchain/evaluation/qa/eval_chain.py b/libs/langchain/langchain/evaluation/qa/eval_chain.py index 90b5e8d5dcf93..437e42386cb5d 100644 --- a/libs/langchain/langchain/evaluation/qa/eval_chain.py +++ b/libs/langchain/langchain/evaluation/qa/eval_chain.py @@ -4,7 +4,7 @@ import re from typing import Any, List, Optional, Sequence -from pydantic import Extra +from pydantic import ConfigDict from langchain import PromptTemplate from langchain.callbacks.manager import Callbacks @@ -53,11 +53,7 @@ class QAEvalChain(LLMChain, StringEvaluator, LLMEvalChain): """LLM Chain for evaluating question answering.""" output_key: str = "results" #: :meta private: - - class Config: - """Configuration for the QAEvalChain.""" - - extra = Extra.ignore + model_config = ConfigDict(extra="ignore") @property def evaluation_name(self) -> str: @@ -196,10 +192,7 @@ def requires_input(self) -> bool: """Whether the chain requires an input string.""" return True - class 
Config: - """Configuration for the QAEvalChain.""" - - extra = Extra.ignore + model_config = ConfigDict(extra="ignore") @classmethod def _validate_input_vars(cls, prompt: PromptTemplate) -> None: diff --git a/libs/langchain/langchain/experimental/autonomous_agents/baby_agi/baby_agi.py b/libs/langchain/langchain/experimental/autonomous_agents/baby_agi/baby_agi.py index 2a204f5626009..19c9bc83b0200 100644 --- a/libs/langchain/langchain/experimental/autonomous_agents/baby_agi/baby_agi.py +++ b/libs/langchain/langchain/experimental/autonomous_agents/baby_agi/baby_agi.py @@ -2,7 +2,7 @@ from collections import deque from typing import Any, Dict, List, Optional -from pydantic import BaseModel, Field +from pydantic import BaseModel, ConfigDict, Field from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain @@ -29,11 +29,7 @@ class BabyAGI(Chain, BaseModel): task_id_counter: int = Field(1) vectorstore: VectorStore = Field(init=False) max_iterations: Optional[int] = None - - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) def add_task(self, task: Dict) -> None: self.task_list.append(task) diff --git a/libs/langchain/langchain/experimental/cpal/models.py b/libs/langchain/langchain/experimental/cpal/models.py index 4aba1b7cd9a39..dd108fee61381 100644 --- a/libs/langchain/langchain/experimental/cpal/models.py +++ b/libs/langchain/langchain/experimental/cpal/models.py @@ -5,7 +5,14 @@ import duckdb import pandas as pd -from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator +from pydantic import ( + BaseModel, + ConfigDict, + Field, + PrivateAttr, + field_validator, + root_validator, +) from langchain.experimental.cpal.constants import Constant from langchain.graphs.networkx_graph import NetworkxEntityGraph @@ -20,7 +27,8 @@ class NarrativeModel(BaseModel): story_hypothetical: str story_plot: str # 
causal stack of operations - @validator("*", pre=True) + @field_validator("*", mode="before") + @classmethod def empty_str_to_none(cls, v: str) -> Union[str, None]: """Empty strings are not allowed""" if v == "": @@ -33,14 +41,10 @@ class EntityModel(BaseModel): code: str = Field(description="entity actions") value: float = Field(description="entity initial value") depends_on: list[str] = Field(default=[], description="ancestor entities") + model_config = ConfigDict(validate_assignment=True) - # TODO: generalize to multivariate math - # TODO: acyclic graph - - class Config: - validate_assignment = True - - @validator("name") + @field_validator("name") + @classmethod def lower_case_name(cls, v: str) -> str: v = v.lower() return v @@ -64,7 +68,8 @@ class EntitySettingModel(BaseModel): attribute: str = Field(description="name of the attribute to be calculated") value: float = Field(description="entity's attribute value (calculated)") - @validator("name") + @field_validator("name") + @classmethod def lower_case_transform(cls, v: str) -> str: v = v.lower() return v @@ -98,7 +103,8 @@ class InterventionModel(BaseModel): entity_settings: list[EntitySettingModel] system_settings: Optional[list[SystemSettingModel]] = None - @validator("system_settings") + @field_validator("system_settings") + @classmethod def lower_case_name(cls, v: str) -> Union[str, None]: if v is not None: raise NotImplementedError("system_setting is not implemented yet") diff --git a/libs/langchain/langchain/experimental/generative_agents/generative_agent.py b/libs/langchain/langchain/experimental/generative_agents/generative_agent.py index c713851939cae..9135cb631ca03 100644 --- a/libs/langchain/langchain/experimental/generative_agents/generative_agent.py +++ b/libs/langchain/langchain/experimental/generative_agents/generative_agent.py @@ -2,7 +2,7 @@ from datetime import datetime from typing import Any, Dict, List, Optional, Tuple -from pydantic import BaseModel, Field +from pydantic import BaseModel, 
ConfigDict, Field from langchain import LLMChain from langchain.experimental.generative_agents.memory import GenerativeAgentMemory @@ -34,11 +34,7 @@ class GenerativeAgent(BaseModel): """The last time the character's summary was regenerated.""" daily_summaries: List[str] = Field(default_factory=list) # : :meta private: """Summary of the events in the plan that the agent took.""" - - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) # LLM-related methods @staticmethod diff --git a/libs/langchain/langchain/indexes/vectorstore.py b/libs/langchain/langchain/indexes/vectorstore.py index daa83092d073f..b695e5a86de4f 100644 --- a/libs/langchain/langchain/indexes/vectorstore.py +++ b/libs/langchain/langchain/indexes/vectorstore.py @@ -1,6 +1,6 @@ from typing import Any, List, Optional, Type -from pydantic import BaseModel, Extra, Field +from pydantic import BaseModel, ConfigDict, Field from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain from langchain.chains.retrieval_qa.base import RetrievalQA @@ -23,12 +23,7 @@ class VectorStoreIndexWrapper(BaseModel): """Wrapper around a vectorstore for easy access.""" vectorstore: VectorStore - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) def query( self, question: str, llm: Optional[BaseLanguageModel] = None, **kwargs: Any @@ -58,12 +53,7 @@ class VectorstoreIndexCreator(BaseModel): embedding: Embeddings = Field(default_factory=OpenAIEmbeddings) text_splitter: TextSplitter = Field(default_factory=_get_default_text_splitter) vectorstore_kwargs: dict = Field(default_factory=dict) - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True + model_config = ConfigDict(extra="forbid", 
arbitrary_types_allowed=True) def from_loaders(self, loaders: List[BaseLoader]) -> VectorStoreIndexWrapper: """Create a vectorstore index from loaders.""" diff --git a/libs/langchain/langchain/llms/ai21.py b/libs/langchain/langchain/llms/ai21.py index 5d2562a6afbd8..2d294af626f81 100644 --- a/libs/langchain/langchain/llms/ai21.py +++ b/libs/langchain/langchain/llms/ai21.py @@ -1,7 +1,7 @@ from typing import Any, Dict, List, Optional import requests -from pydantic import BaseModel, Extra, root_validator +from pydantic import BaseModel, ConfigDict, model_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -68,13 +68,10 @@ class AI21(LLM): base_url: Optional[str] = None """Base url to use, if None decides based on model name.""" + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key exists in environment.""" ai21_api_key = get_from_dict_or_env(values, "ai21_api_key", "AI21_API_KEY") diff --git a/libs/langchain/langchain/llms/aleph_alpha.py b/libs/langchain/langchain/llms/aleph_alpha.py index cd4aec16828ee..ba61464028176 100644 --- a/libs/langchain/langchain/llms/aleph_alpha.py +++ b/libs/langchain/langchain/llms/aleph_alpha.py @@ -1,6 +1,6 @@ from typing import Any, Dict, List, Optional, Sequence -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -130,13 +130,10 @@ class AlephAlpha(LLM): stop_sequences: Optional[List[str]] = None """Stop sequences to use.""" + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator() + @classmethod def 
validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" aleph_alpha_api_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/llms/amazon_api_gateway.py b/libs/langchain/langchain/llms/amazon_api_gateway.py index 1ba81c11896bf..d9c3a74017dba 100644 --- a/libs/langchain/langchain/llms/amazon_api_gateway.py +++ b/libs/langchain/langchain/llms/amazon_api_gateway.py @@ -1,7 +1,7 @@ from typing import Any, Dict, List, Mapping, Optional import requests -from pydantic import Extra +from pydantic import ConfigDict from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -43,11 +43,7 @@ class AmazonAPIGateway(LLM): output transform functions to handle formats between LLM and the endpoint. """ - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") @property def _identifying_params(self) -> Mapping[str, Any]: diff --git a/libs/langchain/langchain/llms/anthropic.py b/libs/langchain/langchain/llms/anthropic.py index f32f581d1f72d..c8a2876fd3810 100644 --- a/libs/langchain/langchain/llms/anthropic.py +++ b/libs/langchain/langchain/llms/anthropic.py @@ -2,7 +2,7 @@ import warnings from typing import Any, AsyncIterator, Callable, Dict, Iterator, List, Mapping, Optional -from pydantic import root_validator +from pydantic import model_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, @@ -46,7 +46,8 @@ class _AnthropicCommon(BaseLanguageModel): AI_PROMPT: Optional[str] = None count_tokens: Optional[Callable[[str], int]] = None - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["anthropic_api_key"] = get_from_dict_or_env( @@ -142,7 +143,8 @@ class Anthropic(LLM, _AnthropicCommon): response = model(prompt) """ - 
@root_validator() + @model_validator() + @classmethod def raise_warning(cls, values: Dict) -> Dict: """Raise warning that this class is deprecated.""" warnings.warn( diff --git a/libs/langchain/langchain/llms/anyscale.py b/libs/langchain/langchain/llms/anyscale.py index 5a440b97d4b78..7a8bdf902a533 100644 --- a/libs/langchain/langchain/llms/anyscale.py +++ b/libs/langchain/langchain/llms/anyscale.py @@ -1,7 +1,7 @@ from typing import Any, Dict, List, Mapping, Optional import requests -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -41,13 +41,10 @@ def send_query(llm, prompt): anyscale_service_url: Optional[str] = None anyscale_service_route: Optional[str] = None anyscale_service_token: Optional[str] = None + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" anyscale_service_url = get_from_dict_or_env( diff --git a/libs/langchain/langchain/llms/aviary.py b/libs/langchain/langchain/llms/aviary.py index 9f9c0937d071b..13c8bf1ae7fdf 100644 --- a/libs/langchain/langchain/llms/aviary.py +++ b/libs/langchain/langchain/llms/aviary.py @@ -3,7 +3,7 @@ from typing import Any, Dict, List, Mapping, Optional, Union, cast import requests -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -106,13 +106,10 @@ class Aviary(LLM): use_prompt_format: bool = True # API version to use for Aviary version: Optional[str] = None + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra 
= Extra.forbid - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" aviary_url = get_from_dict_or_env(values, "aviary_url", "AVIARY_URL") diff --git a/libs/langchain/langchain/llms/azureml_endpoint.py b/libs/langchain/langchain/llms/azureml_endpoint.py index 20a34613f6732..6cbd4e2215316 100644 --- a/libs/langchain/langchain/llms/azureml_endpoint.py +++ b/libs/langchain/langchain/llms/azureml_endpoint.py @@ -3,7 +3,8 @@ from abc import abstractmethod from typing import Any, Dict, List, Mapping, Optional -from pydantic import BaseModel, validator +from pydantic import BaseModel +from pydantic.v1 import validator as v1_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -168,7 +169,9 @@ class AzureMLOnlineEndpoint(LLM, BaseModel): model_kwargs: Optional[dict] = None """Key word arguments to pass to the model.""" - @validator("http_client", always=True, allow_reuse=True) + # TODO[pydantic]: Replace with a Pydantic v2 `field_validator`. + # Check https://docs.pydantic.dev/dev-v2/migration/#changes-to-validators for more information. 
+ @v1_validator("http_client", always=True, allow_reuse=True) @classmethod def validate_client(cls, field_value: Any, values: Dict) -> AzureMLEndpointClient: """Validate that api key and python package exists in environment.""" diff --git a/libs/langchain/langchain/llms/bananadev.py b/libs/langchain/langchain/llms/bananadev.py index 76dcb7b8af3db..45b36742f1d5e 100644 --- a/libs/langchain/langchain/llms/bananadev.py +++ b/libs/langchain/langchain/llms/bananadev.py @@ -1,7 +1,7 @@ import logging from typing import Any, Dict, List, Mapping, Optional -from pydantic import Extra, Field, root_validator +from pydantic import ConfigDict, Field, model_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -35,13 +35,10 @@ class Banana(LLM): explicitly specified.""" banana_api_key: Optional[str] = None + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic config.""" - - extra = Extra.forbid - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} @@ -59,7 +56,8 @@ def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: values["model_kwargs"] = extra return values - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" banana_api_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/llms/base.py b/libs/langchain/langchain/llms/base.py index 10595e83191ea..b3fcdc7eee77d 100644 --- a/libs/langchain/langchain/llms/base.py +++ b/libs/langchain/langchain/llms/base.py @@ -26,7 +26,8 @@ ) import yaml -from pydantic import Field, root_validator, validator +from pydantic import ConfigDict, Field, model_validator 
+from pydantic.v1 import validator as v1_validator from tenacity import ( before_sleep_log, retry, @@ -137,13 +138,10 @@ class BaseLLM(BaseLanguageModel[str], ABC): """Tags to add to the run trace.""" metadata: Optional[Dict[str, Any]] = Field(default=None, exclude=True) """Metadata to add to the run trace.""" + model_config = ConfigDict(arbitrary_types_allowed=True) - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True - - @root_validator() + @model_validator() + @classmethod def raise_deprecation(cls, values: Dict) -> Dict: """Raise deprecation warning if callback_manager is used.""" if values.get("callback_manager") is not None: @@ -154,7 +152,9 @@ def raise_deprecation(cls, values: Dict) -> Dict: values["callbacks"] = values.pop("callback_manager", None) return values - @validator("verbose", pre=True, always=True) + # TODO[pydantic]: Replace with a Pydantic v2 `field_validator`. + # Check https://docs.pydantic.dev/dev-v2/migration/#changes-to-validators for more information. + @v1_validator("verbose", pre=True, always=True) def set_verbose(cls, verbose: Optional[bool]) -> bool: """If verbose is None, set it. 
diff --git a/libs/langchain/langchain/llms/beam.py b/libs/langchain/langchain/llms/beam.py index 47bc017dadda2..28e0bfd7e5685 100644 --- a/libs/langchain/langchain/llms/beam.py +++ b/libs/langchain/langchain/llms/beam.py @@ -7,7 +7,7 @@ from typing import Any, Dict, List, Mapping, Optional import requests -from pydantic import Extra, Field, root_validator +from pydantic import ConfigDict, Field, model_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -72,13 +72,10 @@ class Beam(LLM): beam_client_id: str = "" beam_client_secret: str = "" app_id: Optional[str] = None + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic config.""" - - extra = Extra.forbid - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} @@ -96,7 +93,8 @@ def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: values["model_kwargs"] = extra return values - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" beam_client_id = get_from_dict_or_env( diff --git a/libs/langchain/langchain/llms/bedrock.py b/libs/langchain/langchain/llms/bedrock.py index 481de7f5680ea..7c4673ea9c32a 100644 --- a/libs/langchain/langchain/llms/bedrock.py +++ b/libs/langchain/langchain/llms/bedrock.py @@ -1,7 +1,7 @@ import json from typing import Any, Dict, List, Mapping, Optional -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -99,13 +99,10 @@ class Bedrock(LLM): endpoint_url: Optional[str] = None """Needed 
if you don't want to default to us-east-1 endpoint""" + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that AWS credentials to and python package exists in environment.""" diff --git a/libs/langchain/langchain/llms/cerebriumai.py b/libs/langchain/langchain/llms/cerebriumai.py index 02db1cf33efc6..51e425af37347 100644 --- a/libs/langchain/langchain/llms/cerebriumai.py +++ b/libs/langchain/langchain/llms/cerebriumai.py @@ -1,7 +1,7 @@ import logging from typing import Any, Dict, List, Mapping, Optional -from pydantic import Extra, Field, root_validator +from pydantic import ConfigDict, Field, model_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -36,13 +36,10 @@ class CerebriumAI(LLM): explicitly specified.""" cerebriumai_api_key: Optional[str] = None + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic config.""" - - extra = Extra.forbid - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} @@ -60,7 +57,8 @@ def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: values["model_kwargs"] = extra return values - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" cerebriumai_api_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/llms/clarifai.py b/libs/langchain/langchain/llms/clarifai.py index 2eead83592089..7514f34c90b72 100644 --- a/libs/langchain/langchain/llms/clarifai.py +++ 
b/libs/langchain/langchain/llms/clarifai.py @@ -1,7 +1,7 @@ import logging from typing import Any, Dict, List, Optional -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -45,13 +45,10 @@ class Clarifai(LLM): pat: Optional[str] = None api_base: str = "https://api.clarifai.com" + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that we have all required info to access Clarifai platform and python package exists in environment.""" diff --git a/libs/langchain/langchain/llms/cohere.py b/libs/langchain/langchain/llms/cohere.py index bad5a15651153..8c9f353be4a18 100644 --- a/libs/langchain/langchain/llms/cohere.py +++ b/libs/langchain/langchain/llms/cohere.py @@ -3,7 +3,7 @@ import logging from typing import Any, Callable, Dict, List, Optional -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from tenacity import ( before_sleep_log, retry, @@ -108,13 +108,10 @@ class Cohere(LLM): cohere_api_key: Optional[str] = None stop: Optional[List[str]] = None + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" cohere_api_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/llms/ctransformers.py b/libs/langchain/langchain/llms/ctransformers.py index 86c657f618176..5ffed5e037c93 100644 --- a/libs/langchain/langchain/llms/ctransformers.py +++ b/libs/langchain/langchain/llms/ctransformers.py @@ -1,7 +1,7 @@ from functools 
import partial from typing import Any, Dict, List, Optional, Sequence -from pydantic import root_validator +from pydantic import model_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, @@ -58,7 +58,8 @@ def _llm_type(self) -> str: """Return type of llm.""" return "ctransformers" - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that ``ctransformers`` package is installed.""" try: diff --git a/libs/langchain/langchain/llms/databricks.py b/libs/langchain/langchain/llms/databricks.py index 6ed2a6fa7d6d9..83fa9629cd8a5 100644 --- a/libs/langchain/langchain/llms/databricks.py +++ b/libs/langchain/langchain/llms/databricks.py @@ -3,7 +3,8 @@ from typing import Any, Callable, Dict, List, Optional import requests -from pydantic import BaseModel, Extra, Field, PrivateAttr, root_validator, validator +from pydantic import BaseModel, ConfigDict, Field, PrivateAttr, model_validator +from pydantic.v1 import validator as v1_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -36,7 +37,8 @@ class _DatabricksServingEndpointClient(_DatabricksClientBase): host: str endpoint_name: str - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def set_api_url(cls, values: Dict[str, Any]) -> Dict[str, Any]: if "api_url" not in values: host = values["host"] @@ -62,7 +64,8 @@ class _DatabricksClusterDriverProxyClient(_DatabricksClientBase): cluster_id: str cluster_driver_port: str - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def set_api_url(cls, values: Dict[str, Any]) -> Dict[str, Any]: if "api_url" not in values: host = values["host"] @@ -227,12 +230,11 @@ class Databricks(LLM): """ _client: _DatabricksClientBase = PrivateAttr() + model_config = ConfigDict(extra="forbid") - class Config: - extra = Extra.forbid - underscore_attrs_are_private = True - - 
@validator("cluster_id", always=True) + # TODO[pydantic]: Replace with a Pydantic v2 `field_validator`. + # Check https://docs.pydantic.dev/dev-v2/migration/#changes-to-validators for more information. + @v1_validator("cluster_id", always=True) def set_cluster_id(cls, v: Any, values: Dict[str, Any]) -> Optional[str]: if v and values["endpoint_name"]: raise ValueError("Cannot set both endpoint_name and cluster_id.") @@ -252,7 +254,9 @@ def set_cluster_id(cls, v: Any, values: Dict[str, Any]) -> Optional[str]: f" error: {e}" ) - @validator("cluster_driver_port", always=True) + # TODO[pydantic]: Replace with a Pydantic v2 `field_validator`. + # Check https://docs.pydantic.dev/dev-v2/migration/#changes-to-validators for more information. + @v1_validator("cluster_driver_port", always=True) def set_cluster_driver_port(cls, v: Any, values: Dict[str, Any]) -> Optional[str]: if v and values["endpoint_name"]: raise ValueError("Cannot set both endpoint_name and cluster_driver_port.") @@ -267,7 +271,9 @@ def set_cluster_driver_port(cls, v: Any, values: Dict[str, Any]) -> Optional[str else: return v - @validator("model_kwargs", always=True) + # TODO[pydantic]: Replace with a Pydantic v2 `field_validator`. + # Check https://docs.pydantic.dev/dev-v2/migration/#changes-to-validators for more information. 
+ @v1_validator("model_kwargs", always=True) def set_model_kwargs(cls, v: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]: if v: assert "prompt" not in v, "model_kwargs must not contain key 'prompt'" diff --git a/libs/langchain/langchain/llms/deepinfra.py b/libs/langchain/langchain/llms/deepinfra.py index 533c236284ae6..ba10e6d69abb6 100644 --- a/libs/langchain/langchain/llms/deepinfra.py +++ b/libs/langchain/langchain/llms/deepinfra.py @@ -1,7 +1,7 @@ from typing import Any, Dict, List, Mapping, Optional import requests -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -32,13 +32,10 @@ class DeepInfra(LLM): model_kwargs: Optional[dict] = None deepinfra_api_token: Optional[str] = None + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" deepinfra_api_token = get_from_dict_or_env( diff --git a/libs/langchain/langchain/llms/forefrontai.py b/libs/langchain/langchain/llms/forefrontai.py index b6c70d5c430d8..feec827c4486c 100644 --- a/libs/langchain/langchain/llms/forefrontai.py +++ b/libs/langchain/langchain/llms/forefrontai.py @@ -1,7 +1,7 @@ from typing import Any, Dict, List, Mapping, Optional import requests -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -45,13 +45,10 @@ class ForefrontAI(LLM): base_url: Optional[str] = None """Base url to use, if None decides based on model name.""" + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = 
Extra.forbid - - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key exists in environment.""" forefrontai_api_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/llms/google_palm.py b/libs/langchain/langchain/llms/google_palm.py index 8f45950a29ee7..77fa767a87d58 100644 --- a/libs/langchain/langchain/llms/google_palm.py +++ b/libs/langchain/langchain/llms/google_palm.py @@ -3,7 +3,7 @@ import logging from typing import Any, Callable, Dict, List, Optional -from pydantic import BaseModel, root_validator +from pydantic import BaseModel, model_validator from tenacity import ( before_sleep_log, retry, @@ -75,8 +75,8 @@ def _strip_erroneous_leading_spaces(text: str) -> str: class GooglePalm(BaseLLM, BaseModel): """Google PaLM models.""" - client: Any #: :meta private: - google_api_key: Optional[str] + client: Any = None #: :meta private: + google_api_key: Optional[str] = None model_name: str = "models/text-bison-001" """Model name to use.""" temperature: float = 0.7 @@ -95,7 +95,8 @@ class GooglePalm(BaseLLM, BaseModel): """Number of chat completions to generate for each prompt. 
Note that the API may not return the full n completions if duplicates are generated.""" - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate api key, python package exists.""" google_api_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/llms/gooseai.py b/libs/langchain/langchain/llms/gooseai.py index cde043ce22131..65eb28b291c25 100644 --- a/libs/langchain/langchain/llms/gooseai.py +++ b/libs/langchain/langchain/llms/gooseai.py @@ -1,7 +1,7 @@ import logging from typing import Any, Dict, List, Mapping, Optional -from pydantic import Extra, Field, root_validator +from pydantic import ConfigDict, Field, model_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -62,13 +62,10 @@ class GooseAI(LLM): """Adjust the probability of specific tokens being generated.""" gooseai_api_key: Optional[str] = None + model_config = ConfigDict(extra="ignore") - class Config: - """Configuration for this pydantic config.""" - - extra = Extra.ignore - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} @@ -87,7 +84,8 @@ def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: values["model_kwargs"] = extra return values - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" gooseai_api_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/llms/gpt4all.py b/libs/langchain/langchain/llms/gpt4all.py index c88374c1c09bb..836976870bb25 100644 --- a/libs/langchain/langchain/llms/gpt4all.py +++ b/libs/langchain/langchain/llms/gpt4all.py @@ -1,7 +1,7 @@ from functools import 
partial from typing import Any, Dict, List, Mapping, Optional, Set -from pydantic import Extra, Field, root_validator +from pydantic import ConfigDict, Field, model_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -91,11 +91,7 @@ class GPT4All(LLM): """If model does not exist in ~/.cache/gpt4all/, download it.""" client: Any = None #: :meta private: - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") @staticmethod def _model_param_names() -> Set[str]: @@ -122,7 +118,8 @@ def _default_params(self) -> Dict[str, Any]: "repeat_last_n": self.repeat_last_n, } - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that the python package exists in the environment.""" try: diff --git a/libs/langchain/langchain/llms/huggingface_endpoint.py b/libs/langchain/langchain/llms/huggingface_endpoint.py index ff38a2025a15e..e7233daf1b412 100644 --- a/libs/langchain/langchain/llms/huggingface_endpoint.py +++ b/libs/langchain/langchain/llms/huggingface_endpoint.py @@ -1,7 +1,7 @@ from typing import Any, Dict, List, Mapping, Optional import requests -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -42,13 +42,10 @@ class HuggingFaceEndpoint(LLM): """Key word arguments to pass to the model.""" huggingfacehub_api_token: Optional[str] = None + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" huggingfacehub_api_token = get_from_dict_or_env( diff --git 
a/libs/langchain/langchain/llms/huggingface_hub.py b/libs/langchain/langchain/llms/huggingface_hub.py index 745f0fdccbab4..c83c3241c40b6 100644 --- a/libs/langchain/langchain/llms/huggingface_hub.py +++ b/libs/langchain/langchain/llms/huggingface_hub.py @@ -1,6 +1,6 @@ from typing import Any, Dict, List, Mapping, Optional -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -37,13 +37,10 @@ class HuggingFaceHub(LLM): """Key word arguments to pass to the model.""" huggingfacehub_api_token: Optional[str] = None + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" huggingfacehub_api_token = get_from_dict_or_env( diff --git a/libs/langchain/langchain/llms/huggingface_pipeline.py b/libs/langchain/langchain/llms/huggingface_pipeline.py index ef7d428949e3a..1203c05fa86fa 100644 --- a/libs/langchain/langchain/llms/huggingface_pipeline.py +++ b/libs/langchain/langchain/llms/huggingface_pipeline.py @@ -2,7 +2,7 @@ import logging from typing import Any, List, Mapping, Optional -from pydantic import Extra +from pydantic import ConfigDict from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -53,11 +53,7 @@ class HuggingFacePipeline(LLM): """Key word arguments passed to the model.""" pipeline_kwargs: Optional[dict] = None """Key word arguments passed to the pipeline.""" - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") @classmethod def from_model_id( diff --git a/libs/langchain/langchain/llms/huggingface_text_gen_inference.py 
b/libs/langchain/langchain/llms/huggingface_text_gen_inference.py index befe813480827..124dd3b01393d 100644 --- a/libs/langchain/langchain/llms/huggingface_text_gen_inference.py +++ b/libs/langchain/langchain/llms/huggingface_text_gen_inference.py @@ -1,6 +1,6 @@ from typing import Any, AsyncIterator, Dict, Iterator, List, Optional -from pydantic import Extra, Field, root_validator +from pydantic import ConfigDict, Field, model_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, @@ -90,13 +90,10 @@ class HuggingFaceTextGenInference(LLM): streaming: bool = False client: Any async_client: Any + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that python package exists in environment.""" diff --git a/libs/langchain/langchain/llms/llamacpp.py b/libs/langchain/langchain/llms/llamacpp.py index be79076f16291..3b0a3febd73ab 100644 --- a/libs/langchain/langchain/llms/llamacpp.py +++ b/libs/langchain/langchain/llms/llamacpp.py @@ -1,7 +1,7 @@ import logging from typing import Any, Dict, Iterator, List, Optional -from pydantic import Field, root_validator +from pydantic import Field, model_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -106,7 +106,8 @@ class LlamaCpp(LLM): verbose: bool = True """Print verbose output to stderr.""" - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that llama-cpp-python library is installed.""" model_path = values["model_path"] diff --git a/libs/langchain/langchain/llms/manifest.py b/libs/langchain/langchain/llms/manifest.py index 71a89ed00f20c..b5d05d5f0e138 100644 --- a/libs/langchain/langchain/llms/manifest.py +++ b/libs/langchain/langchain/llms/manifest.py @@ -1,6 +1,6 @@ 
from typing import Any, Dict, List, Mapping, Optional -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -11,13 +11,10 @@ class ManifestWrapper(LLM): client: Any #: :meta private: llm_kwargs: Optional[Dict] = None + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that python package exists in environment.""" try: diff --git a/libs/langchain/langchain/llms/modal.py b/libs/langchain/langchain/llms/modal.py index bf03947bc53aa..1f8287a0fdafb 100644 --- a/libs/langchain/langchain/llms/modal.py +++ b/libs/langchain/langchain/llms/modal.py @@ -2,7 +2,7 @@ from typing import Any, Dict, List, Mapping, Optional import requests -from pydantic import Extra, Field, root_validator +from pydantic import ConfigDict, Field, model_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -33,13 +33,10 @@ class Modal(LLM): model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic config.""" - - extra = Extra.forbid - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} diff --git a/libs/langchain/langchain/llms/mosaicml.py b/libs/langchain/langchain/llms/mosaicml.py index 226b5c3b8b5b9..42a320a5d8adb 100644 --- a/libs/langchain/langchain/llms/mosaicml.py +++ 
b/libs/langchain/langchain/llms/mosaicml.py @@ -1,7 +1,7 @@ from typing import Any, Dict, List, Mapping, Optional import requests -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -58,13 +58,10 @@ class MosaicML(LLM): """How long to try sleeping for if a rate limit is encountered""" mosaicml_api_token: Optional[str] = None + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" mosaicml_api_token = get_from_dict_or_env( diff --git a/libs/langchain/langchain/llms/nlpcloud.py b/libs/langchain/langchain/llms/nlpcloud.py index 9e5070acf77d0..cf421bc028e52 100644 --- a/libs/langchain/langchain/llms/nlpcloud.py +++ b/libs/langchain/langchain/llms/nlpcloud.py @@ -1,6 +1,6 @@ from typing import Any, Dict, List, Mapping, Optional -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -59,13 +59,10 @@ class NLPCloud(LLM): """How many completions to generate for each prompt.""" nlpcloud_api_key: Optional[str] = None + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" nlpcloud_api_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/llms/octoai_endpoint.py b/libs/langchain/langchain/llms/octoai_endpoint.py index 46c88d51ac076..55fbe734814cc 100644 --- 
a/libs/langchain/langchain/llms/octoai_endpoint.py +++ b/libs/langchain/langchain/llms/octoai_endpoint.py @@ -1,6 +1,6 @@ from typing import Any, Dict, List, Mapping, Optional -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -45,13 +45,10 @@ class OctoAIEndpoint(LLM): octoai_api_token: Optional[str] = None """OCTOAI API Token""" + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator(allow_reuse=True) + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" octoai_api_token = get_from_dict_or_env( diff --git a/libs/langchain/langchain/llms/openai.py b/libs/langchain/langchain/llms/openai.py index 2c664a1c8742a..bc834fb38fe11 100644 --- a/libs/langchain/langchain/llms/openai.py +++ b/libs/langchain/langchain/llms/openai.py @@ -20,7 +20,7 @@ Union, ) -from pydantic import Field, root_validator +from pydantic import ConfigDict, Field, model_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, @@ -192,12 +192,10 @@ def __new__(cls, **data: Any) -> Union[OpenAIChat, BaseOpenAI]: # type: ignore return OpenAIChat(**data) return super().__new__(cls) - class Config: - """Configuration for this pydantic object.""" + model_config = ConfigDict(populate_by_name=True) - allow_population_by_field_name = True - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = get_pydantic_field_names(cls) @@ -223,7 +221,8 @@ def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: values["model_kwargs"] = extra return values - 
@root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["openai_api_key"] = get_from_dict_or_env( @@ -651,7 +650,8 @@ class AzureOpenAI(BaseOpenAI): openai_api_type: str = "" openai_api_version: str = "" - @root_validator() + @model_validator() + @classmethod def validate_azure_settings(cls, values: Dict) -> Dict: values["openai_api_version"] = get_from_dict_or_env( values, @@ -721,7 +721,8 @@ class OpenAIChat(BaseLLM): disallowed_special: Union[Literal["all"], Collection[str]] = "all" """Set of special tokens that are not allowed。""" - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} @@ -735,7 +736,8 @@ def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: values["model_kwargs"] = extra return values - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" openai_api_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/llms/openllm.py b/libs/langchain/langchain/llms/openllm.py index 62b1dde03f050..d04cddbead69a 100644 --- a/libs/langchain/langchain/llms/openllm.py +++ b/libs/langchain/langchain/llms/openllm.py @@ -15,7 +15,7 @@ overload, ) -from pydantic import PrivateAttr +from pydantic import ConfigDict, PrivateAttr from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, @@ -95,9 +95,7 @@ class OpenLLM(LLM): _client: Union[ openllm.client.HTTPClient, openllm.client.GrpcClient, None ] = PrivateAttr(default=None) - - class Config: - extra = "forbid" + model_config = ConfigDict(extra="forbid") @overload def __init__( diff --git 
a/libs/langchain/langchain/llms/openlm.py b/libs/langchain/langchain/llms/openlm.py index c77badaa3ac16..12858352dd781 100644 --- a/libs/langchain/langchain/llms/openlm.py +++ b/libs/langchain/langchain/llms/openlm.py @@ -1,6 +1,6 @@ from typing import Any, Dict -from pydantic import root_validator +from pydantic import model_validator from langchain.llms.openai import BaseOpenAI @@ -12,7 +12,8 @@ class OpenLM(BaseOpenAI): def _invocation_params(self) -> Dict[str, Any]: return {**{"model": self.model_name}, **super()._invocation_params} - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: try: import openlm diff --git a/libs/langchain/langchain/llms/petals.py b/libs/langchain/langchain/llms/petals.py index 413437dc08680..0021952fa5bab 100644 --- a/libs/langchain/langchain/llms/petals.py +++ b/libs/langchain/langchain/llms/petals.py @@ -1,7 +1,7 @@ import logging from typing import Any, Dict, List, Mapping, Optional -from pydantic import Extra, Field, root_validator +from pydantic import ConfigDict, Field, model_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -61,13 +61,10 @@ class Petals(LLM): not explicitly specified.""" huggingface_api_key: Optional[str] = None + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic config.""" - - extra = Extra.forbid - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} @@ -86,7 +83,8 @@ def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: values["model_kwargs"] = extra return values - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python 
package exists in environment.""" huggingface_api_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/llms/pipelineai.py b/libs/langchain/langchain/llms/pipelineai.py index b145cc49474b8..50f6fb4a7450c 100644 --- a/libs/langchain/langchain/llms/pipelineai.py +++ b/libs/langchain/langchain/llms/pipelineai.py @@ -1,7 +1,7 @@ import logging from typing import Any, Dict, List, Mapping, Optional -from pydantic import BaseModel, Extra, Field, root_validator +from pydantic import BaseModel, ConfigDict, Field, model_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -35,13 +35,10 @@ class PipelineAI(LLM, BaseModel): explicitly specified.""" pipeline_api_key: Optional[str] = None + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic config.""" - - extra = Extra.forbid - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} @@ -59,7 +56,8 @@ def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: values["pipeline_kwargs"] = extra return values - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" pipeline_api_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/llms/predictionguard.py b/libs/langchain/langchain/llms/predictionguard.py index f91541508f2fd..8f8fe31fe76dd 100644 --- a/libs/langchain/langchain/llms/predictionguard.py +++ b/libs/langchain/langchain/llms/predictionguard.py @@ -1,7 +1,7 @@ import logging from typing import Any, Dict, List, Optional -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager 
import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -47,13 +47,10 @@ class PredictionGuard(LLM): """Your Prediction Guard access token.""" stop: Optional[List[str]] = None + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that the access token and python package exists in environment.""" token = get_from_dict_or_env(values, "token", "PREDICTIONGUARD_TOKEN") diff --git a/libs/langchain/langchain/llms/replicate.py b/libs/langchain/langchain/llms/replicate.py index 5ae4638d501c6..8217bcb180cc4 100644 --- a/libs/langchain/langchain/llms/replicate.py +++ b/libs/langchain/langchain/llms/replicate.py @@ -1,7 +1,7 @@ import logging from typing import Any, Dict, List, Mapping, Optional -from pydantic import Extra, Field, root_validator +from pydantic import ConfigDict, Field, model_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -40,13 +40,10 @@ class Replicate(LLM): stop: Optional[List[str]] = Field(default=[]) """Stop sequences to early-terminate generation.""" + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic config.""" - - extra = Extra.forbid - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} @@ -64,7 +61,8 @@ def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: values["model_kwargs"] = extra return values - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" 
replicate_api_token = get_from_dict_or_env( diff --git a/libs/langchain/langchain/llms/rwkv.py b/libs/langchain/langchain/llms/rwkv.py index 6b717d4a12f82..b79d0212c2bec 100644 --- a/libs/langchain/langchain/llms/rwkv.py +++ b/libs/langchain/langchain/llms/rwkv.py @@ -5,7 +5,7 @@ """ from typing import Any, Dict, List, Mapping, Optional, Set -from pydantic import BaseModel, Extra, root_validator +from pydantic import BaseModel, ConfigDict, model_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -71,11 +71,7 @@ class RWKV(LLM, BaseModel): model_tokens: Any = None #: :meta private: model_state: Any = None #: :meta private: - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") @property def _default_params(self) -> Dict[str, Any]: @@ -97,7 +93,8 @@ def _rwkv_param_names() -> Set[str]: "verbose", } - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that the python package exists in the environment.""" try: diff --git a/libs/langchain/langchain/llms/sagemaker_endpoint.py b/libs/langchain/langchain/llms/sagemaker_endpoint.py index 1e28435684d7d..2caa769e0f942 100644 --- a/libs/langchain/langchain/llms/sagemaker_endpoint.py +++ b/libs/langchain/langchain/llms/sagemaker_endpoint.py @@ -2,7 +2,7 @@ from abc import abstractmethod from typing import Any, Dict, Generic, List, Mapping, Optional, TypeVar, Union -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -150,13 +150,10 @@ def transform_output(self, output: bytes) -> str: function. See `boto3`_. docs for more info. .. 
_boto3: """ + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that AWS credentials to and python package exists in environment.""" try: diff --git a/libs/langchain/langchain/llms/self_hosted.py b/libs/langchain/langchain/llms/self_hosted.py index 42d45c9c545a1..9808fcd6a8082 100644 --- a/libs/langchain/langchain/llms/self_hosted.py +++ b/libs/langchain/langchain/llms/self_hosted.py @@ -3,7 +3,7 @@ import pickle from typing import Any, Callable, List, Mapping, Optional -from pydantic import Extra +from pydantic import ConfigDict from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -136,11 +136,7 @@ def inference_fn(pipeline, prompt, stop = None): """Key word arguments to pass to the model load function.""" model_reqs: List[str] = ["./", "torch"] """Requirements to install on hardware to inference the model.""" - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") def __init__(self, **kwargs: Any): """Init the pipeline with an auxiliary function. 
diff --git a/libs/langchain/langchain/llms/self_hosted_hugging_face.py b/libs/langchain/langchain/llms/self_hosted_hugging_face.py index 2e55aaf1d2f28..31b478fea77b2 100644 --- a/libs/langchain/langchain/llms/self_hosted_hugging_face.py +++ b/libs/langchain/langchain/llms/self_hosted_hugging_face.py @@ -2,7 +2,7 @@ import logging from typing import Any, Callable, List, Mapping, Optional -from pydantic import Extra +from pydantic import ConfigDict from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.self_hosted import SelfHostedPipeline @@ -168,11 +168,7 @@ def get_pipeline(): """Function to load the model remotely on the server.""" inference_fn: Callable = _generate_text #: :meta private: """Inference function to send to the remote hardware.""" - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") def __init__(self, **kwargs: Any): """Construct the pipeline remotely using an auxiliary function. 
diff --git a/libs/langchain/langchain/llms/stochasticai.py b/libs/langchain/langchain/llms/stochasticai.py index 391d9e8c40903..bfc4a9b000b14 100644 --- a/libs/langchain/langchain/llms/stochasticai.py +++ b/libs/langchain/langchain/llms/stochasticai.py @@ -3,7 +3,7 @@ from typing import Any, Dict, List, Mapping, Optional import requests -from pydantic import Extra, Field, root_validator +from pydantic import ConfigDict, Field, model_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -34,13 +34,10 @@ class StochasticAI(LLM): explicitly specified.""" stochasticai_api_key: Optional[str] = None + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} @@ -58,7 +55,8 @@ def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: values["model_kwargs"] = extra return values - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key exists in environment.""" stochasticai_api_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/llms/tongyi.py b/libs/langchain/langchain/llms/tongyi.py index 57973861b987e..eb15f3b7264d5 100644 --- a/libs/langchain/langchain/llms/tongyi.py +++ b/libs/langchain/langchain/llms/tongyi.py @@ -3,7 +3,7 @@ import logging from typing import Any, Callable, Dict, List, Optional -from pydantic import Field, root_validator +from pydantic import Field, model_validator from requests.exceptions import HTTPError from tenacity import ( before_sleep_log, @@ -135,7 +135,8 @@ def _llm_type(self) -> str: """Return type of llm.""" return "tongyi" - 
@root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" get_from_dict_or_env(values, "dashscope_api_key", "DASHSCOPE_API_KEY") diff --git a/libs/langchain/langchain/llms/vertexai.py b/libs/langchain/langchain/llms/vertexai.py index da85f79edb194..40144ef14deca 100644 --- a/libs/langchain/langchain/llms/vertexai.py +++ b/libs/langchain/langchain/llms/vertexai.py @@ -4,7 +4,7 @@ from concurrent.futures import Executor, ThreadPoolExecutor from typing import TYPE_CHECKING, Any, Callable, ClassVar, Dict, List, Optional -from pydantic import BaseModel, root_validator +from pydantic import BaseModel, model_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, @@ -148,7 +148,8 @@ class VertexAI(_VertexAICommon, LLM): tuned_model_name: Optional[str] = None "The name of a tuned model. If provided, model_name is ignored." - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that the python package exists in environment.""" cls._try_init_vertexai(values) diff --git a/libs/langchain/langchain/llms/writer.py b/libs/langchain/langchain/llms/writer.py index 546d09b04c1d6..d3a9363060661 100644 --- a/libs/langchain/langchain/llms/writer.py +++ b/libs/langchain/langchain/llms/writer.py @@ -1,7 +1,7 @@ from typing import Any, Dict, List, Mapping, Optional import requests -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -63,13 +63,10 @@ class Writer(LLM): base_url: Optional[str] = None """Base url to use, if None decides based on model name.""" + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator() + 
@classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and organization id exist in environment.""" diff --git a/libs/langchain/langchain/load/serializable.py b/libs/langchain/langchain/load/serializable.py index 8f0e5ccf8dd0f..b982cbba79c44 100644 --- a/libs/langchain/langchain/load/serializable.py +++ b/libs/langchain/langchain/load/serializable.py @@ -1,7 +1,7 @@ from abc import ABC from typing import Any, Dict, List, Literal, TypedDict, Union, cast -from pydantic import BaseModel, PrivateAttr +from pydantic import BaseModel, ConfigDict, PrivateAttr class BaseSerialized(TypedDict): @@ -65,8 +65,7 @@ def lc_attributes(self) -> Dict: """ return {} - class Config: - extra = "ignore" + model_config = ConfigDict(extra="ignore") _lc_kwargs = PrivateAttr(default_factory=dict) diff --git a/libs/langchain/langchain/memory/buffer.py b/libs/langchain/langchain/memory/buffer.py index 50b1468b648d2..34a36aab6638c 100644 --- a/libs/langchain/langchain/memory/buffer.py +++ b/libs/langchain/langchain/memory/buffer.py @@ -1,6 +1,6 @@ from typing import Any, Dict, List, Optional -from pydantic import root_validator +from pydantic import model_validator from langchain.memory.chat_memory import BaseChatMemory, BaseMemory from langchain.memory.utils import get_prompt_input_key @@ -50,7 +50,8 @@ class ConversationStringBufferMemory(BaseMemory): input_key: Optional[str] = None memory_key: str = "history" #: :meta private: - @root_validator() + @model_validator() + @classmethod def validate_chains(cls, values: Dict) -> Dict: """Validate that return messages is not True.""" if values.get("return_messages", False): diff --git a/libs/langchain/langchain/memory/combined.py b/libs/langchain/langchain/memory/combined.py index 1c46c184d4685..44c544f9ca580 100644 --- a/libs/langchain/langchain/memory/combined.py +++ b/libs/langchain/langchain/memory/combined.py @@ -1,7 +1,7 @@ import warnings from typing import Any, Dict, List, Set -from pydantic import 
validator +from pydantic import field_validator from langchain.memory.chat_memory import BaseChatMemory from langchain.schema import BaseMemory @@ -13,7 +13,8 @@ class CombinedMemory(BaseMemory): memories: List[BaseMemory] """For tracking all the memories that should be accessed.""" - @validator("memories") + @field_validator("memories") + @classmethod def check_repeated_memory_variable( cls, value: List[BaseMemory] ) -> List[BaseMemory]: @@ -29,7 +30,8 @@ def check_repeated_memory_variable( return value - @validator("memories") + @field_validator("memories") + @classmethod def check_input_key(cls, value: List[BaseMemory]) -> List[BaseMemory]: """Check that if memories are of type BaseChatMemory that input keys exist.""" for val in value: diff --git a/libs/langchain/langchain/memory/entity.py b/libs/langchain/langchain/memory/entity.py index 54b9574b0dc96..f2af0b64928e9 100644 --- a/libs/langchain/langchain/memory/entity.py +++ b/libs/langchain/langchain/memory/entity.py @@ -77,7 +77,7 @@ class RedisEntityStore(BaseEntityStore): that TTL is extended by 3 days every time the entity is read back. 
""" - redis_client: Any + redis_client: Any = None session_id: str = "default" key_prefix: str = "memory_store" ttl: Optional[int] = 60 * 60 * 24 diff --git a/libs/langchain/langchain/memory/summary.py b/libs/langchain/langchain/memory/summary.py index afa0f9c8ef6ed..e8a272c485be2 100644 --- a/libs/langchain/langchain/memory/summary.py +++ b/libs/langchain/langchain/memory/summary.py @@ -2,7 +2,7 @@ from typing import Any, Dict, List, Type -from pydantic import BaseModel, root_validator +from pydantic import BaseModel, model_validator from langchain.chains.llm import LLMChain from langchain.memory.chat_memory import BaseChatMemory @@ -75,7 +75,8 @@ def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: buffer = self.buffer return {self.memory_key: buffer} - @root_validator() + @model_validator() + @classmethod def validate_prompt_input_variables(cls, values: Dict) -> Dict: """Validate that prompt input variables are consistent.""" prompt_variables = values["prompt"].input_variables diff --git a/libs/langchain/langchain/memory/summary_buffer.py b/libs/langchain/langchain/memory/summary_buffer.py index 0b49797f59a2a..1c416f11835f9 100644 --- a/libs/langchain/langchain/memory/summary_buffer.py +++ b/libs/langchain/langchain/memory/summary_buffer.py @@ -1,6 +1,6 @@ from typing import Any, Dict, List -from pydantic import root_validator +from pydantic import model_validator from langchain.memory.chat_memory import BaseChatMemory from langchain.memory.summary import SummarizerMixin @@ -42,7 +42,8 @@ def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: ) return {self.memory_key: final_buffer} - @root_validator() + @model_validator() + @classmethod def validate_prompt_input_variables(cls, values: Dict) -> Dict: """Validate that prompt input variables are consistent.""" prompt_variables = values["prompt"].input_variables diff --git a/libs/langchain/langchain/output_parsers/combining.py 
b/libs/langchain/langchain/output_parsers/combining.py index 511d9398f07af..454e91ec13ff4 100644 --- a/libs/langchain/langchain/output_parsers/combining.py +++ b/libs/langchain/langchain/output_parsers/combining.py @@ -2,7 +2,7 @@ from typing import Any, Dict, List -from pydantic import root_validator +from pydantic import model_validator from langchain.schema import BaseOutputParser @@ -16,7 +16,8 @@ def lc_serializable(self) -> bool: parsers: List[BaseOutputParser] - @root_validator() + @model_validator() + @classmethod def validate_parsers(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Validate the parsers.""" parsers = values["parsers"] diff --git a/libs/langchain/langchain/output_parsers/enum.py b/libs/langchain/langchain/output_parsers/enum.py index ee3daecfeef8d..f9c5115ae763a 100644 --- a/libs/langchain/langchain/output_parsers/enum.py +++ b/libs/langchain/langchain/output_parsers/enum.py @@ -1,7 +1,7 @@ from enum import Enum from typing import Any, Dict, List, Type -from pydantic import root_validator +from pydantic import model_validator from langchain.schema import BaseOutputParser, OutputParserException @@ -12,7 +12,8 @@ class EnumOutputParser(BaseOutputParser): enum: Type[Enum] """The enum to parse. 
Its values must be strings.""" - @root_validator() + @model_validator() + @classmethod def raise_deprecation(cls, values: Dict) -> Dict: enum = values["enum"] if not all(isinstance(e.value, str) for e in enum): diff --git a/libs/langchain/langchain/output_parsers/openai_functions.py b/libs/langchain/langchain/output_parsers/openai_functions.py index c55801c9bdad4..2a4755ef5cdf2 100644 --- a/libs/langchain/langchain/output_parsers/openai_functions.py +++ b/libs/langchain/langchain/output_parsers/openai_functions.py @@ -1,7 +1,7 @@ import json from typing import Any, Dict, List, Type, Union -from pydantic import BaseModel, root_validator +from pydantic import BaseModel, model_validator from langchain.schema import ( ChatGeneration, @@ -62,7 +62,8 @@ class PydanticOutputFunctionsParser(OutputFunctionsParser): pydantic_schema: Union[Type[BaseModel], Dict[str, Type[BaseModel]]] """The pydantic schema to parse the output with.""" - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_schema(cls, values: Dict) -> Dict: schema = values["pydantic_schema"] if "args_only" not in values: diff --git a/libs/langchain/langchain/prompts/chat.py b/libs/langchain/langchain/prompts/chat.py index 45e7e50a230d4..5c68182d23926 100644 --- a/libs/langchain/langchain/prompts/chat.py +++ b/libs/langchain/langchain/prompts/chat.py @@ -5,7 +5,7 @@ from pathlib import Path from typing import Any, Callable, List, Sequence, Tuple, Type, TypeVar, Union -from pydantic import Field, root_validator +from pydantic import Field, model_validator from langchain.load.serializable import Serializable from langchain.prompts.base import StringPromptTemplate @@ -333,7 +333,8 @@ def __add__(self, other: Any) -> ChatPromptTemplate: else: raise NotImplementedError(f"Unsupported operand type for +: {type(other)}") - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_input_variables(cls, values: dict) -> dict: """Validate input variables. 
diff --git a/libs/langchain/langchain/prompts/example_selector/length_based.py b/libs/langchain/langchain/prompts/example_selector/length_based.py index f6c665de322bf..8e8eb3eefaf52 100644 --- a/libs/langchain/langchain/prompts/example_selector/length_based.py +++ b/libs/langchain/langchain/prompts/example_selector/length_based.py @@ -2,7 +2,8 @@ import re from typing import Callable, Dict, List -from pydantic import BaseModel, validator +from pydantic import BaseModel +from pydantic.v1 import validator as v1_validator from langchain.prompts.example_selector.base import BaseExampleSelector from langchain.prompts.prompt import PromptTemplate @@ -35,7 +36,9 @@ def add_example(self, example: Dict[str, str]) -> None: string_example = self.example_prompt.format(**example) self.example_text_lengths.append(self.get_text_length(string_example)) - @validator("example_text_lengths", always=True) + # TODO[pydantic]: Replace with a Pydantic v2 `field_validator`. + # Check https://docs.pydantic.dev/dev-v2/migration/#changes-to-validators for more information. 
+ @v1_validator("example_text_lengths", always=True) def calculate_example_text_lengths(cls, v: List[int], values: Dict) -> List[int]: """Calculate text lengths if they don't exist.""" # Check if text lengths were passed in diff --git a/libs/langchain/langchain/prompts/example_selector/ngram_overlap.py b/libs/langchain/langchain/prompts/example_selector/ngram_overlap.py index cfe198d251f85..834761c3ef3a6 100644 --- a/libs/langchain/langchain/prompts/example_selector/ngram_overlap.py +++ b/libs/langchain/langchain/prompts/example_selector/ngram_overlap.py @@ -6,7 +6,7 @@ from typing import Dict, List import numpy as np -from pydantic import BaseModel, root_validator +from pydantic import BaseModel, model_validator from langchain.prompts.example_selector.base import BaseExampleSelector from langchain.prompts.prompt import PromptTemplate @@ -63,7 +63,8 @@ class NGramOverlapExampleSelector(BaseExampleSelector, BaseModel): and excludes examples with no ngram overlap with input. """ - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def check_dependencies(cls, values: Dict) -> Dict: """Check that valid dependencies exist.""" try: diff --git a/libs/langchain/langchain/prompts/example_selector/semantic_similarity.py b/libs/langchain/langchain/prompts/example_selector/semantic_similarity.py index 0d66c13673fbe..cdbfee6c7d552 100644 --- a/libs/langchain/langchain/prompts/example_selector/semantic_similarity.py +++ b/libs/langchain/langchain/prompts/example_selector/semantic_similarity.py @@ -3,7 +3,7 @@ from typing import Any, Dict, List, Optional, Type -from pydantic import BaseModel, Extra +from pydantic import BaseModel, ConfigDict from langchain.embeddings.base import Embeddings from langchain.prompts.example_selector.base import BaseExampleSelector @@ -27,12 +27,7 @@ class SemanticSimilarityExampleSelector(BaseExampleSelector, BaseModel): input_keys: Optional[List[str]] = None """Optional keys to filter input to. 
If provided, the search is based on the input variables instead of all variables.""" - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) def add_example(self, example: Dict[str, str]) -> str: """Add new example to vectorstore.""" diff --git a/libs/langchain/langchain/prompts/few_shot.py b/libs/langchain/langchain/prompts/few_shot.py index d89de95c26fa7..1513566f2d646 100644 --- a/libs/langchain/langchain/prompts/few_shot.py +++ b/libs/langchain/langchain/prompts/few_shot.py @@ -1,7 +1,7 @@ """Prompt template that contains few shot examples.""" from typing import Any, Dict, List, Optional -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.prompts.base import ( DEFAULT_FORMATTER_MAPPING, @@ -48,7 +48,8 @@ def lc_serializable(self) -> bool: validate_template: bool = True """Whether or not to try validating the template.""" - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def check_examples_and_selector(cls, values: Dict) -> Dict: """Check that one and only one of examples/example_selector are provided.""" examples = values.get("examples", None) @@ -65,7 +66,8 @@ def check_examples_and_selector(cls, values: Dict) -> Dict: return values - @root_validator() + @model_validator() + @classmethod def template_is_valid(cls, values: Dict) -> Dict: """Check that prefix, suffix, and input variables are consistent.""" if values["validate_template"]: @@ -76,11 +78,7 @@ def template_is_valid(cls, values: Dict) -> Dict: ) return values - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) def _get_examples(self, **kwargs: Any) -> List[dict]: if self.examples is not None: diff --git 
a/libs/langchain/langchain/prompts/few_shot_with_templates.py b/libs/langchain/langchain/prompts/few_shot_with_templates.py index 5e5330cf94926..f3fc2f39cdcdb 100644 --- a/libs/langchain/langchain/prompts/few_shot_with_templates.py +++ b/libs/langchain/langchain/prompts/few_shot_with_templates.py @@ -1,7 +1,7 @@ """Prompt template that contains few shot examples.""" from typing import Any, Dict, List, Optional -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.prompts.base import DEFAULT_FORMATTER_MAPPING, StringPromptTemplate from langchain.prompts.example_selector.base import BaseExampleSelector @@ -40,7 +40,8 @@ class FewShotPromptWithTemplates(StringPromptTemplate): validate_template: bool = True """Whether or not to try validating the template.""" - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def check_examples_and_selector(cls, values: Dict) -> Dict: """Check that one and only one of examples/example_selector are provided.""" examples = values.get("examples", None) @@ -57,7 +58,8 @@ def check_examples_and_selector(cls, values: Dict) -> Dict: return values - @root_validator() + @model_validator() + @classmethod def template_is_valid(cls, values: Dict) -> Dict: """Check that prefix, suffix, and input variables are consistent.""" if values["validate_template"]: @@ -74,11 +76,7 @@ def template_is_valid(cls, values: Dict) -> Dict: ) return values - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) def _get_examples(self, **kwargs: Any) -> List[dict]: if self.examples is not None: diff --git a/libs/langchain/langchain/prompts/pipeline.py b/libs/langchain/langchain/prompts/pipeline.py index 28364766a776e..1f1886dd586a0 100644 --- a/libs/langchain/langchain/prompts/pipeline.py +++ b/libs/langchain/langchain/prompts/pipeline.py @@ -1,6 
+1,6 @@ from typing import Any, Dict, List, Tuple -from pydantic import root_validator +from pydantic import model_validator from langchain.prompts.chat import BaseChatPromptTemplate from langchain.schema import BasePromptTemplate, PromptValue @@ -28,7 +28,8 @@ class PipelinePromptTemplate(BasePromptTemplate): pipeline_prompts: List[Tuple[str, BasePromptTemplate]] """A list of tuples, consisting of a string (`name`) and a Prompt Template.""" - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def get_input_variables(cls, values: Dict) -> Dict: """Get input variables.""" created_variables = set() diff --git a/libs/langchain/langchain/prompts/prompt.py b/libs/langchain/langchain/prompts/prompt.py index b285684e4c9f7..cfc56190ae6fa 100644 --- a/libs/langchain/langchain/prompts/prompt.py +++ b/libs/langchain/langchain/prompts/prompt.py @@ -5,7 +5,7 @@ from string import Formatter from typing import Any, Dict, List, Union -from pydantic import root_validator +from pydantic import model_validator from langchain.prompts.base import ( DEFAULT_FORMATTER_MAPPING, @@ -102,7 +102,8 @@ def format(self, **kwargs: Any) -> str: kwargs = self._merge_partial_and_user_variables(**kwargs) return DEFAULT_FORMATTER_MAPPING[self.template_format](self.template, **kwargs) - @root_validator() + @model_validator() + @classmethod def template_is_valid(cls, values: Dict) -> Dict: """Check that template and input variables are consistent.""" if values["validate_template"]: diff --git a/libs/langchain/langchain/retrievers/azure_cognitive_search.py b/libs/langchain/langchain/retrievers/azure_cognitive_search.py index 214f663e95bca..cb69fbb01e751 100644 --- a/libs/langchain/langchain/retrievers/azure_cognitive_search.py +++ b/libs/langchain/langchain/retrievers/azure_cognitive_search.py @@ -7,7 +7,7 @@ import aiohttp import requests -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import ( 
AsyncCallbackManagerForRetrieverRun, @@ -35,12 +35,10 @@ class AzureCognitiveSearchRetriever(BaseRetriever): """Key in a retrieved result to set as the Document page_content.""" top_k: Optional[int] = None """Number of results to retrieve. Set to None to retrieve all results.""" + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) - class Config: - extra = Extra.forbid - arbitrary_types_allowed = True - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that service name, index name and api key exists in environment.""" values["service_name"] = get_from_dict_or_env( diff --git a/libs/langchain/langchain/retrievers/bm25.py b/libs/langchain/langchain/retrievers/bm25.py index a5ef4f28496d4..9619b6528e839 100644 --- a/libs/langchain/langchain/retrievers/bm25.py +++ b/libs/langchain/langchain/retrievers/bm25.py @@ -7,6 +7,8 @@ from typing import Any, Callable, Dict, Iterable, List, Optional +from pydantic import ConfigDict + from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.schema import BaseRetriever, Document @@ -26,11 +28,7 @@ class BM25Retriever(BaseRetriever): """ Number of documents to return.""" preprocess_func: Callable[[str], List[str]] = default_preprocessing_func """ Preprocessing function to use on the text before BM25 vectorization.""" - - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) @classmethod def from_texts( diff --git a/libs/langchain/langchain/retrievers/chatgpt_plugin_retriever.py b/libs/langchain/langchain/retrievers/chatgpt_plugin_retriever.py index 5f2404f88ca50..9adbbdc57a7a9 100644 --- a/libs/langchain/langchain/retrievers/chatgpt_plugin_retriever.py +++ b/libs/langchain/langchain/retrievers/chatgpt_plugin_retriever.py @@ -4,6 +4,7 @@ import aiohttp import requests +from pydantic import 
ConfigDict from langchain.callbacks.manager import ( AsyncCallbackManagerForRetrieverRun, @@ -25,12 +26,7 @@ class ChatGPTPluginRetriever(BaseRetriever): """Filter to apply to the results.""" aiosession: Optional[aiohttp.ClientSession] = None """Aiohttp session to use for requests.""" - - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True - """Allow arbitrary types.""" + model_config = ConfigDict(arbitrary_types_allowed=True) def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun diff --git a/libs/langchain/langchain/retrievers/contextual_compression.py b/libs/langchain/langchain/retrievers/contextual_compression.py index 0a5654b052b91..6f9987da921f9 100644 --- a/libs/langchain/langchain/retrievers/contextual_compression.py +++ b/libs/langchain/langchain/retrievers/contextual_compression.py @@ -1,5 +1,7 @@ from typing import Any, List +from pydantic import ConfigDict + from langchain.callbacks.manager import ( AsyncCallbackManagerForRetrieverRun, CallbackManagerForRetrieverRun, @@ -18,11 +20,7 @@ class ContextualCompressionRetriever(BaseRetriever): base_retriever: BaseRetriever """Base Retriever to use for getting relevant documents.""" - - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) def _get_relevant_documents( self, diff --git a/libs/langchain/langchain/retrievers/docarray.py b/libs/langchain/langchain/retrievers/docarray.py index edd4e81f08807..d2f9a719c3173 100644 --- a/libs/langchain/langchain/retrievers/docarray.py +++ b/libs/langchain/langchain/retrievers/docarray.py @@ -2,6 +2,7 @@ from typing import Any, Dict, List, Optional, Union import numpy as np +from pydantic import ConfigDict from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.embeddings.base import Embeddings @@ -43,11 +44,7 @@ class DocArrayRetriever(BaseRetriever): 
search_type: SearchType = SearchType.similarity top_k: int = 1 filters: Optional[Any] = None - - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) def _get_relevant_documents( self, diff --git a/libs/langchain/langchain/retrievers/document_compressors/base.py b/libs/langchain/langchain/retrievers/document_compressors/base.py index 1e0587c59ce7a..dd776515e3258 100644 --- a/libs/langchain/langchain/retrievers/document_compressors/base.py +++ b/libs/langchain/langchain/retrievers/document_compressors/base.py @@ -3,7 +3,7 @@ from inspect import signature from typing import List, Optional, Sequence, Union -from pydantic import BaseModel +from pydantic import BaseModel, ConfigDict from langchain.callbacks.manager import Callbacks from langchain.schema import BaseDocumentTransformer, Document @@ -36,11 +36,7 @@ class DocumentCompressorPipeline(BaseDocumentCompressor): transformers: List[Union[BaseDocumentTransformer, BaseDocumentCompressor]] """List of document filters that are chained together and run in sequence.""" - - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) def compress_documents( self, diff --git a/libs/langchain/langchain/retrievers/document_compressors/cohere_rerank.py b/libs/langchain/langchain/retrievers/document_compressors/cohere_rerank.py index 722cc6d33c30f..d097705496c69 100644 --- a/libs/langchain/langchain/retrievers/document_compressors/cohere_rerank.py +++ b/libs/langchain/langchain/retrievers/document_compressors/cohere_rerank.py @@ -2,7 +2,7 @@ from typing import TYPE_CHECKING, Dict, Optional, Sequence -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import Callbacks from langchain.retrievers.document_compressors.base import BaseDocumentCompressor @@ -29,14 
+29,10 @@ class CohereRerank(BaseDocumentCompressor): """Number of documents to return.""" model: str = "rerank-english-v2.0" """Model to use for reranking.""" + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" cohere_api_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/retrievers/document_compressors/embeddings_filter.py b/libs/langchain/langchain/retrievers/document_compressors/embeddings_filter.py index fb49ca02ea6f5..402f370fdccb0 100644 --- a/libs/langchain/langchain/retrievers/document_compressors/embeddings_filter.py +++ b/libs/langchain/langchain/retrievers/document_compressors/embeddings_filter.py @@ -1,7 +1,7 @@ from typing import Callable, Dict, Optional, Sequence import numpy as np -from pydantic import root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import Callbacks from langchain.document_transformers.embeddings_redundant_filter import ( @@ -33,13 +33,10 @@ class EmbeddingsFilter(BaseDocumentCompressor): """Threshold for determining when two documents are similar enough to be considered redundant. 
Defaults to None, must be specified if `k` is set to None.""" + model_config = ConfigDict(arbitrary_types_allowed=True) - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True - - @root_validator() + @model_validator() + @classmethod def validate_params(cls, values: Dict) -> Dict: """Validate similarity parameters.""" if values["k"] is None and values["similarity_threshold"] is None: diff --git a/libs/langchain/langchain/retrievers/ensemble.py b/libs/langchain/langchain/retrievers/ensemble.py index b01a33fab6b84..71266fc8fc9e3 100644 --- a/libs/langchain/langchain/retrievers/ensemble.py +++ b/libs/langchain/langchain/retrievers/ensemble.py @@ -4,7 +4,7 @@ """ from typing import Any, Dict, List -from pydantic import root_validator +from pydantic import model_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForRetrieverRun, @@ -30,7 +30,8 @@ class EnsembleRetriever(BaseRetriever): weights: List[float] c: int = 60 - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def set_weights(cls, values: Dict[str, Any]) -> Dict[str, Any]: if not values.get("weights"): n_retrievers = len(values["retrievers"]) diff --git a/libs/langchain/langchain/retrievers/google_cloud_enterprise_search.py b/libs/langchain/langchain/retrievers/google_cloud_enterprise_search.py index 2acb01b3638cc..dd928f0a6c878 100644 --- a/libs/langchain/langchain/retrievers/google_cloud_enterprise_search.py +++ b/libs/langchain/langchain/retrievers/google_cloud_enterprise_search.py @@ -3,7 +3,7 @@ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence -from pydantic import Extra, Field, root_validator +from pydantic import ConfigDict, Field, model_validator from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.schema import BaseRetriever, Document @@ -63,15 +63,10 @@ class GoogleCloudEnterpriseSearchRetriever(BaseRetriever): _client: SearchServiceClient _serving_config: str 
+ model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True - underscore_attrs_are_private = True - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validates the environment.""" try: diff --git a/libs/langchain/langchain/retrievers/kendra.py b/libs/langchain/langchain/retrievers/kendra.py index 71e0ea1cf3ba9..280408e8b65e2 100644 --- a/libs/langchain/langchain/retrievers/kendra.py +++ b/libs/langchain/langchain/retrievers/kendra.py @@ -2,7 +2,7 @@ from abc import ABC, abstractmethod from typing import Any, Dict, List, Literal, Optional, Union -from pydantic import BaseModel, Extra, root_validator +from pydantic import BaseModel, Extra, model_validator from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.docstore.document import Document @@ -54,9 +54,9 @@ class Highlight(BaseModel, extra=Extra.allow): """The zero-based location in the excerpt where the highlight starts.""" EndOffset: int """The zero-based location in the excerpt where the highlight ends.""" - TopAnswer: Optional[bool] + TopAnswer: Optional[bool] = None """Indicates whether the result is the best one.""" - Type: Optional[str] + Type: Optional[str] = None """The highlight type: STANDARD or THESAURUS_SYNONYM.""" @@ -65,7 +65,7 @@ class TextWithHighLights(BaseModel, extra=Extra.allow): Text: str """The text.""" - Highlights: Optional[Any] + Highlights: Optional[Any] = None """The highlights.""" @@ -93,13 +93,13 @@ def get_value_text(self) -> str: class DocumentAttributeValue(BaseModel, extra=Extra.allow): """The value of a document attribute.""" - DateValue: Optional[str] + DateValue: Optional[str] = None """The date value.""" - LongValue: Optional[int] + LongValue: Optional[int] = None """The long value.""" - StringListValue: Optional[List[str]] + 
StringListValue: Optional[List[str]] = None """The string list value.""" - StringValue: Optional[str] + StringValue: Optional[str] = None """The string value.""" @property @@ -132,11 +132,11 @@ class DocumentAttribute(BaseModel, extra=Extra.allow): class ResultItem(BaseModel, ABC, extra=Extra.allow): """Abstract class that represents a result item.""" - Id: Optional[str] + Id: Optional[str] = None """The ID of the item.""" - DocumentId: Optional[str] + DocumentId: Optional[str] = None """The document ID.""" - DocumentURI: Optional[str] + DocumentURI: Optional[str] = None """The document URI.""" DocumentAttributes: Optional[List[DocumentAttribute]] = [] """The document attributes.""" @@ -183,19 +183,19 @@ class QueryResultItem(ResultItem): DocumentTitle: TextWithHighLights """The document title.""" - FeedbackToken: Optional[str] + FeedbackToken: Optional[str] = None """Identifies a particular result from a particular query.""" - Format: Optional[str] + Format: Optional[str] = None """ If the Type is ANSWER, then format is either: * TABLE: a table excerpt is returned in TableExcerpt; * TEXT: a text excerpt is returned in DocumentExcerpt. 
""" - Type: Optional[str] + Type: Optional[str] = None """Type of result: DOCUMENT or QUESTION_ANSWER or ANSWER""" AdditionalAttributes: Optional[List[AdditionalResultAttribute]] = [] """One or more additional attributes associated with the result.""" - DocumentExcerpt: Optional[TextWithHighLights] + DocumentExcerpt: Optional[TextWithHighLights] = None """Excerpt of the document text.""" def get_title(self) -> str: @@ -252,9 +252,9 @@ def get_top_k_docs(self, top_n: int) -> List[Document]: class RetrieveResultItem(ResultItem): """A Retrieve API result item.""" - DocumentTitle: Optional[str] + DocumentTitle: Optional[str] = None """The document title.""" - Content: Optional[str] + Content: Optional[str] = None """The content of the item.""" def get_title(self) -> str: @@ -320,7 +320,8 @@ class AmazonKendraRetriever(BaseRetriever): attribute_filter: Optional[Dict] = None client: Any - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def create_client(cls, values: Dict[str, Any]) -> Dict[str, Any]: if values.get("client") is not None: return values diff --git a/libs/langchain/langchain/retrievers/knn.py b/libs/langchain/langchain/retrievers/knn.py index d28408347f6ea..2b5f67f1f0a2b 100644 --- a/libs/langchain/langchain/retrievers/knn.py +++ b/libs/langchain/langchain/retrievers/knn.py @@ -8,6 +8,7 @@ from typing import Any, List, Optional import numpy as np +from pydantic import ConfigDict from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.embeddings.base import Embeddings @@ -42,12 +43,7 @@ class KNNRetriever(BaseRetriever): """Number of results to return.""" relevancy_threshold: Optional[float] = None """Threshold for relevancy.""" - - class Config: - - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) @classmethod def from_texts( diff --git a/libs/langchain/langchain/retrievers/metal.py 
b/libs/langchain/langchain/retrievers/metal.py index b4faaeab3df7c..4edbcc8851389 100644 --- a/libs/langchain/langchain/retrievers/metal.py +++ b/libs/langchain/langchain/retrievers/metal.py @@ -1,6 +1,6 @@ from typing import Any, List, Optional -from pydantic import root_validator +from pydantic import model_validator from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.schema import BaseRetriever, Document @@ -14,7 +14,8 @@ class MetalRetriever(BaseRetriever): params: Optional[dict] = None """The parameters to pass to the Metal client.""" - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_client(cls, values: dict) -> dict: """Validate that the client is of the correct type.""" from metal_sdk.metal import Metal diff --git a/libs/langchain/langchain/retrievers/milvus.py b/libs/langchain/langchain/retrievers/milvus.py index bc35e73184213..cbe053772ff34 100644 --- a/libs/langchain/langchain/retrievers/milvus.py +++ b/libs/langchain/langchain/retrievers/milvus.py @@ -2,7 +2,7 @@ import warnings from typing import Any, Dict, List, Optional -from pydantic import root_validator +from pydantic import model_validator from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.embeddings.base import Embeddings @@ -24,7 +24,8 @@ class MilvusRetriever(BaseRetriever): store: Milvus retriever: BaseRetriever - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def create_retriever(cls, values: Dict) -> Dict: """Create the Milvus store and retriever.""" values["store"] = Milvus( diff --git a/libs/langchain/langchain/retrievers/pinecone_hybrid_search.py b/libs/langchain/langchain/retrievers/pinecone_hybrid_search.py index 97d562421d3e8..ef5a30f17bef2 100644 --- a/libs/langchain/langchain/retrievers/pinecone_hybrid_search.py +++ b/libs/langchain/langchain/retrievers/pinecone_hybrid_search.py @@ -3,7 +3,7 @@ import hashlib from typing import Any, Dict, List, 
Optional -from pydantic import Extra, root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.embeddings.base import Embeddings @@ -109,12 +109,7 @@ class PineconeHybridSearchRetriever(BaseRetriever): """Number of documents to return.""" alpha: float = 0.5 """Alpha value for hybrid search.""" - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) def add_texts( self, @@ -131,7 +126,8 @@ def add_texts( metadatas=metadatas, ) - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" try: diff --git a/libs/langchain/langchain/retrievers/self_query/base.py b/libs/langchain/langchain/retrievers/self_query/base.py index 15463751b4fda..e0150bc3b5bb5 100644 --- a/libs/langchain/langchain/retrievers/self_query/base.py +++ b/libs/langchain/langchain/retrievers/self_query/base.py @@ -2,7 +2,7 @@ from typing import Any, Dict, List, Optional, Type, cast -from pydantic import BaseModel, Field, root_validator +from pydantic import BaseModel, ConfigDict, Field, model_validator from langchain import LLMChain from langchain.callbacks.manager import CallbackManagerForRetrieverRun @@ -68,13 +68,10 @@ class SelfQueryRetriever(BaseRetriever, BaseModel): verbose: bool = False """Use original query instead of the revised new query from LLM""" use_original_query: bool = False + model_config = ConfigDict(arbitrary_types_allowed=True) - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_translator(cls, values: Dict) -> Dict: """Validate translator.""" if "structured_query_translator" not in values: diff 
--git a/libs/langchain/langchain/retrievers/svm.py b/libs/langchain/langchain/retrievers/svm.py index 3c65e974ebfff..8bc10f8cc55ce 100644 --- a/libs/langchain/langchain/retrievers/svm.py +++ b/libs/langchain/langchain/retrievers/svm.py @@ -4,6 +4,7 @@ from typing import Any, Iterable, List, Optional import numpy as np +from pydantic import ConfigDict from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.embeddings.base import Embeddings @@ -42,12 +43,7 @@ class SVMRetriever(BaseRetriever): """Number of results to return.""" relevancy_threshold: Optional[float] = None """Threshold for relevancy.""" - - class Config: - - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) @classmethod def from_texts( diff --git a/libs/langchain/langchain/retrievers/tfidf.py b/libs/langchain/langchain/retrievers/tfidf.py index 1d910f18ecfb9..90d5ef06b200c 100644 --- a/libs/langchain/langchain/retrievers/tfidf.py +++ b/libs/langchain/langchain/retrievers/tfidf.py @@ -2,6 +2,8 @@ from typing import Any, Dict, Iterable, List, Optional +from pydantic import ConfigDict + from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.schema import BaseRetriever, Document @@ -21,11 +23,7 @@ class TFIDFRetriever(BaseRetriever): """TF-IDF array.""" k: int = 4 """Number of documents to return.""" - - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) @classmethod def from_texts( diff --git a/libs/langchain/langchain/retrievers/time_weighted_retriever.py b/libs/langchain/langchain/retrievers/time_weighted_retriever.py index b2aebfa913bf1..4277bba9f7d07 100644 --- a/libs/langchain/langchain/retrievers/time_weighted_retriever.py +++ b/libs/langchain/langchain/retrievers/time_weighted_retriever.py @@ -2,7 +2,7 @@ from copy import deepcopy from typing import 
Any, Dict, List, Optional, Tuple -from pydantic import Field +from pydantic import ConfigDict, Field from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.schema import BaseRetriever, Document @@ -42,11 +42,7 @@ class TimeWeightedVectorStoreRetriever(BaseRetriever): None assigns no salience to documents not fetched from the vector store. """ - - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) def _get_combined_score( self, diff --git a/libs/langchain/langchain/retrievers/weaviate_hybrid_search.py b/libs/langchain/langchain/retrievers/weaviate_hybrid_search.py index 2bd64ed3f172d..47d950c5f904c 100644 --- a/libs/langchain/langchain/retrievers/weaviate_hybrid_search.py +++ b/libs/langchain/langchain/retrievers/weaviate_hybrid_search.py @@ -3,7 +3,7 @@ from typing import Any, Dict, List, Optional, cast from uuid import uuid4 -from pydantic import root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.docstore.document import Document @@ -28,7 +28,8 @@ class WeaviateHybridSearchRetriever(BaseRetriever): create_schema_if_missing: bool = True """Whether to create the schema if it doesn't exist.""" - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_client( cls, values: Dict[str, Any], @@ -62,10 +63,7 @@ def validate_client( return values - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) # added text_key def add_documents(self, docs: List[Document], **kwargs: Any) -> List[str]: diff --git a/libs/langchain/langchain/retrievers/zep.py b/libs/langchain/langchain/retrievers/zep.py index 7d1f18cda1a5d..01e8c058e7bfb 100644 --- a/libs/langchain/langchain/retrievers/zep.py +++ 
b/libs/langchain/langchain/retrievers/zep.py @@ -2,7 +2,7 @@ from typing import TYPE_CHECKING, Any, Dict, List, Optional -from pydantic import root_validator +from pydantic import model_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForRetrieverRun, @@ -37,7 +37,8 @@ class ZepRetriever(BaseRetriever): top_k: Optional[int] """Number of documents to return.""" - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def create_client(cls, values: dict) -> dict: try: from zep_python import ZepClient diff --git a/libs/langchain/langchain/retrievers/zilliz.py b/libs/langchain/langchain/retrievers/zilliz.py index e023bac771433..7d94f55f82bf6 100644 --- a/libs/langchain/langchain/retrievers/zilliz.py +++ b/libs/langchain/langchain/retrievers/zilliz.py @@ -1,7 +1,7 @@ import warnings from typing import Any, Dict, List, Optional -from pydantic import root_validator +from pydantic import model_validator from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.embeddings.base import Embeddings @@ -29,7 +29,8 @@ class ZillizRetriever(BaseRetriever): retriever: BaseRetriever """The underlying retriever.""" - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def create_client(cls, values: dict) -> dict: values["store"] = Zilliz( values["embedding_function"], diff --git a/libs/langchain/langchain/schema/memory.py b/libs/langchain/langchain/schema/memory.py index 46da7504b2ac0..a68bca10c2f8f 100644 --- a/libs/langchain/langchain/schema/memory.py +++ b/libs/langchain/langchain/schema/memory.py @@ -3,6 +3,8 @@ from abc import ABC, abstractmethod from typing import Any, Dict, List +from pydantic import ConfigDict + from langchain.load.serializable import Serializable from langchain.schema.messages import AIMessage, BaseMessage, HumanMessage @@ -37,10 +39,7 @@ def clear(self) -> None: pass """ # noqa: E501 - class Config: - """Configuration for this pydantic object.""" - - 
arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) @property @abstractmethod diff --git a/libs/langchain/langchain/schema/prompt_template.py b/libs/langchain/langchain/schema/prompt_template.py index b480ecc946449..b4a74b2ddc1d0 100644 --- a/libs/langchain/langchain/schema/prompt_template.py +++ b/libs/langchain/langchain/schema/prompt_template.py @@ -6,7 +6,7 @@ from typing import Any, Callable, Dict, List, Mapping, Optional, Union import yaml -from pydantic import Field, root_validator +from pydantic import ConfigDict, Field, model_validator from langchain.load.serializable import Serializable from langchain.schema.document import Document @@ -30,10 +30,7 @@ class BasePromptTemplate(Serializable, Runnable[Dict, PromptValue], ABC): def lc_serializable(self) -> bool: return True - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) def invoke(self, input: Dict, config: RunnableConfig | None = None) -> PromptValue: return self._call_with_config( @@ -44,7 +41,8 @@ def invoke(self, input: Dict, config: RunnableConfig | None = None) -> PromptVal def format_prompt(self, **kwargs: Any) -> PromptValue: """Create Chat Messages.""" - @root_validator() + @model_validator() + @classmethod def validate_variable_names(cls, values: Dict) -> Dict: """Validate variable names do not include restricted names.""" if "stop" in values["input_variables"]: diff --git a/libs/langchain/langchain/schema/retriever.py b/libs/langchain/langchain/schema/retriever.py index 9df3e7a13890c..9c1ef5c032ec9 100644 --- a/libs/langchain/langchain/schema/retriever.py +++ b/libs/langchain/langchain/schema/retriever.py @@ -5,6 +5,8 @@ from inspect import signature from typing import TYPE_CHECKING, Any, Dict, List, Optional +from pydantic import ConfigDict + from langchain.load.dump import dumpd from langchain.load.serializable import Serializable from 
langchain.schema.document import Document @@ -46,10 +48,7 @@ def get_relevant_documents(self, query: str) -> List[Document]: return [self.docs[i] for i in results.argsort()[-self.k :][::-1]] """ # noqa: E501 - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) _new_arg_supported: bool = False _expects_other_args: bool = False diff --git a/libs/langchain/langchain/schema/runnable.py b/libs/langchain/langchain/schema/runnable.py index f04835297066e..c1b8571705e43 100644 --- a/libs/langchain/langchain/schema/runnable.py +++ b/libs/langchain/langchain/schema/runnable.py @@ -19,7 +19,7 @@ cast, ) -from pydantic import Field +from pydantic import ConfigDict, Field from langchain.callbacks.base import BaseCallbackManager, Callbacks from langchain.load.dump import dumpd @@ -187,8 +187,7 @@ def steps(self) -> List[Runnable[Any, Any]]: def lc_serializable(self) -> bool: return True - class Config: - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) def __or__( self, @@ -557,8 +556,7 @@ class RunnableMap(Serializable, Runnable[Input, Dict[str, Any]]): def lc_serializable(self) -> bool: return True - class Config: - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) def invoke( self, input: Input, config: Optional[RunnableConfig] = None diff --git a/libs/langchain/langchain/smith/evaluation/config.py b/libs/langchain/langchain/smith/evaluation/config.py index 21cb48482163a..f26d5465107b9 100644 --- a/libs/langchain/langchain/smith/evaluation/config.py +++ b/libs/langchain/langchain/smith/evaluation/config.py @@ -3,7 +3,7 @@ from typing import Any, Dict, List, Optional, Union from langsmith import RunEvaluator -from pydantic import BaseModel, Field +from pydantic import BaseModel, ConfigDict, Field from langchain.embeddings.base import Embeddings from langchain.evaluation.criteria.eval_chain import 
CRITERIA_TYPE @@ -105,9 +105,7 @@ class RunEvalConfig(BaseModel): input. If not provided, it will be inferred automatically.""" eval_llm: Optional[BaseLanguageModel] = None """The language model to pass to any evaluators that require one.""" - - class Config: - arbitrary_types_allowed = True + model_config = ConfigDict() class Criteria(EvalConfig): """Configuration for a reference-free criteria evaluator. @@ -166,9 +164,7 @@ class EmbeddingDistance(EvalConfig): evaluator_type: EvaluatorType = EvaluatorType.EMBEDDING_DISTANCE embeddings: Optional[Embeddings] = None distance_metric: Optional[EmbeddingDistanceEnum] = None - - class Config: - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) class StringDistance(EvalConfig): """Configuration for a string distance evaluator. diff --git a/libs/langchain/langchain/tools/azure_cognitive_services/form_recognizer.py b/libs/langchain/langchain/tools/azure_cognitive_services/form_recognizer.py index d28d69d520cfc..77277bed54f3c 100644 --- a/libs/langchain/langchain/tools/azure_cognitive_services/form_recognizer.py +++ b/libs/langchain/langchain/tools/azure_cognitive_services/form_recognizer.py @@ -3,7 +3,7 @@ import logging from typing import Any, Dict, List, Optional -from pydantic import root_validator +from pydantic import model_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, @@ -35,7 +35,8 @@ class AzureCogsFormRecognizerTool(BaseTool): "Input should be a url to a document." 
) - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and endpoint exists in environment.""" azure_cogs_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/tools/azure_cognitive_services/image_analysis.py b/libs/langchain/langchain/tools/azure_cognitive_services/image_analysis.py index 4a8aaa94041ad..147821722b6a7 100644 --- a/libs/langchain/langchain/tools/azure_cognitive_services/image_analysis.py +++ b/libs/langchain/langchain/tools/azure_cognitive_services/image_analysis.py @@ -3,7 +3,7 @@ import logging from typing import Any, Dict, Optional -from pydantic import root_validator +from pydantic import model_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, @@ -35,7 +35,8 @@ class AzureCogsImageAnalysisTool(BaseTool): "Input should be a url to an image." ) - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and endpoint exists in environment.""" azure_cogs_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/tools/azure_cognitive_services/speech2text.py b/libs/langchain/langchain/tools/azure_cognitive_services/speech2text.py index dd358d5c8c2e1..42ce4f1a7851a 100644 --- a/libs/langchain/langchain/tools/azure_cognitive_services/speech2text.py +++ b/libs/langchain/langchain/tools/azure_cognitive_services/speech2text.py @@ -4,7 +4,7 @@ import time from typing import Any, Dict, Optional -from pydantic import root_validator +from pydantic import model_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, @@ -39,7 +39,8 @@ class AzureCogsSpeech2TextTool(BaseTool): "Input should be a url to an audio file." 
) - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and endpoint exists in environment.""" azure_cogs_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/tools/azure_cognitive_services/text2speech.py b/libs/langchain/langchain/tools/azure_cognitive_services/text2speech.py index 2725b5161fa2f..2919636ca45c4 100644 --- a/libs/langchain/langchain/tools/azure_cognitive_services/text2speech.py +++ b/libs/langchain/langchain/tools/azure_cognitive_services/text2speech.py @@ -4,7 +4,7 @@ import tempfile from typing import Any, Dict, Optional -from pydantic import root_validator +from pydantic import model_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, @@ -34,7 +34,8 @@ class AzureCogsText2SpeechTool(BaseTool): "Useful for when you need to convert text to speech. " ) - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and endpoint exists in environment.""" azure_cogs_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/tools/base.py b/libs/langchain/langchain/tools/base.py index f39132efbdfd6..0d16f2edbeca4 100644 --- a/libs/langchain/langchain/tools/base.py +++ b/libs/langchain/langchain/tools/base.py @@ -8,10 +8,11 @@ from pydantic import ( BaseModel, + ConfigDict, Extra, Field, create_model, - root_validator, + model_validator, validate_arguments, ) from pydantic.main import ModelMetaclass @@ -170,12 +171,7 @@ class BaseTool(ABC, BaseModel, metaclass=ToolMetaclass): Union[bool, str, Callable[[ToolException], str]] ] = False """Handle the content of the ToolException thrown.""" - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) @property def 
is_single_input(self) -> bool: @@ -208,7 +204,8 @@ def _parse_input( return {k: v for k, v in result.dict().items() if k in tool_input} return tool_input - @root_validator() + @model_validator(mode="before") + @classmethod def raise_deprecation(cls, values: Dict) -> Dict: """Raise deprecation warning if callback_manager is used.""" if values.get("callback_manager") is not None: diff --git a/libs/langchain/langchain/tools/graphql/tool.py b/libs/langchain/langchain/tools/graphql/tool.py index e19587fd0fedb..dbfb71f8795b8 100644 --- a/libs/langchain/langchain/tools/graphql/tool.py +++ b/libs/langchain/langchain/tools/graphql/tool.py @@ -1,6 +1,8 @@ import json from typing import Optional +from pydantic import ConfigDict + from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, @@ -23,11 +25,7 @@ class BaseGraphQLTool(BaseTool): Example Input: query {{ allUsers {{ id, name, email }} }}\ """ # noqa: E501 - - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) def _run( self, diff --git a/libs/langchain/langchain/tools/office365/events_search.py b/libs/langchain/langchain/tools/office365/events_search.py index f2e83748c0c59..d2ec2bd8fed18 100644 --- a/libs/langchain/langchain/tools/office365/events_search.py +++ b/libs/langchain/langchain/tools/office365/events_search.py @@ -7,7 +7,7 @@ from datetime import datetime as dt from typing import Any, Dict, List, Optional, Type -from pydantic import BaseModel, Extra, Field +from pydantic import BaseModel, ConfigDict, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, @@ -72,11 +72,7 @@ class O365SearchEvents(O365BaseTool): " not schedule any meeting over existing meetings, and that the user " "is busy during meetings. Any times without events are free for the user. 
" ) - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") def _run( self, diff --git a/libs/langchain/langchain/tools/office365/messages_search.py b/libs/langchain/langchain/tools/office365/messages_search.py index fce0569f39f1b..c9042277ebc56 100644 --- a/libs/langchain/langchain/tools/office365/messages_search.py +++ b/libs/langchain/langchain/tools/office365/messages_search.py @@ -6,7 +6,7 @@ from typing import Any, Dict, List, Optional, Type -from pydantic import BaseModel, Extra, Field +from pydantic import BaseModel, ConfigDict, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, @@ -68,11 +68,7 @@ class O365SearchEmails(O365BaseTool): " The input must be a valid Microsoft Graph v1.0 $search query." " The output is a JSON list of the requested resource." ) - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") def _run( self, diff --git a/libs/langchain/langchain/tools/plugin.py b/libs/langchain/langchain/tools/plugin.py index eafca8491a3c4..2bdac47df4e15 100644 --- a/libs/langchain/langchain/tools/plugin.py +++ b/libs/langchain/langchain/tools/plugin.py @@ -32,9 +32,9 @@ class AIPlugin(BaseModel): description_for_human: str auth: Optional[dict] = None api: ApiConfig - logo_url: Optional[str] - contact_email: Optional[str] - legal_info_url: Optional[str] + logo_url: Optional[str] = None + contact_email: Optional[str] = None + legal_info_url: Optional[str] = None @classmethod def from_url(cls, url: str) -> AIPlugin: diff --git a/libs/langchain/langchain/tools/powerbi/tool.py b/libs/langchain/langchain/tools/powerbi/tool.py index 2509f9bc11550..fe40c9cffb4be 100644 --- a/libs/langchain/langchain/tools/powerbi/tool.py +++ b/libs/langchain/langchain/tools/powerbi/tool.py @@ -3,7 +3,7 @@ from time import perf_counter from typing import Any, Dict, Optional, Tuple -from pydantic 
import Field, validator +from pydantic import ConfigDict, Field, field_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, @@ -38,13 +38,10 @@ class QueryPowerBITool(BaseTool): max_iterations: int = 5 output_token_limit: int = 4000 tiktoken_model_name: Optional[str] = None # "cl100k_base" + model_config = ConfigDict(arbitrary_types_allowed=True) - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True - - @validator("llm_chain") + @field_validator("llm_chain") + @classmethod def validate_llm_chain_input_variables( # pylint: disable=E0213 cls, llm_chain: LLMChain ) -> LLMChain: @@ -224,11 +221,7 @@ class InfoPowerBITool(BaseTool): Example Input: "table1, table2, table3" """ # noqa: E501 powerbi: PowerBIDataset = Field(exclude=True) - - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) def _run( self, @@ -252,11 +245,7 @@ class ListPowerBITool(BaseTool): name = "list_tables_powerbi" description = "Input is an empty string, output is a comma separated list of tables in the database." 
# noqa: E501 # pylint: disable=C0301 powerbi: PowerBIDataset = Field(exclude=True) - - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) def _run( self, diff --git a/libs/langchain/langchain/tools/python/tool.py b/libs/langchain/langchain/tools/python/tool.py index 0351c4044b392..58a246387d1d1 100644 --- a/libs/langchain/langchain/tools/python/tool.py +++ b/libs/langchain/langchain/tools/python/tool.py @@ -8,7 +8,7 @@ from io import StringIO from typing import Any, Dict, Optional -from pydantic import Field, root_validator +from pydantic import Field, model_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, @@ -92,7 +92,8 @@ class PythonAstREPLTool(BaseTool): locals: Optional[Dict] = Field(default_factory=dict) sanitize_input: bool = True - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_python_version(cls, values: Dict) -> Dict: """Validate valid python version.""" if sys.version_info < (3, 9): diff --git a/libs/langchain/langchain/tools/searx_search/tool.py b/libs/langchain/langchain/tools/searx_search/tool.py index 3de5521c658c9..dd292e5994e28 100644 --- a/libs/langchain/langchain/tools/searx_search/tool.py +++ b/libs/langchain/langchain/tools/searx_search/tool.py @@ -1,7 +1,7 @@ """Tool for the SearxNG search API.""" from typing import Optional -from pydantic import Extra +from pydantic import ConfigDict from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, @@ -52,11 +52,7 @@ class SearxSearchResults(BaseTool): wrapper: SearxSearchWrapper num_results: int = 4 kwargs: dict = Field(default_factory=dict) - - class Config: - """Pydantic config.""" - - extra = Extra.allow + model_config = ConfigDict(extra="allow") def _run( self, diff --git a/libs/langchain/langchain/tools/spark_sql/tool.py b/libs/langchain/langchain/tools/spark_sql/tool.py index 
0b23741e923ee..3c937786cdee6 100644 --- a/libs/langchain/langchain/tools/spark_sql/tool.py +++ b/libs/langchain/langchain/tools/spark_sql/tool.py @@ -2,7 +2,7 @@ """Tools for interacting with Spark SQL.""" from typing import Any, Dict, Optional -from pydantic import BaseModel, Extra, Field, root_validator +from pydantic import model_validator, BaseModel, Extra, Field, ConfigDict from langchain.schema.language_model import BaseLanguageModel from langchain.callbacks.manager import ( @@ -20,14 +20,7 @@ class BaseSparkSQLTool(BaseModel): """Base tool for interacting with Spark SQL.""" db: SparkSQL = Field(exclude=True) - - # Override BaseTool.Config to appease mypy - # See https://github.com/pydantic/pydantic/issues/4173 - class Config(BaseTool.Config): - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True - extra = Extra.forbid + model_config = ConfigDict(arbitrary_types_allowed=True, extra="forbid") class QuerySparkSQLTool(BaseSparkSQLTool, BaseTool): @@ -118,7 +111,8 @@ class QueryCheckerTool(BaseSparkSQLTool, BaseTool): Always use this tool before executing a query with query_sql_db! 
""" - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def initialize_llm_chain(cls, values: Dict[str, Any]) -> Dict[str, Any]: if "llm_chain" not in values: values["llm_chain"] = LLMChain( diff --git a/libs/langchain/langchain/tools/sql_database/tool.py b/libs/langchain/langchain/tools/sql_database/tool.py index 6901ea061c2bd..9cdc7893b8697 100644 --- a/libs/langchain/langchain/tools/sql_database/tool.py +++ b/libs/langchain/langchain/tools/sql_database/tool.py @@ -2,7 +2,7 @@ """Tools for interacting with a SQL database.""" from typing import Any, Dict, Optional -from pydantic import BaseModel, Extra, Field, root_validator +from pydantic import model_validator, BaseModel, Extra, Field, ConfigDict from langchain.schema.language_model import BaseLanguageModel from langchain.callbacks.manager import ( @@ -20,14 +20,7 @@ class BaseSQLDatabaseTool(BaseModel): """Base tool for interacting with a SQL database.""" db: SQLDatabase = Field(exclude=True) - - # Override BaseTool.Config to appease mypy - # See https://github.com/pydantic/pydantic/issues/4173 - class Config(BaseTool.Config): - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True - extra = Extra.forbid + model_config = ConfigDict(arbitrary_types_allowed=True, extra="forbid") class QuerySQLDataBaseTool(BaseSQLDatabaseTool, BaseTool): @@ -117,7 +110,8 @@ class QuerySQLCheckerTool(BaseSQLDatabaseTool, BaseTool): Always use this tool before executing a query with query_sql_db! 
""" - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def initialize_llm_chain(cls, values: Dict[str, Any]) -> Dict[str, Any]: if "llm_chain" not in values: values["llm_chain"] = LLMChain( diff --git a/libs/langchain/langchain/tools/steamship_image_generation/tool.py b/libs/langchain/langchain/tools/steamship_image_generation/tool.py index 0ab7950a6980e..7ce1a2ccf379d 100644 --- a/libs/langchain/langchain/tools/steamship_image_generation/tool.py +++ b/libs/langchain/langchain/tools/steamship_image_generation/tool.py @@ -16,7 +16,7 @@ from enum import Enum from typing import TYPE_CHECKING, Dict, Optional -from pydantic import root_validator +from pydantic import model_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, @@ -59,7 +59,8 @@ class SteamshipImageGenerationTool(BaseTool): "Output: the UUID of a generated image" ) - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_size(cls, values: Dict) -> Dict: if "size" in values: size = values["size"] @@ -69,7 +70,8 @@ def validate_size(cls, values: Dict) -> Dict: return values - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" steamship_api_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/tools/vectorstore/tool.py b/libs/langchain/langchain/tools/vectorstore/tool.py index 126f4bbd4a035..3b226780d427e 100644 --- a/libs/langchain/langchain/tools/vectorstore/tool.py +++ b/libs/langchain/langchain/tools/vectorstore/tool.py @@ -3,7 +3,7 @@ import json from typing import Any, Dict, Optional -from pydantic import BaseModel, Field +from pydantic import BaseModel, ConfigDict, Field from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, @@ -21,11 +21,7 @@ class BaseVectorStoreTool(BaseModel): vectorstore: VectorStore = Field(exclude=True) llm: 
BaseLanguageModel = Field(default_factory=lambda: OpenAI(temperature=0)) - - class Config(BaseTool.Config): - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) def _create_description_from_template(values: Dict[str, Any]) -> Dict[str, Any]: diff --git a/libs/langchain/langchain/utilities/arxiv.py b/libs/langchain/langchain/utilities/arxiv.py index d958255abf0b3..90e5e73438537 100644 --- a/libs/langchain/langchain/utilities/arxiv.py +++ b/libs/langchain/langchain/utilities/arxiv.py @@ -3,7 +3,7 @@ import os from typing import Any, Dict, List, Optional -from pydantic import BaseModel, root_validator +from pydantic import BaseModel, model_validator from langchain.schema import Document @@ -32,15 +32,16 @@ class ArxivAPIWrapper(BaseModel): """ - arxiv_search: Any #: :meta private: - arxiv_exceptions: Any # :meta private: + arxiv_search: Any = None #: :meta private: + arxiv_exceptions: Any = None # :meta private: top_k_results: int = 3 ARXIV_MAX_QUERY_LENGTH = 300 load_max_docs: int = 100 load_all_available_meta: bool = False doc_content_chars_max: Optional[int] = 4000 - @root_validator() + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that the python package exists in environment.""" try: diff --git a/libs/langchain/langchain/utilities/awslambda.py b/libs/langchain/langchain/utilities/awslambda.py index 292277bc454e9..1e63a95a5ca0a 100644 --- a/libs/langchain/langchain/utilities/awslambda.py +++ b/libs/langchain/langchain/utilities/awslambda.py @@ -2,7 +2,7 @@ import json from typing import Any, Dict, Optional -from pydantic import BaseModel, Extra, root_validator +from pydantic import BaseModel, ConfigDict, model_validator class LambdaWrapper(BaseModel): @@ -16,17 +16,14 @@ class LambdaWrapper(BaseModel): """ - lambda_client: Any #: :meta private: + lambda_client: Any = None #: :meta private: function_name: Optional[str] = None 
awslambda_tool_name: Optional[str] = None awslambda_tool_description: Optional[str] = None + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that python package exists in environment.""" diff --git a/libs/langchain/langchain/utilities/bibtex.py b/libs/langchain/langchain/utilities/bibtex.py index afafaef22086c..5d30b710824e8 100644 --- a/libs/langchain/langchain/utilities/bibtex.py +++ b/libs/langchain/langchain/utilities/bibtex.py @@ -2,7 +2,7 @@ import logging from typing import Any, Dict, List, Mapping -from pydantic import BaseModel, Extra, root_validator +from pydantic import BaseModel, ConfigDict, model_validator logger = logging.getLogger(__name__) @@ -35,12 +35,10 @@ class BibtexparserWrapper(BaseModel): a bibtex file and fetch document summaries. """ - class Config: - """Configuration for this pydantic object.""" + model_config = ConfigDict(extra="forbid") - extra = Extra.forbid - - @root_validator() + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that the python package exists in environment.""" try: diff --git a/libs/langchain/langchain/utilities/bing_search.py b/libs/langchain/langchain/utilities/bing_search.py index 6dc2fe4073edb..92a0c0838aa42 100644 --- a/libs/langchain/langchain/utilities/bing_search.py +++ b/libs/langchain/langchain/utilities/bing_search.py @@ -6,7 +6,7 @@ from typing import Dict, List import requests -from pydantic import BaseModel, Extra, root_validator +from pydantic import BaseModel, ConfigDict, model_validator from langchain.utils import get_from_dict_or_env @@ -21,11 +21,7 @@ class BingSearchAPIWrapper(BaseModel): bing_subscription_key: str bing_search_url: str k: int = 10 - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid + 
model_config = ConfigDict(extra="forbid") def _bing_search_results(self, search_term: str, count: int) -> List[dict]: headers = {"Ocp-Apim-Subscription-Key": self.bing_subscription_key} @@ -42,7 +38,8 @@ def _bing_search_results(self, search_term: str, count: int) -> List[dict]: search_results = response.json() return search_results["webPages"]["value"] - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and endpoint exists in environment.""" bing_subscription_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/utilities/dataforseo_api_search.py b/libs/langchain/langchain/utilities/dataforseo_api_search.py index b8fc9ce864fa3..aae30803f14c5 100644 --- a/libs/langchain/langchain/utilities/dataforseo_api_search.py +++ b/libs/langchain/langchain/utilities/dataforseo_api_search.py @@ -4,7 +4,7 @@ import aiohttp import requests -from pydantic import BaseModel, Extra, Field, root_validator +from pydantic import BaseModel, ConfigDict, Field, model_validator from langchain.utils import get_from_dict_or_env @@ -12,11 +12,7 @@ class DataForSeoAPIWrapper(BaseModel): """Wrapper around the DataForSeo API.""" - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) default_params: dict = Field( default={ @@ -43,7 +39,8 @@ class Config: aiosession: Optional[aiohttp.ClientSession] = None """The aiohttp session to use for the DataForSEO SERP API.""" - @root_validator() + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that login and password exists in environment.""" login = get_from_dict_or_env(values, "api_login", "DATAFORSEO_LOGIN") diff --git a/libs/langchain/langchain/utilities/duckduckgo_search.py b/libs/langchain/langchain/utilities/duckduckgo_search.py index 
0d9e127058d4c..21a30861ba18c 100644 --- a/libs/langchain/langchain/utilities/duckduckgo_search.py +++ b/libs/langchain/langchain/utilities/duckduckgo_search.py @@ -5,7 +5,7 @@ """ from typing import Dict, List, Optional -from pydantic import BaseModel, Extra +from pydantic import BaseModel, ConfigDict, model_validator from pydantic.class_validators import root_validator @@ -19,13 +19,10 @@ class DuckDuckGoSearchAPIWrapper(BaseModel): safesearch: str = "moderate" time: Optional[str] = "y" max_results: int = 5 + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that python package exists in environment.""" try: diff --git a/libs/langchain/langchain/utilities/github.py b/libs/langchain/langchain/utilities/github.py index 8bc845363d9a3..e7def33310f58 100644 --- a/libs/langchain/langchain/utilities/github.py +++ b/libs/langchain/langchain/utilities/github.py @@ -4,7 +4,7 @@ import json from typing import TYPE_CHECKING, Any, Dict, List, Optional -from pydantic import BaseModel, Extra, root_validator +from pydantic import BaseModel, ConfigDict, model_validator from langchain.utils import get_from_dict_or_env @@ -15,19 +15,16 @@ class GitHubAPIWrapper(BaseModel): """Wrapper for GitHub API.""" - github: Any #: :meta private: - github_repo_instance: Any #: :meta private: + github: Any = None #: :meta private: + github_repo_instance: Any = None #: :meta private: github_repository: Optional[str] = None github_app_id: Optional[str] = None github_app_private_key: Optional[str] = None github_branch: Optional[str] = None + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api 
key and python package exists in environment.""" github_repository = get_from_dict_or_env( diff --git a/libs/langchain/langchain/utilities/golden_query.py b/libs/langchain/langchain/utilities/golden_query.py index df7e505faab85..09409526c30cd 100644 --- a/libs/langchain/langchain/utilities/golden_query.py +++ b/libs/langchain/langchain/utilities/golden_query.py @@ -3,7 +3,7 @@ from typing import Dict, Optional import requests -from pydantic import BaseModel, Extra, root_validator +from pydantic import BaseModel, ConfigDict, model_validator from langchain.utils import get_from_dict_or_env @@ -23,13 +23,10 @@ class GoldenQueryAPIWrapper(BaseModel): """ golden_api_key: Optional[str] = None + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" golden_api_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/utilities/google_places_api.py b/libs/langchain/langchain/utilities/google_places_api.py index 8b8ea7a93a941..c4d6a267480b5 100644 --- a/libs/langchain/langchain/utilities/google_places_api.py +++ b/libs/langchain/langchain/utilities/google_places_api.py @@ -4,7 +4,7 @@ import logging from typing import Any, Dict, Optional -from pydantic import BaseModel, Extra, root_validator +from pydantic import BaseModel, ConfigDict, model_validator from langchain.utils import get_from_dict_or_env @@ -30,16 +30,12 @@ class GooglePlacesAPIWrapper(BaseModel): """ gplaces_api_key: Optional[str] = None - google_map_client: Any #: :meta private: + google_map_client: Any = None #: :meta private: top_k_results: Optional[int] = None + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = 
True - - @root_validator() + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key is in your environment variable.""" gplaces_api_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/utilities/google_search.py b/libs/langchain/langchain/utilities/google_search.py index 2c118dd165f76..083833d34871f 100644 --- a/libs/langchain/langchain/utilities/google_search.py +++ b/libs/langchain/langchain/utilities/google_search.py @@ -1,7 +1,7 @@ """Util that calls Google Search.""" from typing import Any, Dict, List, Optional -from pydantic import BaseModel, Extra, root_validator +from pydantic import BaseModel, ConfigDict, model_validator from langchain.utils import get_from_dict_or_env @@ -45,16 +45,12 @@ class GoogleSearchAPIWrapper(BaseModel): .com """ - search_engine: Any #: :meta private: + search_engine: Any = None #: :meta private: google_api_key: Optional[str] = None google_cse_id: Optional[str] = None k: int = 10 siterestrict: bool = False - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") def _google_search_results(self, search_term: str, **kwargs: Any) -> List[dict]: cse = self.search_engine.cse() @@ -63,7 +59,8 @@ def _google_search_results(self, search_term: str, **kwargs: Any) -> List[dict]: res = cse.list(q=search_term, cx=self.google_cse_id, **kwargs).execute() return res.get("items", []) - @root_validator() + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" google_api_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/utilities/google_serper.py b/libs/langchain/langchain/utilities/google_serper.py index 9db376c4792b4..5ce87d9730ca5 100644 --- a/libs/langchain/langchain/utilities/google_serper.py +++ b/libs/langchain/langchain/utilities/google_serper.py @@ -3,6 +3,7 @@ import aiohttp 
import requests +from pydantic import ConfigDict, model_validator from pydantic.class_validators import root_validator from pydantic.main import BaseModel from typing_extensions import Literal @@ -42,13 +43,10 @@ class GoogleSerperAPIWrapper(BaseModel): tbs: Optional[str] = None serper_api_key: Optional[str] = None aiosession: Optional[aiohttp.ClientSession] = None + model_config = ConfigDict(arbitrary_types_allowed=True) - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True - - @root_validator() + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key exists in environment.""" serper_api_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/utilities/graphql.py b/libs/langchain/langchain/utilities/graphql.py index 1e8a7b20392f1..d4d2eaf3a68cb 100644 --- a/libs/langchain/langchain/utilities/graphql.py +++ b/libs/langchain/langchain/utilities/graphql.py @@ -1,7 +1,7 @@ import json from typing import Any, Callable, Dict, Optional -from pydantic import BaseModel, Extra, root_validator +from pydantic import BaseModel, ConfigDict, model_validator class GraphQLAPIWrapper(BaseModel): @@ -13,15 +13,12 @@ class GraphQLAPIWrapper(BaseModel): custom_headers: Optional[Dict[str, str]] = None graphql_endpoint: str - gql_client: Any #: :meta private: + gql_client: Any = None #: :meta private: gql_function: Callable[[str], Any] #: :meta private: + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that the python package exists in the environment.""" try: diff --git a/libs/langchain/langchain/utilities/jira.py b/libs/langchain/langchain/utilities/jira.py index d59a76b4722d4..e6d81e9ce5317 100644 --- a/libs/langchain/langchain/utilities/jira.py +++ 
b/libs/langchain/langchain/utilities/jira.py @@ -1,7 +1,7 @@ """Util that calls Jira.""" from typing import Any, Dict, List, Optional -from pydantic import BaseModel, Extra, root_validator +from pydantic import BaseModel, ConfigDict, model_validator from langchain.utils import get_from_dict_or_env @@ -10,18 +10,15 @@ class JiraAPIWrapper(BaseModel): """Wrapper for Jira API.""" - jira: Any #: :meta private: - confluence: Any + jira: Any = None #: :meta private: + confluence: Any = None jira_username: Optional[str] = None jira_api_token: Optional[str] = None jira_instance_url: Optional[str] = None + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" jira_username = get_from_dict_or_env(values, "jira_username", "JIRA_USERNAME") diff --git a/libs/langchain/langchain/utilities/metaphor_search.py b/libs/langchain/langchain/utilities/metaphor_search.py index d5d36c74496bb..8c1d8b6d83db4 100644 --- a/libs/langchain/langchain/utilities/metaphor_search.py +++ b/libs/langchain/langchain/utilities/metaphor_search.py @@ -7,7 +7,7 @@ import aiohttp import requests -from pydantic import BaseModel, Extra, root_validator +from pydantic import BaseModel, ConfigDict, model_validator from langchain.utils import get_from_dict_or_env @@ -19,11 +19,7 @@ class MetaphorSearchAPIWrapper(BaseModel): metaphor_api_key: str k: int = 10 - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") def _metaphor_search_results( self, @@ -60,7 +56,8 @@ def _metaphor_search_results( search_results = response.json() return search_results["results"] - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> 
Dict: """Validate that api key and endpoint exists in environment.""" metaphor_api_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/utilities/openweathermap.py b/libs/langchain/langchain/utilities/openweathermap.py index 7b157625e0bf5..e5b22f3d7cb4d 100644 --- a/libs/langchain/langchain/utilities/openweathermap.py +++ b/libs/langchain/langchain/utilities/openweathermap.py @@ -1,7 +1,7 @@ """Util that calls OpenWeatherMap using PyOWM.""" from typing import Any, Dict, Optional -from pydantic import BaseModel, Extra, root_validator +from pydantic import BaseModel, ConfigDict, model_validator from langchain.utils import get_from_dict_or_env @@ -16,15 +16,12 @@ class OpenWeatherMapAPIWrapper(BaseModel): 3. pip install pyowm """ - owm: Any + owm: Any = None openweathermap_api_key: Optional[str] = None + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key exists in environment.""" openweathermap_api_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/utilities/powerbi.py b/libs/langchain/langchain/utilities/powerbi.py index 5c00d017b108e..8f9b875507761 100644 --- a/libs/langchain/langchain/utilities/powerbi.py +++ b/libs/langchain/langchain/utilities/powerbi.py @@ -9,7 +9,7 @@ import aiohttp import requests from aiohttp import ServerTimeoutError -from pydantic import BaseModel, Field, root_validator, validator +from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator from requests.exceptions import Timeout _LOGGER = logging.getLogger(__name__) @@ -38,18 +38,16 @@ class PowerBIDataset(BaseModel): sample_rows_in_table_info: int = Field(default=1, gt=0, le=10) schemas: Dict[str, str] = Field(default_factory=dict) aiosession: Optional[aiohttp.ClientSession] = None + model_config = 
ConfigDict(arbitrary_types_allowed=True) - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True - - @validator("table_names", allow_reuse=True) + @field_validator("table_names") + @classmethod def fix_table_names(cls, table_names: List[str]) -> List[str]: """Fix the table names.""" return [fix_table_name(table) for table in table_names] - @root_validator(pre=True, allow_reuse=True) + @model_validator(mode="before") + @classmethod def token_or_credential_present(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Validate that at least one of token and credentials is present.""" if "token" in values or "credential" in values: diff --git a/libs/langchain/langchain/utilities/requests.py b/libs/langchain/langchain/utilities/requests.py index 2891701c499df..b9634c3574b89 100644 --- a/libs/langchain/langchain/utilities/requests.py +++ b/libs/langchain/langchain/utilities/requests.py @@ -4,7 +4,7 @@ import aiohttp import requests -from pydantic import BaseModel, Extra +from pydantic import BaseModel, ConfigDict class Requests(BaseModel): @@ -17,12 +17,7 @@ class Requests(BaseModel): headers: Optional[Dict[str, str]] = None aiosession: Optional[aiohttp.ClientSession] = None auth: Optional[Any] = None - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) def get(self, url: str, **kwargs: Any) -> requests.Response: """GET the URL and return the text.""" @@ -123,12 +118,7 @@ class TextRequestsWrapper(BaseModel): headers: Optional[Dict[str, str]] = None aiosession: Optional[aiohttp.ClientSession] = None auth: Optional[Any] = None - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) @property def requests(self) -> Requests: diff --git 
a/libs/langchain/langchain/utilities/scenexplain.py b/libs/langchain/langchain/utilities/scenexplain.py index 1150a34dc3f75..822fc4f821d2f 100644 --- a/libs/langchain/langchain/utilities/scenexplain.py +++ b/libs/langchain/langchain/utilities/scenexplain.py @@ -8,7 +8,8 @@ from typing import Dict import requests -from pydantic import BaseModel, BaseSettings, Field, root_validator +from pydantic import BaseModel, Field, model_validator +from pydantic_settings import BaseSettings from langchain.utils import get_from_dict_or_env @@ -23,7 +24,7 @@ class SceneXplainAPIWrapper(BaseSettings, BaseModel): and create a new API key. """ - scenex_api_key: str = Field(..., env="SCENEX_API_KEY") + scenex_api_key: str = Field(..., validation_alias="SCENEX_API_KEY") scenex_api_url: str = "https://api.scenex.jina.ai/v1/describe" def _describe_image(self, image: str) -> str: @@ -47,7 +48,8 @@ def _describe_image(self, image: str) -> str: return img.get("text", "") - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key exists in environment.""" scenex_api_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/utilities/searx_search.py b/libs/langchain/langchain/utilities/searx_search.py index e73e81c3804ff..1e060afebabe4 100644 --- a/libs/langchain/langchain/utilities/searx_search.py +++ b/libs/langchain/langchain/utilities/searx_search.py @@ -132,7 +132,14 @@ import aiohttp import requests -from pydantic import BaseModel, Extra, Field, PrivateAttr, root_validator, validator +from pydantic import ( + BaseModel, + ConfigDict, + Field, + PrivateAttr, + field_validator, + model_validator, +) from langchain.utils import get_from_dict_or_env @@ -209,7 +216,8 @@ class SearxSearchWrapper(BaseModel): k: int = 10 aiosession: Optional[Any] = None - @validator("unsecure") + @field_validator("unsecure") + @classmethod def disable_ssl_warnings(cls, v: bool) -> bool: """Disable SSL 
warnings.""" if v: @@ -223,7 +231,8 @@ def disable_ssl_warnings(cls, v: bool) -> bool: return v - @root_validator() + @model_validator() + @classmethod def validate_params(cls, values: Dict) -> Dict: """Validate that custom searx params are merged with default ones.""" user_params = values["params"] @@ -252,10 +261,7 @@ def validate_params(cls, values: Dict) -> Dict: return values - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") def _searx_api_query(self, params: dict) -> SearxResults: """Actual request to searx API.""" diff --git a/libs/langchain/langchain/utilities/serpapi.py b/libs/langchain/langchain/utilities/serpapi.py index 98f4214ce0614..8ee043d8555a1 100644 --- a/libs/langchain/langchain/utilities/serpapi.py +++ b/libs/langchain/langchain/utilities/serpapi.py @@ -7,7 +7,7 @@ from typing import Any, Dict, Optional, Tuple import aiohttp -from pydantic import BaseModel, Extra, Field, root_validator +from pydantic import BaseModel, ConfigDict, Field, model_validator from langchain.utils import get_from_dict_or_env @@ -40,7 +40,7 @@ class SerpAPIWrapper(BaseModel): serpapi = SerpAPIWrapper() """ - search_engine: Any #: :meta private: + search_engine: Any = None #: :meta private: params: dict = Field( default={ "engine": "google", @@ -51,14 +51,10 @@ class SerpAPIWrapper(BaseModel): ) serpapi_api_key: Optional[str] = None aiosession: Optional[aiohttp.ClientSession] = None + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True) - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = True - - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" serpapi_api_key = get_from_dict_or_env( diff --git a/libs/langchain/langchain/utilities/twilio.py 
b/libs/langchain/langchain/utilities/twilio.py index 45e009bb2b2b0..242c9234a2908 100644 --- a/libs/langchain/langchain/utilities/twilio.py +++ b/libs/langchain/langchain/utilities/twilio.py @@ -1,7 +1,7 @@ """Util that calls Twilio.""" from typing import Any, Dict, Optional -from pydantic import BaseModel, Extra, root_validator +from pydantic import BaseModel, ConfigDict, model_validator from langchain.utils import get_from_dict_or_env @@ -26,7 +26,7 @@ class TwilioAPIWrapper(BaseModel): twilio.run('test', '+12484345508') """ - client: Any #: :meta private: + client: Any = None #: :meta private: account_sid: Optional[str] = None """Twilio account string identifier.""" auth_token: Optional[str] = None @@ -42,14 +42,10 @@ class TwilioAPIWrapper(BaseModel): cell phone number. If you are using `messaging_service_sid`, this parameter must be empty. """ # noqa: E501 + model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=False) - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - arbitrary_types_allowed = False - - @root_validator() + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" try: diff --git a/libs/langchain/langchain/utilities/wikipedia.py b/libs/langchain/langchain/utilities/wikipedia.py index 1202f8b24e1a0..246773cec03be 100644 --- a/libs/langchain/langchain/utilities/wikipedia.py +++ b/libs/langchain/langchain/utilities/wikipedia.py @@ -2,7 +2,7 @@ import logging from typing import Any, Dict, List, Optional -from pydantic import BaseModel, root_validator +from pydantic import BaseModel, model_validator from langchain.schema import Document @@ -21,13 +21,14 @@ class WikipediaAPIWrapper(BaseModel): It limits the Document content by doc_content_chars_max. 
""" - wiki_client: Any #: :meta private: + wiki_client: Any = None #: :meta private: top_k_results: int = 3 lang: str = "en" load_all_available_meta: bool = False doc_content_chars_max: int = 4000 - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that the python package exists in environment.""" try: diff --git a/libs/langchain/langchain/utilities/wolfram_alpha.py b/libs/langchain/langchain/utilities/wolfram_alpha.py index a27aec051f40b..0582fc08ae663 100644 --- a/libs/langchain/langchain/utilities/wolfram_alpha.py +++ b/libs/langchain/langchain/utilities/wolfram_alpha.py @@ -1,7 +1,7 @@ """Util that calls WolframAlpha.""" from typing import Any, Dict, Optional -from pydantic import BaseModel, Extra, root_validator +from pydantic import BaseModel, ConfigDict, model_validator from langchain.utils import get_from_dict_or_env @@ -18,15 +18,12 @@ class WolframAlphaAPIWrapper(BaseModel): """ - wolfram_client: Any #: :meta private: + wolfram_client: Any = None #: :meta private: wolfram_alpha_appid: Optional[str] = None + model_config = ConfigDict(extra="forbid") - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - @root_validator() + @model_validator() + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" wolfram_alpha_appid = get_from_dict_or_env( diff --git a/libs/langchain/langchain/utilities/zapier.py b/libs/langchain/langchain/utilities/zapier.py index 5884f5e4e35ac..3ac95c4b74a89 100644 --- a/libs/langchain/langchain/utilities/zapier.py +++ b/libs/langchain/langchain/utilities/zapier.py @@ -16,7 +16,7 @@ import aiohttp import requests -from pydantic import BaseModel, Extra, root_validator +from pydantic import BaseModel, ConfigDict, model_validator from requests import Request, Session from langchain.utils import get_from_dict_or_env @@ -44,11 +44,7 @@ class 
ZapierNLAWrapper(BaseModel): zapier_nla_api_key: str zapier_nla_oauth_access_token: str zapier_nla_api_base: str = "https://nla.zapier.com/api/v1/" - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid + model_config = ConfigDict(extra="forbid") def _format_headers(self) -> Dict[str, str]: """Format headers for requests.""" @@ -110,7 +106,8 @@ def _create_action_request( # type: ignore[no-untyped-def] json=data, ) - @root_validator(pre=True) + @model_validator(mode="before") + @classmethod def validate_environment(cls, values: Dict) -> Dict: """Validate that api key exists in environment.""" diff --git a/libs/langchain/langchain/vectorstores/azuresearch.py b/libs/langchain/langchain/vectorstores/azuresearch.py index 09719a5ff2526..a675d0c5ffe23 100644 --- a/libs/langchain/langchain/vectorstores/azuresearch.py +++ b/libs/langchain/langchain/vectorstores/azuresearch.py @@ -18,7 +18,7 @@ ) import numpy as np -from pydantic import root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForRetrieverRun, @@ -492,13 +492,10 @@ class AzureSearchVectorStoreRetriever(BaseRetriever): "semantic_hybrid".""" k: int = 4 """Number of documents to return.""" + model_config = ConfigDict(arbitrary_types_allowed=True) - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True - - @root_validator() + @model_validator(mode="before") + @classmethod def validate_search_type(cls, values: Dict) -> Dict: """Validate search type.""" if "search_type" in values: diff --git a/libs/langchain/langchain/vectorstores/base.py b/libs/langchain/langchain/vectorstores/base.py index e00b2db7832bb..d2dadd8de1a53 100644 --- a/libs/langchain/langchain/vectorstores/base.py +++ b/libs/langchain/langchain/vectorstores/base.py @@ -22,7 +22,7 @@ TypeVar, ) -from pydantic import Field, root_validator +from pydantic import ConfigDict, Field, model_validator from 
langchain.callbacks.manager import ( AsyncCallbackManagerForRetrieverRun, @@ -473,13 +473,10 @@ class VectorStoreRetriever(BaseRetriever): "similarity_score_threshold", "mmr", ) + model_config = ConfigDict(arbitrary_types_allowed=True) - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True - - @root_validator() + @model_validator(mode="before") + @classmethod def validate_search_type(cls, values: Dict) -> Dict: """Validate search type.""" search_type = values["search_type"] diff --git a/libs/langchain/langchain/vectorstores/clickhouse.py b/libs/langchain/langchain/vectorstores/clickhouse.py index 2544d45fd7819..23bc8104343d3 100644 --- a/libs/langchain/langchain/vectorstores/clickhouse.py +++ b/libs/langchain/langchain/vectorstores/clickhouse.py @@ -8,7 +8,7 @@ from threading import Thread from typing import Any, Dict, Iterable, List, Optional, Tuple, Union -from pydantic import BaseSettings +from pydantic_settings import BaseSettings, SettingsConfigDict from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings @@ -95,10 +95,9 @@ class ClickhouseSettings(BaseSettings): def __getitem__(self, item: str) -> Any: return getattr(self, item) - class Config: - env_file = ".env" - env_prefix = "clickhouse_" - env_file_encoding = "utf-8" + model_config = SettingsConfigDict( + env_file=".env", env_prefix="clickhouse_", env_file_encoding="utf-8" + ) class Clickhouse(VectorStore): diff --git a/libs/langchain/langchain/vectorstores/myscale.py b/libs/langchain/langchain/vectorstores/myscale.py index 28e0824e1311c..90573a00950ab 100644 --- a/libs/langchain/langchain/vectorstores/myscale.py +++ b/libs/langchain/langchain/vectorstores/myscale.py @@ -7,7 +7,7 @@ from threading import Thread from typing import Any, Dict, Iterable, List, Optional, Tuple -from pydantic import BaseSettings +from pydantic_settings import BaseSettings, SettingsConfigDict from langchain.docstore.document import Document from 
langchain.embeddings.base import Embeddings @@ -87,10 +87,9 @@ class MyScaleSettings(BaseSettings): def __getitem__(self, item: str) -> Any: return getattr(self, item) - class Config: - env_file = ".env" - env_prefix = "myscale_" - env_file_encoding = "utf-8" + model_config = SettingsConfigDict( + env_file=".env", env_prefix="myscale_", env_file_encoding="utf-8" + ) class MyScale(VectorStore): diff --git a/libs/langchain/langchain/vectorstores/redis.py b/libs/langchain/langchain/vectorstores/redis.py index f6ed5c4011e43..d56498aa883bf 100644 --- a/libs/langchain/langchain/vectorstores/redis.py +++ b/libs/langchain/langchain/vectorstores/redis.py @@ -20,7 +20,7 @@ ) import numpy as np -from pydantic import root_validator +from pydantic import ConfigDict, model_validator from langchain.callbacks.manager import ( AsyncCallbackManagerForRetrieverRun, @@ -622,13 +622,10 @@ class RedisVectorStoreRetriever(VectorStoreRetriever): """Number of documents to return.""" score_threshold: float = 0.4 """Score threshold for similarity_limit search.""" + model_config = ConfigDict(arbitrary_types_allowed=True) - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True - - @root_validator() + @model_validator(mode="before") + @classmethod def validate_search_type(cls, values: Dict) -> Dict: """Validate search type.""" if "search_type" in values: diff --git a/libs/langchain/langchain/vectorstores/sklearn.py b/libs/langchain/langchain/vectorstores/sklearn.py index dcc6237c257af..fd9ae2bfa39e4 100644 --- a/libs/langchain/langchain/vectorstores/sklearn.py +++ b/libs/langchain/langchain/vectorstores/sklearn.py @@ -290,7 +290,9 @@ def max_marginal_relevance_search_by_vector( embedding, k=fetch_k, **kwargs ) indices, _ = zip(*indices_dists) - result_embeddings = self._embeddings_np[indices,] + result_embeddings = self._embeddings_np[ + indices, + ] mmr_selected = maximal_marginal_relevance( self._np.array(embedding, dtype=self._np.float32), result_embeddings, 
diff --git a/libs/langchain/langchain/vectorstores/starrocks.py b/libs/langchain/langchain/vectorstores/starrocks.py index 0076a4cf0622c..10b62c79178a1 100644 --- a/libs/langchain/langchain/vectorstores/starrocks.py +++ b/libs/langchain/langchain/vectorstores/starrocks.py @@ -8,7 +8,7 @@ from threading import Thread from typing import Any, Dict, Iterable, List, Optional, Tuple -from pydantic import BaseSettings +from pydantic_settings import BaseSettings, SettingsConfigDict from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings @@ -115,10 +115,9 @@ class StarRocksSettings(BaseSettings): def __getitem__(self, item: str) -> Any: return getattr(self, item) - class Config: - env_file = ".env" - env_prefix = "starrocks_" - env_file_encoding = "utf-8" + model_config = SettingsConfigDict( + env_file=".env", env_prefix="starrocks_", env_file_encoding="utf-8" + ) class StarRocks(VectorStore): diff --git a/libs/langchain/pyproject.toml b/libs/langchain/pyproject.toml index fa6c220492b80..3cd46121af3d3 100644 --- a/libs/langchain/pyproject.toml +++ b/libs/langchain/pyproject.toml @@ -12,7 +12,7 @@ langchain-server = "langchain.server:main" [tool.poetry.dependencies] python = ">=3.8.1,<4.0" -pydantic = "^1" +pydantic = "^2.1.1" SQLAlchemy = ">=1.4,<3" requests = "^2" PyYAML = ">=5.4.1" @@ -26,7 +26,6 @@ elasticsearch = {version = "^8", optional = true} opensearch-py = {version = "^2.0.0", optional = true} redis = {version = "^4", optional = true} manifest-ml = {version = "^0.0.1", optional = true} -spacy = {version = "^3", optional = true} nltk = {version = "^3", optional = true} transformers = {version = "^4", optional = true} beautifulsoup4 = {version = "^4", optional = true} @@ -52,7 +51,6 @@ openai = {version = "^0", optional = true} nlpcloud = {version = "^1", optional = true} nomic = {version = "^1.0.43", optional = true} huggingface_hub = {version = "^0", optional = true} -octoai-sdk = {version = "^0.1.1", optional = true} jina 
= {version = "^3.14", optional = true} google-search-results = {version = "^2", optional = true} sentence-transformers = {version = "^2", optional = true} @@ -81,7 +79,6 @@ pexpect = {version = "^4.8.0", optional = true} pyvespa = {version = "^0.33.0", optional = true} O365 = {version = "^2.0.26", optional = true} jq = {version = "^1.4.1", optional = true} -steamship = {version = "^2.16.9", optional = true} pdfminer-six = {version = "^20221105", optional = true} docarray = {version="^0.32.0", extras=["hnswlib"], optional=true} lxml = {version = "^4.9.2", optional = true} @@ -91,7 +88,6 @@ gql = {version = "^3.4.1", optional = true} pandas = {version = "^2.0.1", optional = true} telethon = {version = "^1.28.5", optional = true} neo4j = {version = "^5.8.1", optional = true} -zep-python = {version=">=0.32", optional=true} langkit = {version = ">=0.0.6, <0.1.0", optional = true} chardet = {version="^5.1.0", optional=true} requests-toolbelt = {version = "^1.0.0", optional = true} @@ -120,7 +116,6 @@ cassio = {version = "^0.0.7", optional = true} rdflib = {version = "^6.3.2", optional = true} sympy = {version = "^1.12", optional = true} rapidfuzz = {version = "^3.1.1", optional = true} -langsmith = "~0.0.11" rank-bm25 = {version = "^0.2.2", optional = true} amadeus = {version = ">=8.1.0", optional = true} geopandas = {version = "^0.13.1", optional = true} @@ -180,7 +175,6 @@ deeplake = "^3.6.8" libdeeplake = "^0.0.60" weaviate-client = "^3.15.5" torch = "^1.0.0" -chromadb = "^0.4.0" tiktoken = "^0.3.3" python-dotenv = "^1.0.0" sentence-transformers = "^2" diff --git a/libs/langchain/tests/mock_servers/robot/server.py b/libs/langchain/tests/mock_servers/robot/server.py index 5af50d3f75363..6c0b5ba610cb8 100644 --- a/libs/langchain/tests/mock_servers/robot/server.py +++ b/libs/langchain/tests/mock_servers/robot/server.py @@ -79,9 +79,9 @@ class WalkInput(BaseModel): """Input for walking.""" direction: Direction - speed: Optional[float] + speed: Optional[float] = None 
style_or_cautiousness: Union[Style, Cautiousness] - other_commands: Any + other_commands: Any = None class PublicCues(BaseModel): diff --git a/libs/langchain/tests/unit_tests/llms/fake_llm.py b/libs/langchain/tests/unit_tests/llms/fake_llm.py index e7fc0d6dec84d..02167cba1fab6 100644 --- a/libs/langchain/tests/unit_tests/llms/fake_llm.py +++ b/libs/langchain/tests/unit_tests/llms/fake_llm.py @@ -1,7 +1,7 @@ """Fake LLM wrapper for testing purposes.""" from typing import Any, Dict, List, Mapping, Optional, cast -from pydantic import validator +from pydantic.v1 import validator as v1_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM @@ -14,7 +14,9 @@ class FakeLLM(LLM): sequential_responses: Optional[bool] = False response_index: int = 0 - @validator("queries", always=True) + # TODO[pydantic]: Replace with a Pydantic v2 `field_validator`. + # Check https://docs.pydantic.dev/dev-v2/migration/#changes-to-validators for more information. 
+ @v1_validator("queries", always=True) def check_queries_required( cls, queries: Optional[Mapping], values: Mapping[str, Any] ) -> Optional[Mapping]: diff --git a/poetry.lock b/poetry.lock index efb683e057640..f2e51e789de39 100644 --- a/poetry.lock +++ b/poetry.lock @@ -133,6 +133,20 @@ files = [ {file = "alabaster-0.7.13.tar.gz", hash = "sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2"}, ] +[[package]] +name = "annotated-types" +version = "0.5.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.7" +files = [ + {file = "annotated_types-0.5.0-py3-none-any.whl", hash = "sha256:58da39888f92c276ad970249761ebea80ba544b77acddaa1a4d6cf78287d45fd"}, + {file = "annotated_types-0.5.0.tar.gz", hash = "sha256:47cdc3490d9ac1506ce92c7aaa76c579dc3509ff11e098fc867e5130ab7be802"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} + [[package]] name = "anyio" version = "3.7.1" @@ -297,23 +311,25 @@ tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pyte [[package]] name = "autodoc-pydantic" -version = "1.9.0" +version = "2.0.0" description = "Seamlessly integrate pydantic models in your Sphinx documentation." 
optional = false python-versions = ">=3.7.1,<4.0.0" files = [ - {file = "autodoc_pydantic-1.9.0-py3-none-any.whl", hash = "sha256:cbf7ec2f27f913629bd38f9944fa6c4a86541c3cadba4a6fa9d2079e500223d8"}, - {file = "autodoc_pydantic-1.9.0.tar.gz", hash = "sha256:0f35f8051abe77b5ae16d8a1084c47a5871435e2ca9060e36c838d063c03cc89"}, + {file = "autodoc_pydantic-2.0.0-py3-none-any.whl", hash = "sha256:9ac5315064ccac214c48348918eef05b67b8bea6e56a6257a220e2b1cc064f82"}, + {file = "autodoc_pydantic-2.0.0.tar.gz", hash = "sha256:4cfd6573631f498a26c3597bfa6c79fd962aef5feaf44917d35dbad7048f4f1c"}, ] [package.dependencies] -pydantic = ">=1.5,<2.0.0" -Sphinx = ">=3.4" +importlib-metadata = {version = ">1", markers = "python_version <= \"3.8\""} +pydantic = ">=2.0,<3.0.0" +pydantic-settings = ">=2.0,<3.0.0" +Sphinx = ">=4.0" [package.extras] dev = ["coverage (>=7,<8)", "flake8 (>=3,<4)", "pytest (>=7,<8)", "sphinx-copybutton (>=0.4,<0.5)", "sphinx-rtd-theme (>=1.0,<2.0)", "sphinx-tabs (>=3,<4)", "sphinxcontrib-mermaid (>=0.7,<0.8)", "tox (>=3,<4)"] docs = ["sphinx-copybutton (>=0.4,<0.5)", "sphinx-rtd-theme (>=1.0,<2.0)", "sphinx-tabs (>=3,<4)", "sphinxcontrib-mermaid (>=0.7,<0.8)"] -erdantic = ["erdantic (>=0.5,<0.6)"] +erdantic = ["erdantic (>=0.6,<0.7)"] test = ["coverage (>=7,<8)", "pytest (>=7,<8)"] [[package]] @@ -1629,7 +1645,7 @@ files = [ [[package]] name = "langchain" -version = "0.0.240" +version = "0.0.244" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.8.1,<4.0" @@ -1640,18 +1656,17 @@ develop = true aiohttp = "^3.8.3" async-timeout = {version = "^4.0.0", markers = "python_version < \"3.11\""} dataclasses-json = "^0.5.7" -langsmith = "~0.0.11" numexpr = "^2.8.4" numpy = "^1" openapi-schema-pydantic = "^1.2" -pydantic = "^1" +pydantic = "^2" PyYAML = ">=5.4.1" requests = "^2" SQLAlchemy = ">=1.4,<3" tenacity = "^8.1.0" [package.extras] -all = ["O365 (>=2.0.26,<3.0.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", 
"amadeus (>=8.1.0)", "anthropic (>=0.3,<0.4)", "arxiv (>=1.4,<2.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "awadb (>=0.3.3,<0.4.0)", "azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-vision (>=0.11.1b1,<0.12.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "beautifulsoup4 (>=4,<5)", "clarifai (>=9.1.0)", "clickhouse-connect (>=0.5.14,<0.6.0)", "cohere (>=3,<4)", "deeplake (>=3.6.8,<4.0.0)", "docarray[hnswlib] (>=0.32.0,<0.33.0)", "duckduckgo-search (>=3.8.3,<4.0.0)", "elasticsearch (>=8,<9)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "google-api-python-client (==2.70.0)", "google-auth (>=2.18.1,<3.0.0)", "google-search-results (>=2,<3)", "gptcache (>=0.1.7)", "html2text (>=2020.1.16,<2021.0.0)", "huggingface_hub (>=0,<1)", "jina (>=3.14,<4.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "lancedb (>=0.1,<0.2)", "langkit (>=0.0.6,<0.1.0)", "lark (>=1.1.5,<2.0.0)", "libdeeplake (>=0.0.60,<0.0.61)", "lxml (>=4.9.2,<5.0.0)", "manifest-ml (>=0.0.1,<0.0.2)", "marqo (>=0.11.0,<0.12.0)", "momento (>=1.5.0,<2.0.0)", "nebula3-python (>=3.4.0,<4.0.0)", "neo4j (>=5.8.1,<6.0.0)", "networkx (>=2.6.3,<3.0.0)", "nlpcloud (>=1,<2)", "nltk (>=3,<4)", "nomic (>=1.0.43,<2.0.0)", "octoai-sdk (>=0.1.1,<0.2.0)", "openai (>=0,<1)", "openlm (>=0.0.5,<0.0.6)", "opensearch-py (>=2.0.0,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pexpect (>=4.8.0,<5.0.0)", "pgvector (>=0.1.6,<0.2.0)", "pinecone-client (>=2,<3)", "pinecone-text (>=0.4.2,<0.5.0)", "psycopg2-binary (>=2.9.5,<3.0.0)", "pymongo (>=4.3.3,<5.0.0)", "pyowm (>=3.3.0,<4.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pytesseract (>=0.3.10,<0.4.0)", "pyvespa (>=0.33.0,<0.34.0)", "qdrant-client (>=1.3.1,<2.0.0)", "rdflib (>=6.3.2,<7.0.0)", "redis (>=4,<5)", "requests-toolbelt (>=1.0.0,<2.0.0)", "sentence-transformers (>=2,<3)", "singlestoredb (>=0.7.1,<0.8.0)", "spacy (>=3,<4)", "steamship (>=2.16.9,<3.0.0)", "tensorflow-text (>=2.11.0,<3.0.0)", "tigrisdb 
(>=1.0.0b6,<2.0.0)", "tiktoken (>=0.3.2,<0.4.0)", "torch (>=1,<3)", "transformers (>=4,<5)", "weaviate-client (>=3,<4)", "wikipedia (>=1,<2)", "wolframalpha (==5.0.0)"] +all = ["O365 (>=2.0.26,<3.0.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "amadeus (>=8.1.0)", "anthropic (>=0.3,<0.4)", "arxiv (>=1.4,<2.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "awadb (>=0.3.3,<0.4.0)", "azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-vision (>=0.11.1b1,<0.12.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "beautifulsoup4 (>=4,<5)", "clarifai (>=9.1.0)", "clickhouse-connect (>=0.5.14,<0.6.0)", "cohere (>=3,<4)", "deeplake (>=3.6.8,<4.0.0)", "docarray[hnswlib] (>=0.32.0,<0.33.0)", "duckduckgo-search (>=3.8.3,<4.0.0)", "elasticsearch (>=8,<9)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "google-api-python-client (==2.70.0)", "google-auth (>=2.18.1,<3.0.0)", "google-search-results (>=2,<3)", "gptcache (>=0.1.7)", "html2text (>=2020.1.16,<2021.0.0)", "huggingface_hub (>=0,<1)", "jina (>=3.14,<4.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "lancedb (>=0.1,<0.2)", "langkit (>=0.0.6,<0.1.0)", "lark (>=1.1.5,<2.0.0)", "libdeeplake (>=0.0.60,<0.0.61)", "lxml (>=4.9.2,<5.0.0)", "manifest-ml (>=0.0.1,<0.0.2)", "marqo (>=0.11.0,<0.12.0)", "momento (>=1.5.0,<2.0.0)", "nebula3-python (>=3.4.0,<4.0.0)", "neo4j (>=5.8.1,<6.0.0)", "networkx (>=2.6.3,<3.0.0)", "nlpcloud (>=1,<2)", "nltk (>=3,<4)", "nomic (>=1.0.43,<2.0.0)", "octoai-sdk (>=0.1.1,<0.2.0)", "openai (>=0,<1)", "openlm (>=0.0.5,<0.0.6)", "opensearch-py (>=2.0.0,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pexpect (>=4.8.0,<5.0.0)", "pgvector (>=0.1.6,<0.2.0)", "pinecone-client (>=2,<3)", "pinecone-text (>=0.4.2,<0.5.0)", "psycopg2-binary (>=2.9.5,<3.0.0)", "pymongo (>=4.3.3,<5.0.0)", "pyowm (>=3.3.0,<4.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pytesseract (>=0.3.10,<0.4.0)", "python-arango (>=7.5.9,<8.0.0)", "pyvespa (>=0.33.0,<0.34.0)", 
"qdrant-client (>=1.3.1,<2.0.0)", "rdflib (>=6.3.2,<7.0.0)", "redis (>=4,<5)", "requests-toolbelt (>=1.0.0,<2.0.0)", "sentence-transformers (>=2,<3)", "singlestoredb (>=0.7.1,<0.8.0)", "spacy (>=3,<4)", "steamship (>=2.16.9,<3.0.0)", "tensorflow-text (>=2.11.0,<3.0.0)", "tigrisdb (>=1.0.0b6,<2.0.0)", "tiktoken (>=0.3.2,<0.4.0)", "torch (>=1,<3)", "transformers (>=4,<5)", "weaviate-client (>=3,<4)", "wikipedia (>=1,<2)", "wolframalpha (==5.0.0)"] azure = ["azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-vision (>=0.11.1b1,<0.12.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-core (>=1.26.4,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "azure-search-documents (==11.4.0a20230509004)", "openai (>=0,<1)"] clarifai = ["clarifai (>=9.1.0)"] cohere = ["cohere (>=3,<4)"] @@ -1668,21 +1683,6 @@ text-helpers = ["chardet (>=5.1.0,<6.0.0)"] type = "directory" url = "libs/langchain" -[[package]] -name = "langsmith" -version = "0.0.12" -description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
-optional = false -python-versions = ">=3.8.1,<4.0" -files = [ - {file = "langsmith-0.0.12-py3-none-any.whl", hash = "sha256:d7a4360d1984b57689c6470ffcef79dcb217dd7129bdfd3bcfcb22dc058787f6"}, - {file = "langsmith-0.0.12.tar.gz", hash = "sha256:5abf30ac4ebb6b68955bea617669585e80a6fe9def90664503d5fbd6d095e91e"}, -] - -[package.dependencies] -pydantic = ">=1,<2" -requests = ">=2,<3" - [[package]] name = "linkchecker" version = "10.2.1" @@ -2554,55 +2554,150 @@ files = [ [[package]] name = "pydantic" -version = "1.10.11" -description = "Data validation and settings management using python type hints" +version = "2.1.1" +description = "Data validation using Python type hints" optional = false python-versions = ">=3.7" files = [ - {file = "pydantic-1.10.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ff44c5e89315b15ff1f7fdaf9853770b810936d6b01a7bcecaa227d2f8fe444f"}, - {file = "pydantic-1.10.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a6c098d4ab5e2d5b3984d3cb2527e2d6099d3de85630c8934efcfdc348a9760e"}, - {file = "pydantic-1.10.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16928fdc9cb273c6af00d9d5045434c39afba5f42325fb990add2c241402d151"}, - {file = "pydantic-1.10.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0588788a9a85f3e5e9ebca14211a496409cb3deca5b6971ff37c556d581854e7"}, - {file = "pydantic-1.10.11-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e9baf78b31da2dc3d3f346ef18e58ec5f12f5aaa17ac517e2ffd026a92a87588"}, - {file = "pydantic-1.10.11-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:373c0840f5c2b5b1ccadd9286782852b901055998136287828731868027a724f"}, - {file = "pydantic-1.10.11-cp310-cp310-win_amd64.whl", hash = "sha256:c3339a46bbe6013ef7bdd2844679bfe500347ac5742cd4019a88312aa58a9847"}, - {file = "pydantic-1.10.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:08a6c32e1c3809fbc49debb96bf833164f3438b3696abf0fbeceb417d123e6eb"}, - {file = 
"pydantic-1.10.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a451ccab49971af043ec4e0d207cbc8cbe53dbf148ef9f19599024076fe9c25b"}, - {file = "pydantic-1.10.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b02d24f7b2b365fed586ed73582c20f353a4c50e4be9ba2c57ab96f8091ddae"}, - {file = "pydantic-1.10.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3f34739a89260dfa420aa3cbd069fbcc794b25bbe5c0a214f8fb29e363484b66"}, - {file = "pydantic-1.10.11-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:e297897eb4bebde985f72a46a7552a7556a3dd11e7f76acda0c1093e3dbcf216"}, - {file = "pydantic-1.10.11-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d185819a7a059550ecb85d5134e7d40f2565f3dd94cfd870132c5f91a89cf58c"}, - {file = "pydantic-1.10.11-cp311-cp311-win_amd64.whl", hash = "sha256:4400015f15c9b464c9db2d5d951b6a780102cfa5870f2c036d37c23b56f7fc1b"}, - {file = "pydantic-1.10.11-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2417de68290434461a266271fc57274a138510dca19982336639484c73a07af6"}, - {file = "pydantic-1.10.11-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:331c031ba1554b974c98679bd0780d89670d6fd6f53f5d70b10bdc9addee1713"}, - {file = "pydantic-1.10.11-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8268a735a14c308923e8958363e3a3404f6834bb98c11f5ab43251a4e410170c"}, - {file = "pydantic-1.10.11-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:44e51ba599c3ef227e168424e220cd3e544288c57829520dc90ea9cb190c3248"}, - {file = "pydantic-1.10.11-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d7781f1d13b19700b7949c5a639c764a077cbbdd4322ed505b449d3ca8edcb36"}, - {file = "pydantic-1.10.11-cp37-cp37m-win_amd64.whl", hash = "sha256:7522a7666157aa22b812ce14c827574ddccc94f361237ca6ea8bb0d5c38f1629"}, - {file = "pydantic-1.10.11-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:bc64eab9b19cd794a380179ac0e6752335e9555d214cfcb755820333c0784cb3"}, - {file = "pydantic-1.10.11-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8dc77064471780262b6a68fe67e013298d130414d5aaf9b562c33987dbd2cf4f"}, - {file = "pydantic-1.10.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe429898f2c9dd209bd0632a606bddc06f8bce081bbd03d1c775a45886e2c1cb"}, - {file = "pydantic-1.10.11-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:192c608ad002a748e4a0bed2ddbcd98f9b56df50a7c24d9a931a8c5dd053bd3d"}, - {file = "pydantic-1.10.11-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ef55392ec4bb5721f4ded1096241e4b7151ba6d50a50a80a2526c854f42e6a2f"}, - {file = "pydantic-1.10.11-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:41e0bb6efe86281623abbeeb0be64eab740c865388ee934cd3e6a358784aca6e"}, - {file = "pydantic-1.10.11-cp38-cp38-win_amd64.whl", hash = "sha256:265a60da42f9f27e0b1014eab8acd3e53bd0bad5c5b4884e98a55f8f596b2c19"}, - {file = "pydantic-1.10.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:469adf96c8e2c2bbfa655fc7735a2a82f4c543d9fee97bd113a7fb509bf5e622"}, - {file = "pydantic-1.10.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e6cbfbd010b14c8a905a7b10f9fe090068d1744d46f9e0c021db28daeb8b6de1"}, - {file = "pydantic-1.10.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abade85268cc92dff86d6effcd917893130f0ff516f3d637f50dadc22ae93999"}, - {file = "pydantic-1.10.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e9738b0f2e6c70f44ee0de53f2089d6002b10c33264abee07bdb5c7f03038303"}, - {file = "pydantic-1.10.11-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:787cf23e5a0cde753f2eabac1b2e73ae3844eb873fd1f5bdbff3048d8dbb7604"}, - {file = "pydantic-1.10.11-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:174899023337b9fc685ac8adaa7b047050616136ccd30e9070627c1aaab53a13"}, - {file = 
"pydantic-1.10.11-cp39-cp39-win_amd64.whl", hash = "sha256:1954f8778489a04b245a1e7b8b22a9d3ea8ef49337285693cf6959e4b757535e"}, - {file = "pydantic-1.10.11-py3-none-any.whl", hash = "sha256:008c5e266c8aada206d0627a011504e14268a62091450210eda7c07fabe6963e"}, - {file = "pydantic-1.10.11.tar.gz", hash = "sha256:f66d479cf7eb331372c470614be6511eae96f1f120344c25f3f9bb59fb1b5528"}, + {file = "pydantic-2.1.1-py3-none-any.whl", hash = "sha256:43bdbf359d6304c57afda15c2b95797295b702948082d4c23851ce752f21da70"}, + {file = "pydantic-2.1.1.tar.gz", hash = "sha256:22d63db5ce4831afd16e7c58b3192d3faf8f79154980d9397d9867254310ba4b"}, ] [package.dependencies] -typing-extensions = ">=4.2.0" +annotated-types = ">=0.4.0" +pydantic-core = "2.4.0" +typing-extensions = ">=4.6.1" [package.extras] -dotenv = ["python-dotenv (>=0.10.4)"] -email = ["email-validator (>=1.0.3)"] +email = ["email-validator (>=2.0.0)"] + +[[package]] +name = "pydantic-core" +version = "2.4.0" +description = "" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pydantic_core-2.4.0-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:2ca4687dd996bde7f3c420def450797feeb20dcee2b9687023e3323c73fc14a2"}, + {file = "pydantic_core-2.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:782fced7d61469fd1231b184a80e4f2fa7ad54cd7173834651a453f96f29d673"}, + {file = "pydantic_core-2.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6213b471b68146af97b8551294e59e7392c2117e28ffad9c557c65087f4baee3"}, + {file = "pydantic_core-2.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63797499a219d8e81eb4e0c42222d0a4c8ec896f5c76751d4258af95de41fdf1"}, + {file = "pydantic_core-2.4.0-cp310-cp310-manylinux_2_24_armv7l.whl", hash = "sha256:0455876d575a35defc4da7e0a199596d6c773e20d3d42fa1fc29f6aa640369ed"}, + {file = "pydantic_core-2.4.0-cp310-cp310-manylinux_2_24_ppc64le.whl", hash = "sha256:8c938c96294d983dcf419b54dba2d21056959c22911d41788efbf949a29ae30d"}, + {file 
= "pydantic_core-2.4.0-cp310-cp310-manylinux_2_24_s390x.whl", hash = "sha256:878a5017d93e776c379af4e7b20f173c82594d94fa073059bcc546789ad50bf8"}, + {file = "pydantic_core-2.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:69159afc2f2dc43285725f16143bc5df3c853bc1cb7df6021fce7ef1c69e8171"}, + {file = "pydantic_core-2.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:54df7df399b777c1fd144f541c95d351b3aa110535a6810a6a569905d106b6f3"}, + {file = "pydantic_core-2.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e412607ca89a0ced10758dfb8f9adcc365ce4c1c377e637c01989a75e9a9ec8a"}, + {file = "pydantic_core-2.4.0-cp310-none-win32.whl", hash = "sha256:853f103e2b9a58832fdd08a587a51de8b552ae90e1a5d167f316b7eabf8d7dde"}, + {file = "pydantic_core-2.4.0-cp310-none-win_amd64.whl", hash = "sha256:3ba2c9c94a9176f6321a879c8b864d7c5b12d34f549a4c216c72ce213d7d953c"}, + {file = "pydantic_core-2.4.0-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:a8b7acd04896e8f161e1500dc5f218017db05c1d322f054e89cbd089ce5d0071"}, + {file = "pydantic_core-2.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:16468bd074fa4567592d3255bf25528ed41e6b616d69bf07096bdb5b66f947d1"}, + {file = "pydantic_core-2.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cba5ad5eef02c86a1f3da00544cbc59a510d596b27566479a7cd4d91c6187a11"}, + {file = "pydantic_core-2.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7206e41e04b443016e930e01685bab7a308113c0b251b3f906942c8d4b48fcb"}, + {file = "pydantic_core-2.4.0-cp311-cp311-manylinux_2_24_armv7l.whl", hash = "sha256:c1375025f0bfc9155286ebae8eecc65e33e494c90025cda69e247c3ccd2bab00"}, + {file = "pydantic_core-2.4.0-cp311-cp311-manylinux_2_24_ppc64le.whl", hash = "sha256:3534118289e33130ed3f1cc487002e8d09b9f359be48b02e9cd3de58ce58fba9"}, + {file = "pydantic_core-2.4.0-cp311-cp311-manylinux_2_24_s390x.whl", hash = 
"sha256:94d2b36a74623caab262bf95f0e365c2c058396082bd9d6a9e825657d0c1e7fa"}, + {file = "pydantic_core-2.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:af24ad4fbaa5e4a2000beae0c3b7fd1c78d7819ab90f9370a1cfd8998e3f8a3c"}, + {file = "pydantic_core-2.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bf10963d8aed8bbe0165b41797c9463d4c5c8788ae6a77c68427569be6bead41"}, + {file = "pydantic_core-2.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:68199ada7c310ddb8c76efbb606a0de656b40899388a7498954f423e03fc38be"}, + {file = "pydantic_core-2.4.0-cp311-none-win32.whl", hash = "sha256:6f855bcc96ed3dd56da7373cfcc9dcbabbc2073cac7f65c185772d08884790ce"}, + {file = "pydantic_core-2.4.0-cp311-none-win_amd64.whl", hash = "sha256:de39eb3bab93a99ddda1ac1b9aa331b944d8bcc4aa9141148f7fd8ee0299dafc"}, + {file = "pydantic_core-2.4.0-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:f773b39780323a0499b53ebd91a28ad11cde6705605d98d999dfa08624caf064"}, + {file = "pydantic_core-2.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a297c0d6c61963c5c3726840677b798ca5b7dfc71bc9c02b9a4af11d23236008"}, + {file = "pydantic_core-2.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:546064c55264156b973b5e65e5fafbe5e62390902ce3cf6b4005765505e8ff56"}, + {file = "pydantic_core-2.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36ba9e728588588f0196deaf6751b9222492331b5552f865a8ff120869d372e0"}, + {file = "pydantic_core-2.4.0-cp312-cp312-manylinux_2_24_armv7l.whl", hash = "sha256:57a53a75010c635b3ad6499e7721eaa3b450e03f6862afe2dbef9c8f66e46ec8"}, + {file = "pydantic_core-2.4.0-cp312-cp312-manylinux_2_24_ppc64le.whl", hash = "sha256:4b262bbc13022f2097c48a21adcc360a81d83dc1d854c11b94953cd46d7d3c07"}, + {file = "pydantic_core-2.4.0-cp312-cp312-manylinux_2_24_s390x.whl", hash = "sha256:01947ad728f426fa07fcb26457ebf90ce29320259938414bc0edd1476e75addb"}, + {file = 
"pydantic_core-2.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b2799c2eaf182769889761d4fb4d78b82bc47dae833799fedbf69fc7de306faa"}, + {file = "pydantic_core-2.4.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a08fd490ba36d1fbb2cd5dcdcfb9f3892deb93bd53456724389135712b5fc735"}, + {file = "pydantic_core-2.4.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1e8a7c62d15a5c4b307271e4252d76ebb981d6251c6ecea4daf203ef0179ea4f"}, + {file = "pydantic_core-2.4.0-cp312-none-win32.whl", hash = "sha256:9206c14a67c38de7b916e486ae280017cf394fa4b1aa95cfe88621a4e1d79725"}, + {file = "pydantic_core-2.4.0-cp312-none-win_amd64.whl", hash = "sha256:884235507549a6b2d3c4113fb1877ae263109e787d9e0eb25c35982ab28d0399"}, + {file = "pydantic_core-2.4.0-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:4cbe929efa77a806e8f1a97793f2dc3ea3475ae21a9ed0f37c21320fe93f6f50"}, + {file = "pydantic_core-2.4.0-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:9137289de8fe845c246a8c3482dd0cb40338846ba683756d8f489a4bd8fddcae"}, + {file = "pydantic_core-2.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5d8e764b5646623e57575f624f8ebb8f7a9f7fd1fae682ef87869ca5fec8dcf"}, + {file = "pydantic_core-2.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8fba0aff4c407d0274e43697e785bcac155ad962be57518d1c711f45e72da70f"}, + {file = "pydantic_core-2.4.0-cp37-cp37m-manylinux_2_24_armv7l.whl", hash = "sha256:30527d173e826f2f7651f91c821e337073df1555e3b5a0b7b1e2c39e26e50678"}, + {file = "pydantic_core-2.4.0-cp37-cp37m-manylinux_2_24_ppc64le.whl", hash = "sha256:bd7d1dde70ff3e09e4bc7a1cbb91a7a538add291bfd5b3e70ef1e7b45192440f"}, + {file = "pydantic_core-2.4.0-cp37-cp37m-manylinux_2_24_s390x.whl", hash = "sha256:72f1216ca8cef7b8adacd4c4c6b89c3b0c4f97503197f5284c80f36d6e4edd30"}, + {file = "pydantic_core-2.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:b013c7861a7c7bfcec48fd709513fea6f9f31727e7a0a93ca0dd12e056740717"}, + {file = "pydantic_core-2.4.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:478f5f6d7e32bd4a04d102160efb2d389432ecf095fe87c555c0a6fc4adfc1a4"}, + {file = "pydantic_core-2.4.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d9610b47b5fe4aacbbba6a9cb5f12cbe864eec99dbfed5710bd32ef5dd8a5d5b"}, + {file = "pydantic_core-2.4.0-cp37-none-win32.whl", hash = "sha256:ff246c0111076c8022f9ba325c294f2cb5983403506989253e04dbae565e019b"}, + {file = "pydantic_core-2.4.0-cp37-none-win_amd64.whl", hash = "sha256:d0c2b713464a8e263a243ae7980d81ce2de5ac59a9f798a282e44350b42dc516"}, + {file = "pydantic_core-2.4.0-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:12ef6838245569fd60a179fade81ca4b90ae2fa0ef355d616f519f7bb27582db"}, + {file = "pydantic_core-2.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:49db206eb8fdc4b4f30e6e3e410584146d813c151928f94ec0db06c4f2595538"}, + {file = "pydantic_core-2.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a507d7fa44688bbac76af6521e488b3da93de155b9cba6f2c9b7833ce243d59"}, + {file = "pydantic_core-2.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffe18407a4d000c568182ce5388bbbedeb099896904e43fc14eee76cfae6dec5"}, + {file = "pydantic_core-2.4.0-cp38-cp38-manylinux_2_24_armv7l.whl", hash = "sha256:fa8e48001b39d54d97d7b380a0669fa99fc0feeb972e35a2d677ba59164a9a22"}, + {file = "pydantic_core-2.4.0-cp38-cp38-manylinux_2_24_ppc64le.whl", hash = "sha256:394f12a2671ff8c4dfa2e85be6c08be0651ad85bc1e6aa9c77c21671baaf28cd"}, + {file = "pydantic_core-2.4.0-cp38-cp38-manylinux_2_24_s390x.whl", hash = "sha256:2f9ea0355f90db2a76af530245fa42f04d98f752a1236ed7c6809ec484560d5b"}, + {file = "pydantic_core-2.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:61d4e713f467abcdd59b47665d488bb898ad3dd47ce7446522a50e0cbd8e8279"}, + {file = "pydantic_core-2.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:453862ab268f6326b01f067ed89cb3a527d34dc46f6f4eeec46a15bbc706d0da"}, + {file = "pydantic_core-2.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:56a85fa0dab1567bd0cac10f0c3837b03e8a0d939e6a8061a3a420acd97e9421"}, + {file = "pydantic_core-2.4.0-cp38-none-win32.whl", hash = "sha256:0d726108c1c0380b88b6dd4db559f0280e0ceda9e077f46ff90bc85cd4d03e77"}, + {file = "pydantic_core-2.4.0-cp38-none-win_amd64.whl", hash = "sha256:047580388644c473b934d27849f8ed8dbe45df0adb72104e78b543e13bf69762"}, + {file = "pydantic_core-2.4.0-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:867d3eea954bea807cabba83cfc939c889a18576d66d197c60025b15269d7cc0"}, + {file = "pydantic_core-2.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:664402ef0c238a7f8a46efb101789d5f2275600fb18114446efec83cfadb5b66"}, + {file = "pydantic_core-2.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64e8012ad60a5f0da09ed48725e6e923d1be25f2f091a640af6079f874663813"}, + {file = "pydantic_core-2.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac2b680de398f293b68183317432b3d67ab3faeba216aec18de0c395cb5e3060"}, + {file = "pydantic_core-2.4.0-cp39-cp39-manylinux_2_24_armv7l.whl", hash = "sha256:8efc1be43b036c2b6bcfb1451df24ee0ddcf69c31351003daf2699ed93f5687b"}, + {file = "pydantic_core-2.4.0-cp39-cp39-manylinux_2_24_ppc64le.whl", hash = "sha256:d93aedbc4614cc21b9ab0d0c4ccd7143354c1f7cffbbe96ae5216ad21d1b21b5"}, + {file = "pydantic_core-2.4.0-cp39-cp39-manylinux_2_24_s390x.whl", hash = "sha256:af788b64e13d52fc3600a68b16d31fa8d8573e3ff2fc9a38f8a60b8d94d1f012"}, + {file = "pydantic_core-2.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:97c6349c81cee2e69ef59eba6e6c08c5936e6b01c2d50b9e4ac152217845ae09"}, + {file = "pydantic_core-2.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:cc086ddb6dc654a15deeed1d1f2bcb1cb924ebd70df9dca738af19f64229b06c"}, + {file = "pydantic_core-2.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:e953353180bec330c3b830891d260b6f8e576e2d18db3c78d314e56bb2276066"}, + {file = "pydantic_core-2.4.0-cp39-none-win32.whl", hash = "sha256:6feb4b64d11d5420e517910d60a907d08d846cacaf4e029668725cd21d16743c"}, + {file = "pydantic_core-2.4.0-cp39-none-win_amd64.whl", hash = "sha256:153a61ac4030fa019b70b31fb7986461119230d3ba0ab661c757cfea652f4332"}, + {file = "pydantic_core-2.4.0-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:3fcf529382b282a30b466bd7af05be28e22aa620e016135ac414f14e1ee6b9e1"}, + {file = "pydantic_core-2.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2edef05b63d82568b877002dc4cb5cc18f8929b59077120192df1e03e0c633f8"}, + {file = "pydantic_core-2.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da055a1b0bfa8041bb2ff586b2cb0353ed03944a3472186a02cc44a557a0e661"}, + {file = "pydantic_core-2.4.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:77dadc764cf7c5405e04866181c5bd94a447372a9763e473abb63d1dfe9b7387"}, + {file = "pydantic_core-2.4.0-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:a4ea23b07f29487a7bef2a869f68c7ee0e05424d81375ce3d3de829314c6b5ec"}, + {file = "pydantic_core-2.4.0-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:382f0baa044d674ad59455a5eff83d7965572b745cc72df35c52c2ce8c731d37"}, + {file = "pydantic_core-2.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:08f89697625e453421401c7f661b9d1eb4c9e4c0a12fd256eeb55b06994ac6af"}, + {file = "pydantic_core-2.4.0-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:43a405ce520b45941df9ff55d0cd09762017756a7b413bbad3a6e8178e64a2c2"}, + {file = "pydantic_core-2.4.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:584a7a818c84767af16ce8bda5d4f7fedb37d3d231fc89928a192f567e4ef685"}, + {file = "pydantic_core-2.4.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:04922fea7b13cd480586fa106345fe06e43220b8327358873c22d8dfa7a711c7"}, + {file = "pydantic_core-2.4.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:17156abac20a9feed10feec867fddd91a80819a485b0107fe61f09f2117fe5f3"}, + {file = "pydantic_core-2.4.0-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4e562cc63b04636cde361fd47569162f1daa94c759220ff202a8129902229114"}, + {file = "pydantic_core-2.4.0-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:90f3785146f701e053bb6b9e8f53acce2c919aca91df88bd4975be0cb926eb41"}, + {file = "pydantic_core-2.4.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:e40b1e97edd3dc127aa53d8a5e539a3d0c227d71574d3f9ac1af02d58218a122"}, + {file = "pydantic_core-2.4.0-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:b27f3e67f6e031f6620655741b7d0d6bebea8b25d415924b3e8bfef2dd7bd841"}, + {file = "pydantic_core-2.4.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be86c2eb12fb0f846262ace9d8f032dc6978b8cb26a058920ecb723dbcb87d05"}, + {file = "pydantic_core-2.4.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4665f7ed345012a8d2eddf4203ef145f5f56a291d010382d235b94e91813f88a"}, + {file = "pydantic_core-2.4.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:79262be5a292d1df060f29b9a7cdd66934801f987a817632d7552534a172709a"}, + {file = "pydantic_core-2.4.0-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:5fd905a69ac74eaba5041e21a1e8b1a479dab2b41c93bdcc4c1cede3c12a8d86"}, + {file = "pydantic_core-2.4.0-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:2ad538b7e07343001934417cdc8584623b4d8823c5b8b258e75ec8d327cec969"}, + {file = "pydantic_core-2.4.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:dd2429f7635ad4857b5881503f9c310be7761dc681c467a9d27787b674d1250a"}, + {file = "pydantic_core-2.4.0-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = 
"sha256:efff8b6761a1f6e45cebd1b7a6406eb2723d2d5710ff0d1b624fe11313693989"}, + {file = "pydantic_core-2.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32a1e0352558cd7ccc014ffe818c7d87b15ec6145875e2cc5fa4bb7351a1033d"}, + {file = "pydantic_core-2.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a027f41c5008571314861744d83aff75a34cf3a07022e0be32b214a5bc93f7f1"}, + {file = "pydantic_core-2.4.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1927f0e15d190f11f0b8344373731e28fd774c6d676d8a6cfadc95c77214a48b"}, + {file = "pydantic_core-2.4.0-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7aa82d483d5fb867d4fb10a138ffd57b0f1644e99f2f4f336e48790ada9ada5e"}, + {file = "pydantic_core-2.4.0-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b85778308bf945e9b33ac604e6793df9b07933108d20bdf53811bc7c2798a4af"}, + {file = "pydantic_core-2.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:3ded19dcaefe2f6706d81e0db787b59095f4ad0fbadce1edffdf092294c8a23f"}, + {file = "pydantic_core-2.4.0.tar.gz", hash = "sha256:ec3473c9789cc00c7260d840c3db2c16dbfc816ca70ec87a00cddfa3e1a1cdd5"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + +[[package]] +name = "pydantic-settings" +version = "2.0.2" +description = "Settings management using Pydantic" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pydantic_settings-2.0.2-py3-none-any.whl", hash = "sha256:6183a2abeab465d5a3ab69758e9a22d38b0cc2ba193f0b85f6971a252ea630f6"}, + {file = "pydantic_settings-2.0.2.tar.gz", hash = "sha256:342337fff50b23585e807a86dec85037900972364435c55c2fc00d16ff080539"}, +] + +[package.dependencies] +pydantic = ">=2.0.1" +python-dotenv = ">=0.21.0" [[package]] name = "pydata-sphinx-theme" @@ -2655,6 +2750,20 @@ files = [ [package.dependencies] six = ">=1.5" +[[package]] +name = "python-dotenv" +version = "1.0.0" +description = "Read key-value pairs from a 
.env file and set them as environment variables" +optional = false +python-versions = ">=3.8" +files = [ + {file = "python-dotenv-1.0.0.tar.gz", hash = "sha256:a8df96034aae6d2d50a4ebe8216326c61c3eb64836776504fcca410e5937a3ba"}, + {file = "python_dotenv-1.0.0-py3-none-any.whl", hash = "sha256:f5971a9226b701070a4bf2c38c89e5a3f0d64de8debda981d1db98583009122a"}, +] + +[package.extras] +cli = ["click (>=5.0)"] + [[package]] name = "python-json-logger" version = "2.0.7" @@ -3829,4 +3938,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "eeeb245db6de8d353cbfc59842821a31f6759344834b116df27f3b517ef55d32" +content-hash = "0b0e5423adb701e26e33e145692b860fc7b471bd270359a6308eeacd0fff308c" diff --git a/pyproject.toml b/pyproject.toml index ab18b4e1e8a08..20fc6ab2b1ae7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,10 +10,11 @@ repository = "https://www.github.com/hwchase17/langchain" [tool.poetry.dependencies] python = ">=3.8.1,<4.0" +autodoc-pydantic = "2.0.0" [tool.poetry.group.docs.dependencies] langchain = { path = "libs/langchain/", develop = true } -autodoc_pydantic = "^1.8.0" +autodoc_pydantic = "2.0.0" myst_parser = "^0.18.1" nbsphinx = "^0.8.9" sphinx = "^4.5.0"