Add support for Ollama #218

Draft · wants to merge 2 commits into base: main
8 changes: 8 additions & 0 deletions app/core/config_provider.py
@@ -13,13 +13,21 @@ def __init__(self):
"password": os.getenv("NEO4J_PASSWORD"),
}
self.github_key = os.getenv("GITHUB_PRIVATE_KEY")
self.ollama_endpoint = os.getenv("OLLAMA_ENDPOINT", "http://localhost:11434")
self.ollama_model = os.getenv("OLLAMA_MODEL", "llama2")

def get_neo4j_config(self):
return self.neo4j_config

def get_github_key(self):
return self.github_key

def get_ollama_config(self):
return {
"endpoint": self.ollama_endpoint,
"model": self.ollama_model,
}

def get_demo_repo_list(self):
return [
{
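For reference, a minimal sketch (not part of the PR) of how the new settings would be picked up, assuming the module-level config_provider instance that tool_service.py imports and that the environment variables are set before the provider is constructed:

import os

# Defaults mirror the diff above; override these before the process starts.
os.environ.setdefault("OLLAMA_ENDPOINT", "http://localhost:11434")
os.environ.setdefault("OLLAMA_MODEL", "llama2")

from app.core.config_provider import config_provider  # module-level instance, as imported elsewhere in this PR

print(config_provider.get_ollama_config())
# {'endpoint': 'http://localhost:11434', 'model': 'llama2'}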
5 changes: 5 additions & 0 deletions app/modules/intelligence/agents/agent_factory.py
@@ -24,6 +24,7 @@
    AgentType,
    ProviderService,
)
from langchain_ollama import Ollama


class AgentFactory:
@@ -70,6 +71,10 @@ def _create_agent(
"code_generation_agent": lambda: CodeGenerationChatAgent(
mini_llm, reasoning_llm, self.db
),
"ollama_agent": lambda: Ollama(
base_url=self.provider_service.get_ollama_endpoint(),
model=self.provider_service.get_ollama_model(),
),
}

if agent_id in agent_map:
5 changes: 5 additions & 0 deletions app/modules/intelligence/agents/agent_injector_service.py
@@ -28,6 +28,7 @@
    AgentType,
    ProviderService,
)
from langchain_ollama import Ollama

logger = logging.getLogger(__name__)

@@ -59,6 +60,10 @@ def _initialize_agents(self) -> Dict[str, Any]:
"code_generation_agent": CodeGenerationChatAgent(
mini_llm, reasoning_llm, self.sql_db
),
"ollama_agent": Ollama(
base_url=self.provider_service.get_ollama_endpoint(),
model=self.provider_service.get_ollama_model(),
),
}

def get_agent(self, agent_id: str) -> Any:
20 changes: 20 additions & 0 deletions app/modules/intelligence/provider/provider_service.py
@@ -7,6 +7,7 @@
from langchain_anthropic import ChatAnthropic
from langchain_openai.chat_models import ChatOpenAI
from portkey_ai import PORTKEY_GATEWAY_URL, createHeaders
from langchain_ollama import Ollama

from app.modules.key_management.secret_manager import SecretManager
from app.modules.users.user_preferences_model import UserPreferences
@@ -44,6 +45,11 @@ async def list_available_llms(self) -> List[ProviderInfo]:
name="Anthropic",
description="An AI safety-focused company known for models like Claude.",
),
ProviderInfo(
id="ollama",
name="Ollama",
description="A provider for running open source models locally.",
),
]

async def set_global_ai_provider(self, user_id: str, provider: str):
@@ -195,6 +201,12 @@ def get_large_llm(self, agent_type: AgentType):
                default_headers=portkey_headers,
            )

        elif preferred_provider == "ollama":
            logging.info("Initializing Ollama LLM")
            ollama_endpoint = os.getenv("OLLAMA_ENDPOINT", "http://localhost:11434")
            ollama_model = os.getenv("OLLAMA_MODEL", "llama2")
            self.llm = Ollama(base_url=ollama_endpoint, model=ollama_model)

        else:
            raise ValueError("Invalid LLM provider selected.")

@@ -323,6 +335,12 @@ def get_small_llm(self, agent_type: AgentType):
                default_headers=portkey_headers,
            )

        elif preferred_provider == "ollama":
            logging.info("Initializing Ollama LLM")
            ollama_endpoint = os.getenv("OLLAMA_ENDPOINT", "http://localhost:11434")
            ollama_model = os.getenv("OLLAMA_MODEL", "llama2")
            self.llm = Ollama(base_url=ollama_endpoint, model=ollama_model)

        else:
            raise ValueError("Invalid LLM provider selected.")

@@ -337,6 +355,8 @@ def get_llm_provider_name(self) -> str:
return "OpenAI"
elif isinstance(llm, ChatAnthropic):
return "Anthropic"
elif isinstance(llm, Ollama):
return "Ollama"
elif isinstance(llm, LLM):
return "OpenAI" if llm.model.split("/")[0] == "openai" else "Anthropic"
else:
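For reference, a standalone sketch (not part of the PR) of what the new "ollama" branch does, using the same environment defaults. One caveat: recent langchain-ollama releases export OllamaLLM and ChatOllama rather than a plain Ollama class, so the import below assumes the newer naming and may need to match whatever version the project pins.

import os
from langchain_ollama import OllamaLLM  # newer package naming; the PR imports `Ollama`

# Requires a local Ollama server with the model pulled, e.g. `ollama pull llama2`.
endpoint = os.getenv("OLLAMA_ENDPOINT", "http://localhost:11434")
model = os.getenv("OLLAMA_MODEL", "llama2")

llm = OllamaLLM(base_url=endpoint, model=model)
print(llm.invoke("Summarize what Ollama does in one sentence."))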
12 changes: 12 additions & 0 deletions app/modules/intelligence/tools/tool_service.py
@@ -36,6 +36,8 @@
    GetNodesFromTags,
)
from app.modules.intelligence.tools.tool_schema import ToolInfo
from langchain_ollama import Ollama
from app.core.config_provider import config_provider


class ToolService:
@@ -65,8 +67,18 @@ def _initialize_tools(self) -> Dict[str, Any]:
"get_node_neighbours_from_node_id": GetNodeNeighboursFromNodeIdTool(
self.db
),
"ollama_tool": Ollama(
base_url=self._get_ollama_endpoint(),
model=self._get_ollama_model(),
),
Comment on lines +70 to +73
🛠️ Refactor suggestion

Add error handling and description for the Ollama tool.

Two improvements needed:

  1. Add error handling for missing configuration
  2. Add a description attribute for the tool listing functionality

Consider this implementation:

     "ollama_tool": Ollama(
         base_url=self._get_ollama_endpoint(),
         model=self._get_ollama_model(),
+        description="Local LLM powered by Ollama",
     ),

Also, add error handling:

     "ollama_tool": (
+        lambda: Ollama(
             base_url=self._get_ollama_endpoint(),
             model=self._get_ollama_model(),
+            description="Local LLM powered by Ollama",
+        )
+        if config_provider.get_ollama_config()
+        else None
     )(),

Committable suggestion skipped: line range outside the PR's diff.

        }

    def _get_ollama_endpoint(self) -> str:
        return config_provider.get_ollama_config()["endpoint"]

    def _get_ollama_model(self) -> str:
        return config_provider.get_ollama_config()["model"]

Comment on lines +76 to +81
🛠️ Refactor suggestion

Add validation and error handling to configuration methods.

The methods should validate the configuration values and handle missing or malformed config gracefully.

Consider this implementation:

     def _get_ollama_endpoint(self) -> str:
-        return config_provider.get_ollama_config()["endpoint"]
+        config = config_provider.get_ollama_config()
+        if not config or "endpoint" not in config:
+            raise ValueError("Ollama endpoint configuration is missing")
+        endpoint = config["endpoint"]
+        if not endpoint.startswith(("http://", "https://")):
+            raise ValueError(f"Invalid Ollama endpoint URL format: {endpoint}")
+        return endpoint

     def _get_ollama_model(self) -> str:
-        return config_provider.get_ollama_config()["model"]
+        config = config_provider.get_ollama_config()
+        if not config or "model" not in config:
+            raise ValueError("Ollama model configuration is missing")
+        return config["model"]
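If the suggested validation is adopted as written, its behavior can be sanity-checked in isolation. A self-contained sketch follows; validate_endpoint is a hypothetical standalone helper whose body mirrors the suggested _get_ollama_endpoint logic.

# Hypothetical standalone helper mirroring the suggested validation logic.
def validate_endpoint(config: dict) -> str:
    if not config or "endpoint" not in config:
        raise ValueError("Ollama endpoint configuration is missing")
    endpoint = config["endpoint"]
    if not endpoint.startswith(("http://", "https://")):
        raise ValueError(f"Invalid Ollama endpoint URL format: {endpoint}")
    return endpoint

print(validate_endpoint({"endpoint": "http://localhost:11434", "model": "llama2"}))  # passes
try:
    validate_endpoint({"endpoint": "localhost:11434"})  # missing scheme
except ValueError as exc:
    print(exc)  # Invalid Ollama endpoint URL format: localhost:11434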
    async def run_tool(self, tool_id: str, params: Dict[str, Any]) -> Dict[str, Any]:
        tool = self.tools.get(tool_id)
        if not tool: