diff --git a/src/crewai/cli/crew_chat.py b/src/crewai/cli/crew_chat.py
index 5348b648fa..eee0592be3 100644
--- a/src/crewai/cli/crew_chat.py
+++ b/src/crewai/cli/crew_chat.py
@@ -27,9 +27,17 @@ def run_chat():
     crew_tool_schema = generate_crew_tool_schema(crew_chat_inputs)
     system_message = build_system_message(crew_chat_inputs)
 
+    # Call the LLM to generate the introductory message
+    introductory_message = chat_llm.call(
+        messages=[{"role": "system", "content": system_message}]
+    )
+    click.secho(f"\nAssistant: {introductory_message}\n", fg="green")
+
     messages = [
         {"role": "system", "content": system_message},
+        {"role": "assistant", "content": introductory_message},
     ]
+
     available_functions = {
         crew_chat_inputs.crew_name: create_tool_function(crew, messages),
     }
@@ -77,6 +85,8 @@ def build_system_message(crew_chat_inputs: ChatInputs) -> str:
         "If a user asks a question outside the crew's scope, provide a brief answer and remind them of the crew's purpose. "
         "After calling the tool, be prepared to take user feedback and make adjustments as needed. "
         "If you are ever unsure about a user's request or need clarification, ask the user for more information."
+        " Before doing anything else, introduce yourself with a friendly message like: 'Hey! I'm here to help you with [crew's purpose]. Could you please provide me with [inputs] so we can get started?' "
+        "For example: 'Hey! I'm here to help you with uncovering and reporting cutting-edge developments through thorough research and detailed analysis. Could you please provide me with a topic you're interested in? This will help us generate a comprehensive research report and detailed analysis.'"
         f"\nCrew Name: {crew_chat_inputs.crew_name}"
         f"\nCrew Description: {crew_chat_inputs.crew_description}"
     )
diff --git a/src/crewai/cli/fetch_crew_inputs.py b/src/crewai/cli/fetch_crew_inputs.py
deleted file mode 100644
index 63f61b0f9e..0000000000
--- a/src/crewai/cli/fetch_crew_inputs.py
+++ /dev/null
@@ -1,86 +0,0 @@
-import json
-import subprocess
-from typing import Optional
-
-import click
-from packaging import version
-
-from crewai.cli.utils import read_toml
-from crewai.cli.version import get_crewai_version
-from crewai.types.crew_chat import ChatInputs
-
-
-def fetch_crew_inputs() -> Optional[ChatInputs]:
-    """
-    Fetch the crew's ChatInputs (a structure containing crew_description and input fields)
-    by running "uv run fetch_chat_inputs", which prints JSON representing a ChatInputs object.
-
-    This function will parse that JSON and return a ChatInputs instance.
-    If the output is empty or invalid, an empty ChatInputs object is returned.
- """ - - command = ["uv", "run", "fetch_chat_inputs"] - crewai_version = get_crewai_version() - min_required_version = "0.87.0" - - pyproject_data = read_toml() - crew_name = pyproject_data.get("project", {}).get("name", None) - - # If you're on an older poetry-based setup and version < min_required_version - if pyproject_data.get("tool", {}).get("poetry") and ( - version.parse(crewai_version) < version.parse(min_required_version) - ): - click.secho( - f"You are running an older version of crewAI ({crewai_version}) that uses poetry pyproject.toml.\n" - f"Please run `crewai update` to update your pyproject.toml to use uv.", - fg="red", - ) - - try: - result = subprocess.run(command, capture_output=True, text=True, check=True) - stdout_lines = result.stdout.strip().splitlines() - - # Find the line that contains the JSON data - json_line = next( - ( - line - for line in stdout_lines - if line.startswith("{") and line.endswith("}") - ), - None, - ) - - if not json_line: - click.echo( - "No valid JSON output received from `fetch_chat_inputs` command.", - err=True, - ) - return None - - try: - raw_data = json.loads(json_line) - chat_inputs = ChatInputs(**raw_data) - if crew_name: - chat_inputs.crew_name = crew_name - return chat_inputs - except json.JSONDecodeError as e: - click.echo( - f"Unable to parse JSON from `fetch_chat_inputs` output: {e}\nOutput: {repr(json_line)}", - err=True, - ) - return None - - except subprocess.CalledProcessError as e: - click.echo(f"An error occurred while fetching chat inputs: {e}", err=True) - click.echo(e.output, err=True, nl=True) - - if pyproject_data.get("tool", {}).get("poetry"): - click.secho( - "It's possible that you are using an old version of crewAI that uses poetry.\n" - "Please run `crewai update` to update your pyproject.toml to use uv.", - fg="yellow", - ) - except Exception as e: - click.echo(f"An unexpected error occurred: {e}", err=True) - - return None diff --git a/src/crewai/cli/templates/crew/main.py b/src/crewai/cli/templates/crew/main.py index 9344372b01..8f68a83c76 100644 --- a/src/crewai/cli/templates/crew/main.py +++ b/src/crewai/cli/templates/crew/main.py @@ -1,10 +1,8 @@ #!/usr/bin/env python import sys -import json import warnings from {{folder_name}}.crew import {{crew_name}} -from crewai.utilities.llm_utils import create_llm warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd") @@ -15,13 +13,10 @@ def run(): """ - Run the crew, allowing CLI overrides for required inputs. - Usage example: - uv run run_crew -- --topic="New Topic" --some_other_field="Value" + Run the crew. 
""" inputs = { 'topic': 'AI LLMs' - # Add any other default fields here } try: diff --git a/src/crewai/crew.py b/src/crewai/crew.py index 74862b9e9f..b9555070fe 100644 --- a/src/crewai/crew.py +++ b/src/crewai/crew.py @@ -209,10 +209,6 @@ class Crew(BaseModel): default=None, description="LLM used to handle chatting with the crew.", ) - chat_inputs: Optional[ChatInputs] = Field( - default=None, - description="Holds descriptions of the crew as well as named inputs for chat usage.", - ) _knowledge: Optional[Knowledge] = PrivateAttr( default=None, ) diff --git a/src/crewai/llm.py b/src/crewai/llm.py index 77bec3355e..44dbe51860 100644 --- a/src/crewai/llm.py +++ b/src/crewai/llm.py @@ -146,7 +146,6 @@ def __init__( self.callbacks = callbacks self.context_window_size = 0 - # For safety, we disable passing init params to next calls litellm.drop_params = True self.set_callbacks(callbacks) @@ -247,40 +246,36 @@ def call( function_name = tool_call.function.name if function_name in available_functions: - # Parse arguments try: function_args = json.loads(tool_call.function.arguments) except json.JSONDecodeError as e: logging.warning(f"Failed to parse function arguments: {e}") - return text_response # Fallback to text response + return text_response fn = available_functions[function_name] try: # Call the actual tool function result = fn(**function_args) - # Return the result directly return result except Exception as e: logging.error( f"Error executing function '{function_name}': {e}" ) - return text_response # Fallback to text response + return text_response else: logging.warning( f"Tool call requested unknown function '{function_name}'" ) - return text_response # Fallback to text response + return text_response except Exception as e: - # Check if context length was exceeded, otherwise log if not LLMContextLengthExceededException( str(e) )._is_context_limit_error(str(e)): logging.error(f"LiteLLM call failed: {str(e)}") - # Re-raise the exception raise def supports_function_calling(self) -> bool: diff --git a/src/crewai/task.py b/src/crewai/task.py index 662e9e1470..030bce779c 100644 --- a/src/crewai/task.py +++ b/src/crewai/task.py @@ -393,7 +393,7 @@ def _execute_core( self.retry_count += 1 context = self.i18n.errors("validation_error").format( guardrail_result_error=guardrail_result.error, - task_output=task_output.raw + task_output=task_output.raw, ) printer = Printer() printer.print( @@ -431,9 +431,7 @@ def _execute_core( content = ( json_output if json_output - else pydantic_output.model_dump_json() - if pydantic_output - else result + else pydantic_output.model_dump_json() if pydantic_output else result ) self._save_file(content) @@ -453,11 +451,12 @@ def prompt(self) -> str: tasks_slices = [self.description, output] return "\n".join(tasks_slices) - - def interpolate_inputs_and_add_conversation_history(self, inputs: Dict[str, Union[str, int, float]]) -> None: + def interpolate_inputs_and_add_conversation_history( + self, inputs: Dict[str, Union[str, int, float]] + ) -> None: """Interpolate inputs into the task description, expected output, and output file path. Add conversation history if present. - + Args: inputs: Dictionary mapping template variables to their values. Supported value types are strings, integers, and floats. 
@@ -497,16 +496,15 @@ def interpolate_inputs_and_add_conversation_history(self, inputs: Dict[str, Unio input_string=self._original_output_file, inputs=inputs ) except (KeyError, ValueError) as e: - raise ValueError(f"Error interpolating output_file path: {str(e)}") from e - + raise ValueError( + f"Error interpolating output_file path: {str(e)}" + ) from e + if "crew_chat_messages" in inputs and inputs["crew_chat_messages"]: - # Fetch the conversation history instruction using self.i18n.slice conversation_instruction = self.i18n.slice( "conversation_history_instruction" ) - print("crew_chat_messages:", inputs["crew_chat_messages"]) - # Ensure that inputs["crew_chat_messages"] is a string crew_chat_messages_json = str(inputs["crew_chat_messages"]) try: @@ -515,15 +513,15 @@ def interpolate_inputs_and_add_conversation_history(self, inputs: Dict[str, Unio print("An error occurred while parsing crew chat messages:", e) raise - # Process the messages to build conversation history conversation_history = "\n".join( f"{msg['role'].capitalize()}: {msg['content']}" for msg in crew_chat_messages if isinstance(msg, dict) and "role" in msg and "content" in msg ) - # Add the instruction and conversation history to the description - self.description += f"\n\n{conversation_instruction}\n\n{conversation_history}" + self.description += ( + f"\n\n{conversation_instruction}\n\n{conversation_history}" + ) def interpolate_only( self, input_string: Optional[str], inputs: Dict[str, Union[str, int, float]] diff --git a/src/crewai/utilities/llm_utils.py b/src/crewai/utilities/llm_utils.py index ea1783fa9c..f2d09a6675 100644 --- a/src/crewai/utilities/llm_utils.py +++ b/src/crewai/utilities/llm_utils.py @@ -34,7 +34,6 @@ def create_llm( if isinstance(llm_value, str): try: created_llm = LLM(model=llm_value) - print(f"LLM created with model='{llm_value}'") return created_llm except Exception as e: print(f"Failed to instantiate LLM with model='{llm_value}': {e}") @@ -197,7 +196,6 @@ def _llm_via_environment_or_fallback() -> Optional[LLM]: # Try creating the LLM try: new_llm = LLM(**llm_params) - print(f"LLM created with model='{model_name}'") return new_llm except Exception as e: print(
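The crew_chat.py hunks bootstrap the conversation: before reading any user input, run_chat() now asks the chat LLM for an introduction derived from the system message, prints it, and seeds the message history with both turns. Below is a minimal runnable sketch of that flow; StubChatLLM is a hypothetical stand-in for crewAI's chat LLM, and only the call(messages=...) signature and the two-turn seeding are taken from the diff.

from typing import Dict, List


class StubChatLLM:
    """Hypothetical stand-in for crewAI's chat LLM (assumption, not the real class)."""

    def call(self, messages: List[Dict[str, str]]) -> str:
        # A real LLM would derive this greeting from the system prompt.
        return "Hey! I'm here to help you with research. What topic interests you?"


def bootstrap_chat(system_message: str, llm: StubChatLLM) -> List[Dict[str, str]]:
    # 1. Ask the LLM for an introduction based only on the system prompt,
    #    mirroring the chat_llm.call(...) added in run_chat().
    introductory_message = llm.call(
        messages=[{"role": "system", "content": system_message}]
    )
    print(f"\nAssistant: {introductory_message}\n")

    # 2. Seed the history with both turns so every later completion
    #    sees the greeting the user just read.
    return [
        {"role": "system", "content": system_message},
        {"role": "assistant", "content": introductory_message},
    ]


if __name__ == "__main__":
    history = bootstrap_chat(
        "You are a helpful assistant for a research crew.", StubChatLLM()
    )
    assert history[1]["role"] == "assistant"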
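The llm.py hunk trims comments but keeps the tool-dispatch control flow intact: parse the tool call's JSON arguments, look up the function by name in available_functions, execute it, and fall back to the plain text response if parsing, lookup, or execution fails. A sketch of that pattern under those assumptions follows; dispatch_tool_call and the sample tools are illustrative, not crewAI API.

import json
import logging
from typing import Any, Callable, Dict


def dispatch_tool_call(
    function_name: str,
    raw_arguments: str,
    available_functions: Dict[str, Callable[..., Any]],
    text_response: str,
) -> Any:
    """Run the requested tool; fall back to the text response on any failure."""
    if function_name not in available_functions:
        logging.warning("Tool call requested unknown function '%s'", function_name)
        return text_response
    try:
        function_args = json.loads(raw_arguments)
    except json.JSONDecodeError as e:
        logging.warning("Failed to parse function arguments: %s", e)
        return text_response
    try:
        return available_functions[function_name](**function_args)
    except Exception as e:
        logging.error("Error executing function '%s': %s", function_name, e)
        return text_response


# Successful dispatch, then an unknown-function fallback.
tools = {"add": lambda a, b: a + b}
assert dispatch_tool_call("add", '{"a": 2, "b": 3}', tools, "fallback") == 5
assert dispatch_tool_call("missing", "{}", tools, "fallback") == "fallback"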
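In task.py, interpolate_inputs_and_add_conversation_history() parses inputs["crew_chat_messages"] as JSON and flattens it into "Role: content" lines that are appended to the task description after the conversation_history_instruction slice. A small sketch of just the formatting step, using made-up messages:

import json

# Sample history as it would arrive in inputs["crew_chat_messages"] (made up).
crew_chat_messages_json = json.dumps(
    [
        {"role": "user", "content": "Research AI LLMs."},
        {"role": "assistant", "content": "Sure, kicking off the crew."},
        {"bad": "entries without role/content are skipped"},
    ]
)

crew_chat_messages = json.loads(crew_chat_messages_json)

# Same filtering and formatting as the diff: keep dicts carrying both keys.
conversation_history = "\n".join(
    f"{msg['role'].capitalize()}: {msg['content']}"
    for msg in crew_chat_messages
    if isinstance(msg, dict) and "role" in msg and "content" in msg
)

print(conversation_history)
# User: Research AI LLMs.
# Assistant: Sure, kicking off the crew.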