20 changes: 20 additions & 0 deletions lib/crewai/src/crewai/agent/core.py
@@ -44,6 +44,8 @@
MemoryRetrievalCompletedEvent,
MemoryRetrievalStartedEvent,
)
from crewai.events.types.task_events import TaskFailedEvent
from crewai.hooks import LLMCallBlockedError
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.lite_agent import LiteAgent
@@ -409,6 +411,15 @@ def execute_task(
),
)
raise e
if isinstance(e, LLMCallBlockedError):
crewai_event_bus.emit(
self,
event=TaskFailedEvent( # type: ignore[no-untyped-call]
task=task,
error=str(e),
),
)
raise e
self._times_executed += 1
if self._times_executed > self.max_retry_limit:
crewai_event_bus.emit(
@@ -615,6 +626,15 @@ async def aexecute_task(
),
)
raise e
if isinstance(e, LLMCallBlockedError):
crewai_event_bus.emit(
self,
event=TaskFailedEvent( # type: ignore[no-untyped-call]
task=task,
error=str(e),
),
)
raise e
self._times_executed += 1
if self._times_executed > self.max_retry_limit:
crewai_event_bus.emit(
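Taken together, the two hunks above give an intentional hook block its own non-retry path: the agent emits a `TaskFailedEvent` and re-raises before the retry counter is touched. A minimal sketch of how this could be exercised (hypothetical test fixtures; assumes a `before_llm_call` registration decorator is exported from `crewai.hooks`, which this diff does not show):

```python
import pytest

from crewai.hooks import LLMCallBlockedError, before_llm_call  # decorator assumed


@before_llm_call
def block_everything(context):
    # Returning False from a before_llm_call hook blocks the call,
    # which now surfaces as LLMCallBlockedError rather than ValueError.
    return False


def test_blocked_call_is_not_retried(agent, task):  # hypothetical fixtures
    with pytest.raises(LLMCallBlockedError):
        agent.execute_task(task)
    # The new branch raises before `self._times_executed += 1` runs,
    # so an intentional block never consumes a retry.
    assert agent._times_executed == 0
```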
2 changes: 1 addition & 1 deletion lib/crewai/src/crewai/agents/crew_agent_executor.py
@@ -34,6 +34,7 @@
get_llm_response,
handle_agent_action_core,
handle_context_length,
handle_llm_call_blocked_error,
Contributor review comment: Did you want to utilize this?
handle_max_iterations_exceeded,
handle_output_parser_exception,
handle_unknown_error,
@@ -284,7 +285,6 @@ def _invoke_loop(self) -> AgentFinish:
log_error_after=self.log_error_after,
printer=self._printer,
)

except Exception as e:
if e.__class__.__module__.startswith("litellm"):
# Do not retry on litellm errors
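On the reviewer's question above: `handle_llm_call_blocked_error` is imported here but never called in this hunk, so `LLMCallBlockedError` simply propagates out of `_invoke_loop` (the litellm branch already re-raises without retrying). If the intent were to finish gracefully instead, the helper could be wired in roughly like this (a sketch under that assumption, not what this diff does; `self._process_iteration` is a placeholder for the loop body):

```python
while not isinstance(formatted_answer, AgentFinish):
    try:
        formatted_answer = self._process_iteration()  # placeholder
    except LLMCallBlockedError as e:
        # Convert the intentional block into a terminal AgentFinish
        # instead of letting it propagate to Agent.execute_task.
        formatted_answer = handle_llm_call_blocked_error(e, self.messages)
    except Exception as e:
        if e.__class__.__module__.startswith("litellm"):
            raise  # do not retry on litellm errors
        ...
```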
3 changes: 3 additions & 0 deletions lib/crewai/src/crewai/hooks/__init__.py
@@ -7,6 +7,7 @@
before_tool_call,
)
from crewai.hooks.llm_hooks import (
LLMCallBlockedError,
LLMCallHookContext,
clear_after_llm_call_hooks,
clear_all_llm_call_hooks,
@@ -74,6 +75,8 @@ def clear_all_global_hooks() -> dict[str, tuple[int, int]]:


__all__ = [
# Exceptions
"LLMCallBlockedError",
# Context classes
"LLMCallHookContext",
"ToolCallHookContext",
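With `LLMCallBlockedError` exported from `crewai.hooks`, downstream code can tell an intentional block apart from ordinary failures. A usage sketch (the `crew.kickoff()` entry point here is illustrative):

```python
from crewai.hooks import LLMCallBlockedError

try:
    result = crew.kickoff()  # any entry point that triggers LLM calls
except LLMCallBlockedError as e:
    # An intentional block by a before_llm_call hook; the agent did not retry.
    print(f"LLM call blocked: {e}")
```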
9 changes: 9 additions & 0 deletions lib/crewai/src/crewai/hooks/llm_hooks.py
@@ -14,6 +14,14 @@
from crewai.utilities.types import LLMMessage


class LLMCallBlockedError(Exception):
"""Raised when a before_llm_call hook blocks the LLM call.

    The agent deliberately does not retry on this exception, since it
    represents an intentional block by the hook.
"""


class LLMCallHookContext:
"""Context object passed to LLM call hooks.

@@ -131,6 +139,7 @@ def request_human_input(
... if response.lower() == "no":
... print("LLM call skipped by user")
"""
# from crewai.events.event_listener import event_listener
Contributor review comment: Wondering if we can remove this.

printer = Printer()
event_listener.formatter.pause_live_updates()
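For reference, a hook blocks a call by returning `False`, which under this PR surfaces as `LLMCallBlockedError` instead of a generic `ValueError`. A sketch of a guardrail-style hook (`context.messages` follows the `LLMCallHookContext` construction shown in `base_llm.py` below; the exact message shape is an assumption):

```python
def block_prompt_injection(context):
    """Block the LLM call if the outgoing messages look like an injection."""
    for message in context.messages or []:
        content = message.get("content", "")  # message shape assumed
        if isinstance(content, str) and "ignore previous instructions" in content.lower():
            return False  # blocked -> raised as LLMCallBlockedError upstream
    return True
```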
3 changes: 1 addition & 2 deletions lib/crewai/src/crewai/llm.py
@@ -1645,8 +1645,7 @@ def call(
msg_role: Literal["assistant"] = "assistant"
message["role"] = msg_role

if not self._invoke_before_llm_call_hooks(messages, from_agent):
raise ValueError("LLM call blocked by before_llm_call hook")
self._invoke_before_llm_call_hooks(messages, from_agent)

# --- 5) Set up callbacks if provided
with suppress_warnings():
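Note the behavioral change at this call site: a blocked hook previously produced `ValueError("LLM call blocked by before_llm_call hook")`; now `_invoke_before_llm_call_hooks` raises the typed exception itself. Callers that matched on the old `ValueError` should migrate (sketch):

```python
from crewai.hooks import LLMCallBlockedError

try:
    response = llm.call(messages)
except LLMCallBlockedError:
    ...  # intentional block; treat as non-retryable
```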
25 changes: 13 additions & 12 deletions lib/crewai/src/crewai/llms/base_llm.py
@@ -591,7 +591,7 @@ def _invoke_before_llm_call_hooks(
self,
messages: list[LLMMessage],
from_agent: Agent | None = None,
) -> bool:
) -> None:
"""Invoke before_llm_call hooks for direct LLM calls (no agent context).

This method should be called by native provider implementations before
@@ -601,20 +601,19 @@
messages: The messages being sent to the LLM
from_agent: The agent making the call (None for direct calls)

Returns:
True if LLM call should proceed, False if blocked by hook
Raises:
LLMCallBlockedError: If any hook returns False to block the LLM call.

Example:
>>> # In a native provider's call() method:
>>> if from_agent is None and not self._invoke_before_llm_call_hooks(
... messages, from_agent
... ):
... raise ValueError("LLM call blocked by hook")
>>> if from_agent is None:
... self._invoke_before_llm_call_hooks(messages, from_agent)
"""
# Only invoke hooks for direct calls (no agent context)
if from_agent is not None:
return True
return

from crewai.hooks import LLMCallBlockedError
from crewai.hooks.llm_hooks import (
LLMCallHookContext,
get_before_llm_call_hooks,
@@ -623,7 +622,7 @@

before_hooks = get_before_llm_call_hooks()
if not before_hooks:
return True
return

hook_context = LLMCallHookContext(
executor=None,
@@ -643,15 +642,17 @@
content="LLM call blocked by before_llm_call hook",
color="yellow",
)
return False
raise LLMCallBlockedError(
"LLM call blocked by before_llm_call hook"
)
except LLMCallBlockedError:
raise
except Exception as e:
printer.print(
content=f"Error in before_llm_call hook: {e}",
color="yellow",
)

return True

def _invoke_after_llm_call_hooks(
self,
messages: list[LLMMessage],
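Worth noting in the hunk above: `except LLMCallBlockedError: raise` precedes the generic `except Exception`, so an intentional block always propagates, while a hook that merely crashes is logged and skipped. Condensed control flow (a sketch of the loop shown in the diff):

```python
for hook in before_hooks:
    try:
        if hook(hook_context) is False:  # explicit False means "block this call"
            raise LLMCallBlockedError("LLM call blocked by before_llm_call hook")
    except LLMCallBlockedError:
        raise  # intentional block: propagate, never swallow
    except Exception as e:
        # A buggy hook must not kill the call; log and move on.
        printer.print(content=f"Error in before_llm_call hook: {e}", color="yellow")
```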
4 changes: 1 addition & 3 deletions lib/crewai/src/crewai/llms/providers/anthropic/completion.py
@@ -5,7 +5,6 @@
import os
from typing import TYPE_CHECKING, Any, Literal, cast

from anthropic.types import ThinkingBlock
from pydantic import BaseModel

from crewai.events.types.llm_events import LLMCallType
@@ -197,8 +196,7 @@ def call(
messages
)

if not self._invoke_before_llm_call_hooks(formatted_messages, from_agent):
raise ValueError("LLM call blocked by before_llm_call hook")
self._invoke_before_llm_call_hooks(formatted_messages, from_agent)

# Prepare completion parameters
completion_params = self._prepare_completion_params(
3 changes: 1 addition & 2 deletions lib/crewai/src/crewai/llms/providers/azure/completion.py
@@ -302,8 +302,7 @@
# Format messages for Azure
formatted_messages = self._format_messages_for_azure(messages)

if not self._invoke_before_llm_call_hooks(formatted_messages, from_agent):
raise ValueError("LLM call blocked by before_llm_call hook")
self._invoke_before_llm_call_hooks(formatted_messages, from_agent)

# Prepare completion parameters
completion_params = self._prepare_completion_params(
5 changes: 2 additions & 3 deletions lib/crewai/src/crewai/llms/providers/bedrock/completion.py
@@ -315,10 +315,9 @@
messages
)

if not self._invoke_before_llm_call_hooks(
self._invoke_before_llm_call_hooks(
cast(list[LLMMessage], formatted_messages), from_agent
):
raise ValueError("LLM call blocked by before_llm_call hook")
)

# Prepare request body
body: BedrockConverseRequestBody = {
3 changes: 1 addition & 2 deletions lib/crewai/src/crewai/llms/providers/gemini/completion.py
@@ -250,8 +250,7 @@

messages_for_hooks = self._convert_contents_to_dict(formatted_content)

if not self._invoke_before_llm_call_hooks(messages_for_hooks, from_agent):
raise ValueError("LLM call blocked by before_llm_call hook")
self._invoke_before_llm_call_hooks(messages_for_hooks, from_agent)

config = self._prepare_generation_config(
system_instruction, tools, response_model
3 changes: 1 addition & 2 deletions lib/crewai/src/crewai/llms/providers/openai/completion.py
@@ -190,8 +190,7 @@ def call(

formatted_messages = self._format_messages(messages)

if not self._invoke_before_llm_call_hooks(formatted_messages, from_agent):
raise ValueError("LLM call blocked by before_llm_call hook")
self._invoke_before_llm_call_hooks(formatted_messages, from_agent)

completion_params = self._prepare_completion_params(
messages=formatted_messages, tools=tools
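The provider hunks above (Anthropic, Azure, Bedrock, Gemini, OpenAI) are the same mechanical change: drop the `if not ...: raise ValueError(...)` wrapper and call `_invoke_before_llm_call_hooks(...)` directly, since the base-class method now raises `LLMCallBlockedError` itself. The Anthropic hunk additionally removes the `anthropic.types.ThinkingBlock` import.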
33 changes: 23 additions & 10 deletions lib/crewai/src/crewai/utilities/agent_utils.py
@@ -16,6 +16,7 @@
parse,
)
from crewai.cli.config import Settings
from crewai.hooks import LLMCallBlockedError
from crewai.llms.base_llm import BaseLLM
from crewai.tools import BaseTool as CrewAITool
from crewai.tools.base_tool import BaseTool
@@ -260,8 +261,7 @@ def get_llm_response(
"""

if executor_context is not None:
if not _setup_before_llm_call_hooks(executor_context, printer):
raise ValueError("LLM call blocked by before_llm_call hook")
_setup_before_llm_call_hooks(executor_context, printer) # Raises if blocked
messages = executor_context.messages

try:
@@ -314,8 +314,7 @@ async def aget_llm_response(
ValueError: If the response is None or empty.
"""
if executor_context is not None:
if not _setup_before_llm_call_hooks(executor_context, printer):
raise ValueError("LLM call blocked by before_llm_call hook")
_setup_before_llm_call_hooks(executor_context, printer) # Raises if blocked
messages = executor_context.messages

try:
@@ -461,6 +460,18 @@ def handle_output_parser_exception(
return formatted_answer


def handle_llm_call_blocked_error(
    e: LLMCallBlockedError,
    messages: list[LLMMessage],
) -> AgentFinish:
    """Convert a blocked LLM call into a terminal AgentFinish.

    Appends the block reason to the message history and returns an
    AgentFinish carrying that reason, so an executor can stop cleanly
    instead of retrying.
    """
    messages.append({"role": "user", "content": str(e)})
    return AgentFinish(
        thought="",
        output=str(e),
        text=str(e),
    )


def is_context_length_exceeded(exception: Exception) -> bool:
"""Check if the exception is due to context length exceeding.

@@ -728,15 +739,15 @@ def load_agent_from_repository(from_repository: str) -> dict[str, Any]:

def _setup_before_llm_call_hooks(
executor_context: CrewAgentExecutor | LiteAgent | None, printer: Printer
) -> bool:
) -> None:
"""Setup and invoke before_llm_call hooks for the executor context.

Args:
executor_context: The executor context to setup the hooks for.
printer: Printer instance for error logging.

Returns:
True if LLM execution should proceed, False if blocked by a hook.
Raises:
LLMCallBlockedError: If any hook returns False to block the LLM call.
"""
if executor_context and executor_context.before_llm_call_hooks:
from crewai.hooks.llm_hooks import LLMCallHookContext
@@ -752,7 +763,11 @@ def _setup_before_llm_call_hooks(
content="LLM call blocked by before_llm_call hook",
color="yellow",
)
return False
raise LLMCallBlockedError(
"LLM call blocked by before_llm_call hook"
)
except LLMCallBlockedError:
raise
except Exception as e:
printer.print(
content=f"Error in before_llm_call hook: {e}",
Expand All @@ -773,8 +788,6 @@ def _setup_before_llm_call_hooks(
else:
executor_context.messages = []

return True


def _setup_after_llm_call_hooks(
executor_context: CrewAgentExecutor | LiteAgent | None,
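With `_setup_before_llm_call_hooks` raising instead of returning a bool, `get_llm_response` and `aget_llm_response` now let `LLMCallBlockedError` propagate to the executor untouched. A caller-side sketch (the keyword wiring below is assumed; the real signatures are abridged in this diff):

```python
from crewai.hooks import LLMCallBlockedError
from crewai.utilities.agent_utils import get_llm_response

try:
    answer = get_llm_response(
        llm=llm,
        messages=messages,
        callbacks=callbacks,
        printer=printer,
        executor_context=executor,  # hooks only run when a context is supplied
    )
except LLMCallBlockedError:
    raise  # bubbles up to Agent.execute_task, which emits TaskFailedEvent
```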