20 changes: 20 additions & 0 deletions docs/llm.md
@@ -33,6 +33,15 @@ IntentKit supports a wide range of LLM providers and models to give you flexibility
- **venice-uncensored** - Venice Uncensored model
- **venice-llama-4-maverick-17b** - Venice Llama-4 Maverick 17B

#### BlockRun.AI
BlockRun is a pay-per-request AI gateway that uses x402 micropayments on Base. It provides access to multiple LLM providers with just a wallet; no API keys are needed.

- **openai/gpt-4o** - GPT-4o via BlockRun
- **openai/gpt-4o-mini** - GPT-4o Mini via BlockRun
- **anthropic/claude-sonnet-4** - Claude Sonnet 4 via BlockRun
- **deepseek/deepseek-chat** - DeepSeek V3 via BlockRun
- **google/gemini-2.0-flash** - Gemini 2.0 Flash via BlockRun

### Model Capabilities

Each model supports different capabilities:
@@ -57,5 +66,16 @@ To use these models, configure the appropriate API keys in your environment:
- `ETERNAL_API_KEY` for Eternal AI models
- `REIGENT_API_KEY` for Reigent models
- `VENICE_API_KEY` for Venice AI models
- `BLOCKRUN_WALLET_KEY` for BlockRun models (a wallet private key; it never leaves your machine)

The system will automatically route requests to the appropriate provider based on the model selected.

### BlockRun.AI Setup

BlockRun uses x402 micropayments: your wallet private key never leaves your machine, and only EIP-712 signatures are sent to authorize payments.

1. Set your Base chain wallet private key: `BLOCKRUN_WALLET_KEY=0x...`
2. Fund your wallet with USDC on Base
3. Use any BlockRun model (e.g., `anthropic/claude-sonnet-4`)

Learn more: https://blockrun.ai/docs
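
For orientation, here is a minimal sketch of the underlying SDK call flow. It mirrors the calls the `BlockRunChat` wrapper in this PR makes against `blockrun_llm`; the SDK surface (`LLMClient`, `chat_completion`, and the OpenAI-style response shape) is assumed from that wrapper, so treat the BlockRun docs as authoritative:

```python
import os

from blockrun_llm import LLMClient

# The private key only signs EIP-712 payment authorizations locally;
# it is never transmitted.
client = LLMClient(private_key=os.environ["BLOCKRUN_WALLET_KEY"])

response = client.chat_completion(
    model="anthropic/claude-sonnet-4",
    messages=[{"role": "user", "content": "Hello!"}],
    max_tokens=256,
)
print(response.choices[0].message.content)
```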
6 changes: 6 additions & 0 deletions example.env
@@ -7,6 +7,12 @@ DEEPSEEK_API_KEY=
XAI_API_KEY=
REIGENT_API_KEY=

# BlockRun.AI - x402 micropayment LLM gateway
# Access GPT-4, Claude, DeepSeek, Gemini via wallet-based payments
# No API keys needed - just fund your wallet with USDC on Base
# Get started: https://blockrun.ai/docs
BLOCKRUN_WALLET_KEY=


DB_HOST=
DB_PORT=
2 changes: 2 additions & 0 deletions intentkit/config/config.py
@@ -111,6 +111,8 @@ def __init__(self) -> None:
        self.reigent_api_key: str | None = self.load("REIGENT_API_KEY")
        self.venice_api_key: str | None = self.load("VENICE_API_KEY")
        self.openrouter_api_key: str | None = self.load("OPENROUTER_API_KEY")
        # BlockRun.AI - x402 micropayment LLM gateway
        self.blockrun_wallet_key: str | None = self.load("BLOCKRUN_WALLET_KEY")
        # LLM Config
        self.system_prompt: str | None = self.load("SYSTEM_PROMPT")
        self.intentkit_prompt: str | None = self.load("INTENTKIT_PROMPT")
127 changes: 127 additions & 0 deletions intentkit/models/blockrun_chat.py
@@ -0,0 +1,127 @@
"""BlockRun LangChain ChatModel wrapper.

Provides a LangChain-compatible ChatModel that uses BlockRun.AI's x402
micropayment gateway to access multiple LLM providers (OpenAI, Anthropic,
Google, DeepSeek).
"""

import asyncio
import os
from typing import Any, List, Optional

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage
from langchain_core.outputs import ChatGeneration, ChatResult

from intentkit.config.config import config


class BlockRunChat(BaseChatModel):
    """LangChain ChatModel for BlockRun.AI x402 micropayment gateway.

    This wrapper uses the blockrun_llm SDK to handle x402 payment flow
    and provides a standard LangChain interface.

    Example:
        chat = BlockRunChat(model="anthropic/claude-sonnet-4", max_tokens=4096)
        response = chat.invoke("Hello!")
    """

    model: str = "openai/gpt-4o-mini"
    max_tokens: int = 4096
    temperature: float = 0.7

    _client: Any = None

    def __init__(self, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        self._init_client()

    def _init_client(self) -> None:
        """Initialize the BlockRun LLM client."""
        from blockrun_llm import LLMClient

        private_key = config.blockrun_wallet_key
        if not private_key:
            private_key = os.getenv("BLOCKRUN_WALLET_KEY")

        if not private_key:
            raise ValueError(
                "BlockRun wallet key not found. Set BLOCKRUN_WALLET_KEY env var "
                "or config.blockrun_wallet_key"
            )

        self._client = LLMClient(private_key=private_key)

    @property
    def _llm_type(self) -> str:
        return "blockrun"

    @property
    def _identifying_params(self) -> dict[str, Any]:
        return {
            "model": self.model,
            "max_tokens": self.max_tokens,
            "temperature": self.temperature,
        }

    def _convert_messages(self, messages: List[BaseMessage]) -> List[dict]:
        """Convert LangChain messages to BlockRun format."""
        converted = []
        for msg in messages:
            if isinstance(msg, SystemMessage):
                converted.append({"role": "system", "content": msg.content})
            elif isinstance(msg, HumanMessage):
                converted.append({"role": "user", "content": msg.content})
            elif isinstance(msg, AIMessage):
                converted.append({"role": "assistant", "content": msg.content})
            else:
                # Default to user role for unknown message types
                converted.append({"role": "user", "content": str(msg.content)})
        return converted

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Generate a response using BlockRun API."""
        converted_messages = self._convert_messages(messages)

        # Call BlockRun API
        response = self._client.chat_completion(
            model=self.model,
            messages=converted_messages,
            max_tokens=kwargs.get("max_tokens", self.max_tokens),
            temperature=kwargs.get("temperature", self.temperature),
        )

        # Extract response content
        content = response.choices[0].message.content

        # Create ChatGeneration
        generation = ChatGeneration(
            message=AIMessage(content=content),
            generation_info={
                "model": self.model,
                "usage": {
                    "prompt_tokens": response.usage.prompt_tokens if response.usage else 0,
                    "completion_tokens": response.usage.completion_tokens if response.usage else 0,
                    "total_tokens": response.usage.total_tokens if response.usage else 0,
                },
            },
        )

        return ChatResult(generations=[generation])

    async def _agenerate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Async generate - runs the sync call in a worker thread."""
        # TODO: Use AsyncLLMClient when available. Until then, push the
        # blocking HTTP call off the event loop.
        return await asyncio.to_thread(
            self._generate, messages, stop, run_manager, **kwargs
        )
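
A short usage sketch for the wrapper above; `invoke` comes from the standard LangChain `BaseChatModel` interface, and the model name and token limit here are illustrative. It assumes `BLOCKRUN_WALLET_KEY` is set and the wallet holds USDC on Base:

```python
from langchain_core.messages import HumanMessage, SystemMessage

from intentkit.models.blockrun_chat import BlockRunChat

chat = BlockRunChat(model="openai/gpt-4o-mini", max_tokens=512)
result = chat.invoke(
    [
        SystemMessage(content="You are a concise assistant."),
        HumanMessage(content="Summarize x402 in one sentence."),
    ]
)
print(result.content)
```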
5 changes: 5 additions & 0 deletions intentkit/models/llm.csv
Expand Up @@ -31,3 +31,8 @@ eternalai,Eternal AI (Llama-3.3-70B),eternal,FALSE,0.25,0.75,2,64000,4096,4,3,FA
reigent,REI Network,reigent,FALSE,0.5,1.5,3,32000,4096,4,3,FALSE,TRUE,TRUE,FALSE,FALSE,FALSE,FALSE,FALSE,https://api.reisearch.box/v1,300
venice-uncensored,Venice Uncensored,venice,TRUE,0.5,2,3,32000,4096,4,3,FALSE,TRUE,TRUE,FALSE,FALSE,FALSE,FALSE,FALSE,https://api.venice.ai/api/v1,300
venice-llama-4-maverick-17b,Venice Llama 4 Maverick 17B,venice,TRUE,1.5,6,4,32000,4096,4,3,FALSE,TRUE,TRUE,FALSE,FALSE,FALSE,FALSE,FALSE,https://api.venice.ai/api/v1,300
openai/gpt-4o,BlockRun GPT-4o,blockrun,TRUE,2.5,10,4,128000,4096,4,3,TRUE,TRUE,TRUE,FALSE,FALSE,TRUE,FALSE,FALSE,https://pay.blockrun.ai,180
openai/gpt-4o-mini,BlockRun GPT-4o Mini,blockrun,TRUE,0.15,0.6,1,128000,4096,3,4,FALSE,TRUE,TRUE,FALSE,FALSE,TRUE,FALSE,FALSE,https://pay.blockrun.ai,180
anthropic/claude-sonnet-4,BlockRun Claude Sonnet 4,blockrun,TRUE,3,15,5,200000,4096,5,3,TRUE,TRUE,TRUE,FALSE,FALSE,TRUE,FALSE,FALSE,https://pay.blockrun.ai,180
deepseek/deepseek-chat,BlockRun DeepSeek V3,blockrun,TRUE,0.28,0.42,2,128000,4096,4,3,FALSE,TRUE,TRUE,FALSE,FALSE,TRUE,FALSE,FALSE,https://pay.blockrun.ai,300
google/gemini-2.0-flash,BlockRun Gemini 2.0 Flash,blockrun,TRUE,0.1,0.4,1,1048576,8192,3,4,TRUE,TRUE,TRUE,FALSE,FALSE,TRUE,FALSE,FALSE,https://pay.blockrun.ai,180
30 changes: 30 additions & 0 deletions intentkit/models/llm.py
@@ -105,6 +105,8 @@ def _load_default_llm_models() -> dict[str, "LLMModelInfo"]:
            is_configured = bool(config.reigent_api_key)
        elif model.provider == LLMProvider.VENICE:
            is_configured = bool(config.venice_api_key)
        elif model.provider == LLMProvider.BLOCKRUN:
            is_configured = bool(config.blockrun_wallet_key)

        if not is_configured:
            continue
@@ -128,6 +130,7 @@ class LLMProvider(str, Enum):
REIGENT = "reigent"
VENICE = "venice"
OLLAMA = "ollama"
BLOCKRUN = "blockrun"

def display_name(self) -> str:
"""Return user-friendly display name for the provider."""
@@ -141,6 +144,7 @@ def display_name(self) -> str:
self.REIGENT: "Reigent",
self.VENICE: "Venice",
self.OLLAMA: "Ollama",
self.BLOCKRUN: "BlockRun",
}
return display_names.get(self, self.value)

@@ -695,6 +699,30 @@ async def create_instance(self, params: dict[str, Any] = {}) -> BaseChatModel:
        return ChatOllama(**kwargs)


class BlockRunLLM(LLMModel):
    """BlockRun.AI LLM configuration using x402 micropayments."""

    async def create_instance(self, params: dict[str, Any] = {}) -> BaseChatModel:
        """Create and return a BlockRun LangChain ChatModel instance."""
        from intentkit.models.blockrun_chat import BlockRunChat

        info = await self.model_info()

        kwargs = {
            "model": self.model_name,
            "max_tokens": info.output_length,
        }

        # Add optional parameters based on model support
        if info.supports_temperature:
            kwargs["temperature"] = self.temperature

        # Update kwargs with params to allow overriding
        kwargs.update(params)

        return BlockRunChat(**kwargs)


# Factory function to create the appropriate LLM model based on the model name
async def create_llm_model(
    model_name: str,
@@ -742,6 +770,8 @@ async def create_llm_model(
        return OpenRouterLLM(**base_params)
    elif provider == LLMProvider.OLLAMA:
        return OllamaLLM(**base_params)
    elif provider == LLMProvider.BLOCKRUN:
        return BlockRunLLM(**base_params)
    else:
        # Default to OpenAI
        return OpenAILLM(**base_params)
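
End to end, the new routing path can be exercised roughly like this. It is a sketch, not a canonical call: it assumes `create_llm_model`'s remaining parameters are optional, that `create_instance` accepts an override dict as shown above, and that `BLOCKRUN_WALLET_KEY` is configured:

```python
import asyncio

from intentkit.models.llm import create_llm_model


async def main() -> None:
    # "deepseek/deepseek-chat" maps to LLMProvider.BLOCKRUN via llm.csv
    llm = await create_llm_model(model_name="deepseek/deepseek-chat")
    chat = await llm.create_instance({"temperature": 0.2})
    result = await chat.ainvoke("ping")
    print(result.content)


asyncio.run(main())
```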
1 change: 1 addition & 0 deletions pyproject.toml
@@ -59,6 +59,7 @@ dependencies = [
"aiosqlite>=0.21.0",
"langchain-deepseek>=0.1.4",
"langchain-google-genai>=3.2.0",
"blockrun-llm>=0.1.0",
]
keywords = ["ai", "agent", "intent", "blockchain", "crypto"]
classifiers = [