# langchain-praisonai

LangChain integration for the PraisonAI multi-agent framework.
## Installation

```bash
pip install langchain-praisonai
```

Install and start the PraisonAI server:

```bash
pip install praisonai
praisonai serve agents.yaml --port 8080
```

## Usage

### Use with a LangChain agent

```python
from langchain_praisonai import PraisonAITool
from langchain_openai import ChatOpenAI
from langchain.agents import initialize_agent, AgentType
# Create the PraisonAI tool
praisonai_tool = PraisonAITool(api_url="http://localhost:8080")
# Use with LangChain agent
llm = ChatOpenAI(model="gpt-4o-mini")
agent = initialize_agent(
    tools=[praisonai_tool],
    llm=llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
)
result = agent.run("Research the latest trends in AI and summarize them")
print(result)
```

### Agent-specific tools

```python
from langchain_praisonai import PraisonAIAgentTool
# Create tools for specific agents
researcher = PraisonAIAgentTool(agent_name="researcher")
writer = PraisonAIAgentTool(agent_name="writer")
# Use multiple agent tools
tools = [researcher, writer]
```
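The agent-specific tools can be handed to a LangChain agent just like `PraisonAITool`. A minimal sketch, assuming the server defines `researcher` and `writer` agents:

```python
from langchain_openai import ChatOpenAI
from langchain.agents import initialize_agent, AgentType
from langchain_praisonai import PraisonAIAgentTool

# Assumes a running PraisonAI server with "researcher" and "writer" agents
researcher = PraisonAIAgentTool(agent_name="researcher")
writer = PraisonAIAgentTool(agent_name="writer")

llm = ChatOpenAI(model="gpt-4o-mini")
agent = initialize_agent(
    tools=[researcher, writer],
    llm=llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
)

result = agent.run("Research recent AI trends, then draft a short summary post")
print(result)
```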
### List available agents

```python
from langchain_praisonai import PraisonAIListAgentsTool

list_tool = PraisonAIListAgentsTool()
agents = list_tool.run({})
print(agents)
```

### Run the tool directly

```python
from langchain_praisonai import PraisonAITool

tool = PraisonAITool()
# Run with default workflow
result = tool.run({"query": "What are the benefits of AI?"})
# Run with specific agent
result = tool.run({"query": "Research quantum computing", "agent": "researcher"})import asyncio
from langchain_praisonai import PraisonAITool

async def main():
    tool = PraisonAITool()
    result = await tool.arun({"query": "Analyze market trends"})
    print(result)

asyncio.run(main())
```
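Because the tool exposes `arun`, several queries can also be dispatched concurrently. A minimal sketch, assuming the PraisonAI server accepts parallel requests (the query strings are illustrative):

```python
import asyncio
from langchain_praisonai import PraisonAITool

async def main():
    tool = PraisonAITool()
    queries = [
        "Summarize recent AI funding news",
        "List notable open-source LLM releases",
    ]
    # Fire off all queries at once and wait for every result
    results = await asyncio.gather(*(tool.arun({"query": q}) for q in queries))
    for query, answer in zip(queries, results):
        print(query, "->", answer)

asyncio.run(main())
```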
## Configuration

| Parameter | Default | Description |
|---|---|---|
| `api_url` | `http://localhost:8080` | PraisonAI server URL |
| `timeout` | `300` | Request timeout in seconds |
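For example, pointing the tool at a remote server with a longer timeout; the URL below is a placeholder, and it is assumed both parameters are passed as constructor keyword arguments, as `api_url` is in the examples above:

```python
from langchain_praisonai import PraisonAITool

# Placeholder URL; substitute your own PraisonAI server address
tool = PraisonAITool(api_url="http://praisonai.internal:8080", timeout=600)
result = tool.run({"query": "Draft a weekly status summary"})
print(result)
```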
## Tools

| Tool | Description |
|---|---|
| `PraisonAITool` | General-purpose tool for running PraisonAI workflows |
| `PraisonAIAgentTool` | Tool for running a specific named agent |
| `PraisonAIListAgentsTool` | Tool for listing available agents |
## License

MIT