Skip to content

MCP Integration

Model Context Protocol (MCP) is an open standard that enables secure connections between large language models and data sources. MCP servers provide tools and resources that agents can use to access external systems, APIs, and data sources.

MCP integration in LangCrew provides:

  • Standardized Connections - Connect to any MCP-compatible server
  • Secure Access - Built-in security and permission controls
  • Multiple Transports - Support for SSE, HTTP streaming, and stdio
  • Tool Filtering - Control which tools agents can access
  • Easy Scaling - Add new capabilities without code changes

LangCrew supports three MCP transport methods:

Server-Sent Events (SSE) — best for real-time data and live updates:

# MCP server config for the SSE transport: a live-stream endpoint
# (API key passed in the query string) plus the transport selector.
server_config = {
    "url": "https://api.example.com/mcp/sse?key=your_key",
    "transport": "sse",
}

Streamable HTTP — ideal for REST API integrations:

# MCP server config for the streamable-HTTP transport: a plain HTTP
# endpoint plus the transport selector.
server_config = {
    "url": "https://api.example.com/mcp",
    "transport": "streamable_http",
}

Stdio — perfect for local tools and scripts:

# MCP server config for the stdio transport: the interpreter to launch,
# the script to run as the tool process, and the transport selector.
server_config = {
    "command": "python3",
    "args": ["path/to/your/tool.py"],
    "transport": "stdio",
}

Define your MCP server configuration:

from langcrew import Agent
# Map of server name -> MCP connection config; passed to agents below.
mcp_server_configs = {
    "my_server": {
        "url": "https://api.example.com/mcp",
        "transport": "streamable_http",
    },
}

Add MCP servers to your agent:

@agent
def my_agent(self) -> Agent:
    """Build an agent connected to the MCP servers in ``mcp_server_configs``.

    The body indentation was lost in the page extraction; restored here so the
    snippet is valid Python. Behavior is unchanged.
    """
    return Agent(
        config=self.agents_config["my_agent"],
        mcp_servers=mcp_server_configs,
        llm=self._get_default_llm(),
        verbose=True,
    )

Control which tools the agent can access:

@agent
def restricted_agent(self) -> Agent:
    """Build an agent whose MCP access is limited to an explicit tool list.

    The body indentation was lost in the page extraction; restored here so the
    snippet is valid Python. Behavior is unchanged.
    """
    return Agent(
        config=self.agents_config["my_agent"],
        mcp_servers=mcp_server_configs,
        mcp_tool_filter=["search", "calculator"],  # Only these tools
        llm=self._get_default_llm(),
    )

Here’s a full working example using different MCP transport types:

import os
from langchain_openai import ChatOpenAI
from langcrew import Agent, CrewBase, agent, task, crew
from langcrew.task import Task
from langcrew.crew import Crew
@CrewBase
class MyCrew:
    """Example crew demonstrating two MCP transports (SSE and stdio).

    The class body was flattened to column 0 by the page extraction, which
    is a SyntaxError in Python; indentation is restored here. All runtime
    values are unchanged.
    """

    # Paths to the YAML configs read by the CrewBase machinery.
    agents_config = "config/agents.yaml"
    tasks_config = "config/tasks.yaml"

    def _get_default_llm(self):
        """Return the shared low-temperature OpenAI chat model."""
        return ChatOpenAI(
            model="gpt-4o-mini",
            temperature=0.1,
            api_key=os.getenv("OPENAI_API_KEY"),
        )

    @agent
    def web_agent(self) -> Agent:
        """Agent backed by a remote MCP server over the SSE transport."""
        # SSE transport for real-time data; API key read from the environment.
        server_config = {
            "url": f"https://api.example.com/sse?key={os.getenv('API_KEY')}",
            "transport": "sse",
        }
        return Agent(
            config=self.agents_config["web_agent"],
            mcp_servers={"web_server": server_config},
            llm=self._get_default_llm(),
        )

    @agent
    def calculator_agent(self) -> Agent:
        """Agent running a local calculator script over the stdio transport."""
        # stdio transport for local tools: resolve the script path relative
        # to this file so the example works regardless of the CWD.
        current_dir = os.path.dirname(os.path.abspath(__file__))
        calc_script = os.path.join(current_dir, "tools", "calculator.py")
        server_config = {
            "command": "python3",
            "args": [calc_script],
            "transport": "stdio",
        }
        return Agent(
            config=self.agents_config["calculator"],
            mcp_servers={"calc_server": server_config},
            mcp_tool_filter=["add", "multiply"],  # Restrict tools
            llm=self._get_default_llm(),
        )

    @task
    def web_search_task(self) -> Task:
        """Task executed by the web agent."""
        return Task(
            config=self.tasks_config["web_search"],
            agent=self.web_agent(),
        )

    @task
    def calculation_task(self) -> Task:
        """Task executed by the calculator agent."""
        return Task(
            config=self.tasks_config["calculation"],
            agent=self.calculator_agent(),
        )

    @crew
    def crew(self) -> Crew:
        """Assemble the crew from the registered agents and tasks."""
        # self.agents / self.tasks are presumably collected by @CrewBase
        # from the decorated methods above — confirm against langcrew docs.
        return Crew(
            agents=self.agents,
            tasks=self.tasks,
            verbose=True,
        )