
Getting Started

Guardrails are a safety and validation system in langcrew that ensures data quality, security, and compliance. They act as protective barriers that validate inputs before processing and outputs before delivery.

Let’s implement basic guardrails in your langcrew application in just a few minutes.

from langcrew import Agent, Task, Crew
from langcrew.guardrail import input_guard, output_guard, GuardrailError
from langcrew.llm_factory import LLMFactory


@input_guard
def check_no_sensitive_info(data):
    """Prevent processing of sensitive information"""
    content = str(data).lower()
    sensitive_patterns = ["password:", "api_key:", "secret:", "ssn:"]
    for pattern in sensitive_patterns:
        if pattern in content:
            return False, f"Contains sensitive information: {pattern}"
    return True, "No sensitive information detected"


@output_guard
def check_output_quality(data):
    """Ensure output meets quality standards"""
    output_str = str(data)
    if not output_str:
        return False, "Empty output not allowed"
    if len(output_str) < 10:
        return False, "Output too short (minimum 10 characters)"
    return True, "Output quality check passed"


# Create LLM
llm = LLMFactory.create_llm({
    "provider": "openai",
    "model": "gpt-4o-mini",
    "temperature": 0.3
})

# Create agent with guardrails
agent = Agent(
    role="Content Creator",
    goal="Generate safe content",
    backstory="Professional content creator with safety checks.",
    llm=llm,
    input_guards=[check_no_sensitive_info],
    output_guards=[check_output_quality]
)

# Create task
task = Task(
    description="Write an article about artificial intelligence",
    expected_output="A well-written article about AI",
    agent=agent
)

# Create crew and execute
crew = Crew(agents=[agent], tasks=[task])
try:
    result = crew.kickoff()
    print("✅ Task completed successfully")
except GuardrailError as e:
    print(f"❌ Guardrail blocked execution: {e}")

Input guardrails validate data before it reaches your AI agents:

@input_guard
def check_input_length(data):
    """Limit input length to prevent abuse"""
    content = str(data)
    max_length = 1000
    if len(content) > max_length:
        return False, f"Input too long: {len(content)} > {max_length} characters"
    return True, f"Input length OK: {len(content)} characters"


@input_guard
def check_language_support(data):
    """Check if input language is supported"""
    content = str(data)
    # Crude heuristic: reject inputs where more than 30% of characters are non-ASCII
    non_latin_chars = sum(1 for char in content if ord(char) > 127)
    total_chars = len(content)
    if total_chars > 0 and non_latin_chars / total_chars > 0.3:
        return False, "Language not supported"
    return True, "Language supported"

Output guardrails validate content after AI processing:

@output_guard
def check_output_quality(data):
    """Ensure output meets quality standards"""
    output_str = str(data)
    if not output_str:
        return False, "Empty output not allowed"
    if len(output_str) < 10:
        return False, "Output too short (minimum 10 characters)"
    # Check for placeholder text
    placeholders = ["TODO", "FIXME", "[INSERT", "[PLACEHOLDER"]
    for placeholder in placeholders:
        if placeholder in output_str.upper():
            return False, f"Contains placeholder text: {placeholder}"
    return True, "Output quality check passed"


@output_guard
def filter_profanity(data):
    """Filter inappropriate language"""
    inappropriate_words = ["spam", "junk", "garbage"]
    output_str = str(data).lower()
    for word in inappropriate_words:
        if word in output_str:
            return False, f"Contains inappropriate content: {word}"
    return True, "Content appropriate"

Apply guardrails to all tasks executed by an agent:

agent = Agent(
    role="Content Creator",
    goal="Generate safe content",
    llm=llm,
    input_guards=[check_no_sensitive_info, check_input_length],
    output_guards=[check_output_quality, filter_profanity]
)

Conditional guardrails adapt their checks to the content they receive:

@input_guard
def conditional_sensitive_check(data):
    """Conditional sensitive data check based on context"""
    content = str(data).lower()
    # Check if this is a security-related task
    security_keywords = ["security", "authentication", "login"]
    is_security_task = any(keyword in content for keyword in security_keywords)
    if is_security_task:
        # For security tasks, be more strict
        strict_patterns = ["password:", "pwd:", "secret:", "key:"]
        for pattern in strict_patterns:
            if pattern in content:
                return False, f"Security task contains sensitive data: {pattern}"
    else:
        # For non-security tasks, be more lenient
        lenient_patterns = ["password:", "pwd:"]
        for pattern in lenient_patterns:
            if pattern in content:
                return False, f"Contains sensitive data: {pattern}"
    return True, "Conditional check passed"

Implement rate limiting to prevent abuse:

@input_guard
def check_rate_limiting(data):
    """Simple rate limiting guardrail"""
    import time

    # Per-process counter stored as attributes on the function itself
    if not hasattr(check_rate_limiting, "_request_count"):
        check_rate_limiting._request_count = 0
        check_rate_limiting._last_reset = time.time()
    current_time = time.time()
    # Reset counter every minute
    if current_time - check_rate_limiting._last_reset > 60:
        check_rate_limiting._request_count = 0
        check_rate_limiting._last_reset = current_time
    # Allow max 5 requests per minute
    max_requests = 5
    check_rate_limiting._request_count += 1
    if check_rate_limiting._request_count > max_requests:
        return False, f"Rate limit exceeded: {check_rate_limiting._request_count} > {max_requests}"
    return True, f"Rate limit OK: {check_rate_limiting._request_count}/{max_requests}"