Guardrails Kit

Input/output validation pipeline with configurable policies, PII detection, prompt injection defense, and custom rules.

Policy Configuration

Active rules: 5

SSN Detected
Email Detected
Phone Number Detected
API Key Detected
Credit Card Detected

Custom Rules

Input Text

Integration Code

import { createGuardrail, PiiPolicy, PromptInjectionPolicy } from 'agent-tools-kit/safety'

// Create a guardrail pipeline with multiple policies
const guardrail = createGuardrail({
  policies: [
    // PII detection: matches are redacted in place rather than rejecting the input
    new PiiPolicy({
      detect: ['ssn', 'email', 'phone', 'credit-card', 'api-key'],
      action: 'redact',     // 'block' | 'redact' | 'warn'
      severity: 'critical',
    }),
    // Prompt-injection defense: any pattern match rejects the input outright
    new PromptInjectionPolicy({
      patterns: ['instruction-override', 'role-hijack', 'jailbreak'],
      blockOnMatch: true,
    }),
  ],
  customRules: [],
  onViolation: (violation) => {
    // SECURITY: never log violation.match itself — for PII rules it contains
    // the raw sensitive text (SSN, card number, API key), which would leak
    // the very data the policy redacts into the log stream. Record only
    // non-sensitive metadata about the match.
    logger.warn('Guardrail violation', {
      rule: violation.rule,
      severity: violation.severity,
      matchLength: violation.match?.length ?? 0, // safe stand-in for the match text
    })
  },
})

// Use in your agent pipeline
const result = await guardrail.validate(userInput)

if (result.blocked) {
  return { error: 'Input blocked by safety policy', violations: result.violations }
}

// Pass sanitized (PII-redacted) text to the LLM, never the raw input
const response = await llm.chat({
  messages: [{ role: 'user', content: result.sanitized }]
})

// Validate output too — and act on the verdict: the original example computed
// this check but never inspected it, so unsafe model output passed through.
const outputCheck = await guardrail.validateOutput(response.text)

if (outputCheck.blocked) {
  return { error: 'Output blocked by safety policy', violations: outputCheck.violations }
}