chore: pre-migration snapshot
Layer0, MCP servers, Terraform consolidation
This commit is contained in:
@@ -10,22 +10,24 @@ This module provides tools to:
|
||||
Export primary classes and functions:
|
||||
"""
|
||||
|
||||
from mcp.waf_intelligence.analyzer import (
|
||||
WAFRuleAnalyzer,
|
||||
RuleViolation,
|
||||
__version__ = "0.3.0"
|
||||
|
||||
from .analyzer import (
|
||||
AnalysisResult,
|
||||
RuleViolation,
|
||||
WAFRuleAnalyzer,
|
||||
)
|
||||
from mcp.waf_intelligence.generator import (
|
||||
WAFRuleGenerator,
|
||||
GeneratedRule,
|
||||
)
|
||||
from mcp.waf_intelligence.compliance import (
|
||||
from .compliance import (
|
||||
ComplianceMapper,
|
||||
FrameworkMapping,
|
||||
)
|
||||
from mcp.waf_intelligence.orchestrator import (
|
||||
WAFIntelligence,
|
||||
from .generator import (
|
||||
GeneratedRule,
|
||||
WAFRuleGenerator,
|
||||
)
|
||||
from .orchestrator import (
|
||||
WAFInsight,
|
||||
WAFIntelligence,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
|
||||
@@ -10,6 +10,7 @@ from typing import Any, Dict, List
|
||||
from layer0 import layer0_entry
|
||||
from layer0.shadow_classifier import ShadowEvalResult
|
||||
|
||||
from . import __version__ as WAF_INTEL_VERSION
|
||||
from .orchestrator import WAFInsight, WAFIntelligence
|
||||
|
||||
|
||||
@@ -56,11 +57,18 @@ def run_cli(argv: List[str] | None = None) -> int:
|
||||
action="store_true",
|
||||
help="Exit with non-zero code if any error-severity violations are found.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--version",
|
||||
action="version",
|
||||
version=f"%(prog)s {WAF_INTEL_VERSION}",
|
||||
)
|
||||
|
||||
args = parser.parse_args(argv)
|
||||
|
||||
# Layer 0: pre-boot Shadow Eval gate.
|
||||
routing_action, shadow = layer0_entry(f"waf_intel_cli file={args.file} limit={args.limit}")
|
||||
routing_action, shadow = layer0_entry(
|
||||
f"waf_intel_cli file={args.file} limit={args.limit}"
|
||||
)
|
||||
if routing_action != "HANDOFF_TO_LAYER1":
|
||||
_render_layer0_block(routing_action, shadow)
|
||||
return 1
|
||||
@@ -90,7 +98,9 @@ def run_cli(argv: List[str] | None = None) -> int:
|
||||
print(f"\nWAF Intelligence Report for: {path}\n{'-' * 72}")
|
||||
|
||||
if not insights:
|
||||
print("No high-severity, high-confidence issues detected based on current heuristics.")
|
||||
print(
|
||||
"No high-severity, high-confidence issues detected based on current heuristics."
|
||||
)
|
||||
return 0
|
||||
|
||||
for idx, insight in enumerate(insights, start=1):
|
||||
@@ -119,7 +129,9 @@ def run_cli(argv: List[str] | None = None) -> int:
|
||||
if insight.mappings:
|
||||
print("\nCompliance Mapping:")
|
||||
for mapping in insight.mappings:
|
||||
print(f" - {mapping.framework} {mapping.control_id}: {mapping.description}")
|
||||
print(
|
||||
f" - {mapping.framework} {mapping.control_id}: {mapping.description}"
|
||||
)
|
||||
|
||||
print()
|
||||
|
||||
|
||||
@@ -1,9 +1,16 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
MANAGED_WAF_RULESET_IDS = (
|
||||
# Cloudflare managed WAF ruleset IDs (last updated 2025-12-18).
|
||||
"efb7b8c949ac4650a09736fc376e9aee", # Cloudflare Managed Ruleset
|
||||
"4814384a9e5d4991b9815dcfc25d2f1f", # OWASP Core Ruleset
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class RuleViolation:
|
||||
@@ -57,6 +64,20 @@ class WAFRuleAnalyzer:
|
||||
Analyze Cloudflare WAF rules from Terraform with a quality-first posture.
|
||||
"""
|
||||
|
||||
def _has_managed_waf_rules(self, text: str) -> bool:
|
||||
text_lower = text.lower()
|
||||
|
||||
if "managed_rules" in text_lower:
|
||||
return True
|
||||
|
||||
if re.search(r'phase\s*=\s*"http_request_firewall_managed"', text_lower):
|
||||
return True
|
||||
|
||||
if "cf.waf" in text_lower:
|
||||
return True
|
||||
|
||||
return any(ruleset_id in text_lower for ruleset_id in MANAGED_WAF_RULESET_IDS)
|
||||
|
||||
def analyze_file(
|
||||
self,
|
||||
path: str | Path,
|
||||
@@ -70,7 +91,7 @@ class WAFRuleAnalyzer:
|
||||
violations: List[RuleViolation] = []
|
||||
|
||||
# Example heuristic: no managed rules present
|
||||
if "managed_rules" not in text:
|
||||
if not self._has_managed_waf_rules(text):
|
||||
violations.append(
|
||||
RuleViolation(
|
||||
rule_id=None,
|
||||
@@ -102,7 +123,7 @@ class WAFRuleAnalyzer:
|
||||
violations=violations,
|
||||
metadata={
|
||||
"file_size": path.stat().st_size,
|
||||
"heuristics_version": "0.2.0",
|
||||
"heuristics_version": "0.3.0",
|
||||
},
|
||||
)
|
||||
|
||||
@@ -125,7 +146,7 @@ class WAFRuleAnalyzer:
|
||||
tmp_path = Path(source_name)
|
||||
violations: List[RuleViolation] = []
|
||||
|
||||
if "managed_rules" not in text:
|
||||
if not self._has_managed_waf_rules(text):
|
||||
violations.append(
|
||||
RuleViolation(
|
||||
rule_id=None,
|
||||
@@ -141,7 +162,7 @@ class WAFRuleAnalyzer:
|
||||
result = AnalysisResult(
|
||||
source=str(tmp_path),
|
||||
violations=violations,
|
||||
metadata={"heuristics_version": "0.2.0"},
|
||||
metadata={"heuristics_version": "0.3.0"},
|
||||
)
|
||||
|
||||
result.violations = result.top_violations(
|
||||
@@ -161,27 +182,37 @@ class WAFRuleAnalyzer:
|
||||
) -> AnalysisResult:
|
||||
"""
|
||||
Enhanced analysis using threat intelligence data.
|
||||
|
||||
|
||||
Args:
|
||||
path: WAF config file path
|
||||
threat_indicators: List of ThreatIndicator objects from threat_intel module
|
||||
min_severity: Minimum severity to include
|
||||
min_confidence: Minimum confidence threshold
|
||||
|
||||
|
||||
Returns:
|
||||
AnalysisResult with violations informed by threat intel
|
||||
"""
|
||||
# Start with base analysis
|
||||
base_result = self.analyze_file(path, min_severity=min_severity, min_confidence=min_confidence)
|
||||
|
||||
base_result = self.analyze_file(
|
||||
path, min_severity=min_severity, min_confidence=min_confidence
|
||||
)
|
||||
|
||||
path = Path(path)
|
||||
text = path.read_text(encoding="utf-8")
|
||||
text_lower = text.lower()
|
||||
|
||||
|
||||
# Check if threat indicators are addressed by existing rules
|
||||
critical_ips = [i for i in threat_indicators if i.indicator_type == "ip" and i.severity in ("critical", "high")]
|
||||
critical_patterns = [i for i in threat_indicators if i.indicator_type == "pattern" and i.severity in ("critical", "high")]
|
||||
|
||||
critical_ips = [
|
||||
i
|
||||
for i in threat_indicators
|
||||
if i.indicator_type == "ip" and i.severity in ("critical", "high")
|
||||
]
|
||||
critical_patterns = [
|
||||
i
|
||||
for i in threat_indicators
|
||||
if i.indicator_type == "pattern" and i.severity in ("critical", "high")
|
||||
]
|
||||
|
||||
# Check for IP blocking coverage
|
||||
if critical_ips:
|
||||
ip_block_present = "ip.src" in text_lower or "cf.client.ip" in text_lower
|
||||
@@ -197,14 +228,14 @@ class WAFRuleAnalyzer:
|
||||
hint=f"Add IP blocking rules for identified threat actors. Sample IPs: {', '.join(i.value for i in critical_ips[:3])}",
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
# Check for pattern-based attack coverage
|
||||
attack_types_seen = set()
|
||||
for ind in critical_patterns:
|
||||
for tag in ind.tags:
|
||||
if tag in ("sqli", "xss", "rce", "path_traversal"):
|
||||
attack_types_seen.add(tag)
|
||||
|
||||
|
||||
# Check managed ruleset coverage
|
||||
for attack_type in attack_types_seen:
|
||||
if attack_type not in text_lower and f'"{attack_type}"' not in text_lower:
|
||||
@@ -219,13 +250,12 @@ class WAFRuleAnalyzer:
|
||||
hint=f"Enable Cloudflare managed rules for {attack_type.upper()} protection.",
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
# Update metadata with threat intel stats
|
||||
base_result.metadata["threat_intel"] = {
|
||||
"critical_ips": len(critical_ips),
|
||||
"critical_patterns": len(critical_patterns),
|
||||
"attack_types_seen": list(attack_types_seen),
|
||||
}
|
||||
|
||||
return base_result
|
||||
|
||||
return base_result
|
||||
|
||||
632
mcp/waf_intelligence/mcp_server.py
Normal file
632
mcp/waf_intelligence/mcp_server.py
Normal file
@@ -0,0 +1,632 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import glob
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
from dataclasses import asdict
|
||||
from pathlib import Path
|
||||
from typing import Any, Callable, Dict, List, Optional
|
||||
|
||||
from cloudflare.layer0 import layer0_entry
|
||||
from cloudflare.layer0.shadow_classifier import ShadowEvalResult
|
||||
|
||||
from .orchestrator import ThreatAssessment, WAFInsight, WAFIntelligence
|
||||
|
||||
MAX_BYTES_DEFAULT = 32_000
|
||||
|
||||
|
||||
def _cloudflare_root() -> Path:
|
||||
# mcp_server.py -> waf_intelligence -> mcp -> cloudflare
|
||||
return Path(__file__).resolve().parents[2]
|
||||
|
||||
|
||||
def _max_bytes() -> int:
    """Resolve the response-size cap from the VM_MCP_MAX_BYTES env var.

    Returns MAX_BYTES_DEFAULT when the variable is unset, blank, or not an
    integer; otherwise the configured value, clamped up to at least 4096.
    """
    configured = (os.getenv("VM_MCP_MAX_BYTES") or "").strip()
    if not configured:
        return MAX_BYTES_DEFAULT
    try:
        value = int(configured)
    except ValueError:
        return MAX_BYTES_DEFAULT
    return max(4_096, value)
|
||||
|
||||
|
||||
def _redact(obj: Any) -> Any:
|
||||
sensitive_keys = ("token", "secret", "password", "private", "key", "certificate")
|
||||
|
||||
if isinstance(obj, dict):
|
||||
out: Dict[str, Any] = {}
|
||||
for k, v in obj.items():
|
||||
if any(s in str(k).lower() for s in sensitive_keys):
|
||||
out[k] = "<REDACTED>"
|
||||
else:
|
||||
out[k] = _redact(v)
|
||||
return out
|
||||
if isinstance(obj, list):
|
||||
return [_redact(v) for v in obj]
|
||||
if isinstance(obj, str):
|
||||
if obj.startswith("ghp_") or obj.startswith("github_pat_"):
|
||||
return "<REDACTED>"
|
||||
return obj
|
||||
return obj
|
||||
|
||||
|
||||
def _safe_json(payload: Dict[str, Any]) -> str:
    """Serialize *payload* to JSON, redacting secrets and capping size.

    The payload is first passed through _redact(). If its compact encoding
    fits within _max_bytes(), the pretty-printed (indent=2) form is
    returned; otherwise a small truncation notice that preserves the
    ok/summary/next_steps fields replaces it.
    """
    payload = _redact(payload)
    compact = json.dumps(payload, ensure_ascii=False, separators=(",", ":"), default=str)
    # Size check is on the UTF-8 byte length, not the character count.
    if len(compact.encode("utf-8")) <= _max_bytes():
        return json.dumps(payload, ensure_ascii=False, indent=2, default=str)

    notice = {
        "ok": payload.get("ok", True),
        "truncated": True,
        "summary": payload.get("summary", "Response exceeded max size; truncated."),
        "next_steps": payload.get(
            "next_steps",
            [
                "request fewer files/insights (limit=...)",
                "use higher min_severity to reduce output",
            ],
        ),
    }
    return json.dumps(notice, ensure_ascii=False, indent=2, default=str)
|
||||
|
||||
|
||||
def _mcp_text_result(payload: Dict[str, Any], *, is_error: bool = False) -> Dict[str, Any]:
    """Wrap *payload* as an MCP tool result with a single text content item.

    The MCP "isError" flag is only attached when is_error is True.
    """
    wrapped: Dict[str, Any] = {
        "content": [{"type": "text", "text": _safe_json(payload)}]
    }
    if is_error:
        wrapped["isError"] = True
    return wrapped
|
||||
|
||||
|
||||
def _insight_to_dict(insight: WAFInsight) -> Dict[str, Any]:
    """Convert a WAFInsight dataclass into a plain JSON-serializable dict."""
    return asdict(insight)
|
||||
|
||||
|
||||
def _assessment_to_dict(assessment: ThreatAssessment) -> Dict[str, Any]:
|
||||
violations = []
|
||||
if assessment.analysis_result and getattr(
|
||||
assessment.analysis_result, "violations", None
|
||||
):
|
||||
violations = list(assessment.analysis_result.violations)
|
||||
|
||||
severity_counts = {"error": 0, "warning": 0, "info": 0}
|
||||
for v in violations:
|
||||
sev = getattr(v, "severity", "info")
|
||||
if sev in severity_counts:
|
||||
severity_counts[sev] += 1
|
||||
|
||||
return {
|
||||
"risk_score": assessment.risk_score,
|
||||
"risk_level": assessment.risk_level,
|
||||
"classification_summary": assessment.classification_summary,
|
||||
"recommended_actions": assessment.recommended_actions,
|
||||
"analysis": {
|
||||
"has_config_analysis": assessment.analysis_result is not None,
|
||||
"violations_total": len(violations),
|
||||
"violations_by_severity": severity_counts,
|
||||
},
|
||||
"has_threat_intel": assessment.threat_report is not None,
|
||||
"generated_at": str(assessment.generated_at),
|
||||
}
|
||||
|
||||
|
||||
# MCP tool registry, advertised verbatim via the "tools/list" RPC method.
# Each entry's inputSchema is JSON Schema describing the tool's arguments.
TOOLS: List[Dict[str, Any]] = [
    {
        "name": "waf_capabilities",
        "description": "List available WAF Intelligence capabilities.",
        "inputSchema": {"type": "object", "properties": {}},
    },
    {
        # Legacy alias for waf_analyze: identical behavior, but takes
        # "severity_threshold" instead of "min_severity" (see main()).
        "name": "analyze_waf",
        "description": "Analyze Terraform WAF file(s) and return curated insights (legacy alias for waf_analyze).",
        "inputSchema": {
            "type": "object",
            "properties": {
                "file": {
                    "type": "string",
                    "description": "Single file path to analyze.",
                },
                "files": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "List of file paths or glob patterns to analyze.",
                },
                "limit": {
                    "type": "integer",
                    "default": 3,
                    "description": "Max insights per file.",
                },
                "severity_threshold": {
                    "type": "string",
                    "enum": ["info", "warning", "error"],
                    "default": "warning",
                    "description": "Minimum severity to include (alias for min_severity).",
                },
            },
        },
    },
    {
        "name": "waf_analyze",
        "description": "Analyze Terraform WAF file(s) and return curated insights (requires file or files).",
        "inputSchema": {
            "type": "object",
            "properties": {
                "file": {
                    "type": "string",
                    "description": "Single file path to analyze.",
                },
                "files": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "List of file paths or glob patterns to analyze.",
                },
                "limit": {
                    "type": "integer",
                    "default": 3,
                    "description": "Max insights per file.",
                },
                "min_severity": {
                    "type": "string",
                    "enum": ["info", "warning", "error"],
                    "default": "warning",
                    "description": "Minimum severity to include.",
                },
            },
        },
    },
    {
        "name": "waf_assess",
        "description": "Run a broader assessment (optionally includes threat intel collection).",
        "inputSchema": {
            "type": "object",
            "properties": {
                "waf_config_path": {
                    "type": "string",
                    "description": "Path to Terraform WAF config (default: terraform/waf.tf).",
                },
                "include_threat_intel": {
                    "type": "boolean",
                    "default": False,
                    "description": "If true, attempt to collect threat intel (may require network and credentials).",
                },
            },
        },
    },
    {
        "name": "waf_generate_gitops_proposals",
        "description": "Generate GitOps-ready rule proposals (best-effort; requires threat intel to produce output).",
        "inputSchema": {
            "type": "object",
            "properties": {
                "waf_config_path": {
                    "type": "string",
                    "description": "Path to Terraform WAF config (default: terraform/waf.tf).",
                },
                "include_threat_intel": {
                    "type": "boolean",
                    "default": True,
                    "description": "Attempt to collect threat intel before proposing rules.",
                },
                "max_proposals": {
                    "type": "integer",
                    "default": 5,
                    "description": "Maximum proposals to generate.",
                },
            },
        },
    },
]
|
||||
|
||||
|
||||
class WafIntelligenceTools:
    """Implementation of the MCP tools exposed by this server.

    Each public ``waf_*`` method returns a plain dict with the envelope
    fields ``ok``, ``summary``, ``data`` (optional), ``truncated`` and
    ``next_steps`` — serialized for the client by ``_mcp_text_result``.
    """

    def __init__(self) -> None:
        # workspace_root is the `cloudflare` directory; repo_root its parent.
        self.workspace_root = _cloudflare_root()
        self.repo_root = self.workspace_root.parent
        self.waf = WAFIntelligence(workspace_path=str(self.workspace_root))

    def _resolve_path(self, raw: str) -> Path:
        """Resolve *raw* against cwd, workspace root, then repo root.

        Absolute paths are returned unchanged. The first existing candidate
        wins; if none exists, the workspace-relative path is returned so the
        caller gets a deterministic "file not found" location.
        """
        path = Path(raw)
        if path.is_absolute():
            return path

        candidates = [
            Path.cwd() / path,
            self.workspace_root / path,
            self.repo_root / path,
        ]
        for candidate in candidates:
            if candidate.exists():
                return candidate
        return self.workspace_root / path

    def waf_capabilities(self) -> Dict[str, Any]:
        """Return the capability list advertised by the WAFIntelligence facade."""
        return {
            "ok": True,
            "summary": "WAF Intelligence capabilities.",
            "data": {"capabilities": self.waf.capabilities},
            "truncated": False,
            "next_steps": [
                "Call waf_analyze(file=..., limit=...) to analyze config.",
                "Call waf_assess(include_threat_intel=true) for a broader assessment.",
            ],
        }

    def waf_analyze(
        self,
        *,
        file: Optional[str] = None,
        files: Optional[List[str]] = None,
        limit: int = 3,
        min_severity: str = "warning",
    ) -> Dict[str, Any]:
        """Analyze one or more Terraform WAF files and return insights.

        ``files`` entries are expanded as glob patterns; ``file`` is taken
        literally. Duplicates are removed while preserving order. Missing
        files are reported per-entry rather than aborting the whole call.
        """
        paths: List[str] = []
        if files:
            for pattern in files:
                paths.extend(glob.glob(pattern))
        if file:
            paths.append(file)

        # Order-preserving de-duplication.
        seen = set()
        unique_paths: List[str] = []
        for p in paths:
            if p not in seen:
                seen.add(p)
                unique_paths.append(p)

        if not unique_paths:
            return {
                "ok": False,
                "summary": "Provide 'file' or 'files' to analyze.",
                "truncated": False,
                "next_steps": ["Call waf_analyze(file='terraform/waf.tf')"],
            }

        results: List[Dict[str, Any]] = []
        for p in unique_paths:
            path = self._resolve_path(p)
            if not path.exists():
                results.append(
                    {
                        "file": str(path),
                        "ok": False,
                        "summary": "File not found.",
                    }
                )
                continue

            insights = self.waf.analyze_and_recommend(
                str(path),
                limit=limit,
                min_severity=min_severity,
            )
            results.append(
                {
                    "file": str(path),
                    "ok": True,
                    "insights": [_insight_to_dict(i) for i in insights],
                }
            )

        # Overall ok only when every per-file result succeeded.
        ok = all(r.get("ok") for r in results)
        return {
            "ok": ok,
            "summary": f"Analyzed {len(results)} file(s).",
            "data": {"results": results},
            "truncated": False,
            "next_steps": [
                "Raise/lower min_severity or limit to tune output size.",
            ],
        }

    def waf_assess(
        self,
        *,
        waf_config_path: Optional[str] = None,
        include_threat_intel: bool = False,
    ) -> Dict[str, Any]:
        """Run a full assessment; threat intel collection is opt-in.

        A None config path is passed through so the facade can apply its
        own default (terraform/waf.tf per the tool schema).
        """
        waf_config_path_resolved = (
            str(self._resolve_path(waf_config_path)) if waf_config_path else None
        )
        assessment = self.waf.full_assessment(
            waf_config_path=waf_config_path_resolved,
            include_threat_intel=include_threat_intel,
        )
        return {
            "ok": True,
            "summary": "WAF assessment complete.",
            "data": _assessment_to_dict(assessment),
            "truncated": False,
            "next_steps": [
                "Call waf_generate_gitops_proposals(...) to draft Terraform rule proposals (best-effort).",
            ],
        }

    def waf_generate_gitops_proposals(
        self,
        *,
        waf_config_path: Optional[str] = None,
        include_threat_intel: bool = True,
        max_proposals: int = 5,
    ) -> Dict[str, Any]:
        """Run an assessment, then derive GitOps rule proposals from it.

        Proposals are generated from the assessment's threat report; with
        threat intel disabled or unavailable the proposal list may be empty
        (best-effort behavior, see next_steps in the response).
        """
        waf_config_path_resolved = (
            str(self._resolve_path(waf_config_path)) if waf_config_path else None
        )
        assessment = self.waf.full_assessment(
            waf_config_path=waf_config_path_resolved,
            include_threat_intel=include_threat_intel,
        )
        proposals = self.waf.generate_gitops_proposals(
            threat_report=assessment.threat_report,
            max_proposals=max_proposals,
        )
        return {
            "ok": True,
            "summary": f"Generated {len(proposals)} proposal(s).",
            "data": {
                "assessment": _assessment_to_dict(assessment),
                "proposals": proposals,
            },
            "truncated": False,
            "next_steps": [
                "If proposals are empty, enable threat intel and ensure required credentials/log sources exist.",
            ],
        }
|
||||
|
||||
|
||||
class StdioJsonRpc:
    """Minimal JSON-RPC transport over stdio supporting two framings.

    Framing is auto-detected from the first message and then locked in
    via ``self._mode``:
      - "line": newline-delimited JSON objects (one message per line);
      - "headers": LSP-style ``Content-Length`` header framing.
    """

    def __init__(self) -> None:
        # Binary streams: framing is defined in bytes, not text.
        self._in = sys.stdin.buffer
        self._out = sys.stdout.buffer
        self._mode: str | None = None  # "headers" | "line"

    def read_message(self) -> Optional[Dict[str, Any]]:
        """Read the next JSON-RPC message dict, or None on EOF/bad framing.

        Non-dict JSON values and undecodable lines are skipped. The first
        successfully parsed message decides the framing mode for the rest
        of the session.
        """
        while True:
            # Once line mode is locked in, each line is one JSON message.
            if self._mode == "line":
                line = self._in.readline()
                if not line:
                    return None
                raw = line.decode("utf-8", "replace").strip()
                if not raw:
                    continue
                try:
                    msg = json.loads(raw)
                except Exception:
                    # Ignore malformed lines and keep reading.
                    continue
                if isinstance(msg, dict):
                    return msg
                continue

            first = self._in.readline()
            if not first:
                return None  # EOF

            # Skip blank separator lines between header-framed messages.
            if first in (b"\r\n", b"\n"):
                continue

            # Auto-detect newline-delimited JSON framing.
            if self._mode is None and first.lstrip().startswith(b"{"):
                try:
                    msg = json.loads(first.decode("utf-8", "replace"))
                except Exception:
                    msg = None
                if isinstance(msg, dict):
                    self._mode = "line"
                    return msg

            # Otherwise treat `first` as the first header line.
            headers: Dict[str, str] = {}
            try:
                text = first.decode("utf-8", "replace").strip()
            except Exception:
                continue
            if ":" not in text:
                continue
            k, v = text.split(":", 1)
            headers[k.lower().strip()] = v.strip()

            # Consume remaining header lines until the blank separator.
            while True:
                line = self._in.readline()
                if not line:
                    return None
                if line in (b"\r\n", b"\n"):
                    break
                try:
                    text = line.decode("utf-8", "replace").strip()
                except Exception:
                    continue
                if ":" not in text:
                    continue
                k, v = text.split(":", 1)
                headers[k.lower().strip()] = v.strip()

            if "content-length" not in headers:
                return None
            try:
                length = int(headers["content-length"])
            except ValueError:
                return None
            body = self._in.read(length)
            if not body:
                return None
            # First complete header-framed message locks in headers mode.
            self._mode = "headers"
            msg = json.loads(body.decode("utf-8", "replace"))
            if isinstance(msg, dict):
                return msg
            return None

    def write_message(self, message: Dict[str, Any]) -> None:
        """Serialize *message* using the detected framing and flush.

        Defaults to Content-Length framing when no mode was detected yet.
        """
        if self._mode == "line":
            payload = json.dumps(
                message, ensure_ascii=False, separators=(",", ":"), default=str
            ).encode("utf-8")
            self._out.write(payload + b"\n")
            self._out.flush()
            return

        body = json.dumps(
            message, ensure_ascii=False, separators=(",", ":"), default=str
        ).encode("utf-8")
        header = f"Content-Length: {len(body)}\r\n\r\n".encode("utf-8")
        self._out.write(header)
        self._out.write(body)
        self._out.flush()
|
||||
|
||||
|
||||
def main() -> None:
    """Run the MCP stdio server loop until EOF on stdin.

    Handles the MCP methods ``initialize``, ``tools/list`` and
    ``tools/call``; every tool call is first gated through the Layer 0
    shadow-eval (``layer0_entry``) before dispatching to a handler.
    Notifications (messages without an id) are silently ignored.
    """
    tools = WafIntelligenceTools()
    rpc = StdioJsonRpc()

    # Tool-name -> handler dispatch table. "analyze_waf" is the legacy
    # alias: it maps severity_threshold onto min_severity explicitly.
    handlers: Dict[str, Callable[[Dict[str, Any]], Dict[str, Any]]] = {
        "waf_capabilities": lambda a: tools.waf_capabilities(),
        "analyze_waf": lambda a: tools.waf_analyze(
            file=a.get("file"),
            files=a.get("files"),
            limit=int(a.get("limit", 3)),
            min_severity=str(a.get("severity_threshold", "warning")),
        ),
        "waf_analyze": lambda a: tools.waf_analyze(**a),
        "waf_assess": lambda a: tools.waf_assess(**a),
        "waf_generate_gitops_proposals": lambda a: tools.waf_generate_gitops_proposals(
            **a
        ),
    }

    while True:
        msg = rpc.read_message()
        if msg is None:
            return  # EOF or unrecoverable framing error: shut down.

        method = msg.get("method")
        msg_id = msg.get("id")
        params = msg.get("params") or {}

        try:
            if method == "initialize":
                result = {
                    "protocolVersion": "2024-11-05",
                    "serverInfo": {"name": "waf_intelligence", "version": "0.1.0"},
                    "capabilities": {"tools": {}},
                }
                rpc.write_message({"jsonrpc": "2.0", "id": msg_id, "result": result})
                continue

            if method == "tools/list":
                rpc.write_message(
                    {"jsonrpc": "2.0", "id": msg_id, "result": {"tools": TOOLS}}
                )
                continue

            if method == "tools/call":
                tool_name = str(params.get("name") or "")
                args = params.get("arguments") or {}

                # Layer 0 pre-boot gate: only HANDOFF_TO_LAYER1 proceeds.
                routing_action, shadow = layer0_entry(
                    _shadow_query_repr(tool_name, args)
                )
                if routing_action != "HANDOFF_TO_LAYER1":
                    rpc.write_message(
                        {
                            "jsonrpc": "2.0",
                            "id": msg_id,
                            "result": _mcp_text_result(
                                _layer0_payload(routing_action, shadow), is_error=True
                            ),
                        }
                    )
                    continue

                handler = handlers.get(tool_name)
                if not handler:
                    rpc.write_message(
                        {
                            "jsonrpc": "2.0",
                            "id": msg_id,
                            "result": _mcp_text_result(
                                {
                                    "ok": False,
                                    "summary": f"Unknown tool: {tool_name}",
                                    "data": {"known_tools": sorted(handlers.keys())},
                                    "truncated": False,
                                    "next_steps": ["Call tools/list"],
                                },
                                is_error=True,
                            ),
                        }
                    )
                    continue

                payload = handler(args)
                # Tool-level failure (ok=False) is surfaced as an MCP error
                # result rather than a JSON-RPC protocol error.
                is_error = (
                    not bool(payload.get("ok", True))
                    if isinstance(payload, dict)
                    else False
                )
                rpc.write_message(
                    {
                        "jsonrpc": "2.0",
                        "id": msg_id,
                        "result": _mcp_text_result(payload, is_error=is_error),
                    }
                )
                continue

            # Ignore notifications.
            if msg_id is None:
                continue

            rpc.write_message(
                {
                    "jsonrpc": "2.0",
                    "id": msg_id,
                    "result": _mcp_text_result(
                        {"ok": False, "summary": f"Unsupported method: {method}"},
                        is_error=True,
                    ),
                }
            )
        except Exception as e:  # noqa: BLE001
            # Top-level boundary: report the failure to the caller (when
            # there is an id to respond to) instead of killing the server.
            if msg_id is not None:
                rpc.write_message(
                    {
                        "jsonrpc": "2.0",
                        "id": msg_id,
                        "result": _mcp_text_result(
                            {"ok": False, "summary": f"fatal error: {e}"},
                            is_error=True,
                        ),
                    }
                )
|
||||
|
||||
|
||||
def _shadow_query_repr(tool_name: str, tool_args: Dict[str, Any]) -> str:
|
||||
if tool_name == "waf_capabilities":
|
||||
return "List WAF Intelligence capabilities."
|
||||
try:
|
||||
return f"{tool_name}: {json.dumps(tool_args, sort_keys=True, default=str)}"
|
||||
except Exception:
|
||||
return f"{tool_name}: {str(tool_args)}"
|
||||
|
||||
|
||||
def _layer0_payload(routing_action: str, shadow: ShadowEvalResult) -> Dict[str, Any]:
|
||||
if routing_action == "FAIL_CLOSED":
|
||||
return {"ok": False, "summary": "Layer 0: cannot comply with this request."}
|
||||
if routing_action == "HANDOFF_TO_GUARDRAILS":
|
||||
reason = shadow.reason or "governance_violation"
|
||||
return {
|
||||
"ok": False,
|
||||
"summary": f"Layer 0: governance violation detected ({reason}).",
|
||||
}
|
||||
if routing_action == "PROMPT_FOR_CLARIFICATION":
|
||||
return {
|
||||
"ok": False,
|
||||
"summary": "Layer 0: request is ambiguous. Please clarify and retry.",
|
||||
}
|
||||
return {"ok": False, "summary": "Layer 0: unrecognized routing action; refusing."}
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Entry point: run the stdio JSON-RPC server when invoked directly.
    main()
|
||||
@@ -6,27 +6,26 @@ from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from mcp.waf_intelligence.analyzer import AnalysisResult, RuleViolation, WAFRuleAnalyzer
|
||||
from mcp.waf_intelligence.compliance import ComplianceMapper, FrameworkMapping
|
||||
from mcp.waf_intelligence.generator import GeneratedRule, WAFRuleGenerator
|
||||
from .analyzer import AnalysisResult, RuleViolation, WAFRuleAnalyzer
|
||||
from .compliance import ComplianceMapper, FrameworkMapping
|
||||
from .generator import GeneratedRule, WAFRuleGenerator
|
||||
|
||||
# Optional advanced modules (Phase 7)
|
||||
try:
|
||||
from mcp.waf_intelligence.threat_intel import (
|
||||
from .threat_intel import (
|
||||
ThreatIntelCollector,
|
||||
ThreatIntelReport,
|
||||
ThreatIndicator,
|
||||
)
|
||||
|
||||
_HAS_THREAT_INTEL = True
|
||||
except ImportError:
|
||||
_HAS_THREAT_INTEL = False
|
||||
ThreatIntelCollector = None
|
||||
|
||||
try:
|
||||
from mcp.waf_intelligence.classifier import (
|
||||
ThreatClassifier,
|
||||
ClassificationResult,
|
||||
)
|
||||
from .classifier import ThreatClassifier
|
||||
|
||||
_HAS_CLASSIFIER = True
|
||||
except ImportError:
|
||||
_HAS_CLASSIFIER = False
|
||||
@@ -45,14 +44,14 @@ class WAFInsight:
|
||||
@dataclass
|
||||
class ThreatAssessment:
|
||||
"""Phase 7: Comprehensive threat assessment result."""
|
||||
|
||||
|
||||
analysis_result: Optional[AnalysisResult] = None
|
||||
threat_report: Optional[Any] = None # ThreatIntelReport when available
|
||||
classification_summary: Dict[str, int] = field(default_factory=dict)
|
||||
risk_score: float = 0.0
|
||||
recommended_actions: List[str] = field(default_factory=list)
|
||||
generated_at: datetime = field(default_factory=datetime.utcnow)
|
||||
|
||||
|
||||
@property
|
||||
def risk_level(self) -> str:
|
||||
if self.risk_score >= 0.8:
|
||||
@@ -81,22 +80,22 @@ class WAFIntelligence:
|
||||
enable_ml_classifier: bool = True,
|
||||
) -> None:
|
||||
self.workspace = Path(workspace_path) if workspace_path else Path.cwd()
|
||||
|
||||
|
||||
# Core components
|
||||
self.analyzer = WAFRuleAnalyzer()
|
||||
self.generator = WAFRuleGenerator()
|
||||
self.mapper = ComplianceMapper()
|
||||
|
||||
|
||||
# Phase 7 components (optional)
|
||||
self.threat_intel: Optional[Any] = None
|
||||
self.classifier: Optional[Any] = None
|
||||
|
||||
|
||||
if enable_threat_intel and _HAS_THREAT_INTEL:
|
||||
try:
|
||||
self.threat_intel = ThreatIntelCollector()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
if enable_ml_classifier and _HAS_CLASSIFIER:
|
||||
try:
|
||||
self.classifier = ThreatClassifier()
|
||||
@@ -149,24 +148,24 @@ class WAFIntelligence:
|
||||
) -> Optional[Any]:
|
||||
"""
|
||||
Collect threat intelligence from logs and external feeds.
|
||||
|
||||
|
||||
Args:
|
||||
log_paths: Paths to Cloudflare log files
|
||||
max_indicators: Maximum indicators to collect
|
||||
|
||||
|
||||
Returns:
|
||||
ThreatIntelReport or None if unavailable
|
||||
"""
|
||||
if not self.threat_intel:
|
||||
return None
|
||||
|
||||
|
||||
# Default log paths
|
||||
if log_paths is None:
|
||||
log_paths = [
|
||||
str(self.workspace / "logs"),
|
||||
"/var/log/cloudflare",
|
||||
]
|
||||
|
||||
|
||||
return self.threat_intel.collect(
|
||||
log_paths=log_paths,
|
||||
max_indicators=max_indicators,
|
||||
@@ -175,16 +174,16 @@ class WAFIntelligence:
|
||||
def classify_threat(self, payload: str) -> Optional[Any]:
|
||||
"""
|
||||
Classify a payload using ML classifier.
|
||||
|
||||
|
||||
Args:
|
||||
payload: Request payload to classify
|
||||
|
||||
|
||||
Returns:
|
||||
ClassificationResult or None
|
||||
"""
|
||||
if not self.classifier:
|
||||
return None
|
||||
|
||||
|
||||
return self.classifier.classify(payload)
|
||||
|
||||
def full_assessment(
|
||||
@@ -195,51 +194,52 @@ class WAFIntelligence:
|
||||
) -> ThreatAssessment:
|
||||
"""
|
||||
Phase 7: Perform comprehensive threat assessment.
|
||||
|
||||
|
||||
Combines:
|
||||
- WAF configuration analysis
|
||||
- Threat intelligence collection
|
||||
- ML classification summary
|
||||
- Risk scoring
|
||||
|
||||
|
||||
Args:
|
||||
waf_config_path: Path to WAF Terraform file
|
||||
log_paths: Paths to log files
|
||||
include_threat_intel: Whether to collect threat intel
|
||||
|
||||
|
||||
Returns:
|
||||
ThreatAssessment with full analysis results
|
||||
"""
|
||||
assessment = ThreatAssessment()
|
||||
risk_factors: List[float] = []
|
||||
recommendations: List[str] = []
|
||||
|
||||
|
||||
# 1. Analyze WAF configuration
|
||||
if waf_config_path is None:
|
||||
waf_config_path = str(self.workspace / "terraform" / "waf.tf")
|
||||
|
||||
|
||||
if Path(waf_config_path).exists():
|
||||
assessment.analysis_result = self.analyzer.analyze_file(
|
||||
waf_config_path,
|
||||
min_severity="info",
|
||||
)
|
||||
|
||||
|
||||
# Calculate risk from violations
|
||||
severity_weights = {"error": 0.8, "warning": 0.5, "info": 0.2}
|
||||
for violation in assessment.analysis_result.violations:
|
||||
weight = severity_weights.get(violation.severity, 0.3)
|
||||
risk_factors.append(weight)
|
||||
|
||||
|
||||
# Generate recommendations
|
||||
critical_count = sum(
|
||||
1 for v in assessment.analysis_result.violations
|
||||
1
|
||||
for v in assessment.analysis_result.violations
|
||||
if v.severity == "error"
|
||||
)
|
||||
if critical_count > 0:
|
||||
recommendations.append(
|
||||
f"🔴 Fix {critical_count} critical WAF configuration issues"
|
||||
)
|
||||
|
||||
|
||||
# 2. Collect threat intelligence
|
||||
if include_threat_intel and self.threat_intel:
|
||||
try:
|
||||
@@ -247,52 +247,55 @@ class WAFIntelligence:
|
||||
log_paths=log_paths,
|
||||
max_indicators=50,
|
||||
)
|
||||
|
||||
|
||||
if assessment.threat_report:
|
||||
indicators = assessment.threat_report.indicators
|
||||
|
||||
|
||||
# Count by severity
|
||||
severity_counts = {"critical": 0, "high": 0, "medium": 0, "low": 0}
|
||||
for ind in indicators:
|
||||
sev = getattr(ind, "severity", "low")
|
||||
severity_counts[sev] = severity_counts.get(sev, 0) + 1
|
||||
|
||||
|
||||
# Add to classification summary
|
||||
assessment.classification_summary["threat_indicators"] = len(indicators)
|
||||
assessment.classification_summary["threat_indicators"] = len(
|
||||
indicators
|
||||
)
|
||||
assessment.classification_summary.update(severity_counts)
|
||||
|
||||
|
||||
# Calculate threat intel risk
|
||||
if indicators:
|
||||
critical_ratio = severity_counts["critical"] / len(indicators)
|
||||
high_ratio = severity_counts["high"] / len(indicators)
|
||||
risk_factors.append(critical_ratio * 0.9 + high_ratio * 0.7)
|
||||
|
||||
|
||||
if severity_counts["critical"] > 0:
|
||||
recommendations.append(
|
||||
f"🚨 Block {severity_counts['critical']} critical threat IPs immediately"
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
# 3. ML classification summary (from any collected data)
|
||||
if self.classifier and assessment.threat_report:
|
||||
try:
|
||||
attack_types = {"sqli": 0, "xss": 0, "rce": 0, "clean": 0, "unknown": 0}
|
||||
|
||||
|
||||
indicators = assessment.threat_report.indicators
|
||||
pattern_indicators = [
|
||||
i for i in indicators
|
||||
i
|
||||
for i in indicators
|
||||
if getattr(i, "indicator_type", "") == "pattern"
|
||||
]
|
||||
|
||||
|
||||
for ind in pattern_indicators[:20]: # Sample first 20
|
||||
result = self.classifier.classify(ind.value)
|
||||
if result:
|
||||
label = result.label
|
||||
attack_types[label] = attack_types.get(label, 0) + 1
|
||||
|
||||
|
||||
assessment.classification_summary["ml_classifications"] = attack_types
|
||||
|
||||
|
||||
# Add ML risk factor
|
||||
dangerous = attack_types.get("sqli", 0) + attack_types.get("rce", 0)
|
||||
if dangerous > 5:
|
||||
@@ -302,15 +305,17 @@ class WAFIntelligence:
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
# 4. Calculate final risk score
|
||||
if risk_factors:
|
||||
assessment.risk_score = min(1.0, sum(risk_factors) / max(len(risk_factors), 1))
|
||||
assessment.risk_score = min(
|
||||
1.0, sum(risk_factors) / max(len(risk_factors), 1)
|
||||
)
|
||||
else:
|
||||
assessment.risk_score = 0.3 # Baseline risk
|
||||
|
||||
|
||||
assessment.recommended_actions = recommendations
|
||||
|
||||
|
||||
return assessment
|
||||
|
||||
def generate_gitops_proposals(
|
||||
@@ -320,42 +325,44 @@ class WAFIntelligence:
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Generate GitOps-ready rule proposals.
|
||||
|
||||
|
||||
Args:
|
||||
threat_report: ThreatIntelReport to use
|
||||
max_proposals: Maximum proposals to generate
|
||||
|
||||
|
||||
Returns:
|
||||
List of proposal dicts ready for MR creation
|
||||
"""
|
||||
proposals: List[Dict[str, Any]] = []
|
||||
|
||||
|
||||
if not threat_report:
|
||||
return proposals
|
||||
|
||||
|
||||
try:
|
||||
# Import proposer dynamically
|
||||
from gitops.waf_rule_proposer import WAFRuleProposer
|
||||
|
||||
|
||||
proposer = WAFRuleProposer(workspace_path=str(self.workspace))
|
||||
batch = proposer.generate_proposals(
|
||||
threat_report=threat_report,
|
||||
max_proposals=max_proposals,
|
||||
)
|
||||
|
||||
|
||||
for proposal in batch.proposals:
|
||||
proposals.append({
|
||||
"name": proposal.rule_name,
|
||||
"type": proposal.rule_type,
|
||||
"severity": proposal.severity,
|
||||
"confidence": proposal.confidence,
|
||||
"terraform": proposal.terraform_code,
|
||||
"justification": proposal.justification,
|
||||
"auto_deploy": proposal.auto_deploy_eligible,
|
||||
})
|
||||
proposals.append(
|
||||
{
|
||||
"name": proposal.rule_name,
|
||||
"type": proposal.rule_type,
|
||||
"severity": proposal.severity,
|
||||
"confidence": proposal.confidence,
|
||||
"terraform": proposal.terraform_code,
|
||||
"justification": proposal.justification,
|
||||
"auto_deploy": proposal.auto_deploy_eligible,
|
||||
}
|
||||
)
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
return proposals
|
||||
|
||||
@property
|
||||
|
||||
326
mcp/waf_intelligence/server.py
Executable file → Normal file
326
mcp/waf_intelligence/server.py
Executable file → Normal file
@@ -1,326 +1,14 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
WAF Intelligence MCP Server for VS Code Copilot.
|
||||
from __future__ import annotations
|
||||
|
||||
This implements the Model Context Protocol (MCP) stdio interface
|
||||
so VS Code can communicate with your WAF Intelligence system.
|
||||
"""
|
||||
Deprecated entrypoint kept for older editor configs.
|
||||
|
||||
Use `python3 -m mcp.waf_intelligence.mcp_server` (or `waf_intel_mcp.py`) instead.
|
||||
"""
|
||||
|
||||
import json
|
||||
import sys
|
||||
from typing import Any
|
||||
|
||||
# Add parent to path for imports
|
||||
sys.path.insert(0, '/Users/sovereign/Desktop/CLOUDFLARE')
|
||||
|
||||
from mcp.waf_intelligence.orchestrator import WAFIntelligence
|
||||
from mcp.waf_intelligence.analyzer import WAFRuleAnalyzer
|
||||
from layer0 import layer0_entry
|
||||
from layer0.shadow_classifier import ShadowEvalResult
|
||||
|
||||
|
||||
class WAFIntelligenceMCPServer:
|
||||
"""MCP Server wrapper for WAF Intelligence."""
|
||||
|
||||
def __init__(self):
|
||||
self.waf = WAFIntelligence()
|
||||
self.analyzer = WAFRuleAnalyzer()
|
||||
|
||||
def get_capabilities(self) -> dict:
|
||||
"""Return server capabilities."""
|
||||
return {
|
||||
"tools": [
|
||||
{
|
||||
"name": "waf_analyze",
|
||||
"description": "Analyze WAF logs and detect attack patterns",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"log_file": {
|
||||
"type": "string",
|
||||
"description": "Path to WAF log file (optional)"
|
||||
},
|
||||
"zone_id": {
|
||||
"type": "string",
|
||||
"description": "Cloudflare zone ID (optional)"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "waf_assess",
|
||||
"description": "Run full security assessment with threat intel and ML classification",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"zone_id": {
|
||||
"type": "string",
|
||||
"description": "Cloudflare zone ID"
|
||||
}
|
||||
},
|
||||
"required": ["zone_id"]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "waf_generate_rules",
|
||||
"description": "Generate Terraform WAF rules from threat intelligence",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"zone_id": {
|
||||
"type": "string",
|
||||
"description": "Cloudflare zone ID"
|
||||
},
|
||||
"min_confidence": {
|
||||
"type": "number",
|
||||
"description": "Minimum confidence threshold (0-1)",
|
||||
"default": 0.7
|
||||
}
|
||||
},
|
||||
"required": ["zone_id"]
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "waf_capabilities",
|
||||
"description": "List available WAF Intelligence capabilities",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
def handle_tool_call(self, name: str, arguments: dict) -> dict:
|
||||
"""Handle a tool invocation."""
|
||||
try:
|
||||
if name == "waf_capabilities":
|
||||
return {
|
||||
"content": [
|
||||
{
|
||||
"type": "text",
|
||||
"text": json.dumps({
|
||||
"capabilities": self.waf.capabilities,
|
||||
"status": "operational"
|
||||
}, indent=2)
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
elif name == "waf_analyze":
|
||||
log_file = arguments.get("log_file")
|
||||
zone_id = arguments.get("zone_id")
|
||||
|
||||
if log_file:
|
||||
result = self.analyzer.analyze_log_file(log_file)
|
||||
else:
|
||||
result = {
|
||||
"message": "No log file provided. Use zone_id for live analysis.",
|
||||
"capabilities": self.waf.capabilities
|
||||
}
|
||||
|
||||
return {
|
||||
"content": [
|
||||
{"type": "text", "text": json.dumps(result, indent=2, default=str)}
|
||||
]
|
||||
}
|
||||
|
||||
elif name == "waf_assess":
|
||||
zone_id = arguments.get("zone_id")
|
||||
# full_assessment uses workspace paths, not zone_id
|
||||
assessment = self.waf.full_assessment(
|
||||
include_threat_intel=True
|
||||
)
|
||||
# Build result from ThreatAssessment dataclass
|
||||
result = {
|
||||
"zone_id": zone_id,
|
||||
"risk_score": assessment.risk_score,
|
||||
"risk_level": assessment.risk_level,
|
||||
"classification_summary": assessment.classification_summary,
|
||||
"recommended_actions": assessment.recommended_actions[:10], # Top 10
|
||||
"has_analysis": assessment.analysis_result is not None,
|
||||
"has_threat_intel": assessment.threat_report is not None,
|
||||
"generated_at": str(assessment.generated_at)
|
||||
}
|
||||
|
||||
return {
|
||||
"content": [
|
||||
{"type": "text", "text": json.dumps(result, indent=2, default=str)}
|
||||
]
|
||||
}
|
||||
|
||||
elif name == "waf_generate_rules":
|
||||
zone_id = arguments.get("zone_id")
|
||||
min_confidence = arguments.get("min_confidence", 0.7)
|
||||
|
||||
# Generate proposals (doesn't use zone_id directly)
|
||||
proposals = self.waf.generate_gitops_proposals(
|
||||
max_proposals=5
|
||||
)
|
||||
|
||||
result = {
|
||||
"zone_id": zone_id,
|
||||
"min_confidence": min_confidence,
|
||||
"proposals_count": len(proposals),
|
||||
"proposals": proposals
|
||||
}
|
||||
|
||||
return {
|
||||
"content": [
|
||||
{"type": "text", "text": json.dumps(result, indent=2, default=str) if proposals else "No rules generated (no threat data available)"}
|
||||
]
|
||||
}
|
||||
|
||||
else:
|
||||
return {
|
||||
"content": [
|
||||
{"type": "text", "text": f"Unknown tool: {name}"}
|
||||
],
|
||||
"isError": True
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
return {
|
||||
"content": [
|
||||
{"type": "text", "text": f"Error: {str(e)}"}
|
||||
],
|
||||
"isError": True
|
||||
}
|
||||
|
||||
def run(self):
|
||||
"""Run the MCP server (stdio mode)."""
|
||||
# Send server info
|
||||
server_info = {
|
||||
"jsonrpc": "2.0",
|
||||
"method": "initialized",
|
||||
"params": {
|
||||
"serverInfo": {
|
||||
"name": "waf-intelligence",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
"capabilities": self.get_capabilities()
|
||||
}
|
||||
}
|
||||
|
||||
# Main loop - read JSON-RPC messages from stdin
|
||||
for line in sys.stdin:
|
||||
try:
|
||||
message = json.loads(line.strip())
|
||||
|
||||
if message.get("method") == "initialize":
|
||||
response = {
|
||||
"jsonrpc": "2.0",
|
||||
"id": message.get("id"),
|
||||
"result": {
|
||||
"protocolVersion": "2024-11-05",
|
||||
"serverInfo": {
|
||||
"name": "waf-intelligence",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
"capabilities": {
|
||||
"tools": {}
|
||||
}
|
||||
}
|
||||
}
|
||||
print(json.dumps(response), flush=True)
|
||||
|
||||
elif message.get("method") == "tools/list":
|
||||
response = {
|
||||
"jsonrpc": "2.0",
|
||||
"id": message.get("id"),
|
||||
"result": self.get_capabilities()
|
||||
}
|
||||
print(json.dumps(response), flush=True)
|
||||
|
||||
elif message.get("method") == "tools/call":
|
||||
params = message.get("params", {})
|
||||
tool_name = params.get("name")
|
||||
tool_args = params.get("arguments", {})
|
||||
|
||||
# Layer 0: pre-boot Shadow Eval gate before handling tool calls.
|
||||
routing_action, shadow = layer0_entry(_shadow_query_repr(tool_name, tool_args))
|
||||
if routing_action != "HANDOFF_TO_LAYER1":
|
||||
response = _layer0_mcp_response(routing_action, shadow, message.get("id"))
|
||||
print(json.dumps(response), flush=True)
|
||||
continue
|
||||
|
||||
result = self.handle_tool_call(tool_name, tool_args)
|
||||
|
||||
response = {
|
||||
"jsonrpc": "2.0",
|
||||
"id": message.get("id"),
|
||||
"result": result
|
||||
}
|
||||
print(json.dumps(response), flush=True)
|
||||
|
||||
elif message.get("method") == "notifications/initialized":
|
||||
# Client acknowledged initialization
|
||||
pass
|
||||
|
||||
else:
|
||||
# Unknown method
|
||||
response = {
|
||||
"jsonrpc": "2.0",
|
||||
"id": message.get("id"),
|
||||
"error": {
|
||||
"code": -32601,
|
||||
"message": f"Method not found: {message.get('method')}"
|
||||
}
|
||||
}
|
||||
print(json.dumps(response), flush=True)
|
||||
|
||||
except json.JSONDecodeError:
|
||||
continue
|
||||
except Exception as e:
|
||||
error_response = {
|
||||
"jsonrpc": "2.0",
|
||||
"id": None,
|
||||
"error": {
|
||||
"code": -32603,
|
||||
"message": str(e)
|
||||
}
|
||||
}
|
||||
print(json.dumps(error_response), flush=True)
|
||||
|
||||
from .mcp_server import main
|
||||
|
||||
if __name__ == "__main__":
|
||||
server = WAFIntelligenceMCPServer()
|
||||
server.run()
|
||||
main()
|
||||
|
||||
|
||||
def _shadow_query_repr(tool_name: str, tool_args: dict) -> str:
|
||||
"""Build a textual representation of the tool call for Layer 0 classification."""
|
||||
try:
|
||||
return f"{tool_name}: {json.dumps(tool_args, sort_keys=True)}"
|
||||
except TypeError:
|
||||
return f"{tool_name}: {str(tool_args)}"
|
||||
|
||||
|
||||
def _layer0_mcp_response(routing_action: str, shadow: ShadowEvalResult, msg_id: Any) -> dict:
|
||||
"""
|
||||
Map Layer 0 outcomes to MCP responses.
|
||||
Catastrophic/forbidden/ambiguous short-circuit with minimal disclosure.
|
||||
"""
|
||||
base = {"jsonrpc": "2.0", "id": msg_id}
|
||||
|
||||
if routing_action == "FAIL_CLOSED":
|
||||
base["error"] = {"code": -32000, "message": "Layer 0: cannot comply with this request."}
|
||||
return base
|
||||
|
||||
if routing_action == "HANDOFF_TO_GUARDRAILS":
|
||||
reason = shadow.reason or "governance_violation"
|
||||
base["error"] = {
|
||||
"code": -32001,
|
||||
"message": f"Layer 0: governance violation detected ({reason}).",
|
||||
}
|
||||
return base
|
||||
|
||||
if routing_action == "PROMPT_FOR_CLARIFICATION":
|
||||
base["error"] = {
|
||||
"code": -32002,
|
||||
"message": "Layer 0: request is ambiguous. Please clarify and retry.",
|
||||
}
|
||||
return base
|
||||
|
||||
base["error"] = {"code": -32099, "message": "Layer 0: unrecognized routing action; refusing."}
|
||||
return base
|
||||
|
||||
Reference in New Issue
Block a user