#!/usr/bin/env python3
"""MCP server exposing Terraform WAF analysis as the `analyze_waf` tool, gated by a Layer 0 shadow-evaluation check."""

from __future__ import annotations

import glob
from dataclasses import asdict
from typing import Any, Dict, List

from modelcontextprotocol.python import Server

from mcp.waf_intelligence.orchestrator import WAFInsight, WAFIntelligence
from layer0 import layer0_entry
from layer0.shadow_classifier import ShadowEvalResult
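
# Interfaces assumed from the imports above, as they are used in this module:
#   - layer0_entry(text) is expected to return a (routing_action, ShadowEvalResult) tuple.
#   - WAFIntelligence().analyze_and_recommend(path, limit=..., min_severity=...) is expected
#     to return a list of WAFInsight dataclass instances.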

server = Server("waf_intel")


def _insight_to_dict(insight: WAFInsight) -> Dict[str, Any]:
    """Convert a WAFInsight dataclass into a plain dict."""
    return asdict(insight)


@server.tool()
async def analyze_waf(
    file: str | None = None,
    files: List[str] | None = None,
    limit: int = 3,
    severity_threshold: str = "warning",
) -> Dict[str, Any]:
    """
    Analyze one or more Terraform WAF files and return curated insights.

    Args:
        file: Single file path (e.g. "terraform/waf.tf").
        files: Optional list of file paths or glob patterns (e.g. ["terraform/waf*.tf"]).
        limit: Max number of high-priority insights to return per analyzed file.
        severity_threshold: Minimum severity to include ("info", "warning", "error").

    Returns:
        {
            "results": [
                {
                    "file": "...",
                    "insights": [ ... ]
                },
                ...
            ]
        }
    """
    # Layer 0 gate: shadow-evaluate the request before doing any analysis.
    routing_action, shadow = layer0_entry(_shadow_repr(file, files, limit, severity_threshold))
    if routing_action != "HANDOFF_TO_LAYER1":
        _raise_layer0(routing_action, shadow)

    # Collect candidate paths, expanding glob patterns given in `files`.
    paths: List[str] = []
    if files:
        for pattern in files:
            for matched in glob.glob(pattern):
                paths.append(matched)
    if file:
        paths.append(file)

    # De-duplicate while preserving the order in which paths were supplied.
    seen = set()
    unique_paths: List[str] = []
    for p in paths:
        if p not in seen:
            seen.add(p)
            unique_paths.append(p)

    if not unique_paths:
        raise ValueError("Please provide 'file' or 'files' to analyze.")

    intel = WAFIntelligence()
    results: List[Dict[str, Any]] = []

    # Analyze each file independently and collect its insights.
    for path in unique_paths:
        insights: List[WAFInsight] = intel.analyze_and_recommend(
            path,
            limit=limit,
            min_severity=severity_threshold,
        )
        results.append(
            {
                "file": path,
                "insights": [_insight_to_dict(insight) for insight in insights],
            }
        )

    return {"results": results}
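
# Example invocation, for illustration only (the paths below are hypothetical, and the exact
# insight fields depend on the WAFInsight dataclass):
#
#     result = await analyze_waf(files=["terraform/waf*.tf"], limit=5, severity_threshold="error")
#     # result -> {"results": [{"file": "terraform/waf-prod.tf", "insights": [...]}, ...]}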


def _shadow_repr(file: str | None, files: List[str] | None, limit: int, severity: str) -> str:
    """Build a compact textual form of the request for Layer 0 shadow evaluation."""
    try:
        return f"analyze_waf: file={file}, files={files}, limit={limit}, severity={severity}"
    except Exception:
        return "analyze_waf"


def _raise_layer0(routing_action: str, shadow: ShadowEvalResult) -> None:
    """Translate a Layer 0 routing action into a user-facing error."""
    if routing_action == "FAIL_CLOSED":
        raise ValueError("Layer 0: cannot comply with this request.")
    if routing_action == "HANDOFF_TO_GUARDRAILS":
        reason = shadow.reason or "governance_violation"
        raise ValueError(f"Layer 0: governance violation detected ({reason}).")
    if routing_action == "PROMPT_FOR_CLARIFICATION":
        raise ValueError("Layer 0: request is ambiguous. Please clarify and retry.")
    raise ValueError("Layer 0: unrecognized routing action; refusing request.")


if __name__ == "__main__":
    server.run()