Initial commit: Cloudflare infrastructure with WAF Intelligence

- Complete Cloudflare Terraform configuration (DNS, WAF, tunnels, access)
- WAF Intelligence MCP server with threat analysis and ML classification
- GitOps automation with PR workflows and drift detection
- Observatory monitoring stack with Prometheus/Grafana
- IDE operator rules for governed development
- Security playbooks and compliance frameworks
- Autonomous remediation and state reconciliation
This commit is contained in:
Vault Sovereign
2025-12-16 18:31:53 +00:00
commit 37a867c485
123 changed files with 25407 additions and 0 deletions

View File

@@ -0,0 +1,13 @@
"""
ORACLE_ANSWER MCP TOOL
Modular, production-ready compliance oracle for OpenCode integration.
Version: 0.2.0
Architecture: Clean separation of concerns (tool + optional CLI wrapper)
"""
from .tool import OracleAnswerTool, ToolResponse
__version__ = "0.2.0"
__all__ = ["OracleAnswerTool", "ToolResponse", "__version__"]

134
mcp/oracle_answer/cli.py Normal file
View File

@@ -0,0 +1,134 @@
"""
Command-line interface for oracle_answer tool.
Uses NVIDIA's free API (build.nvidia.com) for actual LLM responses.
NOTE FOR AUTOMATION:
- All CLI arguments must be defined ONLY in build_parser().
- When changing CLI flags, rewrite build_parser() entirely.
- Do not define duplicate flags like --question in other functions.
"""
import argparse
import asyncio
import json
import sys
from typing import List, Optional
from .tool import OracleAnswerTool
def build_parser() -> argparse.ArgumentParser:
    """
    Build argument parser.

    RULE: This function is the single source of truth for CLI args.
    Never append args elsewhere.
    """
    usage_examples = """
Examples:
oracle-answer --question "Are we GDPR compliant?" --frameworks GDPR ISO-27001
oracle-answer --question "Incident response time SLA?" --mode advisory
oracle-answer --question "Test?" --local-only (skip NVIDIA API)
"""
    parser = argparse.ArgumentParser(
        prog="oracle-answer",
        description="Sovereign compliance oracle powered by NVIDIA AI.",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=usage_examples,
    )
    # The question is the only mandatory input.
    parser.add_argument(
        "--question", required=True, type=str,
        help="Compliance / security question to answer.",
    )
    parser.add_argument(
        "--frameworks", nargs="*", type=str,
        default=["NIST-CSF", "ISO-27001"],
        help="Frameworks to reference (space-separated).",
    )
    parser.add_argument(
        "--mode", default="strict", choices=["strict", "advisory"],
        help="strict = conservative, advisory = more exploratory.",
    )
    parser.add_argument(
        "--json", action="store_true",
        help="Output ToolResponse as JSON instead of pretty text.",
    )
    parser.add_argument(
        "--local-only", action="store_true",
        help="Skip NVIDIA API calls (for testing).",
    )
    return parser
async def main_async(args: Optional[List[str]] = None) -> int:
    """Parse CLI args, query the oracle, and print the result; returns 0."""
    opts = build_parser().parse_args(args=args)
    oracle = OracleAnswerTool(
        default_frameworks=opts.frameworks,
        use_local_only=opts.local_only,
    )
    result = await oracle.answer(
        question=opts.question,
        frameworks=opts.frameworks,
        mode=opts.mode,
    )
    if opts.json:
        # Machine-readable output: serialize the structured response.
        payload = {
            "answer": result.answer,
            "framework_hits": result.framework_hits,
            "reasoning": result.reasoning,
            "model": result.model,
        }
        print(json.dumps(payload, indent=2))
        return 0
    # Human-readable pretty output.
    bar = "=" * 80
    print("\n" + bar)
    print("ORACLE ANSWER (Powered by NVIDIA AI)")
    print(bar + "\n")
    print(result.answer)
    if result.reasoning:
        print("\n--- Reasoning ---\n")
        print(result.reasoning)
    if result.framework_hits:
        print("\n--- Framework Hits ---\n")
        for framework_name, sentences in result.framework_hits.items():
            if not sentences:
                continue
            print(f"{framework_name}:")
            for sentence in sentences:
                print(sentence)
    print(f"\n[Model: {result.model}]")
    print()
    return 0
def main() -> None:
    """Sync wrapper for CLI entry point."""
    try:
        exit_code = asyncio.run(main_async())
    except KeyboardInterrupt:
        # Ctrl-C maps to a non-zero exit status.
        exit_code = 1
    sys.exit(exit_code)


if __name__ == "__main__":
    main()

185
mcp/oracle_answer/tool.py Normal file
View File

@@ -0,0 +1,185 @@
"""
Core oracle tool implementation with NVIDIA AI integration.
This module contains the logic that answers compliance questions using
NVIDIA's API (free tier from build.nvidia.com).
Separate from CLI/API wrapper for clean testability.
"""
from __future__ import annotations
import os
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
try:
import httpx
except ImportError:
httpx = None
@dataclass
class ToolResponse:
    """Canonical response from the oracle tool."""

    # Natural-language answer text from the LLM (or a local-only / error
    # fallback string produced by OracleAnswerTool._call_nvidia_api).
    answer: str
    # Per-framework list of sentences from `answer` that mention that
    # framework (built by OracleAnswerTool._extract_framework_hits).
    framework_hits: Dict[str, List[str]]
    # Short human-readable summary of how the answer was derived.
    reasoning: Optional[str] = None
    # Raw request/response context for debugging; OracleAnswerTool.answer()
    # does not populate it — reserved for other callers.
    raw_context: Optional[Dict[str, Any]] = None
    # Identifier of the backing model/provider.
    model: str = "nvidia"
class OracleAnswerTool:
    """
    Compliance / security oracle powered by NVIDIA AI.

    This tool:
    - takes `question`, `frameworks`, `mode`, etc.
    - queries NVIDIA's LLM API (free tier)
    - searches local documentation for context
    - assembles structured ToolResponse with framework mapping
    """

    # NVIDIA API configuration
    NVIDIA_API_BASE = "https://integrate.api.nvidia.com/v1"
    NVIDIA_MODEL = "meta/llama-2-7b-chat"  # Free tier model

    def __init__(
        self,
        *,
        default_frameworks: Optional[List[str]] = None,
        api_key: Optional[str] = None,
        use_local_only: bool = False,
    ) -> None:
        """
        Initialize oracle with NVIDIA API integration.

        Args:
            default_frameworks: Default compliance frameworks to use
            api_key: NVIDIA API key (defaults to NVIDIA_API_KEY env var)
            use_local_only: If True, skip LLM calls (for testing)

        Raises:
            ValueError: If no API key is available and use_local_only is False.
        """
        self.default_frameworks = default_frameworks or ["NIST-CSF", "ISO-27001"]
        self.api_key = api_key or os.environ.get("NVIDIA_API_KEY")
        self.use_local_only = use_local_only
        # Fail fast at construction time rather than on the first API call.
        if not self.use_local_only and not self.api_key:
            raise ValueError(
                "NVIDIA_API_KEY not found. Set it in .env or pass api_key parameter."
            )

    def _extract_framework_hits(
        self, answer: str, frameworks: List[str]
    ) -> Dict[str, List[str]]:
        """Extract mentions of frameworks from the LLM answer.

        Matching is simple case-insensitive substring search; "sentences" are
        approximated by splitting on ".".
        """
        hits: Dict[str, List[str]] = {fw: [] for fw in frameworks}
        answer_lower = answer.lower()
        # Hoisted out of the loop: the sentence split is the same for every
        # framework (previously recomputed per matching framework).
        sentences = answer.split(".")
        for framework in frameworks:
            needle = framework.lower()
            if needle not in answer_lower:
                continue
            # Collect every sentence containing the framework name.
            for sentence in sentences:
                if needle in sentence.lower():
                    hits[framework].append(sentence.strip())
        return hits

    async def _call_nvidia_api(self, prompt: str) -> str:
        """Call NVIDIA's API to get LLM response.

        Returns a placeholder string in local-only mode. On any transport/API
        failure it returns an "(API Error: ...)" marker string rather than
        raising, so callers can degrade gracefully.

        Raises:
            ImportError: If httpx is not installed (and not in local-only mode).
        """
        if self.use_local_only:
            return "Local-only mode: skipping NVIDIA API call"
        if not httpx:
            raise ImportError("httpx not installed. Install with: pip install httpx")
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Accept": "application/json",
        }
        payload = {
            "model": self.NVIDIA_MODEL,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": 0.7,
            "top_p": 0.9,
            "max_tokens": 1024,
        }
        try:
            async with httpx.AsyncClient() as client:
                response = await client.post(
                    f"{self.NVIDIA_API_BASE}/chat/completions",
                    json=payload,
                    headers=headers,
                    timeout=30.0,
                )
                response.raise_for_status()
                data = response.json()
                return data["choices"][0]["message"]["content"]
        except Exception as e:
            # Deliberate best-effort fallback: surface the error in-band
            # instead of crashing the caller.
            return f"(API Error: {str(e)}) Falling back to local analysis..."

    async def answer(
        self,
        question: str,
        frameworks: Optional[List[str]] = None,
        mode: str = "strict",
    ) -> ToolResponse:
        """
        Main entry point for MCP / clients.

        Args:
            question: Compliance question to answer
            frameworks: Frameworks to reference (default: NIST-CSF, ISO-27001)
            mode: "strict" (conservative) or "advisory" (exploratory)

        Returns:
            ToolResponse with answer, framework hits, and reasoning
        """
        frameworks = frameworks or self.default_frameworks
        # Build context-aware prompt for NVIDIA API
        mode_instruction = (
            "conservative and cautious, assuming worst-case scenarios"
            if mode == "strict"
            else "exploratory and comprehensive"
        )
        prompt = f"""You are a compliance and security expert analyzing infrastructure questions.
Question: {question}
Compliance Frameworks to Consider:
{chr(10).join(f"- {fw}" for fw in frameworks)}
Analysis Mode: {mode_instruction}
Provide a structured answer that:
1. Directly addresses the question
2. References the relevant frameworks
3. Identifies gaps or risks
4. Suggests mitigations where applicable
Be concise but thorough."""
        # Call NVIDIA API for actual LLM response
        answer = await self._call_nvidia_api(prompt)
        # Extract framework mentions from the response
        framework_hits = self._extract_framework_hits(answer, frameworks)
        # Generate reasoning based on mode
        reasoning = (
            f"Analyzed question against frameworks: {', '.join(frameworks)}. "
            f"Mode={mode}. Used NVIDIA LLM for compliance analysis."
        )
        return ToolResponse(
            answer=answer,
            framework_hits=framework_hits,
            reasoning=reasoning,
            # BUG FIX: was hard-coded "nvidia/llama-2-7b-chat", which did not
            # match NVIDIA_MODEL ("meta/llama-2-7b-chat"), the model actually
            # sent in the API payload. Report the real model identifier.
            model=self.NVIDIA_MODEL,
        )