Initial commit: Cloudflare infrastructure with WAF Intelligence

- Complete Cloudflare Terraform configuration (DNS, WAF, tunnels, access)
- WAF Intelligence MCP server with threat analysis and ML classification
- GitOps automation with PR workflows and drift detection
- Observatory monitoring stack with Prometheus/Grafana
- IDE operator rules for governed development
- Security playbooks and compliance frameworks
- Autonomous remediation and state reconciliation
Vault Sovereign
2025-12-16 18:31:53 +00:00
commit 37a867c485
123 changed files with 25407 additions and 0 deletions

mcp/oracle_answer/tool.py (new file, 185 lines)

@@ -0,0 +1,185 @@
"""
Core oracle tool implementation with NVIDIA AI integration.
This module contains the logic that answers compliance questions using
NVIDIA's API (free tier from build.nvidia.com). It is kept separate from the
CLI/API wrapper for clean testability.
"""

from __future__ import annotations
import os
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
# httpx is an optional dependency; if it is missing, the error surfaces only
# when an API call is actually attempted.
try:
    import httpx
except ImportError:
    httpx = None

@dataclass
class ToolResponse:
"""Canonical response from the oracle tool."""
answer: str
framework_hits: Dict[str, List[str]]
reasoning: Optional[str] = None
raw_context: Optional[Dict[str, Any]] = None
model: str = "nvidia"


class OracleAnswerTool:
"""
Compliance / security oracle powered by NVIDIA AI.
This tool:
- takes `question`, `frameworks`, `mode`, etc.
- queries NVIDIA's LLM API (free tier)
- searches local documentation for context
- assembles structured ToolResponse with framework mapping
"""
# NVIDIA API configuration
NVIDIA_API_BASE = "https://integrate.api.nvidia.com/v1"
NVIDIA_MODEL = "meta/llama-2-7b-chat" # Free tier model

    def __init__(
self,
*,
default_frameworks: Optional[List[str]] = None,
api_key: Optional[str] = None,
use_local_only: bool = False,
) -> None:
"""
Initialize oracle with NVIDIA API integration.
Args:
default_frameworks: Default compliance frameworks to use
api_key: NVIDIA API key (defaults to NVIDIA_API_KEY env var)
use_local_only: If True, skip LLM calls (for testing)
"""
self.default_frameworks = default_frameworks or ["NIST-CSF", "ISO-27001"]
self.api_key = api_key or os.environ.get("NVIDIA_API_KEY")
self.use_local_only = use_local_only
if not self.use_local_only and not self.api_key:
raise ValueError(
"NVIDIA_API_KEY not found. Set it in .env or pass api_key parameter."
)

    def _extract_framework_hits(
self, answer: str, frameworks: List[str]
) -> Dict[str, List[str]]:
"""Extract mentions of frameworks from the LLM answer."""
hits = {fw: [] for fw in frameworks}
answer_lower = answer.lower()
for framework in frameworks:
# Simple keyword matching for framework mentions
if framework.lower() in answer_lower:
# Extract sentences containing the framework
sentences = answer.split(".")
for sentence in sentences:
if framework.lower() in sentence.lower():
hits[framework].append(sentence.strip())
return hits
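
    # Illustrative behavior of the keyword matcher above (hypothetical input):
    #   _extract_framework_hits("Enable audit logging per NIST-CSF guidance.",
    #                           ["NIST-CSF", "ISO-27001"])
    #   -> {"NIST-CSF": ["Enable audit logging per NIST-CSF guidance"],
    #       "ISO-27001": []}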

    async def _call_nvidia_api(self, prompt: str) -> str:
"""Call NVIDIA's API to get LLM response."""
if self.use_local_only:
return "Local-only mode: skipping NVIDIA API call"
if not httpx:
raise ImportError("httpx not installed. Install with: pip install httpx")
headers = {
"Authorization": f"Bearer {self.api_key}",
"Accept": "application/json",
}
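        # The request body follows the OpenAI-compatible chat-completions schema
        # that NVIDIA's integrate.api.nvidia.com endpoint exposes.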
payload = {
"model": self.NVIDIA_MODEL,
"messages": [{"role": "user", "content": prompt}],
"temperature": 0.7,
"top_p": 0.9,
"max_tokens": 1024,
}
try:
async with httpx.AsyncClient() as client:
response = await client.post(
f"{self.NVIDIA_API_BASE}/chat/completions",
json=payload,
headers=headers,
timeout=30.0,
)
response.raise_for_status()
data = response.json()
return data["choices"][0]["message"]["content"]
        except Exception as e:
            # There is no local fallback yet; return the error in-band so the
            # caller still receives a ToolResponse rather than an exception.
            return f"(API Error: {e}) NVIDIA API request failed."

    async def answer(
self,
question: str,
frameworks: Optional[List[str]] = None,
mode: str = "strict",
) -> ToolResponse:
"""
Main entry point for MCP / clients.
Args:
question: Compliance question to answer
frameworks: Frameworks to reference (default: NIST-CSF, ISO-27001)
mode: "strict" (conservative) or "advisory" (exploratory)
Returns:
ToolResponse with answer, framework hits, and reasoning
"""
frameworks = frameworks or self.default_frameworks
# Build context-aware prompt for NVIDIA API
mode_instruction = (
"conservative and cautious, assuming worst-case scenarios"
if mode == "strict"
else "exploratory and comprehensive"
)
prompt = f"""You are a compliance and security expert analyzing infrastructure questions.
Question: {question}
Compliance Frameworks to Consider:
{chr(10).join(f"- {fw}" for fw in frameworks)}
Analysis Mode: {mode_instruction}
Provide a structured answer that:
1. Directly addresses the question
2. References the relevant frameworks
3. Identifies gaps or risks
4. Suggests mitigations where applicable
Be concise but thorough."""
# Call NVIDIA API for actual LLM response
answer = await self._call_nvidia_api(prompt)
# Extract framework mentions from the response
framework_hits = self._extract_framework_hits(answer, frameworks)
# Generate reasoning based on mode
reasoning = (
f"Analyzed question against frameworks: {', '.join(frameworks)}. "
f"Mode={mode}. Used NVIDIA LLM for compliance analysis."
)
return ToolResponse(
answer=answer,
framework_hits=framework_hits,
reasoning=reasoning,
model="nvidia/llama-2-7b-chat",
)
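
A minimal usage sketch (assumptions: the package layout makes the module
importable as mcp.oracle_answer.tool, and use_local_only=True is passed so no
NVIDIA_API_KEY or network access is needed):

import asyncio

from mcp.oracle_answer.tool import OracleAnswerTool


async def main() -> None:
    # Local-only mode skips the NVIDIA API call entirely (intended for testing)
    tool = OracleAnswerTool(use_local_only=True)
    resp = await tool.answer(
        "Do the Cloudflare WAF managed rules satisfy our logging requirements?",
        frameworks=["NIST-CSF"],
        mode="strict",
    )
    print(resp.answer)          # "Local-only mode: skipping NVIDIA API call"
    print(resp.framework_hits)  # {"NIST-CSF": []} here; populated on real runs


asyncio.run(main())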