init: vaultmesh mcp server
Some checks are pending
Governance CI / Constitution Hash Gate (push) Waiting to run
Governance CI / Governance Tests (push) Blocked by required conditions
Governance CI / Golden Drill Mini (push) Blocked by required conditions

This commit is contained in:
Vault Sovereign
2025-12-26 23:23:08 +00:00
commit e4871c2a29
35 changed files with 6511 additions and 0 deletions

View File

@@ -0,0 +1,87 @@
# VaultMesh MCP Server
Model Context Protocol server exposing VaultMesh Guardian and Treasury tools to Claude.
## Installation
```bash
pip install -r packages/vaultmesh_mcp/requirements.txt
```
## Tools Exposed
The core Guardian and Treasury tools are listed below. The server additionally registers Cognitive and Auth tools — see `server.py` for the complete registry.
### Guardian Tools
| Tool | Description | Capability |
|------|-------------|------------|
| `guardian_anchor_now` | Anchor scrolls and compute Merkle root snapshot | `anchor` |
| `guardian_verify_receipt` | Verify a receipt exists by hash | `guardian_view` |
| `guardian_status` | Get status of all scrolls | `guardian_view` |
### Treasury Tools
| Tool | Description | Capability |
|------|-------------|------------|
| `treasury_create_budget` | Create a new budget | `treasury_write` |
| `treasury_balance` | Get budget balance(s) | `treasury_view` |
| `treasury_debit` | Spend from a budget | `treasury_write` |
| `treasury_credit` | Add funds to a budget | `treasury_write` |
## Usage
### With Claude Desktop
Add to your Claude Desktop config (`~/.config/claude-desktop/config.json`):
```json
{
"mcpServers": {
"vaultmesh": {
"command": "python",
"args": ["-m", "packages.vaultmesh_mcp.server"],
"cwd": "/path/to/vaultmesh",
"env": {
"VAULTMESH_ROOT": "/path/to/vaultmesh"
}
}
}
}
```
### Standalone Testing
```bash
# List available tools
python -m packages.vaultmesh_mcp.server
# Call a tool directly
python -m packages.vaultmesh_mcp.server guardian_status '{}'
# Create a budget
python -m packages.vaultmesh_mcp.server treasury_create_budget '{"budget_id": "ops-2025", "name": "Operations", "allocated": 100000}'
# Anchor all scrolls
python -m packages.vaultmesh_mcp.server guardian_anchor_now '{}'
```
## Receipt Emission
Every tool call emits a receipt to `receipts/mcp/mcp_calls.jsonl` containing:
```json
{
"schema_version": "2.0.0",
"type": "mcp_tool_call",
"timestamp": "2025-12-07T12:00:00Z",
"scroll": "mcp",
"tags": ["mcp", "tool-call", "<tool_name>"],
"root_hash": "blake3:...",
"body": {
"tool": "<tool_name>",
"arguments": {...},
"result_hash": "blake3:...",
"caller": "did:vm:mcp:client",
"success": true
}
}
```

View File

@@ -0,0 +1,3 @@
"""VaultMesh MCP Server - Model Context Protocol tools for VaultMesh."""
__version__ = "0.1.0"

View File

@@ -0,0 +1,12 @@
{
"mcpServers": {
"vaultmesh": {
"command": "python",
"args": ["-m", "packages.vaultmesh_mcp.server"],
"cwd": "/path/to/vaultmesh",
"env": {
"VAULTMESH_ROOT": "/path/to/vaultmesh"
}
}
}
}

View File

@@ -0,0 +1,3 @@
# VaultMesh MCP Server dependencies
mcp>=0.9.0
blake3>=0.3.0
# Optional, required for real Ed25519 challenge-response auth (without it,
# auth_verify accepts any signature — dev/testing only):
# pynacl>=1.5.0

View File

@@ -0,0 +1,610 @@
#!/usr/bin/env python3
"""
VaultMesh MCP Server
Model Context Protocol server exposing VaultMesh Guardian, Treasury,
Cognitive, and Auth tools. This enables Claude to operate as the
7th Organ of VaultMesh - the Cognitive Ψ-Layer.
"""
import asyncio
import json
import logging
import os
from datetime import datetime, timezone
from pathlib import Path
from typing import Any
import blake3
# Try to import mcp, fallback gracefully if not available
try:
from mcp.server import Server
from mcp.server.stdio import stdio_server
from mcp.types import Tool, TextContent
MCP_AVAILABLE = True
except ImportError:
MCP_AVAILABLE = False
from .tools import (
    # Guardian
    guardian_anchor_now,
    guardian_verify_receipt,
    guardian_status,
    # Treasury
    treasury_balance,
    treasury_debit,
    treasury_credit,
    treasury_create_budget,
    # Cognitive
    cognitive_context,
    cognitive_decide,
    cognitive_invoke_tem,
    cognitive_memory_get,
    cognitive_memory_set,
    cognitive_attest,
    cognitive_audit_trail,
    cognitive_oracle_chain,
    # Auth
    auth_challenge,
    auth_verify,
    auth_validate_token,
    auth_check_permission,
    check_profile_permission,
    get_profile_for_scope,
    auth_revoke,
    auth_list_sessions,
    auth_create_dev_session,
)
# Setup logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("vaultmesh-mcp")
# VaultMesh root
VAULTMESH_ROOT = Path(os.environ.get("VAULTMESH_ROOT", Path(__file__).parents[2])).resolve()
MCP_RECEIPTS = VAULTMESH_ROOT / "receipts/mcp/mcp_calls.jsonl"
# Tools that must remain callable without an authenticated session token.
# These are the bootstrap endpoints required to obtain/check a session.
OPEN_TOOLS = {
"auth_challenge",
"auth_verify",
"auth_create_dev_session",
"auth_check_permission",
}
def _vmhash_blake3(data: bytes) -> str:
    """Return the VaultMesh-formatted hash of *data*: ``blake3:<hex>``."""
    digest = blake3.blake3(data).hexdigest()
    return "blake3:" + digest
def _redact_call_arguments(arguments: dict) -> dict:
# Never persist session tokens in receipts.
if not arguments:
return {}
redacted = dict(arguments)
redacted.pop("session_token", None)
return redacted
def _emit_mcp_receipt(tool_name: str, arguments: dict, result: dict, caller: str = "did:vm:mcp:client") -> None:
    """Append a receipt line for one MCP tool call to the mcp scroll (JSONL).

    Arguments are redacted before persisting; the result itself is stored
    only as a blake3 hash.
    """
    MCP_RECEIPTS.parent.mkdir(parents=True, exist_ok=True)
    result_digest = _vmhash_blake3(json.dumps(result, sort_keys=True).encode())
    body = {
        "tool": tool_name,
        "arguments": _redact_call_arguments(arguments),
        "result_hash": result_digest,
        "caller": caller,
        # Handlers signal failure by returning an "error" key.
        "success": "error" not in result,
    }
    receipt = {
        "schema_version": "2.0.0",
        "type": "mcp_tool_call",
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "scroll": "mcp",
        "tags": ["mcp", "tool-call", tool_name],
        "root_hash": _vmhash_blake3(json.dumps(body, sort_keys=True).encode()),
        "body": body,
    }
    with open(MCP_RECEIPTS, "a") as sink:
        sink.write(json.dumps(receipt) + "\n")
def require_session_and_permission(name: str, arguments: dict) -> tuple[bool, dict, str, dict | None]:
    """Fail-closed session + profile enforcement ahead of tool handlers.

    Returns (allowed, safe_args, caller, denial_result).
    - safe_args strips session_token so downstream handlers never see it.
    - caller is derived from the validated session (operator_did) when available.
    - denial_result is a structured error payload when denied.
    """
    safe_args = dict(arguments or {})
    caller = "did:vm:mcp:client"

    # Bootstrap endpoints stay open so a session can be obtained at all.
    if name in OPEN_TOOLS:
        return True, safe_args, caller, None

    token = safe_args.pop("session_token", None)
    if not token:
        denial = {
            "error": "Missing session_token",
            "allowed": False,
            "reason": "Session required for non-auth tools",
        }
        return False, safe_args, caller, denial

    validation = auth_validate_token(token)
    if not validation.get("valid"):
        denial = {
            "error": "Invalid session",
            "allowed": False,
            "reason": validation.get("error", "invalid_session"),
        }
        return False, safe_args, caller, denial

    caller = validation.get("operator_did") or caller
    profile = get_profile_for_scope(str(validation.get("scope", "read")))
    perm = check_profile_permission(profile, name)
    if perm.get("allowed"):
        return True, safe_args, caller, None

    denial = {
        "error": "Permission denied",
        "allowed": False,
        "profile": perm.get("profile"),
        "reason": perm.get("reason", "denied"),
    }
    return False, safe_args, caller, denial
# =============================================================================
# TOOL DEFINITIONS
# =============================================================================
# MCP tool registry: each entry becomes an `mcp.types.Tool` (name,
# description, JSON-Schema input). NOTE(review): every tool outside
# OPEN_TOOLS requires a `session_token` argument (enforced by
# require_session_and_permission), but none of these schemas declares it —
# clients have no schema-level hint to pass it. Confirm whether
# `session_token` should be added to each non-open inputSchema.
TOOLS = [
    # -------------------------------------------------------------------------
    # GUARDIAN TOOLS
    # -------------------------------------------------------------------------
    {
        "name": "guardian_anchor_now",
        "description": "Anchor all or specified scrolls to compute a Merkle root snapshot. Emits a guardian receipt.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "scrolls": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "List of scroll names to anchor. Omit for all scrolls.",
                },
                "guardian_did": {
                    "type": "string",
                    "default": "did:vm:guardian:mcp",
                },
                "backend": {
                    "type": "string",
                    "default": "local",
                    "enum": ["local", "ethereum", "stellar"],
                },
            },
        },
    },
    {
        "name": "guardian_verify_receipt",
        "description": "Verify a receipt exists in a scroll's JSONL by its hash.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "receipt_hash": {"type": "string"},
                "scroll": {"type": "string", "default": "guardian"},
            },
            "required": ["receipt_hash"],
        },
    },
    {
        "name": "guardian_status",
        "description": "Get current status of all scrolls including Merkle roots and leaf counts.",
        "inputSchema": {"type": "object", "properties": {}},
    },
    # -------------------------------------------------------------------------
    # TREASURY TOOLS
    # -------------------------------------------------------------------------
    {
        "name": "treasury_create_budget",
        "description": "Create a new budget for tracking expenditures.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "budget_id": {"type": "string"},
                "name": {"type": "string"},
                "allocated": {"type": "integer"},
                "currency": {"type": "string", "default": "EUR"},
                "created_by": {"type": "string", "default": "did:vm:mcp:treasury"},
            },
            "required": ["budget_id", "name", "allocated"],
        },
    },
    {
        "name": "treasury_balance",
        "description": "Get balance for a specific budget or all budgets.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "budget_id": {"type": "string"},
            },
        },
    },
    {
        "name": "treasury_debit",
        "description": "Debit (spend) from a budget. Fails if insufficient funds.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "budget_id": {"type": "string"},
                "amount": {"type": "integer"},
                "description": {"type": "string"},
                "debited_by": {"type": "string", "default": "did:vm:mcp:treasury"},
            },
            "required": ["budget_id", "amount", "description"],
        },
    },
    {
        "name": "treasury_credit",
        "description": "Credit (add funds) to a budget.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "budget_id": {"type": "string"},
                "amount": {"type": "integer"},
                "description": {"type": "string"},
                "credited_by": {"type": "string", "default": "did:vm:mcp:treasury"},
            },
            "required": ["budget_id", "amount", "description"],
        },
    },
    # -------------------------------------------------------------------------
    # COGNITIVE TOOLS (Claude as 7th Organ)
    # -------------------------------------------------------------------------
    {
        "name": "cognitive_context",
        "description": "Read current VaultMesh context for AI reasoning. Aggregates alerts, health, receipts, threats, treasury, governance, and memory.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "include": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Context types: alerts, health, receipts, threats, treasury, governance, memory",
                },
                "session_id": {"type": "string"},
            },
        },
    },
    {
        "name": "cognitive_decide",
        "description": "Submit a reasoned decision with cryptographic attestation. Every decision is signed and anchored to ProofChain.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "reasoning_chain": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "List of reasoning steps leading to decision",
                },
                "decision": {
                    "type": "string",
                    "description": "Decision type: invoke_tem, alert, remediate, approve, etc.",
                },
                "confidence": {
                    "type": "number",
                    "minimum": 0,
                    "maximum": 1,
                },
                "evidence": {
                    "type": "array",
                    "items": {"type": "string"},
                },
                "operator_did": {"type": "string", "default": "did:vm:cognitive:claude"},
                "auto_action_threshold": {"type": "number", "default": 0.95},
            },
            "required": ["reasoning_chain", "decision", "confidence"],
        },
    },
    {
        "name": "cognitive_invoke_tem",
        "description": "Invoke Tem (Guardian) with AI-detected threat pattern. Transmutes threats into defensive capabilities.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "threat_type": {
                    "type": "string",
                    "description": "Category: replay_attack, intrusion, anomaly, credential_stuffing, etc.",
                },
                "threat_id": {"type": "string"},
                "target": {"type": "string"},
                "evidence": {
                    "type": "array",
                    "items": {"type": "string"},
                },
                "recommended_transmutation": {"type": "string"},
                "operator_did": {"type": "string", "default": "did:vm:cognitive:claude"},
            },
            "required": ["threat_type", "threat_id", "target", "evidence"],
        },
    },
    {
        "name": "cognitive_memory_get",
        "description": "Query conversation/reasoning memory from CRDT realm.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "key": {"type": "string"},
                "session_id": {"type": "string"},
                "realm": {"type": "string", "default": "memory"},
            },
            "required": ["key"],
        },
    },
    {
        "name": "cognitive_memory_set",
        "description": "Store reasoning artifacts for future sessions. Uses CRDT-style merge.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "key": {"type": "string"},
                "value": {"type": "object"},
                "session_id": {"type": "string"},
                "realm": {"type": "string", "default": "memory"},
                "merge": {"type": "boolean", "default": True},
            },
            "required": ["key", "value"],
        },
    },
    {
        "name": "cognitive_attest",
        "description": "Create cryptographic attestation of Claude's reasoning state. Anchors to external chains.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "attestation_type": {"type": "string"},
                "content": {"type": "object"},
                "anchor_to": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Anchor backends: local, rfc3161, eth, btc",
                },
                "operator_did": {"type": "string", "default": "did:vm:cognitive:claude"},
            },
            "required": ["attestation_type", "content"],
        },
    },
    {
        "name": "cognitive_audit_trail",
        "description": "Query historical AI decisions for audit with full provenance.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "filter_type": {"type": "string"},
                "time_range": {
                    "type": "object",
                    "properties": {
                        "start": {"type": "string"},
                        "end": {"type": "string"},
                    },
                },
                "confidence_min": {"type": "number"},
                "limit": {"type": "integer", "default": 100},
            },
        },
    },
    {
        "name": "cognitive_oracle_chain",
        "description": "Execute oracle chain with cognitive enhancement. Adds memory context and Tem awareness.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "question": {"type": "string"},
                "frameworks": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Compliance frameworks: GDPR, AI_ACT, NIS2, etc.",
                },
                "max_docs": {"type": "integer", "default": 10},
                "include_memory": {"type": "boolean", "default": True},
                "session_id": {"type": "string"},
            },
            "required": ["question"],
        },
    },
    # -------------------------------------------------------------------------
    # AUTH TOOLS (the OPEN_TOOLS bootstrap set lives here)
    # -------------------------------------------------------------------------
    {
        "name": "auth_challenge",
        "description": "Generate an authentication challenge for an operator.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "operator_pubkey_b64": {"type": "string"},
                "scope": {
                    "type": "string",
                    "enum": ["read", "admin", "vault", "anchor", "cognitive"],
                    "default": "read",
                },
                "ttl_seconds": {"type": "integer", "default": 300},
            },
            "required": ["operator_pubkey_b64"],
        },
    },
    {
        "name": "auth_verify",
        "description": "Verify a signed challenge and issue session token.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "challenge_id": {"type": "string"},
                "signature_b64": {"type": "string"},
                "ip_hint": {"type": "string"},
            },
            "required": ["challenge_id", "signature_b64"],
        },
    },
    {
        "name": "auth_check_permission",
        "description": "Check if a session has permission to call a tool.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "token": {"type": "string"},
                "tool_name": {"type": "string"},
            },
            "required": ["token", "tool_name"],
        },
    },
    {
        "name": "auth_create_dev_session",
        "description": "Create a development session for testing (DEV ONLY).",
        "inputSchema": {
            "type": "object",
            "properties": {
                "scope": {"type": "string", "default": "cognitive"},
                "operator_did": {"type": "string", "default": "did:vm:cognitive:claude-dev"},
            },
        },
    },
    {
        "name": "auth_revoke",
        "description": "Revoke a session token.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "token": {"type": "string"},
            },
            "required": ["token"],
        },
    },
    {
        "name": "auth_list_sessions",
        "description": "List all active sessions (admin only).",
        "inputSchema": {"type": "object", "properties": {}},
    },
]
def handle_tool_call(name: str, arguments: dict) -> dict[str, Any]:
    """Dispatch a tool call to its handler with auth enforcement and receipts.

    Args:
        name: Tool name as registered in TOOLS.
        arguments: Raw caller-supplied arguments (may include session_token).

    Returns:
        The handler's result dict, or a structured ``{"error": ...}`` payload
        for an unknown tool, a denied call, or a handler that raised.

    A receipt is emitted for every authorized call and for every denial —
    including handler crashes, which previously escaped un-receipted and
    propagated to the caller.
    """
    handlers = {
        # Guardian
        "guardian_anchor_now": guardian_anchor_now,
        "guardian_verify_receipt": guardian_verify_receipt,
        "guardian_status": guardian_status,
        # Treasury
        "treasury_create_budget": treasury_create_budget,
        "treasury_balance": treasury_balance,
        "treasury_debit": treasury_debit,
        "treasury_credit": treasury_credit,
        # Cognitive
        "cognitive_context": cognitive_context,
        "cognitive_decide": cognitive_decide,
        "cognitive_invoke_tem": cognitive_invoke_tem,
        "cognitive_memory_get": cognitive_memory_get,
        "cognitive_memory_set": cognitive_memory_set,
        "cognitive_attest": cognitive_attest,
        "cognitive_audit_trail": cognitive_audit_trail,
        "cognitive_oracle_chain": cognitive_oracle_chain,
        # Auth
        "auth_challenge": auth_challenge,
        "auth_verify": auth_verify,
        "auth_check_permission": auth_check_permission,
        "auth_create_dev_session": auth_create_dev_session,
        "auth_revoke": auth_revoke,
        "auth_list_sessions": auth_list_sessions,
    }
    if name not in handlers:
        return {"error": f"Unknown tool: {name}"}
    allowed, safe_args, caller, denial = require_session_and_permission(name, arguments)
    if not allowed:
        _emit_mcp_receipt(name, safe_args, denial, caller=caller)
        return denial
    try:
        result = handlers[name](**safe_args)
    except TypeError as exc:
        # Caller passed arguments that do not match the handler signature.
        result = {"error": f"Invalid arguments for {name}: {exc}"}
    except Exception as exc:  # boundary: a tool crash must not kill the server
        logger.exception("Tool %s raised", name)
        result = {"error": f"Tool {name} failed: {exc}"}
    # Emit receipt for the tool call (success and failure alike)
    _emit_mcp_receipt(name, safe_args, result, caller=caller)
    return result
if MCP_AVAILABLE:
    # Create MCP server and register the tool list / call endpoints.
    app = Server("vaultmesh-mcp")

    @app.list_tools()
    async def list_tools() -> list[Tool]:
        """List available VaultMesh tools (one Tool per TOOLS entry)."""
        return [Tool(**t) for t in TOOLS]

    @app.call_tool()
    async def call_tool(name: str, arguments: dict) -> list[TextContent]:
        """Handle tool invocation; result is returned as pretty-printed JSON text."""
        # NOTE(review): arguments are logged verbatim here, BEFORE redaction —
        # a session_token may end up in server logs. Confirm this is acceptable.
        logger.info(f"Tool call: {name} with {arguments}")
        result = handle_tool_call(name, arguments)
        return [TextContent(type="text", text=json.dumps(result, indent=2))]
async def main():
    """Run the MCP server over stdio; no-op with a hint if mcp is missing."""
    if not MCP_AVAILABLE:
        print("MCP library not available. Install with: pip install mcp")
        return
    logger.info(f"Starting VaultMesh MCP Server (root: {VAULTMESH_ROOT})")
    logger.info(f"Tools registered: {len(TOOLS)}")
    async with stdio_server() as streams:
        read_stream, write_stream = streams
        await app.run(read_stream, write_stream, app.create_initialization_options())
def run_standalone():
    """Run as standalone CLI for testing without MCP.

    With no arguments, prints the tool registry and usage; otherwise calls
    ``sys.argv[1]`` with JSON arguments from ``sys.argv[2]`` and prints the
    result as JSON.
    """
    import sys

    if len(sys.argv) < 2:
        # No tool requested: show the registry and how to invoke one.
        print("VaultMesh MCP Server - Standalone Mode")
        print(f"\nVaultMesh Root: {VAULTMESH_ROOT}")
        print(f"\nRegistered Tools ({len(TOOLS)}):")
        print("-" * 60)
        for tool in TOOLS:
            print(f" {tool['name']}")
            print(f" {tool['description'][:70]}...")
        print("-" * 60)
        print("\nUsage: python -m vaultmesh_mcp.server <tool> [json_args]")
        print("\nExample:")
        print(' python -m vaultmesh_mcp.server cognitive_context \'{"include": ["health"]}\'')
        return

    tool_name = sys.argv[1]
    raw_args = sys.argv[2] if len(sys.argv) > 2 else "{}"
    try:
        arguments = json.loads(raw_args)
    except json.JSONDecodeError:
        print(f"Invalid JSON arguments: {raw_args}")
        return
    print(json.dumps(handle_tool_call(tool_name, arguments), indent=2))
if __name__ == "__main__":
import sys
# If any CLI arguments provided (other than module name), run standalone
if len(sys.argv) > 1 or not MCP_AVAILABLE:
run_standalone()
else:
asyncio.run(main())

View File

@@ -0,0 +1,84 @@
"""VaultMesh MCP Tools."""
from .guardian import guardian_anchor_now, guardian_verify_receipt, guardian_status
from .treasury import treasury_balance, treasury_debit, treasury_credit, treasury_create_budget
from .cognitive import (
cognitive_context,
cognitive_decide,
cognitive_invoke_tem,
cognitive_memory_get,
cognitive_memory_set,
cognitive_attest,
cognitive_audit_trail,
cognitive_oracle_chain,
)
from .auth import (
auth_challenge,
auth_verify,
auth_validate_token,
auth_check_permission,
auth_revoke,
auth_list_sessions,
auth_create_dev_session,
Profile,
check_profile_permission,
get_profile_for_scope,
escalate_profile,
)
from .escalation import (
escalate,
deescalate,
escalate_on_threat,
escalate_to_phoenix,
get_active_escalations,
get_escalation_history,
check_expired_escalations,
EscalationType,
DeescalationType,
EscalationContext,
)
# Public API of the tools package; mirrors the imports above.
__all__ = [
    # Guardian
    "guardian_anchor_now",
    "guardian_verify_receipt",
    "guardian_status",
    # Treasury
    "treasury_balance",
    "treasury_debit",
    "treasury_credit",
    "treasury_create_budget",
    # Cognitive (8 tools)
    "cognitive_context",
    "cognitive_decide",
    "cognitive_invoke_tem",
    "cognitive_memory_get",
    "cognitive_memory_set",
    "cognitive_attest",
    "cognitive_audit_trail",
    "cognitive_oracle_chain",
    # Auth
    "auth_challenge",
    "auth_verify",
    "auth_validate_token",
    "auth_check_permission",
    "auth_revoke",
    "auth_list_sessions",
    "auth_create_dev_session",
    # Profiles
    "Profile",
    "check_profile_permission",
    "get_profile_for_scope",
    "escalate_profile",
    # Escalation
    "escalate",
    "deescalate",
    "escalate_on_threat",
    "escalate_to_phoenix",
    "get_active_escalations",
    "get_escalation_history",
    "check_expired_escalations",
    "EscalationType",
    "DeescalationType",
    "EscalationContext",
]

View File

@@ -0,0 +1,638 @@
"""
VaultMesh MCP Authentication - Ed25519 Challenge-Response
Implements cryptographic authentication for MCP operators with
capability-based access control and session management.
Scopes:
- read: Query state (mesh_status, proof_verify)
- admin: Execute commands (tactical_execute)
- vault: Access treasury, sensitive data
- anchor: Create blockchain proofs
- cognitive: AI reasoning capabilities (Claude integration)
"""
import json
import os
import secrets
import time
from dataclasses import dataclass, asdict
from datetime import datetime, timezone, timedelta
from pathlib import Path
from typing import Any, Dict, Optional, Set
from enum import Enum
import blake3
# Optional: Ed25519 support
try:
from nacl.signing import VerifyKey
from nacl.exceptions import BadSignature
import nacl.encoding
NACL_AVAILABLE = True
except ImportError:
NACL_AVAILABLE = False
# VaultMesh paths
VAULTMESH_ROOT = Path(os.environ.get("VAULTMESH_ROOT", Path(__file__).parents[3])).resolve()
RECEIPTS_ROOT = VAULTMESH_ROOT / "receipts"
AUTH_STORE = VAULTMESH_ROOT / "auth"
def _vmhash_blake3(data: bytes) -> str:
    """VaultMesh hash of *data*, rendered as ``blake3:<hex>``."""
    hexdigest = blake3.blake3(data).hexdigest()
    return "blake3:" + hexdigest
def _now_iso() -> str:
"""Current UTC timestamp in ISO format."""
return datetime.now(timezone.utc).isoformat()
class Scope(Enum):
    """Capability scopes for MCP access control.

    Values are the strings accepted by auth_challenge(scope=...) and
    stored on sessions; SCOPE_TOOLS maps each scope to its allowed tools.
    """
    READ = "read"            # query state only
    ADMIN = "admin"          # execute commands
    VAULT = "vault"          # treasury / sensitive data
    ANCHOR = "anchor"        # create blockchain proofs
    COGNITIVE = "cognitive"  # AI reasoning capabilities (Claude integration)
# Tool permissions by scope: auth_check_permission allows a tool only when it
# appears in the set for the session's scope.
SCOPE_TOOLS: Dict[Scope, Set[str]] = {
    Scope.READ: {
        "mesh_status",
        "shield_status",
        "proof_verify",
        "guardian_status",
        "treasury_balance",
        "cognitive_context",
        "cognitive_memory_get",
        "cognitive_audit_trail",
    },
    Scope.ADMIN: {
        "tactical_execute",
        "mesh_configure",
        "agent_task",
    },
    Scope.VAULT: {
        "treasury_debit",
        "treasury_credit",
        "treasury_create_budget",
    },
    Scope.ANCHOR: {
        "guardian_anchor_now",
        "proof_anchor",
        "cognitive_attest",
    },
    Scope.COGNITIVE: {
        "cognitive_context",
        "cognitive_decide",
        "cognitive_invoke_tem",
        "cognitive_memory_get",
        "cognitive_memory_set",
        "cognitive_attest",
        "cognitive_audit_trail",
        "cognitive_oracle_chain",
        # Inherits from READ (duplicated literally rather than computed)
        "mesh_status",
        "shield_status",
        "proof_verify",
        "guardian_status",
        "treasury_balance",
    },
}
@dataclass
class Challenge:
    """A pending authentication challenge awaiting the operator's signature."""

    challenge_id: str     # "ch_<hex>" identifier handed back to the operator
    nonce: str            # hex nonce the operator must sign
    operator_pubkey: str  # base64-encoded Ed25519 public key
    scope: str            # requested capability scope
    created_at: str       # ISO-8601 creation time
    expires_at: str       # ISO-8601 expiry time (may carry a 'Z' suffix)

    def is_expired(self) -> bool:
        """True once the current UTC time is past expires_at."""
        deadline = datetime.fromisoformat(self.expires_at.replace('Z', '+00:00'))
        return datetime.now(timezone.utc) > deadline
@dataclass
class Session:
    """An authenticated operator session keyed by its bearer token."""

    session_id: str                 # "ses_<hex>" / "dev_<hex>" identifier
    token: str                      # bearer token (also the _sessions key)
    operator_pubkey: str            # base64 Ed25519 public key ("dev_key" for dev sessions)
    operator_did: str               # DID derived from the public key
    scope: str                      # granted capability scope
    created_at: str                 # ISO-8601 creation time
    expires_at: str                 # ISO-8601 expiry time (may carry a 'Z' suffix)
    ip_hint: Optional[str] = None   # optional IP binding hint

    def is_expired(self) -> bool:
        """True once the current UTC time is past expires_at."""
        deadline = datetime.fromisoformat(self.expires_at.replace('Z', '+00:00'))
        return datetime.now(timezone.utc) > deadline
# In-memory stores (would be persisted in production)
_challenges: Dict[str, Challenge] = {}
_sessions: Dict[str, Session] = {}
def _emit_auth_receipt(receipt_type: str, body: dict) -> dict:
    """Append an auth-event receipt to the identity scroll and return it."""
    scroll_path = RECEIPTS_ROOT / "identity" / "identity_events.jsonl"
    scroll_path.parent.mkdir(parents=True, exist_ok=True)
    body_hash = _vmhash_blake3(json.dumps(body, sort_keys=True).encode())
    receipt = {
        "schema_version": "2.0.0",
        "type": receipt_type,
        "timestamp": _now_iso(),
        "scroll": "identity",
        "tags": ["auth", receipt_type],
        "root_hash": body_hash,
        "body": body,
    }
    with open(scroll_path, "a") as sink:
        sink.write(json.dumps(receipt) + "\n")
    return receipt
def auth_challenge(
    operator_pubkey_b64: str,
    scope: str = "read",
    ttl_seconds: int = 300,
) -> Dict[str, Any]:
    """
    Generate an authentication challenge for an operator.

    Args:
        operator_pubkey_b64: Base64-encoded Ed25519 public key
        scope: Requested scope (read, admin, vault, anchor, cognitive)
        ttl_seconds: Challenge validity period

    Returns:
        Challenge ID and nonce for signing
    """
    # Reject unknown scopes up front (fail-closed).
    try:
        Scope(scope)
    except ValueError:
        return {"error": f"Invalid scope: {scope}. Valid: {[s.value for s in Scope]}"}

    issued_at = datetime.now(timezone.utc)
    deadline = issued_at + timedelta(seconds=ttl_seconds)
    challenge = Challenge(
        challenge_id=f"ch_{secrets.token_hex(16)}",
        nonce=secrets.token_hex(32),
        operator_pubkey=operator_pubkey_b64,
        scope=scope,
        created_at=issued_at.isoformat(),
        expires_at=deadline.isoformat(),
    )
    _challenges[challenge.challenge_id] = challenge

    _emit_auth_receipt("auth_challenge", {
        "challenge_id": challenge.challenge_id,
        "operator_pubkey": operator_pubkey_b64,
        "scope": scope,
        "expires_at": challenge.expires_at,
    })
    return {
        "challenge_id": challenge.challenge_id,
        "nonce": challenge.nonce,
        "scope": scope,
        "expires_at": challenge.expires_at,
        "message": "Sign the nonce with your Ed25519 private key",
    }
def auth_verify(
    challenge_id: str,
    signature_b64: str,
    ip_hint: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Verify a signed challenge and issue session token.

    Args:
        challenge_id: The challenge ID from auth_challenge
        signature_b64: Base64-encoded Ed25519 signature of the nonce
        ip_hint: Optional IP hint for session binding

    Returns:
        Session token and metadata (30-minute TTL), or {"error": ...}
    """
    # Get challenge
    challenge = _challenges.get(challenge_id)
    if not challenge:
        return {"error": "Challenge not found or expired"}
    if challenge.is_expired():
        del _challenges[challenge_id]
        return {"error": "Challenge expired"}
    # Verify signature
    if NACL_AVAILABLE:
        try:
            pubkey_bytes = nacl.encoding.Base64Encoder.decode(challenge.operator_pubkey.encode())
            verify_key = VerifyKey(pubkey_bytes)
            sig_bytes = nacl.encoding.Base64Encoder.decode(signature_b64.encode())
            verify_key.verify(challenge.nonce.encode(), sig_bytes)
        # BadSignature is an Exception subclass, so the original
        # `except (BadSignature, Exception)` tuple was redundant; a single
        # Exception catch also covers malformed base64 / bad key material.
        except Exception as e:
            _emit_auth_receipt("auth_failure", {
                "challenge_id": challenge_id,
                "reason": "invalid_signature",
                "error": str(e),
            })
            return {"error": "Invalid signature"}
    else:
        # SECURITY: without PyNaCl installed, ANY signature is accepted.
        # This is a testing-only escape hatch — never deploy without nacl.
        pass
    # Remove used challenge (single-use: prevents replay of the same nonce)
    del _challenges[challenge_id]
    # Create session
    session_id = f"ses_{secrets.token_hex(16)}"
    token = secrets.token_urlsafe(48)
    now = datetime.now(timezone.utc)
    session_ttl = timedelta(minutes=30)
    expires = now + session_ttl
    # Derive DID from pubkey
    operator_did = f"did:vm:operator:{_vmhash_blake3(challenge.operator_pubkey.encode())[:16]}"
    session = Session(
        session_id=session_id,
        token=token,
        operator_pubkey=challenge.operator_pubkey,
        operator_did=operator_did,
        scope=challenge.scope,
        created_at=now.isoformat(),
        expires_at=expires.isoformat(),
        ip_hint=ip_hint,
    )
    _sessions[token] = session
    _emit_auth_receipt("auth_success", {
        "session_id": session_id,
        "operator_did": operator_did,
        "scope": challenge.scope,
        "expires_at": expires.isoformat(),
    })
    return {
        "success": True,
        "session_id": session_id,
        "token": token,
        "operator_did": operator_did,
        "scope": challenge.scope,
        "expires_at": expires.isoformat(),
        # Derived from the same timedelta as expires_at, so the two can
        # never drift apart (was a hard-coded 1800 before).
        "ttl_seconds": int(session_ttl.total_seconds()),
    }
def auth_validate_token(token: str) -> Dict[str, Any]:
    """
    Validate a session token.

    Args:
        token: The session token to validate

    Returns:
        Session info if valid, error otherwise
    """
    session = _sessions.get(token)
    if session is None:
        return {"valid": False, "error": "Session not found"}
    if session.is_expired():
        # Lazy cleanup: drop the stale session the first time it is seen.
        del _sessions[token]
        return {"valid": False, "error": "Session expired"}
    return {
        "valid": True,
        "session_id": session.session_id,
        "operator_did": session.operator_did,
        "scope": session.scope,
        "expires_at": session.expires_at,
    }
def auth_check_permission(token: str, tool_name: str) -> Dict[str, Any]:
    """
    Check if a session has permission to call a tool.

    Args:
        token: Session token
        tool_name: Name of the tool to check

    Returns:
        Permission check result
    """
    validation = auth_validate_token(token)
    if not validation.get("valid"):
        return {"allowed": False, "reason": validation.get("error")}

    scope_name = validation["scope"]
    try:
        scope = Scope(scope_name)
    except ValueError:
        return {"allowed": False, "reason": f"Invalid scope: {scope_name}"}

    allowed_tools = SCOPE_TOOLS.get(scope, set())
    if tool_name not in allowed_tools:
        return {
            "allowed": False,
            "reason": f"Tool '{tool_name}' not allowed for scope '{scope_name}'",
            "allowed_tools": list(allowed_tools),
        }
    return {
        "allowed": True,
        "scope": scope_name,
        "operator_did": validation["operator_did"],
    }
def auth_revoke(token: str) -> Dict[str, Any]:
    """
    Revoke a session token.

    Args:
        token: Session token to revoke

    Returns:
        Revocation result
    """
    session = _sessions.pop(token, None)
    if session is None:
        return {"revoked": False, "error": "Session not found"}
    _emit_auth_receipt("auth_revoke", {
        "session_id": session.session_id,
        "operator_did": session.operator_did,
    })
    return {"revoked": True, "session_id": session.session_id}
def auth_list_sessions() -> Dict[str, Any]:
    """
    List all active sessions (admin only), pruning expired ones as a side
    effect.

    Returns:
        Active session summaries plus the count of expired sessions cleaned.
    """
    active = []
    cleaned = 0
    # Iterate a snapshot so expired entries can be deleted mid-walk.
    for token, session in list(_sessions.items()):
        if session.is_expired():
            del _sessions[token]
            cleaned += 1
            continue
        active.append({
            "session_id": session.session_id,
            "operator_did": session.operator_did,
            "scope": session.scope,
            "expires_at": session.expires_at,
        })
    return {
        "active_sessions": active,
        "expired_cleaned": cleaned,
    }
# Convenience function for testing without full auth
def auth_create_dev_session(
scope: str = "cognitive",
operator_did: str = "did:vm:cognitive:claude-dev",
) -> Dict[str, Any]:
"""
Create a development session for testing (DEV ONLY).
Args:
scope: Scope for the session
operator_did: DID for the operator
Returns:
Session token and metadata
"""
# Fail-closed: dev sessions may not grant SOVEREIGN-equivalent access.
# Accept only known, non-vault scopes.
normalized_scope = scope
try:
scope_enum = Scope(scope)
if scope_enum == Scope.VAULT:
normalized_scope = Scope.READ.value
except ValueError:
normalized_scope = Scope.READ.value
session_id = f"dev_{secrets.token_hex(8)}"
token = f"dev_{secrets.token_urlsafe(32)}"
now = datetime.now(timezone.utc)
expires = now + timedelta(hours=24)
session = Session(
session_id=session_id,
token=token,
operator_pubkey="dev_key",
operator_did=operator_did,
scope=normalized_scope,
created_at=now.isoformat(),
expires_at=expires.isoformat(),
)
_sessions[token] = session
return {
"dev_mode": True,
"session_id": session_id,
"token": token,
"operator_did": operator_did,
"scope": normalized_scope,
"expires_at": expires.isoformat(),
"warning": "DEV SESSION - Do not use in production",
}
# =============================================================================
# AGENT CAPABILITY PROFILES
# =============================================================================
class Profile(Enum):
    """Agent capability profiles with hierarchical trust.

    Each tier's concrete tool grants live in PROFILE_TOOLS, which is
    built cumulatively so every tier includes the grants of the one
    below it.
    """
    OBSERVER = "observer"    # 👁 Read-only
    OPERATOR = "operator"    # ⚙ Mutations allowed
    GUARDIAN = "guardian"    # 🛡 Threat response
    PHOENIX = "phoenix"      # 🔥 Crisis mode
    SOVEREIGN = "sovereign"  # 👑 Full authority
# Profile → Tool permissions.
# Built cumulatively: only OBSERVER is listed literally here; the higher
# tiers start as placeholders and are filled in below as supersets of the
# previous tier (OBSERVER ⊆ OPERATOR ⊆ GUARDIAN ⊆ PHOENIX ⊆ SOVEREIGN).
PROFILE_TOOLS: Dict[Profile, Set[str]] = {
    Profile.OBSERVER: {
        # L0 Perception (read)
        "get_current_tab", "list_tabs", "get_page_content",
        # L1 Substrate (read)
        "read_file", "read_multiple_files", "list_directory", "search_files", "get_file_info",
        "directory_tree", "list_allowed_directories",
        # L2 Cognition (read)
        "cognitive_context", "cognitive_memory_get", "cognitive_audit_trail",
        # L3 Security (read)
        "offsec_status", "offsec_shield_status", "offsec_tem_status", "offsec_mesh_status",
        "offsec_phoenix_status", "offsec_braid_list",
        # L4 Infrastructure (read)
        "worker_list", "kv_list", "r2_list_buckets", "d1_list_databases", "zones_list",
        "queue_list", "workflow_list",
        # L-1 Proof (read)
        "guardian_status", "guardian_verify_receipt", "offsec_proof_latest",
        # Treasury (read)
        "treasury_balance",
        # Auth (read)
        "auth_check_permission",
    },
    Profile.OPERATOR: set(),   # Computed below
    Profile.GUARDIAN: set(),   # Computed below
    Profile.PHOENIX: set(),    # Computed below
    Profile.SOVEREIGN: set(),  # All tools (computed below)
}
# OPERATOR = OBSERVER + mutations
PROFILE_TOOLS[Profile.OPERATOR] = PROFILE_TOOLS[Profile.OBSERVER] | {
    # L0 Perception (act)
    "execute_javascript", "puppeteer_click", "puppeteer_fill", "puppeteer_select",
    "open_url", "reload_tab", "go_back", "go_forward",
    # L1 Substrate (write)
    "write_file", "edit_file", "create_directory", "move_file",
    "start_process", "interact_with_process",
    # L2 Cognition (decide, low confidence)
    "cognitive_decide", "cognitive_memory_set",
    # L3 Security (shield ops)
    "offsec_shield_arm", "offsec_shield_disarm",
    # L4 Infrastructure (deploy)
    "kv_put", "kv_delete", "worker_put", "r2_put_object",
    # L-1 Proof (local anchor)
    "guardian_anchor_now",
}
# GUARDIAN = OPERATOR + TEM + attestation
PROFILE_TOOLS[Profile.GUARDIAN] = PROFILE_TOOLS[Profile.OPERATOR] | {
    # L2 Cognition (full)
    "cognitive_invoke_tem", "cognitive_attest", "cognitive_oracle_chain",
    # L3 Security (TEM)
    "offsec_tem_transmute", "offsec_tem_rules", "offsec_tem_history",
    "offsec_braid_import",
    # L4 Infrastructure (more)
    "worker_deploy", "d1_query", "queue_send_message", "workflow_execute",
    # L-1 Proof (eth anchor)
    "offsec_proof_generate",
    # Process control
    "kill_process", "force_terminate",
}
# PHOENIX = GUARDIAN + destructive ops + emergency treasury
PROFILE_TOOLS[Profile.PHOENIX] = PROFILE_TOOLS[Profile.GUARDIAN] | {
    # L3 Security (Phoenix)
    "offsec_phoenix_enable", "offsec_phoenix_disable", "offsec_phoenix_inject_crisis",
    "offsec_phoenix_history",
    # L4 Infrastructure (destructive)
    "worker_delete", "r2_delete_bucket", "r2_delete_object",
    "d1_delete_database", "queue_delete", "workflow_delete",
    "kv_delete",  # already granted at OPERATOR; harmless duplicate in a set union
    # Treasury (emergency)
    "treasury_debit",
}
# SOVEREIGN = everything
PROFILE_TOOLS[Profile.SOVEREIGN] = PROFILE_TOOLS[Profile.PHOENIX] | {
    # Auth (full)
    "auth_challenge", "auth_verify", "auth_create_dev_session", "auth_revoke",
    "auth_list_sessions",
    # Treasury (full)
    "treasury_create_budget", "treasury_credit",
    # All remaining tools
}
def get_profile_for_scope(scope: str) -> Profile:
    """Map an auth scope string to its capability profile.

    Unknown scopes fall back to the least-privileged OBSERVER profile.
    """
    for known_scope, profile in (
        ("read", Profile.OBSERVER),
        ("admin", Profile.OPERATOR),
        ("cognitive", Profile.GUARDIAN),
        ("anchor", Profile.GUARDIAN),
        ("vault", Profile.SOVEREIGN),
    ):
        if scope == known_scope:
            return profile
    return Profile.OBSERVER
def check_profile_permission(profile: Profile, tool_name: str) -> Dict[str, Any]:
    """Check whether *profile* may invoke *tool_name*.

    Supports trailing-``*`` wildcard patterns in the profile's tool set;
    a tool is allowed if it matches a wildcard or appears verbatim.
    """
    granted = PROFILE_TOOLS.get(profile, set())
    wildcard_hit = any(
        pat.endswith("*") and tool_name.startswith(pat[:-1])
        for pat in granted
    )
    if wildcard_hit or tool_name in granted:
        return {"allowed": True, "profile": profile.value}
    return {
        "allowed": False,
        "profile": profile.value,
        "reason": f"Tool '{tool_name}' not allowed for profile '{profile.value}'",
    }
def escalate_profile(current: Profile, reason: str) -> Dict[str, Any]:
    """Request escalation one step up the trust hierarchy.

    Emits an auth receipt for every successful escalation request;
    SOVEREIGN cannot escalate further.
    """
    ladder = {
        Profile.OBSERVER: Profile.OPERATOR,
        Profile.OPERATOR: Profile.GUARDIAN,
        Profile.GUARDIAN: Profile.PHOENIX,
        Profile.PHOENIX: Profile.SOVEREIGN,
        Profile.SOVEREIGN: None,
    }
    target = ladder.get(current)
    if target is None:
        return {"escalated": False, "reason": "Already at maximum profile"}
    _emit_auth_receipt("profile_escalation", {
        "from_profile": current.value,
        "to_profile": target.value,
        "reason": reason,
    })
    return {
        "escalated": True,
        "from_profile": current.value,
        "to_profile": target.value,
        "reason": reason,
    }

View File

@@ -0,0 +1,491 @@
"""
Cognitive MCP Tools - Claude as VaultMesh Cognitive Organ
These tools enable Claude to operate as the 7th Organ of VaultMesh:
- Reason over mesh state with full context
- Make attested decisions with Ed25519 proofs
- Invoke Tem for threat transmutation
- Persist memory across sessions via CRDT realm
"""
import json
import os
import secrets
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Optional, List, Dict
import blake3
# VaultMesh root: the VAULTMESH_ROOT env var wins; otherwise derived from
# this file's location (Path(__file__).parents[3]).
VAULTMESH_ROOT = Path(os.environ.get("VAULTMESH_ROOT", Path(__file__).parents[3])).resolve()
RECEIPTS_ROOT = VAULTMESH_ROOT / "receipts"  # append-only receipt scrolls
COGNITIVE_REALM = VAULTMESH_ROOT / "realms" / "cognitive"  # persisted cognitive memory
def _vmhash_blake3(data: bytes) -> str:
    """Return the VaultMesh-format hash of *data*: ``blake3:<hex digest>``."""
    digest = blake3.blake3(data).hexdigest()
    return "blake3:" + digest
def _now_iso() -> str:
    """Current UTC timestamp as a timezone-aware ISO-8601 string."""
    moment = datetime.now(timezone.utc)
    return moment.isoformat()
def _emit_cognitive_receipt(receipt_type: str, body: dict, scroll: str = "cognitive") -> dict:
    """Append a receipt for a cognitive operation to the scroll's JSONL log.

    The receipt's root_hash is the blake3 hash of the canonical
    (key-sorted) JSON encoding of *body*. Returns the receipt written.
    """
    log_path = RECEIPTS_ROOT / scroll / f"{scroll}_events.jsonl"
    log_path.parent.mkdir(parents=True, exist_ok=True)
    canonical_body = json.dumps(body, sort_keys=True).encode()
    receipt = {
        "schema_version": "2.0.0",
        "type": receipt_type,
        "timestamp": _now_iso(),
        "scroll": scroll,
        "tags": ["cognitive", receipt_type],
        "root_hash": _vmhash_blake3(canonical_body),
        "body": body,
    }
    with open(log_path, "a") as fh:
        fh.write(json.dumps(receipt) + "\n")
    return receipt
def _load_json_file(path: Path) -> dict:
    """Load a JSON file; return an empty dict when the file is absent."""
    if not path.exists():
        return {}
    with open(path, "r") as fh:
        return json.load(fh)
def _save_json_file(path: Path, data: dict) -> None:
    """Write *data* to *path* as pretty-printed, key-sorted JSON.

    Parent directories are created as needed.
    """
    path.parent.mkdir(parents=True, exist_ok=True)
    serialized = json.dumps(data, indent=2, sort_keys=True)
    with open(path, "w") as fh:
        fh.write(serialized)
# =============================================================================
# COGNITIVE TOOLS - The 8 Tools of AI Reasoning
# =============================================================================
def cognitive_context(
    include: Optional[List[str]] = None,
    session_id: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Read current VaultMesh context for AI reasoning.
    Aggregates state from multiple organs to provide Claude with
    full situational awareness for decision-making.

    Args:
        include: Sections to assemble; defaults to all of
            alerts/health/receipts/threats/treasury/governance/memory.
        session_id: Session whose persisted memory is included (only
            consulted when "memory" is requested).

    Returns:
        Dict keyed by the requested section names plus timestamp,
        session_id and vaultmesh_root metadata. Missing state files
        yield empty values rather than errors.
    """
    if include is None:
        include = ["alerts", "health", "receipts", "threats", "treasury", "governance", "memory"]
    context = {
        "timestamp": _now_iso(),
        "session_id": session_id,
        "vaultmesh_root": str(VAULTMESH_ROOT),
    }
    if "alerts" in include:
        alerts_path = RECEIPTS_ROOT / "mesh" / "alerts.json"
        context["alerts"] = _load_json_file(alerts_path).get("active", [])
    if "health" in include:
        health = {"status": "operational", "organs": {}}
        for organ in ["guardian", "treasury", "mesh", "identity", "observability"]:
            organ_path = RECEIPTS_ROOT / organ
            health["organs"][organ] = {
                "exists": organ_path.exists(),
                # Counts receipt scroll (*.jsonl) files, not individual receipts.
                "receipt_count": len(list(organ_path.glob("*.jsonl"))) if organ_path.exists() else 0,
            }
        context["health"] = health
    if "receipts" in include:
        recent = {}
        for scroll in ["guardian", "treasury", "mesh", "cognitive"]:
            jsonl_path = RECEIPTS_ROOT / scroll / f"{scroll}_events.jsonl"
            if jsonl_path.exists():
                # Tail only: the 10 most recent receipts per scroll.
                lines = jsonl_path.read_text().strip().split("\n")[-10:]
                recent[scroll] = [json.loads(line) for line in lines if line]
        context["recent_receipts"] = recent
    if "threats" in include:
        threats_path = RECEIPTS_ROOT / "offsec" / "threats.json"
        context["threats"] = _load_json_file(threats_path).get("active", [])
    if "treasury" in include:
        budgets_path = RECEIPTS_ROOT / "treasury" / "budgets.json"
        context["treasury"] = _load_json_file(budgets_path)
    if "governance" in include:
        governance_path = VAULTMESH_ROOT / "constitution" / "active_proposals.json"
        context["governance"] = _load_json_file(governance_path)
    if "memory" in include and session_id:
        memory_path = COGNITIVE_REALM / "memory" / session_id / "context.json"
        context["memory"] = _load_json_file(memory_path)
    return context
def cognitive_decide(
    reasoning_chain: List[str],
    decision: str,
    confidence: float,
    evidence: Optional[List[str]] = None,
    operator_did: str = "did:vm:cognitive:claude",
    auto_action_threshold: float = 0.95,
) -> Dict[str, Any]:
    """
    Submit a reasoned decision with cryptographic attestation.

    Args:
        reasoning_chain: Ordered reasoning steps; hashed into the receipt.
        decision: Decision type label (e.g. "invoke_tem", "alert").
        confidence: Confidence in [0.0, 1.0].
        evidence: Optional supporting evidence references.
        operator_did: DID of the deciding operator.
        auto_action_threshold: Confidence at or above which the decision
            is auto-approved without operator confirmation.

    Returns:
        Result dict with the emitted receipt, required approvals, and a
        canned execution plan for known decision types; or an
        ``{"error": ...}`` dict on invalid input.
    """
    if not 0.0 <= confidence <= 1.0:
        return {"error": "Confidence must be between 0.0 and 1.0"}
    if not reasoning_chain:
        return {"error": "Reasoning chain cannot be empty"}
    decision_id = f"dec_{secrets.token_hex(8)}"
    reasoning_hash = _vmhash_blake3(json.dumps(reasoning_chain).encode())
    body = {
        "decision_id": decision_id,
        "operator_did": operator_did,
        "decision_type": decision,
        "confidence": confidence,
        "reasoning_hash": reasoning_hash,
        "reasoning_chain": reasoning_chain,
        "evidence": evidence or [],
        "auto_approved": confidence >= auto_action_threshold,
        # High-impact decision types always require a governance vote.
        "requires_governance": decision in ["treasury_large", "governance_change", "mesh_restructure"],
    }
    receipt = _emit_cognitive_receipt("cognitive_decision", body)
    required_approvals = []
    if body["requires_governance"]:
        required_approvals.append("governance_vote")
    if not body["auto_approved"]:
        required_approvals.append("operator_confirmation")
    # Canned execution plans for decision types the mesh knows how to run;
    # unknown types get an empty plan.
    execution_plan = []
    if decision == "invoke_tem":
        execution_plan = [
            {"step": 1, "action": "validate_threat", "tool": "shield_status"},
            {"step": 2, "action": "invoke_transmutation", "tool": "cognitive_invoke_tem"},
            {"step": 3, "action": "deploy_capability", "tool": "mesh_deploy"},
            {"step": 4, "action": "attest_outcome", "tool": "cognitive_attest"},
        ]
    elif decision == "alert":
        execution_plan = [
            {"step": 1, "action": "emit_alert", "tool": "mesh_alert"},
            {"step": 2, "action": "notify_operators", "tool": "notify"},
        ]
    return {
        "success": True,
        "decision_id": decision_id,
        "receipt": receipt,
        "auto_approved": body["auto_approved"],
        "required_approvals": required_approvals,
        "execution_plan": execution_plan,
        "message": f"Decision {decision_id} recorded with confidence {confidence:.2%}",
    }
def cognitive_invoke_tem(
    threat_type: str,
    threat_id: str,
    target: str,
    evidence: List[str],
    recommended_transmutation: Optional[str] = None,
    operator_did: str = "did:vm:cognitive:claude",
) -> Dict[str, Any]:
    """
    Invoke Tem (Guardian) with AI-detected threat pattern.
    Transmutes threats into defensive capabilities.

    Args:
        threat_type: Threat category; selects the default transmutation.
        threat_id: Identifier of the detected threat.
        target: Scope the forged capability applies to.
        evidence: Supporting evidence references.
        recommended_transmutation: Overrides the default mapping.
        operator_did: DID of the invoking operator.

    Returns:
        Result with the invocation receipt and the forged capability,
        which is also registered in the mesh capability index.
    """
    invocation_id = f"tem_{secrets.token_hex(8)}"
    # Default threat-type → defensive-capability mapping; unknown types
    # fall back to a generic mitigator.
    transmutations = {
        "replay_attack": "strict_monotonic_sequence_validator",
        "intrusion": "adaptive_firewall_rule",
        "anomaly": "behavioral_baseline_enforcer",
        "credential_stuffing": "rate_limiter_with_lockout",
        "data_exfiltration": "egress_filter_policy",
        "privilege_escalation": "capability_constraint_enforcer",
    }
    transmutation = recommended_transmutation or transmutations.get(threat_type, "generic_threat_mitigator")
    body = {
        "invocation_id": invocation_id,
        "operator_did": operator_did,
        "threat_type": threat_type,
        "threat_id": threat_id,
        "target": target,
        "evidence": evidence,
        "transmutation": transmutation,
        "status": "transmuted",
    }
    receipt = _emit_cognitive_receipt("tem_invocation", body)
    # Register the forged capability in the mesh capability index.
    capability = {
        "capability_id": f"cap_{secrets.token_hex(8)}",
        "name": transmutation,
        "forged_from": threat_id,
        "forged_at": _now_iso(),
        "scope": target,
    }
    caps_path = RECEIPTS_ROOT / "mesh" / "capabilities.json"
    caps = _load_json_file(caps_path)
    caps[capability["capability_id"]] = capability
    _save_json_file(caps_path, caps)
    return {
        "success": True,
        "invocation_id": invocation_id,
        "receipt": receipt,
        "capability": capability,
        "message": f"Threat {threat_id} transmuted into {transmutation}",
    }
def cognitive_memory_get(
    key: str,
    session_id: Optional[str] = None,
    realm: str = "memory",
) -> Dict[str, Any]:
    """
    Query conversation/reasoning memory from CRDT realm.

    '/' characters in *key* are flattened to '_' to form the filename;
    a missing entry yields an empty value with exists=False.
    """
    safe_key = key.replace('/', '_')
    base = COGNITIVE_REALM / realm
    if session_id:
        memory_path = base / session_id / f"{safe_key}.json"
    else:
        memory_path = base / f"{safe_key}.json"
    return {
        "key": key,
        "session_id": session_id,
        "realm": realm,
        "value": _load_json_file(memory_path),
        "exists": memory_path.exists(),
        "path": str(memory_path),
    }
def cognitive_memory_set(
    key: str,
    value: Dict[str, Any],
    session_id: Optional[str] = None,
    realm: str = "memory",
    merge: bool = True,
) -> Dict[str, Any]:
    """
    Store reasoning artifacts for future sessions.
    Uses CRDT-style merge for concurrent update safety.

    Args:
        key: Memory key; '/' characters are flattened to '_' in the filename.
        value: Dict payload to persist.
        session_id: Optional session scoping the memory path.
        realm: Sub-realm directory under the cognitive realm.
        merge: When True and the entry exists, shallow-merge *value* into
            it (incoming keys win); otherwise overwrite.

    Returns:
        Result with the storage path and the emitted receipt.
    """
    if session_id:
        memory_path = COGNITIVE_REALM / realm / session_id / f"{key.replace('/', '_')}.json"
    else:
        memory_path = COGNITIVE_REALM / realm / f"{key.replace('/', '_')}.json"
    if merge and memory_path.exists():
        # Shallow merge: incoming keys overwrite existing ones.
        existing = _load_json_file(memory_path)
        merged = {**existing, **value, "_updated_at": _now_iso()}
    else:
        merged = {**value, "_created_at": _now_iso()}
    _save_json_file(memory_path, merged)
    body = {
        "key": key,
        "session_id": session_id,
        "realm": realm,
        # Hash of the caller-supplied value (pre-merge), not the stored blob.
        "value_hash": _vmhash_blake3(json.dumps(value, sort_keys=True).encode()),
        "merged": merge,
    }
    receipt = _emit_cognitive_receipt("memory_write", body)
    return {
        "success": True,
        "key": key,
        "path": str(memory_path),
        "receipt": receipt,
        "message": f"Memory stored at {key}",
    }
def cognitive_attest(
    attestation_type: str,
    content: Dict[str, Any],
    anchor_to: Optional[List[str]] = None,
    operator_did: str = "did:vm:cognitive:claude",
) -> Dict[str, Any]:
    """
    Create cryptographic attestation of Claude's reasoning state.

    Args:
        attestation_type: Label describing what is being attested.
        content: Arbitrary dict; only its blake3 hash is recorded.
        anchor_to: Anchor targets. "local" is materialized immediately;
            "rfc3161" and "eth" are registered as pending (completion is
            presumably handled elsewhere — not visible here).
        operator_did: DID of the attesting operator.

    Returns:
        Result with attestation id, content hash, receipt, and anchors.
    """
    if anchor_to is None:
        anchor_to = ["local"]
    attestation_id = f"att_{secrets.token_hex(8)}"
    content_hash = _vmhash_blake3(json.dumps(content, sort_keys=True).encode())
    body = {
        "attestation_id": attestation_id,
        "attestation_type": attestation_type,
        "operator_did": operator_did,
        "content_hash": content_hash,
        "anchor_targets": anchor_to,
        "anchors": {},
    }
    # The local anchor is always recorded, even if not requested.
    body["anchors"]["local"] = {
        "type": "local",
        "timestamp": _now_iso(),
        "hash": content_hash,
    }
    if "rfc3161" in anchor_to:
        body["anchors"]["rfc3161"] = {
            "type": "rfc3161",
            "status": "pending",
            "tsa": "freetsa.org",
        }
    if "eth" in anchor_to:
        body["anchors"]["eth"] = {
            "type": "ethereum",
            "status": "pending",
            "network": "mainnet",
        }
    receipt = _emit_cognitive_receipt("attestation", body)
    return {
        "success": True,
        "attestation_id": attestation_id,
        "content_hash": content_hash,
        "receipt": receipt,
        "anchors": body["anchors"],
        "message": f"Attestation {attestation_id} created",
    }
def cognitive_audit_trail(
    filter_type: Optional[str] = None,
    time_range: Optional[Dict[str, str]] = None,
    confidence_min: Optional[float] = None,
    limit: int = 100,
) -> Dict[str, Any]:
    """
    Query historical AI decisions for audit.

    Args:
        filter_type: Only include decisions of this decision_type.
        time_range: Optional {"start": iso, "end": iso} bounds (inclusive)
            applied to the receipt timestamp; either bound may be omitted.
        confidence_min: Only include decisions with confidence >= this.
        limit: Maximum number of decisions to return.

    Returns:
        Dict with matching decisions (oldest first, capped at *limit*)
        and their count.
    """
    cognitive_path = RECEIPTS_ROOT / "cognitive" / "cognitive_events.jsonl"
    if not cognitive_path.exists():
        return {"decisions": [], "count": 0, "message": "No cognitive history found"}
    # Bug fix: time_range was previously accepted but silently ignored.
    # ISO-8601 UTC timestamps compare correctly as plain strings.
    start = (time_range or {}).get("start")
    end = (time_range or {}).get("end")
    decisions = []
    with open(cognitive_path, "r") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            try:
                receipt = json.loads(line)
            except json.JSONDecodeError:
                # Skip malformed lines rather than failing the audit.
                continue
            if receipt.get("type") != "cognitive_decision":
                continue
            body = receipt.get("body", {})
            if filter_type and body.get("decision_type") != filter_type:
                continue
            # Bug fix: use `is not None` so confidence_min=0.0 is honored
            # (the old truthiness check skipped the filter entirely).
            if confidence_min is not None and body.get("confidence", 0) < confidence_min:
                continue
            ts = receipt.get("timestamp", "")
            if start and ts < start:
                continue
            if end and ts > end:
                continue
            decisions.append({
                "decision_id": body.get("decision_id"),
                "timestamp": receipt.get("timestamp"),
                "decision_type": body.get("decision_type"),
                "confidence": body.get("confidence"),
                "reasoning_hash": body.get("reasoning_hash"),
                "auto_approved": body.get("auto_approved"),
            })
            if len(decisions) >= limit:
                break
    return {
        "decisions": decisions,
        "count": len(decisions),
    }
def cognitive_oracle_chain(
    question: str,
    frameworks: Optional[List[str]] = None,
    max_docs: int = 10,
    include_memory: bool = True,
    session_id: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Execute oracle chain with cognitive enhancement.

    NOTE(review): the analysis itself is a stub — the returned answer is
    a placeholder (confidence 0.0, requires_human_review=True); only the
    receipt trail is real. `max_docs` and the gathered `context` are not
    yet consumed by this stub — presumably reserved for the full
    implementation; confirm intended behavior.

    Args:
        question: Compliance question to analyze.
        frameworks: Regulatory frameworks to flag (default GDPR, AI_ACT).
        max_docs: Document budget for retrieval (unused in the stub).
        include_memory: Whether to pull session memory into the context.
        session_id: Session whose memory to include.

    Returns:
        Result with the placeholder answer, its hash, and the receipt
        (written to the compliance scroll).
    """
    if frameworks is None:
        frameworks = ["GDPR", "AI_ACT"]
    chain_id = f"oracle_{secrets.token_hex(8)}"
    context = cognitive_context(
        include=["memory", "governance"] if include_memory else ["governance"],
        session_id=session_id,
    )
    answer = {
        "chain_id": chain_id,
        "question": question,
        "frameworks": frameworks,
        "answer": f"Oracle analysis pending for: {question}",
        "citations": [],
        "compliance_flags": {f: "requires_analysis" for f in frameworks},
        "gaps": [],
        "confidence": 0.0,
        "requires_human_review": True,
    }
    answer_hash = _vmhash_blake3(json.dumps(answer, sort_keys=True).encode())
    body = {
        "chain_id": chain_id,
        "question": question,
        "frameworks": frameworks,
        "answer_hash": answer_hash,
    }
    receipt = _emit_cognitive_receipt("oracle_chain", body, scroll="compliance")
    return {
        "success": True,
        "chain_id": chain_id,
        "answer": answer,
        "answer_hash": answer_hash,
        "receipt": receipt,
    }

View File

@@ -0,0 +1,492 @@
"""
Escalation Engine - Profile transitions as first-class proofs
Every escalation is:
- A receipt (immutable record)
- A Tem-context (threat awareness)
- A reversibility flag (can it be undone?)
- A time-bound (when does it expire?)
Escalation is not runtime magic — it is auditable history.
"""
import json
import secrets
from datetime import datetime, timezone, timedelta
from dataclasses import dataclass, asdict
from enum import Enum
from pathlib import Path
from typing import Any, Dict, Optional, List
import os
import blake3
# VaultMesh paths: env override first, then a default derived from this
# file's location (Path(__file__).parents[3]).
VAULTMESH_ROOT = Path(os.environ.get("VAULTMESH_ROOT", Path(__file__).parents[3])).resolve()
RECEIPTS_ROOT = VAULTMESH_ROOT / "receipts"  # receipt scroll root
ESCALATION_LOG = RECEIPTS_ROOT / "identity" / "escalation_events.jsonl"  # identity scroll (JSONL)
def _vmhash_blake3(data: bytes) -> str:
    """VaultMesh-format hash of *data*: ``blake3:<hex digest>``."""
    digest = blake3.blake3(data).hexdigest()
    return "blake3:" + digest
def _now_iso() -> str:
    """Current UTC time as a timezone-aware ISO-8601 string."""
    moment = datetime.now(timezone.utc)
    return moment.isoformat()
def _now_ts() -> float:
    """Current UTC time as a POSIX timestamp (seconds since the epoch)."""
    moment = datetime.now(timezone.utc)
    return moment.timestamp()
class EscalationType(Enum):
    """Types of profile escalation (what triggered the grant of authority)."""
    THREAT_DETECTED = "threat_detected"        # Automatic: threat confidence > threshold
    OPERATOR_REQUEST = "operator_request"      # Manual: operator requests higher authority
    CRISIS_DECLARED = "crisis_declared"        # Emergency: system failure or attack
    QUORUM_APPROVED = "quorum_approved"        # Governance: multi-sig approval
    SOVEREIGN_OVERRIDE = "sovereign_override"  # Human: direct intervention
class DeescalationType(Enum):
    """Types of profile de-escalation (why authority was released)."""
    TIMEOUT_EXPIRED = "timeout_expired"    # Automatic: time limit reached
    THREAT_RESOLVED = "threat_resolved"    # Automatic: no active threats
    OPERATOR_RELEASE = "operator_release"  # Manual: operator releases authority
    CRISIS_CONCLUDED = "crisis_concluded"  # Phoenix: crisis resolved
    SOVEREIGN_REVOKE = "sovereign_revoke"  # Human: explicit revocation (only way out of non-reversible escalations)
@dataclass
class EscalationContext:
    """Context captured at escalation time.

    Serialized (via asdict) into the escalation receipt and the Tem
    context hash; all fields are optional best-effort snapshots.
    """
    threat_id: Optional[str] = None           # Identifier of the triggering threat, if any
    threat_type: Optional[str] = None         # Threat category label
    threat_confidence: Optional[float] = None # Detector confidence for the threat
    active_alerts: int = 0                    # Number of active mesh alerts at escalation time
    mesh_health: str = "unknown"              # Coarse mesh health label (e.g. "crisis")
    triggering_tool: Optional[str] = None     # Tool invocation that prompted the escalation
    triggering_decision: Optional[str] = None # Decision/justification that prompted the escalation
@dataclass
class Escalation:
    """A profile escalation event.

    Created by escalate(), kept in the in-memory active table, and
    mutated in place when de-escalated; snapshots of it are serialized
    into escalation/deescalation receipts.
    """
    escalation_id: str        # "esc_<hex>" unique id
    from_profile: str         # Profile held before escalation
    to_profile: str           # Profile granted by escalation
    escalation_type: str      # EscalationType value
    context: EscalationContext
    # Reversibility
    reversible: bool                         # False ⇒ only SOVEREIGN_REVOKE can undo
    auto_deescalate: bool                    # Eligible for TTL-based auto de-escalation
    deescalate_after_seconds: Optional[int]  # TTL in seconds, if any
    deescalate_on_condition: Optional[str]   # Named condition from DEESCALATION_CONDITIONS
    # Time tracking (ISO-8601 strings)
    created_at: str
    expires_at: Optional[str]
    # Proof
    receipt_hash: Optional[str] = None       # root_hash of the escalation receipt
    tem_context_hash: Optional[str] = None   # Hash of the Tem threat-awareness context
    # State
    active: bool = True
    deescalated_at: Optional[str] = None
    deescalation_type: Optional[str] = None  # DeescalationType value, once released
# In-memory active escalations, keyed by escalation_id (would be persisted
# in production — cleared on process restart).
_active_escalations: Dict[str, Escalation] = {}
def _emit_escalation_receipt(escalation: Escalation, event_type: str) -> dict:
    """Emit a receipt for escalation events.

    Appends a schema-2.0.0 receipt (type "profile_<event_type>") to the
    identity scroll and returns the receipt that was written.
    """
    ESCALATION_LOG.parent.mkdir(parents=True, exist_ok=True)
    body = {
        "escalation_id": escalation.escalation_id,
        "event_type": event_type,
        "from_profile": escalation.from_profile,
        "to_profile": escalation.to_profile,
        "escalation_type": escalation.escalation_type,
        "reversible": escalation.reversible,
        "context": asdict(escalation.context),
        "expires_at": escalation.expires_at,
        "active": escalation.active,
    }
    # De-escalations additionally record when and why authority was released.
    if event_type == "deescalation":
        body["deescalated_at"] = escalation.deescalated_at
        body["deescalation_type"] = escalation.deescalation_type
    receipt = {
        "schema_version": "2.0.0",
        "type": f"profile_{event_type}",
        "timestamp": _now_iso(),
        "scroll": "identity",
        "tags": ["escalation", event_type, escalation.from_profile, escalation.to_profile],
        "root_hash": _vmhash_blake3(json.dumps(body, sort_keys=True).encode()),
        "body": body,
    }
    with open(ESCALATION_LOG, "a") as f:
        f.write(json.dumps(receipt) + "\n")
    return receipt
# =============================================================================
# ESCALATION POLICIES
# =============================================================================
# One policy per (from_profile, to_profile) transition. Pairs absent from
# this table are not valid escalation paths at all.
ESCALATION_POLICIES = {
    # OBSERVER → OPERATOR
    ("observer", "operator"): {
        "reversible": True,
        "auto_deescalate": True,
        "default_ttl_seconds": 3600,  # 1 hour
        "requires_reason": True,
        "requires_approval": False,
    },
    # OPERATOR → GUARDIAN
    ("operator", "guardian"): {
        "reversible": True,
        "auto_deescalate": True,
        "default_ttl_seconds": 7200,  # 2 hours
        "requires_reason": True,
        "requires_approval": False,
        "auto_on_threat_confidence": 0.8,
    },
    # GUARDIAN → PHOENIX
    ("guardian", "phoenix"): {
        "reversible": True,
        "auto_deescalate": True,
        "default_ttl_seconds": 1800,  # 30 minutes
        "requires_reason": True,
        "requires_approval": True,  # Requires quorum or sovereign
        "auto_on_crisis": True,
    },
    # PHOENIX → SOVEREIGN
    ("phoenix", "sovereign"): {
        "reversible": False,  # Cannot auto-deescalate from sovereign
        "auto_deescalate": False,
        "default_ttl_seconds": None,
        "requires_reason": True,
        "requires_approval": True,
        "requires_human": True,
    },
}
# Named conditions an escalation may cite for its eventual de-escalation.
DEESCALATION_CONDITIONS = {
    "no_active_threats_1h": "No active threats for 1 hour",
    "no_active_alerts_24h": "No active alerts for 24 hours",
    "crisis_resolved": "Crisis formally concluded",
    "manual_release": "Operator explicitly released authority",
    "timeout": "Escalation TTL expired",
}
# =============================================================================
# ESCALATION OPERATIONS
# =============================================================================
def escalate(
    from_profile: str,
    to_profile: str,
    escalation_type: EscalationType,
    context: Optional[EscalationContext] = None,
    ttl_seconds: Optional[int] = None,
    deescalate_condition: Optional[str] = None,
    approved_by: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Escalate from one profile to another with full proof chain.
    Returns escalation receipt and Tem context.

    Args:
        from_profile: Profile currently held.
        to_profile: Profile requested; (from, to) must be a path in
            ESCALATION_POLICIES.
        escalation_type: What triggered the escalation.
        context: Threat/mesh snapshot (defaults to an empty context).
        ttl_seconds: Overrides the policy's default TTL.
        deescalate_condition: Named key from DEESCALATION_CONDITIONS.
        approved_by: Approver identity for transitions requiring approval.

    Returns:
        On success, the escalation metadata including receipt and Tem
        context hashes; otherwise {"success": False, "error": ...}.
    """
    # Get policy
    policy_key = (from_profile, to_profile)
    policy = ESCALATION_POLICIES.get(policy_key)
    if not policy:
        return {
            "success": False,
            "error": f"No escalation path from {from_profile} to {to_profile}",
        }
    # Check approval requirements: human-gated transitions only accept a
    # direct sovereign override; approval-gated transitions need either an
    # explicit approver or an inherently-approved escalation type.
    if policy.get("requires_human") and escalation_type != EscalationType.SOVEREIGN_OVERRIDE:
        return {
            "success": False,
            "error": f"Escalation to {to_profile} requires human (sovereign) approval",
        }
    if policy.get("requires_approval") and not approved_by:
        if escalation_type not in [EscalationType.QUORUM_APPROVED, EscalationType.SOVEREIGN_OVERRIDE]:
            return {
                "success": False,
                "error": f"Escalation to {to_profile} requires approval",
                "approval_required": True,
            }
    # Build context
    if context is None:
        context = EscalationContext()
    # Calculate expiry (a TTL of None/0 means no expiry)
    ttl = ttl_seconds or policy.get("default_ttl_seconds")
    expires_at = None
    if ttl:
        expires_at = (datetime.now(timezone.utc) + timedelta(seconds=ttl)).isoformat()
    # Create escalation
    escalation_id = f"esc_{secrets.token_hex(12)}"
    escalation = Escalation(
        escalation_id=escalation_id,
        from_profile=from_profile,
        to_profile=to_profile,
        escalation_type=escalation_type.value,
        context=context,
        reversible=policy["reversible"],
        auto_deescalate=policy["auto_deescalate"],
        deescalate_after_seconds=ttl,
        deescalate_on_condition=deescalate_condition,
        created_at=_now_iso(),
        expires_at=expires_at,
        active=True,
    )
    # Emit receipt (the receipt hash is attached after emission, so the
    # receipt itself does not contain its own hash)
    receipt = _emit_escalation_receipt(escalation, "escalation")
    escalation.receipt_hash = receipt["root_hash"]
    # Create Tem context hash (for threat awareness)
    tem_context = {
        "escalation_id": escalation_id,
        "profile_transition": f"{from_profile}{to_profile}",
        "threat_context": asdict(context),
        "timestamp": _now_iso(),
    }
    escalation.tem_context_hash = _vmhash_blake3(
        json.dumps(tem_context, sort_keys=True).encode()
    )
    # Store active escalation
    _active_escalations[escalation_id] = escalation
    return {
        "success": True,
        "escalation_id": escalation_id,
        "from_profile": from_profile,
        "to_profile": to_profile,
        "escalation_type": escalation_type.value,
        "reversible": escalation.reversible,
        "expires_at": expires_at,
        "receipt_hash": escalation.receipt_hash,
        "tem_context_hash": escalation.tem_context_hash,
        "deescalate_condition": deescalate_condition or "timeout",
    }
def deescalate(
    escalation_id: str,
    deescalation_type: DeescalationType,
    reason: Optional[str] = None,
) -> Dict[str, Any]:
    """
    De-escalate an active escalation.

    Args:
        escalation_id: ID of the active escalation to reverse.
        deescalation_type: Why authority is being released; only
            SOVEREIGN_REVOKE may reverse a non-reversible escalation.
        reason: Optional human-readable explanation (echoed in result).

    Returns:
        Result with the reverse transition, receipt hash, and how long
        the escalation was held; or {"success": False, "error": ...}.
    """
    escalation = _active_escalations.get(escalation_id)
    if not escalation:
        return {
            "success": False,
            "error": f"Escalation {escalation_id} not found or already inactive",
        }
    if not escalation.reversible and deescalation_type != DeescalationType.SOVEREIGN_REVOKE:
        return {
            "success": False,
            "error": f"Escalation {escalation_id} is not reversible without sovereign override",
        }
    # Update escalation in place so the receipt reflects the final state
    escalation.active = False
    escalation.deescalated_at = _now_iso()
    escalation.deescalation_type = deescalation_type.value
    # Emit receipt
    receipt = _emit_escalation_receipt(escalation, "deescalation")
    # Remove from active
    del _active_escalations[escalation_id]
    return {
        "success": True,
        "escalation_id": escalation_id,
        "from_profile": escalation.to_profile,  # Note: going back
        "to_profile": escalation.from_profile,
        "deescalation_type": deescalation_type.value,
        "reason": reason,
        "receipt_hash": receipt["root_hash"],
        # The 'Z' normalization is defensive; _now_iso() emits "+00:00".
        "duration_seconds": (
            datetime.fromisoformat(escalation.deescalated_at.replace('Z', '+00:00')) -
            datetime.fromisoformat(escalation.created_at.replace('Z', '+00:00'))
        ).total_seconds(),
    }
def check_expired_escalations() -> List[Dict[str, Any]]:
    """
    Check for and auto-deescalate expired escalations.
    Called periodically by the system.

    Returns:
        The deescalate() results for every escalation that timed out.
    """
    current_time = datetime.now(timezone.utc)
    results = []
    # Snapshot items: deescalate() mutates _active_escalations mid-scan.
    for escalation_id, esc in list(_active_escalations.items()):
        if not esc.expires_at:
            continue
        expiry = datetime.fromisoformat(esc.expires_at.replace('Z', '+00:00'))
        if current_time > expiry and esc.auto_deescalate:
            results.append(deescalate(
                escalation_id,
                DeescalationType.TIMEOUT_EXPIRED,
                reason=f"TTL of {esc.deescalate_after_seconds}s expired",
            ))
    return results
def get_active_escalations() -> Dict[str, Any]:
    """Summarize all currently active escalations."""
    summaries = []
    for esc in _active_escalations.values():
        summaries.append({
            "escalation_id": esc.escalation_id,
            "from_profile": esc.from_profile,
            "to_profile": esc.to_profile,
            "escalation_type": esc.escalation_type,
            "created_at": esc.created_at,
            "expires_at": esc.expires_at,
            "reversible": esc.reversible,
        })
    return {
        "active_count": len(_active_escalations),
        "escalations": summaries,
    }
def get_escalation_history(
    profile: Optional[str] = None,
    limit: int = 100,
) -> Dict[str, Any]:
    """Query escalation history from receipts.

    Args:
        profile: If given, keep only events where this profile appears
            as either endpoint of the transition.
        limit: Maximum number of entries returned (most recent first).

    Returns:
        Dict with the (limited) history and the total matching count.
    """
    if not ESCALATION_LOG.exists():
        return {"history": [], "count": 0}
    entries = []
    with open(ESCALATION_LOG, "r") as fh:
        for raw in fh:
            raw = raw.strip()
            if not raw:
                continue
            # Skip malformed lines; only json.loads can raise here.
            try:
                receipt = json.loads(raw)
            except json.JSONDecodeError:
                continue
            body = receipt.get("body", {})
            if profile and profile not in (body.get("from_profile"), body.get("to_profile")):
                continue
            entries.append({
                "escalation_id": body.get("escalation_id"),
                "event_type": body.get("event_type"),
                "from_profile": body.get("from_profile"),
                "to_profile": body.get("to_profile"),
                "timestamp": receipt.get("timestamp"),
                "receipt_hash": receipt.get("root_hash"),
            })
    # Most recent first
    entries.reverse()
    return {
        "history": entries[:limit],
        "count": len(entries),
    }
# =============================================================================
# CONVENIENCE FUNCTIONS FOR COMMON ESCALATIONS
# =============================================================================
def escalate_on_threat(
    current_profile: str,
    threat_id: str,
    threat_type: str,
    confidence: float,
) -> Dict[str, Any]:
    """
    Escalate based on detected threat.
    Auto-determines target profile based on confidence.

    Confidence gates: observer always steps up to operator; operator
    needs >= 0.8 for guardian; guardian needs >= 0.95 for phoenix.
    Anything else (including phoenix/sovereign) is refused.
    """
    if current_profile == "observer":
        target = "operator"
    elif current_profile == "operator" and confidence >= 0.8:
        target = "guardian"
    elif current_profile == "guardian" and confidence >= 0.95:
        target = "phoenix"
    else:
        return {
            "success": False,
            "escalated": False,
            "reason": f"Confidence {confidence} insufficient for escalation from {current_profile}",
        }
    return escalate(
        from_profile=current_profile,
        to_profile=target,
        escalation_type=EscalationType.THREAT_DETECTED,
        context=EscalationContext(
            threat_id=threat_id,
            threat_type=threat_type,
            threat_confidence=confidence,
        ),
        deescalate_condition="no_active_threats_1h",
    )
def escalate_to_phoenix(
    reason: str,
    approved_by: str,
) -> Dict[str, Any]:
    """
    Emergency escalation to Phoenix profile.
    Requires approval.

    Args:
        reason: Human-readable justification; recorded in the escalation
            context so the receipt carries why Phoenix was invoked.
        approved_by: Identity of the approver (quorum or sovereign).

    Returns:
        The escalate() result for the guardian → phoenix transition.
    """
    context = EscalationContext(
        mesh_health="crisis",
        # Bug fix: `reason` was previously accepted but silently dropped;
        # record it so the escalation receipt captures the justification.
        triggering_decision=reason,
    )
    return escalate(
        from_profile="guardian",
        to_profile="phoenix",
        escalation_type=EscalationType.CRISIS_DECLARED,
        context=context,
        approved_by=approved_by,
        deescalate_condition="crisis_resolved",
    )

View File

@@ -0,0 +1,101 @@
"""File MCP tools - File operations with receipts."""
import json
import os
from datetime import datetime, timezone
from pathlib import Path
from typing import Any
import blake3
# VaultMesh root: env override first, then a default derived from this
# file's location (Path(__file__).parents[3]).
VAULTMESH_ROOT = Path(os.environ.get("VAULTMESH_ROOT", Path(__file__).parents[3])).resolve()
RECEIPTS_ROOT = VAULTMESH_ROOT / "receipts"  # receipt scroll root
FILE_RECEIPTS = RECEIPTS_ROOT / "file" / "file_operations.jsonl"  # file-operation scroll (JSONL)
def _vmhash_blake3(data: bytes) -> str:
    """VaultMesh-format hash of *data*: ``blake3:<hex digest>``."""
    digest = blake3.blake3(data).hexdigest()
    return "blake3:" + digest
def _emit_file_receipt(operation: str, file_path: str, details: dict, actor: str = "did:vm:mcp:file") -> str:
    """Emit a receipt for file operation.

    Appends a schema-2.0.0 receipt to the file-operations scroll and
    returns the blake3 root hash of the receipt body.
    """
    FILE_RECEIPTS.parent.mkdir(parents=True, exist_ok=True)
    body = {
        "operation": operation,
        "file_path": file_path,
        "details": details,
        "actor": actor,
    }
    # Hash the canonical (key-sorted) JSON encoding of the body.
    root_hash = _vmhash_blake3(json.dumps(body, sort_keys=True).encode())
    receipt = {
        "schema_version": "2.0.0",
        "type": "file_operation",
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "scroll": "file",
        "tags": ["file", operation, "mcp"],
        "root_hash": root_hash,
        "body": body,
    }
    with open(FILE_RECEIPTS, "a") as f:
        f.write(json.dumps(receipt) + "\n")
    return root_hash
def file_add(
    path: str,
    content: str,
    encoding: str = "utf-8",
    actor: str = "did:vm:mcp:file",
) -> dict[str, Any]:
    """
    Add (create or overwrite) a file with content.

    Args:
        path: File path relative to VAULTMESH_ROOT or absolute
        content: File content to write
        encoding: File encoding (default: utf-8)
        actor: DID of actor performing operation

    Returns:
        Result with file hash and receipt hash on success, or
        {"success": False, "error": ...} on any failure.
    """
    try:
        file_path = Path(path)
        if not file_path.is_absolute():
            file_path = VAULTMESH_ROOT / file_path
        # BUG FIX: capture existence BEFORE writing. The original evaluated
        # `not file_path.exists()` after write_text(), so the receipt's
        # "created" flag was always False.
        created = not file_path.exists()
        # Create parent directories if needed
        file_path.parent.mkdir(parents=True, exist_ok=True)
        # Write file
        file_path.write_text(content, encoding=encoding)
        # Encode once and reuse for both hash and size.
        raw = content.encode(encoding)
        content_hash = _vmhash_blake3(raw)
        details = {
            "content_hash": content_hash,
            "size_bytes": len(raw),
            "encoding": encoding,
            "created": created,
        }
        receipt_hash = _emit_file_receipt("add", str(file_path), details, actor)
        return {
            "success": True,
            "path": str(file_path),
            "content_hash": content_hash,
            "receipt_hash": receipt_hash,
            "size_bytes": details["size_bytes"],
        }
    except Exception as e:
        # Broad catch is deliberate: this is an MCP tool boundary that must
        # return an error dict rather than raise to the caller.
        return {"success": False, "error": str(e)}

View File

@@ -0,0 +1,234 @@
"""Guardian MCP tools - Merkle root anchoring operations."""
import json
import os
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Optional
import blake3
# VaultMesh root from env or default
VAULTMESH_ROOT = Path(os.environ.get("VAULTMESH_ROOT", Path(__file__).parents[3])).resolve()
RECEIPTS_ROOT = VAULTMESH_ROOT / "receipts"
# Scroll definitions: maps each scroll name to its append-only JSONL path,
# relative to VAULTMESH_ROOT. Each receipt is one JSON object per line; the
# Merkle root of a scroll is computed over blake3 hashes of those lines.
SCROLLS = {
    "drills": {"jsonl": "receipts/drills/drill_runs.jsonl"},
    "compliance": {"jsonl": "receipts/compliance/oracle_answers.jsonl"},
    "guardian": {"jsonl": "receipts/guardian/anchor_events.jsonl"},
    "treasury": {"jsonl": "receipts/treasury/treasury_events.jsonl"},
    "mesh": {"jsonl": "receipts/mesh/mesh_events.jsonl"},
    "offsec": {"jsonl": "receipts/offsec/offsec_events.jsonl"},
    "identity": {"jsonl": "receipts/identity/identity_events.jsonl"},
    "observability": {"jsonl": "receipts/observability/observability_events.jsonl"},
    "automation": {"jsonl": "receipts/automation/automation_events.jsonl"},
    "psi": {"jsonl": "receipts/psi/psi_events.jsonl"},
}
def _vmhash_blake3(data: bytes) -> str:
    """VaultMesh hash string for *data*: ``blake3:<hex digest>``."""
    return "blake3:{}".format(blake3.blake3(data).hexdigest())
def _merkle_root(hashes: list[str]) -> str:
    """Compute the Merkle root over a list of VaultMesh hash strings.

    Adjacent pairs are concatenated and re-hashed level by level; an odd
    trailing leaf is paired with itself. An empty list yields the hash of
    b"empty"; a single hash is returned unchanged.
    """
    if not hashes:
        return _vmhash_blake3(b"empty")
    level = list(hashes)
    while len(level) > 1:
        parents = [
            _vmhash_blake3((left + right).encode())
            for left, right in zip(level[0::2], level[1::2])
        ]
        if len(level) % 2:
            # Duplicate the unpaired last leaf.
            tail = level[-1]
            parents.append(_vmhash_blake3((tail + tail).encode()))
        level = parents
    return level[0]
def _compute_scroll_root(scroll_name: str) -> dict[str, Any]:
    """Compute the Merkle root of one scroll's JSONL receipts.

    Returns an {"error": ...} dict for unknown scroll names. A missing
    JSONL file yields the empty root with leaf_count 0 and exists=False.
    """
    entry = SCROLLS.get(scroll_name)
    if entry is None:
        return {"error": f"Unknown scroll: {scroll_name}"}
    jsonl_path = VAULTMESH_ROOT / entry["jsonl"]
    if not jsonl_path.exists():
        return {
            "scroll": scroll_name,
            "root": _vmhash_blake3(b"empty"),
            "leaf_count": 0,
            "exists": False,
        }
    leaf_hashes = []
    with open(jsonl_path, "r") as fh:
        for raw in fh:
            raw = raw.strip()
            if raw:
                leaf_hashes.append(_vmhash_blake3(raw.encode()))
    return {
        "scroll": scroll_name,
        "root": _merkle_root(leaf_hashes),
        "leaf_count": len(leaf_hashes),
        "exists": True,
    }
def guardian_anchor_now(
    scrolls: Optional[list[str]] = None,
    guardian_did: str = "did:vm:guardian:mcp",
    backend: str = "local",
) -> dict[str, Any]:
    """
    Anchor specified scrolls and emit a guardian receipt.

    Computes the Merkle root of each requested scroll, hashes the sorted
    mapping of roots into a single anchor hash, appends the anchor receipt
    to the guardian JSONL, and rewrites ROOT.guardian.txt.

    Args:
        scrolls: List of scroll names to anchor (default: all)
        guardian_did: DID of the guardian performing the anchor
        backend: Backend identifier (local, ethereum, stellar).
            NOTE(review): recorded in the receipt only — no external
            anchoring call happens here; confirm intended.

    Returns:
        Anchor receipt with roots for each scroll, or an {"error": ...}
        dict when a scroll name is invalid.
    """
    if scrolls is None:
        scrolls = list(SCROLLS.keys())
    # Validate scrolls up front so a partial anchor is never written.
    invalid = [s for s in scrolls if s not in SCROLLS]
    if invalid:
        return {"error": f"Invalid scrolls: {invalid}"}
    # Compute roots for each scroll
    roots = {}
    for scroll_name in scrolls:
        result = _compute_scroll_root(scroll_name)
        if "error" in result:
            return result
        roots[scroll_name] = result["root"]
    # Compute anchor hash over all roots (canonical sorted-key JSON).
    roots_json = json.dumps(roots, sort_keys=True).encode()
    anchor_hash = _vmhash_blake3(roots_json)
    now = datetime.now(timezone.utc)
    # Anchor ids are second-granular; two anchors in the same second collide.
    anchor_id = f"anchor-{now.strftime('%Y%m%d%H%M%S')}"
    receipt = {
        "schema_version": "2.0.0",
        "type": "guardian_anchor",
        "timestamp": now.isoformat(),
        "anchor_id": anchor_id,
        "backend": backend,
        "anchor_by": guardian_did,
        "anchor_epoch": int(now.timestamp()),
        "roots": roots,
        "scrolls": scrolls,
        "anchor_hash": anchor_hash,
    }
    # Write receipt to guardian JSONL
    guardian_path = VAULTMESH_ROOT / "receipts/guardian/anchor_events.jsonl"
    guardian_path.parent.mkdir(parents=True, exist_ok=True)
    with open(guardian_path, "a") as f:
        f.write(json.dumps(receipt) + "\n")
    # Update ROOT.guardian.txt. The guardian root is recomputed AFTER the
    # append above, so the root on disk includes this new anchor event.
    root_result = _compute_scroll_root("guardian")
    root_file = VAULTMESH_ROOT / "ROOT.guardian.txt"
    root_file.write_text(root_result["root"])
    return {
        "success": True,
        "receipt": receipt,
        "message": f"Anchored {len(scrolls)} scrolls with ID {anchor_id}",
    }
def guardian_verify_receipt(receipt_hash: str, scroll: str = "guardian") -> dict[str, Any]:
    """
    Verify a receipt exists in a scroll's JSONL.

    A line matches when its own blake3 line hash equals receipt_hash, or
    when the parsed receipt's anchor_hash/root_hash field equals it.

    Args:
        receipt_hash: The hash to verify (line hash, anchor_hash, or root_hash)
        scroll: The scroll to search in

    Returns:
        Verification result with line number, line hash, and the parsed
        receipt if found ("receipt" is None for a raw line-hash match on a
        malformed line).
    """
    if scroll not in SCROLLS:
        return {"error": f"Unknown scroll: {scroll}"}
    jsonl_path = VAULTMESH_ROOT / SCROLLS[scroll]["jsonl"]
    if not jsonl_path.exists():
        return {"verified": False, "reason": "Scroll JSONL does not exist"}
    # Search for a receipt matching by line hash or embedded hash fields.
    with open(jsonl_path, "r") as f:
        line_num = 0  # counts non-empty lines only
        for line in f:
            line = line.strip()
            if not line:
                continue
            line_num += 1
            line_hash = _vmhash_blake3(line.encode())
            try:
                data = json.loads(line)
            except json.JSONDecodeError:
                # Malformed line: still eligible for a raw line-hash match.
                data = None
            # BUG FIX: the original computed line_hash but never compared it,
            # despite its comment claiming to check "if the line hash matches".
            field_match = data is not None and (
                data.get("anchor_hash") == receipt_hash
                or data.get("root_hash") == receipt_hash
            )
            if line_hash == receipt_hash or field_match:
                return {
                    "verified": True,
                    "line_number": line_num,
                    "line_hash": line_hash,
                    "receipt": data,
                }
    return {"verified": False, "reason": "Receipt not found in scroll"}
def guardian_status() -> dict[str, Any]:
    """Report the root hash and leaf count of every scroll plus last anchor.

    The last anchor is the final parseable JSON line of the guardian JSONL,
    or None when the file is absent or contains no valid lines.
    """
    scroll_status: dict[str, Any] = {}
    for name in SCROLLS:
        info = _compute_scroll_root(name)
        scroll_status[name] = {
            "root": info["root"],
            "leaf_count": info["leaf_count"],
            "exists": info.get("exists", False),
        }
    anchor_log = VAULTMESH_ROOT / "receipts/guardian/anchor_events.jsonl"
    last_anchor = None
    if anchor_log.exists():
        with open(anchor_log, "r") as fh:
            for raw in fh:
                raw = raw.strip()
                if not raw:
                    continue
                try:
                    last_anchor = json.loads(raw)
                except json.JSONDecodeError:
                    continue
    return {
        "scrolls": scroll_status,
        "last_anchor": last_anchor,
        "vaultmesh_root": str(VAULTMESH_ROOT),
    }

View File

@@ -0,0 +1,578 @@
"""
Key Binding Engine - Authority bound to cryptographic reality
Every profile has a corresponding key reality:
- OBSERVER: Ephemeral (memory only, no signing power)
- OPERATOR: Session key (encrypted disk, revocable)
- GUARDIAN: Device-bound (secure enclave, non-exportable)
- PHOENIX: Time-locked (guardian key + approval artifact)
- SOVEREIGN: Offline root (hardware key, never automated)
Invariant: No profile exists without corresponding key reality.
If key cannot be proven → authority collapses downward, never upward.
"""
import json
import secrets
import hashlib
import os
from datetime import datetime, timezone, timedelta
from dataclasses import dataclass, asdict, field
from enum import Enum
from pathlib import Path
from typing import Any, Dict, Optional, List
import blake3
# Optional: Ed25519 support
try:
from nacl.signing import SigningKey, VerifyKey
from nacl.encoding import Base64Encoder
from nacl.exceptions import BadSignature
NACL_AVAILABLE = True
except ImportError:
NACL_AVAILABLE = False
# VaultMesh paths
VAULTMESH_ROOT = Path(os.environ.get("VAULTMESH_ROOT", Path(__file__).parents[3])).resolve()
RECEIPTS_ROOT = VAULTMESH_ROOT / "receipts"
KEYS_ROOT = VAULTMESH_ROOT / "keys"
def _vmhash_blake3(data: bytes) -> str:
    """Return the VaultMesh blake3 hash string (``blake3:<hex>``) for *data*."""
    return "blake3:%s" % blake3.blake3(data).hexdigest()
def _now_iso() -> str:
    """Current UTC time as a timezone-aware ISO-8601 string."""
    return datetime.now(tz=timezone.utc).isoformat()
class KeyBindingType(Enum):
    """Types of key bindings corresponding to profiles.

    The value strings are persisted in KeyBinding records and receipts,
    so they must remain stable.
    """
    EPHEMERAL = "ephemeral"    # 👁 OBSERVER - memory only
    SESSION = "session"        # ⚙ OPERATOR - encrypted disk
    DEVICE = "device"          # 🛡 GUARDIAN - secure enclave
    TIMELOCKED = "timelocked"  # 🔥 PHOENIX - guardian + approval
    HARDWARE = "hardware"      # 👑 SOVEREIGN - offline, air-gapped
class KeyStatus(Enum):
    """Key lifecycle states.

    The value strings are persisted on KeyBinding.status and in receipts,
    so they must remain stable.
    """
    ACTIVE = "active"    # usable for assertions
    EXPIRED = "expired"  # set automatically when expires_at has passed
    REVOKED = "revoked"  # set by revoke_key(); terminal
    PENDING = "pending"  # declared but not observed being set in this module
@dataclass
class KeyBinding:
    """A cryptographic key bound to a profile.

    Only the public half of the key pair is stored; private key material
    never enters this record.
    """
    key_id: str        # "key_<hex>" identifier generated at binding time
    profile: str       # one of: observer/operator/guardian/phoenix/sovereign
    binding_type: str  # KeyBindingType value string
    fingerprint: str   # first 24 chars of blake3 hash of the public key
    # Key material (public only stored here)
    public_key_b64: str
    # Binding constraints
    created_at: str            # ISO-8601 UTC timestamp
    expires_at: Optional[str]  # ISO-8601 UTC timestamp, or None = no expiry
    device_id: Optional[str] = None  # required for device-bound profiles
    # Status
    status: str = "active"  # KeyStatus value string
    revoked_at: Optional[str] = None
    revocation_reason: Optional[str] = None
    # Proof
    binding_receipt_hash: Optional[str] = None  # root_hash of the binding receipt
@dataclass
class KeyAssertion:
    """Runtime assertion that a key is valid for a profile.

    Emitted as a receipt body for both granted and denied assertions.
    """
    assertion_id: str  # "assert_<hex>" identifier for this check
    key_id: str        # key being asserted ("unknown" on lookup failure)
    profile: str       # profile the caller requested authority for
    binding_type: str
    fingerprint: str
    # Verification flags (all False on a denial)
    verified_at: str
    signature_valid: bool
    binding_valid: bool
    not_expired: bool
    not_revoked: bool
    # Result
    authority_granted: bool
    collapse_to: Optional[str] = None  # If authority denied, collapse to this
# In-memory key store (production would use secure storage).
# NOTE: process-local and unpersisted — bindings are lost on restart even
# though their receipts remain in the identity scroll.
_key_bindings: Dict[str, KeyBinding] = {}
_device_keys: Dict[str, str] = {}  # device_id -> key_id
# =============================================================================
# PROFILE → KEY BINDING REQUIREMENTS
# =============================================================================
# Per-profile constraints consulted by create_key_binding() and
# assert_key_authority(). max_ttl_seconds caps (and defaults) the binding
# lifetime; "device_types", where present, whitelists hardware classes.
PROFILE_KEY_REQUIREMENTS = {
    "observer": {
        "binding_type": KeyBindingType.EPHEMERAL,
        "requires_signature": False,
        "requires_device": False,
        "max_ttl_seconds": 3600,  # 1 hour
        "can_sign_receipts": False,
    },
    "operator": {
        "binding_type": KeyBindingType.SESSION,
        "requires_signature": True,
        "requires_device": False,
        "max_ttl_seconds": 86400,  # 24 hours
        "can_sign_receipts": True,
    },
    "guardian": {
        "binding_type": KeyBindingType.DEVICE,
        "requires_signature": True,
        "requires_device": True,
        "max_ttl_seconds": 604800,  # 7 days
        "can_sign_receipts": True,
        "device_types": ["secure_enclave", "tpm", "operator_phone"],
    },
    "phoenix": {
        "binding_type": KeyBindingType.TIMELOCKED,
        "requires_signature": True,
        "requires_device": True,
        # NOTE(review): requires_approval_artifact is declared but not
        # enforced by create_key_binding() — confirm where it is checked.
        "requires_approval_artifact": True,
        "max_ttl_seconds": 86400,  # 24 hours (auto-expire)
        "can_sign_receipts": True,
    },
    "sovereign": {
        "binding_type": KeyBindingType.HARDWARE,
        "requires_signature": True,
        "requires_device": True,
        "requires_human": True,
        "max_ttl_seconds": None,  # No auto-expire
        "can_sign_receipts": True,
        "device_types": ["hardware_key", "air_gapped"],
    },
}
# =============================================================================
# KEY OPERATIONS
# =============================================================================
def generate_key_pair() -> Dict[str, str]:
    """Generate an Ed25519 key pair (base64-encoded) plus a short fingerprint.

    Falls back to PLACEHOLDER_ material when PyNaCl is unavailable so the
    binding flow can still be exercised; placeholder keys cannot sign.
    """
    if not NACL_AVAILABLE:
        stub = secrets.token_bytes(32)
        return {
            "private_key_b64": "PLACEHOLDER_" + secrets.token_urlsafe(32),
            "public_key_b64": "PLACEHOLDER_" + secrets.token_urlsafe(32),
            "fingerprint": _vmhash_blake3(stub)[:24],
        }
    sk = SigningKey.generate()
    vk = sk.verify_key
    return {
        "private_key_b64": sk.encode(Base64Encoder).decode(),
        "public_key_b64": vk.encode(Base64Encoder).decode(),
        "fingerprint": _vmhash_blake3(vk.encode())[:24],
    }
def create_key_binding(
    profile: str,
    public_key_b64: str,
    device_id: Optional[str] = None,
    ttl_seconds: Optional[int] = None,
) -> Dict[str, Any]:
    """
    Create a key binding for a profile.

    Validates that the binding meets the profile's requirements
    (PROFILE_KEY_REQUIREMENTS), clamps the TTL to the profile maximum,
    emits a key_binding_created receipt, and stores the binding in the
    in-memory registry.

    Args:
        profile: Target profile name (observer..sovereign).
        public_key_b64: Base64-encoded public key to bind.
        device_id: Device identifier; mandatory for device-bound profiles.
        ttl_seconds: Requested lifetime; clamped to the profile's max TTL.

    Returns:
        {"success": True, ...binding summary...} or
        {"success": False, "error": ...}.
    """
    requirements = PROFILE_KEY_REQUIREMENTS.get(profile)
    if not requirements:
        return {"success": False, "error": f"Unknown profile: {profile}"}
    binding_type = requirements["binding_type"]
    # Validate device requirement
    if requirements.get("requires_device") and not device_id:
        return {
            "success": False,
            "error": f"Profile {profile} requires device binding",
        }
    # Calculate expiry: requested TTL is clamped to the profile maximum;
    # with no request, the profile maximum (if any) is used directly.
    max_ttl = requirements.get("max_ttl_seconds")
    if ttl_seconds and max_ttl and ttl_seconds > max_ttl:
        ttl_seconds = max_ttl
    expires_at = None
    if ttl_seconds:
        expires_at = (datetime.now(timezone.utc) + timedelta(seconds=ttl_seconds)).isoformat()
    elif max_ttl:
        expires_at = (datetime.now(timezone.utc) + timedelta(seconds=max_ttl)).isoformat()
    # Generate fingerprint (first 24 chars of the public key's blake3 hash)
    fingerprint = _vmhash_blake3(public_key_b64.encode())[:24]
    key_id = f"key_{secrets.token_hex(12)}"
    binding = KeyBinding(
        key_id=key_id,
        profile=profile,
        binding_type=binding_type.value,
        fingerprint=fingerprint,
        public_key_b64=public_key_b64,
        created_at=_now_iso(),
        expires_at=expires_at,
        device_id=device_id,
        status=KeyStatus.ACTIVE.value,
    )
    # Emit binding receipt first, then attach its hash to the binding.
    # (The receipt body therefore has binding_receipt_hash=None.)
    receipt = _emit_key_receipt("key_binding_created", asdict(binding))
    binding.binding_receipt_hash = receipt["root_hash"]
    # Store in the in-memory registry; a device can hold only one key.
    _key_bindings[key_id] = binding
    if device_id:
        _device_keys[device_id] = key_id
    return {
        "success": True,
        "key_id": key_id,
        "profile": profile,
        "binding_type": binding_type.value,
        "fingerprint": fingerprint,
        "expires_at": expires_at,
        "device_id": device_id,
        "receipt_hash": binding.binding_receipt_hash,
    }
def assert_key_authority(
    key_id: str,
    required_profile: str,
    signature: Optional[str] = None,
    challenge: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Assert that a key has authority for a profile.

    Checks, in order: binding exists, not revoked, not expired (expired
    keys are auto-marked EXPIRED), bound profile is at least the required
    profile, and — when the profile requires it — a valid signature over
    *challenge*. If any check fails, returns collapse_to indicating the
    maximum authority the key can still claim.

    Args:
        key_id: Identifier of the key binding to check.
        required_profile: Profile the caller wants authority for.
        signature: Base64 signature over *challenge* (if profile requires it).
        challenge: The challenge string that was signed.

    Returns:
        {"authority_granted": True, ...} on success, or the denial dict
        from _authority_denied() with a collapse_to profile.
    """
    assertion_id = f"assert_{secrets.token_hex(8)}"
    binding = _key_bindings.get(key_id)
    if not binding:
        return _authority_denied(assertion_id, required_profile, "Key not found", "observer")
    # Check status
    if binding.status == KeyStatus.REVOKED.value:
        return _authority_denied(assertion_id, required_profile, "Key revoked", "observer")
    # Check expiry ('Z' suffix normalized for fromisoformat compatibility)
    not_expired = True
    if binding.expires_at:
        expires = datetime.fromisoformat(binding.expires_at.replace('Z', '+00:00'))
        if datetime.now(timezone.utc) > expires:
            not_expired = False
            # Auto-revoke expired keys
            binding.status = KeyStatus.EXPIRED.value
            return _authority_denied(assertion_id, required_profile, "Key expired", "observer")
    # Check profile hierarchy: a key bound to a HIGHER profile may assert a
    # lower one, but never the reverse.
    profile_order = ["observer", "operator", "guardian", "phoenix", "sovereign"]
    bound_level = profile_order.index(binding.profile) if binding.profile in profile_order else -1
    required_level = profile_order.index(required_profile) if required_profile in profile_order else 99
    if bound_level < required_level:
        # Key is for lower profile - collapse to its actual level
        return _authority_denied(
            assertion_id,
            required_profile,
            f"Key bound to {binding.profile}, not sufficient for {required_profile}",
            binding.profile
        )
    # Check signature if required
    requirements = PROFILE_KEY_REQUIREMENTS.get(required_profile, {})
    signature_valid = True
    if requirements.get("requires_signature"):
        if not signature or not challenge:
            return _authority_denied(
                assertion_id,
                required_profile,
                "Signature required but not provided",
                _collapse_profile(required_profile)
            )
        # Verify signature. NOTE: when PyNaCl is unavailable or the key is a
        # PLACEHOLDER, the signature is accepted without verification.
        if NACL_AVAILABLE and not binding.public_key_b64.startswith("PLACEHOLDER"):
            try:
                verify_key = VerifyKey(binding.public_key_b64.encode(), Base64Encoder)
                sig_bytes = Base64Encoder.decode(signature.encode())
                verify_key.verify(challenge.encode(), sig_bytes)
            except BadSignature:
                signature_valid = False
                return _authority_denied(
                    assertion_id,
                    required_profile,
                    "Invalid signature",
                    _collapse_profile(required_profile)
                )
    # All checks passed — record a granted assertion receipt.
    assertion = KeyAssertion(
        assertion_id=assertion_id,
        key_id=key_id,
        profile=required_profile,
        binding_type=binding.binding_type,
        fingerprint=binding.fingerprint,
        verified_at=_now_iso(),
        signature_valid=signature_valid,
        binding_valid=True,
        not_expired=not_expired,
        not_revoked=binding.status != KeyStatus.REVOKED.value,
        authority_granted=True,
    )
    _emit_key_receipt("key_assertion_granted", asdict(assertion))
    return {
        "authority_granted": True,
        "assertion_id": assertion_id,
        "key_id": key_id,
        "profile": required_profile,
        "binding_type": binding.binding_type,
        "fingerprint": binding.fingerprint,
        "expires_at": binding.expires_at,
    }
def _authority_denied(
    assertion_id: str,
    required_profile: str,
    reason: str,
    collapse_to: str,
) -> Dict[str, Any]:
    """Emit a denial receipt and return the collapse result.

    All verification flags are recorded as False; collapse_to names the
    maximum authority the caller may still claim.
    """
    denial = KeyAssertion(
        assertion_id=assertion_id,
        key_id="unknown",
        profile=required_profile,
        binding_type="none",
        fingerprint="none",
        verified_at=_now_iso(),
        signature_valid=False,
        binding_valid=False,
        not_expired=False,
        not_revoked=False,
        authority_granted=False,
        collapse_to=collapse_to,
    )
    receipt_body = dict(asdict(denial))
    receipt_body["reason"] = reason
    _emit_key_receipt("key_assertion_denied", receipt_body)
    return {
        "authority_granted": False,
        "assertion_id": assertion_id,
        "reason": reason,
        "collapse_to": collapse_to,
        "message": f"Authority denied. Collapsing to {collapse_to}.",
    }
def _collapse_profile(profile: str) -> str:
    """Return the next-lower profile to collapse to on authority denial.

    Observer collapses to itself; unknown profiles collapse to observer.
    """
    ladder = ["observer", "operator", "guardian", "phoenix", "sovereign"]
    if profile in ladder:
        return ladder[max(ladder.index(profile) - 1, 0)]
    return "observer"
def revoke_key(key_id: str, reason: str) -> Dict[str, Any]:
    """Mark a key binding as revoked and emit a key_revoked receipt."""
    binding = _key_bindings.get(key_id)
    if binding is None:
        return {"success": False, "error": "Key not found"}
    binding.status = KeyStatus.REVOKED.value
    binding.revoked_at = _now_iso()
    binding.revocation_reason = reason
    _emit_key_receipt(
        "key_revoked",
        {
            "key_id": key_id,
            "profile": binding.profile,
            "fingerprint": binding.fingerprint,
            "reason": reason,
            "revoked_at": binding.revoked_at,
        },
    )
    return {
        "success": True,
        "key_id": key_id,
        "revoked_at": binding.revoked_at,
    }
def get_device_key(device_id: str) -> Optional[Dict[str, Any]]:
    """Look up the key binding registered for *device_id*, if any."""
    key_id = _device_keys.get(device_id)
    binding = _key_bindings.get(key_id) if key_id else None
    if binding is None:
        return None
    return {
        "key_id": key_id,
        "profile": binding.profile,
        "binding_type": binding.binding_type,
        "fingerprint": binding.fingerprint,
        "device_id": device_id,
        "status": binding.status,
        "expires_at": binding.expires_at,
    }
def list_key_bindings(profile: Optional[str] = None) -> Dict[str, Any]:
    """List key bindings, optionally restricted to a single profile."""
    summaries = [
        {
            "key_id": kid,
            "profile": b.profile,
            "binding_type": b.binding_type,
            "fingerprint": b.fingerprint,
            "status": b.status,
            "expires_at": b.expires_at,
            "device_id": b.device_id,
        }
        for kid, b in _key_bindings.items()
        if not profile or b.profile == profile
    ]
    return {
        "count": len(summaries),
        "bindings": summaries,
    }
def _emit_key_receipt(receipt_type: str, body: dict) -> dict:
    """Append a key-lifecycle receipt to the identity scroll and return it."""
    out_path = RECEIPTS_ROOT / "identity" / "key_events.jsonl"
    out_path.parent.mkdir(parents=True, exist_ok=True)
    record = {
        "schema_version": "2.0.0",
        "type": receipt_type,
        "timestamp": _now_iso(),
        "scroll": "identity",
        "tags": ["key", receipt_type],
        "root_hash": _vmhash_blake3(json.dumps(body, sort_keys=True).encode()),
        "body": body,
    }
    with out_path.open("a") as sink:
        sink.write(json.dumps(record) + "\n")
    return record
# =============================================================================
# GUARDIAN DEVICE BINDING
# =============================================================================
def bind_guardian_device(
    device_id: str,
    device_type: str,
    public_key_b64: str,
    device_attestation: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Bind a device as Guardian-of-record.

    Validates the device type against the guardian profile's whitelist,
    creates a 7-day guardian key binding, emits a guardian_device_bound
    receipt, and persists the binding to keys/guardian_device.json.

    The Guardian device becomes:
    - Escalation signer
    - Receipt verifier
    - Emergency revocation authority

    Args:
        device_id: Identifier of the device being bound.
        device_type: One of PROFILE_KEY_REQUIREMENTS["guardian"]["device_types"].
        public_key_b64: Base64-encoded public key held by the device.
        device_attestation: Optional attestation blob; only its blake3 hash
            is recorded (the attestation itself is not verified here).

    Returns:
        {"success": True, ...binding summary...} or
        {"success": False, "error": ...}.
    """
    valid_types = PROFILE_KEY_REQUIREMENTS["guardian"]["device_types"]
    if device_type not in valid_types:
        return {
            "success": False,
            "error": f"Invalid device type. Must be one of: {valid_types}",
        }
    # Create guardian key binding (TTL matches the guardian max of 7 days)
    result = create_key_binding(
        profile="guardian",
        public_key_b64=public_key_b64,
        device_id=device_id,
        ttl_seconds=604800,  # 7 days
    )
    if not result.get("success"):
        return result
    # Record device binding
    device_binding = {
        "device_id": device_id,
        "device_type": device_type,
        "key_id": result["key_id"],
        "fingerprint": result["fingerprint"],
        "bound_at": _now_iso(),
        "attestation_hash": _vmhash_blake3(device_attestation.encode()) if device_attestation else None,
        "capabilities": [
            "escalation_signer",
            "receipt_verifier",
            "emergency_revocation",
        ],
    }
    _emit_key_receipt("guardian_device_bound", device_binding)
    # Store guardian device info. NOTE: a single file — binding a new device
    # overwrites the previous Guardian-of-record.
    guardian_path = KEYS_ROOT / "guardian_device.json"
    guardian_path.parent.mkdir(parents=True, exist_ok=True)
    with open(guardian_path, "w") as f:
        json.dump(device_binding, f, indent=2)
    return {
        "success": True,
        "device_id": device_id,
        "device_type": device_type,
        "key_id": result["key_id"],
        "fingerprint": result["fingerprint"],
        "capabilities": device_binding["capabilities"],
        "message": f"Device {device_id} bound as Guardian-of-record",
    }
def get_guardian_device() -> Optional[Dict[str, Any]]:
    """Return the persisted Guardian device binding, or None when unbound."""
    binding_file = KEYS_ROOT / "guardian_device.json"
    if binding_file.exists():
        return json.loads(binding_file.read_text())
    return None

View File

@@ -0,0 +1,325 @@
"""Treasury MCP tools - Budget management operations."""
import json
import os
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Optional
import blake3
# VaultMesh root from env or default
VAULTMESH_ROOT = Path(os.environ.get("VAULTMESH_ROOT", Path(__file__).parents[3])).resolve()
TREASURY_JSONL = VAULTMESH_ROOT / "receipts/treasury/treasury_events.jsonl"
TREASURY_STATE = VAULTMESH_ROOT / "receipts/treasury/budgets.json"
# Schema version
SCHEMA_VERSION = "2.0.0"
def _vmhash_blake3(data: bytes) -> str:
    """VaultMesh hash string: ``blake3:`` followed by the hex digest."""
    hexdigest = blake3.blake3(data).hexdigest()
    return f"blake3:{hexdigest}"
def _load_budgets() -> dict[str, dict]:
    """Read the persisted budget state; missing or corrupt state yields {}."""
    try:
        return json.loads(TREASURY_STATE.read_text())
    except (FileNotFoundError, json.JSONDecodeError):
        return {}
def _save_budgets(budgets: dict[str, dict]) -> None:
    """Persist the full budget mapping to TREASURY_STATE as indented JSON.

    Overwrites the state file in place (not an atomic write).
    """
    TREASURY_STATE.parent.mkdir(parents=True, exist_ok=True)
    TREASURY_STATE.write_text(json.dumps(budgets, indent=2))
def _emit_receipt(receipt_type: str, body: dict, tags: list[str]) -> dict:
    """Append a treasury receipt to the JSONL scroll and refresh the root.

    The receipt's root_hash is the blake3 hash of the canonical
    (sorted-key) JSON encoding of the body.
    """
    digest = _vmhash_blake3(json.dumps(body, sort_keys=True).encode())
    receipt = {
        "schema_version": SCHEMA_VERSION,
        "type": receipt_type,
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "scroll": "treasury",
        "tags": tags,
        "root_hash": digest,
        "body": body,
    }
    TREASURY_JSONL.parent.mkdir(parents=True, exist_ok=True)
    with TREASURY_JSONL.open("a") as sink:
        sink.write(json.dumps(receipt) + "\n")
    # Keep ROOT.treasury.txt in sync with the scroll contents.
    _update_root()
    return receipt
def _update_root() -> None:
    """Rewrite ROOT.treasury.txt with the Merkle root of the treasury scroll.

    No-op when the JSONL is absent. A scroll with no non-empty lines hashes
    b"empty"; an odd leaf at any level is paired with itself.
    """
    if not TREASURY_JSONL.exists():
        return
    level = []
    with TREASURY_JSONL.open("r") as fh:
        for raw in fh:
            raw = raw.strip()
            if raw:
                level.append(_vmhash_blake3(raw.encode()))
    if not level:
        root = _vmhash_blake3(b"empty")
    else:
        while len(level) > 1:
            parents = []
            for i in range(0, len(level), 2):
                right = level[i + 1] if i + 1 < len(level) else level[i]
                parents.append(_vmhash_blake3((level[i] + right).encode()))
            level = parents
        root = level[0]
    (VAULTMESH_ROOT / "ROOT.treasury.txt").write_text(root)
def treasury_create_budget(
    budget_id: str,
    name: str,
    allocated: int,
    currency: str = "EUR",
    created_by: str = "did:vm:mcp:treasury",
) -> dict[str, Any]:
    """
    Create a new budget.

    Args:
        budget_id: Unique identifier for the budget
        name: Human-readable budget name
        allocated: Initial allocation amount (cents/smallest unit, >= 0)
        currency: Currency code (default: EUR)
        created_by: DID of the actor creating the budget

    Returns:
        Created budget with receipt info, or an {"error": ...} dict.
    """
    # Robustness: a negative initial allocation would create a budget that
    # can never be debited and would corrupt treasury totals.
    if allocated < 0:
        return {"error": f"Allocation must be non-negative, got {allocated}"}
    budgets = _load_budgets()
    if budget_id in budgets:
        return {"error": f"Budget already exists: {budget_id}"}
    now = datetime.now(timezone.utc)
    budget = {
        "id": budget_id,
        "name": name,
        "currency": currency,
        "allocated": allocated,
        "spent": 0,
        "created_at": now.isoformat(),
        "created_by": created_by,
    }
    budgets[budget_id] = budget
    _save_budgets(budgets)
    # Emit receipt (also refreshes ROOT.treasury.txt)
    receipt_body = {
        "budget_id": budget_id,
        "name": name,
        "currency": currency,
        "allocated": allocated,
        "created_by": created_by,
    }
    receipt = _emit_receipt(
        "treasury_budget_create",
        receipt_body,
        ["treasury", "budget", "create", budget_id],
    )
    return {
        "success": True,
        "budget": budget,
        "receipt_hash": receipt["root_hash"],
        "message": f"Created budget '{name}' with {allocated} {currency}",
    }
def treasury_debit(
    budget_id: str,
    amount: int,
    description: str,
    debited_by: str = "did:vm:mcp:treasury",
) -> dict[str, Any]:
    """
    Debit (spend) from a budget.

    Args:
        budget_id: Budget to debit from
        amount: Amount to debit (cents/smallest unit, must be > 0)
        description: Description of the expenditure
        debited_by: DID of the actor making the debit

    Returns:
        Updated budget with receipt info, or an {"error": ...} dict.
    """
    # Robustness: a zero or negative debit would bypass the funds check
    # below (a negative amount would silently REDUCE "spent").
    if amount <= 0:
        return {"error": f"Debit amount must be positive, got {amount}"}
    budgets = _load_budgets()
    if budget_id not in budgets:
        return {"error": f"Budget not found: {budget_id}"}
    budget = budgets[budget_id]
    remaining = budget["allocated"] - budget["spent"]
    if amount > remaining:
        return {
            "error": "Insufficient funds",
            "budget_id": budget_id,
            "requested": amount,
            "available": remaining,
        }
    budget["spent"] += amount
    budgets[budget_id] = budget
    _save_budgets(budgets)
    # Emit receipt (also refreshes ROOT.treasury.txt)
    receipt_body = {
        "budget_id": budget_id,
        "amount": amount,
        "currency": budget["currency"],
        "description": description,
        "debited_by": debited_by,
        "new_spent": budget["spent"],
        "new_remaining": budget["allocated"] - budget["spent"],
    }
    receipt = _emit_receipt(
        "treasury_debit",
        receipt_body,
        ["treasury", "debit", budget_id],
    )
    return {
        "success": True,
        "budget": budget,
        "remaining": budget["allocated"] - budget["spent"],
        "receipt_hash": receipt["root_hash"],
        "message": f"Debited {amount} from '{budget['name']}' - {description}",
    }
def treasury_credit(
    budget_id: str,
    amount: int,
    description: str,
    credited_by: str = "did:vm:mcp:treasury",
) -> dict[str, Any]:
    """
    Credit (add funds) to a budget.

    Args:
        budget_id: Budget to credit
        amount: Amount to add (cents/smallest unit, must be > 0)
        description: Description of the credit (refund, adjustment, etc.)
        credited_by: DID of the actor making the credit

    Returns:
        Updated budget with receipt info, or an {"error": ...} dict.
    """
    # Robustness: a zero or negative credit is meaningless here and a
    # negative one could push "allocated" below "spent", breaking the
    # debit invariant.
    if amount <= 0:
        return {"error": f"Credit amount must be positive, got {amount}"}
    budgets = _load_budgets()
    if budget_id not in budgets:
        return {"error": f"Budget not found: {budget_id}"}
    budget = budgets[budget_id]
    budget["allocated"] += amount
    budgets[budget_id] = budget
    _save_budgets(budgets)
    # Emit receipt (also refreshes ROOT.treasury.txt)
    receipt_body = {
        "budget_id": budget_id,
        "amount": amount,
        "currency": budget["currency"],
        "description": description,
        "credited_by": credited_by,
        "new_allocated": budget["allocated"],
    }
    receipt = _emit_receipt(
        "treasury_credit",
        receipt_body,
        ["treasury", "credit", budget_id],
    )
    return {
        "success": True,
        "budget": budget,
        "remaining": budget["allocated"] - budget["spent"],
        "receipt_hash": receipt["root_hash"],
        "message": f"Credited {amount} to '{budget['name']}' - {description}",
    }
def treasury_balance(budget_id: Optional[str] = None) -> dict[str, Any]:
    """
    Get budget balance(s).

    Args:
        budget_id: Specific budget ID (optional; all budgets when omitted)

    Returns:
        A single budget's balance, or all balances with aggregate totals,
        or an {"error": ...} dict when the requested budget is unknown.
    """
    budgets = _load_budgets()
    if budget_id:
        entry = budgets.get(budget_id)
        if entry is None:
            return {"error": f"Budget not found: {budget_id}"}
        return {
            "budget_id": budget_id,
            "name": entry["name"],
            "currency": entry["currency"],
            "allocated": entry["allocated"],
            "spent": entry["spent"],
            "remaining": entry["allocated"] - entry["spent"],
        }
    # No id given: summarize every budget plus grand totals.
    summaries = []
    grand_allocated = 0
    grand_spent = 0
    for bid, entry in budgets.items():
        grand_allocated += entry["allocated"]
        grand_spent += entry["spent"]
        summaries.append({
            "budget_id": bid,
            "name": entry["name"],
            "currency": entry["currency"],
            "allocated": entry["allocated"],
            "spent": entry["spent"],
            "remaining": entry["allocated"] - entry["spent"],
        })
    return {
        "budgets": summaries,
        "count": len(summaries),
        "totals": {
            "allocated": grand_allocated,
            "spent": grand_spent,
            "remaining": grand_allocated - grand_spent,
        },
    }