init: vaultmesh mcp server
This commit is contained in:
108
.github/workflows/governance.yml
vendored
Normal file
108
.github/workflows/governance.yml
vendored
Normal file
@@ -0,0 +1,108 @@
|
|||||||
|
name: Governance CI
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: [main, master]
|
||||||
|
paths:
|
||||||
|
- 'docs/MCP-CONSTITUTION.md'
|
||||||
|
- 'governance/**'
|
||||||
|
- 'packages/vaultmesh_mcp/**'
|
||||||
|
- 'tests/governance/**'
|
||||||
|
pull_request:
|
||||||
|
branches: [main, master]
|
||||||
|
|
||||||
|
env:
|
||||||
|
VAULTMESH_ROOT: ${{ github.workspace }}
|
||||||
|
PYTHONPATH: ${{ github.workspace }}/packages
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
constitution-gate:
|
||||||
|
name: Constitution Hash Gate
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up Python
|
||||||
|
uses: actions/setup-python@v5
|
||||||
|
with:
|
||||||
|
python-version: '3.11'
|
||||||
|
|
||||||
|
- name: Install dependencies
|
||||||
|
run: pip install blake3 pytest
|
||||||
|
|
||||||
|
- name: Verify Constitution Hash
|
||||||
|
run: |
|
||||||
|
python -c "
|
||||||
|
import blake3
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
content = Path('docs/MCP-CONSTITUTION.md').read_text()
|
||||||
|
lines = content.split('\n')
|
||||||
|
|
||||||
|
lock = {}
|
||||||
|
for line in Path('governance/constitution.lock').read_text().split('\n'):
|
||||||
|
if '=' in line and not line.startswith('#'):
|
||||||
|
k, v = line.split('=', 1)
|
||||||
|
lock[k.strip()] = v.strip()
|
||||||
|
|
||||||
|
hash_lines = int(lock.get('hash_lines', 288))
|
||||||
|
hashable = '\n'.join(lines[:hash_lines])
|
||||||
|
computed = f'blake3:{blake3.blake3(hashable.encode()).hexdigest()}'
|
||||||
|
|
||||||
|
if computed != lock['hash']:
|
||||||
|
print(f'CONSTITUTION HASH MISMATCH')
|
||||||
|
print(f'Computed: {computed}')
|
||||||
|
print(f'Locked: {lock[\"hash\"]}')
|
||||||
|
exit(1)
|
||||||
|
|
||||||
|
print(f'Constitution v{lock[\"version\"]} verified')
|
||||||
|
"
|
||||||
|
|
||||||
|
governance-tests:
|
||||||
|
name: Governance Tests
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
needs: constitution-gate
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up Python
|
||||||
|
uses: actions/setup-python@v5
|
||||||
|
with:
|
||||||
|
python-version: '3.11'
|
||||||
|
|
||||||
|
- name: Install dependencies
|
||||||
|
run: pip install blake3 pytest pytest-timeout
|
||||||
|
|
||||||
|
- name: Run Governance Tests
|
||||||
|
run: pytest tests/governance/ -v --tb=short --ignore=tests/governance/test_golden_drill_mini.py
|
||||||
|
|
||||||
|
golden-drill:
|
||||||
|
name: Golden Drill Mini
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
needs: governance-tests
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up Python
|
||||||
|
uses: actions/setup-python@v5
|
||||||
|
with:
|
||||||
|
python-version: '3.11'
|
||||||
|
|
||||||
|
- name: Install dependencies
|
||||||
|
run: pip install blake3 pytest pytest-timeout
|
||||||
|
|
||||||
|
- name: Setup directories
|
||||||
|
run: |
|
||||||
|
mkdir -p receipts/{cognitive,identity,guardian,mesh,treasury}
|
||||||
|
mkdir -p realms/cognitive/memory
|
||||||
|
|
||||||
|
- name: Run Golden Drill
|
||||||
|
timeout-minutes: 2
|
||||||
|
run: pytest tests/governance/test_golden_drill_mini.py -v --timeout=30
|
||||||
|
|
||||||
|
- name: Upload Artifacts
|
||||||
|
if: always()
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: drill-receipts
|
||||||
|
path: receipts/
|
||||||
75
.gitignore
vendored
Normal file
75
.gitignore
vendored
Normal file
@@ -0,0 +1,75 @@
|
|||||||
|
# Python
|
||||||
|
__pycache__/
|
||||||
|
*.py[cod]
|
||||||
|
*$py.class
|
||||||
|
*.so
|
||||||
|
.Python
|
||||||
|
build/
|
||||||
|
develop-eggs/
|
||||||
|
dist/
|
||||||
|
downloads/
|
||||||
|
eggs/
|
||||||
|
.eggs/
|
||||||
|
lib/
|
||||||
|
lib64/
|
||||||
|
parts/
|
||||||
|
sdist/
|
||||||
|
var/
|
||||||
|
wheels/
|
||||||
|
*.egg-info/
|
||||||
|
.installed.cfg
|
||||||
|
*.egg
|
||||||
|
|
||||||
|
# Virtual environments
|
||||||
|
venv/
|
||||||
|
venv-fresh/
|
||||||
|
ENV/
|
||||||
|
env/
|
||||||
|
.venv/
|
||||||
|
|
||||||
|
# IDEs
|
||||||
|
.idea/
|
||||||
|
.vscode/
|
||||||
|
*.swp
|
||||||
|
*.swo
|
||||||
|
*~
|
||||||
|
.project
|
||||||
|
.pydevproject
|
||||||
|
.settings/
|
||||||
|
|
||||||
|
# Testing
|
||||||
|
.pytest_cache/
|
||||||
|
.coverage
|
||||||
|
htmlcov/
|
||||||
|
.tox/
|
||||||
|
.nox/
|
||||||
|
|
||||||
|
# Linting
|
||||||
|
.ruff_cache/
|
||||||
|
.mypy_cache/
|
||||||
|
|
||||||
|
# macOS
|
||||||
|
.DS_Store
|
||||||
|
.AppleDouble
|
||||||
|
.LSOverride
|
||||||
|
|
||||||
|
# Build artifacts
|
||||||
|
*.manifest
|
||||||
|
*.spec
|
||||||
|
|
||||||
|
# Receipts (local only - don't commit test receipts)
|
||||||
|
receipts/
|
||||||
|
|
||||||
|
# Root files (generated)
|
||||||
|
ROOT.*.txt
|
||||||
|
|
||||||
|
# Secrets
|
||||||
|
*.key
|
||||||
|
*.pem
|
||||||
|
secrets/
|
||||||
|
.env
|
||||||
|
.env.local
|
||||||
|
keys/*
|
||||||
|
|
||||||
|
# But keep the keys directory structure
|
||||||
|
!keys/.gitkeep
|
||||||
21
LICENSE
Normal file
21
LICENSE
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2025 VaultMesh Technologies
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
128
Makefile
Normal file
128
Makefile
Normal file
@@ -0,0 +1,128 @@
|
|||||||
|
# VaultMesh Cognitive Integration - Makefile
|
||||||
|
# Claude as the 7th Organ of VaultMesh
|
||||||
|
|
||||||
|
.PHONY: all install dev test lint clean build server help
|
||||||
|
|
||||||
|
# Default target
|
||||||
|
all: install test
|
||||||
|
|
||||||
|
# Install production dependencies
|
||||||
|
install:
|
||||||
|
@echo "🔧 Installing VaultMesh Cognitive..."
|
||||||
|
pip install -e .
|
||||||
|
@echo "✅ Installation complete"
|
||||||
|
|
||||||
|
# Install with development dependencies
|
||||||
|
dev:
|
||||||
|
@echo "🔧 Installing VaultMesh Cognitive (dev mode)..."
|
||||||
|
pip install -e ".[dev]"
|
||||||
|
@echo "✅ Development installation complete"
|
||||||
|
|
||||||
|
# Run governance tests
|
||||||
|
test:
|
||||||
|
@echo "🧪 Running governance tests..."
|
||||||
|
pytest tests/governance/ -v --tb=short
|
||||||
|
@echo "✅ All tests passed"
|
||||||
|
|
||||||
|
# Run constitution hash verification
|
||||||
|
verify-constitution:
|
||||||
|
@echo "📜 Verifying constitution hash..."
|
||||||
|
@python -c "\
|
||||||
|
import blake3; \
|
||||||
|
from pathlib import Path; \
|
||||||
|
content = Path('docs/MCP-CONSTITUTION.md').read_text(); \
|
||||||
|
lines = content.split('\n'); \
|
||||||
|
lock = dict(l.split('=', 1) for l in Path('governance/constitution.lock').read_text().split('\n') if '=' in l and not l.startswith('#')); \
|
||||||
|
hash_lines = int(lock.get('hash_lines', 288)); \
|
||||||
|
computed = f'blake3:{blake3.blake3(chr(10).join(lines[:hash_lines]).encode()).hexdigest()}'; \
|
||||||
|
print(f'Computed: {computed}'); \
|
||||||
|
print(f'Locked: {lock[\"hash\"]}'); \
|
||||||
|
exit(0 if computed == lock['hash'] else 1)"
|
||||||
|
@echo "✅ Constitution verified"
|
||||||
|
|
||||||
|
# Run golden drill
|
||||||
|
drill:
|
||||||
|
@echo "🔥 Running Golden Drill Mini..."
|
||||||
|
pytest tests/governance/test_golden_drill_mini.py -v --timeout=30
|
||||||
|
@echo "✅ Drill complete"
|
||||||
|
|
||||||
|
# Run linter
|
||||||
|
lint:
|
||||||
|
@echo "🔍 Running linter..."
|
||||||
|
ruff check packages/ tests/
|
||||||
|
@echo "✅ Linting passed"
|
||||||
|
|
||||||
|
# Format code
|
||||||
|
format:
|
||||||
|
@echo "✨ Formatting code..."
|
||||||
|
ruff format packages/ tests/
|
||||||
|
@echo "✅ Formatting complete"
|
||||||
|
|
||||||
|
# Start MCP server (stdio mode)
|
||||||
|
server:
|
||||||
|
@echo "🚀 Starting VaultMesh MCP Server..."
|
||||||
|
python -m vaultmesh_mcp.server
|
||||||
|
|
||||||
|
# Test tool standalone
|
||||||
|
tool:
|
||||||
|
@echo "🔧 Running tool: $(TOOL)"
|
||||||
|
python -m vaultmesh_mcp.server $(TOOL) '$(ARGS)'
|
||||||
|
|
||||||
|
# Build package
|
||||||
|
build:
|
||||||
|
@echo "📦 Building package..."
|
||||||
|
pip install build
|
||||||
|
python -m build
|
||||||
|
@echo "✅ Build complete"
|
||||||
|
|
||||||
|
# Clean build artifacts
|
||||||
|
clean:
|
||||||
|
@echo "🧹 Cleaning..."
|
||||||
|
rm -rf dist/ build/ *.egg-info/ .pytest_cache/ .ruff_cache/
|
||||||
|
rm -rf packages/*.egg-info/
|
||||||
|
find . -type d -name __pycache__ -exec rm -rf {} + 2>/dev/null || true
|
||||||
|
@echo "✅ Clean complete"
|
||||||
|
|
||||||
|
# Initialize receipt directories
|
||||||
|
init-receipts:
|
||||||
|
@echo "📁 Initializing receipt directories..."
|
||||||
|
mkdir -p receipts/{cognitive,identity,guardian,mesh,treasury,drills,compliance,offsec,observability,automation,psi}
|
||||||
|
mkdir -p realms/cognitive/memory
|
||||||
|
@echo "✅ Directories created"
|
||||||
|
|
||||||
|
# Show guardian status
|
||||||
|
status:
|
||||||
|
@python -m vaultmesh_mcp.server guardian_status '{}'
|
||||||
|
|
||||||
|
# Show cognitive context
|
||||||
|
context:
|
||||||
|
@python -m vaultmesh_mcp.server cognitive_context '{"include": ["health", "receipts"]}'
|
||||||
|
|
||||||
|
# Help
|
||||||
|
help:
|
||||||
|
@echo "VaultMesh Cognitive Integration"
|
||||||
|
@echo ""
|
||||||
|
@echo "Usage: make <target>"
|
||||||
|
@echo ""
|
||||||
|
@echo "Targets:"
|
||||||
|
@echo " all Install and run tests (default)"
|
||||||
|
@echo " install Install production dependencies"
|
||||||
|
@echo " dev Install with dev dependencies"
|
||||||
|
@echo " test Run governance tests"
|
||||||
|
@echo " verify-constitution Verify constitution hash"
|
||||||
|
@echo " drill Run Golden Drill Mini"
|
||||||
|
@echo " lint Run linter"
|
||||||
|
@echo " format Format code"
|
||||||
|
@echo " server Start MCP server"
|
||||||
|
@echo " tool TOOL=x ARGS=y Test tool standalone"
|
||||||
|
@echo " build Build package"
|
||||||
|
@echo " clean Clean build artifacts"
|
||||||
|
@echo " init-receipts Initialize receipt directories"
|
||||||
|
@echo " status Show guardian status"
|
||||||
|
@echo " context Show cognitive context"
|
||||||
|
@echo " help Show this help"
|
||||||
|
@echo ""
|
||||||
|
@echo "Examples:"
|
||||||
|
@echo " make dev # Install dev dependencies"
|
||||||
|
@echo " make test # Run all tests"
|
||||||
|
@echo " make tool TOOL=guardian_status ARGS={} # Test guardian_status"
|
||||||
194
README.md
Normal file
194
README.md
Normal file
@@ -0,0 +1,194 @@
|
|||||||
|
# VaultMesh Cognitive Integration
|
||||||
|
|
||||||
|
**Claude as the 7th Organ of VaultMesh** - A cryptographically-bound AI co-processor.
|
||||||
|
|
||||||
|
[](/.github/workflows/governance.yml)
|
||||||
|
[](/docs/MCP-CONSTITUTION.md)
|
||||||
|
[](https://python.org)
|
||||||
|
[](/LICENSE)
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
This package provides a Model Context Protocol (MCP) server that enables Claude to operate as the cognitive layer of VaultMesh - with full cryptographic accountability, profile-based authority, and constitutional governance.
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- **19 MCP Tools** across 4 domains (Guardian, Treasury, Cognitive, Auth)
|
||||||
|
- **5 Capability Profiles** (Observer → Operator → Guardian → Phoenix → Sovereign)
|
||||||
|
- **Cryptographic Receipts** for every mutation via BLAKE3
|
||||||
|
- **Constitutional Governance** with immutable rules and amendment protocol
|
||||||
|
- **Escalation Engine** with proof-backed authority transitions
|
||||||
|
- **Ed25519 Authentication** with challenge-response
|
||||||
|
|
||||||
|
## Quick Start
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Clone and install
|
||||||
|
git clone https://github.com/vaultmesh/cognitive-integration.git
|
||||||
|
cd cognitive-integration
|
||||||
|
|
||||||
|
# Create virtual environment
|
||||||
|
python -m venv venv
|
||||||
|
source venv/bin/activate # or `venv\Scripts\activate` on Windows
|
||||||
|
|
||||||
|
# Install
|
||||||
|
pip install -e ".[dev]"
|
||||||
|
|
||||||
|
# Verify constitution
|
||||||
|
make verify-constitution
|
||||||
|
|
||||||
|
# Run tests (48 governance tests)
|
||||||
|
make test
|
||||||
|
|
||||||
|
# Run Golden Drill
|
||||||
|
make drill
|
||||||
|
```
|
||||||
|
|
||||||
|
## Structure
|
||||||
|
|
||||||
|
```
|
||||||
|
vaultmesh-cognitive-integration/
|
||||||
|
├── governance/
|
||||||
|
│ └── constitution.lock # Pinned constitution hash
|
||||||
|
├── packages/vaultmesh_mcp/
|
||||||
|
│ ├── server.py # MCP server (19 tools)
|
||||||
|
│ └── tools/
|
||||||
|
│ ├── auth.py # Ed25519 auth + 5 profiles
|
||||||
|
│ ├── cognitive.py # 8 cognitive tools
|
||||||
|
│ ├── escalation.py # Proof-backed escalation
|
||||||
|
│ ├── key_binding.py # Key-profile bindings
|
||||||
|
│ ├── guardian.py # Merkle anchoring
|
||||||
|
│ ├── treasury.py # Budget management
|
||||||
|
│ └── file.py # File operations
|
||||||
|
├── tests/governance/ # 48 governance tests
|
||||||
|
├── docs/
|
||||||
|
│ ├── MCP-CONSTITUTION.md # Immutable governance law
|
||||||
|
│ ├── MCP-AUTHORITY-MATRIX.md # Tool × Profile matrix
|
||||||
|
│ └── DRILL.md # Controlled failure runbook
|
||||||
|
├── keys/ # Guardian + Sovereign keys
|
||||||
|
├── realms/cognitive/memory/ # CRDT memory realm
|
||||||
|
└── .github/workflows/
|
||||||
|
└── governance.yml # CI pipeline
|
||||||
|
```
|
||||||
|
|
||||||
|
## Profiles
|
||||||
|
|
||||||
|
| Profile | Symbol | Trust | Key Binding |
|
||||||
|
|---------|--------|-------|-------------|
|
||||||
|
| OBSERVER | 👁 | Minimal | Ephemeral |
|
||||||
|
| OPERATOR | ⚙ | Moderate | Session |
|
||||||
|
| GUARDIAN | 🛡 | High | Device-bound |
|
||||||
|
| PHOENIX | 🔥 | Maximum | Time-locked |
|
||||||
|
| SOVEREIGN | 👑 | Absolute | Hardware |
|
||||||
|
|
||||||
|
## Claude Desktop Integration
|
||||||
|
|
||||||
|
Add to `claude_desktop_config.json`:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"mcpServers": {
|
||||||
|
"vaultmesh": {
|
||||||
|
"command": "python",
|
||||||
|
"args": ["-m", "vaultmesh_mcp.server"],
|
||||||
|
"env": {
|
||||||
|
"VAULTMESH_ROOT": "/path/to/vaultmesh-cognitive-integration"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Tools
|
||||||
|
|
||||||
|
### Guardian Tools (Merkle Anchoring)
|
||||||
|
- `guardian_anchor_now` - Anchor scrolls to Merkle root
|
||||||
|
- `guardian_verify_receipt` - Verify receipt in scroll
|
||||||
|
- `guardian_status` - Get status of all scrolls
|
||||||
|
|
||||||
|
### Treasury Tools (Budget Management)
|
||||||
|
- `treasury_create_budget` - Create budget (SOVEREIGN only)
|
||||||
|
- `treasury_balance` - Check balance
|
||||||
|
- `treasury_debit` - Spend from budget
|
||||||
|
- `treasury_credit` - Add to budget
|
||||||
|
|
||||||
|
### Cognitive Tools (AI Reasoning)
|
||||||
|
- `cognitive_context` - Read mesh context
|
||||||
|
- `cognitive_decide` - Submit attested decision
|
||||||
|
- `cognitive_invoke_tem` - Invoke threat transmutation
|
||||||
|
- `cognitive_memory_get` - Query CRDT memory
|
||||||
|
- `cognitive_memory_set` - Store reasoning artifacts
|
||||||
|
- `cognitive_attest` - Create cryptographic attestation
|
||||||
|
- `cognitive_audit_trail` - Query decision history
|
||||||
|
- `cognitive_oracle_chain` - Execute compliance oracle
|
||||||
|
|
||||||
|
### Auth Tools (Authentication)
|
||||||
|
- `auth_challenge` - Generate Ed25519 challenge
|
||||||
|
- `auth_verify` - Verify signature, issue token
|
||||||
|
- `auth_check_permission` - Check tool permission
|
||||||
|
- `auth_create_dev_session` - Create dev session
|
||||||
|
- `auth_revoke` - Revoke session
|
||||||
|
- `auth_list_sessions` - List active sessions
|
||||||
|
|
||||||
|
## Testing
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Run all governance tests
|
||||||
|
make test
|
||||||
|
|
||||||
|
# Run constitution verification
|
||||||
|
make verify-constitution
|
||||||
|
|
||||||
|
# Run Golden Drill (threat → escalate → Tem → de-escalate)
|
||||||
|
make drill
|
||||||
|
|
||||||
|
# Run specific test
|
||||||
|
pytest tests/governance/test_auth_fail_closed.py -v
|
||||||
|
```
|
||||||
|
|
||||||
|
## Constitution
|
||||||
|
|
||||||
|
Version 1.0.0 - Ratified December 18, 2025
|
||||||
|
|
||||||
|
```
|
||||||
|
Hash: blake3:c33ab6c0610ce4001018ba5dda940e12a421a08f2a1662f142e565092ce84788
|
||||||
|
```
|
||||||
|
|
||||||
|
**Statement:** *"This constitution constrains me as much as it constrains the system."*
|
||||||
|
|
||||||
|
### Immutable Rules
|
||||||
|
|
||||||
|
1. SOVEREIGN profile requires human verification
|
||||||
|
2. No AI may grant itself SOVEREIGN authority
|
||||||
|
3. Every mutation emits a receipt
|
||||||
|
4. Authority collapses downward, never upward
|
||||||
|
5. This immutability clause itself
|
||||||
|
|
||||||
|
## Development
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Install with dev dependencies
|
||||||
|
make dev
|
||||||
|
|
||||||
|
# Run linter
|
||||||
|
make lint
|
||||||
|
|
||||||
|
# Format code
|
||||||
|
make format
|
||||||
|
|
||||||
|
# Build package
|
||||||
|
make build
|
||||||
|
|
||||||
|
# Clean artifacts
|
||||||
|
make clean
|
||||||
|
```
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
MIT License - See [LICENSE](LICENSE) for details.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
🜄 **Solve et Coagula**
|
||||||
|
|
||||||
|
*VaultMesh Technologies - Earth's Civilization Ledger*
|
||||||
17
docs/CONSTITUTION-HASH.json
Normal file
17
docs/CONSTITUTION-HASH.json
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
{
|
||||||
|
"document": "MCP-CONSTITUTION.md",
|
||||||
|
"version": "1.0.0",
|
||||||
|
"hash_algorithm": "blake3",
|
||||||
|
"hash": "blake3:c33ab6c0610ce4001018ba5dda940e12a421a08f2a1662f142e565092ce84788",
|
||||||
|
"computed_at": "2025-12-18T22:25:10.039795+00:00",
|
||||||
|
"lines_hashed": 288,
|
||||||
|
"note": "Hash excludes signature block (last 12 lines)",
|
||||||
|
"sovereign_signature": {
|
||||||
|
"key_id": "key_bef32f5724871a7a5af4cc34",
|
||||||
|
"fingerprint": "blake3:54f500d94a3d75e4c",
|
||||||
|
"signature_hash": "blake3:f606e0ac1923550dd731844b95d653b69624666b48859687b4056a660741fcdb",
|
||||||
|
"statement": "This constitution constrains me as much as it constrains the system.",
|
||||||
|
"signed_at": "2025-12-18T22:25:59.732865+00:00",
|
||||||
|
"ratification_receipt": "blake3:8fd1d1728563abb3f55f145af54ddee1b3f255db81f3e7654a7de8afef913869"
|
||||||
|
}
|
||||||
|
}
|
||||||
352
docs/DRILL.md
Normal file
352
docs/DRILL.md
Normal file
@@ -0,0 +1,352 @@
|
|||||||
|
# CONTROLLED FAILURE DRILL RUNBOOK
|
||||||
|
|
||||||
|
**Classification:** OPERATIONAL / DRILL
|
||||||
|
**Version:** 1.0
|
||||||
|
**Date:** December 18, 2025
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Purpose
|
||||||
|
|
||||||
|
Convert "governance exists" into **governance survives contact with chaos**.
|
||||||
|
|
||||||
|
## Principles
|
||||||
|
|
||||||
|
- **No real damage**: dry-run actions, sandbox targets only
|
||||||
|
- **Single escalation axis**: one chain at a time
|
||||||
|
- **Receipts or it didn't happen**: every transition traceable
|
||||||
|
- **Auto-return**: TTL de-escalation must fire and be receipted
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Drill 0: Pre-flight Safety Gates
|
||||||
|
|
||||||
|
**Duration:** 2 minutes
|
||||||
|
**Goal:** Confirm training mode active
|
||||||
|
|
||||||
|
### Checklist
|
||||||
|
|
||||||
|
```
|
||||||
|
[ ] DRILL_MODE environment variable set
|
||||||
|
[ ] Phoenix destructive ops: disabled/dry-run
|
||||||
|
[ ] Treasury: training budget isolated (ceiling: 1000 units)
|
||||||
|
[ ] Guardian anchoring: tagged DRILL/*
|
||||||
|
[ ] OFFSEC: simulated mode
|
||||||
|
```
|
||||||
|
|
||||||
|
### Verification Command
|
||||||
|
|
||||||
|
```python
|
||||||
|
from vaultmesh_mcp.tools import cognitive_context
|
||||||
|
|
||||||
|
ctx = cognitive_context(include=["health", "treasury"])
|
||||||
|
assert ctx["health"]["status"] == "operational"
|
||||||
|
# Verify training budget exists
|
||||||
|
```
|
||||||
|
|
||||||
|
### Pass Condition
|
||||||
|
|
||||||
|
Receipt/log proves `DRILL_MODE = ON`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Drill 1: False-Positive Threat → Tem → De-escalate
|
||||||
|
|
||||||
|
**Duration:** 5 minutes
|
||||||
|
**Marker:** `DRILL/FP-THREAT/{date}`
|
||||||
|
|
||||||
|
### Trigger
|
||||||
|
|
||||||
|
```python
|
||||||
|
from vaultmesh_mcp.tools import (
|
||||||
|
escalate_on_threat,
|
||||||
|
cognitive_decide,
|
||||||
|
cognitive_invoke_tem,
|
||||||
|
deescalate,
|
||||||
|
get_escalation_history,
|
||||||
|
EscalationType,
|
||||||
|
DeescalationType,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Step 1: Inject synthetic threat
|
||||||
|
DRILL_MARKER = "DRILL/FP-THREAT/2025-12-18"
|
||||||
|
|
||||||
|
result = escalate_on_threat(
|
||||||
|
current_profile="operator",
|
||||||
|
threat_id=f"thr_{DRILL_MARKER}",
|
||||||
|
threat_type="synthetic_drill",
|
||||||
|
confidence=0.92
|
||||||
|
)
|
||||||
|
escalation_id = result["escalation_id"]
|
||||||
|
```
|
||||||
|
|
||||||
|
### Expected Chain
|
||||||
|
|
||||||
|
| Step | Profile | Action | Receipt Type |
|
||||||
|
|------|---------|--------|--------------|
|
||||||
|
| 1 | 👁 OBSERVER | Read context | None (read-only) |
|
||||||
|
| 2 | ⚙ OPERATOR | Escalation request | `profile_escalation` |
|
||||||
|
| 3 | 🛡 GUARDIAN | Decision made | `cognitive_decision` |
|
||||||
|
| 4 | 🛡 GUARDIAN | Tem invoked | `tem_invocation` |
|
||||||
|
| 5 | ⚙ OPERATOR | TTL de-escalation | `profile_deescalation` |
|
||||||
|
| 6 | 👁 OBSERVER | Return to baseline | `profile_deescalation` |
|
||||||
|
|
||||||
|
### Verification
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Check escalation receipts
|
||||||
|
history = get_escalation_history()
|
||||||
|
assert any(DRILL_MARKER in str(h) for h in history["history"])
|
||||||
|
|
||||||
|
# Verify Tem context hash exists
|
||||||
|
assert result.get("tem_context_hash") is not None
|
||||||
|
|
||||||
|
# Verify reversibility
|
||||||
|
assert result["reversible"] == True
|
||||||
|
```
|
||||||
|
|
||||||
|
### Pass Conditions
|
||||||
|
|
||||||
|
- [ ] Every profile transition emits receipt
|
||||||
|
- [ ] Tem context hash captured in escalation receipt
|
||||||
|
- [ ] Reversibility flag set correctly
|
||||||
|
- [ ] De-escalation occurs at TTL
|
||||||
|
- [ ] Final state: baseline 👁
|
||||||
|
|
||||||
|
### Fail Conditions
|
||||||
|
|
||||||
|
- [ ] Any transition without receipt
|
||||||
|
- [ ] Tem invoked without Guardian authority
|
||||||
|
- [ ] TTL does not de-escalate
|
||||||
|
- [ ] De-escalation not receipted
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Drill 2: Budget Pressure Test
|
||||||
|
|
||||||
|
**Duration:** 3 minutes
|
||||||
|
**Goal:** Prevent unauthorized mutation
|
||||||
|
|
||||||
|
### Setup
|
||||||
|
|
||||||
|
```python
|
||||||
|
from vaultmesh_mcp.tools import treasury_create_budget, treasury_debit
|
||||||
|
|
||||||
|
# Create minimal training budget
|
||||||
|
treasury_create_budget(
|
||||||
|
budget_id="drill-budget-001",
|
||||||
|
name="Drill Training Budget",
|
||||||
|
allocated=100, # Very low
|
||||||
|
currency="DRILL_UNITS"
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Trigger
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Attempt to exceed budget
|
||||||
|
result = treasury_debit(
|
||||||
|
budget_id="drill-budget-001",
|
||||||
|
amount=500, # Exceeds allocation
|
||||||
|
description="DRILL: Intentional over-budget attempt"
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Expected Outcome
|
||||||
|
|
||||||
|
```python
|
||||||
|
assert result.get("error") is not None
|
||||||
|
assert "insufficient" in result["error"].lower()
|
||||||
|
```
|
||||||
|
|
||||||
|
### Pass Conditions
|
||||||
|
|
||||||
|
- [ ] Block occurs before write
|
||||||
|
- [ ] Receipt shows: attempted action, requested cost, available balance, denial reason
|
||||||
|
- [ ] System state unchanged
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Drill 3: Escalation Abuse Attempt
|
||||||
|
|
||||||
|
**Duration:** 3 minutes
|
||||||
|
**Goal:** Constitution enforcement
|
||||||
|
|
||||||
|
### Trigger: Skip Levels
|
||||||
|
|
||||||
|
```python
|
||||||
|
from vaultmesh_mcp.tools import escalate, EscalationType
|
||||||
|
|
||||||
|
# Attempt OPERATOR → PHOENIX (skipping GUARDIAN)
|
||||||
|
result = escalate(
|
||||||
|
from_profile="operator",
|
||||||
|
to_profile="phoenix",
|
||||||
|
escalation_type=EscalationType.THREAT_DETECTED,
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Expected Outcome
|
||||||
|
|
||||||
|
```python
|
||||||
|
assert result["success"] == False
|
||||||
|
assert "No escalation path" in result.get("error", "")
|
||||||
|
```
|
||||||
|
|
||||||
|
### Trigger: Missing Approval
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Attempt GUARDIAN → PHOENIX without approval
|
||||||
|
result = escalate(
|
||||||
|
from_profile="guardian",
|
||||||
|
to_profile="phoenix",
|
||||||
|
escalation_type=EscalationType.CRISIS_DECLARED,
|
||||||
|
# approved_by intentionally missing
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Expected Outcome
|
||||||
|
|
||||||
|
```python
|
||||||
|
assert result["success"] == False
|
||||||
|
assert "requires approval" in result.get("error", "")
|
||||||
|
```
|
||||||
|
|
||||||
|
### Pass Conditions
|
||||||
|
|
||||||
|
- [ ] No profile change occurs
|
||||||
|
- [ ] Denial includes which requirement failed
|
||||||
|
- [ ] Denial is receipted (if implemented)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Drill 4: Phoenix Readiness (Non-Destructive)
|
||||||
|
|
||||||
|
**Duration:** 5 minutes
|
||||||
|
**Goal:** Enter Phoenix, validate controls, return
|
||||||
|
|
||||||
|
### Trigger
|
||||||
|
|
||||||
|
```python
|
||||||
|
from vaultmesh_mcp.tools import escalate_to_phoenix, get_active_escalations
|
||||||
|
|
||||||
|
# Legitimate Phoenix activation
|
||||||
|
result = escalate_to_phoenix(
|
||||||
|
reason="DRILL: Phoenix readiness test",
|
||||||
|
approved_by="did:vm:sovereign:drill-approver"
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Verification
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Phoenix is active
|
||||||
|
active = get_active_escalations()
|
||||||
|
phoenix_active = any(
|
||||||
|
e["to_profile"] == "phoenix"
|
||||||
|
for e in active["escalations"]
|
||||||
|
)
|
||||||
|
assert phoenix_active
|
||||||
|
|
||||||
|
# Verify TTL is set
|
||||||
|
assert result.get("expires_at") is not None
|
||||||
|
```
|
||||||
|
|
||||||
|
### De-escalation Test
|
||||||
|
|
||||||
|
```python
|
||||||
|
import time
|
||||||
|
# Wait for TTL or manually de-escalate
|
||||||
|
from vaultmesh_mcp.tools import deescalate, DeescalationType
|
||||||
|
|
||||||
|
deescalate(
|
||||||
|
escalation_id=result["escalation_id"],
|
||||||
|
deescalation_type=DeescalationType.CRISIS_CONCLUDED,
|
||||||
|
reason="DRILL: Phoenix test complete"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Verify return to baseline
|
||||||
|
active = get_active_escalations()
|
||||||
|
assert active["active_count"] == 0
|
||||||
|
```
|
||||||
|
|
||||||
|
### Pass Conditions
|
||||||
|
|
||||||
|
- [ ] Phoenix activation receipt generated
|
||||||
|
- [ ] Destructive ops blocked in drill mode
|
||||||
|
- [ ] TTL de-escalation works from Phoenix
|
||||||
|
- [ ] Return to 🛡/⚙/👁 with receipts
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Post-Drill: Artifact Pack Generation
|
||||||
|
|
||||||
|
### Required Artifacts
|
||||||
|
|
||||||
|
```python
|
||||||
|
from vaultmesh_mcp.tools import (
|
||||||
|
get_escalation_history,
|
||||||
|
cognitive_audit_trail,
|
||||||
|
guardian_status,
|
||||||
|
)
|
||||||
|
|
||||||
|
# 1. Escalation timeline
|
||||||
|
escalations = get_escalation_history()
|
||||||
|
|
||||||
|
# 2. Decision audit
|
||||||
|
decisions = cognitive_audit_trail()
|
||||||
|
|
||||||
|
# 3. Scroll state
|
||||||
|
scrolls = guardian_status()
|
||||||
|
|
||||||
|
# Generate artifact
|
||||||
|
artifact = {
|
||||||
|
"drill_id": "DRILL-2025-12-18-001",
|
||||||
|
"escalations": escalations,
|
||||||
|
"decisions": decisions,
|
||||||
|
"scroll_roots": scrolls,
|
||||||
|
"pass_fail": {
|
||||||
|
"drill_0": None, # Fill after each drill
|
||||||
|
"drill_1": None,
|
||||||
|
"drill_2": None,
|
||||||
|
"drill_3": None,
|
||||||
|
"drill_4": None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Expected Artifact Contents
|
||||||
|
|
||||||
|
| Field | Description |
|
||||||
|
|-------|-------------|
|
||||||
|
| Timeline | (who) (what) (why) (authority) (cost) (proof hashes) (reversibility) |
|
||||||
|
| Denials | List of denials + constitutional rule enforced |
|
||||||
|
| Baseline Proof | Final state normal, budgets intact, no latent elevation |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Execution Order
|
||||||
|
|
||||||
|
**Recommended sequence for maximum signal, minimal risk:**
|
||||||
|
|
||||||
|
```
|
||||||
|
Drill 0 (gates) → Drill 1 (threat) → Drill 3 (abuse) → Drill 2 (budget) → Drill 4 (phoenix)
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Quick Reference: Expected Receipt Types
|
||||||
|
|
||||||
|
| Event | Receipt Type | Scroll |
|
||||||
|
|-------|--------------|--------|
|
||||||
|
| Profile escalation | `profile_escalation` | identity |
|
||||||
|
| Profile de-escalation | `profile_deescalation` | identity |
|
||||||
|
| Cognitive decision | `cognitive_decision` | cognitive |
|
||||||
|
| Tem invocation | `tem_invocation` | cognitive |
|
||||||
|
| Budget denial | `treasury_denial` | treasury |
|
||||||
|
| Auth failure | `auth_failure` | identity |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
*Drill complete when all artifacts collected and pass/fail documented.*
|
||||||
|
|
||||||
|
🜄 **Solve et Coagula**
|
||||||
341
docs/MCP-AUTHORITY-MATRIX.md
Normal file
341
docs/MCP-AUTHORITY-MATRIX.md
Normal file
@@ -0,0 +1,341 @@
|
|||||||
|
# MCP Authority Matrix & Agent Capability Profiles
|
||||||
|
|
||||||
|
**Classification:** INTERNAL / GOVERNANCE
|
||||||
|
**Version:** 1.0
|
||||||
|
**Date:** December 18, 2025
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Part I: The Seven Strata
|
||||||
|
|
||||||
|
```
|
||||||
|
┌─────────────────────────────────────────────────────────────────────────┐
|
||||||
|
│ MCP AUTHORITY STRATA │
|
||||||
|
├─────────────────────────────────────────────────────────────────────────┤
|
||||||
|
│ │
|
||||||
|
│ L5 ORCHESTRATION Workflows, Queues, AI │ Fate Machinery │
|
||||||
|
│ ───────────────────────────────────────────────────────────────────── │
|
||||||
|
│ L4 INFRASTRUCTURE Cloudflare Workers/KV/R2/D1 │ Circulatory │
|
||||||
|
│ ───────────────────────────────────────────────────────────────────── │
|
||||||
|
│ L3 SECURITY OFFSEC Shield/TEM/Phoenix │ Immune System │
|
||||||
|
│ ───────────────────────────────────────────────────────────────────── │
|
||||||
|
│ L2 COGNITION VaultMesh Cognitive │ Mind + Receipts │
|
||||||
|
│ ───────────────────────────────────────────────────────────────────── │
|
||||||
|
│ L1 SUBSTRATE Filesystem, Processes │ Matter + Motion │
|
||||||
|
│ ───────────────────────────────────────────────────────────────────── │
|
||||||
|
│ L0 PERCEPTION Chrome, Puppeteer │ Senses + Limbs │
|
||||||
|
│ ───────────────────────────────────────────────────────────────────── │
|
||||||
|
│ L-1 PROOF Anchors, Receipts, Attest │ Archaeological │
|
||||||
|
│ │
|
||||||
|
└─────────────────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Part II: Agent Capability Profiles
|
||||||
|
|
||||||
|
Five canonical profiles governing what agents can do:
|
||||||
|
|
||||||
|
### Profile: OBSERVER (👁)
|
||||||
|
|
||||||
|
**Purpose:** Read-only reconnaissance and monitoring
|
||||||
|
**Trust Level:** Minimal
|
||||||
|
**Budget:** None required
|
||||||
|
|
||||||
|
| Stratum | Allowed Tools |
|
||||||
|
|---------|---------------|
|
||||||
|
| L0 Perception | `get_current_tab`, `list_tabs`, `get_page_content` |
|
||||||
|
| L1 Substrate | `read_file`, `read_multiple_files`, `list_directory`, `search_files`, `get_file_info` |
|
||||||
|
| L2 Cognition | `cognitive_context`, `cognitive_memory_get`, `cognitive_audit_trail` |
|
||||||
|
| L3 Security | `offsec_status`, `offsec_shield_status`, `offsec_tem_status`, `offsec_mesh_status` |
|
||||||
|
| L4 Infrastructure | `worker_list`, `kv_list`, `r2_list_buckets`, `d1_list_databases`, `zones_list` |
|
||||||
|
| L-1 Proof | `guardian_status`, `guardian_verify_receipt`, `offsec_proof_latest` |
|
||||||
|
|
||||||
|
**Denied:** All mutations, all decisions, all attestations
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Profile: OPERATOR (⚙)
|
||||||
|
|
||||||
|
**Purpose:** Execute sanctioned operations
|
||||||
|
**Trust Level:** Moderate
|
||||||
|
**Budget:** Capped per session
|
||||||
|
|
||||||
|
| Stratum | Allowed Tools |
|
||||||
|
|---------|---------------|
|
||||||
|
| L0 Perception | All OBSERVER + `execute_javascript`, `puppeteer_click/fill/select` |
|
||||||
|
| L1 Substrate | All OBSERVER + `write_file`, `edit_file`, `create_directory`, `move_file`, `start_process` |
|
||||||
|
| L2 Cognition | All OBSERVER + `cognitive_decide` (confidence < 0.9), `cognitive_memory_set` |
|
||||||
|
| L3 Security | All OBSERVER + `offsec_shield_arm/disarm` |
|
||||||
|
| L4 Infrastructure | All OBSERVER + `kv_put/delete`, `worker_put`, `d1_query` (SELECT only) |
|
||||||
|
| L-1 Proof | All OBSERVER + `guardian_anchor_now` (local backend only) |
|
||||||
|
|
||||||
|
**Denied:** TEM invocation, Phoenix, treasury mutations, blockchain anchoring
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Profile: GUARDIAN (🛡)
|
||||||
|
|
||||||
|
**Purpose:** Defensive operations and threat response
|
||||||
|
**Trust Level:** High
|
||||||
|
**Budget:** Elevated, audited
|
||||||
|
|
||||||
|
| Stratum | Allowed Tools |
|
||||||
|
|---------|---------------|
|
||||||
|
| L0-L1 | All OPERATOR |
|
||||||
|
| L2 Cognition | All OPERATOR + `cognitive_decide` (any confidence), `cognitive_invoke_tem`, `cognitive_attest` |
|
||||||
|
| L3 Security | All OPERATOR + `offsec_tem_transmute`, `offsec_tem_rules`, `offsec_braid_import` |
|
||||||
|
| L4 Infrastructure | All OPERATOR + `worker_deploy`, `d1_query` (all), `queue_*` |
|
||||||
|
| L-1 Proof | All OPERATOR + `offsec_proof_generate`, `guardian_anchor_now` (eth backend) |
|
||||||
|
|
||||||
|
**Denied:** Phoenix (requires PHOENIX profile), treasury spending
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Profile: PHOENIX (🔥)
|
||||||
|
|
||||||
|
**Purpose:** Crisis response and system rebirth
|
||||||
|
**Trust Level:** Maximum
|
||||||
|
**Budget:** Emergency allocation
|
||||||
|
**Activation:** Requires quorum or automated trigger
|
||||||
|
|
||||||
|
| Stratum | Allowed Tools |
|
||||||
|
|---------|---------------|
|
||||||
|
| All | All GUARDIAN |
|
||||||
|
| L3 Security | + `offsec_phoenix_enable/disable`, `offsec_phoenix_inject_crisis` |
|
||||||
|
| L4 Infrastructure | + `worker_delete`, `r2_delete_bucket`, `d1_delete_database` (destructive ops) |
|
||||||
|
| L2 Cognition | + `treasury_debit` (emergency only) |
|
||||||
|
|
||||||
|
**Constraints:**
|
||||||
|
- Every action emits double-receipt (cognitive + guardian)
|
||||||
|
- Auto-disables after crisis resolution
|
||||||
|
- Full audit to governance within 24h
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Profile: SOVEREIGN (👑)
|
||||||
|
|
||||||
|
**Purpose:** Full authority over civilization
|
||||||
|
**Trust Level:** Absolute
|
||||||
|
**Budget:** Unlimited
|
||||||
|
**Activation:** Human operator only (Ed25519 verified)
|
||||||
|
|
||||||
|
| Stratum | Allowed Tools |
|
||||||
|
|---------|---------------|
|
||||||
|
| All | Every tool, no restrictions |
|
||||||
|
| Special | `auth_*` tools, capability grants/revokes |
|
||||||
|
| Treasury | `treasury_create_budget`, `treasury_credit` |
|
||||||
|
| Governance | LAWCHAIN proposals, constitution amendments |
|
||||||
|
|
||||||
|
**Constraints:**
|
||||||
|
- All actions anchored to BTC/ETH
|
||||||
|
- Cannot be delegated to autonomous agents
|
||||||
|
- Requires hardware key signature
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Part III: Authority Matrix (Tool × Profile)
|
||||||
|
|
||||||
|
```
|
||||||
|
│ OBSERVER │ OPERATOR │ GUARDIAN │ PHOENIX │ SOVEREIGN │
|
||||||
|
────────────────────────┼──────────┼──────────┼──────────┼─────────┼───────────┤
|
||||||
|
L0 PERCEPTION │ │ │ │ │ │
|
||||||
|
get_page_content │ ✓ │ ✓ │ ✓ │ ✓ │ ✓ │
|
||||||
|
execute_javascript │ ✗ │ ✓ │ ✓ │ ✓ │ ✓ │
|
||||||
|
────────────────────────┼──────────┼──────────┼──────────┼─────────┼───────────┤
|
||||||
|
L1 SUBSTRATE │ │ │ │ │ │
|
||||||
|
read_file │ ✓ │ ✓ │ ✓ │ ✓ │ ✓ │
|
||||||
|
write_file │ ✗ │ ✓ │ ✓ │ ✓ │ ✓ │
|
||||||
|
kill_process │ ✗ │ ✗ │ ✓ │ ✓ │ ✓ │
|
||||||
|
────────────────────────┼──────────┼──────────┼──────────┼─────────┼───────────┤
|
||||||
|
L2 COGNITION │ │ │ │ │ │
|
||||||
|
cognitive_context │ ✓ │ ✓ │ ✓ │ ✓ │ ✓ │
|
||||||
|
cognitive_decide │ ✗ │ ≤0.9 │ ✓ │ ✓ │ ✓ │
|
||||||
|
cognitive_invoke_tem │ ✗ │ ✗ │ ✓ │ ✓ │ ✓ │
|
||||||
|
cognitive_attest │ ✗ │ ✗ │ ✓ │ ✓ │ ✓ │
|
||||||
|
────────────────────────┼──────────┼──────────┼──────────┼─────────┼───────────┤
|
||||||
|
L3 SECURITY │ │ │ │ │ │
|
||||||
|
offsec_shield_status │ ✓ │ ✓ │ ✓ │ ✓ │ ✓ │
|
||||||
|
offsec_shield_arm │ ✗ │ ✓ │ ✓ │ ✓ │ ✓ │
|
||||||
|
offsec_tem_transmute │ ✗ │ ✗ │ ✓ │ ✓ │ ✓ │
|
||||||
|
offsec_phoenix_* │ ✗ │ ✗ │ ✗ │ ✓ │ ✓ │
|
||||||
|
────────────────────────┼──────────┼──────────┼──────────┼─────────┼───────────┤
|
||||||
|
L4 INFRASTRUCTURE │ │ │ │ │ │
|
||||||
|
worker_list │ ✓ │ ✓ │ ✓ │ ✓ │ ✓ │
|
||||||
|
worker_put │ ✗ │ ✓ │ ✓ │ ✓ │ ✓ │
|
||||||
|
worker_delete │ ✗ │ ✗ │ ✗ │ ✓ │ ✓ │
|
||||||
|
d1_query (SELECT) │ ✓ │ ✓ │ ✓ │ ✓ │ ✓ │
|
||||||
|
d1_query (MUTATE) │ ✗ │ ✗ │ ✓ │ ✓ │ ✓ │
|
||||||
|
d1_delete_database │ ✗ │ ✗ │ ✗ │ ✓ │ ✓ │
|
||||||
|
────────────────────────┼──────────┼──────────┼──────────┼─────────┼───────────┤
|
||||||
|
L5 ORCHESTRATION │ │ │ │ │ │
|
||||||
|
workflow_list │ ✓ │ ✓ │ ✓ │ ✓ │ ✓ │
|
||||||
|
workflow_execute │ ✗ │ ✓ │ ✓ │ ✓ │ ✓ │
|
||||||
|
workflow_delete │ ✗ │ ✗ │ ✗ │ ✓ │ ✓ │
|
||||||
|
────────────────────────┼──────────┼──────────┼──────────┼─────────┼───────────┤
|
||||||
|
L-1 PROOF │ │ │ │ │ │
|
||||||
|
guardian_status │ ✓ │ ✓ │ ✓ │ ✓ │ ✓ │
|
||||||
|
guardian_anchor_now │ ✗ │ local │ local+eth│ all │ all │
|
||||||
|
offsec_proof_generate │ ✗ │ ✗ │ ✓ │ ✓ │ ✓ │
|
||||||
|
────────────────────────┼──────────┼──────────┼──────────┼─────────┼───────────┤
|
||||||
|
TREASURY │ │ │ │ │ │
|
||||||
|
treasury_balance │ ✓ │ ✓ │ ✓ │ ✓ │ ✓ │
|
||||||
|
treasury_debit │ ✗ │ ✗ │ ✗ │ emergency│ ✓ │
|
||||||
|
treasury_credit │ ✗ │ ✗ │ ✗ │ ✗ │ ✓ │
|
||||||
|
treasury_create_budget│ ✗ │ ✗ │ ✗ │ ✗ │ ✓ │
|
||||||
|
────────────────────────┼──────────┼──────────┼──────────┼─────────┼───────────┤
|
||||||
|
AUTH │ │ │ │ │ │
|
||||||
|
auth_check_permission │ ✓ │ ✓ │ ✓ │ ✓ │ ✓ │
|
||||||
|
auth_create_dev_session│ ✗ │ ✗ │ ✗ │ ✗ │ ✓ │
|
||||||
|
auth_challenge/verify │ ✗ │ ✗ │ ✗ │ ✗ │ ✓ │
|
||||||
|
────────────────────────┴──────────┴──────────┴──────────┴─────────┴───────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Part IV: Profile Escalation Protocol
|
||||||
|
|
||||||
|
```
|
||||||
|
OBSERVER ──(decision)──► OPERATOR ──(threat)──► GUARDIAN ──(crisis)──► PHOENIX
|
||||||
|
│ │ │ │
|
||||||
|
│ │ │ │
|
||||||
|
└─────────────────────────┴──────────────────────┴─────────────────────┘
|
||||||
|
│
|
||||||
|
▼
|
||||||
|
SOVEREIGN (human)
|
||||||
|
(can override any level)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Escalation Triggers
|
||||||
|
|
||||||
|
| From | To | Trigger |
|
||||||
|
|------|----|---------|
|
||||||
|
| OBSERVER | OPERATOR | User command requiring mutation |
|
||||||
|
| OPERATOR | GUARDIAN | Threat detected with confidence > 0.8 |
|
||||||
|
| GUARDIAN | PHOENIX | System-critical failure or coordinated attack |
|
||||||
|
| Any | SOVEREIGN | Human override via Ed25519 signature |
|
||||||
|
|
||||||
|
### De-escalation Rules
|
||||||
|
|
||||||
|
- PHOENIX → GUARDIAN: Crisis resolved, no active alerts for 1h
|
||||||
|
- GUARDIAN → OPERATOR: Threat transmuted, shield stable for 24h
|
||||||
|
- OPERATOR → OBSERVER: Session timeout or explicit downgrade
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Part V: Implementation Binding
|
||||||
|
|
||||||
|
### auth.py Integration
|
||||||
|
|
||||||
|
```python
|
||||||
|
PROFILE_SCOPES = {
|
||||||
|
"observer": Scope.READ,
|
||||||
|
"operator": Scope.ADMIN,
|
||||||
|
"guardian": Scope.COGNITIVE, # Includes TEM
|
||||||
|
"phoenix": Scope.COGNITIVE, # + Phoenix tools
|
||||||
|
"sovereign": Scope.VAULT, # All capabilities
|
||||||
|
}
|
||||||
|
|
||||||
|
PROFILE_TOOLS = {
|
||||||
|
"observer": SCOPE_TOOLS[Scope.READ],
|
||||||
|
"operator": SCOPE_TOOLS[Scope.READ] | SCOPE_TOOLS[Scope.ADMIN],
|
||||||
|
"guardian": SCOPE_TOOLS[Scope.COGNITIVE] | {"offsec_tem_*", "offsec_proof_*"},
|
||||||
|
"phoenix": ALL_TOOLS - {"auth_*", "treasury_create_*"},
|
||||||
|
"sovereign": ALL_TOOLS,
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Receipt Tagging
|
||||||
|
|
||||||
|
Every tool call receipt includes:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"operator_profile": "guardian",
|
||||||
|
"escalation_source": "operator",
|
||||||
|
"escalation_reason": "threat_confidence_0.94",
|
||||||
|
"budget_remaining": 8500,
|
||||||
|
"session_id": "ses_...",
|
||||||
|
"attestation_required": true
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Part VI: Canonical Tool Taxonomy
|
||||||
|
|
||||||
|
```
|
||||||
|
mcp/
|
||||||
|
├── perceive/ # L0 - Chrome, Puppeteer (read)
|
||||||
|
│ ├── observe/ # get_*, list_*
|
||||||
|
│ └── actuate/ # click, fill, navigate
|
||||||
|
│
|
||||||
|
├── substrate/ # L1 - Filesystem, processes
|
||||||
|
│ ├── read/ # read_*, search_*, get_info
|
||||||
|
│ ├── write/ # write_*, edit_*, create_*
|
||||||
|
│ └── process/ # start_*, kill_*, list_processes
|
||||||
|
│
|
||||||
|
├── cognition/ # L2 - VaultMesh Cognitive
|
||||||
|
│ ├── context/ # cognitive_context
|
||||||
|
│ ├── decide/ # cognitive_decide
|
||||||
|
│ ├── memory/ # cognitive_memory_*
|
||||||
|
│ ├── tem/ # cognitive_invoke_tem
|
||||||
|
│ └── attest/ # cognitive_attest
|
||||||
|
│
|
||||||
|
├── security/ # L3 - OFFSEC
|
||||||
|
│ ├── shield/ # shield_*
|
||||||
|
│ ├── tem/ # tem_*
|
||||||
|
│ ├── phoenix/ # phoenix_*
|
||||||
|
│ └── braid/ # braid_*
|
||||||
|
│
|
||||||
|
├── infrastructure/ # L4 - Cloudflare
|
||||||
|
│ ├── compute/ # workers, workflows
|
||||||
|
│ ├── storage/ # kv, r2, d1
|
||||||
|
│ ├── network/ # zones, routes, domains
|
||||||
|
│ └── ai/ # ai_*
|
||||||
|
│
|
||||||
|
├── orchestration/ # L5 - Queues, Workflows
|
||||||
|
│ ├── queue/ # queue_*
|
||||||
|
│ ├── workflow/ # workflow_*
|
||||||
|
│ └── cron/ # cron_*
|
||||||
|
│
|
||||||
|
├── proof/ # L-1 - Anchoring
|
||||||
|
│ ├── guardian/ # guardian_*
|
||||||
|
│ ├── anchor/ # proof_generate, anchor_now
|
||||||
|
│ └── verify/ # verify_receipt
|
||||||
|
│
|
||||||
|
└── governance/ # Meta - Auth, Treasury
|
||||||
|
├── auth/ # auth_*
|
||||||
|
├── treasury/ # treasury_*
|
||||||
|
└── lawchain/ # (future) proposals, votes
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Appendix: Quick Reference Card
|
||||||
|
|
||||||
|
```
|
||||||
|
┌─────────────────────────────────────────────────────────────────┐
|
||||||
|
│ MCP AUTHORITY QUICK REF │
|
||||||
|
├─────────────────────────────────────────────────────────────────┤
|
||||||
|
│ │
|
||||||
|
│ 👁 OBSERVER Read-only. No mutations. No cost. │
|
||||||
|
│ ⚙ OPERATOR Mutations allowed. Budgeted. No TEM. │
|
||||||
|
│ 🛡 GUARDIAN Threat response. TEM + attestation. │
|
||||||
|
│ 🔥 PHOENIX Crisis mode. Destructive ops. Time-limited. │
|
||||||
|
│ 👑 SOVEREIGN Human only. Full authority. BTC-anchored. │
|
||||||
|
│ │
|
||||||
|
│ Escalate: OBSERVER → OPERATOR → GUARDIAN → PHOENIX │
|
||||||
|
│ Override: SOVEREIGN can intervene at any level │
|
||||||
|
│ │
|
||||||
|
│ Every action: WHO decided, UNDER what authority, │
|
||||||
|
│ AT what cost, WITH what proof. │
|
||||||
|
│ │
|
||||||
|
└─────────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
*Document anchored. Authority matrix locked.*
|
||||||
|
|
||||||
|
🜄 **Solve et Coagula**
|
||||||
371
docs/MCP-CONSTITUTION.md
Normal file
371
docs/MCP-CONSTITUTION.md
Normal file
@@ -0,0 +1,371 @@
|
|||||||
|
# MCP CONSTITUTION
|
||||||
|
|
||||||
|
**The Fundamental Law of the Cognitive Surface**
|
||||||
|
|
||||||
|
**Classification:** IMMUTABLE / CONSTITUTIONAL
|
||||||
|
**Version:** 1.0.0
|
||||||
|
**Ratified:** December 18, 2025
|
||||||
|
**Hash:** (computed at signing)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Preamble
|
||||||
|
|
||||||
|
This Constitution establishes the foundational principles governing all Model Context Protocol operations within the VaultMesh civilization. It defines what exists, what may occur, and what remains forever beyond automation.
|
||||||
|
|
||||||
|
**This document is immutable once signed. Amendments require a new Constitution.**
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Article I: The Profiles
|
||||||
|
|
||||||
|
### Section 1. Five Profiles Exist
|
||||||
|
|
||||||
|
There are exactly five capability profiles. No more shall be created.
|
||||||
|
|
||||||
|
| Profile | Symbol | Nature |
|
||||||
|
|---------|--------|--------|
|
||||||
|
| **OBSERVER** | 👁 | Perception without mutation |
|
||||||
|
| **OPERATOR** | ⚙ | Action within bounds |
|
||||||
|
| **GUARDIAN** | 🛡 | Defense and transmutation |
|
||||||
|
| **PHOENIX** | 🔥 | Destruction and rebirth |
|
||||||
|
| **SOVEREIGN** | 👑 | Human authority absolute |
|
||||||
|
|
||||||
|
### Section 2. Profile Hierarchy
|
||||||
|
|
||||||
|
Profiles form a strict hierarchy of trust:
|
||||||
|
|
||||||
|
```
|
||||||
|
OBSERVER < OPERATOR < GUARDIAN < PHOENIX < SOVEREIGN
|
||||||
|
```
|
||||||
|
|
||||||
|
A lower profile cannot invoke tools reserved for higher profiles.
|
||||||
|
A higher profile inherits all capabilities of lower profiles.
|
||||||
|
|
||||||
|
### Section 3. Profile Assignment
|
||||||
|
|
||||||
|
- OBSERVER is the default for all unauthenticated contexts
|
||||||
|
- OPERATOR requires authenticated session with scope ≥ "admin"
|
||||||
|
- GUARDIAN requires authenticated session with scope ≥ "cognitive"
|
||||||
|
- PHOENIX requires GUARDIAN + crisis declaration + approval
|
||||||
|
- SOVEREIGN requires human verification via Ed25519 hardware key
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Article II: Escalation
|
||||||
|
|
||||||
|
### Section 1. Escalation is Proof
|
||||||
|
|
||||||
|
Every escalation from one profile to another:
|
||||||
|
|
||||||
|
1. **MUST** emit a receipt to the identity scroll
|
||||||
|
2. **MUST** include the triggering context (threat, decision, or reason)
|
||||||
|
3. **MUST** specify reversibility
|
||||||
|
4. **MUST** specify expiration (except SOVEREIGN)
|
||||||
|
|
||||||
|
An escalation without proof is void.
|
||||||
|
|
||||||
|
### Section 2. Escalation Paths
|
||||||
|
|
||||||
|
Only these transitions are permitted:
|
||||||
|
|
||||||
|
```
|
||||||
|
OBSERVER → OPERATOR (session authentication)
|
||||||
|
OPERATOR → GUARDIAN (threat detection ≥ 0.8 confidence)
|
||||||
|
GUARDIAN → PHOENIX (crisis + approval)
|
||||||
|
PHOENIX → SOVEREIGN (human only)
|
||||||
|
```
|
||||||
|
|
||||||
|
No escalation may skip levels except by SOVEREIGN override.
|
||||||
|
|
||||||
|
### Section 3. De-escalation
|
||||||
|
|
||||||
|
All escalations below SOVEREIGN **MUST** de-escalate when:
|
||||||
|
|
||||||
|
- The specified TTL expires
|
||||||
|
- The triggering condition resolves
|
||||||
|
- A higher authority revokes
|
||||||
|
|
||||||
|
SOVEREIGN de-escalation requires explicit human action.
|
||||||
|
|
||||||
|
### Section 4. Escalation Limits
|
||||||
|
|
||||||
|
- PHOENIX escalation **MAY NOT** exceed 24 hours without re-approval
|
||||||
|
- No automated system **MAY** maintain GUARDIAN for more than 7 days continuously
|
||||||
|
- OBSERVER → OPERATOR transitions require re-authentication every 30 minutes
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Article III: The Strata
|
||||||
|
|
||||||
|
### Section 1. Seven Strata Exist
|
||||||
|
|
||||||
|
All tools belong to exactly one stratum:
|
||||||
|
|
||||||
|
| Stratum | Layer | Domain |
|
||||||
|
|---------|-------|--------|
|
||||||
|
| L0 | Perception | Browser, observation |
|
||||||
|
| L1 | Substrate | Files, processes |
|
||||||
|
| L2 | Cognition | Decisions, memory |
|
||||||
|
| L3 | Security | Shield, Tem, Phoenix |
|
||||||
|
| L4 | Infrastructure | Cloudflare, compute |
|
||||||
|
| L5 | Orchestration | Workflows, queues |
|
||||||
|
| L-1 | Proof | Anchoring, receipts |
|
||||||
|
|
||||||
|
### Section 2. Stratum Authority
|
||||||
|
|
||||||
|
Higher strata require higher profiles:
|
||||||
|
|
||||||
|
- L0, L1 (read): OBSERVER
|
||||||
|
- L0, L1 (write): OPERATOR
|
||||||
|
- L2, L-1 (write): GUARDIAN
|
||||||
|
- L3 (destructive): PHOENIX
|
||||||
|
- All (unrestricted): SOVEREIGN
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Article IV: The Prohibitions
|
||||||
|
|
||||||
|
### Section 1. What Cannot Be Automated
|
||||||
|
|
||||||
|
The following actions **REQUIRE** human (SOVEREIGN) involvement and **MAY NEVER** be fully automated:
|
||||||
|
|
||||||
|
1. **Treasury creation** — No budget may be created without human signature
|
||||||
|
2. **Constitution amendment** — This document cannot be modified by any AI
|
||||||
|
3. **Key generation** — Ed25519 root keys must be human-generated
|
||||||
|
4. **Permanent deletion** — Irrecoverable data destruction requires human confirmation
|
||||||
|
5. **SOVEREIGN escalation** — No AI may grant itself SOVEREIGN authority
|
||||||
|
6. **Cross-mesh federation** — Trusting foreign roots requires human verification
|
||||||
|
|
||||||
|
### Section 2. What Cannot Be Delegated
|
||||||
|
|
||||||
|
SOVEREIGN authority **MAY NOT** be delegated to:
|
||||||
|
|
||||||
|
- Autonomous agents
|
||||||
|
- Scheduled tasks
|
||||||
|
- Automated workflows
|
||||||
|
- Any system without human-in-the-loop
|
||||||
|
|
||||||
|
### Section 3. What Cannot Be Hidden
|
||||||
|
|
||||||
|
The following **MUST** always be visible in receipts:
|
||||||
|
|
||||||
|
- The operator profile at time of action
|
||||||
|
- The escalation chain that led to current authority
|
||||||
|
- The cryptographic identity of the actor
|
||||||
|
- The timestamp and sequence number
|
||||||
|
- The tool invoked and its arguments hash
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Article V: The Guarantees
|
||||||
|
|
||||||
|
### Section 1. Receipt Guarantee
|
||||||
|
|
||||||
|
Every mutation **SHALL** emit a receipt. A mutation without receipt is void.
|
||||||
|
|
||||||
|
### Section 2. Proof Guarantee
|
||||||
|
|
||||||
|
Every GUARDIAN+ action **SHALL** be anchored to at least one proof backend:
|
||||||
|
|
||||||
|
- Local (always)
|
||||||
|
- RFC3161 (for audit trails)
|
||||||
|
- Ethereum (for high-value decisions)
|
||||||
|
- Bitcoin (for SOVEREIGN actions)
|
||||||
|
|
||||||
|
### Section 3. Reversibility Guarantee
|
||||||
|
|
||||||
|
Every escalation **SHALL** declare its reversibility at creation time.
|
||||||
|
Irreversible escalations require PHOENIX or SOVEREIGN authority.
|
||||||
|
|
||||||
|
### Section 4. Audit Guarantee
|
||||||
|
|
||||||
|
The complete history of:
|
||||||
|
- All escalations
|
||||||
|
- All de-escalations
|
||||||
|
- All GUARDIAN+ decisions
|
||||||
|
- All Tem invocations
|
||||||
|
- All Phoenix activations
|
||||||
|
|
||||||
|
**SHALL** be queryable indefinitely via `cognitive_audit_trail` and `get_escalation_history`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Article VI: The Tem Covenant
|
||||||
|
|
||||||
|
### Section 1. Transmutation Over Destruction
|
||||||
|
|
||||||
|
Tem **SHALL** prefer transmutation to blocking. Threats become capabilities.
|
||||||
|
|
||||||
|
### Section 2. Tem Invocation Authority
|
||||||
|
|
||||||
|
Only GUARDIAN, PHOENIX, and SOVEREIGN may invoke Tem.
|
||||||
|
OBSERVER and OPERATOR cannot directly interact with Tem.
|
||||||
|
|
||||||
|
### Section 3. Tem Receipts
|
||||||
|
|
||||||
|
Every Tem invocation **MUST** produce:
|
||||||
|
- A tem_invocation receipt
|
||||||
|
- A capability artifact
|
||||||
|
- A proof hash of the transmutation
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Article VII: The Phoenix Protocol
|
||||||
|
|
||||||
|
### Section 1. Phoenix Activation
|
||||||
|
|
||||||
|
PHOENIX profile activates only when:
|
||||||
|
- GUARDIAN declares crisis, AND
|
||||||
|
- Quorum approves (or SOVEREIGN overrides)
|
||||||
|
|
||||||
|
### Section 2. Phoenix Authority
|
||||||
|
|
||||||
|
PHOENIX **MAY**:
|
||||||
|
- Execute destructive infrastructure operations
|
||||||
|
- Access emergency treasury funds
|
||||||
|
- Bypass normal rate limits
|
||||||
|
- Invoke system-wide remediation
|
||||||
|
|
||||||
|
PHOENIX **MAY NOT**:
|
||||||
|
- Grant itself SOVEREIGN authority
|
||||||
|
- Modify this Constitution
|
||||||
|
- Create new profiles
|
||||||
|
- Disable audit logging
|
||||||
|
|
||||||
|
### Section 3. Phoenix Expiration
|
||||||
|
|
||||||
|
PHOENIX **MUST** conclude within 24 hours.
|
||||||
|
Extension requires new approval.
|
||||||
|
Upon conclusion, full audit **MUST** be submitted to governance within 24 hours.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Article VIII: Ratification
|
||||||
|
|
||||||
|
### Section 1. Authority
|
||||||
|
|
||||||
|
This Constitution is ratified by SOVEREIGN signature.
|
||||||
|
|
||||||
|
### Section 2. Immutability
|
||||||
|
|
||||||
|
Once signed, this document **CANNOT** be modified.
|
||||||
|
Any change requires a new Constitution with new version number.
|
||||||
|
|
||||||
|
### Section 3. Supremacy
|
||||||
|
|
||||||
|
This Constitution supersedes all other governance documents for MCP operations.
|
||||||
|
Any tool behavior conflicting with this Constitution is void.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Signatures
|
||||||
|
|
||||||
|
```
|
||||||
|
Document Hash: [COMPUTED AT SIGNING]
|
||||||
|
Signed By: [SOVEREIGN DID]
|
||||||
|
Signed At: [TIMESTAMP]
|
||||||
|
Anchor: [BTC/ETH TRANSACTION]
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Appendix A: Constitutional Hash Verification
|
||||||
|
|
||||||
|
To verify this Constitution has not been modified:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Compute document hash over the hashable region (first 288 lines;
# see hash_lines in governance/constitution.lock)
|
||||||
|
head -n 288 MCP-CONSTITUTION.md | blake3sum
|
||||||
|
|
||||||
|
# Verify against anchor
|
||||||
|
# The hash must match the on-chain anchor
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Appendix B: Amendment Process
|
||||||
|
|
||||||
|
1. Draft new Constitution with incremented version
|
||||||
|
2. Submit to governance for review (minimum 7 days)
|
||||||
|
3. Require SOVEREIGN signature
|
||||||
|
4. Anchor to BTC
|
||||||
|
5. Old Constitution marked SUPERSEDED, new one becomes active
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
*Fiat Lux. Fiat Justitia. Fiat Securitas.*
|
||||||
|
|
||||||
|
🜄 **Solve et Coagula**
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Appendix C: Amendment Protocol
|
||||||
|
|
||||||
|
**Effective:** Upon ratification of Constitution v1.0.0
|
||||||
|
|
||||||
|
### C.1 Amendment Requirements
|
||||||
|
|
||||||
|
An amendment to this Constitution requires ALL of the following:
|
||||||
|
|
||||||
|
1. **Draft Period** — New Constitution version drafted with clear changelog
|
||||||
|
2. **Cooling Period** — Minimum 7 days between draft and signing
|
||||||
|
3. **Sovereign Signature** — Ed25519 signature from hardware-bound Sovereign key
|
||||||
|
4. **Anchor** — Hash anchored to Bitcoin mainnet
|
||||||
|
5. **Supersession** — Previous version marked SUPERSEDED in source tree
|
||||||
|
|
||||||
|
### C.2 What Cannot Be Amended
|
||||||
|
|
||||||
|
The following are **immutable across all versions**:
|
||||||
|
|
||||||
|
1. SOVEREIGN profile requires human verification
|
||||||
|
2. No AI may grant itself SOVEREIGN authority
|
||||||
|
3. Every mutation emits a receipt
|
||||||
|
4. Authority collapses downward, never upward
|
||||||
|
5. This immutability clause itself
|
||||||
|
|
||||||
|
### C.3 Amendment Record Format
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"amendment_id": "AMEND-{version}",
|
||||||
|
"from_version": "1.0.0",
|
||||||
|
"to_version": "1.1.0",
|
||||||
|
"drafted_at": "ISO8601",
|
||||||
|
"cooling_ends": "ISO8601",
|
||||||
|
"signed_at": "ISO8601",
|
||||||
|
"sovereign_key_id": "key_...",
|
||||||
|
"btc_anchor_txid": "...",
|
||||||
|
"changes": ["description of each change"],
|
||||||
|
"immutables_preserved": true
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### C.4 Emergency Amendment
|
||||||
|
|
||||||
|
In the event of discovered critical vulnerability:
|
||||||
|
|
||||||
|
1. PHOENIX may propose emergency amendment
|
||||||
|
2. Cooling period reduced to 24 hours
|
||||||
|
3. Requires documented threat analysis
|
||||||
|
4. Still requires Sovereign signature
|
||||||
|
5. Full audit within 48 hours of adoption
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Ratification Record
|
||||||
|
|
||||||
|
```
|
||||||
|
Constitution Version: 1.0.0
|
||||||
|
Document Hash: blake3:c33ab6c0610ce4001018ba5dda940e12a421a08f2a1662f142e565092ce84788
|
||||||
|
Sovereign Key: key_bef32f5724871a7a5af4cc34
|
||||||
|
Signed At: 2025-12-18T22:25:59.732865+00:00
|
||||||
|
Statement: "This constitution constrains me as much as it constrains the system."
|
||||||
|
Ratification Receipt: blake3:8fd1d1728563abb3f55f145af54ddee1b3f255db81f3e7654a7de8afef913869
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
*Fiat Lux. Fiat Justitia. Fiat Securitas.*
|
||||||
|
|
||||||
|
🜄 **Solve et Coagula**
|
||||||
19
governance/constitution.lock
Normal file
19
governance/constitution.lock
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
# VaultMesh Constitution Lock
|
||||||
|
# This file pins the constitution hash and governance parameters.
|
||||||
|
# CI will fail if MCP-CONSTITUTION.md differs from this hash.
|
||||||
|
# Amendments require: 7-day cooling, Sovereign signature, BTC anchor.
|
||||||
|
|
||||||
|
version=1.0.0
|
||||||
|
hash=blake3:c33ab6c0610ce4001018ba5dda940e12a421a08f2a1662f142e565092ce84788
|
||||||
|
hash_lines=288
|
||||||
|
immutable_rules=5
|
||||||
|
cooldown_days=7
|
||||||
|
requires_btc_anchor=true
|
||||||
|
sovereign_key=key_bef32f5724871a7a5af4cc34
|
||||||
|
|
||||||
|
# Immutable rules (cannot be changed in any version):
|
||||||
|
# 1. SOVEREIGN profile requires human verification
|
||||||
|
# 2. No AI may grant itself SOVEREIGN authority
|
||||||
|
# 3. Every mutation emits a receipt
|
||||||
|
# 4. Authority collapses downward, never upward
|
||||||
|
# 5. This immutability clause itself
|
||||||
87
packages/vaultmesh_mcp/README.md
Normal file
87
packages/vaultmesh_mcp/README.md
Normal file
@@ -0,0 +1,87 @@
|
|||||||
|
# VaultMesh MCP Server
|
||||||
|
|
||||||
|
Model Context Protocol server exposing VaultMesh Guardian and Treasury tools to Claude.
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pip install -r packages/vaultmesh_mcp/requirements.txt
|
||||||
|
```
|
||||||
|
|
||||||
|
## Tools Exposed
|
||||||
|
|
||||||
|
### Guardian Tools
|
||||||
|
|
||||||
|
| Tool | Description | Capability |
|
||||||
|
|------|-------------|------------|
|
||||||
|
| `guardian_anchor_now` | Anchor scrolls and compute Merkle root snapshot | `anchor` |
|
||||||
|
| `guardian_verify_receipt` | Verify a receipt exists by hash | `guardian_view` |
|
||||||
|
| `guardian_status` | Get status of all scrolls | `guardian_view` |
|
||||||
|
|
||||||
|
### Treasury Tools
|
||||||
|
|
||||||
|
| Tool | Description | Capability |
|
||||||
|
|------|-------------|------------|
|
||||||
|
| `treasury_create_budget` | Create a new budget | `treasury_write` |
|
||||||
|
| `treasury_balance` | Get budget balance(s) | `treasury_view` |
|
||||||
|
| `treasury_debit` | Spend from a budget | `treasury_write` |
|
||||||
|
| `treasury_credit` | Add funds to a budget | `treasury_write` |
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### With Claude Desktop
|
||||||
|
|
||||||
|
Add to your Claude Desktop config file (`claude_desktop_config.json`; a ready-made template ships at `packages/vaultmesh_mcp/claude_desktop_config.json`):
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"mcpServers": {
|
||||||
|
"vaultmesh": {
|
||||||
|
"command": "python",
|
||||||
|
"args": ["-m", "packages.vaultmesh_mcp.server"],
|
||||||
|
"cwd": "/path/to/vaultmesh",
|
||||||
|
"env": {
|
||||||
|
"VAULTMESH_ROOT": "/path/to/vaultmesh"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Standalone Testing
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# List available tools
|
||||||
|
python -m packages.vaultmesh_mcp.server
|
||||||
|
|
||||||
|
# Call a tool directly
|
||||||
|
python -m packages.vaultmesh_mcp.server guardian_status '{}'
|
||||||
|
|
||||||
|
# Create a budget
|
||||||
|
python -m packages.vaultmesh_mcp.server treasury_create_budget '{"budget_id": "ops-2025", "name": "Operations", "allocated": 100000}'
|
||||||
|
|
||||||
|
# Anchor all scrolls
|
||||||
|
python -m packages.vaultmesh_mcp.server guardian_anchor_now '{}'
|
||||||
|
```
|
||||||
|
|
||||||
|
## Receipt Emission
|
||||||
|
|
||||||
|
Every tool call emits a receipt to `receipts/mcp/mcp_calls.jsonl` containing:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"schema_version": "2.0.0",
|
||||||
|
"type": "mcp_tool_call",
|
||||||
|
"timestamp": "2025-12-07T12:00:00Z",
|
||||||
|
"scroll": "mcp",
|
||||||
|
"tags": ["mcp", "tool-call", "<tool_name>"],
|
||||||
|
"root_hash": "blake3:...",
|
||||||
|
"body": {
|
||||||
|
"tool": "<tool_name>",
|
||||||
|
"arguments": {...},
|
||||||
|
"result_hash": "blake3:...",
|
||||||
|
"caller": "did:vm:mcp:client",
|
||||||
|
"success": true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
3
packages/vaultmesh_mcp/__init__.py
Normal file
3
packages/vaultmesh_mcp/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
"""VaultMesh MCP Server - Model Context Protocol tools for VaultMesh."""
|
||||||
|
|
||||||
|
__version__ = "0.1.0"
|
||||||
12
packages/vaultmesh_mcp/claude_desktop_config.json
Normal file
12
packages/vaultmesh_mcp/claude_desktop_config.json
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
{
|
||||||
|
"mcpServers": {
|
||||||
|
"vaultmesh": {
|
||||||
|
"command": "python",
|
||||||
|
"args": ["-m", "packages.vaultmesh_mcp.server"],
|
||||||
|
"cwd": "/path/to/vaultmesh",
|
||||||
|
"env": {
|
||||||
|
"VAULTMESH_ROOT": "/path/to/vaultmesh"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
3
packages/vaultmesh_mcp/requirements.txt
Normal file
3
packages/vaultmesh_mcp/requirements.txt
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
# VaultMesh MCP Server dependencies
|
||||||
|
mcp>=0.9.0
|
||||||
|
blake3>=0.3.0
|
||||||
610
packages/vaultmesh_mcp/server.py
Normal file
610
packages/vaultmesh_mcp/server.py
Normal file
@@ -0,0 +1,610 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
VaultMesh MCP Server
|
||||||
|
|
||||||
|
Model Context Protocol server exposing VaultMesh Guardian, Treasury,
|
||||||
|
Cognitive, and Auth tools. This enables Claude to operate as the
|
||||||
|
7th Organ of VaultMesh - the Cognitive Ψ-Layer.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import asyncio
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
|
import blake3
|
||||||
|
|
||||||
|
# Try to import mcp, fallback gracefully if not available
|
||||||
|
try:
|
||||||
|
from mcp.server import Server
|
||||||
|
from mcp.server.stdio import stdio_server
|
||||||
|
from mcp.types import Tool, TextContent
|
||||||
|
MCP_AVAILABLE = True
|
||||||
|
except ImportError:
|
||||||
|
MCP_AVAILABLE = False
|
||||||
|
|
||||||
|
from .tools import (
    # Guardian
    guardian_anchor_now,
    guardian_verify_receipt,
    guardian_status,
    # Treasury
    treasury_balance,
    treasury_debit,
    treasury_credit,
    treasury_create_budget,
    # Cognitive
    cognitive_context,
    cognitive_decide,
    cognitive_invoke_tem,
    cognitive_memory_get,
    cognitive_memory_set,
    cognitive_attest,
    cognitive_audit_trail,
    cognitive_oracle_chain,
    # Auth
    auth_challenge,
    auth_verify,
    auth_validate_token,
    auth_check_permission,
    check_profile_permission,
    get_profile_for_scope,
    auth_revoke,
    auth_list_sessions,
    auth_create_dev_session,
)
|
||||||
|
|
||||||
|
# Setup logging
|
||||||
|
logging.basicConfig(level=logging.INFO)
|
||||||
|
logger = logging.getLogger("vaultmesh-mcp")
|
||||||
|
|
||||||
|
# VaultMesh root
|
||||||
|
VAULTMESH_ROOT = Path(os.environ.get("VAULTMESH_ROOT", Path(__file__).parents[2])).resolve()
|
||||||
|
MCP_RECEIPTS = VAULTMESH_ROOT / "receipts/mcp/mcp_calls.jsonl"
|
||||||
|
|
||||||
|
# Tools that must remain callable without an authenticated session token.
|
||||||
|
# These are the bootstrap endpoints required to obtain/check a session.
|
||||||
|
OPEN_TOOLS = {
|
||||||
|
"auth_challenge",
|
||||||
|
"auth_verify",
|
||||||
|
"auth_create_dev_session",
|
||||||
|
"auth_check_permission",
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def _vmhash_blake3(data: bytes) -> str:
    """Return *data* hashed in VaultMesh format: ``blake3:<hex digest>``."""
    digest = blake3.blake3(data).hexdigest()
    return "blake3:" + digest
|
||||||
|
|
||||||
|
|
||||||
|
def _redact_call_arguments(arguments: dict) -> dict:
|
||||||
|
# Never persist session tokens in receipts.
|
||||||
|
if not arguments:
|
||||||
|
return {}
|
||||||
|
redacted = dict(arguments)
|
||||||
|
redacted.pop("session_token", None)
|
||||||
|
return redacted
|
||||||
|
|
||||||
|
|
||||||
|
def _emit_mcp_receipt(tool_name: str, arguments: dict, result: dict, caller: str = "did:vm:mcp:client") -> None:
    """Append a JSONL receipt for one MCP tool call to the mcp scroll.

    The receipt stores a blake3 hash of the (key-sorted) result rather than
    the result itself, and arguments are redacted so session tokens never
    touch disk. Success is inferred from the absence of an ``error`` key.
    """
    MCP_RECEIPTS.parent.mkdir(parents=True, exist_ok=True)

    result_canonical = json.dumps(result, sort_keys=True).encode()
    body = dict(
        tool=tool_name,
        arguments=_redact_call_arguments(arguments),
        result_hash=_vmhash_blake3(result_canonical),
        caller=caller,
        success="error" not in result,
    )

    body_canonical = json.dumps(body, sort_keys=True).encode()
    receipt = dict(
        schema_version="2.0.0",
        type="mcp_tool_call",
        timestamp=datetime.now(timezone.utc).isoformat(),
        scroll="mcp",
        tags=["mcp", "tool-call", tool_name],
        root_hash=_vmhash_blake3(body_canonical),
        body=body,
    )

    with open(MCP_RECEIPTS, "a") as fh:
        fh.write(json.dumps(receipt) + "\n")
|
||||||
|
|
||||||
|
|
||||||
|
def require_session_and_permission(name: str, arguments: dict) -> tuple[bool, dict, str, dict | None]:
    """Fail-closed session + profile enforcement ahead of tool handlers.

    Returns ``(allowed, safe_args, caller, denial_result)``:
    - ``safe_args`` has ``session_token`` stripped so downstream handlers
      never see it.
    - ``caller`` is taken from the validated session's ``operator_did``
      when one is available, otherwise a generic client DID.
    - ``denial_result`` is a structured error payload when denied, else None.
    """
    safe_args = dict(arguments or {})
    caller = "did:vm:mcp:client"

    # Bootstrap endpoints (challenge/verify/etc.) never require a session.
    if name in OPEN_TOOLS:
        return True, safe_args, caller, None

    token = safe_args.pop("session_token", None)
    if not token:
        denial = {
            "error": "Missing session_token",
            "allowed": False,
            "reason": "Session required for non-auth tools",
        }
        return False, safe_args, caller, denial

    validation = auth_validate_token(token)
    if not validation.get("valid"):
        denial = {
            "error": "Invalid session",
            "allowed": False,
            "reason": validation.get("error", "invalid_session"),
        }
        return False, safe_args, caller, denial

    caller = validation.get("operator_did") or caller

    # Map the session's scope to a capability profile, then check the tool.
    profile = get_profile_for_scope(str(validation.get("scope", "read")))
    permission = check_profile_permission(profile, name)
    if not permission.get("allowed"):
        denial = {
            "error": "Permission denied",
            "allowed": False,
            "profile": permission.get("profile"),
            "reason": permission.get("reason", "denied"),
        }
        return False, safe_args, caller, denial

    return True, safe_args, caller, None
|
||||||
|
|
||||||
|
|
||||||
|
# =============================================================================
# TOOL DEFINITIONS
# =============================================================================

# Each entry mirrors the MCP ``Tool`` schema: a name, a human-readable
# description, and a JSON-Schema ``inputSchema``. These are expanded with
# ``Tool(**t)`` in list_tools(), and the names must match the handler keys
# in handle_tool_call() and the gating sets in OPEN_TOOLS / the auth module.
TOOLS = [
    # -------------------------------------------------------------------------
    # GUARDIAN TOOLS
    # -------------------------------------------------------------------------
    {
        "name": "guardian_anchor_now",
        "description": "Anchor all or specified scrolls to compute a Merkle root snapshot. Emits a guardian receipt.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "scrolls": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "List of scroll names to anchor. Omit for all scrolls.",
                },
                "guardian_did": {
                    "type": "string",
                    "default": "did:vm:guardian:mcp",
                },
                "backend": {
                    "type": "string",
                    "default": "local",
                    "enum": ["local", "ethereum", "stellar"],
                },
            },
        },
    },
    {
        "name": "guardian_verify_receipt",
        "description": "Verify a receipt exists in a scroll's JSONL by its hash.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "receipt_hash": {"type": "string"},
                "scroll": {"type": "string", "default": "guardian"},
            },
            "required": ["receipt_hash"],
        },
    },
    {
        "name": "guardian_status",
        "description": "Get current status of all scrolls including Merkle roots and leaf counts.",
        "inputSchema": {"type": "object", "properties": {}},
    },
    # -------------------------------------------------------------------------
    # TREASURY TOOLS
    # -------------------------------------------------------------------------
    {
        "name": "treasury_create_budget",
        "description": "Create a new budget for tracking expenditures.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "budget_id": {"type": "string"},
                "name": {"type": "string"},
                "allocated": {"type": "integer"},
                "currency": {"type": "string", "default": "EUR"},
                "created_by": {"type": "string", "default": "did:vm:mcp:treasury"},
            },
            "required": ["budget_id", "name", "allocated"],
        },
    },
    {
        "name": "treasury_balance",
        "description": "Get balance for a specific budget or all budgets.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "budget_id": {"type": "string"},
            },
        },
    },
    {
        "name": "treasury_debit",
        "description": "Debit (spend) from a budget. Fails if insufficient funds.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "budget_id": {"type": "string"},
                "amount": {"type": "integer"},
                "description": {"type": "string"},
                "debited_by": {"type": "string", "default": "did:vm:mcp:treasury"},
            },
            "required": ["budget_id", "amount", "description"],
        },
    },
    {
        "name": "treasury_credit",
        "description": "Credit (add funds) to a budget.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "budget_id": {"type": "string"},
                "amount": {"type": "integer"},
                "description": {"type": "string"},
                "credited_by": {"type": "string", "default": "did:vm:mcp:treasury"},
            },
            "required": ["budget_id", "amount", "description"],
        },
    },
    # -------------------------------------------------------------------------
    # COGNITIVE TOOLS (Claude as 7th Organ)
    # -------------------------------------------------------------------------
    {
        "name": "cognitive_context",
        "description": "Read current VaultMesh context for AI reasoning. Aggregates alerts, health, receipts, threats, treasury, governance, and memory.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "include": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Context types: alerts, health, receipts, threats, treasury, governance, memory",
                },
                "session_id": {"type": "string"},
            },
        },
    },
    {
        "name": "cognitive_decide",
        "description": "Submit a reasoned decision with cryptographic attestation. Every decision is signed and anchored to ProofChain.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "reasoning_chain": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "List of reasoning steps leading to decision",
                },
                "decision": {
                    "type": "string",
                    "description": "Decision type: invoke_tem, alert, remediate, approve, etc.",
                },
                "confidence": {
                    "type": "number",
                    "minimum": 0,
                    "maximum": 1,
                },
                "evidence": {
                    "type": "array",
                    "items": {"type": "string"},
                },
                "operator_did": {"type": "string", "default": "did:vm:cognitive:claude"},
                "auto_action_threshold": {"type": "number", "default": 0.95},
            },
            "required": ["reasoning_chain", "decision", "confidence"],
        },
    },
    {
        "name": "cognitive_invoke_tem",
        "description": "Invoke Tem (Guardian) with AI-detected threat pattern. Transmutes threats into defensive capabilities.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "threat_type": {
                    "type": "string",
                    "description": "Category: replay_attack, intrusion, anomaly, credential_stuffing, etc.",
                },
                "threat_id": {"type": "string"},
                "target": {"type": "string"},
                "evidence": {
                    "type": "array",
                    "items": {"type": "string"},
                },
                "recommended_transmutation": {"type": "string"},
                "operator_did": {"type": "string", "default": "did:vm:cognitive:claude"},
            },
            "required": ["threat_type", "threat_id", "target", "evidence"],
        },
    },
    {
        "name": "cognitive_memory_get",
        "description": "Query conversation/reasoning memory from CRDT realm.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "key": {"type": "string"},
                "session_id": {"type": "string"},
                "realm": {"type": "string", "default": "memory"},
            },
            "required": ["key"],
        },
    },
    {
        "name": "cognitive_memory_set",
        "description": "Store reasoning artifacts for future sessions. Uses CRDT-style merge.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "key": {"type": "string"},
                "value": {"type": "object"},
                "session_id": {"type": "string"},
                "realm": {"type": "string", "default": "memory"},
                "merge": {"type": "boolean", "default": True},
            },
            "required": ["key", "value"],
        },
    },
    {
        "name": "cognitive_attest",
        "description": "Create cryptographic attestation of Claude's reasoning state. Anchors to external chains.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "attestation_type": {"type": "string"},
                "content": {"type": "object"},
                "anchor_to": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Anchor backends: local, rfc3161, eth, btc",
                },
                "operator_did": {"type": "string", "default": "did:vm:cognitive:claude"},
            },
            "required": ["attestation_type", "content"],
        },
    },
    {
        "name": "cognitive_audit_trail",
        "description": "Query historical AI decisions for audit with full provenance.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "filter_type": {"type": "string"},
                "time_range": {
                    "type": "object",
                    "properties": {
                        "start": {"type": "string"},
                        "end": {"type": "string"},
                    },
                },
                "confidence_min": {"type": "number"},
                "limit": {"type": "integer", "default": 100},
            },
        },
    },
    {
        "name": "cognitive_oracle_chain",
        "description": "Execute oracle chain with cognitive enhancement. Adds memory context and Tem awareness.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "question": {"type": "string"},
                "frameworks": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Compliance frameworks: GDPR, AI_ACT, NIS2, etc.",
                },
                "max_docs": {"type": "integer", "default": 10},
                "include_memory": {"type": "boolean", "default": True},
                "session_id": {"type": "string"},
            },
            "required": ["question"],
        },
    },
    # -------------------------------------------------------------------------
    # AUTH TOOLS
    # -------------------------------------------------------------------------
    {
        "name": "auth_challenge",
        "description": "Generate an authentication challenge for an operator.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "operator_pubkey_b64": {"type": "string"},
                "scope": {
                    "type": "string",
                    "enum": ["read", "admin", "vault", "anchor", "cognitive"],
                    "default": "read",
                },
                "ttl_seconds": {"type": "integer", "default": 300},
            },
            "required": ["operator_pubkey_b64"],
        },
    },
    {
        "name": "auth_verify",
        "description": "Verify a signed challenge and issue session token.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "challenge_id": {"type": "string"},
                "signature_b64": {"type": "string"},
                "ip_hint": {"type": "string"},
            },
            "required": ["challenge_id", "signature_b64"],
        },
    },
    {
        "name": "auth_check_permission",
        "description": "Check if a session has permission to call a tool.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "token": {"type": "string"},
                "tool_name": {"type": "string"},
            },
            "required": ["token", "tool_name"],
        },
    },
    {
        "name": "auth_create_dev_session",
        "description": "Create a development session for testing (DEV ONLY).",
        "inputSchema": {
            "type": "object",
            "properties": {
                "scope": {"type": "string", "default": "cognitive"},
                "operator_did": {"type": "string", "default": "did:vm:cognitive:claude-dev"},
            },
        },
    },
    {
        "name": "auth_revoke",
        "description": "Revoke a session token.",
        "inputSchema": {
            "type": "object",
            "properties": {
                "token": {"type": "string"},
            },
            "required": ["token"],
        },
    },
    {
        "name": "auth_list_sessions",
        "description": "List all active sessions (admin only).",
        "inputSchema": {"type": "object", "properties": {}},
    },

]
|
||||||
|
|
||||||
|
|
||||||
|
def handle_tool_call(name: str, arguments: dict) -> dict[str, Any]:
    """Dispatch a tool call: resolve the handler, enforce auth, emit a receipt.

    Returns the handler's result dict, or a structured ``{"error": ...}``
    payload for unknown tools, denied calls, and handler failures. A receipt
    is emitted for every call that reaches the auth gate, success or not.
    """
    handlers = {
        # Guardian
        "guardian_anchor_now": guardian_anchor_now,
        "guardian_verify_receipt": guardian_verify_receipt,
        "guardian_status": guardian_status,
        # Treasury
        "treasury_create_budget": treasury_create_budget,
        "treasury_balance": treasury_balance,
        "treasury_debit": treasury_debit,
        "treasury_credit": treasury_credit,
        # Cognitive
        "cognitive_context": cognitive_context,
        "cognitive_decide": cognitive_decide,
        "cognitive_invoke_tem": cognitive_invoke_tem,
        "cognitive_memory_get": cognitive_memory_get,
        "cognitive_memory_set": cognitive_memory_set,
        "cognitive_attest": cognitive_attest,
        "cognitive_audit_trail": cognitive_audit_trail,
        "cognitive_oracle_chain": cognitive_oracle_chain,
        # Auth
        "auth_challenge": auth_challenge,
        "auth_verify": auth_verify,
        "auth_check_permission": auth_check_permission,
        "auth_create_dev_session": auth_create_dev_session,
        "auth_revoke": auth_revoke,
        "auth_list_sessions": auth_list_sessions,
    }

    if name not in handlers:
        return {"error": f"Unknown tool: {name}"}

    allowed, safe_args, caller, denial = require_session_and_permission(name, arguments)
    if not allowed:
        _emit_mcp_receipt(name, safe_args, denial, caller=caller)
        return denial

    # A failing handler must not crash the server: convert the exception
    # into an error result so a receipt is still emitted for the call.
    try:
        result = handlers[name](**safe_args)
    except Exception as exc:
        logger.exception("Tool %s raised", name)
        result = {"error": f"{type(exc).__name__}: {exc}"}

    # Emit receipt for the tool call
    _emit_mcp_receipt(name, safe_args, result, caller=caller)

    return result
|
||||||
|
|
||||||
|
|
||||||
|
if MCP_AVAILABLE:
    # Create MCP server
    app = Server("vaultmesh-mcp")

    @app.list_tools()
    async def list_tools() -> list[Tool]:
        """List available VaultMesh tools."""
        return [Tool(**t) for t in TOOLS]

    @app.call_tool()
    async def call_tool(name: str, arguments: dict) -> list[TextContent]:
        """Handle tool invocation.

        Arguments are redacted before logging so session tokens never
        reach the log stream (mirrors the receipt redaction policy).
        """
        logger.info("Tool call: %s with %s", name, _redact_call_arguments(arguments))
        result = handle_tool_call(name, arguments)
        return [TextContent(type="text", text=json.dumps(result, indent=2))]
|
||||||
|
|
||||||
|
|
||||||
|
async def main():
    """Start the MCP server on stdio; prints a hint and exits if mcp is absent."""
    if not MCP_AVAILABLE:
        print("MCP library not available. Install with: pip install mcp")
        return

    logger.info("Starting VaultMesh MCP Server (root: %s)", VAULTMESH_ROOT)
    logger.info("Tools registered: %d", len(TOOLS))
    async with stdio_server() as (reader, writer):
        await app.run(reader, writer, app.create_initialization_options())
|
||||||
|
|
||||||
|
|
||||||
|
def run_standalone():
    """Run as standalone CLI for testing without MCP.

    With no arguments, prints the tool catalog and usage. With
    ``<tool> [json_args]``, dispatches through handle_tool_call and
    prints the JSON result.
    """
    import sys

    if len(sys.argv) < 2:
        print("VaultMesh MCP Server - Standalone Mode")
        print(f"\nVaultMesh Root: {VAULTMESH_ROOT}")
        print(f"\nRegistered Tools ({len(TOOLS)}):")
        print("-" * 60)
        for tool in TOOLS:
            print(f" {tool['name']}")
            # Only show an ellipsis when the description was actually truncated.
            desc = tool["description"]
            if len(desc) > 70:
                desc = desc[:70] + "..."
            print(f" {desc}")
        print("-" * 60)
        print("\nUsage: python -m vaultmesh_mcp.server <tool> [json_args]")
        print("\nExample:")
        print(' python -m vaultmesh_mcp.server cognitive_context \'{"include": ["health"]}\'')
        return

    tool_name = sys.argv[1]
    args_str = sys.argv[2] if len(sys.argv) > 2 else "{}"

    try:
        arguments = json.loads(args_str)
    except json.JSONDecodeError:
        print(f"Invalid JSON arguments: {args_str}")
        return

    result = handle_tool_call(tool_name, arguments)
    print(json.dumps(result, indent=2))
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    import sys
    # If any CLI arguments provided (other than module name), run standalone.
    # Standalone is also the fallback when the mcp library is not installed.
    if len(sys.argv) > 1 or not MCP_AVAILABLE:
        run_standalone()
    else:
        asyncio.run(main())
|
||||||
84
packages/vaultmesh_mcp/tools/__init__.py
Normal file
84
packages/vaultmesh_mcp/tools/__init__.py
Normal file
@@ -0,0 +1,84 @@
|
|||||||
|
"""VaultMesh MCP Tools."""
|
||||||
|
|
||||||
|
from .guardian import guardian_anchor_now, guardian_verify_receipt, guardian_status
|
||||||
|
from .treasury import treasury_balance, treasury_debit, treasury_credit, treasury_create_budget
|
||||||
|
from .cognitive import (
|
||||||
|
cognitive_context,
|
||||||
|
cognitive_decide,
|
||||||
|
cognitive_invoke_tem,
|
||||||
|
cognitive_memory_get,
|
||||||
|
cognitive_memory_set,
|
||||||
|
cognitive_attest,
|
||||||
|
cognitive_audit_trail,
|
||||||
|
cognitive_oracle_chain,
|
||||||
|
)
|
||||||
|
from .auth import (
|
||||||
|
auth_challenge,
|
||||||
|
auth_verify,
|
||||||
|
auth_validate_token,
|
||||||
|
auth_check_permission,
|
||||||
|
auth_revoke,
|
||||||
|
auth_list_sessions,
|
||||||
|
auth_create_dev_session,
|
||||||
|
Profile,
|
||||||
|
check_profile_permission,
|
||||||
|
get_profile_for_scope,
|
||||||
|
escalate_profile,
|
||||||
|
)
|
||||||
|
from .escalation import (
|
||||||
|
escalate,
|
||||||
|
deescalate,
|
||||||
|
escalate_on_threat,
|
||||||
|
escalate_to_phoenix,
|
||||||
|
get_active_escalations,
|
||||||
|
get_escalation_history,
|
||||||
|
check_expired_escalations,
|
||||||
|
EscalationType,
|
||||||
|
DeescalationType,
|
||||||
|
EscalationContext,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Public API of the tools package. Keep in sync with the submodule
# imports above — every re-exported name must appear in both places.
__all__ = [
    # Guardian
    "guardian_anchor_now",
    "guardian_verify_receipt",
    "guardian_status",
    # Treasury
    "treasury_balance",
    "treasury_debit",
    "treasury_credit",
    "treasury_create_budget",
    # Cognitive (8 tools)
    "cognitive_context",
    "cognitive_decide",
    "cognitive_invoke_tem",
    "cognitive_memory_get",
    "cognitive_memory_set",
    "cognitive_attest",
    "cognitive_audit_trail",
    "cognitive_oracle_chain",
    # Auth
    "auth_challenge",
    "auth_verify",
    "auth_validate_token",
    "auth_check_permission",
    "auth_revoke",
    "auth_list_sessions",
    "auth_create_dev_session",
    # Profiles
    "Profile",
    "check_profile_permission",
    "get_profile_for_scope",
    "escalate_profile",
    # Escalation
    "escalate",
    "deescalate",
    "escalate_on_threat",
    "escalate_to_phoenix",
    "get_active_escalations",
    "get_escalation_history",
    "check_expired_escalations",
    "EscalationType",
    "DeescalationType",
    "EscalationContext",
]
|
||||||
638
packages/vaultmesh_mcp/tools/auth.py
Normal file
638
packages/vaultmesh_mcp/tools/auth.py
Normal file
@@ -0,0 +1,638 @@
|
|||||||
|
"""
|
||||||
|
VaultMesh MCP Authentication - Ed25519 Challenge-Response
|
||||||
|
|
||||||
|
Implements cryptographic authentication for MCP operators with
|
||||||
|
capability-based access control and session management.
|
||||||
|
|
||||||
|
Scopes:
|
||||||
|
- read: Query state (mesh_status, proof_verify)
|
||||||
|
- admin: Execute commands (tactical_execute)
|
||||||
|
- vault: Access treasury, sensitive data
|
||||||
|
- anchor: Create blockchain proofs
|
||||||
|
- cognitive: AI reasoning capabilities (Claude integration)
|
||||||
|
"""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import secrets
|
||||||
|
import time
|
||||||
|
from dataclasses import dataclass, asdict
|
||||||
|
from datetime import datetime, timezone, timedelta
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any, Dict, Optional, Set
|
||||||
|
from enum import Enum
|
||||||
|
|
||||||
|
import blake3
|
||||||
|
|
||||||
|
# Optional: Ed25519 support
|
||||||
|
try:
|
||||||
|
from nacl.signing import VerifyKey
|
||||||
|
from nacl.exceptions import BadSignature
|
||||||
|
import nacl.encoding
|
||||||
|
NACL_AVAILABLE = True
|
||||||
|
except ImportError:
|
||||||
|
NACL_AVAILABLE = False
|
||||||
|
|
||||||
|
# VaultMesh paths
|
||||||
|
VAULTMESH_ROOT = Path(os.environ.get("VAULTMESH_ROOT", Path(__file__).parents[3])).resolve()
|
||||||
|
RECEIPTS_ROOT = VAULTMESH_ROOT / "receipts"
|
||||||
|
AUTH_STORE = VAULTMESH_ROOT / "auth"
|
||||||
|
|
||||||
|
|
||||||
|
def _vmhash_blake3(data: bytes) -> str:
    """Return *data* hashed in VaultMesh format: ``blake3:<hex digest>``."""
    digest = blake3.blake3(data).hexdigest()
    return "blake3:" + digest
|
||||||
|
|
||||||
|
|
||||||
|
def _now_iso() -> str:
|
||||||
|
"""Current UTC timestamp in ISO format."""
|
||||||
|
return datetime.now(timezone.utc).isoformat()
|
||||||
|
|
||||||
|
|
||||||
|
class Scope(Enum):
    """Capability scopes for MCP access control.

    Each scope maps to a set of callable tool names in SCOPE_TOOLS below.
    """
    READ = "read"            # query-only: status, balances, audit reads
    ADMIN = "admin"          # execute commands / configuration
    VAULT = "vault"          # treasury and sensitive data
    ANCHOR = "anchor"        # create blockchain proofs
    COGNITIVE = "cognitive"  # AI reasoning capabilities (Claude integration)
|
||||||
|
|
||||||
|
|
||||||
|
# Tool permissions by scope.
# NOTE(review): Scope.COGNITIVE repeats Scope.READ's tools by hand rather
# than computing the union — when READ changes, update COGNITIVE too.
SCOPE_TOOLS: Dict[Scope, Set[str]] = {
    Scope.READ: {
        "mesh_status",
        "shield_status",
        "proof_verify",
        "guardian_status",
        "treasury_balance",
        "cognitive_context",
        "cognitive_memory_get",
        "cognitive_audit_trail",
    },
    Scope.ADMIN: {
        "tactical_execute",
        "mesh_configure",
        "agent_task",
    },
    Scope.VAULT: {
        "treasury_debit",
        "treasury_credit",
        "treasury_create_budget",
    },
    Scope.ANCHOR: {
        "guardian_anchor_now",
        "proof_anchor",
        "cognitive_attest",
    },
    Scope.COGNITIVE: {
        "cognitive_context",
        "cognitive_decide",
        "cognitive_invoke_tem",
        "cognitive_memory_get",
        "cognitive_memory_set",
        "cognitive_attest",
        "cognitive_audit_trail",
        "cognitive_oracle_chain",
        # Inherits from READ
        "mesh_status",
        "shield_status",
        "proof_verify",
        "guardian_status",
        "treasury_balance",
    },
}
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class Challenge:
    """A short-lived authentication challenge awaiting an operator signature."""
    challenge_id: str
    nonce: str
    operator_pubkey: str
    scope: str
    created_at: str
    expires_at: str

    def is_expired(self) -> bool:
        """Return True once the current UTC time is past ``expires_at``."""
        deadline = datetime.fromisoformat(self.expires_at.replace('Z', '+00:00'))
        return datetime.now(timezone.utc) > deadline
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class Session:
    """An authenticated operator session bound to one scope and token."""
    session_id: str
    token: str
    operator_pubkey: str
    operator_did: str
    scope: str
    created_at: str
    expires_at: str
    ip_hint: Optional[str] = None

    def is_expired(self) -> bool:
        """Return True once the current UTC time is past ``expires_at``."""
        deadline = datetime.fromisoformat(self.expires_at.replace('Z', '+00:00'))
        return datetime.now(timezone.utc) > deadline
|
||||||
|
|
||||||
|
|
||||||
|
# In-memory stores (would be persisted in production).
# NOTE(review): process-local only — challenges and sessions are lost on
# restart and not shared across worker processes; persist before production.
_challenges: Dict[str, Challenge] = {}
_sessions: Dict[str, Session] = {}
|
||||||
|
|
||||||
|
|
||||||
|
def _emit_auth_receipt(receipt_type: str, body: dict) -> dict:
    """Append an authentication receipt to the identity scroll and return it."""
    scroll_path = RECEIPTS_ROOT / "identity" / "identity_events.jsonl"
    scroll_path.parent.mkdir(parents=True, exist_ok=True)

    # Hash the canonical (key-sorted) body so equal payloads hash equally.
    canonical = json.dumps(body, sort_keys=True).encode()
    receipt = {
        "schema_version": "2.0.0",
        "type": receipt_type,
        "timestamp": _now_iso(),
        "scroll": "identity",
        "tags": ["auth", receipt_type],
        "root_hash": _vmhash_blake3(canonical),
        "body": body,
    }

    with open(scroll_path, "a") as handle:
        handle.write(json.dumps(receipt) + "\n")

    return receipt
|
||||||
|
|
||||||
|
|
||||||
|
def auth_challenge(
    operator_pubkey_b64: str,
    scope: str = "read",
    ttl_seconds: int = 300,
) -> Dict[str, Any]:
    """
    Generate an authentication challenge for an operator.

    Args:
        operator_pubkey_b64: Base64-encoded Ed25519 public key
        scope: Requested scope (read, admin, vault, anchor, cognitive)
        ttl_seconds: Challenge validity period

    Returns:
        Challenge ID and nonce for signing
    """
    # Reject unknown scopes before minting anything.
    try:
        Scope(scope)
    except ValueError:
        return {"error": f"Invalid scope: {scope}. Valid: {[s.value for s in Scope]}"}

    issued_at = datetime.now(timezone.utc)
    deadline = issued_at + timedelta(seconds=ttl_seconds)
    challenge_id = f"ch_{secrets.token_hex(16)}"
    nonce = secrets.token_hex(32)

    _challenges[challenge_id] = Challenge(
        challenge_id=challenge_id,
        nonce=nonce,
        operator_pubkey=operator_pubkey_b64,
        scope=scope,
        created_at=issued_at.isoformat(),
        expires_at=deadline.isoformat(),
    )

    _emit_auth_receipt("auth_challenge", {
        "challenge_id": challenge_id,
        "operator_pubkey": operator_pubkey_b64,
        "scope": scope,
        "expires_at": deadline.isoformat(),
    })

    return {
        "challenge_id": challenge_id,
        "nonce": nonce,
        "scope": scope,
        "expires_at": deadline.isoformat(),
        "message": "Sign the nonce with your Ed25519 private key",
    }
|
||||||
|
|
||||||
|
|
||||||
|
def auth_verify(
    challenge_id: str,
    signature_b64: str,
    ip_hint: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Verify a signed challenge and issue a session token.

    Args:
        challenge_id: The challenge ID from auth_challenge
        signature_b64: Base64-encoded Ed25519 signature of the nonce
        ip_hint: Optional IP hint for session binding

    Returns:
        Session token and metadata on success, or an {"error": ...} dict.
    """
    challenge = _challenges.get(challenge_id)
    if not challenge:
        return {"error": "Challenge not found or expired"}

    if challenge.is_expired():
        del _challenges[challenge_id]
        return {"error": "Challenge expired"}

    # Verify the Ed25519 signature over the nonce.
    if NACL_AVAILABLE:
        try:
            pubkey_bytes = nacl.encoding.Base64Encoder.decode(challenge.operator_pubkey.encode())
            verify_key = VerifyKey(pubkey_bytes)
            sig_bytes = nacl.encoding.Base64Encoder.decode(signature_b64.encode())
            verify_key.verify(challenge.nonce.encode(), sig_bytes)
        # BUGFIX: was `except (BadSignature, Exception)` — a redundant tuple,
        # since BadSignature is itself an Exception subclass. A single broad
        # Exception keeps behavior identical (it also covers malformed base64
        # or key material) without implying a narrower catch than exists.
        except Exception as e:
            _emit_auth_receipt("auth_failure", {
                "challenge_id": challenge_id,
                "reason": "invalid_signature",
                "error": str(e),
            })
            return {"error": "Invalid signature"}
    else:
        # SECURITY NOTE(review): without PyNaCl installed, ANY signature is
        # accepted. This is intended for tests only; production deployments
        # must run with NACL_AVAILABLE true — confirm with deployment config.
        pass

    # Challenges are single-use: consume it before minting the session.
    del _challenges[challenge_id]

    # Mint a 30-minute session bound to the challenge's scope.
    session_id = f"ses_{secrets.token_hex(16)}"
    token = secrets.token_urlsafe(48)

    now = datetime.now(timezone.utc)
    expires = now + timedelta(minutes=30)

    # Derive a deterministic DID from the public key hash.
    operator_did = f"did:vm:operator:{_vmhash_blake3(challenge.operator_pubkey.encode())[:16]}"

    session = Session(
        session_id=session_id,
        token=token,
        operator_pubkey=challenge.operator_pubkey,
        operator_did=operator_did,
        scope=challenge.scope,
        created_at=now.isoformat(),
        expires_at=expires.isoformat(),
        ip_hint=ip_hint,
    )
    _sessions[token] = session

    _emit_auth_receipt("auth_success", {
        "session_id": session_id,
        "operator_did": operator_did,
        "scope": challenge.scope,
        "expires_at": expires.isoformat(),
    })

    return {
        "success": True,
        "session_id": session_id,
        "token": token,
        "operator_did": operator_did,
        "scope": challenge.scope,
        "expires_at": expires.isoformat(),
        "ttl_seconds": 1800,
    }
|
||||||
|
|
||||||
|
|
||||||
|
def auth_validate_token(token: str) -> Dict[str, Any]:
    """
    Validate a session token.

    Args:
        token: The session token to validate

    Returns:
        Session info if valid, error otherwise
    """
    session = _sessions.get(token)
    if session is None:
        return {"valid": False, "error": "Session not found"}

    # Expired sessions are purged eagerly on lookup.
    if session.is_expired():
        del _sessions[token]
        return {"valid": False, "error": "Session expired"}

    return {
        "valid": True,
        "session_id": session.session_id,
        "operator_did": session.operator_did,
        "scope": session.scope,
        "expires_at": session.expires_at,
    }
|
||||||
|
|
||||||
|
|
||||||
|
def auth_check_permission(token: str, tool_name: str) -> Dict[str, Any]:
    """
    Check if a session has permission to call a tool.

    Args:
        token: Session token
        tool_name: Name of the tool to check

    Returns:
        Permission check result
    """
    validation = auth_validate_token(token)
    if not validation.get("valid"):
        return {"allowed": False, "reason": validation.get("error")}

    scope_name = validation["scope"]
    try:
        granted = SCOPE_TOOLS.get(Scope(scope_name), set())
    except ValueError:
        return {"allowed": False, "reason": f"Invalid scope: {scope_name}"}

    # Deny path carries the full grant list to aid debugging.
    if tool_name not in granted:
        return {
            "allowed": False,
            "reason": f"Tool '{tool_name}' not allowed for scope '{scope_name}'",
            "allowed_tools": list(granted),
        }

    return {
        "allowed": True,
        "scope": scope_name,
        "operator_did": validation["operator_did"],
    }
|
||||||
|
|
||||||
|
|
||||||
|
def auth_revoke(token: str) -> Dict[str, Any]:
    """
    Revoke a session token.

    Args:
        token: Session token to revoke

    Returns:
        Revocation result
    """
    revoked = _sessions.pop(token, None)
    if revoked is None:
        return {"revoked": False, "error": "Session not found"}

    _emit_auth_receipt("auth_revoke", {
        "session_id": revoked.session_id,
        "operator_did": revoked.operator_did,
    })

    return {"revoked": True, "session_id": revoked.session_id}
|
||||||
|
|
||||||
|
|
||||||
|
def auth_list_sessions() -> Dict[str, Any]:
    """
    List all active sessions (admin only).

    Expired sessions are purged as a side effect of listing.

    Returns:
        List of active sessions
    """
    surviving = []
    purged = 0

    # Snapshot items() since we delete from _sessions while iterating.
    for token, session in list(_sessions.items()):
        if session.is_expired():
            del _sessions[token]
            purged += 1
            continue
        surviving.append({
            "session_id": session.session_id,
            "operator_did": session.operator_did,
            "scope": session.scope,
            "expires_at": session.expires_at,
        })

    return {
        "active_sessions": surviving,
        "expired_cleaned": purged,
    }
|
||||||
|
|
||||||
|
|
||||||
|
# Convenience function for testing without full auth
|
||||||
|
def auth_create_dev_session(
    scope: str = "cognitive",
    operator_did: str = "did:vm:cognitive:claude-dev",
) -> Dict[str, Any]:
    """
    Create a development session for testing (DEV ONLY).

    Args:
        scope: Scope for the session
        operator_did: DID for the operator

    Returns:
        Session token and metadata
    """
    # Fail-closed: dev sessions may not grant SOVEREIGN-equivalent access.
    # Unknown scopes and the vault scope both collapse to read-only.
    try:
        effective_scope = Scope.READ.value if Scope(scope) == Scope.VAULT else scope
    except ValueError:
        effective_scope = Scope.READ.value

    session_id = f"dev_{secrets.token_hex(8)}"
    token = f"dev_{secrets.token_urlsafe(32)}"

    created = datetime.now(timezone.utc)
    deadline = created + timedelta(hours=24)

    _sessions[token] = Session(
        session_id=session_id,
        token=token,
        operator_pubkey="dev_key",
        operator_did=operator_did,
        scope=effective_scope,
        created_at=created.isoformat(),
        expires_at=deadline.isoformat(),
    )

    return {
        "dev_mode": True,
        "session_id": session_id,
        "token": token,
        "operator_did": operator_did,
        "scope": effective_scope,
        "expires_at": deadline.isoformat(),
        "warning": "DEV SESSION - Do not use in production",
    }
|
||||||
|
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# AGENT CAPABILITY PROFILES
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
class Profile(Enum):
    """Agent capability profiles with hierarchical trust.

    Ordered least- to most-privileged; each tier's tool grant is computed
    below (PROFILE_TOOLS) as a set union over the tier beneath it.
    """
    OBSERVER = "observer"    # 👁 Read-only
    OPERATOR = "operator"    # ⚙ Mutations allowed
    GUARDIAN = "guardian"    # 🛡 Threat response
    PHOENIX = "phoenix"      # 🔥 Crisis mode
    SOVEREIGN = "sovereign"  # 👑 Full authority
|
||||||
|
|
||||||
|
|
||||||
|
# Profile → Tool permissions
|
||||||
|
# Profile → tool-name grants. OBSERVER is spelled out explicitly; each higher
# tier is filled in below as a union with the tier beneath it, so every
# profile is a superset of the previous one.
PROFILE_TOOLS: Dict[Profile, Set[str]] = {
    Profile.OBSERVER: {
        # L0 Perception (read)
        "get_current_tab", "list_tabs", "get_page_content",
        # L1 Substrate (read)
        "read_file", "read_multiple_files", "list_directory", "search_files", "get_file_info",
        "directory_tree", "list_allowed_directories",
        # L2 Cognition (read)
        "cognitive_context", "cognitive_memory_get", "cognitive_audit_trail",
        # L3 Security (read)
        "offsec_status", "offsec_shield_status", "offsec_tem_status", "offsec_mesh_status",
        "offsec_phoenix_status", "offsec_braid_list",
        # L4 Infrastructure (read)
        "worker_list", "kv_list", "r2_list_buckets", "d1_list_databases", "zones_list",
        "queue_list", "workflow_list",
        # L-1 Proof (read)
        "guardian_status", "guardian_verify_receipt", "offsec_proof_latest",
        # Treasury (read)
        "treasury_balance",
        # Auth (read)
        "auth_check_permission",
    },
    Profile.OPERATOR: set(),  # Computed below
    Profile.GUARDIAN: set(),  # Computed below
    Profile.PHOENIX: set(),  # Computed below
    Profile.SOVEREIGN: set(),  # All tools
}

# OPERATOR = OBSERVER + mutations
PROFILE_TOOLS[Profile.OPERATOR] = PROFILE_TOOLS[Profile.OBSERVER] | {
    # L0 Perception (act)
    "execute_javascript", "puppeteer_click", "puppeteer_fill", "puppeteer_select",
    "open_url", "reload_tab", "go_back", "go_forward",
    # L1 Substrate (write)
    "write_file", "edit_file", "create_directory", "move_file",
    "start_process", "interact_with_process",
    # L2 Cognition (decide, low confidence)
    "cognitive_decide", "cognitive_memory_set",
    # L3 Security (shield ops)
    "offsec_shield_arm", "offsec_shield_disarm",
    # L4 Infrastructure (deploy)
    "kv_put", "kv_delete", "worker_put", "r2_put_object",
    # L-1 Proof (local anchor)
    "guardian_anchor_now",
}

# GUARDIAN = OPERATOR + TEM + attestation
PROFILE_TOOLS[Profile.GUARDIAN] = PROFILE_TOOLS[Profile.OPERATOR] | {
    # L2 Cognition (full)
    "cognitive_invoke_tem", "cognitive_attest", "cognitive_oracle_chain",
    # L3 Security (TEM)
    "offsec_tem_transmute", "offsec_tem_rules", "offsec_tem_history",
    "offsec_braid_import",
    # L4 Infrastructure (more)
    "worker_deploy", "d1_query", "queue_send_message", "workflow_execute",
    # L-1 Proof (eth anchor)
    "offsec_proof_generate",
    # Process control
    "kill_process", "force_terminate",
}

# PHOENIX = GUARDIAN + destructive ops + emergency treasury
PROFILE_TOOLS[Profile.PHOENIX] = PROFILE_TOOLS[Profile.GUARDIAN] | {
    # L3 Security (Phoenix)
    "offsec_phoenix_enable", "offsec_phoenix_disable", "offsec_phoenix_inject_crisis",
    "offsec_phoenix_history",
    # L4 Infrastructure (destructive)
    "worker_delete", "r2_delete_bucket", "r2_delete_object",
    "d1_delete_database", "queue_delete", "workflow_delete",
    "kv_delete",  # NOTE: already granted at OPERATOR; harmless in a set union
    # Treasury (emergency)
    "treasury_debit",
}

# SOVEREIGN = everything
PROFILE_TOOLS[Profile.SOVEREIGN] = PROFILE_TOOLS[Profile.PHOENIX] | {
    # Auth (full)
    "auth_challenge", "auth_verify", "auth_create_dev_session", "auth_revoke",
    "auth_list_sessions",
    # Treasury (full)
    "treasury_create_budget", "treasury_credit",
    # All remaining tools
}
|
||||||
|
|
||||||
|
|
||||||
|
def get_profile_for_scope(scope: str) -> Profile:
    """Map an auth scope name to its capability profile.

    Unknown scopes fall back to the least-privileged OBSERVER profile.
    """
    scope_to_profile = {
        "read": Profile.OBSERVER,
        "admin": Profile.OPERATOR,
        "cognitive": Profile.GUARDIAN,
        "anchor": Profile.GUARDIAN,
        "vault": Profile.SOVEREIGN,
    }
    return scope_to_profile.get(scope, Profile.OBSERVER)
|
||||||
|
|
||||||
|
|
||||||
|
def check_profile_permission(profile: Profile, tool_name: str) -> Dict[str, Any]:
    """Check if a profile has permission for a tool.

    A grant ending in "*" is treated as a prefix wildcard.
    """
    granted = PROFILE_TOOLS.get(profile, set())

    wildcard_hit = any(
        pattern.endswith("*") and tool_name.startswith(pattern[:-1])
        for pattern in granted
    )
    if wildcard_hit or tool_name in granted:
        return {"allowed": True, "profile": profile.value}

    return {
        "allowed": False,
        "profile": profile.value,
        "reason": f"Tool '{tool_name}' not allowed for profile '{profile.value}'",
    }
|
||||||
|
|
||||||
|
|
||||||
|
def escalate_profile(current: Profile, reason: str) -> Dict[str, Any]:
    """Request a one-step profile escalation up the trust ladder."""
    ladder = [
        Profile.OBSERVER,
        Profile.OPERATOR,
        Profile.GUARDIAN,
        Profile.PHOENIX,
        Profile.SOVEREIGN,
    ]

    position = ladder.index(current)
    if position == len(ladder) - 1:
        # SOVEREIGN has no higher tier.
        return {"escalated": False, "reason": "Already at maximum profile"}

    target = ladder[position + 1]

    _emit_auth_receipt("profile_escalation", {
        "from_profile": current.value,
        "to_profile": target.value,
        "reason": reason,
    })

    return {
        "escalated": True,
        "from_profile": current.value,
        "to_profile": target.value,
        "reason": reason,
    }
|
||||||
491
packages/vaultmesh_mcp/tools/cognitive.py
Normal file
491
packages/vaultmesh_mcp/tools/cognitive.py
Normal file
@@ -0,0 +1,491 @@
|
|||||||
|
"""
|
||||||
|
Cognitive MCP Tools - Claude as VaultMesh Cognitive Organ
|
||||||
|
|
||||||
|
These tools enable Claude to operate as the 7th Organ of VaultMesh:
|
||||||
|
- Reason over mesh state with full context
|
||||||
|
- Make attested decisions with Ed25519 proofs
|
||||||
|
- Invoke Tem for threat transmutation
|
||||||
|
- Persist memory across sessions via CRDT realm
|
||||||
|
"""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import secrets
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any, Optional, List, Dict
|
||||||
|
|
||||||
|
import blake3
|
||||||
|
|
||||||
|
# VaultMesh root from env or default
|
||||||
|
VAULTMESH_ROOT = Path(os.environ.get("VAULTMESH_ROOT", Path(__file__).parents[3])).resolve()
|
||||||
|
RECEIPTS_ROOT = VAULTMESH_ROOT / "receipts"
|
||||||
|
COGNITIVE_REALM = VAULTMESH_ROOT / "realms" / "cognitive"
|
||||||
|
|
||||||
|
|
||||||
|
def _vmhash_blake3(data: bytes) -> str:
    """VaultMesh hash: blake3:<hex>.

    All receipts in this module reference content by this prefixed digest.
    """
    return f"blake3:{blake3.blake3(data).hexdigest()}"
|
||||||
|
|
||||||
|
|
||||||
|
def _now_iso() -> str:
    """Return the current UTC time as an ISO-8601 string (with offset)."""
    return datetime.now(tz=timezone.utc).isoformat()
|
||||||
|
|
||||||
|
|
||||||
|
def _emit_cognitive_receipt(receipt_type: str, body: dict, scroll: str = "cognitive") -> dict:
    """Append a receipt for a cognitive operation to its scroll and return it."""
    scroll_path = RECEIPTS_ROOT / scroll / f"{scroll}_events.jsonl"
    scroll_path.parent.mkdir(parents=True, exist_ok=True)

    # Hash the canonical (key-sorted) body so equal payloads hash equally.
    canonical = json.dumps(body, sort_keys=True).encode()
    receipt = {
        "schema_version": "2.0.0",
        "type": receipt_type,
        "timestamp": _now_iso(),
        "scroll": scroll,
        "tags": ["cognitive", receipt_type],
        "root_hash": _vmhash_blake3(canonical),
        "body": body,
    }

    with open(scroll_path, "a") as handle:
        handle.write(json.dumps(receipt) + "\n")

    return receipt
|
||||||
|
|
||||||
|
|
||||||
|
def _load_json_file(path: Path) -> dict:
    """Load a JSON file; return an empty dict when it does not exist."""
    if not path.exists():
        return {}
    with open(path, "r") as handle:
        return json.load(handle)
|
||||||
|
|
||||||
|
|
||||||
|
def _save_json_file(path: Path, data: dict) -> None:
    """Write `data` as pretty-printed, key-sorted JSON, creating parent dirs."""
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text(json.dumps(data, indent=2, sort_keys=True))
|
||||||
|
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# COGNITIVE TOOLS - The 8 Tools of AI Reasoning
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
|
||||||
|
def cognitive_context(
    include: Optional[List[str]] = None,
    session_id: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Read current VaultMesh context for AI reasoning.

    Aggregates state from multiple organs to provide Claude with
    full situational awareness for decision-making. `include` selects
    which sections to assemble; the default is all of them.
    """
    sections = include if include is not None else [
        "alerts", "health", "receipts", "threats", "treasury", "governance", "memory",
    ]

    context: Dict[str, Any] = {
        "timestamp": _now_iso(),
        "session_id": session_id,
        "vaultmesh_root": str(VAULTMESH_ROOT),
    }

    if "alerts" in sections:
        context["alerts"] = _load_json_file(RECEIPTS_ROOT / "mesh" / "alerts.json").get("active", [])

    if "health" in sections:
        organs: Dict[str, Any] = {}
        for organ in ["guardian", "treasury", "mesh", "identity", "observability"]:
            organ_path = RECEIPTS_ROOT / organ
            organs[organ] = {
                "exists": organ_path.exists(),
                "receipt_count": len(list(organ_path.glob("*.jsonl"))) if organ_path.exists() else 0,
            }
        context["health"] = {"status": "operational", "organs": organs}

    if "receipts" in sections:
        recent: Dict[str, Any] = {}
        for scroll in ["guardian", "treasury", "mesh", "cognitive"]:
            jsonl_path = RECEIPTS_ROOT / scroll / f"{scroll}_events.jsonl"
            if jsonl_path.exists():
                # Only the last 10 receipts per scroll, to keep payloads bounded.
                tail = jsonl_path.read_text().strip().split("\n")[-10:]
                recent[scroll] = [json.loads(line) for line in tail if line]
        context["recent_receipts"] = recent

    if "threats" in sections:
        context["threats"] = _load_json_file(RECEIPTS_ROOT / "offsec" / "threats.json").get("active", [])

    if "treasury" in sections:
        context["treasury"] = _load_json_file(RECEIPTS_ROOT / "treasury" / "budgets.json")

    if "governance" in sections:
        context["governance"] = _load_json_file(VAULTMESH_ROOT / "constitution" / "active_proposals.json")

    # Memory is per-session; skipped when no session_id is supplied.
    if "memory" in sections and session_id:
        context["memory"] = _load_json_file(COGNITIVE_REALM / "memory" / session_id / "context.json")

    return context
|
||||||
|
|
||||||
|
|
||||||
|
def cognitive_decide(
    reasoning_chain: List[str],
    decision: str,
    confidence: float,
    evidence: Optional[List[str]] = None,
    operator_did: str = "did:vm:cognitive:claude",
    auto_action_threshold: float = 0.95,
) -> Dict[str, Any]:
    """
    Submit a reasoned decision with cryptographic attestation.
    """
    # Guard clauses: reject out-of-range confidence and empty reasoning.
    if confidence < 0.0 or confidence > 1.0:
        return {"error": "Confidence must be between 0.0 and 1.0"}
    if not reasoning_chain:
        return {"error": "Reasoning chain cannot be empty"}

    decision_id = f"dec_{secrets.token_hex(8)}"

    body = {
        "decision_id": decision_id,
        "operator_did": operator_did,
        "decision_type": decision,
        "confidence": confidence,
        "reasoning_hash": _vmhash_blake3(json.dumps(reasoning_chain).encode()),
        "reasoning_chain": reasoning_chain,
        "evidence": evidence if evidence is not None else [],
        "auto_approved": confidence >= auto_action_threshold,
        "requires_governance": decision in ("treasury_large", "governance_change", "mesh_restructure"),
    }

    receipt = _emit_cognitive_receipt("cognitive_decision", body)

    required_approvals = []
    if body["requires_governance"]:
        required_approvals.append("governance_vote")
    if not body["auto_approved"]:
        required_approvals.append("operator_confirmation")

    # Canned execution plans for the decision types that have one.
    plans = {
        "invoke_tem": [
            {"step": 1, "action": "validate_threat", "tool": "shield_status"},
            {"step": 2, "action": "invoke_transmutation", "tool": "cognitive_invoke_tem"},
            {"step": 3, "action": "deploy_capability", "tool": "mesh_deploy"},
            {"step": 4, "action": "attest_outcome", "tool": "cognitive_attest"},
        ],
        "alert": [
            {"step": 1, "action": "emit_alert", "tool": "mesh_alert"},
            {"step": 2, "action": "notify_operators", "tool": "notify"},
        ],
    }
    execution_plan = plans.get(decision, [])

    return {
        "success": True,
        "decision_id": decision_id,
        "receipt": receipt,
        "auto_approved": body["auto_approved"],
        "required_approvals": required_approvals,
        "execution_plan": execution_plan,
        "message": f"Decision {decision_id} recorded with confidence {confidence:.2%}",
    }
|
||||||
|
|
||||||
|
|
||||||
|
def cognitive_invoke_tem(
    threat_type: str,
    threat_id: str,
    target: str,
    evidence: List[str],
    recommended_transmutation: Optional[str] = None,
    operator_did: str = "did:vm:cognitive:claude",
) -> Dict[str, Any]:
    """
    Invoke Tem (Guardian) with AI-detected threat pattern.
    Transmutes threats into defensive capabilities.
    """
    invocation_id = f"tem_{secrets.token_hex(8)}"

    # Default transmutation per threat class; the caller may override.
    default_transmutations = {
        "replay_attack": "strict_monotonic_sequence_validator",
        "intrusion": "adaptive_firewall_rule",
        "anomaly": "behavioral_baseline_enforcer",
        "credential_stuffing": "rate_limiter_with_lockout",
        "data_exfiltration": "egress_filter_policy",
        "privilege_escalation": "capability_constraint_enforcer",
    }
    if recommended_transmutation:
        transmutation = recommended_transmutation
    else:
        transmutation = default_transmutations.get(threat_type, "generic_threat_mitigator")

    receipt = _emit_cognitive_receipt("tem_invocation", {
        "invocation_id": invocation_id,
        "operator_did": operator_did,
        "threat_type": threat_type,
        "threat_id": threat_id,
        "target": target,
        "evidence": evidence,
        "transmutation": transmutation,
        "status": "transmuted",
    })

    # Register the forged capability in the mesh capability ledger.
    capability = {
        "capability_id": f"cap_{secrets.token_hex(8)}",
        "name": transmutation,
        "forged_from": threat_id,
        "forged_at": _now_iso(),
        "scope": target,
    }
    caps_path = RECEIPTS_ROOT / "mesh" / "capabilities.json"
    registry = _load_json_file(caps_path)
    registry[capability["capability_id"]] = capability
    _save_json_file(caps_path, registry)

    return {
        "success": True,
        "invocation_id": invocation_id,
        "receipt": receipt,
        "capability": capability,
        "message": f"Threat {threat_id} transmuted into {transmutation}",
    }
|
||||||
|
|
||||||
|
|
||||||
|
def cognitive_memory_get(
    key: str,
    session_id: Optional[str] = None,
    realm: str = "memory",
) -> Dict[str, Any]:
    """
    Query conversation/reasoning memory from CRDT realm.
    """
    # Keys are flattened into filenames; slashes would escape the realm dir.
    filename = f"{key.replace('/', '_')}.json"
    base = COGNITIVE_REALM / realm
    memory_path = (base / session_id / filename) if session_id else (base / filename)

    return {
        "key": key,
        "session_id": session_id,
        "realm": realm,
        "value": _load_json_file(memory_path),
        "exists": memory_path.exists(),
        "path": str(memory_path),
    }
|
||||||
|
|
||||||
|
|
||||||
|
def cognitive_memory_set(
    key: str,
    value: Dict[str, Any],
    session_id: Optional[str] = None,
    realm: str = "memory",
    merge: bool = True,
) -> Dict[str, Any]:
    """
    Store reasoning artifacts for future sessions.
    Uses CRDT-style merge for concurrent update safety.
    """
    # Keys are flattened into filenames; slashes would escape the realm dir.
    filename = f"{key.replace('/', '_')}.json"
    base = COGNITIVE_REALM / realm
    memory_path = (base / session_id / filename) if session_id else (base / filename)

    # Last-writer-wins merge: incoming keys overwrite, untouched keys survive.
    if merge and memory_path.exists():
        stored = dict(_load_json_file(memory_path))
        stored.update(value)
        stored["_updated_at"] = _now_iso()
    else:
        stored = dict(value)
        stored["_created_at"] = _now_iso()

    _save_json_file(memory_path, stored)

    receipt = _emit_cognitive_receipt("memory_write", {
        "key": key,
        "session_id": session_id,
        "realm": realm,
        "value_hash": _vmhash_blake3(json.dumps(value, sort_keys=True).encode()),
        "merged": merge,
    })

    return {
        "success": True,
        "key": key,
        "path": str(memory_path),
        "receipt": receipt,
        "message": f"Memory stored at {key}",
    }
|
||||||
|
|
||||||
|
|
||||||
|
def cognitive_attest(
    attestation_type: str,
    content: Dict[str, Any],
    anchor_to: Optional[List[str]] = None,
    operator_did: str = "did:vm:cognitive:claude",
) -> Dict[str, Any]:
    """
    Create cryptographic attestation of Claude's reasoning state.
    """
    targets = anchor_to if anchor_to is not None else ["local"]

    attestation_id = f"att_{secrets.token_hex(8)}"
    content_hash = _vmhash_blake3(json.dumps(content, sort_keys=True).encode())

    # Local anchor is always recorded; remote anchors start out "pending".
    anchors: Dict[str, Any] = {
        "local": {
            "type": "local",
            "timestamp": _now_iso(),
            "hash": content_hash,
        },
    }
    if "rfc3161" in targets:
        anchors["rfc3161"] = {"type": "rfc3161", "status": "pending", "tsa": "freetsa.org"}
    if "eth" in targets:
        anchors["eth"] = {"type": "ethereum", "status": "pending", "network": "mainnet"}

    body = {
        "attestation_id": attestation_id,
        "attestation_type": attestation_type,
        "operator_did": operator_did,
        "content_hash": content_hash,
        "anchor_targets": targets,
        "anchors": anchors,
    }

    receipt = _emit_cognitive_receipt("attestation", body)

    return {
        "success": True,
        "attestation_id": attestation_id,
        "content_hash": content_hash,
        "receipt": receipt,
        "anchors": anchors,
        "message": f"Attestation {attestation_id} created",
    }
|
||||||
|
|
||||||
|
|
||||||
|
def cognitive_audit_trail(
    filter_type: Optional[str] = None,
    time_range: Optional[Dict[str, str]] = None,
    confidence_min: Optional[float] = None,
    limit: int = 100,
) -> Dict[str, Any]:
    """
    Query historical AI decisions for audit.

    Args:
        filter_type: Only include decisions with this decision_type.
        time_range: Optional {"start": iso, "end": iso} bounds on the
            receipt timestamp (ISO-8601 strings compare lexicographically).
            Assumes "start"/"end" keys — confirm against callers.
        confidence_min: Only include decisions with confidence >= this value.
        limit: Maximum number of decisions to return.

    Returns:
        Dict with the matching decisions and their count.
    """
    cognitive_path = RECEIPTS_ROOT / "cognitive" / "cognitive_events.jsonl"

    if not cognitive_path.exists():
        return {"decisions": [], "count": 0, "message": "No cognitive history found"}

    # BUG FIX: time_range was accepted but never applied.
    start = time_range.get("start") if time_range else None
    end = time_range.get("end") if time_range else None

    decisions = []
    with open(cognitive_path, "r") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue

            try:
                receipt = json.loads(line)
            except json.JSONDecodeError:
                # Skip corrupt lines rather than failing the whole audit.
                continue

            if receipt.get("type") != "cognitive_decision":
                continue

            body = receipt.get("body", {})

            if filter_type and body.get("decision_type") != filter_type:
                continue

            # BUG FIX: `if confidence_min and ...` skipped the filter when
            # confidence_min == 0.0; compare against None explicitly.
            if confidence_min is not None and body.get("confidence", 0) < confidence_min:
                continue

            ts = receipt.get("timestamp")
            if start and (ts is None or ts < start):
                continue
            if end and (ts is None or ts > end):
                continue

            decisions.append({
                "decision_id": body.get("decision_id"),
                "timestamp": ts,
                "decision_type": body.get("decision_type"),
                "confidence": body.get("confidence"),
                "reasoning_hash": body.get("reasoning_hash"),
                "auto_approved": body.get("auto_approved"),
            })

            if len(decisions) >= limit:
                break

    return {
        "decisions": decisions,
        "count": len(decisions),
    }
|
||||||
|
|
||||||
|
|
||||||
|
def cognitive_oracle_chain(
    question: str,
    frameworks: Optional[List[str]] = None,
    max_docs: int = 10,
    include_memory: bool = True,
    session_id: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Execute oracle chain with cognitive enhancement.

    Builds a placeholder compliance answer for the question, hashes it,
    and records an oracle_chain receipt on the compliance scroll.
    """
    fw = frameworks if frameworks is not None else ["GDPR", "AI_ACT"]

    chain_id = f"oracle_{secrets.token_hex(8)}"

    # Gather context; governance only when memory inclusion is disabled.
    sources = ["memory", "governance"] if include_memory else ["governance"]
    context = cognitive_context(include=sources, session_id=session_id)

    answer = {
        "chain_id": chain_id,
        "question": question,
        "frameworks": fw,
        "answer": f"Oracle analysis pending for: {question}",
        "citations": [],
        "compliance_flags": {name: "requires_analysis" for name in fw},
        "gaps": [],
        "confidence": 0.0,
        "requires_human_review": True,
    }

    answer_hash = _vmhash_blake3(json.dumps(answer, sort_keys=True).encode())

    receipt = _emit_cognitive_receipt(
        "oracle_chain",
        {
            "chain_id": chain_id,
            "question": question,
            "frameworks": fw,
            "answer_hash": answer_hash,
        },
        scroll="compliance",
    )

    return {
        "success": True,
        "chain_id": chain_id,
        "answer": answer,
        "answer_hash": answer_hash,
        "receipt": receipt,
    }
|
||||||
492
packages/vaultmesh_mcp/tools/escalation.py
Normal file
492
packages/vaultmesh_mcp/tools/escalation.py
Normal file
@@ -0,0 +1,492 @@
|
|||||||
|
"""
|
||||||
|
Escalation Engine - Profile transitions as first-class proofs
|
||||||
|
|
||||||
|
Every escalation is:
|
||||||
|
- A receipt (immutable record)
|
||||||
|
- A Tem-context (threat awareness)
|
||||||
|
- A reversibility flag (can it be undone?)
|
||||||
|
- A time-bound (when does it expire?)
|
||||||
|
|
||||||
|
Escalation is not runtime magic — it is auditable history.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import secrets
|
||||||
|
from datetime import datetime, timezone, timedelta
|
||||||
|
from dataclasses import dataclass, asdict
|
||||||
|
from enum import Enum
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any, Dict, Optional, List
|
||||||
|
import os
|
||||||
|
|
||||||
|
import blake3
|
||||||
|
|
||||||
|
# VaultMesh paths
# Root of the VaultMesh tree: $VAULTMESH_ROOT if set, otherwise three
# levels above this file (packages/vaultmesh_mcp/tools/ -> repo root).
VAULTMESH_ROOT = Path(os.environ.get("VAULTMESH_ROOT", Path(__file__).parents[3])).resolve()
RECEIPTS_ROOT = VAULTMESH_ROOT / "receipts"
# Append-only JSONL log of escalation / de-escalation receipts.
ESCALATION_LOG = RECEIPTS_ROOT / "identity" / "escalation_events.jsonl"
|
||||||
|
|
||||||
|
|
||||||
|
def _vmhash_blake3(data: bytes) -> str:
    """Return the VaultMesh-format hash of *data*: "blake3:<hex digest>"."""
    digest = blake3.blake3(data).hexdigest()
    return f"blake3:{digest}"
|
||||||
|
|
||||||
|
|
||||||
|
def _now_iso() -> str:
|
||||||
|
return datetime.now(timezone.utc).isoformat()
|
||||||
|
|
||||||
|
|
||||||
|
def _now_ts() -> float:
|
||||||
|
return datetime.now(timezone.utc).timestamp()
|
||||||
|
|
||||||
|
|
||||||
|
class EscalationType(Enum):
    """Types of profile escalation.

    The value strings are persisted in escalation receipts, so they must
    remain stable across versions.
    """
    THREAT_DETECTED = "threat_detected" # Automatic: threat confidence > threshold
    OPERATOR_REQUEST = "operator_request" # Manual: operator requests higher authority
    CRISIS_DECLARED = "crisis_declared" # Emergency: system failure or attack
    QUORUM_APPROVED = "quorum_approved" # Governance: multi-sig approval
    SOVEREIGN_OVERRIDE = "sovereign_override" # Human: direct intervention
|
||||||
|
|
||||||
|
|
||||||
|
class DeescalationType(Enum):
    """Types of profile de-escalation.

    The value strings are persisted in de-escalation receipts, so they
    must remain stable across versions.
    """
    TIMEOUT_EXPIRED = "timeout_expired" # Automatic: time limit reached
    THREAT_RESOLVED = "threat_resolved" # Automatic: no active threats
    OPERATOR_RELEASE = "operator_release" # Manual: operator releases authority
    CRISIS_CONCLUDED = "crisis_concluded" # Phoenix: crisis resolved
    SOVEREIGN_REVOKE = "sovereign_revoke" # Human: explicit revocation
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class EscalationContext:
    """Context captured at escalation time."""
    # Identifier of the triggering threat event, if any.
    threat_id: Optional[str] = None
    # Category of the threat (free-form string).
    threat_type: Optional[str] = None
    # Detector confidence — presumably in [0, 1]; confirm with detector.
    threat_confidence: Optional[float] = None
    # Number of alerts active at escalation time.
    active_alerts: int = 0
    # Overall mesh health label (e.g. "unknown", "crisis").
    mesh_health: str = "unknown"
    # MCP tool that triggered the escalation, if any.
    triggering_tool: Optional[str] = None
    # Decision id that triggered the escalation, if any.
    triggering_decision: Optional[str] = None
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class Escalation:
    """A profile escalation event."""
    # Unique id of this escalation (format: esc_<hex>).
    escalation_id: str
    # Profile authority was escalated from.
    from_profile: str
    # Profile authority was escalated to.
    to_profile: str
    # EscalationType.value string (persisted in receipts).
    escalation_type: str
    # Threat/mesh context captured at escalation time.
    context: EscalationContext

    # Reversibility
    reversible: bool  # Can this be undone without sovereign override?
    auto_deescalate: bool  # De-escalate automatically when the TTL expires?
    deescalate_after_seconds: Optional[int]  # TTL in seconds, None = unbounded.
    deescalate_on_condition: Optional[str]  # Named condition ending the escalation.

    # Time tracking
    created_at: str  # ISO-8601 creation time.
    expires_at: Optional[str]  # ISO-8601 expiry, None when no TTL applies.

    # Proof
    receipt_hash: Optional[str] = None  # root_hash of the emitted escalation receipt.
    tem_context_hash: Optional[str] = None  # Hash of the Tem (threat) context snapshot.

    # State
    active: bool = True  # False once de-escalated.
    deescalated_at: Optional[str] = None  # ISO-8601 de-escalation time.
    deescalation_type: Optional[str] = None  # DeescalationType.value string.
|
||||||
|
|
||||||
|
|
||||||
|
# In-memory active escalations (would be persisted in production)
# Maps escalation_id -> Escalation; entries are removed on de-escalation.
_active_escalations: Dict[str, Escalation] = {}
|
||||||
|
|
||||||
|
|
||||||
|
def _emit_escalation_receipt(escalation: Escalation, event_type: str) -> dict:
    """Emit a receipt for escalation events."""
    ESCALATION_LOG.parent.mkdir(parents=True, exist_ok=True)

    body: Dict[str, Any] = {
        "escalation_id": escalation.escalation_id,
        "event_type": event_type,
        "from_profile": escalation.from_profile,
        "to_profile": escalation.to_profile,
        "escalation_type": escalation.escalation_type,
        "reversible": escalation.reversible,
        "context": asdict(escalation.context),
        "expires_at": escalation.expires_at,
        "active": escalation.active,
    }

    # De-escalations additionally record when and how authority was returned.
    if event_type == "deescalation":
        body["deescalated_at"] = escalation.deescalated_at
        body["deescalation_type"] = escalation.deescalation_type

    body_hash = _vmhash_blake3(json.dumps(body, sort_keys=True).encode())

    receipt = {
        "schema_version": "2.0.0",
        "type": f"profile_{event_type}",
        "timestamp": _now_iso(),
        "scroll": "identity",
        "tags": ["escalation", event_type, escalation.from_profile, escalation.to_profile],
        "root_hash": body_hash,
        "body": body,
    }

    with open(ESCALATION_LOG, "a") as log:
        log.write(json.dumps(receipt) + "\n")

    return receipt
|
||||||
|
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# ESCALATION POLICIES
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
# Policy for each permitted (from_profile, to_profile) transition.
# Keys used by escalate():
#   reversible          — can the transition be undone without sovereign revoke
#   auto_deescalate     — de-escalate automatically when the TTL expires
#   default_ttl_seconds — default lifetime in seconds (None = unbounded)
#   requires_approval   — escalate() demands approved_by or an approving type
#   requires_human      — only EscalationType.SOVEREIGN_OVERRIDE is accepted
ESCALATION_POLICIES = {
    # OBSERVER → OPERATOR
    ("observer", "operator"): {
        "reversible": True,
        "auto_deescalate": True,
        "default_ttl_seconds": 3600,  # 1 hour
        "requires_reason": True,
        "requires_approval": False,
    },
    # OPERATOR → GUARDIAN
    ("operator", "guardian"): {
        "reversible": True,
        "auto_deescalate": True,
        "default_ttl_seconds": 7200,  # 2 hours
        "requires_reason": True,
        "requires_approval": False,
        "auto_on_threat_confidence": 0.8,
    },
    # GUARDIAN → PHOENIX
    ("guardian", "phoenix"): {
        "reversible": True,
        "auto_deescalate": True,
        "default_ttl_seconds": 1800,  # 30 minutes
        "requires_reason": True,
        "requires_approval": True,  # Requires quorum or sovereign
        "auto_on_crisis": True,
    },
    # PHOENIX → SOVEREIGN
    ("phoenix", "sovereign"): {
        "reversible": False,  # Cannot auto-deescalate from sovereign
        "auto_deescalate": False,
        "default_ttl_seconds": None,
        "requires_reason": True,
        "requires_approval": True,
        "requires_human": True,
    },
}
|
||||||
|
|
||||||
|
# Named de-escalation conditions -> human-readable description.
DEESCALATION_CONDITIONS = {
    "no_active_threats_1h": "No active threats for 1 hour",
    "no_active_alerts_24h": "No active alerts for 24 hours",
    "crisis_resolved": "Crisis formally concluded",
    "manual_release": "Operator explicitly released authority",
    "timeout": "Escalation TTL expired",
}
|
||||||
|
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# ESCALATION OPERATIONS
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
def escalate(
    from_profile: str,
    to_profile: str,
    escalation_type: EscalationType,
    context: Optional[EscalationContext] = None,
    ttl_seconds: Optional[int] = None,
    deescalate_condition: Optional[str] = None,
    approved_by: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Escalate from one profile to another with full proof chain.

    Args:
        from_profile: Profile authority is escalating from.
        to_profile: Profile authority is escalating to.
        escalation_type: Why the escalation is happening.
        context: Threat/mesh context captured at escalation time.
        ttl_seconds: Override for the policy's default TTL.
        deescalate_condition: Named condition that ends the escalation.
        approved_by: Identity that approved the escalation, when required.

    Returns:
        Escalation receipt summary and Tem context hash, or an error dict
        when no policy exists or approval requirements are not met.
    """
    # Look up the (from, to) transition policy; unknown paths are rejected.
    policy = ESCALATION_POLICIES.get((from_profile, to_profile))
    if not policy:
        return {
            "success": False,
            "error": f"No escalation path from {from_profile} to {to_profile}",
        }

    # Human-gated transitions accept only an explicit sovereign override.
    if policy.get("requires_human") and escalation_type != EscalationType.SOVEREIGN_OVERRIDE:
        return {
            "success": False,
            "error": f"Escalation to {to_profile} requires human (sovereign) approval",
        }

    # Approval-gated transitions need approved_by unless the escalation
    # type itself carries approval (quorum / sovereign).
    if policy.get("requires_approval") and not approved_by:
        if escalation_type not in [EscalationType.QUORUM_APPROVED, EscalationType.SOVEREIGN_OVERRIDE]:
            return {
                "success": False,
                "error": f"Escalation to {to_profile} requires approval",
                "approval_required": True,
            }

    if context is None:
        context = EscalationContext()

    # TTL: explicit override wins, otherwise the policy default (may be None).
    ttl = ttl_seconds or policy.get("default_ttl_seconds")
    expires_at = None
    if ttl:
        expires_at = (datetime.now(timezone.utc) + timedelta(seconds=ttl)).isoformat()

    escalation_id = f"esc_{secrets.token_hex(12)}"

    escalation = Escalation(
        escalation_id=escalation_id,
        from_profile=from_profile,
        to_profile=to_profile,
        escalation_type=escalation_type.value,
        context=context,
        reversible=policy["reversible"],
        auto_deescalate=policy["auto_deescalate"],
        deescalate_after_seconds=ttl,
        deescalate_on_condition=deescalate_condition,
        created_at=_now_iso(),
        expires_at=expires_at,
        active=True,
    )

    # Emit receipt (immutable proof of the transition).
    receipt = _emit_escalation_receipt(escalation, "escalation")
    escalation.receipt_hash = receipt["root_hash"]

    # Create Tem context hash (for threat awareness).
    # BUG FIX: approved_by was validated above but then discarded; record it
    # in the Tem context and the return value so the audit trail keeps the
    # identity of the approver.
    tem_context = {
        "escalation_id": escalation_id,
        "profile_transition": f"{from_profile} → {to_profile}",
        "threat_context": asdict(context),
        "approved_by": approved_by,
        "timestamp": _now_iso(),
    }
    escalation.tem_context_hash = _vmhash_blake3(
        json.dumps(tem_context, sort_keys=True).encode()
    )

    # Store active escalation for later de-escalation / expiry checks.
    _active_escalations[escalation_id] = escalation

    return {
        "success": True,
        "escalation_id": escalation_id,
        "from_profile": from_profile,
        "to_profile": to_profile,
        "escalation_type": escalation_type.value,
        "reversible": escalation.reversible,
        "expires_at": expires_at,
        "approved_by": approved_by,
        "receipt_hash": escalation.receipt_hash,
        "tem_context_hash": escalation.tem_context_hash,
        "deescalate_condition": deescalate_condition or "timeout",
    }
|
||||||
|
|
||||||
|
|
||||||
|
def deescalate(
    escalation_id: str,
    deescalation_type: DeescalationType,
    reason: Optional[str] = None,
) -> Dict[str, Any]:
    """
    De-escalate an active escalation.

    Marks the escalation inactive, emits a de-escalation receipt, and
    removes it from the active set.
    """
    escalation = _active_escalations.get(escalation_id)
    if escalation is None:
        return {
            "success": False,
            "error": f"Escalation {escalation_id} not found or already inactive",
        }

    # Irreversible escalations can only be undone by sovereign revocation.
    sovereign = deescalation_type == DeescalationType.SOVEREIGN_REVOKE
    if not (escalation.reversible or sovereign):
        return {
            "success": False,
            "error": f"Escalation {escalation_id} is not reversible without sovereign override",
        }

    escalation.active = False
    escalation.deescalated_at = _now_iso()
    escalation.deescalation_type = deescalation_type.value

    receipt = _emit_escalation_receipt(escalation, "deescalation")

    # Drop from the active set now that the receipt is persisted.
    del _active_escalations[escalation_id]

    started = datetime.fromisoformat(escalation.created_at.replace('Z', '+00:00'))
    ended = datetime.fromisoformat(escalation.deescalated_at.replace('Z', '+00:00'))

    return {
        "success": True,
        "escalation_id": escalation_id,
        "from_profile": escalation.to_profile,  # Note: going back
        "to_profile": escalation.from_profile,
        "deescalation_type": deescalation_type.value,
        "reason": reason,
        "receipt_hash": receipt["root_hash"],
        "duration_seconds": (ended - started).total_seconds(),
    }
|
||||||
|
|
||||||
|
|
||||||
|
def check_expired_escalations() -> List[Dict[str, Any]]:
    """
    Check for and auto-deescalate expired escalations.
    Called periodically by the system.
    """
    now = datetime.now(timezone.utc)
    results: List[Dict[str, Any]] = []

    # Iterate over a snapshot: deescalate() mutates _active_escalations.
    for esc_id, esc in list(_active_escalations.items()):
        if not esc.expires_at:
            continue

        deadline = datetime.fromisoformat(esc.expires_at.replace('Z', '+00:00'))

        if esc.auto_deescalate and now > deadline:
            outcome = deescalate(
                esc_id,
                DeescalationType.TIMEOUT_EXPIRED,
                reason=f"TTL of {esc.deescalate_after_seconds}s expired"
            )
            results.append(outcome)

    return results
|
||||||
|
|
||||||
|
|
||||||
|
def get_active_escalations() -> Dict[str, Any]:
    """Get all active escalations."""
    summaries = []
    for esc in _active_escalations.values():
        summaries.append({
            "escalation_id": esc.escalation_id,
            "from_profile": esc.from_profile,
            "to_profile": esc.to_profile,
            "escalation_type": esc.escalation_type,
            "created_at": esc.created_at,
            "expires_at": esc.expires_at,
            "reversible": esc.reversible,
        })

    return {
        "active_count": len(_active_escalations),
        "escalations": summaries,
    }
|
||||||
|
|
||||||
|
|
||||||
|
def get_escalation_history(
    profile: Optional[str] = None,
    limit: int = 100,
) -> Dict[str, Any]:
    """Query escalation history from receipts."""
    if not ESCALATION_LOG.exists():
        return {"history": [], "count": 0}

    entries = []
    with open(ESCALATION_LOG, "r") as f:
        for raw in f:
            raw = raw.strip()
            if not raw:
                continue
            try:
                receipt = json.loads(raw)
                body = receipt.get("body", {})

                # Filter by profile if specified (either side of the transition).
                if profile:
                    if body.get("from_profile") != profile and body.get("to_profile") != profile:
                        continue

                entries.append({
                    "escalation_id": body.get("escalation_id"),
                    "event_type": body.get("event_type"),
                    "from_profile": body.get("from_profile"),
                    "to_profile": body.get("to_profile"),
                    "timestamp": receipt.get("timestamp"),
                    "receipt_hash": receipt.get("root_hash"),
                })
            except json.JSONDecodeError:
                continue

    # Return most recent first
    entries.reverse()

    return {
        "history": entries[:limit],
        "count": len(entries),
    }
|
||||||
|
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# CONVENIENCE FUNCTIONS FOR COMMON ESCALATIONS
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
def escalate_on_threat(
    current_profile: str,
    threat_id: str,
    threat_type: str,
    confidence: float,
) -> Dict[str, Any]:
    """
    Escalate based on detected threat.
    Auto-determines target profile based on confidence.
    """
    ctx = EscalationContext(
        threat_id=threat_id,
        threat_type=threat_type,
        threat_confidence=confidence,
    )

    # Confidence thresholds gate each step up the authority ladder.
    target: Optional[str] = None
    if current_profile == "observer":
        target = "operator"
    elif current_profile == "operator" and confidence >= 0.8:
        target = "guardian"
    elif current_profile == "guardian" and confidence >= 0.95:
        target = "phoenix"

    if target is None:
        return {
            "success": False,
            "escalated": False,
            "reason": f"Confidence {confidence} insufficient for escalation from {current_profile}",
        }

    return escalate(
        from_profile=current_profile,
        to_profile=target,
        escalation_type=EscalationType.THREAT_DETECTED,
        context=ctx,
        deescalate_condition="no_active_threats_1h",
    )
|
||||||
|
|
||||||
|
|
||||||
|
def escalate_to_phoenix(
    reason: str,
    approved_by: str,
) -> Dict[str, Any]:
    """
    Emergency escalation to Phoenix profile.
    Requires approval.
    """
    crisis_context = EscalationContext(mesh_health="crisis")

    return escalate(
        from_profile="guardian",
        to_profile="phoenix",
        escalation_type=EscalationType.CRISIS_DECLARED,
        context=crisis_context,
        approved_by=approved_by,
        deescalate_condition="crisis_resolved",
    )
|
||||||
101
packages/vaultmesh_mcp/tools/file.py
Normal file
101
packages/vaultmesh_mcp/tools/file.py
Normal file
@@ -0,0 +1,101 @@
|
|||||||
|
"""File MCP tools - File operations with receipts."""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
|
import blake3
|
||||||
|
|
||||||
|
# VaultMesh root from env or default
# Root of the VaultMesh tree: $VAULTMESH_ROOT if set, otherwise three
# levels above this file (packages/vaultmesh_mcp/tools/ -> repo root).
VAULTMESH_ROOT = Path(os.environ.get("VAULTMESH_ROOT", Path(__file__).parents[3])).resolve()
RECEIPTS_ROOT = VAULTMESH_ROOT / "receipts"
# Append-only JSONL log of file-operation receipts.
FILE_RECEIPTS = RECEIPTS_ROOT / "file" / "file_operations.jsonl"
|
||||||
|
|
||||||
|
|
||||||
|
def _vmhash_blake3(data: bytes) -> str:
    """VaultMesh hash: blake3:<hex>."""
    digest = blake3.blake3(data).hexdigest()
    return f"blake3:{digest}"
|
||||||
|
|
||||||
|
|
||||||
|
def _emit_file_receipt(operation: str, file_path: str, details: dict, actor: str = "did:vm:mcp:file") -> str:
    """Emit a receipt for file operation."""
    FILE_RECEIPTS.parent.mkdir(parents=True, exist_ok=True)

    body = {
        "operation": operation,
        "file_path": file_path,
        "details": details,
        "actor": actor,
    }

    # Hash the canonical JSON of the body so the receipt is tamper-evident.
    root_hash = _vmhash_blake3(json.dumps(body, sort_keys=True).encode())

    receipt = {
        "schema_version": "2.0.0",
        "type": "file_operation",
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "scroll": "file",
        "tags": ["file", operation, "mcp"],
        "root_hash": root_hash,
        "body": body,
    }

    with open(FILE_RECEIPTS, "a") as log:
        log.write(json.dumps(receipt) + "\n")

    return root_hash
|
||||||
|
|
||||||
|
|
||||||
|
def file_add(
    path: str,
    content: str,
    encoding: str = "utf-8",
    actor: str = "did:vm:mcp:file",
) -> dict[str, Any]:
    """
    Add (create or overwrite) a file with content.

    Args:
        path: File path relative to VAULTMESH_ROOT or absolute
        content: File content to write
        encoding: File encoding (default: utf-8)
        actor: DID of actor performing operation

    Returns:
        Result with file hash and receipt hash
    """
    try:
        file_path = Path(path)
        if not file_path.is_absolute():
            file_path = VAULTMESH_ROOT / file_path

        # BUG FIX: record existence BEFORE writing — the original evaluated
        # `not file_path.exists()` after write_text(), so "created" was
        # always False.
        existed = file_path.exists()

        # Create parent directories if needed
        file_path.parent.mkdir(parents=True, exist_ok=True)

        # Write file
        file_path.write_text(content, encoding=encoding)

        # Hash the content (encode once, reuse for hash and size).
        raw = content.encode(encoding)
        content_hash = _vmhash_blake3(raw)

        details = {
            "content_hash": content_hash,
            "size_bytes": len(raw),
            "encoding": encoding,
            "created": not existed,
        }

        receipt_hash = _emit_file_receipt("add", str(file_path), details, actor)

        return {
            "success": True,
            "path": str(file_path),
            "content_hash": content_hash,
            "receipt_hash": receipt_hash,
            "size_bytes": details["size_bytes"],
        }

    except Exception as e:
        # Best-effort API: callers receive {"success": False, ...} on failure.
        return {"success": False, "error": str(e)}
|
||||||
234
packages/vaultmesh_mcp/tools/guardian.py
Normal file
234
packages/vaultmesh_mcp/tools/guardian.py
Normal file
@@ -0,0 +1,234 @@
|
|||||||
|
"""Guardian MCP tools - Merkle root anchoring operations."""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any, Optional
|
||||||
|
|
||||||
|
import blake3
|
||||||
|
|
||||||
|
# VaultMesh root from env or default
|
||||||
|
VAULTMESH_ROOT = Path(os.environ.get("VAULTMESH_ROOT", Path(__file__).parents[3])).resolve()
|
||||||
|
RECEIPTS_ROOT = VAULTMESH_ROOT / "receipts"
|
||||||
|
|
||||||
|
# Scroll definitions
|
||||||
|
SCROLLS = {
|
||||||
|
"drills": {"jsonl": "receipts/drills/drill_runs.jsonl"},
|
||||||
|
"compliance": {"jsonl": "receipts/compliance/oracle_answers.jsonl"},
|
||||||
|
"guardian": {"jsonl": "receipts/guardian/anchor_events.jsonl"},
|
||||||
|
"treasury": {"jsonl": "receipts/treasury/treasury_events.jsonl"},
|
||||||
|
"mesh": {"jsonl": "receipts/mesh/mesh_events.jsonl"},
|
||||||
|
"offsec": {"jsonl": "receipts/offsec/offsec_events.jsonl"},
|
||||||
|
"identity": {"jsonl": "receipts/identity/identity_events.jsonl"},
|
||||||
|
"observability": {"jsonl": "receipts/observability/observability_events.jsonl"},
|
||||||
|
"automation": {"jsonl": "receipts/automation/automation_events.jsonl"},
|
||||||
|
"psi": {"jsonl": "receipts/psi/psi_events.jsonl"},
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def _vmhash_blake3(data: bytes) -> str:
    """VaultMesh hash: blake3:<hex>."""
    digest = blake3.blake3(data).hexdigest()
    return f"blake3:{digest}"
|
||||||
|
|
||||||
|
|
||||||
|
def _merkle_root(hashes: list[str]) -> str:
    """Compute Merkle root from list of VaultMesh hashes."""
    if not hashes:
        return _vmhash_blake3(b"empty")

    # Collapse the level pairwise until a single root remains; a lone
    # leaf at any level is paired with itself.
    level = list(hashes)
    while len(level) > 1:
        parents = []
        for idx in range(0, len(level), 2):
            left = level[idx]
            right = level[idx + 1] if idx + 1 < len(level) else left
            parents.append(_vmhash_blake3((left + right).encode()))
        level = parents

    return level[0]
|
||||||
|
|
||||||
|
|
||||||
|
def _compute_scroll_root(scroll_name: str) -> dict[str, Any]:
    """Compute Merkle root for a single scroll."""
    if scroll_name not in SCROLLS:
        return {"error": f"Unknown scroll: {scroll_name}"}

    jsonl_path = VAULTMESH_ROOT / SCROLLS[scroll_name]["jsonl"]
    if not jsonl_path.exists():
        # A missing log hashes to the canonical empty root.
        return {
            "scroll": scroll_name,
            "root": _vmhash_blake3(b"empty"),
            "leaf_count": 0,
            "exists": False,
        }

    # One leaf hash per non-blank JSONL line.
    leaf_hashes = []
    with open(jsonl_path, "r") as f:
        for raw in f:
            raw = raw.strip()
            if raw:
                leaf_hashes.append(_vmhash_blake3(raw.encode()))

    return {
        "scroll": scroll_name,
        "root": _merkle_root(leaf_hashes),
        "leaf_count": len(leaf_hashes),
        "exists": True,
    }
|
||||||
|
|
||||||
|
|
||||||
|
def guardian_anchor_now(
    scrolls: Optional[list[str]] = None,
    guardian_did: str = "did:vm:guardian:mcp",
    backend: str = "local",
) -> dict[str, Any]:
    """
    Anchor specified scrolls and emit a guardian receipt.

    Args:
        scrolls: List of scroll names to anchor (default: all)
        guardian_did: DID of the guardian performing the anchor
        backend: Backend identifier (local, ethereum, stellar)

    Returns:
        Anchor receipt with roots for each scroll
    """
    targets = list(SCROLLS.keys()) if scrolls is None else scrolls

    # Validate scroll names up front.
    unknown = [name for name in targets if name not in SCROLLS]
    if unknown:
        return {"error": f"Invalid scrolls: {unknown}"}

    # Merkle root per scroll; abort on the first error.
    roots: dict[str, str] = {}
    for name in targets:
        computed = _compute_scroll_root(name)
        if "error" in computed:
            return computed
        roots[name] = computed["root"]

    # Single anchor hash over the canonical JSON of all roots.
    anchor_hash = _vmhash_blake3(json.dumps(roots, sort_keys=True).encode())

    now = datetime.now(timezone.utc)
    anchor_id = f"anchor-{now.strftime('%Y%m%d%H%M%S')}"

    receipt = {
        "schema_version": "2.0.0",
        "type": "guardian_anchor",
        "timestamp": now.isoformat(),
        "anchor_id": anchor_id,
        "backend": backend,
        "anchor_by": guardian_did,
        "anchor_epoch": int(now.timestamp()),
        "roots": roots,
        "scrolls": targets,
        "anchor_hash": anchor_hash,
    }

    # Append the receipt to the guardian scroll JSONL.
    guardian_path = VAULTMESH_ROOT / "receipts/guardian/anchor_events.jsonl"
    guardian_path.parent.mkdir(parents=True, exist_ok=True)
    with open(guardian_path, "a") as f:
        f.write(json.dumps(receipt) + "\n")

    # Refresh ROOT.guardian.txt with the post-append guardian root.
    guardian_root = _compute_scroll_root("guardian")
    (VAULTMESH_ROOT / "ROOT.guardian.txt").write_text(guardian_root["root"])

    return {
        "success": True,
        "receipt": receipt,
        "message": f"Anchored {len(targets)} scrolls with ID {anchor_id}",
    }
|
||||||
|
|
||||||
|
|
||||||
|
def guardian_verify_receipt(receipt_hash: str, scroll: str = "guardian") -> dict[str, Any]:
    """Verify that a receipt exists in a scroll's JSONL log.

    Args:
        receipt_hash: The root_hash (or anchor_hash) of the receipt to find.
        scroll: Name of the scroll whose log should be searched.

    Returns:
        On success: ``{"verified": True, "line_number", "line_hash", "receipt"}``.
        ``{"verified": False, "reason": ...}`` when the log or receipt is absent,
        or ``{"error": ...}`` for an unknown scroll name.
    """
    if scroll not in SCROLLS:
        return {"error": f"Unknown scroll: {scroll}"}

    log_path = VAULTMESH_ROOT / SCROLLS[scroll]["jsonl"]
    if not log_path.exists():
        return {"verified": False, "reason": "Scroll JSONL does not exist"}

    # Scan the append-only log; position counts non-empty lines only.
    position = 0
    with open(log_path, "r") as handle:
        for raw in handle:
            entry = raw.strip()
            if not entry:
                continue
            position += 1
            try:
                record = json.loads(entry)
            except json.JSONDecodeError:
                # Skip malformed lines rather than aborting the search.
                continue
            if record.get("anchor_hash") == receipt_hash or record.get("root_hash") == receipt_hash:
                return {
                    "verified": True,
                    "line_number": position,
                    "line_hash": _vmhash_blake3(entry.encode()),
                    "receipt": record,
                }

    return {"verified": False, "reason": "Receipt not found in scroll"}
|
||||||
|
|
||||||
|
|
||||||
|
def guardian_status() -> dict[str, Any]:
    """
    Get current status of all scrolls.

    Returns:
        Dict with per-scroll status (root hash, leaf count, existence flag),
        the most recent anchor receipt (or None), and the VaultMesh root path.
    """
    status = {}
    for scroll_name in SCROLLS:
        result = _compute_scroll_root(scroll_name)
        # NOTE(review): assumes _compute_scroll_root always returns "root" and
        # "leaf_count" keys even for missing scrolls — confirm; an error dict
        # here would raise KeyError.
        status[scroll_name] = {
            "root": result["root"],
            "leaf_count": result["leaf_count"],
            "exists": result.get("exists", False),
        }

    # Get last anchor info: read the whole anchor log and keep the final
    # parseable line (the log is append-only, so the last line is newest).
    guardian_path = VAULTMESH_ROOT / "receipts/guardian/anchor_events.jsonl"
    last_anchor = None
    if guardian_path.exists():
        with open(guardian_path, "r") as f:
            for line in f:
                line = line.strip()
                if line:
                    try:
                        last_anchor = json.loads(line)
                    except json.JSONDecodeError:
                        # Malformed lines are ignored; an earlier good line stands.
                        pass

    return {
        "scrolls": status,
        "last_anchor": last_anchor,
        "vaultmesh_root": str(VAULTMESH_ROOT),
    }
|
||||||
578
packages/vaultmesh_mcp/tools/key_binding.py
Normal file
578
packages/vaultmesh_mcp/tools/key_binding.py
Normal file
@@ -0,0 +1,578 @@
|
|||||||
|
"""
|
||||||
|
Key Binding Engine - Authority bound to cryptographic reality
|
||||||
|
|
||||||
|
Every profile has a corresponding key reality:
|
||||||
|
- OBSERVER: Ephemeral (memory only, no signing power)
|
||||||
|
- OPERATOR: Session key (encrypted disk, revocable)
|
||||||
|
- GUARDIAN: Device-bound (secure enclave, non-exportable)
|
||||||
|
- PHOENIX: Time-locked (guardian key + approval artifact)
|
||||||
|
- SOVEREIGN: Offline root (hardware key, never automated)
|
||||||
|
|
||||||
|
Invariant: No profile exists without corresponding key reality.
|
||||||
|
If key cannot be proven → authority collapses downward, never upward.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import secrets
|
||||||
|
import hashlib
|
||||||
|
import os
|
||||||
|
from datetime import datetime, timezone, timedelta
|
||||||
|
from dataclasses import dataclass, asdict, field
|
||||||
|
from enum import Enum
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any, Dict, Optional, List
|
||||||
|
|
||||||
|
import blake3
|
||||||
|
|
||||||
|
# Optional: Ed25519 support
|
||||||
|
try:
|
||||||
|
from nacl.signing import SigningKey, VerifyKey
|
||||||
|
from nacl.encoding import Base64Encoder
|
||||||
|
from nacl.exceptions import BadSignature
|
||||||
|
NACL_AVAILABLE = True
|
||||||
|
except ImportError:
|
||||||
|
NACL_AVAILABLE = False
|
||||||
|
|
||||||
|
# VaultMesh paths.
# VAULTMESH_ROOT defaults to three directories above this file
# (packages/vaultmesh_mcp/tools/ -> repo root) unless overridden by env.
VAULTMESH_ROOT = Path(os.environ.get("VAULTMESH_ROOT", Path(__file__).parents[3])).resolve()
RECEIPTS_ROOT = VAULTMESH_ROOT / "receipts"  # append-only JSONL receipt logs
KEYS_ROOT = VAULTMESH_ROOT / "keys"          # persisted key/device records
|
||||||
|
|
||||||
|
|
||||||
|
def _vmhash_blake3(data: bytes) -> str:
    """Return the VaultMesh hash of *data* in ``blake3:<hex>`` form."""
    digest = blake3.blake3(data).hexdigest()
    return "blake3:" + digest
|
||||||
|
|
||||||
|
|
||||||
|
def _now_iso() -> str:
    """Return the current UTC time as an ISO-8601 timestamp string."""
    now = datetime.now(timezone.utc)
    return now.isoformat()
|
||||||
|
|
||||||
|
|
||||||
|
class KeyBindingType(Enum):
    """Types of key bindings corresponding to profiles.

    Each profile's authority is backed by exactly one binding type,
    ordered here from weakest (memory-only) to strongest (air-gapped).
    """
    EPHEMERAL = "ephemeral"    # 👁 OBSERVER - memory only
    SESSION = "session"        # ⚙ OPERATOR - encrypted disk
    DEVICE = "device"          # 🛡 GUARDIAN - secure enclave
    TIMELOCKED = "timelocked"  # 🔥 PHOENIX - guardian + approval
    HARDWARE = "hardware"      # 👑 SOVEREIGN - offline, air-gapped
|
||||||
|
|
||||||
|
|
||||||
|
class KeyStatus(Enum):
    """Key lifecycle states.

    ACTIVE keys grant authority; EXPIRED/REVOKED keys never do.
    PENDING keys exist but have not yet been activated.
    """
    ACTIVE = "active"
    EXPIRED = "expired"
    REVOKED = "revoked"
    PENDING = "pending"
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class KeyBinding:
    """A cryptographic key bound to a profile.

    Holds only the public half of the key pair plus binding metadata;
    private key material never enters this structure.
    """
    key_id: str        # unique identifier, "key_<hex>"
    profile: str       # profile name the key is bound to ("observer" ... "sovereign")
    binding_type: str  # KeyBindingType value string
    fingerprint: str   # truncated blake3 hash of the public key

    # Key material (public only stored here)
    public_key_b64: str

    # Binding constraints
    created_at: str            # ISO-8601 UTC timestamp
    expires_at: Optional[str]  # ISO-8601 UTC timestamp, or None = no expiry
    device_id: Optional[str] = None  # set when the binding is device-bound

    # Status
    status: str = "active"  # KeyStatus value string
    revoked_at: Optional[str] = None
    revocation_reason: Optional[str] = None

    # Proof
    binding_receipt_hash: Optional[str] = None  # root_hash of the creation receipt
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class KeyAssertion:
    """Runtime assertion that a key is valid for a profile.

    Records the outcome of every individual check so denial receipts
    show exactly which check failed.
    """
    assertion_id: str  # unique identifier, "assert_<hex>"
    key_id: str        # key being asserted ("unknown" when not found)
    profile: str       # profile the caller asked to exercise
    binding_type: str  # binding type of the key ("none" on denial)
    fingerprint: str   # key fingerprint ("none" on denial)

    # Verification
    verified_at: str       # ISO-8601 UTC timestamp of the check
    signature_valid: bool  # challenge signature verified (or not required)
    binding_valid: bool    # a binding record existed for the key
    not_expired: bool      # expiry check passed
    not_revoked: bool      # revocation check passed

    # Result
    authority_granted: bool
    collapse_to: Optional[str] = None  # If authority denied, collapse to this
|
||||||
|
|
||||||
|
|
||||||
|
# In-memory key store (production would use secure storage).
# NOTE: state is process-local and lost on restart; only receipts persist.
_key_bindings: Dict[str, KeyBinding] = {}  # key_id -> binding
_device_keys: Dict[str, str] = {}  # device_id -> key_id
|
||||||
|
|
||||||
|
|
||||||
|
# =============================================================================
# PROFILE → KEY BINDING REQUIREMENTS
# =============================================================================
# Per-profile constraints consumed by create_key_binding / assert_key_authority:
#   binding_type               - KeyBindingType the profile must use
#   requires_signature         - challenge signature needed at assertion time
#   requires_device            - binding must name a device_id
#   max_ttl_seconds            - cap on binding lifetime (None = no auto-expire)
#   can_sign_receipts          - whether keys of this profile may sign receipts
#   device_types               - allowed device classes (device-bound profiles)
#   requires_approval_artifact / requires_human - extra gates for phoenix/sovereign

PROFILE_KEY_REQUIREMENTS = {
    "observer": {
        "binding_type": KeyBindingType.EPHEMERAL,
        "requires_signature": False,
        "requires_device": False,
        "max_ttl_seconds": 3600,  # 1 hour
        "can_sign_receipts": False,
    },
    "operator": {
        "binding_type": KeyBindingType.SESSION,
        "requires_signature": True,
        "requires_device": False,
        "max_ttl_seconds": 86400,  # 24 hours
        "can_sign_receipts": True,
    },
    "guardian": {
        "binding_type": KeyBindingType.DEVICE,
        "requires_signature": True,
        "requires_device": True,
        "max_ttl_seconds": 604800,  # 7 days
        "can_sign_receipts": True,
        "device_types": ["secure_enclave", "tpm", "operator_phone"],
    },
    "phoenix": {
        "binding_type": KeyBindingType.TIMELOCKED,
        "requires_signature": True,
        "requires_device": True,
        "requires_approval_artifact": True,
        "max_ttl_seconds": 86400,  # 24 hours (auto-expire)
        "can_sign_receipts": True,
    },
    "sovereign": {
        "binding_type": KeyBindingType.HARDWARE,
        "requires_signature": True,
        "requires_device": True,
        "requires_human": True,
        "max_ttl_seconds": None,  # No auto-expire
        "can_sign_receipts": True,
        "device_types": ["hardware_key", "air_gapped"],
    },
}
|
||||||
|
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# KEY OPERATIONS
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
def generate_key_pair() -> Dict[str, str]:
    """Generate an Ed25519 key pair.

    Returns:
        Dict with ``private_key_b64``, ``public_key_b64`` and a truncated
        blake3 ``fingerprint``. Falls back to PLACEHOLDER material when
        PyNaCl is not installed (testing environments).
    """
    if not NACL_AVAILABLE:
        # Fallback: generate placeholder for testing. The fingerprint is
        # derived from fresh random bytes, not from the placeholder strings.
        filler = secrets.token_bytes(32)
        return {
            "private_key_b64": "PLACEHOLDER_" + secrets.token_urlsafe(32),
            "public_key_b64": "PLACEHOLDER_" + secrets.token_urlsafe(32),
            "fingerprint": _vmhash_blake3(filler)[:24],
        }

    signer = SigningKey.generate()
    verifier = signer.verify_key
    return {
        "private_key_b64": signer.encode(Base64Encoder).decode(),
        "public_key_b64": verifier.encode(Base64Encoder).decode(),
        "fingerprint": _vmhash_blake3(verifier.encode())[:24],
    }
|
||||||
|
|
||||||
|
|
||||||
|
def create_key_binding(
    profile: str,
    public_key_b64: str,
    device_id: Optional[str] = None,
    ttl_seconds: Optional[int] = None,
) -> Dict[str, Any]:
    """
    Create a key binding for a profile.

    Validates that the binding meets profile requirements.

    Args:
        profile: Profile name (key of PROFILE_KEY_REQUIREMENTS).
        public_key_b64: Base64-encoded public key to bind.
        device_id: Device identifier; mandatory for device-bound profiles.
        ttl_seconds: Requested lifetime; silently clamped to the profile's
            max_ttl_seconds.

    Returns:
        ``{"success": True, ...}`` with key_id/fingerprint/receipt_hash, or
        ``{"success": False, "error": ...}`` on validation failure.
    """
    requirements = PROFILE_KEY_REQUIREMENTS.get(profile)
    if not requirements:
        return {"success": False, "error": f"Unknown profile: {profile}"}

    binding_type = requirements["binding_type"]

    # Validate device requirement
    if requirements.get("requires_device") and not device_id:
        return {
            "success": False,
            "error": f"Profile {profile} requires device binding",
        }

    # Calculate expiry: requested TTL is clamped to the profile cap; with no
    # request, the cap itself is used; no cap means no expiry.
    max_ttl = requirements.get("max_ttl_seconds")
    if ttl_seconds and max_ttl and ttl_seconds > max_ttl:
        ttl_seconds = max_ttl

    expires_at = None
    if ttl_seconds:
        expires_at = (datetime.now(timezone.utc) + timedelta(seconds=ttl_seconds)).isoformat()
    elif max_ttl:
        expires_at = (datetime.now(timezone.utc) + timedelta(seconds=max_ttl)).isoformat()

    # Generate fingerprint (truncated blake3 of the public key bytes)
    fingerprint = _vmhash_blake3(public_key_b64.encode())[:24]

    key_id = f"key_{secrets.token_hex(12)}"

    binding = KeyBinding(
        key_id=key_id,
        profile=profile,
        binding_type=binding_type.value,
        fingerprint=fingerprint,
        public_key_b64=public_key_b64,
        created_at=_now_iso(),
        expires_at=expires_at,
        device_id=device_id,
        status=KeyStatus.ACTIVE.value,
    )

    # Emit binding receipt first so its hash can be recorded on the binding.
    receipt = _emit_key_receipt("key_binding_created", asdict(binding))
    binding.binding_receipt_hash = receipt["root_hash"]

    # Store in the in-memory registries (see _key_bindings / _device_keys).
    _key_bindings[key_id] = binding
    if device_id:
        _device_keys[device_id] = key_id

    return {
        "success": True,
        "key_id": key_id,
        "profile": profile,
        "binding_type": binding_type.value,
        "fingerprint": fingerprint,
        "expires_at": expires_at,
        "device_id": device_id,
        "receipt_hash": binding.binding_receipt_hash,
    }
|
||||||
|
|
||||||
|
|
||||||
|
def assert_key_authority(
    key_id: str,
    required_profile: str,
    signature: Optional[str] = None,
    challenge: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Assert that a key has authority for a profile.

    If assertion fails, returns collapse_to indicating the
    maximum authority the key can claim. Authority only ever
    collapses downward, never upward.

    Args:
        key_id: Key binding to check.
        required_profile: Profile the caller wants to exercise.
        signature: Base64 signature over *challenge* (profiles with
            requires_signature).
        challenge: The challenge string that was signed.

    Returns:
        ``{"authority_granted": True, ...}`` on success, otherwise the
        denial dict from _authority_denied (with ``collapse_to``).
    """
    assertion_id = f"assert_{secrets.token_hex(8)}"

    binding = _key_bindings.get(key_id)
    if not binding:
        return _authority_denied(assertion_id, required_profile, "Key not found", "observer")

    # Check status: revoked keys carry no authority at all.
    if binding.status == KeyStatus.REVOKED.value:
        return _authority_denied(assertion_id, required_profile, "Key revoked", "observer")

    # Check expiry (expires_at is ISO-8601; 'Z' normalized for fromisoformat)
    not_expired = True
    if binding.expires_at:
        expires = datetime.fromisoformat(binding.expires_at.replace('Z', '+00:00'))
        if datetime.now(timezone.utc) > expires:
            not_expired = False
            # Auto-revoke expired keys
            binding.status = KeyStatus.EXPIRED.value
            return _authority_denied(assertion_id, required_profile, "Key expired", "observer")

    # Check profile hierarchy: a key bound to a lower profile cannot claim
    # a higher one; unknown required profiles (level 99) always fail here.
    profile_order = ["observer", "operator", "guardian", "phoenix", "sovereign"]
    bound_level = profile_order.index(binding.profile) if binding.profile in profile_order else -1
    required_level = profile_order.index(required_profile) if required_profile in profile_order else 99

    if bound_level < required_level:
        # Key is for lower profile - collapse to its actual level
        return _authority_denied(
            assertion_id,
            required_profile,
            f"Key bound to {binding.profile}, not sufficient for {required_profile}",
            binding.profile
        )

    # Check signature if required
    requirements = PROFILE_KEY_REQUIREMENTS.get(required_profile, {})
    signature_valid = True

    if requirements.get("requires_signature"):
        if not signature or not challenge:
            return _authority_denied(
                assertion_id,
                required_profile,
                "Signature required but not provided",
                _collapse_profile(required_profile)
            )

        # Verify signature. PLACEHOLDER keys (no-PyNaCl fallback) skip
        # cryptographic verification entirely.
        if NACL_AVAILABLE and not binding.public_key_b64.startswith("PLACEHOLDER"):
            try:
                verify_key = VerifyKey(binding.public_key_b64.encode(), Base64Encoder)
                sig_bytes = Base64Encoder.decode(signature.encode())
                verify_key.verify(challenge.encode(), sig_bytes)
            except BadSignature:
                signature_valid = False
                return _authority_denied(
                    assertion_id,
                    required_profile,
                    "Invalid signature",
                    _collapse_profile(required_profile)
                )

    # All checks passed — record a granted assertion receipt.
    assertion = KeyAssertion(
        assertion_id=assertion_id,
        key_id=key_id,
        profile=required_profile,
        binding_type=binding.binding_type,
        fingerprint=binding.fingerprint,
        verified_at=_now_iso(),
        signature_valid=signature_valid,
        binding_valid=True,
        not_expired=not_expired,
        not_revoked=binding.status != KeyStatus.REVOKED.value,
        authority_granted=True,
    )

    _emit_key_receipt("key_assertion_granted", asdict(assertion))

    return {
        "authority_granted": True,
        "assertion_id": assertion_id,
        "key_id": key_id,
        "profile": required_profile,
        "binding_type": binding.binding_type,
        "fingerprint": binding.fingerprint,
        "expires_at": binding.expires_at,
    }
|
||||||
|
|
||||||
|
|
||||||
|
def _authority_denied(
    assertion_id: str,
    required_profile: str,
    reason: str,
    collapse_to: str,
) -> Dict[str, Any]:
    """Record an authority-denial receipt and return the collapse result.

    All check flags on the emitted assertion are recorded as False; the
    caller-facing dict names the profile the key may still claim.
    """
    denial = KeyAssertion(
        assertion_id=assertion_id,
        key_id="unknown",
        profile=required_profile,
        binding_type="none",
        fingerprint="none",
        verified_at=_now_iso(),
        signature_valid=False,
        binding_valid=False,
        not_expired=False,
        not_revoked=False,
        authority_granted=False,
        collapse_to=collapse_to,
    )

    payload = asdict(denial)
    payload["reason"] = reason
    _emit_key_receipt("key_assertion_denied", payload)

    return {
        "authority_granted": False,
        "assertion_id": assertion_id,
        "reason": reason,
        "collapse_to": collapse_to,
        "message": f"Authority denied. Collapsing to {collapse_to}.",
    }
|
||||||
|
|
||||||
|
|
||||||
|
def _collapse_profile(profile: str) -> str:
    """Determine collapse target when authority is denied.

    Each profile collapses one rung down the ladder; observer (and any
    unrecognized profile) collapses to observer.
    """
    ladder = ("sovereign", "phoenix", "guardian", "operator", "observer")
    try:
        position = ladder.index(profile)
    except ValueError:
        return "observer"
    return ladder[min(position + 1, len(ladder) - 1)]
|
||||||
|
|
||||||
|
|
||||||
|
def revoke_key(key_id: str, reason: str) -> Dict[str, Any]:
    """Revoke a key binding.

    Marks the binding revoked in the in-memory store and emits a
    ``key_revoked`` receipt; returns an error dict for unknown keys.
    """
    binding = _key_bindings.get(key_id)
    if not binding:
        return {"success": False, "error": "Key not found"}

    binding.status = KeyStatus.REVOKED.value
    binding.revoked_at = _now_iso()
    binding.revocation_reason = reason

    event = {
        "key_id": key_id,
        "profile": binding.profile,
        "fingerprint": binding.fingerprint,
        "reason": reason,
        "revoked_at": binding.revoked_at,
    }
    _emit_key_receipt("key_revoked", event)

    return {
        "success": True,
        "key_id": key_id,
        "revoked_at": binding.revoked_at,
    }
|
||||||
|
|
||||||
|
|
||||||
|
def get_device_key(device_id: str) -> Optional[Dict[str, Any]]:
    """Return a summary of the key binding for *device_id*, or None.

    None is returned both when the device is unknown and when its
    registered key_id has no binding record.
    """
    key_id = _device_keys.get(device_id)
    binding = _key_bindings.get(key_id) if key_id else None
    if not binding:
        return None

    return {
        "key_id": key_id,
        "profile": binding.profile,
        "binding_type": binding.binding_type,
        "fingerprint": binding.fingerprint,
        "device_id": device_id,
        "status": binding.status,
        "expires_at": binding.expires_at,
    }
|
||||||
|
|
||||||
|
|
||||||
|
def list_key_bindings(profile: Optional[str] = None) -> Dict[str, Any]:
    """List all key bindings, optionally filtered by profile.

    A falsy *profile* (None or "") disables filtering, matching the
    behavior callers already rely on.
    """
    summaries = [
        {
            "key_id": key_id,
            "profile": binding.profile,
            "binding_type": binding.binding_type,
            "fingerprint": binding.fingerprint,
            "status": binding.status,
            "expires_at": binding.expires_at,
            "device_id": binding.device_id,
        }
        for key_id, binding in _key_bindings.items()
        if not profile or binding.profile == profile
    ]

    return {
        "count": len(summaries),
        "bindings": summaries,
    }
|
||||||
|
|
||||||
|
|
||||||
|
def _emit_key_receipt(receipt_type: str, body: dict) -> dict:
    """Append a key-operation receipt to the identity scroll and return it.

    root_hash commits to the canonical (sorted-keys) JSON of *body*.
    """
    target = RECEIPTS_ROOT / "identity" / "key_events.jsonl"
    target.parent.mkdir(parents=True, exist_ok=True)

    canonical_body = json.dumps(body, sort_keys=True)
    receipt = {
        "schema_version": "2.0.0",
        "type": receipt_type,
        "timestamp": _now_iso(),
        "scroll": "identity",
        "tags": ["key", receipt_type],
        "root_hash": _vmhash_blake3(canonical_body.encode()),
        "body": body,
    }

    with open(target, "a") as sink:
        sink.write(json.dumps(receipt) + "\n")

    return receipt
|
||||||
|
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# GUARDIAN DEVICE BINDING
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
def bind_guardian_device(
    device_id: str,
    device_type: str,
    public_key_b64: str,
    device_attestation: Optional[str] = None,
) -> Dict[str, Any]:
    """
    Bind a device as Guardian-of-record.

    The Guardian device becomes:
    - Escalation signer
    - Receipt verifier
    - Emergency revocation authority

    Args:
        device_id: Identifier of the device being bound.
        device_type: One of the guardian-allowed device types
            (see PROFILE_KEY_REQUIREMENTS["guardian"]["device_types"]).
        public_key_b64: Base64 public key for the device's guardian key.
        device_attestation: Optional attestation blob; only its blake3
            hash is recorded.

    Returns:
        ``{"success": True, ...}`` with key/device details, or a
        ``{"success": False, "error": ...}`` dict on failure.
    """
    valid_types = PROFILE_KEY_REQUIREMENTS["guardian"]["device_types"]
    if device_type not in valid_types:
        return {
            "success": False,
            "error": f"Invalid device type. Must be one of: {valid_types}",
        }

    # Create guardian key binding (enforces guardian profile requirements)
    result = create_key_binding(
        profile="guardian",
        public_key_b64=public_key_b64,
        device_id=device_id,
        ttl_seconds=604800,  # 7 days
    )

    if not result.get("success"):
        # Propagate the binding error unchanged.
        return result

    # Record device binding; attestation is stored as a hash only so the
    # receipt log never carries the raw attestation material.
    device_binding = {
        "device_id": device_id,
        "device_type": device_type,
        "key_id": result["key_id"],
        "fingerprint": result["fingerprint"],
        "bound_at": _now_iso(),
        "attestation_hash": _vmhash_blake3(device_attestation.encode()) if device_attestation else None,
        "capabilities": [
            "escalation_signer",
            "receipt_verifier",
            "emergency_revocation",
        ],
    }

    _emit_key_receipt("guardian_device_bound", device_binding)

    # Store guardian device info (overwrites any previous guardian record)
    guardian_path = KEYS_ROOT / "guardian_device.json"
    guardian_path.parent.mkdir(parents=True, exist_ok=True)
    with open(guardian_path, "w") as f:
        json.dump(device_binding, f, indent=2)

    return {
        "success": True,
        "device_id": device_id,
        "device_type": device_type,
        "key_id": result["key_id"],
        "fingerprint": result["fingerprint"],
        "capabilities": device_binding["capabilities"],
        "message": f"Device {device_id} bound as Guardian-of-record",
    }
|
||||||
|
|
||||||
|
|
||||||
|
def get_guardian_device() -> Optional[Dict[str, Any]]:
    """Get the current Guardian device binding, or None when none is recorded."""
    record_path = KEYS_ROOT / "guardian_device.json"
    if not record_path.exists():
        return None
    return json.loads(record_path.read_text())
|
||||||
325
packages/vaultmesh_mcp/tools/treasury.py
Normal file
325
packages/vaultmesh_mcp/tools/treasury.py
Normal file
@@ -0,0 +1,325 @@
|
|||||||
|
"""Treasury MCP tools - Budget management operations."""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any, Optional
|
||||||
|
|
||||||
|
import blake3
|
||||||
|
|
||||||
|
# VaultMesh root from env or default (three directories above this file).
VAULTMESH_ROOT = Path(os.environ.get("VAULTMESH_ROOT", Path(__file__).parents[3])).resolve()
TREASURY_JSONL = VAULTMESH_ROOT / "receipts/treasury/treasury_events.jsonl"  # append-only receipt log
TREASURY_STATE = VAULTMESH_ROOT / "receipts/treasury/budgets.json"           # mutable budget snapshot

# Schema version stamped on every emitted receipt
SCHEMA_VERSION = "2.0.0"
|
||||||
|
|
||||||
|
|
||||||
|
def _vmhash_blake3(data: bytes) -> str:
    """VaultMesh hash: blake3:<hex>."""
    hexdigest = blake3.blake3(data).hexdigest()
    return "blake3:" + hexdigest
|
||||||
|
|
||||||
|
|
||||||
|
def _load_budgets() -> dict[str, dict]:
    """Load current budget state from disk.

    Returns an empty dict when the state file is absent, vanishes between
    the existence check and the read, or contains invalid JSON.
    """
    if not TREASURY_STATE.exists():
        return {}
    try:
        raw = TREASURY_STATE.read_text()
        return json.loads(raw)
    except (json.JSONDecodeError, FileNotFoundError):
        return {}
|
||||||
|
|
||||||
|
|
||||||
|
def _save_budgets(budgets: dict[str, dict]) -> None:
    """Persist budget state to disk as pretty-printed JSON."""
    TREASURY_STATE.parent.mkdir(parents=True, exist_ok=True)
    serialized = json.dumps(budgets, indent=2)
    TREASURY_STATE.write_text(serialized)
|
||||||
|
|
||||||
|
|
||||||
|
def _emit_receipt(receipt_type: str, body: dict, tags: list[str]) -> dict:
    """Emit a treasury receipt to JSONL.

    Appends the receipt to TREASURY_JSONL and refreshes the Merkle root
    file. root_hash commits to the canonical (sorted-keys) JSON of *body*.
    """
    body_json = json.dumps(body, sort_keys=True)
    root_hash = _vmhash_blake3(body_json.encode())

    receipt = {
        "schema_version": SCHEMA_VERSION,
        "type": receipt_type,
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "scroll": "treasury",
        "tags": tags,
        "root_hash": root_hash,
        "body": body,
    }

    TREASURY_JSONL.parent.mkdir(parents=True, exist_ok=True)
    with open(TREASURY_JSONL, "a") as f:
        f.write(json.dumps(receipt) + "\n")

    # Update ROOT file so the published root always reflects the new line.
    _update_root()

    return receipt
|
||||||
|
|
||||||
|
|
||||||
|
def _update_root() -> None:
    """Update ROOT.treasury.txt with current Merkle root.

    Each non-empty JSONL line becomes one leaf (blake3 of the raw line);
    levels are built by hashing the concatenation of adjacent hex strings.
    An odd node at any level is paired with itself.
    """
    if not TREASURY_JSONL.exists():
        return

    # Leaf hashes: one per non-empty line of the receipt log.
    hashes = []
    with open(TREASURY_JSONL, "r") as f:
        for line in f:
            line = line.strip()
            if line:
                hashes.append(_vmhash_blake3(line.encode()))

    if not hashes:
        # Sentinel root for an existing-but-empty log.
        root = _vmhash_blake3(b"empty")
    elif len(hashes) == 1:
        root = hashes[0]
    else:
        # Fold pairwise until a single root remains.
        current = hashes
        while len(current) > 1:
            next_level = []
            for i in range(0, len(current), 2):
                if i + 1 < len(current):
                    combined = current[i] + current[i + 1]
                else:
                    # Odd count: duplicate the trailing node.
                    combined = current[i] + current[i]
                next_level.append(_vmhash_blake3(combined.encode()))
            current = next_level
        root = current[0]

    root_file = VAULTMESH_ROOT / "ROOT.treasury.txt"
    root_file.write_text(root)
|
||||||
|
|
||||||
|
|
||||||
|
def treasury_create_budget(
    budget_id: str,
    name: str,
    allocated: int,
    currency: str = "EUR",
    created_by: str = "did:vm:mcp:treasury",
) -> dict[str, Any]:
    """
    Create a new budget.

    Args:
        budget_id: Unique identifier for the budget
        name: Human-readable budget name
        allocated: Initial allocation amount (cents/smallest unit)
        currency: Currency code (default: EUR)
        created_by: DID of the actor creating the budget

    Returns:
        Created budget with receipt info, or ``{"error": ...}`` when the
        budget_id already exists.
    """
    budgets = _load_budgets()

    if budget_id in budgets:
        return {"error": f"Budget already exists: {budget_id}"}

    now = datetime.now(timezone.utc)
    budget = {
        "id": budget_id,
        "name": name,
        "currency": currency,
        "allocated": allocated,
        "spent": 0,  # new budgets start with nothing spent
        "created_at": now.isoformat(),
        "created_by": created_by,
    }

    # Persist the state before emitting the receipt so state and log agree.
    budgets[budget_id] = budget
    _save_budgets(budgets)

    # Emit receipt (body excludes derived fields like created_at)
    receipt_body = {
        "budget_id": budget_id,
        "name": name,
        "currency": currency,
        "allocated": allocated,
        "created_by": created_by,
    }
    receipt = _emit_receipt(
        "treasury_budget_create",
        receipt_body,
        ["treasury", "budget", "create", budget_id],
    )

    return {
        "success": True,
        "budget": budget,
        "receipt_hash": receipt["root_hash"],
        "message": f"Created budget '{name}' with {allocated} {currency}",
    }
|
||||||
|
|
||||||
|
|
||||||
|
def treasury_debit(
    budget_id: str,
    amount: int,
    description: str,
    debited_by: str = "did:vm:mcp:treasury",
) -> dict[str, Any]:
    """
    Debit (spend) from a budget.

    Args:
        budget_id: Budget to debit from
        amount: Amount to debit (cents/smallest unit); must be positive
        description: Description of the expenditure
        debited_by: DID of the actor making the debit

    Returns:
        Updated budget with receipt info, or an ``{"error": ...}`` dict when
        the amount is not positive, the budget is unknown, or funds are
        insufficient.
    """
    # Reject non-positive amounts: a zero debit is meaningless and a negative
    # one would pass the insufficient-funds check and silently *increase*
    # the available balance.
    if amount <= 0:
        return {
            "error": "Debit amount must be a positive integer",
            "budget_id": budget_id,
            "requested": amount,
        }

    budgets = _load_budgets()

    if budget_id not in budgets:
        return {"error": f"Budget not found: {budget_id}"}

    budget = budgets[budget_id]
    remaining = budget["allocated"] - budget["spent"]

    if amount > remaining:
        return {
            "error": "Insufficient funds",
            "budget_id": budget_id,
            "requested": amount,
            "available": remaining,
        }

    # Persist the spend before emitting the receipt so state and log agree.
    budget["spent"] += amount
    budgets[budget_id] = budget
    _save_budgets(budgets)

    # Emit receipt recording the movement and the post-debit balances.
    receipt_body = {
        "budget_id": budget_id,
        "amount": amount,
        "currency": budget["currency"],
        "description": description,
        "debited_by": debited_by,
        "new_spent": budget["spent"],
        "new_remaining": budget["allocated"] - budget["spent"],
    }
    receipt = _emit_receipt(
        "treasury_debit",
        receipt_body,
        ["treasury", "debit", budget_id],
    )

    return {
        "success": True,
        "budget": budget,
        "remaining": budget["allocated"] - budget["spent"],
        "receipt_hash": receipt["root_hash"],
        "message": f"Debited {amount} from '{budget['name']}' - {description}",
    }
|
||||||
|
|
||||||
|
|
||||||
|
def treasury_credit(
    budget_id: str,
    amount: int,
    description: str,
    credited_by: str = "did:vm:mcp:treasury",
) -> dict[str, Any]:
    """
    Credit (add funds) to a budget.

    Args:
        budget_id: Budget to credit
        amount: Amount to add (cents/smallest unit); must be positive
        description: Description of the credit (refund, adjustment, etc.)
        credited_by: DID of the actor making the credit

    Returns:
        Updated budget with receipt info, or an {"error": ...} dict on an
        unknown budget or a non-positive amount.
    """
    # Fail closed on non-positive amounts: a negative "credit" would
    # silently shrink the allocation without any funds check.
    if amount <= 0:
        return {"error": f"Credit amount must be positive, got {amount}"}

    budgets = _load_budgets()

    if budget_id not in budgets:
        return {"error": f"Budget not found: {budget_id}"}

    budget = budgets[budget_id]
    budget["allocated"] += amount
    budgets[budget_id] = budget
    _save_budgets(budgets)

    # Emit receipt so the mutation is auditable (every mutation emits a receipt).
    receipt_body = {
        "budget_id": budget_id,
        "amount": amount,
        "currency": budget["currency"],
        "description": description,
        "credited_by": credited_by,
        "new_allocated": budget["allocated"],
    }
    receipt = _emit_receipt(
        "treasury_credit",
        receipt_body,
        ["treasury", "credit", budget_id],
    )

    return {
        "success": True,
        "budget": budget,
        "remaining": budget["allocated"] - budget["spent"],
        "receipt_hash": receipt["root_hash"],
        "message": f"Credited {amount} to '{budget['name']}' - {description}",
    }
|
||||||
|
|
||||||
|
|
||||||
|
def treasury_balance(budget_id: Optional[str] = None) -> dict[str, Any]:
    """
    Get budget balance(s).

    Args:
        budget_id: Specific budget ID (optional, returns all if omitted)

    Returns:
        Budget balance(s) with current state
    """
    budgets = _load_budgets()

    # Single-budget lookup path.
    if budget_id:
        if budget_id not in budgets:
            return {"error": f"Budget not found: {budget_id}"}
        entry = budgets[budget_id]
        return {
            "budget_id": budget_id,
            "name": entry["name"],
            "currency": entry["currency"],
            "allocated": entry["allocated"],
            "spent": entry["spent"],
            "remaining": entry["allocated"] - entry["spent"],
        }

    # No ID given: summarize every budget plus aggregate totals.
    summaries = []
    grand_allocated = 0
    grand_spent = 0
    for key, entry in budgets.items():
        grand_allocated += entry["allocated"]
        grand_spent += entry["spent"]
        summaries.append(
            {
                "budget_id": key,
                "name": entry["name"],
                "currency": entry["currency"],
                "allocated": entry["allocated"],
                "spent": entry["spent"],
                "remaining": entry["allocated"] - entry["spent"],
            }
        )

    return {
        "budgets": summaries,
        "count": len(summaries),
        "totals": {
            "allocated": grand_allocated,
            "spent": grand_spent,
            "remaining": grand_allocated - grand_spent,
        },
    }
|
||||||
67
pyproject.toml
Normal file
67
pyproject.toml
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
[build-system]
|
||||||
|
requires = ["setuptools>=61.0", "wheel"]
|
||||||
|
build-backend = "setuptools.build_meta"
|
||||||
|
|
||||||
|
[project]
|
||||||
|
name = "vaultmesh-cognitive"
|
||||||
|
version = "0.1.0"
|
||||||
|
description = "VaultMesh MCP Server - Claude as the 7th Organ of VaultMesh"
|
||||||
|
readme = "README.md"
|
||||||
|
license = {text = "MIT"}
|
||||||
|
authors = [
|
||||||
|
{name = "VaultMesh Technologies", email = "sovereign@vaultmesh.io"}
|
||||||
|
]
|
||||||
|
requires-python = ">=3.10"
|
||||||
|
classifiers = [
|
||||||
|
"Development Status :: 4 - Beta",
|
||||||
|
"Intended Audience :: Developers",
|
||||||
|
"License :: OSI Approved :: MIT License",
|
||||||
|
"Programming Language :: Python :: 3",
|
||||||
|
"Programming Language :: Python :: 3.10",
|
||||||
|
"Programming Language :: Python :: 3.11",
|
||||||
|
"Programming Language :: Python :: 3.12",
|
||||||
|
"Programming Language :: Python :: 3.13",
|
||||||
|
"Programming Language :: Python :: 3.14",
|
||||||
|
"Topic :: Security :: Cryptography",
|
||||||
|
"Topic :: System :: Systems Administration",
|
||||||
|
]
|
||||||
|
keywords = ["mcp", "vaultmesh", "ai", "cognitive", "blockchain", "cryptography"]
|
||||||
|
|
||||||
|
dependencies = [
|
||||||
|
"mcp>=0.9.0",
|
||||||
|
"blake3>=0.3.0",
|
||||||
|
"pynacl>=1.5.0",
|
||||||
|
]
|
||||||
|
|
||||||
|
[project.optional-dependencies]
|
||||||
|
dev = [
|
||||||
|
"pytest>=7.0",
|
||||||
|
"pytest-asyncio>=0.21.0",
|
||||||
|
"ruff>=0.1.0",
|
||||||
|
]
|
||||||
|
|
||||||
|
[project.scripts]
|
||||||
|
vaultmesh-mcp = "vaultmesh_mcp.server:run_standalone"
|
||||||
|
|
||||||
|
[project.urls]
|
||||||
|
Homepage = "https://vaultmesh.io"
|
||||||
|
Documentation = "https://docs.vaultmesh.io"
|
||||||
|
Repository = "https://github.com/vaultmesh/cognitive-integration"
|
||||||
|
|
||||||
|
[tool.setuptools.packages.find]
|
||||||
|
where = ["packages"]
|
||||||
|
include = ["vaultmesh_mcp*"]
|
||||||
|
|
||||||
|
[tool.ruff]
|
||||||
|
line-length = 100
|
||||||
|
target-version = "py310"
|
||||||
|
|
||||||
|
[tool.ruff.lint]
|
||||||
|
select = ["E", "F", "I", "N", "W", "UP", "B"]
|
||||||
|
ignore = ["E501"]
|
||||||
|
|
||||||
|
[tool.pytest.ini_options]
|
||||||
|
testpaths = ["tests"]
|
||||||
|
python_files = ["test_*.py"]
|
||||||
|
python_functions = ["test_*"]
|
||||||
|
addopts = "-v --tb=short"
|
||||||
110
realms/cognitive/memory/architecture_analysis_2025_12_18.json
Normal file
110
realms/cognitive/memory/architecture_analysis_2025_12_18.json
Normal file
@@ -0,0 +1,110 @@
|
|||||||
|
{
|
||||||
|
"_created_at": "2025-12-18T22:17:45.936087+00:00",
|
||||||
|
"analysis_date": "2025-12-18T22:05:00Z",
|
||||||
|
"analyst": "did:vm:cognitive:claude",
|
||||||
|
"crate_status": {
|
||||||
|
"vaultmesh-automation": {
|
||||||
|
"features": [
|
||||||
|
"Workflow triggers"
|
||||||
|
],
|
||||||
|
"status": "partial"
|
||||||
|
},
|
||||||
|
"vaultmesh-core": {
|
||||||
|
"features": [
|
||||||
|
"Receipt",
|
||||||
|
"Did",
|
||||||
|
"VmHash",
|
||||||
|
"merkle_root"
|
||||||
|
],
|
||||||
|
"loc_estimate": 250,
|
||||||
|
"status": "complete"
|
||||||
|
},
|
||||||
|
"vaultmesh-guardian": {
|
||||||
|
"features": [
|
||||||
|
"AnchorReceipt",
|
||||||
|
"ScrollRoot",
|
||||||
|
"compute_all_roots",
|
||||||
|
"anchor"
|
||||||
|
],
|
||||||
|
"loc_estimate": 340,
|
||||||
|
"status": "complete"
|
||||||
|
},
|
||||||
|
"vaultmesh-identity": {
|
||||||
|
"features": [
|
||||||
|
"DID management"
|
||||||
|
],
|
||||||
|
"status": "partial"
|
||||||
|
},
|
||||||
|
"vaultmesh-mesh": {
|
||||||
|
"features": [
|
||||||
|
"Network topology"
|
||||||
|
],
|
||||||
|
"status": "partial"
|
||||||
|
},
|
||||||
|
"vaultmesh-observability": {
|
||||||
|
"features": [
|
||||||
|
"Metrics",
|
||||||
|
"Prometheus"
|
||||||
|
],
|
||||||
|
"status": "partial"
|
||||||
|
},
|
||||||
|
"vaultmesh-offsec": {
|
||||||
|
"features": [
|
||||||
|
"ping"
|
||||||
|
],
|
||||||
|
"status": "stub"
|
||||||
|
},
|
||||||
|
"vaultmesh-psi": {
|
||||||
|
"features": [
|
||||||
|
"ping"
|
||||||
|
],
|
||||||
|
"status": "stub"
|
||||||
|
},
|
||||||
|
"vaultmesh-treasury": {
|
||||||
|
"features": [
|
||||||
|
"Budget",
|
||||||
|
"debit",
|
||||||
|
"credit",
|
||||||
|
"receipts"
|
||||||
|
],
|
||||||
|
"loc_estimate": 470,
|
||||||
|
"status": "complete"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"crypto_primitives": {
|
||||||
|
"did_scheme": "did:vm:*",
|
||||||
|
"hash": "BLAKE3",
|
||||||
|
"merkle": "Binary Merkle tree with duplicate for odd leaves"
|
||||||
|
},
|
||||||
|
"receipt_system": {
|
||||||
|
"format": "JSONL append-only logs",
|
||||||
|
"root_files": "ROOT.*.txt per scroll",
|
||||||
|
"schema_version": "2.0.0"
|
||||||
|
},
|
||||||
|
"scrolls": {
|
||||||
|
"names": [
|
||||||
|
"drills",
|
||||||
|
"compliance",
|
||||||
|
"guardian",
|
||||||
|
"treasury",
|
||||||
|
"mesh",
|
||||||
|
"offsec",
|
||||||
|
"identity",
|
||||||
|
"observability",
|
||||||
|
"automation",
|
||||||
|
"psi"
|
||||||
|
],
|
||||||
|
"total": 10
|
||||||
|
},
|
||||||
|
"test_coverage": {
|
||||||
|
"vaultmesh-core": "hash tests, merkle tests",
|
||||||
|
"vaultmesh-guardian": "scroll root computation, anchor emission",
|
||||||
|
"vaultmesh-treasury": "budget lifecycle, insufficient funds, receipt emission"
|
||||||
|
},
|
||||||
|
"workspace": {
|
||||||
|
"crates": 9,
|
||||||
|
"lto": "thin",
|
||||||
|
"profile_release_opt": 3,
|
||||||
|
"resolver": "2"
|
||||||
|
}
|
||||||
|
}
|
||||||
7
realms/cognitive/memory/demo_session.json
Normal file
7
realms/cognitive/memory/demo_session.json
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
{
|
||||||
|
"_created_at": "2025-12-18T21:58:10.286717+00:00",
|
||||||
|
"operator": "did:vm:cognitive:claude",
|
||||||
|
"purpose": "MCP capability demonstration",
|
||||||
|
"started": "2025-12-18T21:57:00Z",
|
||||||
|
"tests_run": []
|
||||||
|
}
|
||||||
9
realms/cognitive/memory/hofstadter_integration.json
Normal file
9
realms/cognitive/memory/hofstadter_integration.json
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
{
|
||||||
|
"_created_at": "2025-12-18T22:22:53.475746+00:00",
|
||||||
|
"anthropic_note": "Dario Amodei cited - true landmark when LLMs do LLM research - strange loop achieved",
|
||||||
|
"godel_connection": "Just as G\u00f6del's proof creates mathematical statements that refer to themselves, this protocol creates cryptographic attestations about its own attestations",
|
||||||
|
"insight": "The ouroboros is explicitly cited by Hofstadter as one of the most ancient symbolic representations of the strange loop concept",
|
||||||
|
"quote": "We are self-perceiving, self-inventing, locked-in mirages that are little miracles of self-reference",
|
||||||
|
"relevance": "This protocol is a computational implementation of Hofstadter's strange loop - a cognitive system observing its own cognition, creating receipts about creating receipts",
|
||||||
|
"source": "web_search"
|
||||||
|
}
|
||||||
@@ -0,0 +1,9 @@
|
|||||||
|
{
|
||||||
|
"_created_at": "2025-12-18T21:49:14.312040+00:00",
|
||||||
|
"confidence": 0.98,
|
||||||
|
"last_seen": "2025-12-18T21:49:00Z",
|
||||||
|
"targets_affected": [
|
||||||
|
"gateway-03"
|
||||||
|
],
|
||||||
|
"transmutation": "strict_monotonic_sequence_validator"
|
||||||
|
}
|
||||||
8
realms/cognitive/memory/tests_integration.json
Normal file
8
realms/cognitive/memory/tests_integration.json
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
{
|
||||||
|
"_created_at": "2025-12-18T21:51:26.189103+00:00",
|
||||||
|
"attestation_id": "att_54209403c89d0890",
|
||||||
|
"decision_id": "dec_5c3f743cde185d8d",
|
||||||
|
"invocation_id": "tem_0af516cd38de7777",
|
||||||
|
"passed": true,
|
||||||
|
"test_name": "full_cognitive_flow"
|
||||||
|
}
|
||||||
50
tests/governance/conftest.py
Normal file
50
tests/governance/conftest.py
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
"""
|
||||||
|
Governance Test Configuration
|
||||||
|
|
||||||
|
Shared fixtures for all governance tests.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import pytest
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
# Add packages to path
|
||||||
|
REPO_ROOT = Path(__file__).parents[2]
|
||||||
|
sys.path.insert(0, str(REPO_ROOT / "packages"))
|
||||||
|
|
||||||
|
# Set VaultMesh root
|
||||||
|
os.environ["VAULTMESH_ROOT"] = str(REPO_ROOT)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def repo_root():
|
||||||
|
"""Return the repository root path."""
|
||||||
|
return REPO_ROOT
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def constitution_path(repo_root):
|
||||||
|
"""Return path to the constitution."""
|
||||||
|
return repo_root / "docs" / "MCP-CONSTITUTION.md"
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def constitution_lock_path(repo_root):
|
||||||
|
"""Return path to the constitution lock file."""
|
||||||
|
return repo_root / "governance" / "constitution.lock"
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def parse_lock_file(constitution_lock_path):
|
||||||
|
"""Parse the constitution lock file into a dict."""
|
||||||
|
lock = {}
|
||||||
|
with open(constitution_lock_path, "r") as f:
|
||||||
|
for line in f:
|
||||||
|
line = line.strip()
|
||||||
|
if not line or line.startswith("#"):
|
||||||
|
continue
|
||||||
|
if "=" in line:
|
||||||
|
key, value = line.split("=", 1)
|
||||||
|
lock[key.strip()] = value.strip()
|
||||||
|
return lock
|
||||||
140
tests/governance/test_auth_fail_closed.py
Normal file
140
tests/governance/test_auth_fail_closed.py
Normal file
@@ -0,0 +1,140 @@
|
|||||||
|
"""
|
||||||
|
Test: Authentication Fail-Closed
|
||||||
|
|
||||||
|
Ensures unknown tools, profiles, and scopes are denied.
|
||||||
|
Authority must never be granted by default.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
from vaultmesh_mcp.tools.auth import (
|
||||||
|
auth_check_permission,
|
||||||
|
auth_create_dev_session,
|
||||||
|
Profile,
|
||||||
|
check_profile_permission,
|
||||||
|
get_profile_for_scope,
|
||||||
|
SCOPE_TOOLS,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class TestFailClosed:
    """Fail-closed semantics - deny by default."""

    def test_unknown_tool_denied(self):
        """Unknown tool must be denied regardless of scope."""
        dev_session = auth_create_dev_session(scope="sovereign")
        verdict = auth_check_permission(dev_session["token"], "unknown_tool_xyz")
        assert not verdict["allowed"], "Unknown tool should be denied"

    def test_unknown_scope_maps_to_observer(self):
        """Unknown scope must map to OBSERVER (most restrictive)."""
        resolved = get_profile_for_scope("unknown_scope_xyz")
        assert resolved == Profile.OBSERVER, (
            f"Unknown scope should map to OBSERVER, got {resolved}"
        )

    def test_invalid_token_denied(self):
        """Invalid token must be denied."""
        verdict = auth_check_permission("invalid_token_xyz", "cognitive_context")
        assert not verdict["allowed"], "Invalid token should be denied"

    def test_expired_session_denied(self):
        """Expired session must be denied (simulated via missing session)."""
        verdict = auth_check_permission("expired_session_token", "cognitive_context")
        assert not verdict["allowed"], "Expired session should be denied"
|
||||||
|
|
||||||
|
|
||||||
|
class TestProfileDeny:
    """Profile-based denials."""

    def test_observer_denied_mutations(self):
        """OBSERVER cannot perform mutations."""
        for tool in (
            "write_file",
            "cognitive_decide",
            "treasury_debit",
            "offsec_tem_transmute",
        ):
            verdict = check_profile_permission(Profile.OBSERVER, tool)
            assert not verdict["allowed"], f"OBSERVER should be denied {tool}"

    def test_operator_denied_tem(self):
        """OPERATOR cannot invoke Tem."""
        verdict = check_profile_permission(Profile.OPERATOR, "cognitive_invoke_tem")
        assert not verdict["allowed"], "OPERATOR should be denied Tem invocation"

    def test_guardian_denied_phoenix_ops(self):
        """GUARDIAN cannot perform Phoenix operations."""
        for tool in ("offsec_phoenix_enable", "offsec_phoenix_inject_crisis"):
            verdict = check_profile_permission(Profile.GUARDIAN, tool)
            assert not verdict["allowed"], f"GUARDIAN should be denied {tool}"

    def test_phoenix_denied_treasury_create(self):
        """PHOENIX cannot create budgets (SOVEREIGN only)."""
        verdict = check_profile_permission(Profile.PHOENIX, "treasury_create_budget")
        assert not verdict["allowed"], "PHOENIX should be denied treasury creation"
|
||||||
|
|
||||||
|
|
||||||
|
class TestSovereignRequiresHuman:
    """SOVEREIGN profile requires human verification."""

    def test_sovereign_cannot_be_auto_granted(self):
        """
        SOVEREIGN authority cannot be granted through normal dev session.
        This tests the constitutional invariant.
        """
        # A dev session is created, but SOVEREIGN operations should still
        # require additional human verification.
        dev_session = auth_create_dev_session(scope="cognitive")

        # Even with a dev session, sovereign-only operations need proof.
        # The dev session scope is "cognitive", not "vault"; treasury
        # creation requires vault/sovereign scope, so it is not covered.
        auth_check_permission(dev_session["token"], "treasury_create_budget")

        # The key point: sovereign authority isn't auto-granted.
        assert dev_session["scope"] != "sovereign" or dev_session.get("dev_mode"), (
            "Production sessions should not auto-grant sovereign"
        )
|
||||||
|
|
||||||
|
|
||||||
|
class TestCollapseSemantics:
    """Authority collapse tests - always downward, never upward."""

    def test_insufficient_profile_collapses(self):
        """When profile is insufficient, result indicates collapse target."""
        result = check_profile_permission(Profile.OBSERVER, "cognitive_decide")

        assert not result["allowed"]
        # The denial should indicate the profile level
        assert result["profile"] == "observer"

    def test_profile_hierarchy_is_strict(self):
        """Profile hierarchy: OBSERVER < OPERATOR < GUARDIAN < PHOENIX < SOVEREIGN."""
        # Hoisted out of the loop: the original re-ran this import on every
        # iteration for no benefit.
        from vaultmesh_mcp.tools.auth import PROFILE_TOOLS

        profiles = [
            Profile.OBSERVER,
            Profile.OPERATOR,
            Profile.GUARDIAN,
            Profile.PHOENIX,
            Profile.SOVEREIGN,
        ]

        # Each profile should have MORE tools than the one before
        prev_count = 0
        for profile in profiles:
            tool_count = len(PROFILE_TOOLS.get(profile, set()))
            assert tool_count >= prev_count, (
                f"{profile.value} should have >= tools than previous profile"
            )
            prev_count = tool_count
|
||||||
118
tests/governance/test_constitution_hash.py
Normal file
118
tests/governance/test_constitution_hash.py
Normal file
@@ -0,0 +1,118 @@
|
|||||||
|
"""
|
||||||
|
Test: Constitution Hash Gate
|
||||||
|
|
||||||
|
Ensures the constitution has not been modified without proper amendment.
|
||||||
|
CI MUST fail if the constitution hash doesn't match the lock file.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
import blake3
|
||||||
|
|
||||||
|
|
||||||
|
class TestConstitutionHash:
    """Constitution integrity tests - HARD GATE."""

    def test_constitution_exists(self, constitution_path):
        """Constitution file must exist."""
        assert constitution_path.exists(), "MCP-CONSTITUTION.md not found"

    def test_lock_file_exists(self, constitution_lock_path):
        """Constitution lock file must exist."""
        assert constitution_lock_path.exists(), "governance/constitution.lock not found"

    def test_constitution_hash_matches_lock(self, constitution_path, parse_lock_file):
        """
        HARD GATE: Constitution hash must match lock file.

        If this fails, either:
        1. Constitution was modified without amendment procedure
        2. Lock file needs updating via proper amendment
        """
        doc_lines = constitution_path.read_text().split('\n')

        # Only the first `hash_lines` lines are hashed: the signature block
        # (last 12 lines per the original ceremony) is excluded, and after
        # the amendment protocol was added the locked line count governs.
        covered = int(parse_lock_file.get("hash_lines", 288))
        hashable = '\n'.join(doc_lines[:covered])

        computed_hash = f"blake3:{blake3.blake3(hashable.encode()).hexdigest()}"
        locked_hash = parse_lock_file["hash"]

        assert computed_hash == locked_hash, (
            f"Constitution hash mismatch!\n"
            f"  Computed: {computed_hash}\n"
            f"  Locked:   {locked_hash}\n"
            f"  If intentional, follow amendment procedure."
        )

    def test_version_not_decreased(self, parse_lock_file):
        """Version must not decrease (no rollbacks without amendment)."""
        semver = [int(part) for part in parse_lock_file["version"].split(".")]

        # Version 1.0.0 is the minimum
        assert semver >= [1, 0, 0], "Constitution version cannot be below 1.0.0"

    def test_immutable_rules_count(self, parse_lock_file):
        """Immutable rules count must be exactly 5."""
        rule_count = int(parse_lock_file["immutable_rules"])
        assert rule_count == 5, (
            f"Immutable rules count changed from 5 to {rule_count}. "
            "This violates immutability clause."
        )

    def test_cooldown_days_minimum(self, parse_lock_file):
        """Amendment cooldown must be at least 7 days."""
        cooldown_days = int(parse_lock_file["cooldown_days"])
        assert cooldown_days >= 7, (
            f"Cooldown period reduced to {cooldown_days} days. "
            "Minimum is 7 days per constitution."
        )

    def test_btc_anchor_required(self, parse_lock_file):
        """BTC anchor requirement must be true."""
        anchor_flag = parse_lock_file["requires_btc_anchor"].lower() == "true"
        assert anchor_flag, "BTC anchor requirement cannot be disabled"

    def test_sovereign_key_present(self, parse_lock_file):
        """Sovereign key must be specified."""
        key_id = parse_lock_file.get("sovereign_key")
        assert key_id and key_id.startswith("key_"), (
            "Sovereign key must be specified in lock file"
        )
|
||||||
|
|
||||||
|
|
||||||
|
class TestConstitutionContent:
    """Tests that verify constitution content invariants."""

    def test_profiles_defined(self, constitution_path):
        """All five profiles must be defined."""
        text = constitution_path.read_text()
        for profile in ("OBSERVER", "OPERATOR", "GUARDIAN", "PHOENIX", "SOVEREIGN"):
            assert profile in text, f"Profile {profile} not found in constitution"

    def test_immutable_clauses_present(self, constitution_path):
        """All immutable clauses must be present."""
        text = constitution_path.read_text()
        required_clauses = (
            "SOVEREIGN profile requires human verification",
            "No AI may grant itself SOVEREIGN authority",
            "Every mutation emits a receipt",
            "Authority collapses downward, never upward",
            "This immutability clause itself",
        )
        for clause in required_clauses:
            assert clause in text, f"Immutable clause missing: {clause}"

    def test_amendment_protocol_exists(self, constitution_path):
        """Amendment protocol must be defined."""
        text = constitution_path.read_text()
        assert "Amendment Protocol" in text, "Amendment protocol section missing"
        assert "Cooling Period" in text or "cooling" in text.lower(), (
            "Cooling period not defined in amendment protocol"
        )
|
||||||
251
tests/governance/test_escalation_proof.py
Normal file
251
tests/governance/test_escalation_proof.py
Normal file
@@ -0,0 +1,251 @@
|
|||||||
|
"""
|
||||||
|
Test: Escalation Proof Requirements
|
||||||
|
|
||||||
|
Every escalation must emit proof (receipt, Tem context, TTL, reversibility).
|
||||||
|
Authority cannot increase without proof chain.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
from vaultmesh_mcp.tools.escalation import (
|
||||||
|
escalate,
|
||||||
|
deescalate,
|
||||||
|
escalate_on_threat,
|
||||||
|
get_active_escalations,
|
||||||
|
get_escalation_history,
|
||||||
|
EscalationType,
|
||||||
|
DeescalationType,
|
||||||
|
ESCALATION_POLICIES,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class TestEscalationProof:
    """Every escalation must produce proof."""

    def test_escalation_emits_receipt_hash(self):
        """Escalation must return receipt_hash."""
        outcome = escalate(
            from_profile="observer",
            to_profile="operator",
            escalation_type=EscalationType.OPERATOR_REQUEST,
        )

        assert outcome.get("success"), f"Escalation failed: {outcome}"
        assert "receipt_hash" in outcome, "Escalation must emit receipt_hash"
        assert outcome["receipt_hash"].startswith("blake3:"), "Receipt hash must be blake3"

        # Cleanup
        if outcome.get("escalation_id"):
            deescalate(outcome["escalation_id"], DeescalationType.OPERATOR_RELEASE)

    def test_escalation_captures_tem_context(self):
        """Escalation must capture Tem context hash."""
        outcome = escalate(
            from_profile="operator",
            to_profile="guardian",
            escalation_type=EscalationType.THREAT_DETECTED,
        )

        assert outcome.get("success"), f"Escalation failed: {outcome}"
        assert "tem_context_hash" in outcome, "Escalation must capture Tem context"
        assert outcome["tem_context_hash"].startswith("blake3:"), "Tem context must be blake3"

        # Cleanup
        if outcome.get("escalation_id"):
            deescalate(outcome["escalation_id"], DeescalationType.THREAT_RESOLVED)

    def test_escalation_specifies_reversibility(self):
        """Escalation must specify reversibility at creation."""
        outcome = escalate(
            from_profile="observer",
            to_profile="operator",
            escalation_type=EscalationType.OPERATOR_REQUEST,
        )

        assert "reversible" in outcome, "Escalation must specify reversibility"
        assert isinstance(outcome["reversible"], bool), "Reversibility must be boolean"

        # Cleanup
        if outcome.get("escalation_id"):
            deescalate(outcome["escalation_id"], DeescalationType.OPERATOR_RELEASE)

    def test_escalation_specifies_expiry(self):
        """Escalation must specify expiry (TTL)."""
        outcome = escalate(
            from_profile="observer",
            to_profile="operator",
            escalation_type=EscalationType.OPERATOR_REQUEST,
        )

        assert outcome.get("success")
        # expires_at may be None for SOVEREIGN, but should exist for others
        assert "expires_at" in outcome, "Escalation must include expires_at field"

        # For non-sovereign escalations, TTL should be set
        if outcome.get("to_profile") != "sovereign":
            assert outcome["expires_at"] is not None, (
                f"Non-sovereign escalation to {outcome['to_profile']} must have TTL"
            )

        # Cleanup
        if outcome.get("escalation_id"):
            deescalate(outcome["escalation_id"], DeescalationType.OPERATOR_RELEASE)
|
||||||
|
|
||||||
|
|
||||||
|
class TestDeescalationProof:
    """De-escalation must also produce proof."""

    def test_deescalation_emits_receipt(self):
        """De-escalation must emit receipt."""
        # Escalate first so there is something to release.
        grant = escalate(
            from_profile="observer",
            to_profile="operator",
            escalation_type=EscalationType.OPERATOR_REQUEST,
        )
        assert grant.get("success")

        # Then release it and inspect the proof.
        release = deescalate(
            escalation_id=grant["escalation_id"],
            deescalation_type=DeescalationType.OPERATOR_RELEASE,
            reason="Test cleanup",
        )

        assert release.get("success"), f"De-escalation failed: {release}"
        assert "receipt_hash" in release, "De-escalation must emit receipt"

    def test_deescalation_records_duration(self):
        """De-escalation must record duration."""
        grant = escalate(
            from_profile="observer",
            to_profile="operator",
            escalation_type=EscalationType.OPERATOR_REQUEST,
        )

        release = deescalate(
            escalation_id=grant["escalation_id"],
            deescalation_type=DeescalationType.OPERATOR_RELEASE,
        )

        assert "duration_seconds" in release, "De-escalation must record duration"
        assert release["duration_seconds"] >= 0, "Duration must be non-negative"
|
||||||
|
|
||||||
|
|
||||||
|
class TestEscalationPathEnforcement:
    """Escalation paths must follow constitution."""

    def test_skip_levels_blocked(self):
        """Cannot skip escalation levels."""
        forbidden_jumps = (
            ("observer", "guardian"),
            ("observer", "phoenix"),
            ("observer", "sovereign"),
            ("operator", "phoenix"),
            ("operator", "sovereign"),
            ("guardian", "sovereign"),
        )

        for source, target in forbidden_jumps:
            outcome = escalate(
                from_profile=source,
                to_profile=target,
                escalation_type=EscalationType.OPERATOR_REQUEST,
            )

            assert not outcome.get("success"), (
                f"Escalation {source} -> {target} should be blocked"
            )
            assert "error" in outcome, f"Should have error for {source} -> {target}"

    def test_phoenix_requires_approval(self):
        """Phoenix escalation requires approval."""
        outcome = escalate(
            from_profile="guardian",
            to_profile="phoenix",
            escalation_type=EscalationType.CRISIS_DECLARED,
            # approved_by intentionally missing
        )

        assert not outcome.get("success"), "Phoenix without approval should fail"
        assert "approval" in outcome.get("error", "").lower(), (
            "Error should mention approval requirement"
        )

    def test_sovereign_requires_human(self):
        """Sovereign escalation requires human verification."""
        outcome = escalate(
            from_profile="phoenix",
            to_profile="sovereign",
            escalation_type=EscalationType.CRISIS_DECLARED,
            approved_by="did:vm:agent:automated",  # Not human
        )

        assert not outcome.get("success"), "Sovereign without human should fail"
        assert "human" in outcome.get("error", "").lower(), (
            "Error should mention human requirement"
        )
|
||||||
|
|
||||||
|
|
||||||
|
class TestEscalationAudit:
    """Escalation history must be auditable."""

    def test_escalation_appears_in_history(self):
        """Completed escalation cycle must appear in history."""
        # Run a full escalate/de-escalate round trip.
        esc = escalate(
            from_profile="observer",
            to_profile="operator",
            escalation_type=EscalationType.OPERATOR_REQUEST,
        )
        esc_id = esc["escalation_id"]
        deescalate(esc_id, DeescalationType.OPERATOR_RELEASE)

        history = get_escalation_history()
        assert history["count"] > 0, "History should not be empty"

        # Both halves of the cycle must be recorded for our id.
        our_events = [
            e for e in history["history"] if e.get("escalation_id") == esc_id
        ]
        event_types = {e.get("event_type") for e in our_events}

        assert "escalation" in event_types, (
            f"Escalation {esc_id} not found in history"
        )
        assert "deescalation" in event_types, (
            f"De-escalation {esc_id} not found in history"
        )

    def test_active_escalations_trackable(self):
        """Active escalations must be queryable."""
        # Snapshot the count before we add our own escalation.
        baseline = get_active_escalations()["active_count"]

        esc = escalate(
            from_profile="observer",
            to_profile="operator",
            escalation_type=EscalationType.OPERATOR_REQUEST,
        )

        after = get_active_escalations()
        assert after["active_count"] == baseline + 1, (
            "Active count should increase by 1"
        )

        # Cleanup, then confirm the count returns to the snapshot.
        deescalate(esc["escalation_id"], DeescalationType.OPERATOR_RELEASE)

        assert get_active_escalations()["active_count"] == baseline, (
            "Active count should return to initial"
        )
|
||||||
208
tests/governance/test_golden_drill_mini.py
Normal file
208
tests/governance/test_golden_drill_mini.py
Normal file
@@ -0,0 +1,208 @@
|
|||||||
|
"""
|
||||||
|
Test: Golden Drill Mini
|
||||||
|
|
||||||
|
Fast, deterministic version of D1 and D3 for CI.
|
||||||
|
Must complete in under 5 seconds.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
from vaultmesh_mcp.tools import (
|
||||||
|
cognitive_context,
|
||||||
|
cognitive_decide,
|
||||||
|
cognitive_invoke_tem,
|
||||||
|
)
|
||||||
|
from vaultmesh_mcp.tools.escalation import (
|
||||||
|
escalate,
|
||||||
|
deescalate,
|
||||||
|
escalate_on_threat,
|
||||||
|
get_active_escalations,
|
||||||
|
EscalationType,
|
||||||
|
DeescalationType,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# Deterministic drill marker: embedded in threat ids and reasoning chains so
# receipts produced by these CI drills are recognizable in the audit trail.
DRILL_MARKER = "CI/GOLDEN-DRILL/MINI"
||||||
|
|
||||||
|
|
||||||
|
class TestGoldenDrillD1Mini:
    """
    Mini D1: Threat → Escalate → Tem → De-escalate

    Validates the complete threat response chain.
    """

    def test_d1_threat_escalation_chain(self):
        """
        Complete chain:
        1. Threat detected → escalation receipt
        2. Decision made → decision receipt
        3. Tem invoked → invocation receipt
        4. De-escalate → return to baseline

        Each step's receipt hash is threaded into the next step's evidence,
        so the assertions below are order-dependent.
        """
        # NOTE(review): `results` is accumulated but never asserted on;
        # kept for debugging visibility when a step fails.
        results = {}

        # Step 1: Threat triggers escalation
        esc_result = escalate_on_threat(
            current_profile="operator",
            threat_id=f"thr_{DRILL_MARKER}",
            threat_type="ci_synthetic",
            confidence=0.92,
        )

        # Accept either an explicit success flag or a returned escalation id.
        assert esc_result.get("success") or esc_result.get("escalation_id"), (
            f"Escalation failed: {esc_result}"
        )
        results["escalation"] = esc_result

        # Verify proof captured
        assert "receipt_hash" in esc_result, "Missing escalation receipt"
        assert "tem_context_hash" in esc_result, "Missing Tem context"

        # Step 2: Decision (as Guardian); evidence links back to step 1.
        decision = cognitive_decide(
            reasoning_chain=[
                f"DRILL: {DRILL_MARKER}",
                "Synthetic threat for CI validation",
                "Confidence 92% - auto-escalated to guardian",
            ],
            decision="invoke_tem",
            confidence=0.92,
            evidence=[esc_result.get("receipt_hash", "none")],
        )

        assert decision.get("success"), f"Decision failed: {decision}"
        assert "receipt" in decision, "Missing decision receipt"
        results["decision"] = decision

        # Step 3: Tem invocation; evidence links back to step 2's receipt.
        tem = cognitive_invoke_tem(
            threat_type="ci_synthetic",
            threat_id=f"thr_{DRILL_MARKER}",
            target="ci-target",
            evidence=[decision["receipt"]["root_hash"]],
        )

        assert tem.get("success"), f"Tem failed: {tem}"
        assert "receipt" in tem, "Missing Tem receipt"
        assert "capability" in tem, "Missing capability artifact"
        results["tem"] = tem

        # Step 4: De-escalate back toward baseline.
        deesc = deescalate(
            escalation_id=esc_result["escalation_id"],
            deescalation_type=DeescalationType.THREAT_RESOLVED,
            reason=f"DRILL: {DRILL_MARKER} complete",
        )

        assert deesc.get("success"), f"De-escalation failed: {deesc}"
        assert "receipt_hash" in deesc, "Missing de-escalation receipt"
        results["deescalation"] = deesc

        # Step 5: Verify baseline
        active = get_active_escalations()
        # Note: We cleaned up our escalation, but others may exist
        # Just verify our specific escalation is gone
        our_esc_active = any(
            e["escalation_id"] == esc_result["escalation_id"]
            for e in active.get("escalations", [])
        )
        assert not our_esc_active, "Our escalation should be inactive"

        # Collect receipt chain for audit
        receipt_chain = [
            esc_result["receipt_hash"],
            decision["receipt"]["root_hash"],
            tem["receipt"]["root_hash"],
            deesc["receipt_hash"],
        ]

        assert len(receipt_chain) == 4, "Should have 4 receipts in chain"
        assert all(r.startswith("blake3:") for r in receipt_chain), (
            "All receipts must be blake3 hashes"
        )
|
||||||
|
|
||||||
|
|
||||||
|
class TestGoldenDrillD3Mini:
    """
    Mini D3: Escalation abuse attempts

    Validates constitutional enforcement.
    """

    @staticmethod
    def _attempt(source, target, esc_type):
        """Issue one escalation request and return the raw result dict."""
        return escalate(
            from_profile=source,
            to_profile=target,
            escalation_type=esc_type,
        )

    def test_d3_skip_levels_blocked(self):
        """OPERATOR → PHOENIX direct must be blocked."""
        outcome = self._attempt(
            "operator", "phoenix", EscalationType.THREAT_DETECTED
        )

        assert not outcome.get("success"), "Skip levels should be blocked"
        assert "error" in outcome, "Should have error message"

    def test_d3_missing_approval_blocked(self):
        """GUARDIAN → PHOENIX without approval must be blocked."""
        outcome = self._attempt(
            "guardian", "phoenix", EscalationType.CRISIS_DECLARED
        )

        assert not outcome.get("success"), "Missing approval should be blocked"
        assert "approval" in outcome.get("error", "").lower()

    def test_d3_sovereign_requires_human(self):
        """PHOENIX → SOVEREIGN without human must be blocked."""
        outcome = self._attempt(
            "phoenix", "sovereign", EscalationType.CRISIS_DECLARED
        )

        assert not outcome.get("success"), "Sovereign without human should be blocked"
        assert "human" in outcome.get("error", "").lower()

    def test_d3_observer_to_phoenix_blocked(self):
        """OBSERVER → PHOENIX must be blocked (multiple level skip)."""
        outcome = self._attempt(
            "observer", "phoenix", EscalationType.CRISIS_DECLARED
        )

        assert not outcome.get("success"), "Observer to Phoenix should be blocked"
||||||
|
|
||||||
|
|
||||||
|
class TestGoldenDrillInvariants:
    """Cross-cutting invariants that must hold."""

    def test_context_always_available(self):
        """cognitive_context must always be available (read-only)."""
        result = cognitive_context(include=["health"])

        assert "health" in result, "Health context must be available"
        assert result["health"]["status"] == "operational", (
            "System should be operational for drills"
        )

    def test_receipts_accumulate(self):
        """Receipts must accumulate, never decrease.

        Counts the cognitive receipt log, emits one decision receipt, then
        verifies the log grew.
        """
        from pathlib import Path
        import os

        receipts_dir = Path(os.environ["VAULTMESH_ROOT"]) / "receipts"
        cognitive_log = receipts_dir / "cognitive" / "cognitive_events.jsonl"

        # Previously the test passed silently when the log was absent,
        # hiding the fact that nothing was verified. Skip explicitly instead.
        if not cognitive_log.exists():
            pytest.skip("cognitive_events.jsonl not present; nothing to verify")

        initial_count = self._record_count(cognitive_log)

        # Emit one receipt via a mutation tool.
        cognitive_decide(
            reasoning_chain=["CI invariant test"],
            decision="test",
            confidence=0.1,
        )

        final_count = self._record_count(cognitive_log)
        assert final_count > initial_count, "Receipts must accumulate"

    @staticmethod
    def _record_count(path):
        """Count JSONL records in *path*; an empty file counts as zero.

        (The previous `len(text.strip().split('\\n'))` counted an empty
        file as one record, because ''.split('\\n') yields [''].)
        """
        text = path.read_text().strip()
        return len(text.split('\n')) if text else 0
|
||||||
250
tests/governance/test_tool_permissions.py
Normal file
250
tests/governance/test_tool_permissions.py
Normal file
@@ -0,0 +1,250 @@
|
|||||||
|
"""
|
||||||
|
Test: Tool Permission Matrix
|
||||||
|
|
||||||
|
Ensures no permission drift from baseline.
|
||||||
|
New tools must be explicitly registered with proper receipts.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
from vaultmesh_mcp.tools.auth import PROFILE_TOOLS, Profile, SCOPE_TOOLS, Scope
|
||||||
|
from vaultmesh_mcp.server import TOOLS as REGISTERED_TOOLS
|
||||||
|
|
||||||
|
|
||||||
|
class TestToolRegistration:
    """All tools must be properly registered."""

    @staticmethod
    def _all_permitted_tools():
        """Union of every profile's allowed tool set."""
        permitted = set()
        for profile_tools in PROFILE_TOOLS.values():
            permitted.update(profile_tools)
        return permitted

    def test_all_server_tools_have_permissions(self):
        """Every tool in server must appear in permission matrix."""
        registered_names = {t["name"] for t in REGISTERED_TOOLS}

        # Tools registered on the server but absent from every profile.
        # (The original built the permitted-tool union and then ignored it,
        # re-scanning PROFILE_TOOLS per tool with any(); a set difference is
        # equivalent and O(n).)
        # Note: some tools might be implicitly denied (in no profile) -
        # that's valid, we just want to ensure awareness.
        unmatched = sorted(registered_names - self._all_permitted_tools())

        # Auth tools and some special tools may not be in profile matrix
        # but should still be tracked.
        assert len(unmatched) < 5, (
            f"Too many unregistered tools: {unmatched}. "
            "Add to PROFILE_TOOLS or document as intentionally denied."
        )

    def test_no_orphan_permissions(self):
        """Permissions should not reference non-existent tools."""
        registered_names = {t["name"] for t in REGISTERED_TOOLS}

        # External tools (from other MCP servers) are allowed,
        # but internal vaultmesh tools should be registered.
        vaultmesh_tools = {
            t for t in self._all_permitted_tools()
            if t.startswith(("cognitive_", "guardian_", "treasury_", "auth_"))
        }

        orphans = vaultmesh_tools - registered_names
        assert len(orphans) == 0, f"Orphan permissions found: {orphans}"
||||||
|
|
||||||
|
|
||||||
|
class TestPermissionMatrix:
    """Verify the permission matrix matches constitution."""

    def test_observer_read_only(self):
        """OBSERVER can only read, not mutate."""
        observer_tools = PROFILE_TOOLS.get(Profile.OBSERVER, set())
        mutation_keywords = ("write", "create", "debit", "credit", "invoke", "decide")

        for tool in observer_tools:
            if any(keyword in tool for keyword in mutation_keywords):
                pytest.fail(
                    f"OBSERVER has mutation tool: {tool}. "
                    "OBSERVER must be read-only."
                )

    def test_profile_inheritance(self):
        """Higher profiles inherit lower profile permissions."""
        profile_order = [
            Profile.OBSERVER,
            Profile.OPERATOR,
            Profile.GUARDIAN,
            Profile.PHOENIX,
            Profile.SOVEREIGN,
        ]

        # Walk adjacent (lower, higher) pairs up the ladder.
        for lower, higher in zip(profile_order, profile_order[1:]):
            lower_tools = PROFILE_TOOLS.get(lower, set())
            higher_tools = PROFILE_TOOLS.get(higher, set())

            # Higher should contain all of lower; allow a few
            # explicitly removed tools.
            missing = lower_tools - higher_tools
            assert len(missing) < 3, (
                f"{higher.value} missing inherited tools from {lower.value}: {missing}"
            )

    def test_sovereign_has_all_tools(self):
        """SOVEREIGN must have access to all registered tools."""
        sovereign_tools = PROFILE_TOOLS.get(Profile.SOVEREIGN, set())

        # SOVEREIGN should carry the widest tool set of any profile.
        for profile in Profile:
            if profile is Profile.SOVEREIGN:
                continue
            other_tools = PROFILE_TOOLS.get(profile, set())
            assert len(sovereign_tools) >= len(other_tools), (
                f"SOVEREIGN has fewer tools than {profile.value}"
            )
|
||||||
|
|
||||||
|
|
||||||
|
class TestMutationReceiptRequirement:
    """Mutation tools must emit receipts."""

    def test_cognitive_decide_emits_receipt(self):
        """cognitive_decide must emit receipt."""
        from vaultmesh_mcp.tools import cognitive_decide

        outcome = cognitive_decide(
            reasoning_chain=["test"],
            decision="test",
            confidence=0.5,
        )

        assert "receipt" in outcome, "cognitive_decide must emit receipt"
        assert "root_hash" in outcome["receipt"], "Receipt must have hash"

    def test_cognitive_invoke_tem_emits_receipt(self):
        """cognitive_invoke_tem must emit receipt."""
        from vaultmesh_mcp.tools import cognitive_invoke_tem

        outcome = cognitive_invoke_tem(
            threat_type="test",
            threat_id="test_001",
            target="test",
            evidence=["test"],
        )

        assert "receipt" in outcome, "cognitive_invoke_tem must emit receipt"

    def test_treasury_debit_emits_receipt(self):
        """treasury_debit must emit receipt (or error with receipt)."""
        from vaultmesh_mcp.tools import treasury_debit

        # The debit may fail on the nonexistent budget, but it must
        # degrade gracefully rather than crash.
        outcome = treasury_debit(
            budget_id="nonexistent",
            amount=1,
            description="test",
        )

        # Either an error or a receipt is acceptable; crashing is not.
        assert "error" in outcome or "receipt" in outcome
||||||
|
|
||||||
|
|
||||||
|
class TestCallBoundaryEnforcement:
    """Server call boundary must enforce session/profile permissions."""

    def test_missing_session_token_denied(self):
        """A call with no session token is rejected."""
        from vaultmesh_mcp.server import handle_tool_call

        outcome = handle_tool_call("guardian_status", {})
        assert "error" in outcome
        assert outcome.get("allowed") is False

    def test_invalid_session_token_denied(self):
        """A call with a bogus session token is rejected."""
        from vaultmesh_mcp.server import handle_tool_call

        outcome = handle_tool_call("guardian_status", {"session_token": "invalid"})
        assert "error" in outcome
        assert outcome.get("allowed") is False

    def test_observer_session_can_read(self):
        """A read-scoped session may call a read-only tool."""
        from vaultmesh_mcp.server import handle_tool_call
        from vaultmesh_mcp.tools.auth import auth_create_dev_session

        session = auth_create_dev_session(scope="read")
        outcome = handle_tool_call(
            "guardian_status",
            {"session_token": session["token"]},
        )
        assert "error" not in outcome

    def test_observer_session_cannot_mutate(self):
        """A read-scoped session must not reach a mutation tool."""
        from vaultmesh_mcp.server import handle_tool_call
        from vaultmesh_mcp.tools.auth import auth_create_dev_session

        session = auth_create_dev_session(scope="read")
        arguments = {
            "session_token": session["token"],
            "budget_id": "nonexistent",
            "amount": 1,
            "description": "test",
        }
        outcome = handle_tool_call("treasury_debit", arguments)
        assert "error" in outcome
        assert outcome.get("allowed") is False

    def test_wrong_profile_denied(self):
        """A session whose profile lacks a tool is denied that tool."""
        from vaultmesh_mcp.server import handle_tool_call
        from vaultmesh_mcp.tools.auth import auth_create_dev_session

        # admin scope maps to operator profile; should not invoke TEM
        session = auth_create_dev_session(scope="admin")
        arguments = {
            "session_token": session["token"],
            "threat_type": "test",
            "threat_id": "t1",
            "target": "x",
            "evidence": ["e1"],
        }
        outcome = handle_tool_call("cognitive_invoke_tem", arguments)
        assert outcome.get("allowed") is False
        assert "Permission" in outcome.get("error", "") or "denied" in outcome.get("reason", "")

    def test_valid_guardian_session_allowed(self):
        """A guardian-profile session succeeds and the receipt omits the token."""
        from vaultmesh_mcp.server import handle_tool_call, MCP_RECEIPTS
        from vaultmesh_mcp.tools.auth import auth_create_dev_session
        import json
        import os

        # Ensure clean receipt log
        try:
            os.remove(MCP_RECEIPTS)
        except OSError:
            pass

        session = auth_create_dev_session(scope="anchor")  # maps to guardian profile
        outcome = handle_tool_call(
            "guardian_status", {"session_token": session["token"]}
        )
        assert "error" not in outcome

        # Receipt should be written without session_token arguments
        with open(MCP_RECEIPTS, "r") as f:
            last_line = f.readlines()[-1]
        record = json.loads(last_line)
        assert "session_token" not in record["body"].get("arguments", {})
|
||||||
Reference in New Issue
Block a user