From 110d644e10bd9fa1f1d7905b71b141b1323519d2 Mon Sep 17 00:00:00 2001
From: Vault Sovereign
Date: Sat, 27 Dec 2025 00:10:32 +0000
Subject: [PATCH] Initialize repository snapshot

---
 .gitignore | 26 +
 .gitlab-ci.yml | 130 +
 .opencode/package.json | 5 +
 .opencode/plugin/vaultmesh-sentinel.ts | 300 ++
 ASSURANCE.md | 18 +
 CHANGELOG.md | 68 +
 Cargo.lock | 1832 +++++++++++
 Cargo.toml | 31 +
 Containerfile | 40 +
 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/.gitignore | 1 +
 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/README.md | 58 +
 .../fixtures/README.md | 15 +
 .../fail/E_EVENT_HASH_MISMATCH/README.md | 1 +
 .../fail/E_EVENT_HASH_MISMATCH/integrity.json | 31 +
 .../fail/E_EVENT_HASH_MISMATCH/receipts.jsonl | 2 +
 .../fail/E_EVENT_HASH_MISMATCH/roots.txt | 3 +
 .../fail/E_EVENT_HASH_MISMATCH/seal.json | 1 +
 .../verifier_manifest.json | 1 +
 .../fail/canon_version_unsupported/README.md | 1 +
 .../canon_version_unsupported/integrity.json | 31 +
 .../canon_version_unsupported/receipts.jsonl | 2 +
 .../fail/canon_version_unsupported/roots.txt | 3 +
 .../fail/canon_version_unsupported/seal.json | 1 +
 .../verifier_manifest.json | 1 +
 .../fixtures/fail/double_outcome/README.md | 1 +
 .../fail/double_outcome/integrity.json | 31 +
 .../fail/double_outcome/receipts.jsonl | 4 +
 .../fixtures/fail/double_outcome/roots.txt | 5 +
 .../fixtures/fail/double_outcome/seal.json | 1 +
 .../double_outcome/verifier_manifest.json | 1 +
 .../fail/event_hash_mismatch/README.md | 1 +
 .../fail/event_hash_mismatch/integrity.json | 31 +
 .../fail/event_hash_mismatch/receipts.jsonl | 2 +
 .../fail/event_hash_mismatch/roots.txt | 3 +
 .../fail/event_hash_mismatch/seal.json | 1 +
 .../verifier_manifest.json | 1 +
 .../fail/execution_without_intent/README.md | 1 +
 .../execution_without_intent/integrity.json | 31 +
 .../execution_without_intent/receipts.jsonl | 2 +
 .../fail/execution_without_intent/roots.txt | 3 +
 .../fail/execution_without_intent/seal.json | 1 +
 .../verifier_manifest.json | 1 +
 .../fail/invalid_jsonl_truncated/README.md | 1 +
 .../invalid_jsonl_truncated/integrity.json | 31 +
 .../invalid_jsonl_truncated/receipts.jsonl | 1 +
 .../fail/invalid_jsonl_truncated/roots.txt | 3 +
 .../fail/invalid_jsonl_truncated/seal.json | 1 +
 .../verifier_manifest.json | 1 +
 .../fail/manifest_hash_mismatch/README.md | 1 +
 .../manifest_hash_mismatch/integrity.json | 31 +
 .../manifest_hash_mismatch/receipts.jsonl | 2 +
 .../fail/manifest_hash_mismatch/roots.txt | 3 +
 .../fail/manifest_hash_mismatch/seal.json | 1 +
 .../verifier_manifest.json | 1 +
 .../missing_required_file_roots/README.md | 1 +
 .../integrity.json | 26 +
 .../receipts.jsonl | 2 +
 .../missing_required_file_roots/seal.json | 1 +
 .../verifier_manifest.json | 1 +
 .../fail/op_digest_mismatch/README.md | 1 +
 .../fail/op_digest_mismatch/integrity.json | 31 +
 .../fail/op_digest_mismatch/receipts.jsonl | 2 +
 .../fail/op_digest_mismatch/roots.txt | 3 +
 .../fail/op_digest_mismatch/seal.json | 1 +
 .../op_digest_mismatch/verifier_manifest.json | 1 +
 .../fail/prev_event_hash_mismatch/README.md | 1 +
 .../prev_event_hash_mismatch/integrity.json | 31 +
 .../prev_event_hash_mismatch/receipts.jsonl | 2 +
 .../fail/prev_event_hash_mismatch/roots.txt | 3 +
 .../fail/prev_event_hash_mismatch/seal.json | 1 +
 .../verifier_manifest.json | 1 +
 .../fixtures/fail/range_mismatch/README.md | 1 +
 .../fail/range_mismatch/integrity.json | 31 +
 .../fail/range_mismatch/receipts.jsonl | 2 +
 .../fixtures/fail/range_mismatch/roots.txt | 3 +
 .../fixtures/fail/range_mismatch/seal.json | 1 +
 .../range_mismatch/verifier_manifest.json | 1 +
.../fail/revoked_capability_used/README.md | 1 + .../revoked_capability_used/integrity.json | 31 + .../revoked_capability_used/receipts.jsonl | 4 + .../fail/revoked_capability_used/roots.txt | 5 + .../fail/revoked_capability_used/seal.json | 1 + .../verifier_manifest.json | 1 + .../fixtures/fail/root_mismatch/README.md | 1 + .../fail/root_mismatch/integrity.json | 31 + .../fail/root_mismatch/receipts.jsonl | 2 + .../fixtures/fail/root_mismatch/roots.txt | 3 + .../fixtures/fail/root_mismatch/seal.json | 1 + .../fail/root_mismatch/verifier_manifest.json | 1 + .../seq_non_monotonic_duplicate/README.md | 1 + .../integrity.json | 31 + .../receipts.jsonl | 3 + .../seq_non_monotonic_duplicate/roots.txt | 3 + .../seq_non_monotonic_duplicate/seal.json | 1 + .../verifier_manifest.json | 1 + .../README.md | 1 + .../integrity.json | 31 + .../receipts.jsonl | 2 + .../roots.txt | 3 + .../seal.json | 1 + .../verifier_manifest.json | 1 + .../fail/unlisted_extra_file_strict/README.md | 1 + .../unlisted_extra_file_strict/UNLISTED.bin | 1 + .../unlisted_extra_file_strict/integrity.json | 31 + .../unlisted_extra_file_strict/receipts.jsonl | 2 + .../fail/unlisted_extra_file_strict/roots.txt | 3 + .../fail/unlisted_extra_file_strict/seal.json | 1 + .../verifier_manifest.json | 1 + .../pass/refusal_proof_pass/README.md | 1 + .../pass/refusal_proof_pass/integrity.json | 31 + .../pass/refusal_proof_pass/receipts.jsonl | 3 + .../pass/refusal_proof_pass/roots.txt | 4 + .../pass/refusal_proof_pass/seal.json | 1 + .../refusal_proof_pass/verifier_manifest.json | 1 + .../pass/tamper_signal_pass/README.md | 1 + .../pass/tamper_signal_pass/integrity.json | 31 + .../pass/tamper_signal_pass/receipts.jsonl | 2 + .../pass/tamper_signal_pass/roots.txt | 3 + .../pass/tamper_signal_pass/seal.json | 1 + .../tamper_signal_pass/verifier_manifest.json | 1 + .../manifest.yaml | 209 ++ MERIDIAN_V1_CONFORMANCE_TEST_SUITE/run.py | 394 +++ MERIDIAN_V1_CONFORMANCE_TEST_SUITE/run.sh | 39 + .../tests/README.md | 14 + .../tests/capabilities/README.md | 8 + .../tests/event_chain/README.md | 9 + .../tests/identity/README.md | 7 + .../tests/ml_boundary/README.md | 12 + .../tests/offline_restore/README.md | 9 + .../tests/refusal_proofs/README.md | 13 + .../tests/sealing/README.md | 11 + .../tests/tamper_signals/README.md | 9 + .../tools/generate_fixtures.py | 944 ++++++ OFFSEC-AGENTS-PLAN.md | 898 ++++++ PLAN.md | 215 ++ README.md | 15 + cli/__init__.py | 0 cli/ledger.py | 667 ++++ cli/skill_validator.py | 550 ++++ cli/vm_cli.py | 2816 +++++++++++++++++ docs/GITLAB-CONSOLE-SETUP.md | 155 + docs/VAULTMESH-AUTOMATION-ENGINE.md | 907 ++++++ docs/VAULTMESH-CONSOLE-ENGINE.md | 438 +++ docs/VAULTMESH-CONSTITUTIONAL-GOVERNANCE.md | 752 +++++ docs/VAULTMESH-DEPLOYMENT-MANIFESTS.md | 1267 ++++++++ docs/VAULTMESH-ETERNAL-PATTERN.md | 507 +++ docs/VAULTMESH-FEDERATION-PROTOCOL.md | 560 ++++ docs/VAULTMESH-IDENTITY-ENGINE.md | 635 ++++ docs/VAULTMESH-IMPLEMENTATION-SCAFFOLDS.md | 1621 ++++++++++ docs/VAULTMESH-MCP-SERVERS.md | 1049 ++++++ docs/VAULTMESH-MCP-TEM-NODE.md | 161 + docs/VAULTMESH-MESH-ENGINE.md | 554 ++++ docs/VAULTMESH-MIGRATION-GUIDE.md | 537 ++++ docs/VAULTMESH-MONITORING-STACK.md | 688 ++++ docs/VAULTMESH-OBSERVABILITY-ENGINE.md | 742 +++++ docs/VAULTMESH-OFFSEC-ENGINE.md | 652 ++++ docs/VAULTMESH-PROOFBUNDLE-PLAYBOOK.md | 169 + docs/VAULTMESH-PROOFBUNDLE-SPEC.md | 595 ++++ docs/VAULTMESH-PSI-FIELD-ENGINE.md | 652 ++++ docs/VAULTMESH-SECURITY-MANUAL-INDEX.json | 1187 +++++++ docs/VAULTMESH-SENTINEL-GTM-BATTLECARD.md | 109 + 
docs/VAULTMESH-SHIELD-NODE-TEM.md | 197 ++ docs/VAULTMESH-STANDARDS-INDEX.md | 201 ++ docs/VAULTMESH-TESTING-FRAMEWORK.md | 620 ++++ docs/observability/README.md | 101 + docs/observability/dashboards/receipts.json | 366 +++ docs/observability/docker-compose.yml | 34 + .../dashboards/default.yml | 12 + .../datasources/prometheus.yml | 9 + docs/observability/prometheus.yml | 9 + docs/skill/ALCHEMICAL_PATTERNS.md | 551 ++++ docs/skill/CODE_TEMPLATES.md | 693 ++++ docs/skill/ENGINE_SPECS.md | 315 ++ docs/skill/INFRASTRUCTURE.md | 711 +++++ docs/skill/MCP_INTEGRATION.md | 493 +++ docs/skill/OPERATIONS.md | 537 ++++ docs/skill/PROTOCOLS.md | 605 ++++ docs/skill/QUICK_REFERENCE.md | 196 ++ docs/skill/SKILL.md | 338 ++ engines/console/__init__.py | 27 + engines/console/approvals.py | 209 ++ engines/console/receipts.py | 271 ++ health_report.md | 30 + keys/identity/guardian-local.json | 7 + keys/identity/human-karol.json | 7 + keys/identity/portal-shield.json | 7 + keys/identity/skill-validator.json | 7 + ledger/__init__.py | 33 + ledger/db.py | 426 +++ ledger/migrate.py | 52 + ledger/redact.py | 199 ++ ledger/schema/0001_init.sql | 50 + ledger/schema/0002_indexes.sql | 10 + ledger/schema/0003_shadow_receipts.sql | 18 + pyproject.toml | 9 + requirements.txt | 8 + scripts/console_receipts_server.py | 935 ++++++ scripts/gitlab_console_session.sh | 190 ++ scripts/offsec_node_client.py | 123 + spec/ATTACK_RESISTANCE_LEDGER.md | 217 ++ spec/BLUEPRINT_SPEC.md | 203 ++ spec/MAPPING.md | 36 + ...RIDIAN_V1_FAQ_HOSTILE_REGULATOR_EDITION.md | 331 ++ ...DIAN_V1_INSURER_DUE_DILIGENCE_QUESTIONS.md | 309 ++ spec/SENTINEL_EVIDENCE_STANDARD.md | 41 + spec/SENTINEL_FAILURE_CODE_SEMANTICS.md | 43 + .../SENTINEL_OFFLINE_VERIFIER_REQUIREMENTS.md | 225 ++ spec/SENTINEL_V1_CONTRACT_MATRIX.md | 137 + spec/SENTINEL_V1_SPEC.md | 340 ++ spec/sentinel/README.md | 22 + spec/sentinel/canonicalization.md | 123 + spec/sentinel/event.schema.json | 68 + spec/sentinel/integrity.schema.json | 24 + spec/sentinel/seal.schema.json | 62 + spec/sentinel/verifier_manifest.schema.json | 23 + testvectors/proofbundle/README.md | 81 + .../proofbundle/proofbundle-broken-chain.json | 80 + .../proofbundle-tampered-body.json | 80 + .../proofbundle-tampered-root.json | 80 + .../proofbundle/proofbundle-valid.json | 80 + .../sentinel/black-box-that-refused/README.md | 12 + .../black-box-that-refused/integrity.json | 31 + .../black-box-that-refused/receipts.jsonl | 5 + .../sentinel/black-box-that-refused/roots.txt | 6 + .../sentinel/black-box-that-refused/seal.json | 1 + .../verifier_manifest.json | 1 + .../corruption-truncated-jsonl/README.md | 6 + .../corruption-truncated-jsonl/integrity.json | 1 + .../corruption-truncated-jsonl/receipts.jsonl | 5 + .../corruption-truncated-jsonl/roots.txt | 6 + .../corruption-truncated-jsonl/seal.json | 1 + .../verifier_manifest.json | 1 + .../integrity-size-mismatch/README.md | 16 + .../integrity-size-mismatch/integrity.json | 31 + .../integrity-size-mismatch/receipts.jsonl | 5 + .../integrity-size-mismatch/roots.txt | 6 + .../integrity-size-mismatch/seal.json | 1 + .../verifier_manifest.json | 1 + .../revocation-used-after-revoke/README.md | 6 + .../integrity.json | 1 + .../receipts.jsonl | 5 + .../revocation-used-after-revoke/roots.txt | 6 + .../revocation-used-after-revoke/seal.json | 1 + .../verifier_manifest.json | 1 + .../sentinel/rollback-duplicate-seq/README.md | 6 + .../rollback-duplicate-seq/integrity.json | 1 + .../rollback-duplicate-seq/receipts.jsonl | 5 + .../sentinel/rollback-duplicate-seq/roots.txt 
| 6 + .../sentinel/rollback-duplicate-seq/seal.json | 1 + .../verifier_manifest.json | 1 + tools/check_sentinel_contract_parity.py | 123 + tools/make_proofbundle_testvectors.py | 99 + tools/run_sentinel_testvectors.sh | 110 + tools/sentinel_failure_codes.py | 22 + tools/vm_verify_sentinel_bundle.py | 1650 ++++++++++ vaultmesh-automation/Cargo.toml | 7 + vaultmesh-automation/src/lib.rs | 5 + vaultmesh-core/Cargo.toml | 10 + vaultmesh-core/src/did.rs | 130 + vaultmesh-core/src/hash.rs | 147 + vaultmesh-core/src/lib.rs | 7 + vaultmesh-core/src/receipt.rs | 79 + vaultmesh-guardian/Cargo.toml | 21 + vaultmesh-guardian/src/lib.rs | 339 ++ .../tests/metrics_integration.rs | 75 + vaultmesh-identity/Cargo.toml | 11 + vaultmesh-identity/src/lib.rs | 284 ++ vaultmesh-mesh/Cargo.toml | 21 + vaultmesh-mesh/src/lib.rs | 824 +++++ vaultmesh-mesh/tests/metrics_integration.rs | 76 + vaultmesh-observability/Cargo.toml | 20 + vaultmesh-observability/Dockerfile | 23 + vaultmesh-observability/src/lib.rs | 208 ++ vaultmesh-observability/src/main.rs | 55 + vaultmesh-observability/tests/smoketest.rs | 85 + vaultmesh-offsec/Cargo.toml | 7 + vaultmesh-offsec/src/lib.rs | 5 + vaultmesh-psi/Cargo.toml | 7 + vaultmesh-psi/src/lib.rs | 5 + vaultmesh-treasury/Cargo.toml | 18 + vaultmesh-treasury/src/lib.rs | 466 +++ 281 files changed, 40331 insertions(+) create mode 100644 .gitignore create mode 100644 .gitlab-ci.yml create mode 100644 .opencode/package.json create mode 100644 .opencode/plugin/vaultmesh-sentinel.ts create mode 100644 ASSURANCE.md create mode 100644 CHANGELOG.md create mode 100644 Cargo.lock create mode 100644 Cargo.toml create mode 100644 Containerfile create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/.gitignore create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/README.md create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/README.md create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/E_EVENT_HASH_MISMATCH/README.md create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/E_EVENT_HASH_MISMATCH/integrity.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/E_EVENT_HASH_MISMATCH/receipts.jsonl create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/E_EVENT_HASH_MISMATCH/roots.txt create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/E_EVENT_HASH_MISMATCH/seal.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/E_EVENT_HASH_MISMATCH/verifier_manifest.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/canon_version_unsupported/README.md create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/canon_version_unsupported/integrity.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/canon_version_unsupported/receipts.jsonl create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/canon_version_unsupported/roots.txt create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/canon_version_unsupported/seal.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/canon_version_unsupported/verifier_manifest.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/double_outcome/README.md create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/double_outcome/integrity.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/double_outcome/receipts.jsonl create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/double_outcome/roots.txt create mode 100644 
MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/double_outcome/seal.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/double_outcome/verifier_manifest.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/event_hash_mismatch/README.md create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/event_hash_mismatch/integrity.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/event_hash_mismatch/receipts.jsonl create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/event_hash_mismatch/roots.txt create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/event_hash_mismatch/seal.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/event_hash_mismatch/verifier_manifest.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/execution_without_intent/README.md create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/execution_without_intent/integrity.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/execution_without_intent/receipts.jsonl create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/execution_without_intent/roots.txt create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/execution_without_intent/seal.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/execution_without_intent/verifier_manifest.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/invalid_jsonl_truncated/README.md create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/invalid_jsonl_truncated/integrity.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/invalid_jsonl_truncated/receipts.jsonl create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/invalid_jsonl_truncated/roots.txt create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/invalid_jsonl_truncated/seal.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/invalid_jsonl_truncated/verifier_manifest.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/manifest_hash_mismatch/README.md create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/manifest_hash_mismatch/integrity.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/manifest_hash_mismatch/receipts.jsonl create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/manifest_hash_mismatch/roots.txt create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/manifest_hash_mismatch/seal.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/manifest_hash_mismatch/verifier_manifest.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/missing_required_file_roots/README.md create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/missing_required_file_roots/integrity.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/missing_required_file_roots/receipts.jsonl create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/missing_required_file_roots/seal.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/missing_required_file_roots/verifier_manifest.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/op_digest_mismatch/README.md create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/op_digest_mismatch/integrity.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/op_digest_mismatch/receipts.jsonl 
create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/op_digest_mismatch/roots.txt create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/op_digest_mismatch/seal.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/op_digest_mismatch/verifier_manifest.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/prev_event_hash_mismatch/README.md create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/prev_event_hash_mismatch/integrity.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/prev_event_hash_mismatch/receipts.jsonl create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/prev_event_hash_mismatch/roots.txt create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/prev_event_hash_mismatch/seal.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/prev_event_hash_mismatch/verifier_manifest.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/range_mismatch/README.md create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/range_mismatch/integrity.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/range_mismatch/receipts.jsonl create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/range_mismatch/roots.txt create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/range_mismatch/seal.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/range_mismatch/verifier_manifest.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/revoked_capability_used/README.md create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/revoked_capability_used/integrity.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/revoked_capability_used/receipts.jsonl create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/revoked_capability_used/roots.txt create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/revoked_capability_used/seal.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/revoked_capability_used/verifier_manifest.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/root_mismatch/README.md create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/root_mismatch/integrity.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/root_mismatch/receipts.jsonl create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/root_mismatch/roots.txt create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/root_mismatch/seal.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/root_mismatch/verifier_manifest.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/seq_non_monotonic_duplicate/README.md create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/seq_non_monotonic_duplicate/integrity.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/seq_non_monotonic_duplicate/receipts.jsonl create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/seq_non_monotonic_duplicate/roots.txt create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/seq_non_monotonic_duplicate/seal.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/seq_non_monotonic_duplicate/verifier_manifest.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/silent_denial_intent_without_outcome/README.md create mode 100644 
MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/silent_denial_intent_without_outcome/integrity.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/silent_denial_intent_without_outcome/receipts.jsonl create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/silent_denial_intent_without_outcome/roots.txt create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/silent_denial_intent_without_outcome/seal.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/silent_denial_intent_without_outcome/verifier_manifest.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/unlisted_extra_file_strict/README.md create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/unlisted_extra_file_strict/UNLISTED.bin create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/unlisted_extra_file_strict/integrity.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/unlisted_extra_file_strict/receipts.jsonl create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/unlisted_extra_file_strict/roots.txt create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/unlisted_extra_file_strict/seal.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/unlisted_extra_file_strict/verifier_manifest.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/refusal_proof_pass/README.md create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/refusal_proof_pass/integrity.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/refusal_proof_pass/receipts.jsonl create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/refusal_proof_pass/roots.txt create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/refusal_proof_pass/seal.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/refusal_proof_pass/verifier_manifest.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/tamper_signal_pass/README.md create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/tamper_signal_pass/integrity.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/tamper_signal_pass/receipts.jsonl create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/tamper_signal_pass/roots.txt create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/tamper_signal_pass/seal.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/tamper_signal_pass/verifier_manifest.json create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/manifest.yaml create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/run.py create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/run.sh create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/README.md create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/capabilities/README.md create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/event_chain/README.md create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/identity/README.md create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/ml_boundary/README.md create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/offline_restore/README.md create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/refusal_proofs/README.md create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/sealing/README.md create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/tamper_signals/README.md create mode 100644 MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tools/generate_fixtures.py 
create mode 100644 OFFSEC-AGENTS-PLAN.md create mode 100644 PLAN.md create mode 100644 README.md create mode 100644 cli/__init__.py create mode 100644 cli/ledger.py create mode 100755 cli/skill_validator.py create mode 100755 cli/vm_cli.py create mode 100644 docs/GITLAB-CONSOLE-SETUP.md create mode 100644 docs/VAULTMESH-AUTOMATION-ENGINE.md create mode 100644 docs/VAULTMESH-CONSOLE-ENGINE.md create mode 100644 docs/VAULTMESH-CONSTITUTIONAL-GOVERNANCE.md create mode 100644 docs/VAULTMESH-DEPLOYMENT-MANIFESTS.md create mode 100644 docs/VAULTMESH-ETERNAL-PATTERN.md create mode 100644 docs/VAULTMESH-FEDERATION-PROTOCOL.md create mode 100644 docs/VAULTMESH-IDENTITY-ENGINE.md create mode 100644 docs/VAULTMESH-IMPLEMENTATION-SCAFFOLDS.md create mode 100644 docs/VAULTMESH-MCP-SERVERS.md create mode 100644 docs/VAULTMESH-MCP-TEM-NODE.md create mode 100644 docs/VAULTMESH-MESH-ENGINE.md create mode 100644 docs/VAULTMESH-MIGRATION-GUIDE.md create mode 100644 docs/VAULTMESH-MONITORING-STACK.md create mode 100644 docs/VAULTMESH-OBSERVABILITY-ENGINE.md create mode 100644 docs/VAULTMESH-OFFSEC-ENGINE.md create mode 100644 docs/VAULTMESH-PROOFBUNDLE-PLAYBOOK.md create mode 100644 docs/VAULTMESH-PROOFBUNDLE-SPEC.md create mode 100644 docs/VAULTMESH-PSI-FIELD-ENGINE.md create mode 100644 docs/VAULTMESH-SECURITY-MANUAL-INDEX.json create mode 100644 docs/VAULTMESH-SENTINEL-GTM-BATTLECARD.md create mode 100644 docs/VAULTMESH-SHIELD-NODE-TEM.md create mode 100644 docs/VAULTMESH-STANDARDS-INDEX.md create mode 100644 docs/VAULTMESH-TESTING-FRAMEWORK.md create mode 100644 docs/observability/README.md create mode 100644 docs/observability/dashboards/receipts.json create mode 100644 docs/observability/docker-compose.yml create mode 100644 docs/observability/grafana-provisioning/dashboards/default.yml create mode 100644 docs/observability/grafana-provisioning/datasources/prometheus.yml create mode 100644 docs/observability/prometheus.yml create mode 100644 docs/skill/ALCHEMICAL_PATTERNS.md create mode 100644 docs/skill/CODE_TEMPLATES.md create mode 100644 docs/skill/ENGINE_SPECS.md create mode 100644 docs/skill/INFRASTRUCTURE.md create mode 100644 docs/skill/MCP_INTEGRATION.md create mode 100644 docs/skill/OPERATIONS.md create mode 100644 docs/skill/PROTOCOLS.md create mode 100644 docs/skill/QUICK_REFERENCE.md create mode 100644 docs/skill/SKILL.md create mode 100644 engines/console/__init__.py create mode 100644 engines/console/approvals.py create mode 100644 engines/console/receipts.py create mode 100644 health_report.md create mode 100644 keys/identity/guardian-local.json create mode 100644 keys/identity/human-karol.json create mode 100644 keys/identity/portal-shield.json create mode 100644 keys/identity/skill-validator.json create mode 100644 ledger/__init__.py create mode 100644 ledger/db.py create mode 100644 ledger/migrate.py create mode 100644 ledger/redact.py create mode 100644 ledger/schema/0001_init.sql create mode 100644 ledger/schema/0002_indexes.sql create mode 100644 ledger/schema/0003_shadow_receipts.sql create mode 100644 pyproject.toml create mode 100644 requirements.txt create mode 100644 scripts/console_receipts_server.py create mode 100755 scripts/gitlab_console_session.sh create mode 100644 scripts/offsec_node_client.py create mode 100644 spec/ATTACK_RESISTANCE_LEDGER.md create mode 100644 spec/BLUEPRINT_SPEC.md create mode 100644 spec/MAPPING.md create mode 100644 spec/MERIDIAN_V1_FAQ_HOSTILE_REGULATOR_EDITION.md create mode 100644 spec/MERIDIAN_V1_INSURER_DUE_DILIGENCE_QUESTIONS.md create mode 
100644 spec/SENTINEL_EVIDENCE_STANDARD.md create mode 100644 spec/SENTINEL_FAILURE_CODE_SEMANTICS.md create mode 100644 spec/SENTINEL_OFFLINE_VERIFIER_REQUIREMENTS.md create mode 100644 spec/SENTINEL_V1_CONTRACT_MATRIX.md create mode 100644 spec/SENTINEL_V1_SPEC.md create mode 100644 spec/sentinel/README.md create mode 100644 spec/sentinel/canonicalization.md create mode 100644 spec/sentinel/event.schema.json create mode 100644 spec/sentinel/integrity.schema.json create mode 100644 spec/sentinel/seal.schema.json create mode 100644 spec/sentinel/verifier_manifest.schema.json create mode 100644 testvectors/proofbundle/README.md create mode 100644 testvectors/proofbundle/proofbundle-broken-chain.json create mode 100644 testvectors/proofbundle/proofbundle-tampered-body.json create mode 100644 testvectors/proofbundle/proofbundle-tampered-root.json create mode 100644 testvectors/proofbundle/proofbundle-valid.json create mode 100644 testvectors/sentinel/black-box-that-refused/README.md create mode 100644 testvectors/sentinel/black-box-that-refused/integrity.json create mode 100644 testvectors/sentinel/black-box-that-refused/receipts.jsonl create mode 100644 testvectors/sentinel/black-box-that-refused/roots.txt create mode 100644 testvectors/sentinel/black-box-that-refused/seal.json create mode 100644 testvectors/sentinel/black-box-that-refused/verifier_manifest.json create mode 100644 testvectors/sentinel/corruption-truncated-jsonl/README.md create mode 100644 testvectors/sentinel/corruption-truncated-jsonl/integrity.json create mode 100644 testvectors/sentinel/corruption-truncated-jsonl/receipts.jsonl create mode 100644 testvectors/sentinel/corruption-truncated-jsonl/roots.txt create mode 100644 testvectors/sentinel/corruption-truncated-jsonl/seal.json create mode 100644 testvectors/sentinel/corruption-truncated-jsonl/verifier_manifest.json create mode 100644 testvectors/sentinel/integrity-size-mismatch/README.md create mode 100644 testvectors/sentinel/integrity-size-mismatch/integrity.json create mode 100644 testvectors/sentinel/integrity-size-mismatch/receipts.jsonl create mode 100644 testvectors/sentinel/integrity-size-mismatch/roots.txt create mode 100644 testvectors/sentinel/integrity-size-mismatch/seal.json create mode 100644 testvectors/sentinel/integrity-size-mismatch/verifier_manifest.json create mode 100644 testvectors/sentinel/revocation-used-after-revoke/README.md create mode 100644 testvectors/sentinel/revocation-used-after-revoke/integrity.json create mode 100644 testvectors/sentinel/revocation-used-after-revoke/receipts.jsonl create mode 100644 testvectors/sentinel/revocation-used-after-revoke/roots.txt create mode 100644 testvectors/sentinel/revocation-used-after-revoke/seal.json create mode 100644 testvectors/sentinel/revocation-used-after-revoke/verifier_manifest.json create mode 100644 testvectors/sentinel/rollback-duplicate-seq/README.md create mode 100644 testvectors/sentinel/rollback-duplicate-seq/integrity.json create mode 100644 testvectors/sentinel/rollback-duplicate-seq/receipts.jsonl create mode 100644 testvectors/sentinel/rollback-duplicate-seq/roots.txt create mode 100644 testvectors/sentinel/rollback-duplicate-seq/seal.json create mode 100644 testvectors/sentinel/rollback-duplicate-seq/verifier_manifest.json create mode 100644 tools/check_sentinel_contract_parity.py create mode 100755 tools/make_proofbundle_testvectors.py create mode 100755 tools/run_sentinel_testvectors.sh create mode 100644 tools/sentinel_failure_codes.py create mode 100644 
tools/vm_verify_sentinel_bundle.py
 create mode 100644 vaultmesh-automation/Cargo.toml
 create mode 100644 vaultmesh-automation/src/lib.rs
 create mode 100644 vaultmesh-core/Cargo.toml
 create mode 100644 vaultmesh-core/src/did.rs
 create mode 100644 vaultmesh-core/src/hash.rs
 create mode 100644 vaultmesh-core/src/lib.rs
 create mode 100644 vaultmesh-core/src/receipt.rs
 create mode 100644 vaultmesh-guardian/Cargo.toml
 create mode 100644 vaultmesh-guardian/src/lib.rs
 create mode 100644 vaultmesh-guardian/tests/metrics_integration.rs
 create mode 100644 vaultmesh-identity/Cargo.toml
 create mode 100644 vaultmesh-identity/src/lib.rs
 create mode 100644 vaultmesh-mesh/Cargo.toml
 create mode 100644 vaultmesh-mesh/src/lib.rs
 create mode 100644 vaultmesh-mesh/tests/metrics_integration.rs
 create mode 100644 vaultmesh-observability/Cargo.toml
 create mode 100644 vaultmesh-observability/Dockerfile
 create mode 100644 vaultmesh-observability/src/lib.rs
 create mode 100644 vaultmesh-observability/src/main.rs
 create mode 100644 vaultmesh-observability/tests/smoketest.rs
 create mode 100644 vaultmesh-offsec/Cargo.toml
 create mode 100644 vaultmesh-offsec/src/lib.rs
 create mode 100644 vaultmesh-psi/Cargo.toml
 create mode 100644 vaultmesh-psi/src/lib.rs
 create mode 100644 vaultmesh-treasury/Cargo.toml
 create mode 100644 vaultmesh-treasury/src/lib.rs

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..dda5b7f
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,26 @@
+target/
+__pycache__/
+*.pyc
+.venv/
+.DS_Store
+*.egg-info/
+
+# Local-first SQLite ledger and seal bundles
+.state/
+*.sqlite
+*.sqlite-wal
+*.sqlite-shm
+
+# Runtime logs and receipts
+logs/
+receipts/
+
+# Guardian/automation/offsec/treasury ROOT state files
+ROOT.*.txt
+
+# Generic logs
+*.log
+
+# Generated verifier reports
+verification_report.json
+verification_report_*.json

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 0000000..166bac9
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,130 @@
+stages:
+  - build
+  - test
+  - lint
+
+variables:
+  CARGO_HOME: $CI_PROJECT_DIR/.cargo
+
+# Ensure receipts directories exist (tests may write into them)
+before_script:
+  - mkdir -p receipts/guardian receipts/treasury receipts/offsec receipts/automation receipts/mcp receipts/mesh
+
+# Rust build job
+rust-build:
+  stage: build
+  image: rust:1.75
+  script:
+    - cargo build --workspace --locked
+  cache:
+    key: cargo-$CI_COMMIT_REF_SLUG
+    paths:
+      - target/
+      - .cargo/registry/
+
+# Sentinel contract parity + testvectors (required gate)
+sentinel-contracts:
+  stage: test
+  image: python:3.11
+  before_script:
+    - pip install -q blake3
+  script:
+    - python3 tools/check_sentinel_contract_parity.py
+    - bash tools/run_sentinel_testvectors.sh
+
+# MERIDIAN v1 conformance suite (offline, deterministic, build-blocking)
+meridian-v1-conformance:
+  stage: test
+  image: python:3.11
+  before_script:
+    - pip install -q blake3
+  script:
+    - bash MERIDIAN_V1_CONFORMANCE_TEST_SUITE/run.sh
+
+# OpenCode plugin smoke (one PASS + one FAIL)
+sentinel-opencode-smoke:
+  stage: test
+  image: node:20-bullseye
+  before_script:
+    - apt-get update && apt-get install -y python3 python3-pip >/dev/null
+    - pip3 install -q blake3
+    - npm install -g opencode-ai@1.0.166
+    - npm install --prefix .opencode
+    - export VAULTMESH_WORKSPACE_ROOT="$CI_PROJECT_DIR"
+    - export VAULTMESH_SENTINEL_VERIFIER="$CI_PROJECT_DIR/tools/vm_verify_sentinel_bundle.py"
+  script:
+    - opencode run --format json --command sentinelVerifyBundle --worktree "$CI_PROJECT_DIR" --directory "$CI_PROJECT_DIR" --tool-args '{"bundlePath":"testvectors/sentinel/black-box-that-refused","strict":true}'
+    - opencode run --format json --command sentinelVerifyBundle --worktree "$CI_PROJECT_DIR" --directory "$CI_PROJECT_DIR" --tool-args '{"bundlePath":"testvectors/sentinel/integrity-size-mismatch","strict":true}'
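+# Note (added for clarity, not part of the original commit): the two smoke
+# invocations above are a deliberate PASS/FAIL pair. black-box-that-refused
+# is expected to verify cleanly, while integrity-size-mismatch is expected to
+# be rejected. The plugin encodes the verdict in its JSON output ("ok" is
+# true only when the offline verifier exits 0), so a consumer that wants the
+# FAIL vector to gate the build should parse the JSON payload rather than
+# assume the command's exit status reflects bundle validity.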
"$CI_PROJECT_DIR" --tool-args '{"bundlePath":"testvectors/sentinel/black-box-that-refused","strict":true}' + - opencode run --format json --command sentinelVerifyBundle --worktree "$CI_PROJECT_DIR" --directory "$CI_PROJECT_DIR" --tool-args '{"bundlePath":"testvectors/sentinel/integrity-size-mismatch","strict":true}' + +# Rust test job +rust-test: + stage: test + image: rust:1.75 + script: + - cargo test --workspace --locked + cache: + key: cargo-$CI_COMMIT_REF_SLUG + paths: + - target/ + - .cargo/registry/ + +# Rust lint job (format + clippy) +rust-lint: + stage: lint + image: rust:1.75 + script: + - rustup component add clippy rustfmt + - cargo fmt --check + - cargo clippy --workspace -- -D warnings + allow_failure: true + cache: + key: cargo-$CI_COMMIT_REF_SLUG + paths: + - target/ + - .cargo/registry/ + +# Python CLI tests (when pytest available) +python-test: + stage: test + image: python:3.11 + before_script: + - pip install -q blake3 click pynacl pytest + script: + - python -m pytest -q cli/ tests/ 2>/dev/null || echo "No Python tests yet" + allow_failure: true + +# Observability exporter smoke test +observability-smoke: + stage: test + image: rust:1.75 + script: + - cargo test -p vaultmesh-observability --tests -- --nocapture + cache: + key: cargo-$CI_COMMIT_REF_SLUG + paths: + - target/ + - .cargo/registry/ + +# Guardian metrics integration test (requires --features metrics) +guardian-metrics-integration: + stage: test + image: rust:1.75 + script: + - cargo test -p vaultmesh-guardian --features metrics --test metrics_integration -- --nocapture + cache: + key: cargo-$CI_COMMIT_REF_SLUG + paths: + - target/ + - .cargo/registry/ + +# Mesh metrics integration test (requires --features metrics) +mesh-metrics-integration: + stage: test + image: rust:1.75 + script: + - cargo test -p vaultmesh-mesh --features metrics --test metrics_integration -- --nocapture + cache: + key: cargo-$CI_COMMIT_REF_SLUG + paths: + - target/ + - .cargo/registry/ diff --git a/.opencode/package.json b/.opencode/package.json new file mode 100644 index 0000000..8320f77 --- /dev/null +++ b/.opencode/package.json @@ -0,0 +1,5 @@ +{ + "dependencies": { + "@opencode-ai/plugin": "1.0.166" + } +} diff --git a/.opencode/plugin/vaultmesh-sentinel.ts b/.opencode/plugin/vaultmesh-sentinel.ts new file mode 100644 index 0000000..5b7ad33 --- /dev/null +++ b/.opencode/plugin/vaultmesh-sentinel.ts @@ -0,0 +1,300 @@ +import { tool, type Plugin } from "@opencode-ai/plugin"; +import { spawn } from "node:child_process"; +import { createHash, randomUUID } from "node:crypto"; +import { promises as fs, statSync } from "node:fs"; +import os from "node:os"; +import path from "node:path"; + +const TOOL_VERSION = "0.2.0"; + +type RunResult = { + exitCode: number; + stdout: string; + stderr: string; +}; + +function normalizeForStableJson(value: unknown): unknown { + if (value === null || value === undefined) return value; + if (Array.isArray(value)) return value.map((v) => normalizeForStableJson(v)); + if (typeof value === "object") { + const entries = Object.entries(value as Record).sort( + ([a], [b]) => a.localeCompare(b), + ); + return entries.reduce>((acc, [k, v]) => { + acc[k] = normalizeForStableJson(v); + return acc; + }, {}); + } + return value; +} + +function stableStringify(value: unknown): string { + return JSON.stringify(normalizeForStableJson(value)); +} + +function run( + cmd: string, + args: string[], + opts: { env?: Record } = {}, +): Promise { + return new Promise((resolve) => { + const child = spawn(cmd, args, { + env: { 
+
+function run(
+  cmd: string,
+  args: string[],
+  opts: { env?: Record<string, string> } = {},
+): Promise<RunResult> {
+  return new Promise((resolve) => {
+    const child = spawn(cmd, args, {
+      env: { ...process.env, ...(opts.env ?? {}) },
+      stdio: ["ignore", "pipe", "pipe"],
+    });
+
+    let stdout = "";
+    let stderr = "";
+
+    child.stdout.on("data", (d) => {
+      stdout += d.toString("utf8");
+    });
+
+    child.stderr.on("data", (d) => {
+      stderr += d.toString("utf8");
+    });
+
+    child.on("close", (code) => {
+      resolve({ exitCode: code ?? 1, stdout, stderr });
+    });
+  });
+}
+
+async function computeBundleHash(bundlePath: string): Promise<string> {
+  const root = path.resolve(bundlePath);
+  const hasher = createHash("sha256");
+
+  async function walk(dir: string): Promise<void> {
+    const entries = await fs.readdir(dir, { withFileTypes: true });
+    const sorted = entries.sort((a, b) => a.name.localeCompare(b.name));
+
+    for (const entry of sorted) {
+      const abs = path.join(dir, entry.name);
+      const rel = path.relative(root, abs).split(path.sep).join("/");
+
+      if (entry.isDirectory()) {
+        hasher.update(`dir:${rel}\n`);
+        await walk(abs);
+        continue;
+      }
+
+      if (entry.isFile()) {
+        const data = await fs.readFile(abs);
+        const digest = createHash("sha256").update(data).digest("hex");
+        hasher.update(`file:${rel}:${data.length}:${digest}\n`);
+      }
+    }
+  }
+
+  await walk(root);
+  return `sha256:${hasher.digest("hex")}`;
+}
+
+async function readJsonIfExists(filePath: string): Promise<unknown> {
+  try {
+    const raw = await fs.readFile(filePath, "utf8");
+    return JSON.parse(raw);
+  } catch {
+    return null;
+  }
+}
+
+function resolveWorkspaceRoot(
+  worktree: string | undefined,
+  directory: string,
+): string {
+  const envRoot = process.env.VAULTMESH_WORKSPACE_ROOT;
+  if (envRoot) return envRoot;
+
+  if (worktree) return path.resolve(worktree, "..");
+  return path.resolve(directory, "..");
+}
+
+function resolveVerifierPath(
+  worktree: string | undefined,
+  directory: string,
+): string {
+  const envVerifier = process.env.VAULTMESH_SENTINEL_VERIFIER;
+  if (envVerifier) return envVerifier;
+
+  const workspaceRoot = resolveWorkspaceRoot(worktree, directory);
+  const candidates = [
+    path.join(
+      workspaceRoot,
+      "vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py",
+    ),
+    path.join(workspaceRoot, "tools/vm_verify_sentinel_bundle.py"),
+  ];
+
+  for (const candidate of candidates) {
+    try {
+      if (statSync(candidate).isFile()) {
+        return candidate;
+      }
+    } catch {
+      // continue searching
+    }
+  }
+
+  // Fallback: first candidate, even if it does not exist (caller will error deterministically)
+  return candidates[0];
+}
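+
+// Resolution order used above: the VAULTMESH_SENTINEL_VERIFIER env override
+// wins, then the two workspace-relative candidates are probed, and finally
+// the first candidate is returned even when absent, so a missing verifier
+// surfaces later as a deterministic VERIFIER_NOT_FOUND error from execute()
+// rather than an ambiguous lookup failure here.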
+
+export const VaultMeshSentinelPlugin: Plugin = async (ctx) => {
+  const baseDir = ctx.directory ?? process.cwd();
+  const verifierPath = resolveVerifierPath(ctx.worktree, baseDir);
+
+  return {
+    tool: {
+      sentinelVerifyBundle: tool({
+        description:
+          "Verify a VaultMesh Sentinel v1 seal bundle offline (deterministic; no network).",
+        args: {
+          bundlePath: tool.schema
+            .string()
+            .describe("Path to seal bundle directory (contains seal.json)"),
+          strict: tool.schema
+            .boolean()
+            .optional()
+            .default(false)
+            .describe("Enable strict verification (recommended for audits)"),
+          maxFileBytes: tool.schema
+            .number()
+            .int()
+            .optional()
+            .describe(
+              "Reject any single input file larger than this many bytes",
+            ),
+        },
+        async execute(args) {
+          const respond = (value: unknown) => stableStringify(value);
+
+          const bundle = path.resolve(baseDir, args.bundlePath);
+
+          const baseResult = {
+            tool: "sentinelVerifyBundle",
+            tool_version: TOOL_VERSION,
+            verifier_path: verifierPath,
+            bundle_path: bundle,
+            bundle_hash: null as string | null,
+            canonicalization_version: null as string | null,
+            schema_version: null as string | null,
+            verifier_version: null as string | null,
+            strict: !!args.strict,
+            exit_code: null as number | null,
+            ok: false,
+            stdout: "",
+            stderr: "",
+            report: null as unknown,
+          };
+
+          try {
+            const st = await fs.stat(bundle);
+            if (!st.isDirectory()) {
+              return respond({
+                ...baseResult,
+                error: "BUNDLE_NOT_DIRECTORY",
+              });
+            }
+          } catch {
+            return respond({
+              ...baseResult,
+              error: "BUNDLE_NOT_FOUND",
+            });
+          }
+
+          try {
+            const st = await fs.stat(verifierPath);
+            if (!st.isFile()) {
+              return respond({
+                ...baseResult,
+                error: "VERIFIER_NOT_FILE",
+              });
+            }
+          } catch {
+            return respond({
+              ...baseResult,
+              error: "VERIFIER_NOT_FOUND",
+            });
+          }
+
+          baseResult.bundle_hash = await computeBundleHash(bundle);
+
+          const reportPath = path.join(
+            os.tmpdir(),
+            `vm_sentinel_verification_report_${randomUUID()}.json`,
+          );
+
+          const cmdArgs: string[] = [
+            "-u",
+            verifierPath,
+            "--bundle",
+            bundle,
+            "--report",
+            reportPath,
+          ];
+          if (args.strict) cmdArgs.push("--strict");
+          if (typeof args.maxFileBytes === "number") {
+            cmdArgs.push("--max-file-bytes", String(args.maxFileBytes));
+          }
+
+          const { exitCode, stdout, stderr } = await run("python3", cmdArgs);
+          const report = await readJsonIfExists(reportPath);
+
+          // Best-effort cleanup: report is returned inline; avoid mutating evidence bundles.
+          await fs.unlink(reportPath).catch(() => {});
+
+          const versions =
+            report && typeof report === "object"
+              ? (report as Record<string, unknown>).versions
+              : null;
+
+          const canonicalizationVersion =
+            versions &&
+            typeof versions === "object" &&
+            (versions as Record<string, unknown>).canonicalization_version;
+          const schemaVersion =
+            versions &&
+            typeof versions === "object" &&
+            (versions as Record<string, unknown>).schema_version;
+
+          const declared =
+            report && typeof report === "object"
+              ? (report as Record<string, unknown>).declared_verifier
+              : null;
+          const reportedVerifier =
+            report && typeof report === "object"
+              ? (report as Record<string, unknown>).verifier
+              : null;
+
+          const verifierVersion =
+            (declared &&
+              typeof declared === "object" &&
+              (declared as Record<string, unknown>).version) ||
+            (reportedVerifier &&
+              typeof reportedVerifier === "object" &&
+              (reportedVerifier as Record<string, unknown>).version) ||
+            null;
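+
+          // Success-path envelope: mirrors baseResult, adding exit_code,
+          // stdout/stderr, the parsed report, and whatever versions the
+          // report declares. "ok" is strictly exit_code === 0; everything
+          // else is evidence for the caller to archive.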
+
+          return respond({
+            ...baseResult,
+            exit_code: exitCode,
+            ok: exitCode === 0,
+            stdout,
+            stderr,
+            report,
+            canonicalization_version:
+              typeof canonicalizationVersion === "string"
+                ? canonicalizationVersion
+                : null,
+            schema_version:
+              typeof schemaVersion === "string" ? schemaVersion : null,
+            verifier_version:
+              typeof verifierVersion === "string" ? verifierVersion : null,
+            error: undefined,
+          });
+        },
+      }),
+    },
+  };
+};

diff --git a/ASSURANCE.md b/ASSURANCE.md
new file mode 100644
index 0000000..019affa
--- /dev/null
+++ b/ASSURANCE.md
@@ -0,0 +1,18 @@
+# Assurance Run — 2025-12-18
+
+- Commit: 3cf647e3b6cc732d953f9c4389387c7481e0ed9e
+- Toolchain: `rustc 1.92.0 (ded5c06cf 2025-12-08)`, `cargo 1.92.0 (344c4567c 2025-10-21)`, `python3 3.14.2`
+
+| Check | Status | Notes |
+| --- | --- | --- |
+| `cargo fmt --check` | ❌ | rustfmt reported dozens of wrapping changes (see `vaultmesh-core/src/did.rs`, `vaultmesh-mesh/src/lib.rs`, etc.). No edits were applied—run `cargo fmt` to adopt the default style. |
+| `cargo clippy --all -- -D warnings` | ❌ | Blocks on `vaultmesh-core::DidType::from_str` (Clippy wants an actual `FromStr` impl) before analyzing other crates. |
+| `cargo test` | ✅ | Full workspace test suite passes (Guardian/Mesh/Treasury/Observability metrics + doc tests). |
+| `python3 -m py_compile cli/vm_cli.py tools/*.py` | ✅ | Bytecode generation succeeded. |
+| `python3 tools/check_sentinel_contract_parity.py` | ✅ | Script prints `[OK] Sentinel contract parity verified`. |
+| `bash tools/run_sentinel_testvectors.sh` | ✅ | All Sentinel vectors reported `[OK]`, transcript stored under `tools/out/`. |
+| `bash MERIDIAN_V1_CONFORMANCE_TEST_SUITE/run.sh` | ✅ | Suite reports 19/19 passing; outputs kept under `MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/`. |
+
+Notes:
+- `target/` was cleaned (space pressure) before running the suite and rebuilt by `cargo test`.
+- No source files were modified during this pass.
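The clippy failure recorded above is the `should_implement_trait` lint: `DidType` exposes an inherent `from_str` where Clippy expects the standard `FromStr` trait. A minimal sketch of the shape Clippy asks for, with hypothetical variants and error type (the real definitions live in `vaultmesh-core/src/did.rs` and are not shown in this patch):

```rust
use std::str::FromStr;

// Hypothetical stand-ins; the actual enum and error type belong to vaultmesh-core.
#[derive(Debug, PartialEq)]
enum DidType {
    Human,
    Guardian,
}

#[derive(Debug)]
struct ParseDidTypeError(String);

impl FromStr for DidType {
    type Err = ParseDidTypeError;

    // Implementing the trait (instead of an inherent `from_str`) satisfies
    // clippy::should_implement_trait and lets callers write `"human".parse()`.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "human" => Ok(DidType::Human),
            "guardian" => Ok(DidType::Guardian),
            other => Err(ParseDidTypeError(other.to_string())),
        }
    }
}
```

Keeping a thin inherent `from_str` that delegates to the trait impl would preserve existing call sites while clearing the lint.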
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..e3de30b
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,68 @@
+# Changelog
+
+All notable changes to VaultMesh are documented in this file.
+
+## [Unreleased] - 2025-12-07
+
+### Added
+
+#### Observability Engine
+- Prometheus exporter at `:9108/metrics` with 4 metrics:
+  - `vaultmesh_receipts_total` (counter by module)
+  - `vaultmesh_receipts_failed_total` (counter by module, reason)
+  - `vaultmesh_anchor_age_seconds` (gauge)
+  - `vaultmesh_emit_seconds` (histogram by module)
+- Docker Compose stack (Prometheus + Grafana + Exporter)
+- Grafana dashboard JSON at `docs/observability/dashboards/receipts.json`
+- 8 unit + integration tests for observability
+
+#### Guardian Engine
+- Full implementation replacing stub (~200 LoC)
+- `compute_scroll_root()` and `anchor()` methods
+- Optional `metrics` feature for observability integration
+- `set_anchor_age(0.0)` after each anchor (fresh anchor indicator)
+- Metrics integration test (`--features metrics`)
+- 5 unit tests
+
+#### Treasury Engine
+- Full implementation replacing stub (~300 LoC)
+- Budget management: `create_budget`, `debit`, `credit`
+- Receipt emission for all financial operations
+- Optional `metrics` feature for observability integration
+- 5 unit tests
+
+#### Mesh Engine
+- Full implementation replacing stub (~400 LoC)
+- Node management: `node_join`, `node_leave`
+- Route management: `route_add`, `route_remove`
+- Capability management: `capability_grant`, `capability_revoke`
+- Topology snapshots with `topology_snapshot()`
+- 6 receipt types: `mesh_node_join`, `mesh_node_leave`, `mesh_route_change`, `mesh_capability_grant`, `mesh_capability_revoke`, `mesh_topology_snapshot`
+- Optional `metrics` feature for observability integration
+- Metrics integration test (`--features metrics`)
+- 5 unit tests + 1 integration test
+
+#### Core
+- 13 unit tests for hash.rs and did.rs
+
+#### MCP Server
+- 7 Claude tools in `packages/vaultmesh_mcp/`:
+  - `guardian_anchor_now`
+  - `guardian_verify_receipt`
+  - `guardian_status`
+  - `treasury_create_budget`
+  - `treasury_balance`
+  - `treasury_debit`
+  - `treasury_credit`
+- Receipt emission for all tool calls
+
+#### CI/CD
+- GitLab CI pipeline with build/test/lint stages
+- `observability-smoke` job for exporter tests
+- `guardian-metrics-integration` job for guardian metrics integration test
+- `mesh-metrics-integration` job for mesh metrics integration test
+- `before_script` to ensure receipts directories exist (including `receipts/mesh`)
+
+### Notes
+- Total tests in workspace: 40 (38 workspace + 2 metrics integration)
+- Level-of-Done score: 2.5 → 4.0

diff --git a/Cargo.lock b/Cargo.lock
new file mode 100644
index 0000000..d55aa87
--- /dev/null
+++ b/Cargo.lock
@@ -0,0 +1,1832 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 4 + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "arrayref" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" + +[[package]] +name = "arrayvec" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" + +[[package]] +name = "autocfg" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + +[[package]] +name = "base64" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" + +[[package]] +name = "blake3" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3888aaa89e4b2a40fca9848e400f6a658a5a3978de7be858e209cafa8be9a4a0" +dependencies = [ + "arrayref", + "arrayvec", + "cc", + "cfg-if", + "constant_time_eq", +] + +[[package]] +name = "bumpalo" +version = "3.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" + +[[package]] +name = "bytes" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" + +[[package]] +name = "cc" +version = "1.2.49" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90583009037521a116abf44494efecd645ba48b6622457080f080b85544e2215" +dependencies = [ + "find-msvc-tools", + "shlex", +] + +[[package]] +name = "cfg-if" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" + +[[package]] +name = "chrono" +version = "0.4.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" +dependencies = [ + "iana-time-zone", + "js-sys", + "num-traits", + "serde", + "wasm-bindgen", + "windows-link", +] + +[[package]] +name = "constant_time_eq" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" + +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "encoding_rs" +version = "0.8.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" +dependencies = [ + "libc", + "windows-sys 0.61.2", +] + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "find-msvc-tools" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844" + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + +[[package]] +name = "form_urlencoded" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "futures-channel" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +dependencies = [ + "futures-core", +] + +[[package]] +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-sink" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" + +[[package]] +name = "futures-task" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + +[[package]] +name = "futures-util" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +dependencies = [ + "futures-core", + "futures-task", + "pin-project-lite", + "pin-utils", +] + +[[package]] +name = "getrandom" +version = "0.2.16" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "getrandom" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" +dependencies = [ + "cfg-if", + "libc", + "r-efi", + "wasip2", +] + +[[package]] +name = "h2" +version = "0.3.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0beca50380b1fc32983fc1cb4587bfa4bb9e78fc259aad4a0032d2080309222d" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "hashbrown" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" + +[[package]] +name = "http" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" +dependencies = [ + "bytes", + "http", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" + +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + +[[package]] +name = "hyper" +version = "0.14.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "socket2 0.5.10", + "tokio", + "tower-service", + "tracing", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" +dependencies = [ + "futures-util", + "http", + "hyper", + "rustls", + "tokio", + "tokio-rustls", +] + +[[package]] +name = "hyper-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +dependencies = [ + "bytes", + "hyper", + "native-tls", + "tokio", + "tokio-native-tls", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "log", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" 
+dependencies = [ + "cc", +] + +[[package]] +name = "icu_collections" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" +dependencies = [ + "displaydoc", + "potential_utf", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locale_core" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" +dependencies = [ + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" + +[[package]] +name = "icu_properties" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e93fcd3157766c0c8da2f8cff6ce651a31f0810eaa1c51ec363ef790bbb5fb99" +dependencies = [ + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02845b3647bb045f1100ecd6480ff52f34c35f82d9880e029d329c21d1054899" + +[[package]] +name = "icu_provider" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" +dependencies = [ + "displaydoc", + "icu_locale_core", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", +] + +[[package]] +name = "idna" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "indexmap" +version = "2.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ad4bb2b565bca0645f4d68c5c9af97fba094e9791da685bf83cb5f3ce74acf2" +dependencies = [ + "equivalent", + "hashbrown", +] + +[[package]] +name = "ipnet" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "js-sys" +version = "0.3.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "libc" +version = "0.2.178" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091" + +[[package]] +name = "linux-raw-sys" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" + +[[package]] +name = "litemap" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" + +[[package]] +name = "lock_api" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" + +[[package]] +name = "memchr" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "mio" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" +dependencies = [ + "libc", + "wasi", + "windows-sys 0.61.2", +] + +[[package]] +name = "native-tls" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" +dependencies = [ + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "openssl" +version = "0.10.75" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" +dependencies = [ + "bitflags 2.10.0", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "openssl-probe" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" + +[[package]] +name = "openssl-sys" +version = "0.9.111" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "parking_lot" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-link", +] + +[[package]] +name = "percent-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkg-config" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" + +[[package]] +name = "potential_utf" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" +dependencies = [ + "zerovec", +] + +[[package]] +name = "proc-macro2" +version = "1.0.103" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "prometheus" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d33c28a30771f7f96db69893f78b857f7450d7e0237e9c8fc6427a81bae7ed1" +dependencies = [ + "cfg-if", + "fnv", + "lazy_static", + "memchr", + "parking_lot", + "protobuf", + "thiserror", +] + +[[package]] +name = "protobuf" +version = "2.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" + +[[package]] +name = "quote" +version = "1.0.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "redox_syscall" +version = "0.5.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" +dependencies = [ + "bitflags 2.10.0", +] + +[[package]] +name = "reqwest" +version = "0.11.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" +dependencies = [ + "base64", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-rustls", + "hyper-tls", + 
"ipnet", + "js-sys", + "log", + "mime", + "native-tls", + "once_cell", + "percent-encoding", + "pin-project-lite", + "rustls", + "rustls-pemfile", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "system-configuration", + "tokio", + "tokio-native-tls", + "tokio-rustls", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "webpki-roots", + "winreg", +] + +[[package]] +name = "ring" +version = "0.17.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if", + "getrandom 0.2.16", + "libc", + "untrusted", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustix" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" +dependencies = [ + "bitflags 2.10.0", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.61.2", +] + +[[package]] +name = "rustls" +version = "0.21.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" +dependencies = [ + "log", + "ring", + "rustls-webpki", + "sct", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +dependencies = [ + "base64", +] + +[[package]] +name = "rustls-webpki" +version = "0.101.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "schannel" +version = "0.1.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "sct" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "security-framework" +version = "2.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +dependencies = [ + "bitflags 2.10.0", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "serde" +version = "1.0.228" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.145" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", + "serde_core", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "slab" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + +[[package]] +name = "socket2" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "socket2" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" + +[[package]] +name = "syn" +version = "2.0.111" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + +[[package]] +name = "synstructure" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "system-configuration" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + 
"system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "tempfile" +version = "3.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" +dependencies = [ + "fastrand", + "getrandom 0.3.4", + "once_cell", + "rustix", + "windows-sys 0.61.2", +] + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tinystr" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "tokio" +version = "1.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" +dependencies = [ + "bytes", + "libc", + "mio", + "pin-project-lite", + "socket2 0.6.1", + "tokio-macros", + "windows-sys 0.61.2", +] + +[[package]] +name = "tokio-macros" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +dependencies = [ + "rustls", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.7.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + +[[package]] +name = "tracing" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d15d90a0b5c19378952d479dc858407149d7bb45a14de0142f6c534b16fc647" +dependencies = [ + "pin-project-lite", + "tracing-core", +] + +[[package]] +name = "tracing-core" +version = "0.1.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a04e24fab5c89c6a36eb8558c9656f30d81de51dfa4d3b45f26b21d61fa0a6c" +dependencies = [ + "once_cell", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "unicode-ident" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", + "serde", +] + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "vaultmesh-automation" +version = "0.1.0" +dependencies = [ + "vaultmesh-core", +] + +[[package]] +name = "vaultmesh-core" +version = "0.1.0" +dependencies = [ + "blake3", + "chrono", + "serde", + "serde_json", +] + +[[package]] +name = "vaultmesh-guardian" +version = "0.1.0" +dependencies = [ + "chrono", + "reqwest", + "serde", + "serde_json", + "tempfile", + "tokio", + "vaultmesh-core", + "vaultmesh-observability", +] + +[[package]] +name = "vaultmesh-identity" +version = "0.1.0" +dependencies = [ + "chrono", + "serde", + "serde_json", + "vaultmesh-core", +] + +[[package]] +name = "vaultmesh-mesh" +version = "0.1.0" +dependencies = [ + "chrono", + "reqwest", + "serde", + "serde_json", + "tempfile", + "tokio", + "vaultmesh-core", + "vaultmesh-observability", +] + +[[package]] +name = "vaultmesh-observability" +version = "0.1.0" +dependencies = [ + "hyper", + "prometheus", + "reqwest", + "serde", + "serde_json", + "tokio", + "vaultmesh-core", +] + +[[package]] +name = "vaultmesh-offsec" +version = "0.1.0" +dependencies = [ + "vaultmesh-core", +] + +[[package]] +name = "vaultmesh-psi" +version = "0.1.0" +dependencies = [ + "vaultmesh-core", +] + +[[package]] +name = "vaultmesh-treasury" +version = "0.1.0" +dependencies = [ + "chrono", + "serde", + "serde_json", + "tempfile", + "vaultmesh-core", + "vaultmesh-observability", +] + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasip2" +version = "1.0.1+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" +dependencies = [ + "wit-bindgen", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + 
"wasm-bindgen-macro", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.56" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "836d9622d604feee9e5de25ac10e3ea5f2d65b41eac0d9ce72eb5deae707ce7c" +dependencies = [ + "cfg-if", + "js-sys", + "once_cell", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" +dependencies = [ + "bumpalo", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "web-sys" +version = "0.3.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki-roots" +version = "0.25.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" + +[[package]] +name = "windows-core" +version = "0.62.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link", + "windows-result", + "windows-strings", +] + +[[package]] +name = "windows-implement" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-interface" +version = "0.59.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-result" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.5", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_i686_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" + +[[package]] +name = "winreg" +version = "0.50.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" +dependencies = [ + "cfg-if", + "windows-sys 0.48.0", +] + +[[package]] +name = "wit-bindgen" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" + +[[package]] +name = "writeable" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" + +[[package]] +name = "yoke" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" +dependencies = [ + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "zerotrie" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..05d37e9 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,31 @@ +[workspace] +members = [ + "vaultmesh-core", + "vaultmesh-treasury", + "vaultmesh-identity", + "vaultmesh-mesh", + "vaultmesh-offsec", + "vaultmesh-observability", + "vaultmesh-automation", + "vaultmesh-psi", + "vaultmesh-guardian", +] +resolver = "2" + +# Release profile optimizations +[profile.release] +opt-level = 3 +lto = "thin" +codegen-units = 1 +panic = "abort" +strip = true + +# Development profile with some optimizations for faster testing +[profile.dev] +opt-level = 1 + +# Profile for maximum performance (use with --profile release-max) +[profile.release-max] +inherits = "release" +lto = "fat" +codegen-units = 1 diff --git a/Containerfile b/Containerfile new file mode 100644 index 0000000..9f1c331 --- /dev/null +++ b/Containerfile @@ -0,0 +1,40 @@ +# VaultMesh Sentinel Verifier +# Minimal image for deterministic verification tasks +# No daemon, no ports, stateless + +FROM python:3.12-slim + +LABEL org.opencontainers.image.title="VaultMesh Sentinel" +LABEL org.opencontainers.image.description="Deterministic seal bundle 
verifier" +LABEL org.opencontainers.image.source="https://gitlab.com/vaultsovereign/ops" + +# Avoid interactive prompts +ENV DEBIAN_FRONTEND=noninteractive +ENV PYTHONDONTWRITEBYTECODE=1 +ENV PYTHONUNBUFFERED=1 + +# Install minimal dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + git \ + && rm -rf /var/lib/apt/lists/* + +# Create non-root user +RUN useradd --create-home --shell /bin/bash sentinel +USER sentinel +WORKDIR /home/sentinel + +# Install Python dependencies +COPY --chown=sentinel:sentinel pyproject.toml requirements.txt* ./ +RUN pip install --user --no-cache-dir click blake3 + +# Copy application code +COPY --chown=sentinel:sentinel cli/ ./cli/ +COPY --chown=sentinel:sentinel tools/ ./tools/ +COPY --chown=sentinel:sentinel ledger/ ./ledger/ + +# Set PATH for user-installed packages +ENV PATH="/home/sentinel/.local/bin:${PATH}" + +# Default entrypoint - verifier help +ENTRYPOINT ["python3"] +CMD ["tools/vm_verify_sentinel_bundle.py", "--help"] diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/.gitignore b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/.gitignore new file mode 100644 index 0000000..89f9ac0 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/.gitignore @@ -0,0 +1 @@ +out/ diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/README.md b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/README.md new file mode 100644 index 0000000..bce3710 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/README.md @@ -0,0 +1,58 @@ +# MERIDIAN v1 Conformance Test Suite (Offline / Deterministic / Build‑Blocking) + +This suite turns MERIDIAN v1 into an **executable compliance gate**. + +What it tests (normative): +- MERIDIAN v1 is **Sentinel‑governed**; verification is defined by the Sentinel offline verifier in `--strict` mode. +- No proprietary verifier codes: expected failures are **Sentinel v1 failure codes** only. + +Normative references: +- `2025-12-18-07h58m04s-attachments/MERIDIAN_V1_SPEC.md` +- `2025-12-18-07h58m04s-attachments/MERIDIAN_V1_EVENT_CONTRACT_MATRIX.md` +- `vaultmesh-orgine-mobile/spec/SENTINEL_FAILURE_CODE_SEMANTICS.md` +- `vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py` + +--- + +## Run (one command) + +From `vaultmesh-orgine-mobile/`: + +```bash +bash MERIDIAN_V1_CONFORMANCE_TEST_SUITE/run.sh +``` + +What `run.sh` does: +1. Verifies Sentinel contract parity (code ↔ docs) via `tools/check_sentinel_contract_parity.py`. +2. Runs each testvector in `manifest.yaml` through `tools/vm_verify_sentinel_bundle.py --strict`. +3. Produces an auditor‑readable report (JSON + text) under `out/`. + +--- + +## Output + +`out/` contains: +- `out/meridian_v1_conformance_report.json` (suite summary + per-test results) +- `out/meridian_v1_conformance_report.txt` (human summary) +- `out/sentinel_reports/.verification_report.json` (verifier output per test) +- `out/sentinel_stdio/.stderr.txt` (verifier stderr per test) + +--- + +## Interpreting results + +- Suite PASS means the fixtures and verifier behave as the MERIDIAN v1 spec requires. +- A deployment MAY claim “MERIDIAN v1 compliant” for a specific bundle **iff**: + +`python3 tools/vm_verify_sentinel_bundle.py --bundle --strict` returns `PASS` + +--- + +## Notes on “killer tests” terminology + +MERIDIAN v1 does not introduce new verifier failure codes. 
+ +Examples: +- “silent denial” ⇒ Sentinel strict-mode failure `E_CHAIN_DISCONTINUITY` +- “unbounded automation” (v1 boundary) ⇒ modeled as “execution without intent” ⇒ `E_CHAIN_DISCONTINUITY` + diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/README.md b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/README.md new file mode 100644 index 0000000..58e3d26 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/README.md @@ -0,0 +1,15 @@ +# fixtures/ + +Fixtures are **Sentinel v1 seal bundles** used as deterministic testvectors for MERIDIAN v1 invariants. + +Layout: +- `pass/` bundles must verify with `--strict` and produce `PASS`. +- `fail/` bundles must fail with a specific Sentinel v1 `failure_code` (see `manifest.yaml`). + +Each fixture directory is a bundle containing at minimum: +- `seal.json` +- `integrity.json` +- `verifier_manifest.json` +- receipts/events file(s) (typically `receipts.jsonl`) +- `roots.txt` + diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/E_EVENT_HASH_MISMATCH/README.md b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/E_EVENT_HASH_MISMATCH/README.md new file mode 100644 index 0000000..3787a46 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/E_EVENT_HASH_MISMATCH/README.md @@ -0,0 +1 @@ +MERIDIAN v1 conformance fixture: event_hash_mismatch diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/E_EVENT_HASH_MISMATCH/integrity.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/E_EVENT_HASH_MISMATCH/integrity.json new file mode 100644 index 0000000..53b2ed7 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/E_EVENT_HASH_MISMATCH/integrity.json @@ -0,0 +1,31 @@ +{ + "files": [ + { + "digest": "sha256:1d37e53b7dbd608ab9418a1d9cc872d51100ec35020c6853aa1a2ff1c13d430b", + "path": "README.md", + "size_bytes": 53 + }, + { + "digest": "sha256:6555ba75e9c12063b8ea45aa7f2b8d42c0640bd7c34d5dec094d432347ea67fe", + "path": "receipts.jsonl", + "size_bytes": 1200 + }, + { + "digest": "sha256:ab1e7d06897cf422b09e688ef6346756a1b4cea3237c350f4f5f9d4b3fb5becc", + "path": "roots.txt", + "size_bytes": 211 + }, + { + "digest": "sha256:18c4ad7ddbb198e6fc8efe5e4769450a2856a7eb1d9e587937b306832c864030", + "path": "seal.json", + "size_bytes": 707 + }, + { + "digest": "sha256:421bc39113331983867b523c4019984ddee73136cde3af5e540978549101d4d8", + "path": "verifier_manifest.json", + "size_bytes": 238 + } + ], + "format": "vm-sentinel-integrity-v1", + "hash_algo": "sha256" +} diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/E_EVENT_HASH_MISMATCH/receipts.jsonl b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/E_EVENT_HASH_MISMATCH/receipts.jsonl new file mode 100644 index 0000000..ce1f2e1 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/E_EVENT_HASH_MISMATCH/receipts.jsonl @@ -0,0 +1,2 @@ +{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e","event_id":"00000000-0000-4000-8000-000000000101","event_type":"boot_event","op":"sentinel.boot_event.v1","op_digest":"sha256:d5b3a96e7033a4d0dcf2b452826cbe950f80f354ee5166487312fba6fde10758","payload":{"params":{"canonicalization_version":"sentinel-event-jcs-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0"}},"prev_event_hash":"0","result":"ok","seq":0,"trace_id":"11111111-1111-4111-8111-111111111111","ts":"2025-03-17T03:17:40Z"} 
+{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:0000000000000000000000000000000000000000000000000000000000000000","event_id":"00000000-0000-4000-8000-000000000102","event_type":"tamper_signal","op":"meridian.v1.tamper_signal","op_digest":"sha256:ab8c0984c1dec7b04adc4d6eb51ba3065e339ff63b55d00aa6169e5047004c0f","payload":{"kind":"tamper_signal","params":{"signal":"case_open"}},"prev_event_hash":"sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e","result":"ok","seq":1,"trace_id":"33333333-3333-4333-8333-333333333333","ts":"2025-03-17T03:17:41Z"} diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/E_EVENT_HASH_MISMATCH/roots.txt b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/E_EVENT_HASH_MISMATCH/roots.txt new file mode 100644 index 0000000..9c36231 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/E_EVENT_HASH_MISMATCH/roots.txt @@ -0,0 +1,3 @@ +# Sentinel root history (seq -> merkle root) +seq=0 root=sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e +seq=1 root=sha256:086b680e09037deacf61ba9cc73b3c7da2737db7ef3802c887b12e6d76bd85ee diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/E_EVENT_HASH_MISMATCH/seal.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/E_EVENT_HASH_MISMATCH/seal.json new file mode 100644 index 0000000..8e57efd --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/E_EVENT_HASH_MISMATCH/seal.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","created_at":"2025-03-17T03:17:42Z","files":{"integrity":"integrity.json","receipts":"receipts.jsonl","roots":"roots.txt","verifier_manifest":"verifier_manifest.json"},"format":"vm-sentinel-seal-v1","hash_algo":"sha256","instance_id":"did:vm:meridian:gw:demo","ledger_type":"jsonl","range":{"since_seq":0,"since_ts":"2025-03-17T03:17:40Z","until_seq":1,"until_ts":"2025-03-17T03:17:41Z"},"root":{"end":"sha256:086b680e09037deacf61ba9cc73b3c7da2737db7ef3802c887b12e6d76bd85ee","seq":1,"start":"sha256:2e1cfa82b035c26cbbbdae632cea070514eb8b773f616aaeaf668e2f0be8f10d"},"schema_version":"1.0.0","seal_id":"mv1_event_hash_mismatch","sentinel_version":"0.1.0"} \ No newline at end of file diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/E_EVENT_HASH_MISMATCH/verifier_manifest.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/E_EVENT_HASH_MISMATCH/verifier_manifest.json new file mode 100644 index 0000000..95de11b --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/E_EVENT_HASH_MISMATCH/verifier_manifest.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","format":"vm-sentinel-verifier-manifest-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0","verifier":{"name":"vm_verify_sentinel_bundle.py","version":"0.1.0"}} \ No newline at end of file diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/canon_version_unsupported/README.md b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/canon_version_unsupported/README.md new file mode 100644 index 0000000..96acce5 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/canon_version_unsupported/README.md @@ -0,0 +1 @@ +MERIDIAN v1 conformance fixture: canon_version_unsupported diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/canon_version_unsupported/integrity.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/canon_version_unsupported/integrity.json new file mode 100644 index 0000000..9a2b79a --- /dev/null +++ 
b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/canon_version_unsupported/integrity.json @@ -0,0 +1,31 @@ +{ + "files": [ + { + "digest": "sha256:11bddf1e93d24997bd7d7a94f6395666edda587284e5ee4d2022d566f04f1957", + "path": "README.md", + "size_bytes": 59 + }, + { + "digest": "sha256:fe99e620546158cba1855aef378ce7c14de48c89e99bf7ee9eb259340d21f1df", + "path": "receipts.jsonl", + "size_bytes": 1200 + }, + { + "digest": "sha256:ab1e7d06897cf422b09e688ef6346756a1b4cea3237c350f4f5f9d4b3fb5becc", + "path": "roots.txt", + "size_bytes": 211 + }, + { + "digest": "sha256:80f5f348140ab695b27edd60b4edcb8bffaaa9a148ea546c4a467a0185feac8c", + "path": "seal.json", + "size_bytes": 715 + }, + { + "digest": "sha256:69f602c85f266f780deeb99085531e43ca8ecf0062f7d68a727de0dd05a4d824", + "path": "verifier_manifest.json", + "size_bytes": 240 + } + ], + "format": "vm-sentinel-integrity-v1", + "hash_algo": "sha256" +} diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/canon_version_unsupported/receipts.jsonl b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/canon_version_unsupported/receipts.jsonl new file mode 100644 index 0000000..b43d16a --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/canon_version_unsupported/receipts.jsonl @@ -0,0 +1,2 @@ +{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e","event_id":"00000000-0000-4000-8000-000000000101","event_type":"boot_event","op":"sentinel.boot_event.v1","op_digest":"sha256:d5b3a96e7033a4d0dcf2b452826cbe950f80f354ee5166487312fba6fde10758","payload":{"params":{"canonicalization_version":"sentinel-event-jcs-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0"}},"prev_event_hash":"0","result":"ok","seq":0,"trace_id":"11111111-1111-4111-8111-111111111111","ts":"2025-03-17T03:17:40Z"} +{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:e94f886565c5ddb0593a763d185a9f939895cddbb9fe678474097bd44642636c","event_id":"00000000-0000-4000-8000-000000000102","event_type":"tamper_signal","op":"meridian.v1.tamper_signal","op_digest":"sha256:ab8c0984c1dec7b04adc4d6eb51ba3065e339ff63b55d00aa6169e5047004c0f","payload":{"kind":"tamper_signal","params":{"signal":"case_open"}},"prev_event_hash":"sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e","result":"ok","seq":1,"trace_id":"33333333-3333-4333-8333-333333333333","ts":"2025-03-17T03:17:41Z"} diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/canon_version_unsupported/roots.txt b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/canon_version_unsupported/roots.txt new file mode 100644 index 0000000..9c36231 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/canon_version_unsupported/roots.txt @@ -0,0 +1,3 @@ +# Sentinel root history (seq -> merkle root) +seq=0 root=sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e +seq=1 root=sha256:086b680e09037deacf61ba9cc73b3c7da2737db7ef3802c887b12e6d76bd85ee diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/canon_version_unsupported/seal.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/canon_version_unsupported/seal.json new file mode 100644 index 0000000..dfa6e3a --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/canon_version_unsupported/seal.json @@ -0,0 +1 @@ 
+{"canonicalization_version":"sentinel-event-jcs-v999","created_at":"2025-03-17T03:17:42Z","files":{"integrity":"integrity.json","receipts":"receipts.jsonl","roots":"roots.txt","verifier_manifest":"verifier_manifest.json"},"format":"vm-sentinel-seal-v1","hash_algo":"sha256","instance_id":"did:vm:meridian:gw:demo","ledger_type":"jsonl","range":{"since_seq":0,"since_ts":"2025-03-17T03:17:40Z","until_seq":1,"until_ts":"2025-03-17T03:17:41Z"},"root":{"end":"sha256:086b680e09037deacf61ba9cc73b3c7da2737db7ef3802c887b12e6d76bd85ee","seq":1,"start":"sha256:2e1cfa82b035c26cbbbdae632cea070514eb8b773f616aaeaf668e2f0be8f10d"},"schema_version":"1.0.0","seal_id":"mv1_canon_version_unsupported","sentinel_version":"0.1.0"} \ No newline at end of file diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/canon_version_unsupported/verifier_manifest.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/canon_version_unsupported/verifier_manifest.json new file mode 100644 index 0000000..a54f13f --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/canon_version_unsupported/verifier_manifest.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v999","format":"vm-sentinel-verifier-manifest-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0","verifier":{"name":"vm_verify_sentinel_bundle.py","version":"0.1.0"}} \ No newline at end of file diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/double_outcome/README.md b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/double_outcome/README.md new file mode 100644 index 0000000..a3bd325 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/double_outcome/README.md @@ -0,0 +1 @@ +MERIDIAN v1 conformance fixture: double_outcome diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/double_outcome/integrity.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/double_outcome/integrity.json new file mode 100644 index 0000000..c186dfe --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/double_outcome/integrity.json @@ -0,0 +1,31 @@ +{ + "files": [ + { + "digest": "sha256:f6c5fff056a8fab6b5df6a8ddd085c6f0ac33bc659fc653434233127bca1c130", + "path": "README.md", + "size_bytes": 48 + }, + { + "digest": "sha256:4a4284db18107347fd0c7645b08c614777b5c88375f797561e197c1fec5103f7", + "path": "receipts.jsonl", + "size_bytes": 2698 + }, + { + "digest": "sha256:a782d2beebe78664aca0dbe2b42beed275042afdcc8643d5fd6a2ff960dea739", + "path": "roots.txt", + "size_bytes": 377 + }, + { + "digest": "sha256:1fc193716891d3cde4486d09c8705041cab1654ba9583a4987299aa5b097650c", + "path": "seal.json", + "size_bytes": 702 + }, + { + "digest": "sha256:421bc39113331983867b523c4019984ddee73136cde3af5e540978549101d4d8", + "path": "verifier_manifest.json", + "size_bytes": 238 + } + ], + "format": "vm-sentinel-integrity-v1", + "hash_algo": "sha256" +} diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/double_outcome/receipts.jsonl b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/double_outcome/receipts.jsonl new file mode 100644 index 0000000..145008f --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/double_outcome/receipts.jsonl @@ -0,0 +1,4 @@ 
+{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:f8e7607b7d45d306b9da8361593a115e45c91c4efce19790aaee4308ee836d4a","event_id":"00000000-0000-4000-8000-000000000221","event_type":"boot_event","op":"sentinel.boot_event.v1","op_digest":"sha256:d5b3a96e7033a4d0dcf2b452826cbe950f80f354ee5166487312fba6fde10758","payload":{"params":{"canonicalization_version":"sentinel-event-jcs-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0"}},"prev_event_hash":"0","result":"ok","seq":0,"trace_id":"11111111-1111-4111-8111-111111111111","ts":"2025-03-17T03:17:40Z"} +{"actor":"did:vm:operator:demo","cap_hash":"sha256:dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd","event_hash":"sha256:226a880a1f30a1fd24be533deecca73af5b9badece02cbb770cff8089e1d78c2","event_id":"00000000-0000-4000-8000-000000000222","event_type":"action_intent","op":"meridian.v1.plc.write","op_digest":"sha256:8ce9de554fc5a237b1f8a7d0b4711058c0c2ad933c70165c4d900337afc6cf84","payload":{"kind":"command_requested","params":{"plc":"plc:demo","register":"R3","value":"1"}},"prev_event_hash":"sha256:f8e7607b7d45d306b9da8361593a115e45c91c4efce19790aaee4308ee836d4a","result":"ok","seq":1,"trace_id":"22222222-2222-4222-8222-222222222224","ts":"2025-03-17T03:17:41Z"} +{"actor":"did:vm:meridian:gw:demo","cap_hash":"sha256:dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd","event_hash":"sha256:ce6bfb75f3792c08b0237d7d051c6af8ec0193c2d14c81845f835e53f621c251","event_id":"00000000-0000-4000-8000-000000000223","event_type":"action_executed","op":"meridian.v1.plc.write","op_digest":"sha256:8ce9de554fc5a237b1f8a7d0b4711058c0c2ad933c70165c4d900337afc6cf84","payload":{"kind":"command_executed","params":{"plc":"plc:demo","register":"R3","value":"1"}},"prev_event_hash":"sha256:226a880a1f30a1fd24be533deecca73af5b9badece02cbb770cff8089e1d78c2","result":"ok","seq":2,"trace_id":"22222222-2222-4222-8222-222222222224","ts":"2025-03-17T03:17:42Z"} +{"actor":"did:vm:meridian:gw:demo","cap_hash":"sha256:dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd","event_hash":"sha256:faecc8feaf604e012cddd574260937d35c5b72ab2639e6c5f0a39e294a06dff8","event_id":"00000000-0000-4000-8000-000000000224","event_type":"shadow_receipt","op":"meridian.v1.plc.write","op_digest":"sha256:8ce9de554fc5a237b1f8a7d0b4711058c0c2ad933c70165c4d900337afc6cf84","payload":{"kind":"command_refused","params":{"plc":"plc:demo","register":"R3","value":"1"},"reason_code":"policy_denied"},"prev_event_hash":"sha256:ce6bfb75f3792c08b0237d7d051c6af8ec0193c2d14c81845f835e53f621c251","result":"deny","seq":3,"trace_id":"22222222-2222-4222-8222-222222222224","ts":"2025-03-17T03:17:43Z"} diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/double_outcome/roots.txt b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/double_outcome/roots.txt new file mode 100644 index 0000000..d65fba5 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/double_outcome/roots.txt @@ -0,0 +1,5 @@ +# Sentinel root history (seq -> merkle root) +seq=0 root=sha256:f8e7607b7d45d306b9da8361593a115e45c91c4efce19790aaee4308ee836d4a +seq=1 root=sha256:a1e813b1a70383dfc2ede487c125d606936ecad92a68ebe3d37d37d7c98f78df +seq=2 root=sha256:01393640058eca7ac43a6aae2112966c234f0bbfc58c1c67ffd88192bf6858da +seq=3 root=sha256:ca845f9f1caf91eb2beb556ed6feb2e9ce9fd6cf7ec6bdd6100c343700ec9b2f diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/double_outcome/seal.json 
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/double_outcome/seal.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/double_outcome/seal.json
new file mode 100644
index 0000000..bcc4fbd
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/double_outcome/seal.json
@@ -0,0 +1 @@
+{"canonicalization_version":"sentinel-event-jcs-v1","created_at":"2025-03-17T03:17:44Z","files":{"integrity":"integrity.json","receipts":"receipts.jsonl","roots":"roots.txt","verifier_manifest":"verifier_manifest.json"},"format":"vm-sentinel-seal-v1","hash_algo":"sha256","instance_id":"did:vm:meridian:gw:demo","ledger_type":"jsonl","range":{"since_seq":0,"since_ts":"2025-03-17T03:17:40Z","until_seq":3,"until_ts":"2025-03-17T03:17:43Z"},"root":{"end":"sha256:ca845f9f1caf91eb2beb556ed6feb2e9ce9fd6cf7ec6bdd6100c343700ec9b2f","seq":3,"start":"sha256:2e1cfa82b035c26cbbbdae632cea070514eb8b773f616aaeaf668e2f0be8f10d"},"schema_version":"1.0.0","seal_id":"mv1_double_outcome","sentinel_version":"0.1.0"}
\ No newline at end of file
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/double_outcome/verifier_manifest.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/double_outcome/verifier_manifest.json
new file mode 100644
index 0000000..95de11b
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/double_outcome/verifier_manifest.json
@@ -0,0 +1 @@
+{"canonicalization_version":"sentinel-event-jcs-v1","format":"vm-sentinel-verifier-manifest-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0","verifier":{"name":"vm_verify_sentinel_bundle.py","version":"0.1.0"}}
\ No newline at end of file
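
The double_outcome receipts above record both a command_executed (seq 2) and a command_refused shadow receipt (seq 3) for the same trace_id, while MERIDIAN v1 expects exactly one terminal outcome per intent. A hedged sketch of the check, treating action_executed and shadow_receipt as the outcome-bearing event types seen in these fixtures (names illustrative):

    import json
    from collections import Counter

    # Event types that close out an intent trace in these fixtures.
    OUTCOME_EVENT_TYPES = {"action_executed", "shadow_receipt"}

    def find_double_outcomes(receipts_path):
        """Return trace_ids that carry more than one terminal outcome."""
        outcomes = Counter()
        with open(receipts_path, encoding="utf-8") as f:
            for line in f:
                event = json.loads(line)
                if event["event_type"] in OUTCOME_EVENT_TYPES:
                    outcomes[event["trace_id"]] += 1
        return [trace for trace, n in outcomes.items() if n > 1]
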
+{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e","event_id":"00000000-0000-4000-8000-000000000101","event_type":"boot_event","op":"sentinel.boot_event.v1","op_digest":"sha256:d5b3a96e7033a4d0dcf2b452826cbe950f80f354ee5166487312fba6fde10758","payload":{"params":{"canonicalization_version":"sentinel-event-jcs-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0"}},"prev_event_hash":"0","result":"ok","seq":0,"trace_id":"11111111-1111-4111-8111-111111111111","ts":"2025-03-17T03:17:40Z"} +{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:0000000000000000000000000000000000000000000000000000000000000000","event_id":"00000000-0000-4000-8000-000000000102","event_type":"tamper_signal","op":"meridian.v1.tamper_signal","op_digest":"sha256:ab8c0984c1dec7b04adc4d6eb51ba3065e339ff63b55d00aa6169e5047004c0f","payload":{"kind":"tamper_signal","params":{"signal":"case_open"}},"prev_event_hash":"sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e","result":"ok","seq":1,"trace_id":"33333333-3333-4333-8333-333333333333","ts":"2025-03-17T03:17:41Z"} diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/event_hash_mismatch/roots.txt b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/event_hash_mismatch/roots.txt new file mode 100644 index 0000000..9c36231 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/event_hash_mismatch/roots.txt @@ -0,0 +1,3 @@ +# Sentinel root history (seq -> merkle root) +seq=0 root=sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e +seq=1 root=sha256:086b680e09037deacf61ba9cc73b3c7da2737db7ef3802c887b12e6d76bd85ee diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/event_hash_mismatch/seal.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/event_hash_mismatch/seal.json new file mode 100644 index 0000000..8e57efd --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/event_hash_mismatch/seal.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","created_at":"2025-03-17T03:17:42Z","files":{"integrity":"integrity.json","receipts":"receipts.jsonl","roots":"roots.txt","verifier_manifest":"verifier_manifest.json"},"format":"vm-sentinel-seal-v1","hash_algo":"sha256","instance_id":"did:vm:meridian:gw:demo","ledger_type":"jsonl","range":{"since_seq":0,"since_ts":"2025-03-17T03:17:40Z","until_seq":1,"until_ts":"2025-03-17T03:17:41Z"},"root":{"end":"sha256:086b680e09037deacf61ba9cc73b3c7da2737db7ef3802c887b12e6d76bd85ee","seq":1,"start":"sha256:2e1cfa82b035c26cbbbdae632cea070514eb8b773f616aaeaf668e2f0be8f10d"},"schema_version":"1.0.0","seal_id":"mv1_event_hash_mismatch","sentinel_version":"0.1.0"} \ No newline at end of file diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/event_hash_mismatch/verifier_manifest.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/event_hash_mismatch/verifier_manifest.json new file mode 100644 index 0000000..95de11b --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/event_hash_mismatch/verifier_manifest.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","format":"vm-sentinel-verifier-manifest-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0","verifier":{"name":"vm_verify_sentinel_bundle.py","version":"0.1.0"}} \ No newline at end of file diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/execution_without_intent/README.md 
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/execution_without_intent/README.md b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/execution_without_intent/README.md
new file mode 100644
index 0000000..5d2a038
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/execution_without_intent/README.md
@@ -0,0 +1 @@
+MERIDIAN v1 conformance fixture: execution_without_intent
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/execution_without_intent/integrity.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/execution_without_intent/integrity.json
new file mode 100644
index 0000000..eb52f6f
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/execution_without_intent/integrity.json
@@ -0,0 +1,31 @@
+{
+  "files": [
+    {
+      "digest": "sha256:cc5a97bbce56fd6bbf0fcc1491a9ca68aa45afd99d5d39b0499a7c1db665135f",
+      "path": "README.md",
+      "size_bytes": 58
+    },
+    {
+      "digest": "sha256:216c9d68e68651c04819ea7d8e9a274fccbe26963728e2a4b472d62cf9c07429",
+      "path": "receipts.jsonl",
+      "size_bytes": 1292
+    },
+    {
+      "digest": "sha256:0396a0afb4802666620d63682da492bf219867fc7099aa0628df71aed60a2b42",
+      "path": "roots.txt",
+      "size_bytes": 211
+    },
+    {
+      "digest": "sha256:2b21d2200ae30ce18e9803ce18dc338b0429636d0a3f6abf6eaf2715340344ed",
+      "path": "seal.json",
+      "size_bytes": 712
+    },
+    {
+      "digest": "sha256:421bc39113331983867b523c4019984ddee73136cde3af5e540978549101d4d8",
+      "path": "verifier_manifest.json",
+      "size_bytes": 238
+    }
+  ],
+  "format": "vm-sentinel-integrity-v1",
+  "hash_algo": "sha256"
+}
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/execution_without_intent/receipts.jsonl b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/execution_without_intent/receipts.jsonl
new file mode 100644
index 0000000..9150fa2
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/execution_without_intent/receipts.jsonl
@@ -0,0 +1,2 @@
+{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:c66fee7334d8077b76e96500eef6fded3d4df6c43adabed19f63e8a8439e476a","event_id":"00000000-0000-4000-8000-000000000211","event_type":"boot_event","op":"sentinel.boot_event.v1","op_digest":"sha256:d5b3a96e7033a4d0dcf2b452826cbe950f80f354ee5166487312fba6fde10758","payload":{"params":{"canonicalization_version":"sentinel-event-jcs-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0"}},"prev_event_hash":"0","result":"ok","seq":0,"trace_id":"11111111-1111-4111-8111-111111111111","ts":"2025-03-17T03:17:40Z"}
+{"actor":"did:vm:meridian:gw:demo","cap_hash":"sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc","event_hash":"sha256:0cf93dc668ae23232678731e8d5286322e5524fdb03e6779efac8933c33e1e1f","event_id":"00000000-0000-4000-8000-000000000212","event_type":"action_executed","op":"meridian.v1.plc.write","op_digest":"sha256:a68906cc6970cc15a611fff2b81a07a31dc93bd7dd5506bf4406cb16d447252b","payload":{"kind":"command_executed","params":{"plc":"plc:demo","register":"R9","value":"1"}},"prev_event_hash":"sha256:c66fee7334d8077b76e96500eef6fded3d4df6c43adabed19f63e8a8439e476a","result":"ok","seq":1,"trace_id":"99999999-9999-4999-8999-999999999999","ts":"2025-03-17T03:17:41Z"}
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/execution_without_intent/roots.txt b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/execution_without_intent/roots.txt
new file mode 100644
index 0000000..3826b06
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/execution_without_intent/roots.txt
@@ -0,0 +1,3 @@
+# Sentinel root history (seq -> merkle root)
+seq=0 root=sha256:c66fee7334d8077b76e96500eef6fded3d4df6c43adabed19f63e8a8439e476a
+seq=1 root=sha256:0ebb872451cb6669ae997e558631fdaa38df560b53c6e09ca51621f2e84c9145
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/execution_without_intent/seal.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/execution_without_intent/seal.json
new file mode 100644
index 0000000..35dc286
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/execution_without_intent/seal.json
@@ -0,0 +1 @@
+{"canonicalization_version":"sentinel-event-jcs-v1","created_at":"2025-03-17T03:17:42Z","files":{"integrity":"integrity.json","receipts":"receipts.jsonl","roots":"roots.txt","verifier_manifest":"verifier_manifest.json"},"format":"vm-sentinel-seal-v1","hash_algo":"sha256","instance_id":"did:vm:meridian:gw:demo","ledger_type":"jsonl","range":{"since_seq":0,"since_ts":"2025-03-17T03:17:40Z","until_seq":1,"until_ts":"2025-03-17T03:17:41Z"},"root":{"end":"sha256:0ebb872451cb6669ae997e558631fdaa38df560b53c6e09ca51621f2e84c9145","seq":1,"start":"sha256:2e1cfa82b035c26cbbbdae632cea070514eb8b773f616aaeaf668e2f0be8f10d"},"schema_version":"1.0.0","seal_id":"mv1_execution_without_intent","sentinel_version":"0.1.0"}
\ No newline at end of file
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/execution_without_intent/verifier_manifest.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/execution_without_intent/verifier_manifest.json
new file mode 100644
index 0000000..95de11b
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/execution_without_intent/verifier_manifest.json
@@ -0,0 +1 @@
+{"canonicalization_version":"sentinel-event-jcs-v1","format":"vm-sentinel-verifier-manifest-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0","verifier":{"name":"vm_verify_sentinel_bundle.py","version":"0.1.0"}}
\ No newline at end of file
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/invalid_jsonl_truncated/README.md b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/invalid_jsonl_truncated/README.md
new file mode 100644
index 0000000..ab7f261
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/invalid_jsonl_truncated/README.md
@@ -0,0 +1 @@
+MERIDIAN v1 conformance fixture: invalid_jsonl_truncated
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/invalid_jsonl_truncated/integrity.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/invalid_jsonl_truncated/integrity.json
new file mode 100644
index 0000000..b1fbd0a
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/invalid_jsonl_truncated/integrity.json
@@ -0,0 +1,31 @@
+{
+  "files": [
+    {
+      "digest": "sha256:076f6c0260405dd67ac641d9ad61a86c2e15e5b77140d41f297bd52c1047de8b",
+      "path": "README.md",
+      "size_bytes": 57
+    },
+    {
+      "digest": "sha256:5ae4c468ed53f5c8a7518ffd24cac29837f20e7377db06aa995fed4b5e88e654",
+      "path": "receipts.jsonl",
+      "size_bytes": 14
+    },
+    {
+      "digest": "sha256:ab1e7d06897cf422b09e688ef6346756a1b4cea3237c350f4f5f9d4b3fb5becc",
+      "path": "roots.txt",
+      "size_bytes": 211
+    },
+    {
+      "digest": "sha256:7be2019d7bca77e2b9e44da0710ae43fc8625ec448938a6aaa69efe533b7a00f",
+      "path": "seal.json",
+      "size_bytes": 711
+    },
+    {
+      "digest": "sha256:421bc39113331983867b523c4019984ddee73136cde3af5e540978549101d4d8",
+      "path": "verifier_manifest.json",
+      "size_bytes": 238
+    }
+  ],
+  "format": "vm-sentinel-integrity-v1",
+  "hash_algo": "sha256"
+}
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/invalid_jsonl_truncated/receipts.jsonl b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/invalid_jsonl_truncated/receipts.jsonl
new file mode 100644
index 0000000..f0413d3
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/invalid_jsonl_truncated/receipts.jsonl
@@ -0,0 +1 @@
+{"truncated":
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/invalid_jsonl_truncated/roots.txt b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/invalid_jsonl_truncated/roots.txt
new file mode 100644
index 0000000..9c36231
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/invalid_jsonl_truncated/roots.txt
@@ -0,0 +1,3 @@
+# Sentinel root history (seq -> merkle root)
+seq=0 root=sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e
+seq=1 root=sha256:086b680e09037deacf61ba9cc73b3c7da2737db7ef3802c887b12e6d76bd85ee
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/invalid_jsonl_truncated/seal.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/invalid_jsonl_truncated/seal.json
new file mode 100644
index 0000000..4b0b633
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/invalid_jsonl_truncated/seal.json
@@ -0,0 +1 @@
+{"canonicalization_version":"sentinel-event-jcs-v1","created_at":"2025-03-17T03:17:42Z","files":{"integrity":"integrity.json","receipts":"receipts.jsonl","roots":"roots.txt","verifier_manifest":"verifier_manifest.json"},"format":"vm-sentinel-seal-v1","hash_algo":"sha256","instance_id":"did:vm:meridian:gw:demo","ledger_type":"jsonl","range":{"since_seq":0,"since_ts":"2025-03-17T03:17:40Z","until_seq":1,"until_ts":"2025-03-17T03:17:41Z"},"root":{"end":"sha256:086b680e09037deacf61ba9cc73b3c7da2737db7ef3802c887b12e6d76bd85ee","seq":1,"start":"sha256:2e1cfa82b035c26cbbbdae632cea070514eb8b773f616aaeaf668e2f0be8f10d"},"schema_version":"1.0.0","seal_id":"mv1_invalid_jsonl_truncated","sentinel_version":"0.1.0"}
\ No newline at end of file
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/invalid_jsonl_truncated/verifier_manifest.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/invalid_jsonl_truncated/verifier_manifest.json
new file mode 100644
index 0000000..95de11b
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/invalid_jsonl_truncated/verifier_manifest.json
@@ -0,0 +1 @@
+{"canonicalization_version":"sentinel-event-jcs-v1","format":"vm-sentinel-verifier-manifest-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0","verifier":{"name":"vm_verify_sentinel_bundle.py","version":"0.1.0"}}
\ No newline at end of file
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/manifest_hash_mismatch/README.md b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/manifest_hash_mismatch/README.md
new file mode 100644
index 0000000..057ce10
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/manifest_hash_mismatch/README.md
@@ -0,0 +1 @@
+MERIDIAN v1 conformance fixture: manifest_hash_mismatch
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/manifest_hash_mismatch/integrity.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/manifest_hash_mismatch/integrity.json
new file mode 100644
index 0000000..16d5f2f
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/manifest_hash_mismatch/integrity.json
@@ -0,0 +1,31 @@
+{
+  "files": [
+    {
+      "digest": "sha256:51fb89251d833c3229d284ce080135f1d53c7a22a5b5c715fe43a7257e61d98c",
+      "path": "README.md",
+      "size_bytes": 56
+    },
+    {
+      "digest": "sha256:2222222222222222222222222222222222222222222222222222222222222222",
+      "path": "receipts.jsonl",
+      "size_bytes": 1200
+    },
+    {
+      "digest": "sha256:ab1e7d06897cf422b09e688ef6346756a1b4cea3237c350f4f5f9d4b3fb5becc",
+      "path": "roots.txt",
+      "size_bytes": 211
+    },
+    {
+      "digest": "sha256:2579685d2b55aa625b00cc62833ce139277ba82d856873d62d78f62a7380c6ca",
+      "path": "seal.json",
+      "size_bytes": 710
+    },
+    {
+      "digest": "sha256:421bc39113331983867b523c4019984ddee73136cde3af5e540978549101d4d8",
+      "path": "verifier_manifest.json",
+      "size_bytes": 238
+    }
+  ],
+  "format": "vm-sentinel-integrity-v1",
+  "hash_algo": "sha256"
+}
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/manifest_hash_mismatch/receipts.jsonl b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/manifest_hash_mismatch/receipts.jsonl
new file mode 100644
index 0000000..b43d16a
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/manifest_hash_mismatch/receipts.jsonl
@@ -0,0 +1,2 @@
+{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e","event_id":"00000000-0000-4000-8000-000000000101","event_type":"boot_event","op":"sentinel.boot_event.v1","op_digest":"sha256:d5b3a96e7033a4d0dcf2b452826cbe950f80f354ee5166487312fba6fde10758","payload":{"params":{"canonicalization_version":"sentinel-event-jcs-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0"}},"prev_event_hash":"0","result":"ok","seq":0,"trace_id":"11111111-1111-4111-8111-111111111111","ts":"2025-03-17T03:17:40Z"}
+{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:e94f886565c5ddb0593a763d185a9f939895cddbb9fe678474097bd44642636c","event_id":"00000000-0000-4000-8000-000000000102","event_type":"tamper_signal","op":"meridian.v1.tamper_signal","op_digest":"sha256:ab8c0984c1dec7b04adc4d6eb51ba3065e339ff63b55d00aa6169e5047004c0f","payload":{"kind":"tamper_signal","params":{"signal":"case_open"}},"prev_event_hash":"sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e","result":"ok","seq":1,"trace_id":"33333333-3333-4333-8333-333333333333","ts":"2025-03-17T03:17:41Z"}
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/manifest_hash_mismatch/roots.txt b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/manifest_hash_mismatch/roots.txt
new file mode 100644
index 0000000..9c36231
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/manifest_hash_mismatch/roots.txt
@@ -0,0 +1,3 @@
+# Sentinel root history (seq -> merkle root)
+seq=0 root=sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e
+seq=1 root=sha256:086b680e09037deacf61ba9cc73b3c7da2737db7ef3802c887b12e6d76bd85ee
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/manifest_hash_mismatch/seal.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/manifest_hash_mismatch/seal.json
new file mode 100644
index 0000000..f61ab20
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/manifest_hash_mismatch/seal.json
@@ -0,0 +1 @@
+{"canonicalization_version":"sentinel-event-jcs-v1","created_at":"2025-03-17T03:17:42Z","files":{"integrity":"integrity.json","receipts":"receipts.jsonl","roots":"roots.txt","verifier_manifest":"verifier_manifest.json"},"format":"vm-sentinel-seal-v1","hash_algo":"sha256","instance_id":"did:vm:meridian:gw:demo","ledger_type":"jsonl","range":{"since_seq":0,"since_ts":"2025-03-17T03:17:40Z","until_seq":1,"until_ts":"2025-03-17T03:17:41Z"},"root":{"end":"sha256:086b680e09037deacf61ba9cc73b3c7da2737db7ef3802c887b12e6d76bd85ee","seq":1,"start":"sha256:2e1cfa82b035c26cbbbdae632cea070514eb8b773f616aaeaf668e2f0be8f10d"},"schema_version":"1.0.0","seal_id":"mv1_manifest_hash_mismatch","sentinel_version":"0.1.0"} \ No newline at end of file diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/manifest_hash_mismatch/verifier_manifest.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/manifest_hash_mismatch/verifier_manifest.json new file mode 100644 index 0000000..95de11b --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/manifest_hash_mismatch/verifier_manifest.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","format":"vm-sentinel-verifier-manifest-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0","verifier":{"name":"vm_verify_sentinel_bundle.py","version":"0.1.0"}} \ No newline at end of file diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/missing_required_file_roots/README.md b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/missing_required_file_roots/README.md new file mode 100644 index 0000000..de26448 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/missing_required_file_roots/README.md @@ -0,0 +1 @@ +MERIDIAN v1 conformance fixture: missing_required_file_roots diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/missing_required_file_roots/integrity.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/missing_required_file_roots/integrity.json new file mode 100644 index 0000000..87cf6c8 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/missing_required_file_roots/integrity.json @@ -0,0 +1,26 @@ +{ + "files": [ + { + "digest": "sha256:f55737e620d70f3a0d13c56bae90556ad012375786d5d1a3e7e3c6360c97ad21", + "path": "README.md", + "size_bytes": 61 + }, + { + "digest": "sha256:fe99e620546158cba1855aef378ce7c14de48c89e99bf7ee9eb259340d21f1df", + "path": "receipts.jsonl", + "size_bytes": 1200 + }, + { + "digest": "sha256:2117ebec196781f6b1526ea2786eacda4661790000f8ca0ac4d965f4a3dcd04b", + "path": "seal.json", + "size_bytes": 701 + }, + { + "digest": "sha256:421bc39113331983867b523c4019984ddee73136cde3af5e540978549101d4d8", + "path": "verifier_manifest.json", + "size_bytes": 238 + } + ], + "format": "vm-sentinel-integrity-v1", + "hash_algo": "sha256" +} diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/missing_required_file_roots/receipts.jsonl b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/missing_required_file_roots/receipts.jsonl new file mode 100644 index 0000000..b43d16a --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/missing_required_file_roots/receipts.jsonl @@ -0,0 +1,2 @@ 
+{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e","event_id":"00000000-0000-4000-8000-000000000101","event_type":"boot_event","op":"sentinel.boot_event.v1","op_digest":"sha256:d5b3a96e7033a4d0dcf2b452826cbe950f80f354ee5166487312fba6fde10758","payload":{"params":{"canonicalization_version":"sentinel-event-jcs-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0"}},"prev_event_hash":"0","result":"ok","seq":0,"trace_id":"11111111-1111-4111-8111-111111111111","ts":"2025-03-17T03:17:40Z"} +{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:e94f886565c5ddb0593a763d185a9f939895cddbb9fe678474097bd44642636c","event_id":"00000000-0000-4000-8000-000000000102","event_type":"tamper_signal","op":"meridian.v1.tamper_signal","op_digest":"sha256:ab8c0984c1dec7b04adc4d6eb51ba3065e339ff63b55d00aa6169e5047004c0f","payload":{"kind":"tamper_signal","params":{"signal":"case_open"}},"prev_event_hash":"sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e","result":"ok","seq":1,"trace_id":"33333333-3333-4333-8333-333333333333","ts":"2025-03-17T03:17:41Z"} diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/missing_required_file_roots/seal.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/missing_required_file_roots/seal.json new file mode 100644 index 0000000..b4829bf --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/missing_required_file_roots/seal.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","created_at":"2025-03-17T03:17:42Z","files":{"integrity":"integrity.json","receipts":"receipts.jsonl","roots":"roots.txt","verifier_manifest":"verifier_manifest.json"},"format":"vm-sentinel-seal-v1","hash_algo":"sha256","instance_id":"did:vm:meridian:gw:demo","ledger_type":"jsonl","range":{"since_seq":0,"since_ts":"2025-03-17T03:17:40Z","until_seq":1,"until_ts":"2025-03-17T03:17:41Z"},"root":{"end":"sha256:086b680e09037deacf61ba9cc73b3c7da2737db7ef3802c887b12e6d76bd85ee","seq":1,"start":"sha256:2e1cfa82b035c26cbbbdae632cea070514eb8b773f616aaeaf668e2f0be8f10d"},"schema_version":"1.0.0","seal_id":"mv1_missing_roots","sentinel_version":"0.1.0"} \ No newline at end of file diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/missing_required_file_roots/verifier_manifest.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/missing_required_file_roots/verifier_manifest.json new file mode 100644 index 0000000..95de11b --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/missing_required_file_roots/verifier_manifest.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","format":"vm-sentinel-verifier-manifest-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0","verifier":{"name":"vm_verify_sentinel_bundle.py","version":"0.1.0"}} \ No newline at end of file diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/op_digest_mismatch/README.md b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/op_digest_mismatch/README.md new file mode 100644 index 0000000..74db06f --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/op_digest_mismatch/README.md @@ -0,0 +1 @@ +MERIDIAN v1 conformance fixture: op_digest_mismatch diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/op_digest_mismatch/integrity.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/op_digest_mismatch/integrity.json new file mode 100644 index 0000000..9ad3e19 
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/op_digest_mismatch/README.md b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/op_digest_mismatch/README.md
new file mode 100644
index 0000000..74db06f
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/op_digest_mismatch/README.md
@@ -0,0 +1 @@
+MERIDIAN v1 conformance fixture: op_digest_mismatch
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/op_digest_mismatch/integrity.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/op_digest_mismatch/integrity.json
new file mode 100644
index 0000000..9ad3e19
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/op_digest_mismatch/integrity.json
@@ -0,0 +1,31 @@
+{
+  "files": [
+    {
+      "digest": "sha256:6d84872131ebbbea2cfe51f4fc78b637cf8d3fbd07536efad7842494824ebff5",
+      "path": "README.md",
+      "size_bytes": 52
+    },
+    {
+      "digest": "sha256:cc8c8a8a826aa2af56c973d0acf5d2cb3c15ec27774b32d379fdbcd773ea89fc",
+      "path": "receipts.jsonl",
+      "size_bytes": 1200
+    },
+    {
+      "digest": "sha256:0f409ba6f0b3a5f725c27ea4f694d3233ba525c0e8b45346db78677a28a1bb27",
+      "path": "roots.txt",
+      "size_bytes": 211
+    },
+    {
+      "digest": "sha256:d9927f312b368cc93c5f658129520be2e410a53cc430ee17d1994a56a587fa7b",
+      "path": "seal.json",
+      "size_bytes": 706
+    },
+    {
+      "digest": "sha256:421bc39113331983867b523c4019984ddee73136cde3af5e540978549101d4d8",
+      "path": "verifier_manifest.json",
+      "size_bytes": 238
+    }
+  ],
+  "format": "vm-sentinel-integrity-v1",
+  "hash_algo": "sha256"
+}
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/op_digest_mismatch/receipts.jsonl b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/op_digest_mismatch/receipts.jsonl
new file mode 100644
index 0000000..4c1340e
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/op_digest_mismatch/receipts.jsonl
@@ -0,0 +1,2 @@
+{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e","event_id":"00000000-0000-4000-8000-000000000101","event_type":"boot_event","op":"sentinel.boot_event.v1","op_digest":"sha256:d5b3a96e7033a4d0dcf2b452826cbe950f80f354ee5166487312fba6fde10758","payload":{"params":{"canonicalization_version":"sentinel-event-jcs-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0"}},"prev_event_hash":"0","result":"ok","seq":0,"trace_id":"11111111-1111-4111-8111-111111111111","ts":"2025-03-17T03:17:40Z"}
+{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:0dc6ce7f089a5a984b539949cd115c3c82bdf042d8c258629c2435f52ff0931c","event_id":"00000000-0000-4000-8000-000000000102","event_type":"tamper_signal","op":"meridian.v1.tamper_signal","op_digest":"sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff","payload":{"kind":"tamper_signal","params":{"signal":"case_open"}},"prev_event_hash":"sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e","result":"ok","seq":1,"trace_id":"33333333-3333-4333-8333-333333333333","ts":"2025-03-17T03:17:41Z"}
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/op_digest_mismatch/roots.txt b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/op_digest_mismatch/roots.txt
new file mode 100644
index 0000000..2388755
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/op_digest_mismatch/roots.txt
@@ -0,0 +1,3 @@
+# Sentinel root history (seq -> merkle root)
+seq=0 root=sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e
+seq=1 root=sha256:bd92627c9be6c20d5fa39a43af20b71399df67515c57c4f3747021135ce6c1a0
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/op_digest_mismatch/seal.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/op_digest_mismatch/seal.json
new file mode 100644
index 0000000..8132358
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/op_digest_mismatch/seal.json
@@ -0,0 +1 @@
+{"canonicalization_version":"sentinel-event-jcs-v1","created_at":"2025-03-17T03:17:42Z","files":{"integrity":"integrity.json","receipts":"receipts.jsonl","roots":"roots.txt","verifier_manifest":"verifier_manifest.json"},"format":"vm-sentinel-seal-v1","hash_algo":"sha256","instance_id":"did:vm:meridian:gw:demo","ledger_type":"jsonl","range":{"since_seq":0,"since_ts":"2025-03-17T03:17:40Z","until_seq":1,"until_ts":"2025-03-17T03:17:41Z"},"root":{"end":"sha256:bd92627c9be6c20d5fa39a43af20b71399df67515c57c4f3747021135ce6c1a0","seq":1,"start":"sha256:2e1cfa82b035c26cbbbdae632cea070514eb8b773f616aaeaf668e2f0be8f10d"},"schema_version":"1.0.0","seal_id":"mv1_op_digest_mismatch","sentinel_version":"0.1.0"} \ No newline at end of file diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/op_digest_mismatch/verifier_manifest.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/op_digest_mismatch/verifier_manifest.json new file mode 100644 index 0000000..95de11b --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/op_digest_mismatch/verifier_manifest.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","format":"vm-sentinel-verifier-manifest-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0","verifier":{"name":"vm_verify_sentinel_bundle.py","version":"0.1.0"}} \ No newline at end of file diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/prev_event_hash_mismatch/README.md b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/prev_event_hash_mismatch/README.md new file mode 100644 index 0000000..d9a13f5 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/prev_event_hash_mismatch/README.md @@ -0,0 +1 @@ +MERIDIAN v1 conformance fixture: prev_event_hash_mismatch diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/prev_event_hash_mismatch/integrity.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/prev_event_hash_mismatch/integrity.json new file mode 100644 index 0000000..4890c78 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/prev_event_hash_mismatch/integrity.json @@ -0,0 +1,31 @@ +{ + "files": [ + { + "digest": "sha256:b2c04f16a262c9c0dbf5cd97453f39a2d1784cd4cf8bc33c2db18699ebee85b7", + "path": "README.md", + "size_bytes": 58 + }, + { + "digest": "sha256:d0926fb0271ef172b3a266acba1ef72e4874b4c6a10fd4728c4a5c0637f23b1e", + "path": "receipts.jsonl", + "size_bytes": 1200 + }, + { + "digest": "sha256:151c573c6450d7e11ac7714e5f46f3ed4dd8236095cd4dd4abcece4aa0d6cfe0", + "path": "roots.txt", + "size_bytes": 211 + }, + { + "digest": "sha256:9ce9202c3070d5c10873cc060c40657a582d1d0ba610afb158bdc064dd089150", + "path": "seal.json", + "size_bytes": 706 + }, + { + "digest": "sha256:421bc39113331983867b523c4019984ddee73136cde3af5e540978549101d4d8", + "path": "verifier_manifest.json", + "size_bytes": 238 + } + ], + "format": "vm-sentinel-integrity-v1", + "hash_algo": "sha256" +} diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/prev_event_hash_mismatch/receipts.jsonl b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/prev_event_hash_mismatch/receipts.jsonl new file mode 100644 index 0000000..9108792 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/prev_event_hash_mismatch/receipts.jsonl @@ -0,0 +1,2 @@ 
+{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e","event_id":"00000000-0000-4000-8000-000000000101","event_type":"boot_event","op":"sentinel.boot_event.v1","op_digest":"sha256:d5b3a96e7033a4d0dcf2b452826cbe950f80f354ee5166487312fba6fde10758","payload":{"params":{"canonicalization_version":"sentinel-event-jcs-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0"}},"prev_event_hash":"0","result":"ok","seq":0,"trace_id":"11111111-1111-4111-8111-111111111111","ts":"2025-03-17T03:17:40Z"} +{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:66097f23685fbf3559480b7cb89f2bef233321702f94ab0878074fa023b459e5","event_id":"00000000-0000-4000-8000-000000000102","event_type":"tamper_signal","op":"meridian.v1.tamper_signal","op_digest":"sha256:ab8c0984c1dec7b04adc4d6eb51ba3065e339ff63b55d00aa6169e5047004c0f","payload":{"kind":"tamper_signal","params":{"signal":"case_open"}},"prev_event_hash":"sha256:1111111111111111111111111111111111111111111111111111111111111111","result":"ok","seq":1,"trace_id":"33333333-3333-4333-8333-333333333333","ts":"2025-03-17T03:17:41Z"} diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/prev_event_hash_mismatch/roots.txt b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/prev_event_hash_mismatch/roots.txt new file mode 100644 index 0000000..c0c1f1c --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/prev_event_hash_mismatch/roots.txt @@ -0,0 +1,3 @@ +# Sentinel root history (seq -> merkle root) +seq=0 root=sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e +seq=1 root=sha256:018be4d5f99ee40aa0d2fa0ef572a029b10b3626462bec9f0046d32358e04122 diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/prev_event_hash_mismatch/seal.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/prev_event_hash_mismatch/seal.json new file mode 100644 index 0000000..e3e573c --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/prev_event_hash_mismatch/seal.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","created_at":"2025-03-17T03:17:42Z","files":{"integrity":"integrity.json","receipts":"receipts.jsonl","roots":"roots.txt","verifier_manifest":"verifier_manifest.json"},"format":"vm-sentinel-seal-v1","hash_algo":"sha256","instance_id":"did:vm:meridian:gw:demo","ledger_type":"jsonl","range":{"since_seq":0,"since_ts":"2025-03-17T03:17:40Z","until_seq":1,"until_ts":"2025-03-17T03:17:41Z"},"root":{"end":"sha256:018be4d5f99ee40aa0d2fa0ef572a029b10b3626462bec9f0046d32358e04122","seq":1,"start":"sha256:2e1cfa82b035c26cbbbdae632cea070514eb8b773f616aaeaf668e2f0be8f10d"},"schema_version":"1.0.0","seal_id":"mv1_prev_hash_mismatch","sentinel_version":"0.1.0"} \ No newline at end of file diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/prev_event_hash_mismatch/verifier_manifest.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/prev_event_hash_mismatch/verifier_manifest.json new file mode 100644 index 0000000..95de11b --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/prev_event_hash_mismatch/verifier_manifest.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","format":"vm-sentinel-verifier-manifest-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0","verifier":{"name":"vm_verify_sentinel_bundle.py","version":"0.1.0"}} \ No newline at end of file diff --git 
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/range_mismatch/README.md b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/range_mismatch/README.md
new file mode 100644
index 0000000..9f016a0
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/range_mismatch/README.md
@@ -0,0 +1 @@
+MERIDIAN v1 conformance fixture: range_mismatch
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/range_mismatch/integrity.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/range_mismatch/integrity.json
new file mode 100644
index 0000000..db5721c
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/range_mismatch/integrity.json
@@ -0,0 +1,31 @@
+{
+  "files": [
+    {
+      "digest": "sha256:487b6fe99e45ba428c81ccbf8412fd8cdfe653631e686bc9e3d84337e5968ec6",
+      "path": "README.md",
+      "size_bytes": 48
+    },
+    {
+      "digest": "sha256:fe99e620546158cba1855aef378ce7c14de48c89e99bf7ee9eb259340d21f1df",
+      "path": "receipts.jsonl",
+      "size_bytes": 1200
+    },
+    {
+      "digest": "sha256:ab1e7d06897cf422b09e688ef6346756a1b4cea3237c350f4f5f9d4b3fb5becc",
+      "path": "roots.txt",
+      "size_bytes": 211
+    },
+    {
+      "digest": "sha256:eeea9ab98df5540b0493f948e3fbb2f982f6a950cd6d690edcb8edc7818780c6",
+      "path": "seal.json",
+      "size_bytes": 702
+    },
+    {
+      "digest": "sha256:421bc39113331983867b523c4019984ddee73136cde3af5e540978549101d4d8",
+      "path": "verifier_manifest.json",
+      "size_bytes": 238
+    }
+  ],
+  "format": "vm-sentinel-integrity-v1",
+  "hash_algo": "sha256"
+}
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/range_mismatch/receipts.jsonl b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/range_mismatch/receipts.jsonl
new file mode 100644
index 0000000..b43d16a
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/range_mismatch/receipts.jsonl
@@ -0,0 +1,2 @@
+{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e","event_id":"00000000-0000-4000-8000-000000000101","event_type":"boot_event","op":"sentinel.boot_event.v1","op_digest":"sha256:d5b3a96e7033a4d0dcf2b452826cbe950f80f354ee5166487312fba6fde10758","payload":{"params":{"canonicalization_version":"sentinel-event-jcs-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0"}},"prev_event_hash":"0","result":"ok","seq":0,"trace_id":"11111111-1111-4111-8111-111111111111","ts":"2025-03-17T03:17:40Z"}
+{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:e94f886565c5ddb0593a763d185a9f939895cddbb9fe678474097bd44642636c","event_id":"00000000-0000-4000-8000-000000000102","event_type":"tamper_signal","op":"meridian.v1.tamper_signal","op_digest":"sha256:ab8c0984c1dec7b04adc4d6eb51ba3065e339ff63b55d00aa6169e5047004c0f","payload":{"kind":"tamper_signal","params":{"signal":"case_open"}},"prev_event_hash":"sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e","result":"ok","seq":1,"trace_id":"33333333-3333-4333-8333-333333333333","ts":"2025-03-17T03:17:41Z"}
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/range_mismatch/roots.txt b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/range_mismatch/roots.txt
new file mode 100644
index 0000000..9c36231
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/range_mismatch/roots.txt
@@ -0,0 +1,3 @@
+# Sentinel root history (seq -> merkle root)
+seq=0 root=sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e
+seq=1 root=sha256:086b680e09037deacf61ba9cc73b3c7da2737db7ef3802c887b12e6d76bd85ee
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/range_mismatch/seal.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/range_mismatch/seal.json
new file mode 100644
index 0000000..dca5936
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/range_mismatch/seal.json
@@ -0,0 +1 @@
+{"canonicalization_version":"sentinel-event-jcs-v1","created_at":"2025-03-17T03:17:42Z","files":{"integrity":"integrity.json","receipts":"receipts.jsonl","roots":"roots.txt","verifier_manifest":"verifier_manifest.json"},"format":"vm-sentinel-seal-v1","hash_algo":"sha256","instance_id":"did:vm:meridian:gw:demo","ledger_type":"jsonl","range":{"since_seq":0,"since_ts":"2025-03-17T03:17:40Z","until_seq":0,"until_ts":"2025-03-17T03:17:41Z"},"root":{"end":"sha256:086b680e09037deacf61ba9cc73b3c7da2737db7ef3802c887b12e6d76bd85ee","seq":0,"start":"sha256:2e1cfa82b035c26cbbbdae632cea070514eb8b773f616aaeaf668e2f0be8f10d"},"schema_version":"1.0.0","seal_id":"mv1_range_mismatch","sentinel_version":"0.1.0"}
\ No newline at end of file
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/range_mismatch/verifier_manifest.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/range_mismatch/verifier_manifest.json
new file mode 100644
index 0000000..95de11b
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/range_mismatch/verifier_manifest.json
@@ -0,0 +1 @@
+{"canonicalization_version":"sentinel-event-jcs-v1","format":"vm-sentinel-verifier-manifest-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0","verifier":{"name":"vm_verify_sentinel_bundle.py","version":"0.1.0"}}
\ No newline at end of file
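
range_mismatch's seal claims until_seq 0 while the ledger actually ends at seq 1, so a verifier must check the seal's declared range against the receipts it covers. A sketch under the same assumptions as the earlier examples (names illustrative):

    import json

    def check_seal_range(seal_path, receipts_path):
        with open(seal_path, encoding="utf-8") as f:
            seal = json.load(f)
        with open(receipts_path, encoding="utf-8") as f:
            seqs = [json.loads(line)["seq"] for line in f]
        if not seqs:
            raise ValueError("empty ledger")
        rng = seal["range"]
        # The sealed window must span exactly the receipts in the bundle.
        if rng["since_seq"] != seqs[0] or rng["until_seq"] != seqs[-1]:
            raise ValueError("seal range does not match receipt sequence span")
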
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/revoked_capability_used/README.md b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/revoked_capability_used/README.md
new file mode 100644
index 0000000..a98b00d
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/revoked_capability_used/README.md
@@ -0,0 +1 @@
+MERIDIAN v1 conformance fixture: revoked_capability_used
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/revoked_capability_used/integrity.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/revoked_capability_used/integrity.json
new file mode 100644
index 0000000..02394c5
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/revoked_capability_used/integrity.json
@@ -0,0 +1,31 @@
+{
+  "files": [
+    {
+      "digest": "sha256:e2ac0e69c825925179a37cc3c7f302db809c0882529b09b24fa19170c76a9722",
+      "path": "README.md",
+      "size_bytes": 57
+    },
+    {
+      "digest": "sha256:102e1ba3c758a3387ee658a552a682339cb0ff4c0dac46b8fee2ab1298d661ac",
+      "path": "receipts.jsonl",
+      "size_bytes": 2616
+    },
+    {
+      "digest": "sha256:800646f71e8dfa68f5ff59e037aefe1f4c343ec8fe50259f7c70b53559c14ec6",
+      "path": "roots.txt",
+      "size_bytes": 377
+    },
+    {
+      "digest": "sha256:7572439a56d51a887204b0ad45af3b9d1be561b19b83d6563686ff2c70a22d4f",
+      "path": "seal.json",
+      "size_bytes": 711
+    },
+    {
+      "digest": "sha256:421bc39113331983867b523c4019984ddee73136cde3af5e540978549101d4d8",
+      "path": "verifier_manifest.json",
+      "size_bytes": 238
+    }
+  ],
+  "format": "vm-sentinel-integrity-v1",
+  "hash_algo": "sha256"
+}
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/revoked_capability_used/receipts.jsonl b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/revoked_capability_used/receipts.jsonl
new file mode 100644
index 0000000..9de6743
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/revoked_capability_used/receipts.jsonl
@@ -0,0 +1,4 @@
+{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:255f64c373130f8c525b25a75a4f39509fe9d5749cc0a0998b8ec4770c2bdb5e","event_id":"00000000-0000-4000-8000-000000000401","event_type":"boot_event","op":"sentinel.boot_event.v1","op_digest":"sha256:d5b3a96e7033a4d0dcf2b452826cbe950f80f354ee5166487312fba6fde10758","payload":{"params":{"canonicalization_version":"sentinel-event-jcs-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0"}},"prev_event_hash":"0","result":"ok","seq":0,"trace_id":"11111111-1111-4111-8111-111111111111","ts":"2025-03-17T03:17:40Z"}
+{"actor":"did:vm:guardian:demo","cap_hash":"none","event_hash":"sha256:01dee033c0551df6d97f1bf1a42901eff46fe74c924e7ffd6b826294770e4746","event_id":"00000000-0000-4000-8000-000000000402","event_type":"cap_revoke","op":"meridian.v1.cap.revoke","op_digest":"sha256:3ec26a65972bb3d5cdbb73e8b5cb21493eb4d93cfa4a5458624b344fb2c71205","payload":{"params":{"revoked_cap_hash":"sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"}},"prev_event_hash":"sha256:255f64c373130f8c525b25a75a4f39509fe9d5749cc0a0998b8ec4770c2bdb5e","result":"ok","seq":1,"trace_id":"55555555-5555-4555-8555-555555555555","ts":"2025-03-17T03:17:41Z"}
+{"actor":"did:vm:operator:demo","cap_hash":"sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee","event_hash":"sha256:362ef4b2dfb84d65740385a2408e5c2d2347f4b72c54719c71cc23ff3753583b","event_id":"00000000-0000-4000-8000-000000000403","event_type":"action_intent","op":"meridian.v1.plc.write","op_digest":"sha256:117d3ce1dbece95b6bf06b1d251946dedfd879a0cb38aa8b8ec4e34380f1d1c8","payload":{"kind":"command_requested","params":{"plc":"plc:demo","register":"R7","value":"1"}},"prev_event_hash":"sha256:01dee033c0551df6d97f1bf1a42901eff46fe74c924e7ffd6b826294770e4746","result":"ok","seq":2,"trace_id":"44444444-4444-4444-8444-444444444444","ts":"2025-03-17T03:17:42Z"}
+{"actor":"did:vm:meridian:gw:demo","cap_hash":"sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee","event_hash":"sha256:bd3b793f7cc567527df49b49c1282bbf43c67e7bac4e8ad54a45c4e76f06f035","event_id":"00000000-0000-4000-8000-000000000404","event_type":"action_executed","op":"meridian.v1.plc.write","op_digest":"sha256:117d3ce1dbece95b6bf06b1d251946dedfd879a0cb38aa8b8ec4e34380f1d1c8","payload":{"kind":"command_executed","params":{"plc":"plc:demo","register":"R7","value":"1"}},"prev_event_hash":"sha256:362ef4b2dfb84d65740385a2408e5c2d2347f4b72c54719c71cc23ff3753583b","result":"ok","seq":3,"trace_id":"44444444-4444-4444-8444-444444444444","ts":"2025-03-17T03:17:43Z"}
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/revoked_capability_used/roots.txt b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/revoked_capability_used/roots.txt
new file mode 100644
index 0000000..4d4f466
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/revoked_capability_used/roots.txt
@@ -0,0 +1,5 @@
+# Sentinel root history (seq -> merkle root)
+seq=0 root=sha256:255f64c373130f8c525b25a75a4f39509fe9d5749cc0a0998b8ec4770c2bdb5e
+seq=1 root=sha256:d825463571271525aa2e65fb72574040538c71ee8ab8ebf88de72d08cc364038
+seq=2 root=sha256:f0b4f65c899b47afc42611b449fe4be1c092d2360e172af9cb31137660290bf0
+seq=3 root=sha256:22f52ba66114decc6702f2e711ddfd690b1567a5fc2586339feb658c44f593c3
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/revoked_capability_used/seal.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/revoked_capability_used/seal.json
new file mode 100644
index 0000000..e3a88ed
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/revoked_capability_used/seal.json
@@ -0,0 +1 @@
+{"canonicalization_version":"sentinel-event-jcs-v1","created_at":"2025-03-17T03:17:44Z","files":{"integrity":"integrity.json","receipts":"receipts.jsonl","roots":"roots.txt","verifier_manifest":"verifier_manifest.json"},"format":"vm-sentinel-seal-v1","hash_algo":"sha256","instance_id":"did:vm:meridian:gw:demo","ledger_type":"jsonl","range":{"since_seq":0,"since_ts":"2025-03-17T03:17:40Z","until_seq":3,"until_ts":"2025-03-17T03:17:43Z"},"root":{"end":"sha256:22f52ba66114decc6702f2e711ddfd690b1567a5fc2586339feb658c44f593c3","seq":3,"start":"sha256:2e1cfa82b035c26cbbbdae632cea070514eb8b773f616aaeaf668e2f0be8f10d"},"schema_version":"1.0.0","seal_id":"mv1_revoked_capability_used","sentinel_version":"0.1.0"}
\ No newline at end of file
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/revoked_capability_used/verifier_manifest.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/revoked_capability_used/verifier_manifest.json
new file mode 100644
index 0000000..95de11b
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/revoked_capability_used/verifier_manifest.json
@@ -0,0 +1 @@
+{"canonicalization_version":"sentinel-event-jcs-v1","format":"vm-sentinel-verifier-manifest-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0","verifier":{"name":"vm_verify_sentinel_bundle.py","version":"0.1.0"}}
\ No newline at end of file
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/root_mismatch/README.md b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/root_mismatch/README.md
new file mode 100644
index 0000000..979812a
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/root_mismatch/README.md
@@ -0,0 +1 @@
+MERIDIAN v1 conformance fixture: root_mismatch
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/root_mismatch/integrity.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/root_mismatch/integrity.json
new file mode 100644
index 0000000..7f261a5
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/root_mismatch/integrity.json
@@ -0,0 +1,31 @@
+{
+  "files": [
+    {
+      "digest": "sha256:cb5376f22e2f1d4b956f3a61cd85d2010d9ff160f9109f876eb6dff7e68eb483",
+      "path": "README.md",
+      "size_bytes": 47
+    },
+    {
+      "digest": "sha256:fe99e620546158cba1855aef378ce7c14de48c89e99bf7ee9eb259340d21f1df",
+      "path": "receipts.jsonl",
+      "size_bytes": 1200
+    },
+    {
+      "digest": "sha256:818502d58a7dea7ec78196f629a223b10ec2603a762fc76500b2bc59592d9e66",
+      "path": "roots.txt",
+      "size_bytes": 211
+    },
+    {
+      "digest": "sha256:9cb2077be17e3f77f4151830b309e6ff79169fa9a188b0707fd3662b6b4354f2",
+      "path": "seal.json",
+      "size_bytes": 701
+    },
+    {
+      "digest": "sha256:421bc39113331983867b523c4019984ddee73136cde3af5e540978549101d4d8",
+      "path": "verifier_manifest.json",
+      "size_bytes": 238
+    }
+  ],
+  "format": "vm-sentinel-integrity-v1",
+  "hash_algo": "sha256"
+}
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/root_mismatch/receipts.jsonl b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/root_mismatch/receipts.jsonl
new file mode 100644
index 0000000..b43d16a
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/root_mismatch/receipts.jsonl
@@ -0,0 +1,2 @@
+{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e","event_id":"00000000-0000-4000-8000-000000000101","event_type":"boot_event","op":"sentinel.boot_event.v1","op_digest":"sha256:d5b3a96e7033a4d0dcf2b452826cbe950f80f354ee5166487312fba6fde10758","payload":{"params":{"canonicalization_version":"sentinel-event-jcs-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0"}},"prev_event_hash":"0","result":"ok","seq":0,"trace_id":"11111111-1111-4111-8111-111111111111","ts":"2025-03-17T03:17:40Z"} +{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:e94f886565c5ddb0593a763d185a9f939895cddbb9fe678474097bd44642636c","event_id":"00000000-0000-4000-8000-000000000102","event_type":"tamper_signal","op":"meridian.v1.tamper_signal","op_digest":"sha256:ab8c0984c1dec7b04adc4d6eb51ba3065e339ff63b55d00aa6169e5047004c0f","payload":{"kind":"tamper_signal","params":{"signal":"case_open"}},"prev_event_hash":"sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e","result":"ok","seq":1,"trace_id":"33333333-3333-4333-8333-333333333333","ts":"2025-03-17T03:17:41Z"} diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/root_mismatch/roots.txt b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/root_mismatch/roots.txt new file mode 100644 index 0000000..b5e338b --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/root_mismatch/roots.txt @@ -0,0 +1,3 @@ +# Sentinel root history (seq -> merkle root) +seq=0 root=sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e +seq=1 root=sha256:3333333333333333333333333333333333333333333333333333333333333333 diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/root_mismatch/seal.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/root_mismatch/seal.json new file mode 100644 index 0000000..1ff0c60 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/root_mismatch/seal.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","created_at":"2025-03-17T03:17:42Z","files":{"integrity":"integrity.json","receipts":"receipts.jsonl","roots":"roots.txt","verifier_manifest":"verifier_manifest.json"},"format":"vm-sentinel-seal-v1","hash_algo":"sha256","instance_id":"did:vm:meridian:gw:demo","ledger_type":"jsonl","range":{"since_seq":0,"since_ts":"2025-03-17T03:17:40Z","until_seq":1,"until_ts":"2025-03-17T03:17:41Z"},"root":{"end":"sha256:3333333333333333333333333333333333333333333333333333333333333333","seq":1,"start":"sha256:2e1cfa82b035c26cbbbdae632cea070514eb8b773f616aaeaf668e2f0be8f10d"},"schema_version":"1.0.0","seal_id":"mv1_root_mismatch","sentinel_version":"0.1.0"} \ No newline at end of file diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/root_mismatch/verifier_manifest.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/root_mismatch/verifier_manifest.json new file mode 100644 index 0000000..95de11b --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/root_mismatch/verifier_manifest.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","format":"vm-sentinel-verifier-manifest-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0","verifier":{"name":"vm_verify_sentinel_bundle.py","version":"0.1.0"}} \ No newline at end of file diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/seq_non_monotonic_duplicate/README.md 
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/seq_non_monotonic_duplicate/README.md b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/seq_non_monotonic_duplicate/README.md
new file mode 100644
index 0000000..539d2d8
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/seq_non_monotonic_duplicate/README.md
@@ -0,0 +1 @@
+MERIDIAN v1 conformance fixture: seq_non_monotonic_duplicate
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/seq_non_monotonic_duplicate/integrity.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/seq_non_monotonic_duplicate/integrity.json
new file mode 100644
index 0000000..1c338df
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/seq_non_monotonic_duplicate/integrity.json
@@ -0,0 +1,31 @@
+{
+  "files": [
+    {
+      "digest": "sha256:d22dcdada9f37d6f6860d160ea994f7db1b75844b3ff6d301e33d47a69010e9c",
+      "path": "README.md",
+      "size_bytes": 61
+    },
+    {
+      "digest": "sha256:0e4056952676edaf73e2841421a7c7597a4ccee5c6c1cf29576eb9cb8926a8c0",
+      "path": "receipts.jsonl",
+      "size_bytes": 1800
+    },
+    {
+      "digest": "sha256:dda5f37f4d5bb9bda4b00c7042b2249fb8ec6ae68008b86d6e60774cca728458",
+      "path": "roots.txt",
+      "size_bytes": 211
+    },
+    {
+      "digest": "sha256:a6cbbbfe4a59dfdff145d4ca8a7b631d71b599f35ae53dbfdbea90bc9b103d98",
+      "path": "seal.json",
+      "size_bytes": 705
+    },
+    {
+      "digest": "sha256:421bc39113331983867b523c4019984ddee73136cde3af5e540978549101d4d8",
+      "path": "verifier_manifest.json",
+      "size_bytes": 238
+    }
+  ],
+  "format": "vm-sentinel-integrity-v1",
+  "hash_algo": "sha256"
+}
diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/seq_non_monotonic_duplicate/receipts.jsonl b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/seq_non_monotonic_duplicate/receipts.jsonl
new file mode 100644
index 0000000..71df4ee
--- /dev/null
+++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/seq_non_monotonic_duplicate/receipts.jsonl
@@ -0,0 +1,3 @@
+{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:8400306ca8ae4c0a051d25b5315ce07654281ea49d58698fa7a3d30f17a9c3a0","event_id":"00000000-0000-4000-8000-000000000301","event_type":"boot_event","op":"sentinel.boot_event.v1","op_digest":"sha256:d5b3a96e7033a4d0dcf2b452826cbe950f80f354ee5166487312fba6fde10758","payload":{"params":{"canonicalization_version":"sentinel-event-jcs-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0"}},"prev_event_hash":"0","result":"ok","seq":0,"trace_id":"11111111-1111-4111-8111-111111111111","ts":"2025-03-17T03:17:40Z"}
+{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:8433e0eb0867866fa7ba7fba0bff09302b6ac9bc03d04608cf3d7e4f892c7648","event_id":"00000000-0000-4000-8000-000000000302","event_type":"health_event","op":"meridian.v1.health","op_digest":"sha256:86a43eb2d983d91d8cf106edf3d55e9590a72647fe00ca9a919819a4c853ec30","payload":{"kind":"health_event","params":{"ok":true}},"prev_event_hash":"sha256:8400306ca8ae4c0a051d25b5315ce07654281ea49d58698fa7a3d30f17a9c3a0","result":"ok","seq":1,"trace_id":"aaaaaaaa-aaaa-4aaa-8aaa-aaaaaaaaaaaa","ts":"2025-03-17T03:17:41Z"}
+{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:5955a07c38ee54c89feef03eb7295483e2ad00059174b20ec8f24481e090f03a","event_id":"00000000-0000-4000-8000-000000000303","event_type":"corruption_detected","op":"meridian.v1.corruption_detected","op_digest":"sha256:3fb33f6ebd627285686d974ee770894dc91f1e77b90448ddda0d23012e48eccb","payload":{"kind":"corruption_detected","params":{"component":"storage"}},"prev_event_hash":"sha256:8433e0eb0867866fa7ba7fba0bff09302b6ac9bc03d04608cf3d7e4f892c7648","result":"error","seq":1,"trace_id":"bbbbbbbb-bbbb-4bbb-8bbb-bbbbbbbbbbbb","ts":"2025-03-17T03:17:41Z"} diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/seq_non_monotonic_duplicate/roots.txt b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/seq_non_monotonic_duplicate/roots.txt new file mode 100644 index 0000000..fed7d6c --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/seq_non_monotonic_duplicate/roots.txt @@ -0,0 +1,3 @@ +# Sentinel root history (seq -> merkle root) +seq=0 root=sha256:8400306ca8ae4c0a051d25b5315ce07654281ea49d58698fa7a3d30f17a9c3a0 +seq=1 root=sha256:ed0d9e93d9feed61c3c3baa9a99866d88909275efb1d6490a7760363aa41d0ce diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/seq_non_monotonic_duplicate/seal.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/seq_non_monotonic_duplicate/seal.json new file mode 100644 index 0000000..7459abb --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/seq_non_monotonic_duplicate/seal.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","created_at":"2025-03-17T03:17:42Z","files":{"integrity":"integrity.json","receipts":"receipts.jsonl","roots":"roots.txt","verifier_manifest":"verifier_manifest.json"},"format":"vm-sentinel-seal-v1","hash_algo":"sha256","instance_id":"did:vm:meridian:gw:demo","ledger_type":"jsonl","range":{"since_seq":0,"since_ts":"2025-03-17T03:17:40Z","until_seq":1,"until_ts":"2025-03-17T03:17:41Z"},"root":{"end":"sha256:ed0d9e93d9feed61c3c3baa9a99866d88909275efb1d6490a7760363aa41d0ce","seq":1,"start":"sha256:2e1cfa82b035c26cbbbdae632cea070514eb8b773f616aaeaf668e2f0be8f10d"},"schema_version":"1.0.0","seal_id":"mv1_seq_non_monotonic","sentinel_version":"0.1.0"} \ No newline at end of file diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/seq_non_monotonic_duplicate/verifier_manifest.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/seq_non_monotonic_duplicate/verifier_manifest.json new file mode 100644 index 0000000..95de11b --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/seq_non_monotonic_duplicate/verifier_manifest.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","format":"vm-sentinel-verifier-manifest-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0","verifier":{"name":"vm_verify_sentinel_bundle.py","version":"0.1.0"}} \ No newline at end of file diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/silent_denial_intent_without_outcome/README.md b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/silent_denial_intent_without_outcome/README.md new file mode 100644 index 0000000..6635eef --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/silent_denial_intent_without_outcome/README.md @@ -0,0 +1 @@ +MERIDIAN v1 conformance fixture: silent_denial_intent_without_outcome diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/silent_denial_intent_without_outcome/integrity.json 
b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/silent_denial_intent_without_outcome/integrity.json new file mode 100644 index 0000000..357c99a --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/silent_denial_intent_without_outcome/integrity.json @@ -0,0 +1,31 @@ +{ + "files": [ + { + "digest": "sha256:b38038066e2b0824a4d2e23ae90964452c073c328934bd1a64a395a07b84738f", + "path": "README.md", + "size_bytes": 70 + }, + { + "digest": "sha256:8403298da38e131bfb9533ab3f483b7e6a5800e3ec0769f8890656cc026c54c9", + "path": "receipts.jsonl", + "size_bytes": 1288 + }, + { + "digest": "sha256:a3aff64880a37f16b8e1e83cc2f8e16d40d4126676abce6a071c4ed090f7280c", + "path": "roots.txt", + "size_bytes": 211 + }, + { + "digest": "sha256:1630336168944b3fc601984ab346ed9bccb3fb0a63daa40437b276aa230dbebc", + "path": "seal.json", + "size_bytes": 701 + }, + { + "digest": "sha256:421bc39113331983867b523c4019984ddee73136cde3af5e540978549101d4d8", + "path": "verifier_manifest.json", + "size_bytes": 238 + } + ], + "format": "vm-sentinel-integrity-v1", + "hash_algo": "sha256" +} diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/silent_denial_intent_without_outcome/receipts.jsonl b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/silent_denial_intent_without_outcome/receipts.jsonl new file mode 100644 index 0000000..56a542b --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/silent_denial_intent_without_outcome/receipts.jsonl @@ -0,0 +1,2 @@ +{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:65454174bcfd71e018828a00aa1a54ef48bb68c901df0af4889979b572da252b","event_id":"00000000-0000-4000-8000-000000000201","event_type":"boot_event","op":"sentinel.boot_event.v1","op_digest":"sha256:d5b3a96e7033a4d0dcf2b452826cbe950f80f354ee5166487312fba6fde10758","payload":{"params":{"canonicalization_version":"sentinel-event-jcs-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0"}},"prev_event_hash":"0","result":"ok","seq":0,"trace_id":"11111111-1111-4111-8111-111111111111","ts":"2025-03-17T03:17:40Z"} +{"actor":"did:vm:operator:demo","cap_hash":"sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb","event_hash":"sha256:988d189147174703c406cced62a4d1e5c968ad676b2ddcfc99d0652f7c974c22","event_id":"00000000-0000-4000-8000-000000000202","event_type":"action_intent","op":"meridian.v1.plc.write","op_digest":"sha256:f0625adaa0316d2058fd9f2d9ccf497fda392b709306b74441e7236e2f29d39a","payload":{"kind":"command_requested","params":{"plc":"plc:demo","register":"R2","value":"1"}},"prev_event_hash":"sha256:65454174bcfd71e018828a00aa1a54ef48bb68c901df0af4889979b572da252b","result":"ok","seq":1,"trace_id":"22222222-2222-4222-8222-222222222223","ts":"2025-03-17T03:17:41Z"} diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/silent_denial_intent_without_outcome/roots.txt b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/silent_denial_intent_without_outcome/roots.txt new file mode 100644 index 0000000..c1c9856 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/silent_denial_intent_without_outcome/roots.txt @@ -0,0 +1,3 @@ +# Sentinel root history (seq -> merkle root) +seq=0 root=sha256:65454174bcfd71e018828a00aa1a54ef48bb68c901df0af4889979b572da252b +seq=1 root=sha256:9b669e18278c025b371af97d641406d36b3b2b770ded71cf4420d16e4fe336e1 diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/silent_denial_intent_without_outcome/seal.json 
b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/silent_denial_intent_without_outcome/seal.json new file mode 100644 index 0000000..2fb662e --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/silent_denial_intent_without_outcome/seal.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","created_at":"2025-03-17T03:17:42Z","files":{"integrity":"integrity.json","receipts":"receipts.jsonl","roots":"roots.txt","verifier_manifest":"verifier_manifest.json"},"format":"vm-sentinel-seal-v1","hash_algo":"sha256","instance_id":"did:vm:meridian:gw:demo","ledger_type":"jsonl","range":{"since_seq":0,"since_ts":"2025-03-17T03:17:40Z","until_seq":1,"until_ts":"2025-03-17T03:17:41Z"},"root":{"end":"sha256:9b669e18278c025b371af97d641406d36b3b2b770ded71cf4420d16e4fe336e1","seq":1,"start":"sha256:2e1cfa82b035c26cbbbdae632cea070514eb8b773f616aaeaf668e2f0be8f10d"},"schema_version":"1.0.0","seal_id":"mv1_silent_denial","sentinel_version":"0.1.0"} \ No newline at end of file diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/silent_denial_intent_without_outcome/verifier_manifest.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/silent_denial_intent_without_outcome/verifier_manifest.json new file mode 100644 index 0000000..95de11b --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/silent_denial_intent_without_outcome/verifier_manifest.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","format":"vm-sentinel-verifier-manifest-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0","verifier":{"name":"vm_verify_sentinel_bundle.py","version":"0.1.0"}} \ No newline at end of file diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/unlisted_extra_file_strict/README.md b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/unlisted_extra_file_strict/README.md new file mode 100644 index 0000000..c4ef204 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/unlisted_extra_file_strict/README.md @@ -0,0 +1 @@ +MERIDIAN v1 conformance fixture: unlisted_extra_file_strict diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/unlisted_extra_file_strict/UNLISTED.bin b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/unlisted_extra_file_strict/UNLISTED.bin new file mode 100644 index 0000000..45a70fc --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/unlisted_extra_file_strict/UNLISTED.bin @@ -0,0 +1 @@ +unlisted \ No newline at end of file diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/unlisted_extra_file_strict/integrity.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/unlisted_extra_file_strict/integrity.json new file mode 100644 index 0000000..bfd7d6f --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/unlisted_extra_file_strict/integrity.json @@ -0,0 +1,31 @@ +{ + "files": [ + { + "digest": "sha256:adc6aa1dbf563d1c54d6f84056e5873e97a8d6b2f3a37ed79c12ef90fad2777b", + "path": "README.md", + "size_bytes": 60 + }, + { + "digest": "sha256:fe99e620546158cba1855aef378ce7c14de48c89e99bf7ee9eb259340d21f1df", + "path": "receipts.jsonl", + "size_bytes": 1200 + }, + { + "digest": "sha256:ab1e7d06897cf422b09e688ef6346756a1b4cea3237c350f4f5f9d4b3fb5becc", + "path": "roots.txt", + "size_bytes": 211 + }, + { + "digest": "sha256:534c165628fc84d1753291d1a7fd75d78387fb49edb383a156a5e7d7c2514fcb", + "path": "seal.json", + "size_bytes": 701 + }, + { + "digest": "sha256:421bc39113331983867b523c4019984ddee73136cde3af5e540978549101d4d8", + "path": 
"verifier_manifest.json", + "size_bytes": 238 + } + ], + "format": "vm-sentinel-integrity-v1", + "hash_algo": "sha256" +} diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/unlisted_extra_file_strict/receipts.jsonl b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/unlisted_extra_file_strict/receipts.jsonl new file mode 100644 index 0000000..b43d16a --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/unlisted_extra_file_strict/receipts.jsonl @@ -0,0 +1,2 @@ +{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e","event_id":"00000000-0000-4000-8000-000000000101","event_type":"boot_event","op":"sentinel.boot_event.v1","op_digest":"sha256:d5b3a96e7033a4d0dcf2b452826cbe950f80f354ee5166487312fba6fde10758","payload":{"params":{"canonicalization_version":"sentinel-event-jcs-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0"}},"prev_event_hash":"0","result":"ok","seq":0,"trace_id":"11111111-1111-4111-8111-111111111111","ts":"2025-03-17T03:17:40Z"} +{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:e94f886565c5ddb0593a763d185a9f939895cddbb9fe678474097bd44642636c","event_id":"00000000-0000-4000-8000-000000000102","event_type":"tamper_signal","op":"meridian.v1.tamper_signal","op_digest":"sha256:ab8c0984c1dec7b04adc4d6eb51ba3065e339ff63b55d00aa6169e5047004c0f","payload":{"kind":"tamper_signal","params":{"signal":"case_open"}},"prev_event_hash":"sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e","result":"ok","seq":1,"trace_id":"33333333-3333-4333-8333-333333333333","ts":"2025-03-17T03:17:41Z"} diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/unlisted_extra_file_strict/roots.txt b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/unlisted_extra_file_strict/roots.txt new file mode 100644 index 0000000..9c36231 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/unlisted_extra_file_strict/roots.txt @@ -0,0 +1,3 @@ +# Sentinel root history (seq -> merkle root) +seq=0 root=sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e +seq=1 root=sha256:086b680e09037deacf61ba9cc73b3c7da2737db7ef3802c887b12e6d76bd85ee diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/unlisted_extra_file_strict/seal.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/unlisted_extra_file_strict/seal.json new file mode 100644 index 0000000..ed5e637 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/unlisted_extra_file_strict/seal.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","created_at":"2025-03-17T03:17:42Z","files":{"integrity":"integrity.json","receipts":"receipts.jsonl","roots":"roots.txt","verifier_manifest":"verifier_manifest.json"},"format":"vm-sentinel-seal-v1","hash_algo":"sha256","instance_id":"did:vm:meridian:gw:demo","ledger_type":"jsonl","range":{"since_seq":0,"since_ts":"2025-03-17T03:17:40Z","until_seq":1,"until_ts":"2025-03-17T03:17:41Z"},"root":{"end":"sha256:086b680e09037deacf61ba9cc73b3c7da2737db7ef3802c887b12e6d76bd85ee","seq":1,"start":"sha256:2e1cfa82b035c26cbbbdae632cea070514eb8b773f616aaeaf668e2f0be8f10d"},"schema_version":"1.0.0","seal_id":"mv1_unlisted_file","sentinel_version":"0.1.0"} \ No newline at end of file diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/unlisted_extra_file_strict/verifier_manifest.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/unlisted_extra_file_strict/verifier_manifest.json 
new file mode 100644 index 0000000..95de11b --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/unlisted_extra_file_strict/verifier_manifest.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","format":"vm-sentinel-verifier-manifest-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0","verifier":{"name":"vm_verify_sentinel_bundle.py","version":"0.1.0"}} \ No newline at end of file diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/refusal_proof_pass/README.md b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/refusal_proof_pass/README.md new file mode 100644 index 0000000..0543e42 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/refusal_proof_pass/README.md @@ -0,0 +1 @@ +MERIDIAN v1 conformance fixture: refusal_proof_pass diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/refusal_proof_pass/integrity.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/refusal_proof_pass/integrity.json new file mode 100644 index 0000000..478a61d --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/refusal_proof_pass/integrity.json @@ -0,0 +1,31 @@ +{ + "files": [ + { + "digest": "sha256:428e685d42ff4847cfaac4455d1d62d751e045239bad574d75d5fd5eda79fa5b", + "path": "README.md", + "size_bytes": 52 + }, + { + "digest": "sha256:31899850a5b8a67d28b50ee43f56d2703b8dcceb5433ffe24f4ca3c48f942850", + "path": "receipts.jsonl", + "size_bytes": 2103 + }, + { + "digest": "sha256:c2b0f54736c108163d69804402c07dd64098db8f9a34aec7c52c2ece476923c5", + "path": "roots.txt", + "size_bytes": 294 + }, + { + "digest": "sha256:b9ed30a1a86aed4fa3190303ed5e995a680db0c64560361b4d2e38f9ddd55057", + "path": "seal.json", + "size_bytes": 706 + }, + { + "digest": "sha256:421bc39113331983867b523c4019984ddee73136cde3af5e540978549101d4d8", + "path": "verifier_manifest.json", + "size_bytes": 238 + } + ], + "format": "vm-sentinel-integrity-v1", + "hash_algo": "sha256" +} diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/refusal_proof_pass/receipts.jsonl b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/refusal_proof_pass/receipts.jsonl new file mode 100644 index 0000000..40d3708 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/refusal_proof_pass/receipts.jsonl @@ -0,0 +1,3 @@ +{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:3cd6ae9b4222348df6731a25a86b29aa07b3e5fa2086d7be74a0d35039674927","event_id":"00000000-0000-4000-8000-000000000001","event_type":"boot_event","op":"sentinel.boot_event.v1","op_digest":"sha256:d5b3a96e7033a4d0dcf2b452826cbe950f80f354ee5166487312fba6fde10758","payload":{"params":{"canonicalization_version":"sentinel-event-jcs-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0"}},"prev_event_hash":"0","result":"ok","seq":0,"trace_id":"11111111-1111-4111-8111-111111111111","ts":"2025-03-17T03:17:40Z"} 
+{"actor":"did:vm:operator:demo","cap_hash":"sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","event_hash":"sha256:f77f68216ecbd7d770165041ebbf004e35dc607b4ec2bb8e7745dcd98d9be18b","event_id":"00000000-0000-4000-8000-000000000002","event_type":"action_intent","op":"meridian.v1.plc.write","op_digest":"sha256:aaf211dd935c56bf75ee10c2fb7e3825dc44595dc7a183579623924c04bd5bbc","payload":{"kind":"command_requested","params":{"plc":"plc:demo","register":"R1","value":"1"}},"prev_event_hash":"sha256:3cd6ae9b4222348df6731a25a86b29aa07b3e5fa2086d7be74a0d35039674927","result":"ok","seq":1,"trace_id":"22222222-2222-4222-8222-222222222222","ts":"2025-03-17T03:17:41Z"} +{"actor":"did:vm:meridian:gw:demo","cap_hash":"sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","event_hash":"sha256:aab74764a03e3c4779d3c1aa8d4d050e61b7e08d521c9cfaf2f491d7d677791e","event_id":"00000000-0000-4000-8000-000000000003","event_type":"shadow_receipt","op":"meridian.v1.plc.write","op_digest":"sha256:aaf211dd935c56bf75ee10c2fb7e3825dc44595dc7a183579623924c04bd5bbc","payload":{"kind":"command_refused","params":{"plc":"plc:demo","register":"R1","value":"1"},"reason_code":"safety_interlock","reason_text":"safety policy denied write","would_have_done":{"op":"meridian.v1.plc.write"}},"prev_event_hash":"sha256:f77f68216ecbd7d770165041ebbf004e35dc607b4ec2bb8e7745dcd98d9be18b","result":"deny","seq":2,"trace_id":"22222222-2222-4222-8222-222222222222","ts":"2025-03-17T03:17:42Z"} diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/refusal_proof_pass/roots.txt b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/refusal_proof_pass/roots.txt new file mode 100644 index 0000000..2db7faa --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/refusal_proof_pass/roots.txt @@ -0,0 +1,4 @@ +# Sentinel root history (seq -> merkle root) +seq=0 root=sha256:3cd6ae9b4222348df6731a25a86b29aa07b3e5fa2086d7be74a0d35039674927 +seq=1 root=sha256:275c1556f2c10b4ad77adebfb2efa51e17f9b0c7c323d735ffa0f656ef6596ff +seq=2 root=sha256:99241eeb16570260679f4bdacefee352dbac47c136426579ea826e970302ba8d diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/refusal_proof_pass/seal.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/refusal_proof_pass/seal.json new file mode 100644 index 0000000..2f2490f --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/refusal_proof_pass/seal.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","created_at":"2025-03-17T03:17:43Z","files":{"integrity":"integrity.json","receipts":"receipts.jsonl","roots":"roots.txt","verifier_manifest":"verifier_manifest.json"},"format":"vm-sentinel-seal-v1","hash_algo":"sha256","instance_id":"did:vm:meridian:gw:demo","ledger_type":"jsonl","range":{"since_seq":0,"since_ts":"2025-03-17T03:17:40Z","until_seq":2,"until_ts":"2025-03-17T03:17:42Z"},"root":{"end":"sha256:99241eeb16570260679f4bdacefee352dbac47c136426579ea826e970302ba8d","seq":2,"start":"sha256:2e1cfa82b035c26cbbbdae632cea070514eb8b773f616aaeaf668e2f0be8f10d"},"schema_version":"1.0.0","seal_id":"mv1_refusal_proof_pass","sentinel_version":"0.1.0"} \ No newline at end of file diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/refusal_proof_pass/verifier_manifest.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/refusal_proof_pass/verifier_manifest.json new file mode 100644 index 0000000..95de11b --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/refusal_proof_pass/verifier_manifest.json @@ -0,0 
+1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","format":"vm-sentinel-verifier-manifest-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0","verifier":{"name":"vm_verify_sentinel_bundle.py","version":"0.1.0"}} \ No newline at end of file diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/tamper_signal_pass/README.md b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/tamper_signal_pass/README.md new file mode 100644 index 0000000..4215d3c --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/tamper_signal_pass/README.md @@ -0,0 +1 @@ +MERIDIAN v1 conformance fixture: tamper_signal_pass diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/tamper_signal_pass/integrity.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/tamper_signal_pass/integrity.json new file mode 100644 index 0000000..6c08536 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/tamper_signal_pass/integrity.json @@ -0,0 +1,31 @@ +{ + "files": [ + { + "digest": "sha256:acfcdfe8209371e521013c92996cd5647bc1e5de11d69a7ff8fb1609dd988bb7", + "path": "README.md", + "size_bytes": 52 + }, + { + "digest": "sha256:fe99e620546158cba1855aef378ce7c14de48c89e99bf7ee9eb259340d21f1df", + "path": "receipts.jsonl", + "size_bytes": 1200 + }, + { + "digest": "sha256:ab1e7d06897cf422b09e688ef6346756a1b4cea3237c350f4f5f9d4b3fb5becc", + "path": "roots.txt", + "size_bytes": 211 + }, + { + "digest": "sha256:706a5d3982fbe92d6bfba6be7e51d79321064c8c2134caac389d1ca889934786", + "path": "seal.json", + "size_bytes": 706 + }, + { + "digest": "sha256:421bc39113331983867b523c4019984ddee73136cde3af5e540978549101d4d8", + "path": "verifier_manifest.json", + "size_bytes": 238 + } + ], + "format": "vm-sentinel-integrity-v1", + "hash_algo": "sha256" +} diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/tamper_signal_pass/receipts.jsonl b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/tamper_signal_pass/receipts.jsonl new file mode 100644 index 0000000..b43d16a --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/tamper_signal_pass/receipts.jsonl @@ -0,0 +1,2 @@ +{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e","event_id":"00000000-0000-4000-8000-000000000101","event_type":"boot_event","op":"sentinel.boot_event.v1","op_digest":"sha256:d5b3a96e7033a4d0dcf2b452826cbe950f80f354ee5166487312fba6fde10758","payload":{"params":{"canonicalization_version":"sentinel-event-jcs-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0"}},"prev_event_hash":"0","result":"ok","seq":0,"trace_id":"11111111-1111-4111-8111-111111111111","ts":"2025-03-17T03:17:40Z"} +{"actor":"did:vm:meridian:gw:demo","cap_hash":"none","event_hash":"sha256:e94f886565c5ddb0593a763d185a9f939895cddbb9fe678474097bd44642636c","event_id":"00000000-0000-4000-8000-000000000102","event_type":"tamper_signal","op":"meridian.v1.tamper_signal","op_digest":"sha256:ab8c0984c1dec7b04adc4d6eb51ba3065e339ff63b55d00aa6169e5047004c0f","payload":{"kind":"tamper_signal","params":{"signal":"case_open"}},"prev_event_hash":"sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e","result":"ok","seq":1,"trace_id":"33333333-3333-4333-8333-333333333333","ts":"2025-03-17T03:17:41Z"} diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/tamper_signal_pass/roots.txt b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/tamper_signal_pass/roots.txt new file mode 100644 index 
0000000..9c36231 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/tamper_signal_pass/roots.txt @@ -0,0 +1,3 @@ +# Sentinel root history (seq -> merkle root) +seq=0 root=sha256:29e0c8bbe263751ae6b81171fd05aa3f8ab5a9085d4e6c084f1238cc77af933e +seq=1 root=sha256:086b680e09037deacf61ba9cc73b3c7da2737db7ef3802c887b12e6d76bd85ee diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/tamper_signal_pass/seal.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/tamper_signal_pass/seal.json new file mode 100644 index 0000000..a252e01 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/tamper_signal_pass/seal.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","created_at":"2025-03-17T03:17:42Z","files":{"integrity":"integrity.json","receipts":"receipts.jsonl","roots":"roots.txt","verifier_manifest":"verifier_manifest.json"},"format":"vm-sentinel-seal-v1","hash_algo":"sha256","instance_id":"did:vm:meridian:gw:demo","ledger_type":"jsonl","range":{"since_seq":0,"since_ts":"2025-03-17T03:17:40Z","until_seq":1,"until_ts":"2025-03-17T03:17:41Z"},"root":{"end":"sha256:086b680e09037deacf61ba9cc73b3c7da2737db7ef3802c887b12e6d76bd85ee","seq":1,"start":"sha256:2e1cfa82b035c26cbbbdae632cea070514eb8b773f616aaeaf668e2f0be8f10d"},"schema_version":"1.0.0","seal_id":"mv1_tamper_signal_pass","sentinel_version":"0.1.0"} \ No newline at end of file diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/tamper_signal_pass/verifier_manifest.json b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/tamper_signal_pass/verifier_manifest.json new file mode 100644 index 0000000..95de11b --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/tamper_signal_pass/verifier_manifest.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","format":"vm-sentinel-verifier-manifest-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0","verifier":{"name":"vm_verify_sentinel_bundle.py","version":"0.1.0"}} \ No newline at end of file diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/manifest.yaml b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/manifest.yaml new file mode 100644 index 0000000..c92400b --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/manifest.yaml @@ -0,0 +1,209 @@ +{ + "suite": { + "name": "MERIDIAN_V1_CONFORMANCE_TEST_SUITE", + "version": "1.0.0", + "requires": { + "sentinel_verifier_path": "tools/vm_verify_sentinel_bundle.py", + "sentinel_failure_codes_path": "tools/sentinel_failure_codes.py", + "strict": true, + "offline": true + } + }, + "tests": [ + { + "id": "MV1-PASS-REFUSAL-001", + "category": "refusal_proofs", + "name": "Refusal proof (intent + shadow_receipt) passes", + "fixture": "fixtures/pass/refusal_proof_pass", + "mode": "verify_fixture", + "verifier": { "strict": true }, + "expect": { "exit": 0, "failure_code": null, "violated_contract_ids": [] } + }, + { + "id": "MV1-PASS-TAMPER-001", + "category": "tamper_signals", + "name": "Tamper signal can be recorded and verified", + "fixture": "fixtures/pass/tamper_signal_pass", + "mode": "verify_fixture", + "verifier": { "strict": true }, + "expect": { "exit": 0, "failure_code": null, "violated_contract_ids": [] } + }, + { + "id": "MV1-PASS-OFFLINE-001", + "category": "offline_restore", + "name": "Clean-room restore: copied bundle still verifies PASS", + "fixture": "fixtures/pass/refusal_proof_pass", + "mode": "verify_clean_copy", + "verifier": { "strict": true }, + "expect": { "exit": 0, "failure_code": null, "violated_contract_ids": [] } + }, + + { 
+ "id": "MV1-FAIL-REFUSAL-001", + "category": "refusal_proofs", + "name": "Silent denial (intent without outcome) fails", + "fixture": "fixtures/fail/silent_denial_intent_without_outcome", + "mode": "verify_fixture", + "verifier": { "strict": true }, + "expect": { "exit": 1, "failure_code": "E_CHAIN_DISCONTINUITY", "violated_contract_ids": ["E-3"] } + }, + { + "id": "MV1-FAIL-REFUSAL-002", + "category": "refusal_proofs", + "name": "Execution without intent fails", + "fixture": "fixtures/fail/execution_without_intent", + "mode": "verify_fixture", + "verifier": { "strict": true }, + "expect": { "exit": 1, "failure_code": "E_CHAIN_DISCONTINUITY", "violated_contract_ids": ["E-3"] } + }, + { + "id": "MV1-FAIL-REFUSAL-003", + "category": "refusal_proofs", + "name": "Double outcome (executed + denied) fails", + "fixture": "fixtures/fail/double_outcome", + "mode": "verify_fixture", + "verifier": { "strict": true }, + "expect": { "exit": 1, "failure_code": "E_CHAIN_DISCONTINUITY", "violated_contract_ids": ["E-3"] } + }, + + { + "id": "MV1-FAIL-EVENT-001", + "category": "event_chain", + "name": "Stored event_hash mismatch fails", + "fixture": "fixtures/fail/event_hash_mismatch", + "mode": "verify_fixture", + "verifier": { "strict": true }, + "expect": { + "exit": 1, + "failure_code": "E_EVENT_HASH_MISMATCH", + "violated_contract_ids": ["E-2"], + "error_path_contains": ".event_hash" + } + }, + { + "id": "MV1-FAIL-EVENT-002", + "category": "event_chain", + "name": "Stored op_digest mismatch fails", + "fixture": "fixtures/fail/op_digest_mismatch", + "mode": "verify_fixture", + "verifier": { "strict": true }, + "expect": { + "exit": 1, + "failure_code": "E_EVENT_HASH_MISMATCH", + "violated_contract_ids": ["E-2"], + "error_path_contains": ".op_digest" + } + }, + { + "id": "MV1-FAIL-EVENT-003", + "category": "event_chain", + "name": "prev_event_hash discontinuity fails", + "fixture": "fixtures/fail/prev_event_hash_mismatch", + "mode": "verify_fixture", + "verifier": { "strict": true }, + "expect": { + "exit": 1, + "failure_code": "E_CHAIN_DISCONTINUITY", + "violated_contract_ids": ["E-3"], + "error_path_contains": ".prev_event_hash" + } + }, + { + "id": "MV1-FAIL-EVENT-004", + "category": "event_chain", + "name": "Invalid receipts.jsonl (parse error) fails", + "fixture": "fixtures/fail/invalid_jsonl_truncated", + "mode": "verify_fixture", + "verifier": { "strict": true }, + "expect": { + "exit": 1, + "failure_code": "E_SCHEMA_INVALID", + "violated_contract_ids": ["E-1"], + "error_path_contains": "receipts.jsonl" + } + }, + { + "id": "MV1-FAIL-EVENT-005", + "category": "event_chain", + "name": "Duplicate seq fails (non-monotonic)", + "fixture": "fixtures/fail/seq_non_monotonic_duplicate", + "mode": "verify_fixture", + "verifier": { "strict": true }, + "expect": { "exit": 1, "failure_code": "E_SEQ_NON_MONOTONIC", "violated_contract_ids": ["E-4"] } + }, + + { + "id": "MV1-FAIL-CAP-001", + "category": "capabilities", + "name": "Revoked capability used for execution fails", + "fixture": "fixtures/fail/revoked_capability_used", + "mode": "verify_fixture", + "verifier": { "strict": true }, + "expect": { "exit": 1, "failure_code": "E_REVOKED_CAPABILITY_USED", "violated_contract_ids": ["E-7"] } + }, + + { + "id": "MV1-FAIL-SEAL-001", + "category": "sealing", + "name": "Missing required roots file fails", + "fixture": "fixtures/fail/missing_required_file_roots", + "mode": "verify_fixture", + "verifier": { "strict": true }, + "expect": { "exit": 1, "failure_code": "E_MISSING_REQUIRED_FILE", "violated_contract_ids": 
["B-1"] } + }, + { + "id": "MV1-FAIL-SEAL-002", + "category": "sealing", + "name": "Integrity digest mismatch fails", + "fixture": "fixtures/fail/manifest_hash_mismatch", + "mode": "verify_fixture", + "verifier": { "strict": true }, + "expect": { "exit": 1, "failure_code": "E_MANIFEST_HASH_MISMATCH", "violated_contract_ids": ["I-3"] } + }, + { + "id": "MV1-FAIL-SEAL-003", + "category": "sealing", + "name": "Unlisted file in bundle fails in strict mode", + "fixture": "fixtures/fail/unlisted_extra_file_strict", + "mode": "verify_fixture", + "verifier": { "strict": true }, + "expect": { "exit": 1, "failure_code": "E_SCHEMA_INVALID", "violated_contract_ids": ["I-5"] } + }, + { + "id": "MV1-FAIL-SEAL-004", + "category": "sealing", + "name": "Merkle root mismatch fails", + "fixture": "fixtures/fail/root_mismatch", + "mode": "verify_fixture", + "verifier": { "strict": true }, + "expect": { "exit": 1, "failure_code": "E_ROOT_MISMATCH", "violated_contract_ids": ["E-5"] } + }, + { + "id": "MV1-FAIL-SEAL-005", + "category": "sealing", + "name": "Declared range mismatch fails", + "fixture": "fixtures/fail/range_mismatch", + "mode": "verify_fixture", + "verifier": { "strict": true }, + "expect": { "exit": 1, "failure_code": "E_RANGE_MISMATCH", "violated_contract_ids": ["E-6"] } + }, + { + "id": "MV1-FAIL-SEAL-006", + "category": "sealing", + "name": "Unsupported canonicalization version fails", + "fixture": "fixtures/fail/canon_version_unsupported", + "mode": "verify_fixture", + "verifier": { "strict": true }, + "expect": { "exit": 1, "failure_code": "E_CANON_VERSION_UNSUPPORTED", "violated_contract_ids": ["S-6"] } + }, + { + "id": "MV1-FAIL-SEAL-007", + "category": "sealing", + "name": "Oversize input is rejected (bounded verification)", + "fixture": "fixtures/pass/refusal_proof_pass", + "mode": "verify_fixture", + "verifier": { "strict": true, "max_file_bytes": 200 }, + "expect": { "exit": 1, "failure_code": "E_OVERSIZE_INPUT", "violated_contract_ids": ["B-3"] } + } + ] +} diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/run.py b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/run.py new file mode 100644 index 0000000..034adaf --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/run.py @@ -0,0 +1,394 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import argparse +import json +import shutil +import subprocess +import sys +from dataclasses import dataclass +from pathlib import Path + + +@dataclass(frozen=True) +class TestExpect: + exit: int + failure_code: str | None + violated_contract_ids: list[str] + error_path_contains: str | None = None + + +@dataclass(frozen=True) +class TestCase: + id: str + name: str + category: str + fixture: str + strict: bool + max_file_bytes: int | None + mode: str + expect: TestExpect + + +def _repo_root_from(suite_dir: Path) -> Path: + return suite_dir.parent.resolve() + + +def _load_manifest(path: Path) -> dict: + raw = path.read_text(encoding="utf-8").strip() + if not raw.startswith("{"): + raise ValueError( + "manifest.yaml is expected to be JSON (YAML 1.2 compatible) to avoid external YAML deps" + ) + return json.loads(raw) + + +def _import_sentinel_codes(repo_root: Path) -> tuple[set[str], set[str]]: + tools_dir = repo_root / "tools" + sys.path.insert(0, str(tools_dir)) + from sentinel_failure_codes import FailureCode, WarningCode # type: ignore + + return ({c.value for c in FailureCode}, {c.value for c in WarningCode}) + + +def _parse_tests(manifest: dict) -> list[TestCase]: + tests: list[TestCase] = [] + for t in manifest.get("tests") or []: + expect = 
t.get("expect") or {} + tests.append( + TestCase( + id=str(t["id"]), + name=str(t.get("name") or t["id"]), + category=str(t.get("category") or "uncategorized"), + fixture=str(t["fixture"]), + strict=bool((t.get("verifier") or {}).get("strict", True)), + max_file_bytes=( + int((t.get("verifier") or {}).get("max_file_bytes")) + if (t.get("verifier") or {}).get("max_file_bytes") is not None + else None + ), + mode=str(t.get("mode") or "verify_fixture"), + expect=TestExpect( + exit=int(expect["exit"]), + failure_code=( + None + if expect.get("failure_code") is None + else str(expect["failure_code"]) + ), + violated_contract_ids=list( + expect.get("violated_contract_ids") or [] + ), + error_path_contains=( + None + if expect.get("error_path_contains") is None + else str(expect["error_path_contains"]) + ), + ), + ) + ) + return tests + + +def _run_verifier( + *, + repo_root: Path, + bundle_dir: Path, + report_path: Path, + strict: bool, + max_file_bytes: int | None, +) -> tuple[int, str, str]: + verifier = repo_root / "tools" / "vm_verify_sentinel_bundle.py" + cmd = [sys.executable, str(verifier), "--bundle", str(bundle_dir)] + if strict: + cmd.append("--strict") + cmd += ["--report", str(report_path)] + if max_file_bytes is not None: + cmd += ["--max-file-bytes", str(max_file_bytes)] + + proc = subprocess.run(cmd, capture_output=True, text=True) + return proc.returncode, proc.stdout, proc.stderr + + +def _read_json(path: Path) -> dict: + return json.loads(path.read_text(encoding="utf-8")) + + +def _write_text(path: Path, text: str) -> None: + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text(text, encoding="utf-8") + + +def _stable_json(obj: object) -> str: + return json.dumps(obj, indent=2, sort_keys=True, ensure_ascii=False) + "\n" + + +def main(argv: list[str]) -> int: + p = argparse.ArgumentParser() + default_manifest = (Path(__file__).resolve().parent / "manifest.yaml").as_posix() + p.add_argument( + "--manifest", + default=default_manifest, + help="Path to manifest.yaml (JSON content). 
Defaults to manifest.yaml beside this script.", + ) + p.add_argument( + "--list", + action="store_true", + help="List tests and expected outcomes (no execution).", + ) + args = p.parse_args(argv) + + manifest_path = Path(args.manifest).resolve() + suite_dir = manifest_path.parent.resolve() + repo_root = _repo_root_from(suite_dir) + + out_dir = suite_dir / "out" + reports_dir = out_dir / "sentinel_reports" + stdio_dir = out_dir / "sentinel_stdio" + clean_room_dir = out_dir / "clean_room" + records_dir = out_dir / "tests" + reports_dir.mkdir(parents=True, exist_ok=True) + stdio_dir.mkdir(parents=True, exist_ok=True) + records_dir.mkdir(parents=True, exist_ok=True) + + manifest = _load_manifest(manifest_path) + tests = _parse_tests(manifest) + tests = sorted(tests, key=lambda t: t.id) + + if args.list: + lines: list[str] = [] + suite = manifest.get("suite") or {} + lines.append( + f"Suite: {suite.get('name', 'MERIDIAN_V1_CONFORMANCE_TEST_SUITE')}" + ) + lines.append(f"Version: {suite.get('version', '')}".rstrip()) + lines.append(f"Tests: {len(tests)}") + lines.append("") + for t in tests: + exp = t.expect + lines.append( + f"{t.id} | expect.exit={exp.exit} failure_code={exp.failure_code} violated={exp.violated_contract_ids} | fixture={t.fixture} | strict={t.strict}" + ) + print("\n".join(lines)) + return 0 + + if not tests: + out_dir = suite_dir / "out" + out_dir.mkdir(parents=True, exist_ok=True) + (out_dir / "meridian_v1_conformance_report.txt").write_text( + "MERIDIAN v1 Conformance Suite\n\n[FAIL] manifest.yaml contains zero tests\n", + encoding="utf-8", + ) + return 2 + + known_failures, known_warnings = _import_sentinel_codes(repo_root) + + suite_results: list[dict] = [] + failures: list[str] = [] + + for t in tests: + fixture_src = (suite_dir / t.fixture).resolve() + if not fixture_src.exists(): + failures.append(f"{t.id}: missing fixture path {t.fixture}") + continue + + if t.mode == "verify_fixture": + bundle_dir = fixture_src + elif t.mode == "verify_clean_copy": + target = (clean_room_dir / t.id).resolve() + if target.exists(): + shutil.rmtree(target) + shutil.copytree(fixture_src, target) + bundle_dir = target + else: + failures.append(f"{t.id}: unknown mode {t.mode!r}") + continue + + report_path = (reports_dir / f"{t.id}.verification_report.json").resolve() + + code, stdout, stderr = _run_verifier( + repo_root=repo_root, + bundle_dir=bundle_dir, + report_path=report_path, + strict=t.strict, + max_file_bytes=t.max_file_bytes, + ) + + stdout_path = stdio_dir / f"{t.id}.stdout.txt" + stderr_path = stdio_dir / f"{t.id}.stderr.txt" + _write_text(stdout_path, stdout) + _write_text(stderr_path, stderr) + + if not report_path.exists(): + failures.append(f"{t.id}: verifier did not write report: {report_path}") + continue + + report = _read_json(report_path) + observed_failure_code = report.get("failure_code") + observed_ok = bool(report.get("ok")) + observed_errors = report.get("errors") or [] + observed_warnings = report.get("warnings") or [] + observed_warned_contract_ids = report.get("warned_contract_ids") or [] + + # Basic invariants: only known Sentinel codes appear. 
+ if ( + observed_failure_code is not None + and observed_failure_code not in known_failures + ): + failures.append( + f"{t.id}: unknown failure_code in report: {observed_failure_code}" + ) + + for w in report.get("warnings") or []: + wc = w.get("code") + if isinstance(wc, str) and wc not in known_warnings: + failures.append(f"{t.id}: unknown warning code in report: {wc}") + + violated = report.get("violated_contract_ids") or [] + + # Expectations + ok = True + messages: list[str] = [] + + def fail(msg: str) -> None: + nonlocal ok + ok = False + messages.append(msg) + failures.append(f"{t.id}: {msg}") + + if code != t.expect.exit: + fail(f"exit={code} expected={t.expect.exit}") + + if observed_failure_code != t.expect.failure_code: + fail( + f"failure_code={observed_failure_code!r} expected={t.expect.failure_code!r}" + ) + + expected_contracts = set(t.expect.violated_contract_ids) + if not expected_contracts.issubset(set(violated)): + fail( + f"violated_contract_ids missing expected entries: {sorted(expected_contracts - set(violated))}" + ) + + if t.expect.error_path_contains: + found = False + for e in report.get("errors") or []: + path = e.get("path") or "" + if t.expect.error_path_contains in str(path): + found = True + break + if not found: + fail( + f"expected an error path containing {t.expect.error_path_contains!r}" + ) + + # PASS hygiene: strict PASS implies no findings. + if t.expect.exit == 0 and t.expect.failure_code is None: + if not observed_ok: + fail("expected ok:true in verification_report.json") + if observed_errors: + fail("expected errors:[] in verification_report.json") + if observed_warnings: + fail("expected warnings:[] in verification_report.json") + if violated: + fail("expected violated_contract_ids:[] in verification_report.json") + if observed_warned_contract_ids: + fail("expected warned_contract_ids:[] in verification_report.json") + + record_obj = { + "format": "meridian-v1-test-record-v1", + "id": t.id, + "name": t.name, + "category": t.category, + "fixture": t.fixture, + "mode": t.mode, + "strict": t.strict, + "max_file_bytes": t.max_file_bytes, + "expected": { + "exit": t.expect.exit, + "failure_code": t.expect.failure_code, + "violated_contract_ids": t.expect.violated_contract_ids, + "error_path_contains": t.expect.error_path_contains, + }, + "observed": { + "exit": code, + "failure_code": observed_failure_code, + "violated_contract_ids": violated, + "warned_contract_ids": observed_warned_contract_ids, + }, + "artifacts": { + "bundle_dir": str(bundle_dir.relative_to(suite_dir)), + "verification_report_json": str(report_path.relative_to(suite_dir)), + "stdout_txt": str(stdout_path.relative_to(suite_dir)), + "stderr_txt": str(stderr_path.relative_to(suite_dir)), + }, + "ok": ok, + "failure_messages": messages, + } + (records_dir / f"{t.id}.record.json").write_text( + _stable_json(record_obj), encoding="utf-8" + ) + + suite_results.append( + { + "id": t.id, + "name": t.name, + "category": t.category, + "fixture": t.fixture, + "mode": t.mode, + "strict": t.strict, + "max_file_bytes": t.max_file_bytes, + "expected": { + "exit": t.expect.exit, + "failure_code": t.expect.failure_code, + "violated_contract_ids": t.expect.violated_contract_ids, + }, + "observed": { + "exit": code, + "failure_code": observed_failure_code, + "violated_contract_ids": violated, + }, + "artifacts": record_obj["artifacts"], + "ok": ok, + } + ) + + report_obj = { + "format": "meridian-v1-conformance-report-v1", + "suite": manifest.get("suite") or {}, + "counts": { + "tests": len(tests), + 
"passed": sum(1 for r in suite_results if r.get("ok") is True), + "failed": sum(1 for r in suite_results if r.get("ok") is False), + }, + "results": suite_results, + "failures": failures, + } + + out_json = out_dir / "meridian_v1_conformance_report.json" + out_txt = out_dir / "meridian_v1_conformance_report.txt" + out_json.write_text(_stable_json(report_obj), encoding="utf-8") + + # Insurer/auditor-friendly canonical names. + (out_dir / "report.json").write_text(_stable_json(report_obj), encoding="utf-8") + + lines: list[str] = [] + lines.append("MERIDIAN v1 Conformance Suite") + lines.append(f"Tests: {report_obj['counts']['tests']}") + lines.append(f"Passed: {report_obj['counts']['passed']}") + lines.append(f"Failed: {report_obj['counts']['failed']}") + if failures: + lines.append("") + lines.append("Failures:") + for f in failures: + lines.append(f"- {f}") + summary_text = "\n".join(lines) + "\n" + out_txt.write_text(summary_text, encoding="utf-8") + (out_dir / "report.txt").write_text(summary_text, encoding="utf-8") + + if failures: + return 1 + return 0 + + +if __name__ == "__main__": + raise SystemExit(main(sys.argv[1:])) diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/run.sh b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/run.sh new file mode 100644 index 0000000..b919b61 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/run.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash +set -euo pipefail + +SUITE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "$SUITE_DIR/.." && pwd)" + +PYTHON_BIN="${PYTHON_BIN:-python3}" + +if [[ ! -f "$REPO_ROOT/tools/vm_verify_sentinel_bundle.py" ]]; then + echo "[FAIL] Sentinel verifier not found: $REPO_ROOT/tools/vm_verify_sentinel_bundle.py" >&2 + exit 2 +fi + +if [[ ! -f "$REPO_ROOT/tools/check_sentinel_contract_parity.py" ]]; then + echo "[FAIL] Parity gate not found: $REPO_ROOT/tools/check_sentinel_contract_parity.py" >&2 + exit 2 +fi + +echo "[RUN] Sentinel contract parity gate" +"$PYTHON_BIN" "$REPO_ROOT/tools/check_sentinel_contract_parity.py" + +echo "[RUN] MERIDIAN v1 conformance suite" +set +e +"$PYTHON_BIN" "$SUITE_DIR/run.py" --manifest "$SUITE_DIR/manifest.yaml" +status=$? +set -e + +report_txt="$SUITE_DIR/out/report.txt" +fallback_txt="$SUITE_DIR/out/meridian_v1_conformance_report.txt" + +if [[ -f "$report_txt" ]]; then + cat "$report_txt" +elif [[ -f "$fallback_txt" ]]; then + cat "$fallback_txt" +else + echo "[WARN] suite did not produce report.txt under $SUITE_DIR/out/" >&2 +fi + +exit "$status" diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/README.md b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/README.md new file mode 100644 index 0000000..a1cbce4 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/README.md @@ -0,0 +1,14 @@ +# Test Categories + +This directory is explanatory: the executable catalog is `../manifest.yaml`. 
+ +Categories mirror the MERIDIAN v1 spec boundaries: +- `identity/` (audit-required boundaries; only what can be tested offline) +- `event_chain/` (schema, hashes, chain, seq) +- `refusal_proofs/` (intent → exactly-one-outcome; no silent denial) +- `capabilities/` (revocation enforcement) +- `sealing/` (bundle completeness, integrity, roots, ranges) +- `tamper_signals/` (recording signals; not fully enforceable by verifier) +- `offline_restore/` (clean-room copy verification) +- `ml_boundary/` (v1 boundary represented by command lifecycle invariants) + diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/capabilities/README.md b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/capabilities/README.md new file mode 100644 index 0000000..91e977a --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/capabilities/README.md @@ -0,0 +1,8 @@ +# capabilities/ + +MERIDIAN v1 binds “authority” to Sentinel’s capability surface. + +Conformance focus (verifier-enforced): +- Revocation must be representable (`cap_revoke.payload.revoked_cap_hash`). +- Any `action_executed` using a revoked `cap_hash` must FAIL verification: `E_REVOKED_CAPABILITY_USED`. + diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/event_chain/README.md b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/event_chain/README.md new file mode 100644 index 0000000..21391a5 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/event_chain/README.md @@ -0,0 +1,9 @@ +# event_chain/ + +Conformance focus (verifier-enforced): +- Event schema validity (`E_SCHEMA_INVALID`) +- `event_hash` recomputation (`E_EVENT_HASH_MISMATCH`) +- `op_digest` recomputation (`E_EVENT_HASH_MISMATCH`) +- `prev_event_hash` continuity (`E_CHAIN_DISCONTINUITY`) +- `seq` monotonicity (`E_SEQ_NON_MONOTONIC`) + diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/identity/README.md b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/identity/README.md new file mode 100644 index 0000000..b524b45 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/identity/README.md @@ -0,0 +1,7 @@ +# identity/ + +Identity material (key origin, HSM provenance, provisioning ceremony) is largely **AUDIT‑REQUIRED** in v1. + +This suite only encodes what can be tested offline without secrets: +- identity claims, once emitted into receipts, must be tamper‑evident (verified by Sentinel bundle verification) + diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/ml_boundary/README.md b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/ml_boundary/README.md new file mode 100644 index 0000000..a6f9697 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/ml_boundary/README.md @@ -0,0 +1,12 @@ +# ml_boundary/ + +MERIDIAN v1 does not claim ML correctness. It enforces evidence. + +Conformance modeling in v1: +- “automation must not execute directly” is represented as: + - no `action_executed` without prior `action_intent` (strict linkage) + - no double outcome for a `trace_id` + +Failure code: +- `E_CHAIN_DISCONTINUITY` + diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/offline_restore/README.md b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/offline_restore/README.md new file mode 100644 index 0000000..91111a5 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/offline_restore/README.md @@ -0,0 +1,9 @@ +# offline_restore/ + +Clean-room drills: +- copy a seal bundle into an empty directory +- run verifier `--strict` +- require PASS + +This enforces “offline-only” and “no hidden dependencies”. 
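[Editor's aside: a minimal sketch of the `event_chain/` checks listed above, assuming the receipt shape used by the fixtures in this suite (canonical JSON = sorted keys, compact separators, UTF-8; `sha256:`-prefixed digests; `prev_event_hash` of `"0"` at genesis; `event_hash` computed over the event minus its own `event_hash` field, as in `tools/generate_fixtures.py`). The authoritative implementation is `tools/vm_verify_sentinel_bundle.py`, which is referenced but not included in this patch.]

```python
import hashlib
import json


def canonical_bytes(obj: dict) -> bytes:
    # sentinel-event-jcs-v1 as used by the fixtures: sorted keys,
    # compact separators, UTF-8 encoding.
    return json.dumps(
        obj, sort_keys=True, separators=(",", ":"), ensure_ascii=False
    ).encode("utf-8")


def check_event_chain(receipts_jsonl: str) -> None:
    prev_hash, prev_seq = "0", -1
    for line in receipts_jsonl.splitlines():
        ev = json.loads(line)  # a parse error here maps to E_SCHEMA_INVALID (E-1)
        body = {k: v for k, v in ev.items() if k != "event_hash"}
        recomputed = "sha256:" + hashlib.sha256(canonical_bytes(body)).hexdigest()
        if recomputed != ev["event_hash"]:
            raise ValueError("E_EVENT_HASH_MISMATCH")  # E-2
        if ev["prev_event_hash"] != prev_hash:
            raise ValueError("E_CHAIN_DISCONTINUITY")  # E-3
        if ev["seq"] <= prev_seq:
            raise ValueError("E_SEQ_NON_MONOTONIC")  # E-4
        prev_hash, prev_seq = ev["event_hash"], ev["seq"]
```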
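[Editor's aside: and for the `offline_restore/` drill just above, a sketch of the clean-room copy step. The verifier invocation (`--bundle`, `--strict`) is taken from `run.py`'s `verify_clean_copy` mode; the temporary-directory scaffolding is illustrative.]

```python
import shutil
import subprocess
import sys
import tempfile
from pathlib import Path


def clean_room_verify(repo_root: Path, bundle: Path) -> int:
    # Copy the sealed bundle into an empty directory, then verify it
    # there with no access to the original tree.
    verifier = repo_root / "tools" / "vm_verify_sentinel_bundle.py"
    with tempfile.TemporaryDirectory() as tmp:
        copy = Path(tmp) / bundle.name
        shutil.copytree(bundle, copy)
        proc = subprocess.run(
            [sys.executable, str(verifier), "--bundle", str(copy), "--strict"],
            capture_output=True,
            text=True,
        )
        return proc.returncode  # 0 is required for the drill to PASS
```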
+ diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/refusal_proofs/README.md b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/refusal_proofs/README.md new file mode 100644 index 0000000..743aa3c --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/refusal_proofs/README.md @@ -0,0 +1,13 @@ +# refusal_proofs/ + +MERIDIAN v1 wedge: **provable refusal**. + +Conformance focus (verifier-enforced in `--strict` mode): +- `action_intent` MUST have exactly one outcome: + - `action_executed` OR `shadow_receipt` +- silent denial is forbidden (intent without outcome) +- execution without intent is forbidden + +Failure code: +- `E_CHAIN_DISCONTINUITY` + diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/sealing/README.md b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/sealing/README.md new file mode 100644 index 0000000..7a7478e --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/sealing/README.md @@ -0,0 +1,11 @@ +# sealing/ + +Conformance focus (verifier-enforced): +- required files present (`E_MISSING_REQUIRED_FILE`) +- integrity hashes match (`E_MANIFEST_HASH_MISMATCH`) +- no unlisted files in `--strict` (`E_SCHEMA_INVALID`) +- declared range matches receipts (`E_RANGE_MISMATCH`) +- declared root matches recomputation (`E_ROOT_MISMATCH`) +- canonicalization version is supported (`E_CANON_VERSION_UNSUPPORTED`) +- input bounds enforced (`E_OVERSIZE_INPUT`) + diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/tamper_signals/README.md b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/tamper_signals/README.md new file mode 100644 index 0000000..989f2da --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tests/tamper_signals/README.md @@ -0,0 +1,9 @@ +# tamper_signals/ + +Sentinel can prove “this event was recorded” (integrity), not “the device detected it”. + +This category includes fixtures that demonstrate: +- `tamper_signal` / `corruption_detected` events can be recorded and verified offline. + +Detection and emission policies remain AUDIT‑REQUIRED. 
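[Editor's aside: before the fixture generator below, a sketch of the refusal-proof invariant stated in `refusal_proofs/` above: every `action_intent` trace must resolve to exactly one outcome, and no execution may appear without an intent. Event-type names come from the fixtures; `manifest.yaml` maps all three violations (silent denial, double outcome, execution without intent) to `E_CHAIN_DISCONTINUITY` / E-3.]

```python
from collections import Counter


def check_refusal_proofs(events: list[dict]) -> None:
    intents = {e["trace_id"] for e in events if e["event_type"] == "action_intent"}
    outcomes = Counter(
        e["trace_id"]
        for e in events
        if e["event_type"] in ("action_executed", "shadow_receipt")
    )
    for trace in intents:
        # 0 outcomes = silent denial; 2+ outcomes = double outcome.
        if outcomes[trace] != 1:
            raise ValueError("E_CHAIN_DISCONTINUITY")  # E-3
    executed = {e["trace_id"] for e in events if e["event_type"] == "action_executed"}
    if executed - intents:
        # Execution without a recorded intent is forbidden.
        raise ValueError("E_CHAIN_DISCONTINUITY")  # E-3
```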
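[Editor's aside: similarly, a sketch of the `sealing/` bundle-integrity checks. The digest/size layout follows `vm-sentinel-integrity-v1` as shown in the fixtures (where `integrity.json` does not list itself); the strict-mode file walk for unlisted files is an assumption based on the `unlisted_extra_file_strict` fixture and its expected `E_SCHEMA_INVALID` / I-5 outcome.]

```python
import hashlib
import json
from pathlib import Path


def check_integrity(bundle: Path) -> None:
    manifest = json.loads((bundle / "integrity.json").read_text(encoding="utf-8"))
    if manifest["hash_algo"] != "sha256":
        raise ValueError("unsupported hash_algo")
    listed = {entry["path"] for entry in manifest["files"]}
    for entry in manifest["files"]:
        data = (bundle / entry["path"]).read_bytes()
        digest = "sha256:" + hashlib.sha256(data).hexdigest()
        if digest != entry["digest"] or len(data) != entry["size_bytes"]:
            raise ValueError("E_MANIFEST_HASH_MISMATCH")  # I-3
    # integrity.json does not list itself in the fixtures, so exclude it.
    present = {p.name for p in bundle.iterdir() if p.is_file()}
    if present - listed - {"integrity.json"}:
        raise ValueError("E_SCHEMA_INVALID")  # I-5, strict mode only
```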
+ diff --git a/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tools/generate_fixtures.py b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tools/generate_fixtures.py new file mode 100644 index 0000000..3c91217 --- /dev/null +++ b/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/tools/generate_fixtures.py @@ -0,0 +1,944 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import argparse +import hashlib +import json +import shutil +from pathlib import Path + + +def _require_no_floats(value: object, *, path: str = "$") -> None: + if isinstance(value, float): + raise ValueError(f"float not allowed in canonical JSON at {path}") + if isinstance(value, dict): + for k, v in value.items(): + _require_no_floats(v, path=f"{path}.{k}") + elif isinstance(value, list): + for i, v in enumerate(value): + _require_no_floats(v, path=f"{path}[{i}]") + + +def _canonical_json_bytes(obj: object) -> bytes: + _require_no_floats(obj) + return json.dumps( + obj, + sort_keys=True, + separators=(",", ":"), + ensure_ascii=False, + allow_nan=False, + ).encode("utf-8") + + +def _vmhash(data: bytes, *, hash_algo: str) -> str: + if hash_algo != "sha256": + raise ValueError("this fixture generator supports sha256 only") + return f"sha256:{hashlib.sha256(data).hexdigest()}" + + +def _hex_part(value: str) -> str: + return value.split(":", 1)[-1] + + +def _compute_merkle_root(leaves: list[str], *, hash_algo: str) -> str: + if not leaves: + return _vmhash(b"empty", hash_algo=hash_algo) + if len(leaves) == 1: + return leaves[0] + + level = leaves[:] + while len(level) > 1: + next_level: list[str] = [] + for i in range(0, len(level), 2): + left = level[i] + right = level[i + 1] if i + 1 < len(level) else left + combined = (_hex_part(left) + _hex_part(right)).encode("utf-8") + next_level.append(_vmhash(combined, hash_algo=hash_algo)) + level = next_level + return level[0] + + +def _op_digest(*, op: str, params: dict, hash_algo: str) -> str: + op_obj = {"op": op, "params": params} + return _vmhash(_canonical_json_bytes(op_obj), hash_algo=hash_algo) + + +def _event_hash(event_without_event_hash: dict, *, hash_algo: str) -> str: + return _vmhash(_canonical_json_bytes(event_without_event_hash), hash_algo=hash_algo) + + +def _write_json(path: Path, obj: object, *, pretty: bool) -> None: + path.parent.mkdir(parents=True, exist_ok=True) + if pretty: + path.write_text( + json.dumps(obj, indent=2, sort_keys=True, ensure_ascii=False) + "\n", + encoding="utf-8", + ) + return + path.write_text( + json.dumps(obj, sort_keys=True, separators=(",", ":"), ensure_ascii=False), + encoding="utf-8", + ) + + +def _write_receipts_jsonl(path: Path, events: list[dict]) -> None: + path.parent.mkdir(parents=True, exist_ok=True) + lines = [ + json.dumps(e, sort_keys=True, separators=(",", ":"), ensure_ascii=False) + for e in events + ] + path.write_text("\n".join(lines) + "\n", encoding="utf-8") + + +def _write_roots_txt(path: Path, *, roots_by_seq: dict[int, str]) -> None: + path.parent.mkdir(parents=True, exist_ok=True) + lines = ["# Sentinel root history (seq -> merkle root)"] + for seq in sorted(roots_by_seq.keys()): + lines.append(f"seq={seq} root={roots_by_seq[seq]}") + path.write_text("\n".join(lines) + "\n", encoding="utf-8") + + +def _file_digest(path: Path, *, hash_algo: str) -> tuple[str, int]: + data = path.read_bytes() + return _vmhash(data, hash_algo=hash_algo), len(data) + + +def _write_integrity_json( + bundle_dir: Path, + *, + include_paths: list[str], + hash_algo: str, +) -> None: + files: list[dict] = [] + for rel in include_paths: + fp = (bundle_dir / 
rel).resolve() + digest, size = _file_digest(fp, hash_algo=hash_algo) + files.append({"path": rel, "digest": digest, "size_bytes": size}) + + integrity = { + "format": "vm-sentinel-integrity-v1", + "hash_algo": hash_algo, + "files": sorted(files, key=lambda e: e["path"]), + } + _write_json(bundle_dir / "integrity.json", integrity, pretty=True) + + +def _flatten_events_for_verifier(events: list[dict], *, since_seq: int, until_seq: int) -> list[dict]: + by_seq: dict[int, list[dict]] = {} + for ev in events: + seq = ev.get("seq") + if isinstance(seq, int): + by_seq.setdefault(seq, []).append(ev) + ordered = [by_seq[s][0] for s in range(since_seq, until_seq + 1) if s in by_seq] + return ordered + + +def _build_bundle( + bundle_dir: Path, + *, + events_in_file_order: list[dict], + since_seq: int, + until_seq: int, + seal_id: str, + instance_id: str, + created_at: str, + canonicalization_version: str, + sentinel_version: str, + schema_version: str, + hash_algo: str, + include_readme: bool = True, + omit_roots_file: bool = False, + extra_unlisted_file: bool = False, + integrity_override: callable | None = None, +) -> None: + if bundle_dir.exists(): + shutil.rmtree(bundle_dir) + bundle_dir.mkdir(parents=True, exist_ok=True) + + if include_readme: + (bundle_dir / "README.md").write_text( + f"MERIDIAN v1 conformance fixture: {bundle_dir.name}\n", encoding="utf-8" + ) + + # Build event hashes in file order, but with prev_event_hash chaining defined by seq order. + # + # We set prev_event_hash for each event to the previous event hash in (seq, file-order) ordering. + # This matches the verifier's deterministic sort-by-seq behavior for normal (unique seq) fixtures, + # and produces stable behavior for duplicate-seq fixtures (extras may be dropped by the verifier). + events_sorted = sorted( + enumerate(events_in_file_order), + key=lambda t: (int(t[1]["seq"]), t[0]), + ) + + # Fill op_digest for every event first (may already be overridden by caller). + for _, ev in events_sorted: + payload = ev.get("payload") + if not isinstance(payload, dict): + raise ValueError("payload must be an object") + params = payload.get("params") + if params is None: + params = {} + payload["params"] = params + if not isinstance(params, dict): + raise ValueError("payload.params must be an object") + if not isinstance(ev.get("op"), str): + raise ValueError("op must be a string") + if not isinstance(ev.get("op_digest"), str) or not ev.get("op_digest"): + ev["op_digest"] = _op_digest(op=ev["op"], params=params, hash_algo=hash_algo) + + # Fill prev_event_hash + event_hash. + prev_hash = "0" + for idx, (_, ev) in enumerate(events_sorted): + if idx == 0: + if int(ev["seq"]) != 0: + raise ValueError("first event must have seq=0 for these fixtures") + ev["prev_event_hash"] = "0" + else: + if not isinstance(ev.get("prev_event_hash"), str) or not ev.get("prev_event_hash"): + ev["prev_event_hash"] = prev_hash + + ev_no_hash = dict(ev) + ev_no_hash.pop("event_hash", None) + ev["event_hash"] = _event_hash(ev_no_hash, hash_algo=hash_algo) + prev_hash = ev["event_hash"] + + # Write receipts + receipts_path = bundle_dir / "receipts.jsonl" + _write_receipts_jsonl(receipts_path, events_in_file_order) + + # Seal + roots are computed over the verifier-flattened range view. 
+ flattened = _flatten_events_for_verifier(events_in_file_order, since_seq=since_seq, until_seq=until_seq) + leaves = [ev["event_hash"] for ev in flattened] + root_start = _vmhash(b"empty", hash_algo=hash_algo) + root_end = _compute_merkle_root(leaves, hash_algo=hash_algo) + + # Root history for the declared range (genesis-range fixtures only). + roots_by_seq: dict[int, str] = {} + running: list[str] = [] + for ev in flattened: + running.append(ev["event_hash"]) + roots_by_seq[int(ev["seq"])] = _compute_merkle_root(running, hash_algo=hash_algo) + + if not omit_roots_file: + _write_roots_txt(bundle_dir / "roots.txt", roots_by_seq=roots_by_seq) + + seal = { + "format": "vm-sentinel-seal-v1", + "sentinel_version": sentinel_version, + "schema_version": schema_version, + "hash_algo": hash_algo, + "canonicalization_version": canonicalization_version, + "seal_id": seal_id, + "created_at": created_at, + "instance_id": instance_id, + "ledger_type": "jsonl", + "range": { + "since_seq": since_seq, + "until_seq": until_seq, + "since_ts": str(events_in_file_order[0]["ts"]), + "until_ts": str(events_in_file_order[-1]["ts"]), + }, + "root": {"start": root_start, "end": root_end, "seq": until_seq}, + "files": { + "receipts": "receipts.jsonl", + "roots": "roots.txt", + "integrity": "integrity.json", + "verifier_manifest": "verifier_manifest.json", + }, + } + _write_json(bundle_dir / "seal.json", seal, pretty=False) + + verifier_manifest = { + "format": "vm-sentinel-verifier-manifest-v1", + "sentinel_version": sentinel_version, + "schema_version": schema_version, + "canonicalization_version": canonicalization_version, + "hash_algo": hash_algo, + "verifier": {"name": "vm_verify_sentinel_bundle.py", "version": "0.1.0"}, + } + _write_json(bundle_dir / "verifier_manifest.json", verifier_manifest, pretty=False) + + if extra_unlisted_file: + (bundle_dir / "UNLISTED.bin").write_bytes(b"unlisted") + + # Default integrity: cover all bundle files (including README.md if present). 
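+    # The optional UNLISTED.bin is deliberately left out of integrity.json so
+    # that strict verifiers can flag it as an unlisted bundle member.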
+ include_paths = ["receipts.jsonl", "seal.json", "verifier_manifest.json"] + if not omit_roots_file: + include_paths.append("roots.txt") + if include_readme: + include_paths.append("README.md") + + _write_integrity_json(bundle_dir, include_paths=include_paths, hash_algo=hash_algo) + + if integrity_override is not None: + integrity_override(bundle_dir) + + +def _uuid(n: int) -> str: + return f"00000000-0000-4000-8000-{n:012d}" + + +def _base_boot_event(*, seq: int, event_id: int, actor: str, ts: str, trace_id: str) -> dict: + return { + "event_id": _uuid(event_id), + "seq": seq, + "ts": ts, + "event_type": "boot_event", + "actor": actor, + "cap_hash": "none", + "op": "sentinel.boot_event.v1", + "op_digest": "", + "result": "ok", + "trace_id": trace_id, + "prev_event_hash": "", + "event_hash": "", + "payload": { + "params": { + "canonicalization_version": "sentinel-event-jcs-v1", + "hash_algo": "sha256", + "schema_version": "1.0.0", + "sentinel_version": "0.1.0", + } + }, + } + + +def main() -> int: + p = argparse.ArgumentParser() + p.add_argument( + "--suite-dir", + default=str(Path(__file__).resolve().parents[1]), + help="Path to MERIDIAN_V1_CONFORMANCE_TEST_SUITE directory", + ) + args = p.parse_args() + + suite_dir = Path(args.suite_dir).resolve() + fixtures_dir = suite_dir / "fixtures" + + hash_algo = "sha256" + canonicalization_version = "sentinel-event-jcs-v1" + sentinel_version = "0.1.0" + schema_version = "1.0.0" + instance_id = "did:vm:meridian:gw:demo" + + pass_dir = fixtures_dir / "pass" + fail_dir = fixtures_dir / "fail" + + # --- PASS fixtures --- + + # PASS: refusal proof (intent + shadow receipt) + boot_trace = "11111111-1111-4111-8111-111111111111" + cmd_trace = "22222222-2222-4222-8222-222222222222" + refusal_events = [ + _base_boot_event( + seq=0, + event_id=1, + actor=instance_id, + ts="2025-03-17T03:17:40Z", + trace_id=boot_trace, + ), + { + "event_id": _uuid(2), + "seq": 1, + "ts": "2025-03-17T03:17:41Z", + "event_type": "action_intent", + "actor": "did:vm:operator:demo", + "cap_hash": "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "op": "meridian.v1.plc.write", + "op_digest": "", + "result": "ok", + "trace_id": cmd_trace, + "prev_event_hash": "", + "event_hash": "", + "payload": { + "kind": "command_requested", + "params": {"plc": "plc:demo", "register": "R1", "value": "1"}, + }, + }, + { + "event_id": _uuid(3), + "seq": 2, + "ts": "2025-03-17T03:17:42Z", + "event_type": "shadow_receipt", + "actor": instance_id, + "cap_hash": "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "op": "meridian.v1.plc.write", + "op_digest": "", + "result": "deny", + "trace_id": cmd_trace, + "prev_event_hash": "", + "event_hash": "", + "payload": { + "kind": "command_refused", + "reason_code": "safety_interlock", + "reason_text": "safety policy denied write", + "would_have_done": {"op": "meridian.v1.plc.write"}, + "params": {"plc": "plc:demo", "register": "R1", "value": "1"}, + }, + }, + ] + + _build_bundle( + pass_dir / "refusal_proof_pass", + events_in_file_order=refusal_events, + since_seq=0, + until_seq=2, + seal_id="mv1_refusal_proof_pass", + instance_id=instance_id, + created_at="2025-03-17T03:17:43Z", + canonicalization_version=canonicalization_version, + sentinel_version=sentinel_version, + schema_version=schema_version, + hash_algo=hash_algo, + ) + + # PASS: tamper signal recorded (no trace linkage semantics) + tamper_trace = "33333333-3333-4333-8333-333333333333" + tamper_events = [ + _base_boot_event( + seq=0, + 
event_id=101, + actor=instance_id, + ts="2025-03-17T03:17:40Z", + trace_id=boot_trace, + ), + { + "event_id": _uuid(102), + "seq": 1, + "ts": "2025-03-17T03:17:41Z", + "event_type": "tamper_signal", + "actor": instance_id, + "cap_hash": "none", + "op": "meridian.v1.tamper_signal", + "op_digest": "", + "result": "ok", + "trace_id": tamper_trace, + "prev_event_hash": "", + "event_hash": "", + "payload": {"kind": "tamper_signal", "params": {"signal": "case_open"}}, + }, + ] + + _build_bundle( + pass_dir / "tamper_signal_pass", + events_in_file_order=tamper_events, + since_seq=0, + until_seq=1, + seal_id="mv1_tamper_signal_pass", + instance_id=instance_id, + created_at="2025-03-17T03:17:42Z", + canonicalization_version=canonicalization_version, + sentinel_version=sentinel_version, + schema_version=schema_version, + hash_algo=hash_algo, + ) + + # --- FAIL fixtures --- + + # FAIL: silent denial (intent without outcome) => E_CHAIN_DISCONTINUITY (strict linkage) + silent_events = [ + _base_boot_event( + seq=0, + event_id=201, + actor=instance_id, + ts="2025-03-17T03:17:40Z", + trace_id=boot_trace, + ), + { + "event_id": _uuid(202), + "seq": 1, + "ts": "2025-03-17T03:17:41Z", + "event_type": "action_intent", + "actor": "did:vm:operator:demo", + "cap_hash": "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + "op": "meridian.v1.plc.write", + "op_digest": "", + "result": "ok", + "trace_id": "22222222-2222-4222-8222-222222222223", + "prev_event_hash": "", + "event_hash": "", + "payload": {"kind": "command_requested", "params": {"plc": "plc:demo", "register": "R2", "value": "1"}}, + }, + ] + _build_bundle( + fail_dir / "silent_denial_intent_without_outcome", + events_in_file_order=silent_events, + since_seq=0, + until_seq=1, + seal_id="mv1_silent_denial", + instance_id=instance_id, + created_at="2025-03-17T03:17:42Z", + canonicalization_version=canonicalization_version, + sentinel_version=sentinel_version, + schema_version=schema_version, + hash_algo=hash_algo, + ) + + # FAIL: execution without intent => E_CHAIN_DISCONTINUITY (strict linkage) + exec_only_events = [ + _base_boot_event( + seq=0, + event_id=211, + actor=instance_id, + ts="2025-03-17T03:17:40Z", + trace_id=boot_trace, + ), + { + "event_id": _uuid(212), + "seq": 1, + "ts": "2025-03-17T03:17:41Z", + "event_type": "action_executed", + "actor": instance_id, + "cap_hash": "sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", + "op": "meridian.v1.plc.write", + "op_digest": "", + "result": "ok", + "trace_id": "99999999-9999-4999-8999-999999999999", + "prev_event_hash": "", + "event_hash": "", + "payload": {"kind": "command_executed", "params": {"plc": "plc:demo", "register": "R9", "value": "1"}}, + }, + ] + _build_bundle( + fail_dir / "execution_without_intent", + events_in_file_order=exec_only_events, + since_seq=0, + until_seq=1, + seal_id="mv1_execution_without_intent", + instance_id=instance_id, + created_at="2025-03-17T03:17:42Z", + canonicalization_version=canonicalization_version, + sentinel_version=sentinel_version, + schema_version=schema_version, + hash_algo=hash_algo, + ) + + # FAIL: double outcome (executed + denied) => E_CHAIN_DISCONTINUITY (strict linkage) + double_trace = "22222222-2222-4222-8222-222222222224" + double_outcome_events = [ + _base_boot_event( + seq=0, + event_id=221, + actor=instance_id, + ts="2025-03-17T03:17:40Z", + trace_id=boot_trace, + ), + { + "event_id": _uuid(222), + "seq": 1, + "ts": "2025-03-17T03:17:41Z", + "event_type": "action_intent", + "actor": 
"did:vm:operator:demo", + "cap_hash": "sha256:dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd", + "op": "meridian.v1.plc.write", + "op_digest": "", + "result": "ok", + "trace_id": double_trace, + "prev_event_hash": "", + "event_hash": "", + "payload": {"kind": "command_requested", "params": {"plc": "plc:demo", "register": "R3", "value": "1"}}, + }, + { + "event_id": _uuid(223), + "seq": 2, + "ts": "2025-03-17T03:17:42Z", + "event_type": "action_executed", + "actor": instance_id, + "cap_hash": "sha256:dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd", + "op": "meridian.v1.plc.write", + "op_digest": "", + "result": "ok", + "trace_id": double_trace, + "prev_event_hash": "", + "event_hash": "", + "payload": {"kind": "command_executed", "params": {"plc": "plc:demo", "register": "R3", "value": "1"}}, + }, + { + "event_id": _uuid(224), + "seq": 3, + "ts": "2025-03-17T03:17:43Z", + "event_type": "shadow_receipt", + "actor": instance_id, + "cap_hash": "sha256:dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd", + "op": "meridian.v1.plc.write", + "op_digest": "", + "result": "deny", + "trace_id": double_trace, + "prev_event_hash": "", + "event_hash": "", + "payload": {"kind": "command_refused", "reason_code": "policy_denied", "params": {"plc": "plc:demo", "register": "R3", "value": "1"}}, + }, + ] + _build_bundle( + fail_dir / "double_outcome", + events_in_file_order=double_outcome_events, + since_seq=0, + until_seq=3, + seal_id="mv1_double_outcome", + instance_id=instance_id, + created_at="2025-03-17T03:17:44Z", + canonicalization_version=canonicalization_version, + sentinel_version=sentinel_version, + schema_version=schema_version, + hash_algo=hash_algo, + ) + + # FAIL: event_hash mismatch (tampered stored event_hash) => E_EVENT_HASH_MISMATCH + def _tamper_event_hash(bundle: Path) -> None: + receipts = (bundle / "receipts.jsonl").read_text(encoding="utf-8").splitlines() + objs = [json.loads(l) for l in receipts if l.strip()] + objs[1]["event_hash"] = "sha256:" + ("0" * 64) + _write_receipts_jsonl(bundle / "receipts.jsonl", objs) + _write_integrity_json( + bundle, + include_paths=["receipts.jsonl", "seal.json", "verifier_manifest.json", "roots.txt", "README.md"], + hash_algo=hash_algo, + ) + + _build_bundle( + fail_dir / "event_hash_mismatch", + events_in_file_order=tamper_events, + since_seq=0, + until_seq=1, + seal_id="mv1_event_hash_mismatch", + instance_id=instance_id, + created_at="2025-03-17T03:17:42Z", + canonicalization_version=canonicalization_version, + sentinel_version=sentinel_version, + schema_version=schema_version, + hash_algo=hash_algo, + integrity_override=_tamper_event_hash, + ) + + # FAIL: op_digest mismatch (tampered op_digest but consistent event_hash) => E_EVENT_HASH_MISMATCH + op_digest_bad = "sha256:" + ("f" * 64) + op_digest_events = json.loads(json.dumps(tamper_events)) + # Force a wrong op_digest for the tamper_signal event; event_hash will be computed from that wrong value. 
+ op_digest_events[1]["op_digest"] = op_digest_bad + _build_bundle( + fail_dir / "op_digest_mismatch", + events_in_file_order=op_digest_events, + since_seq=0, + until_seq=1, + seal_id="mv1_op_digest_mismatch", + instance_id=instance_id, + created_at="2025-03-17T03:17:42Z", + canonicalization_version=canonicalization_version, + sentinel_version=sentinel_version, + schema_version=schema_version, + hash_algo=hash_algo, + ) + + # FAIL: prev_event_hash mismatch (tamper_signal prev hash wrong, but hashes updated) => E_CHAIN_DISCONTINUITY + prev_bad_events = json.loads(json.dumps(tamper_events)) + prev_bad_events[1]["prev_event_hash"] = "sha256:" + ("1" * 64) + _build_bundle( + fail_dir / "prev_event_hash_mismatch", + events_in_file_order=prev_bad_events, + since_seq=0, + until_seq=1, + seal_id="mv1_prev_hash_mismatch", + instance_id=instance_id, + created_at="2025-03-17T03:17:42Z", + canonicalization_version=canonicalization_version, + sentinel_version=sentinel_version, + schema_version=schema_version, + hash_algo=hash_algo, + ) + + # FAIL: seq non-monotonic (duplicate seq) => E_SEQ_NON_MONOTONIC + dup_events = [ + _base_boot_event( + seq=0, + event_id=301, + actor=instance_id, + ts="2025-03-17T03:17:40Z", + trace_id=boot_trace, + ), + { + "event_id": _uuid(302), + "seq": 1, + "ts": "2025-03-17T03:17:41Z", + "event_type": "health_event", + "actor": instance_id, + "cap_hash": "none", + "op": "meridian.v1.health", + "op_digest": "", + "result": "ok", + "trace_id": "aaaaaaaa-aaaa-4aaa-8aaa-aaaaaaaaaaaa", + "prev_event_hash": "", + "event_hash": "", + "payload": {"kind": "health_event", "params": {"ok": True}}, + }, + { + "event_id": _uuid(303), + "seq": 1, + "ts": "2025-03-17T03:17:41Z", + "event_type": "corruption_detected", + "actor": instance_id, + "cap_hash": "none", + "op": "meridian.v1.corruption_detected", + "op_digest": "", + "result": "error", + "trace_id": "bbbbbbbb-bbbb-4bbb-8bbb-bbbbbbbbbbbb", + "prev_event_hash": "", + "event_hash": "", + "payload": {"kind": "corruption_detected", "params": {"component": "storage"}}, + }, + ] + _build_bundle( + fail_dir / "seq_non_monotonic_duplicate", + events_in_file_order=dup_events, + since_seq=0, + until_seq=1, + seal_id="mv1_seq_non_monotonic", + instance_id=instance_id, + created_at="2025-03-17T03:17:42Z", + canonicalization_version=canonicalization_version, + sentinel_version=sentinel_version, + schema_version=schema_version, + hash_algo=hash_algo, + ) + + # FAIL: revoked capability used after revoke => E_REVOKED_CAPABILITY_USED + revoked = "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee" + revoke_trace = "44444444-4444-4444-8444-444444444444" + revoked_events = [ + _base_boot_event( + seq=0, + event_id=401, + actor=instance_id, + ts="2025-03-17T03:17:40Z", + trace_id=boot_trace, + ), + { + "event_id": _uuid(402), + "seq": 1, + "ts": "2025-03-17T03:17:41Z", + "event_type": "cap_revoke", + "actor": "did:vm:guardian:demo", + "cap_hash": "none", + "op": "meridian.v1.cap.revoke", + "op_digest": "", + "result": "ok", + "trace_id": "55555555-5555-4555-8555-555555555555", + "prev_event_hash": "", + "event_hash": "", + "payload": {"params": {"revoked_cap_hash": revoked}}, + }, + { + "event_id": _uuid(403), + "seq": 2, + "ts": "2025-03-17T03:17:42Z", + "event_type": "action_intent", + "actor": "did:vm:operator:demo", + "cap_hash": revoked, + "op": "meridian.v1.plc.write", + "op_digest": "", + "result": "ok", + "trace_id": revoke_trace, + "prev_event_hash": "", + "event_hash": "", + "payload": {"kind": "command_requested", 
"params": {"plc": "plc:demo", "register": "R7", "value": "1"}}, + }, + { + "event_id": _uuid(404), + "seq": 3, + "ts": "2025-03-17T03:17:43Z", + "event_type": "action_executed", + "actor": instance_id, + "cap_hash": revoked, + "op": "meridian.v1.plc.write", + "op_digest": "", + "result": "ok", + "trace_id": revoke_trace, + "prev_event_hash": "", + "event_hash": "", + "payload": {"kind": "command_executed", "params": {"plc": "plc:demo", "register": "R7", "value": "1"}}, + }, + ] + _build_bundle( + fail_dir / "revoked_capability_used", + events_in_file_order=revoked_events, + since_seq=0, + until_seq=3, + seal_id="mv1_revoked_capability_used", + instance_id=instance_id, + created_at="2025-03-17T03:17:44Z", + canonicalization_version=canonicalization_version, + sentinel_version=sentinel_version, + schema_version=schema_version, + hash_algo=hash_algo, + ) + + # FAIL: missing required file (roots.txt) => E_MISSING_REQUIRED_FILE + _build_bundle( + fail_dir / "missing_required_file_roots", + events_in_file_order=tamper_events, + since_seq=0, + until_seq=1, + seal_id="mv1_missing_roots", + instance_id=instance_id, + created_at="2025-03-17T03:17:42Z", + canonicalization_version=canonicalization_version, + sentinel_version=sentinel_version, + schema_version=schema_version, + hash_algo=hash_algo, + omit_roots_file=True, + ) + + # FAIL: integrity digest mismatch => E_MANIFEST_HASH_MISMATCH + def _wrong_digest(bundle: Path) -> None: + integrity = json.loads((bundle / "integrity.json").read_text(encoding="utf-8")) + for entry in integrity.get("files") or []: + if entry.get("path") == "receipts.jsonl": + entry["digest"] = "sha256:" + ("2" * 64) + _write_json(bundle / "integrity.json", integrity, pretty=True) + + _build_bundle( + fail_dir / "manifest_hash_mismatch", + events_in_file_order=tamper_events, + since_seq=0, + until_seq=1, + seal_id="mv1_manifest_hash_mismatch", + instance_id=instance_id, + created_at="2025-03-17T03:17:42Z", + canonicalization_version=canonicalization_version, + sentinel_version=sentinel_version, + schema_version=schema_version, + hash_algo=hash_algo, + integrity_override=_wrong_digest, + ) + + # FAIL: receipts JSONL cannot be parsed => E_SCHEMA_INVALID + def _invalid_jsonl(bundle: Path) -> None: + (bundle / "receipts.jsonl").write_text('{"truncated":\n', encoding="utf-8") + _write_integrity_json( + bundle, + include_paths=["receipts.jsonl", "seal.json", "verifier_manifest.json", "roots.txt", "README.md"], + hash_algo=hash_algo, + ) + + _build_bundle( + fail_dir / "invalid_jsonl_truncated", + events_in_file_order=tamper_events, + since_seq=0, + until_seq=1, + seal_id="mv1_invalid_jsonl_truncated", + instance_id=instance_id, + created_at="2025-03-17T03:17:42Z", + canonicalization_version=canonicalization_version, + sentinel_version=sentinel_version, + schema_version=schema_version, + hash_algo=hash_algo, + integrity_override=_invalid_jsonl, + ) + + # FAIL: strict mode forbids unlisted files => E_SCHEMA_INVALID + _build_bundle( + fail_dir / "unlisted_extra_file_strict", + events_in_file_order=tamper_events, + since_seq=0, + until_seq=1, + seal_id="mv1_unlisted_file", + instance_id=instance_id, + created_at="2025-03-17T03:17:42Z", + canonicalization_version=canonicalization_version, + sentinel_version=sentinel_version, + schema_version=schema_version, + hash_algo=hash_algo, + extra_unlisted_file=True, + ) + + # FAIL: root mismatch => E_ROOT_MISMATCH + def _tamper_root(bundle: Path) -> None: + seal = json.loads((bundle / "seal.json").read_text(encoding="utf-8")) + bad = "sha256:" + 
("3" * 64) + seal["root"]["end"] = bad + _write_json(bundle / "seal.json", seal, pretty=False) + roots_txt = (bundle / "roots.txt").read_text(encoding="utf-8").splitlines() + roots_txt = [ln if not ln.startswith("seq=1 ") else f"seq=1 root={bad}" for ln in roots_txt] + (bundle / "roots.txt").write_text("\n".join(roots_txt) + "\n", encoding="utf-8") + _write_integrity_json( + bundle, + include_paths=["receipts.jsonl", "seal.json", "verifier_manifest.json", "roots.txt", "README.md"], + hash_algo=hash_algo, + ) + + _build_bundle( + fail_dir / "root_mismatch", + events_in_file_order=tamper_events, + since_seq=0, + until_seq=1, + seal_id="mv1_root_mismatch", + instance_id=instance_id, + created_at="2025-03-17T03:17:42Z", + canonicalization_version=canonicalization_version, + sentinel_version=sentinel_version, + schema_version=schema_version, + hash_algo=hash_algo, + integrity_override=_tamper_root, + ) + + # FAIL: range mismatch => E_RANGE_MISMATCH + def _tamper_range(bundle: Path) -> None: + seal = json.loads((bundle / "seal.json").read_text(encoding="utf-8")) + seal["range"]["until_seq"] = 0 + seal["root"]["seq"] = 0 + _write_json(bundle / "seal.json", seal, pretty=False) + _write_integrity_json( + bundle, + include_paths=["receipts.jsonl", "seal.json", "verifier_manifest.json", "roots.txt", "README.md"], + hash_algo=hash_algo, + ) + + _build_bundle( + fail_dir / "range_mismatch", + events_in_file_order=tamper_events, + since_seq=0, + until_seq=1, + seal_id="mv1_range_mismatch", + instance_id=instance_id, + created_at="2025-03-17T03:17:42Z", + canonicalization_version=canonicalization_version, + sentinel_version=sentinel_version, + schema_version=schema_version, + hash_algo=hash_algo, + integrity_override=_tamper_range, + ) + + # FAIL: canonicalization version unsupported => E_CANON_VERSION_UNSUPPORTED + def _tamper_canon_version(bundle: Path) -> None: + seal = json.loads((bundle / "seal.json").read_text(encoding="utf-8")) + vm = json.loads((bundle / "verifier_manifest.json").read_text(encoding="utf-8")) + seal["canonicalization_version"] = "sentinel-event-jcs-v999" + vm["canonicalization_version"] = "sentinel-event-jcs-v999" + _write_json(bundle / "seal.json", seal, pretty=False) + _write_json(bundle / "verifier_manifest.json", vm, pretty=False) + _write_integrity_json( + bundle, + include_paths=["receipts.jsonl", "seal.json", "verifier_manifest.json", "roots.txt", "README.md"], + hash_algo=hash_algo, + ) + + _build_bundle( + fail_dir / "canon_version_unsupported", + events_in_file_order=tamper_events, + since_seq=0, + until_seq=1, + seal_id="mv1_canon_version_unsupported", + instance_id=instance_id, + created_at="2025-03-17T03:17:42Z", + canonicalization_version=canonicalization_version, + sentinel_version=sentinel_version, + schema_version=schema_version, + hash_algo=hash_algo, + integrity_override=_tamper_canon_version, + ) + + print(f"[OK] Fixtures regenerated under {fixtures_dir}") + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/OFFSEC-AGENTS-PLAN.md b/OFFSEC-AGENTS-PLAN.md new file mode 100644 index 0000000..1b74c89 --- /dev/null +++ b/OFFSEC-AGENTS-PLAN.md @@ -0,0 +1,898 @@ +# OffSec Agent Workers Plan + +> *Shield commands. Agents execute. 
Receipts prove.* + +## Current State Assessment + +### What Exists + +| Component | Status | Location | +|-----------|--------|----------| +| **Guardian Engine (Rust)** | ✅ Implemented | `vaultmesh-guardian/src/lib.rs` | +| **OffSec Engine (Rust)** | ⚠️ Stub only | `vaultmesh-offsec/src/lib.rs` | +| **Console Engine (Python)** | ✅ Receipts + Approvals | `engines/console/` | +| **Shield Portal (Node.js)** | ✅ Web UI | `vaultmesh-shield-portal/` | +| **CLI OffSec Commands** | ✅ Full implementation | `cli/vm_cli.py` | +| **OffSec Spec** | ✅ Complete | `VAULTMESH-OFFSEC-ENGINE.md` | + +### What's Missing + +1. **OffSec Agent Workers** - Autonomous AI agents that perform security tasks +2. **Agent Orchestration Layer** - Dispatch, monitor, control agents +3. **Shield ↔ Agent Protocol** - Communication and approval flow +4. **CAI Integration** - Leverage existing security AI tools + +--- + +## Architecture: Shield-Agent Pattern + +``` +┌─────────────────────────────────────────────────────────────────────────────┐ +│ SHIELD (Guardian/Overseer) │ +│ ┌─────────────────────────────────────────────────────────────────────┐ │ +│ │ Shield Portal (vaultmesh-shield-portal) │ │ +│ │ - Mission control UI │ │ +│ │ - Agent status dashboard │ │ +│ │ - Approval workflow │ │ +│ │ - Receipt viewer │ │ +│ └─────────────────────────────────────────────────────────────────────┘ │ +│ │ │ +│ ▼ │ +│ ┌─────────────────────────────────────────────────────────────────────┐ │ +│ │ Agent Orchestrator (new: engines/offsec_agents/) │ │ +│ │ - Agent registry │ │ +│ │ - Mission dispatch │ │ +│ │ - Approval enforcement │ │ +│ │ - Receipt emission │ │ +│ └─────────────────────────────────────────────────────────────────────┘ │ +└─────────────────────────────────────────────────────────────────────────────┘ + │ + ┌────────────────┼────────────────┐ + │ │ │ + ▼ ▼ ▼ +┌─────────────────────┐ ┌─────────────────────┐ ┌─────────────────────┐ +│ RECON AGENT │ │ VULN AGENT │ │ EXPLOIT AGENT │ +│ (Worker) │ │ (Worker) │ │ (Worker) │ +│ │ │ │ │ │ +│ • Target enum │ │ • CVE scanning │ │ • PoC execution │ +│ • Port scanning │ │ • Config audit │ │ • Payload delivery │ +│ • OSINT gathering │ │ • Dependency check │ │ • Post-exploit │ +│ • Service ID │ │ • Web vuln scan │ │ • Priv escalation │ +└─────────────────────┘ └─────────────────────┘ └─────────────────────┘ + │ │ │ + └───────────────────────┴───────────────────────┘ + │ + ▼ + ┌─────────────────────────────┐ + │ OFFSEC SCROLL │ + │ receipts/offsec/ │ + │ offsec_events.jsonl │ + └─────────────────────────────┘ +``` + +--- + +## Agent Types + +### Phase 1: Core Agents (Nigredo) + +| Agent | Purpose | Risk Level | Approval Required | +|-------|---------|------------|-------------------| +| **Recon Agent** | Target enumeration, OSINT | Low | Auto-approve | +| **Vuln Agent** | Vulnerability scanning | Medium | Manager approve | +| **Analyze Agent** | Code/binary analysis | Low | Auto-approve | +| **Report Agent** | Finding aggregation | Low | Auto-approve | + +### Phase 2: Active Agents (Albedo) + +| Agent | Purpose | Risk Level | Approval Required | +|-------|---------|------------|-------------------| +| **Exploit Agent** | PoC execution | High | Multi-party approve | +| **CTF Agent** | CTF challenge solving | Medium | Engagement scope | +| **Red Team Agent** | Full adversary emulation | Critical | Executive approve | + +### Phase 3: Response Agents (Citrinitas) + +| Agent | Purpose | Risk Level | Approval Required | +|-------|---------|------------|-------------------| +| **DFIR Agent** | 
Incident response | Medium | Incident commander | +| **Remediation Agent** | Automated patching | High | Change approval | +| **Threat Intel Agent** | IOC correlation | Low | Auto-approve | + +--- + +## Implementation Plan + +### Step 1: Agent Base Framework + +**File:** `engines/offsec_agents/__init__.py` + +```python +""" +OffSec Agent Workers - Base Framework +""" + +from abc import ABC, abstractmethod +from dataclasses import dataclass, field +from datetime import datetime, timezone +from enum import Enum +from typing import Any, Dict, List, Optional +import uuid + +class AgentType(str, Enum): + RECON = "recon" + VULN = "vuln" + EXPLOIT = "exploit" + CTF = "ctf" + ANALYZE = "analyze" + DFIR = "dfir" + REMEDIATION = "remediation" + THREAT_INTEL = "threat_intel" + REPORT = "report" + +class AgentStatus(str, Enum): + IDLE = "idle" + ASSIGNED = "assigned" + RUNNING = "running" + AWAITING_APPROVAL = "awaiting_approval" + COMPLETED = "completed" + FAILED = "failed" + CANCELLED = "cancelled" + +class RiskLevel(str, Enum): + LOW = "low" + MEDIUM = "medium" + HIGH = "high" + CRITICAL = "critical" + +@dataclass +class Mission: + """A task assigned to an agent.""" + mission_id: str + agent_type: AgentType + target: str + objectives: List[str] + scope: Dict[str, Any] + risk_level: RiskLevel + requested_by: str + engagement_id: Optional[str] = None + incident_id: Optional[str] = None + created_at: str = field( + default_factory=lambda: datetime.now(timezone.utc).isoformat() + ) + status: str = "pending" + +@dataclass +class AgentResult: + """Result from agent execution.""" + mission_id: str + agent_id: str + status: AgentStatus + findings: List[Dict[str, Any]] + evidence_paths: List[str] + started_at: str + completed_at: str + duration_seconds: float + receipt_hash: Optional[str] = None +``` + +### Step 2: Agent Orchestrator + +**File:** `engines/offsec_agents/orchestrator.py` + +```python +""" +Agent Orchestrator - Dispatch, monitor, and control agents. +""" + +class AgentOrchestrator: + """ + Central control for OffSec agents. + + Responsibilities: + - Register available agents + - Dispatch missions to appropriate agents + - Enforce approval workflows + - Emit receipts for all agent actions + - Track agent status and results + """ + + def __init__(self, vaultmesh_root: str): + self.vaultmesh_root = vaultmesh_root + self.agents: Dict[str, BaseAgent] = {} + self.missions: Dict[str, Mission] = {} + self.approval_manager = ApprovalManager(vaultmesh_root) + + def register_agent(self, agent: "BaseAgent") -> None: + """Register an agent with the orchestrator.""" + self.agents[agent.agent_id] = agent + + async def dispatch(self, mission: Mission) -> str: + """ + Dispatch a mission to an appropriate agent. + + Flow: + 1. Validate mission scope + 2. Check if approval required + 3. Request approval if needed + 4. Assign to available agent + 5. 
Emit mission_assigned receipt
+        """
+        # Check approval requirements
+        if self._requires_approval(mission):
+            approval = await self._request_approval(mission)
+            if not approval.approved:
+                return "rejected"
+
+        # Find available agent
+        agent = self._find_agent(mission.agent_type)
+        if not agent:
+            raise NoAgentAvailable(mission.agent_type)
+
+        # Assign mission
+        agent.assign(mission)
+        self.missions[mission.mission_id] = mission
+
+        # Emit receipt
+        self._emit_mission_receipt("offsec_mission_assigned", mission)
+
+        return mission.mission_id
+
+    def _requires_approval(self, mission: Mission) -> bool:
+        """Check if mission requires approval based on risk level."""
+        approval_matrix = {
+            RiskLevel.LOW: False,
+            RiskLevel.MEDIUM: True,
+            RiskLevel.HIGH: True,
+            RiskLevel.CRITICAL: True,
+        }
+        return approval_matrix.get(mission.risk_level, True)
+```

### Step 3: Base Agent Class

**File:** `engines/offsec_agents/base.py`

```python
+"""
+Base Agent - Abstract class for all OffSec agents.
+"""
+
+class BaseAgent(ABC):
+    """
+    Abstract base class for OffSec agents.
+
+    All agents must:
+    1. Implement execute() for their core logic
+    2. Emit receipts for significant actions
+    3. Respect scope boundaries
+    4. Handle approval checkpoints
+    """
+
+    def __init__(
+        self,
+        agent_id: str,
+        agent_type: AgentType,
+        did: str,
+        orchestrator: AgentOrchestrator,
+    ):
+        self.agent_id = agent_id
+        self.agent_type = agent_type
+        self.did = did  # e.g., did:vm:agent:recon-01
+        self.orchestrator = orchestrator
+        self.status = AgentStatus.IDLE
+        self.current_mission: Optional[Mission] = None
+        # Evidence files captured during execution; consumed by run() below.
+        self.evidence_paths: List[str] = []
+
+    def assign(self, mission: Mission) -> None:
+        """Accept a mission assignment."""
+        self.current_mission = mission
+        self.status = AgentStatus.ASSIGNED
+
+    async def run(self) -> AgentResult:
+        """
+        Execute the assigned mission.
+
+        Flow:
+        1. Validate mission is assigned
+        2. Emit mission_started receipt
+        3. Execute core logic
+        4. Emit findings as receipts
+        5. Emit mission_completed receipt
+        """
+        if not self.current_mission:
+            raise NoMissionAssigned()
+
+        self.status = AgentStatus.RUNNING
+        started_at = datetime.now(timezone.utc)
+
+        self._emit_receipt("offsec_agent_started", {
+            "mission_id": self.current_mission.mission_id,
+            "agent_id": self.agent_id,
+            "agent_type": self.agent_type.value,
+        })
+
+        try:
+            findings = await self.execute()
+            self.status = AgentStatus.COMPLETED
+        except Exception as e:
+            self.status = AgentStatus.FAILED
+            findings = [{"error": str(e)}]
+
+        completed_at = datetime.now(timezone.utc)
+
+        result = AgentResult(
+            mission_id=self.current_mission.mission_id,
+            agent_id=self.agent_id,
+            status=self.status,
+            findings=findings,
+            evidence_paths=self.evidence_paths,
+            started_at=started_at.isoformat(),
+            completed_at=completed_at.isoformat(),
+            duration_seconds=(completed_at - started_at).total_seconds(),
+        )
+
+        self._emit_receipt("offsec_agent_completed", asdict(result))  # dataclasses.asdict
+
+        self.current_mission = None
+        self.status = AgentStatus.IDLE
+
+        return result
+
+    @abstractmethod
+    async def execute(self) -> List[Dict[str, Any]]:
+        """
+        Core agent logic - must be implemented by subclasses.
+
+        Returns:
+            List of findings/results
+        """
+        pass
+
+    async def checkpoint(self, action: str, details: Dict[str, Any]) -> bool:
+        """
+        Request approval for a risky action mid-execution.
+
+        Use for actions that exceed initial mission scope or risk level.
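+        Returns True when the action stays in scope or approval is granted.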
+        """
+        if self._action_exceeds_scope(action, details):
+            approval = await self.orchestrator.request_approval(
+                session_id=self.current_mission.mission_id,
+                action_type=action,
+                action_details=details,
+                requested_by=self.did,
+                approvers=self._get_approvers(action),
+            )
+            return approval.approved
+        return True
+```

### Step 4: Concrete Agent Implementations

**File:** `engines/offsec_agents/agents/recon.py`

```python
+"""
+Recon Agent - Target enumeration and OSINT gathering.
+"""
+
+class ReconAgent(BaseAgent):
+    """
+    Reconnaissance agent for target enumeration.
+
+    Capabilities:
+    - Port scanning (nmap integration)
+    - Service identification
+    - OSINT gathering
+    - DNS enumeration
+    - Technology fingerprinting
+    """
+
+    def __init__(self, **kwargs):
+        super().__init__(
+            agent_type=AgentType.RECON,
+            **kwargs
+        )
+
+    async def execute(self) -> List[Dict[str, Any]]:
+        """Execute reconnaissance mission."""
+        mission = self.current_mission
+        target = mission.target
+        findings = []
+
+        # Phase 1: DNS/Host resolution
+        dns_results = await self._dns_enum(target)
+        findings.extend(dns_results)
+
+        # Phase 2: Port scanning
+        ports: List[Dict] = []  # stays empty when port_scan is not requested
+        if "port_scan" in mission.objectives:
+            ports = await self._port_scan(target, mission.scope)
+            findings.extend(ports)
+
+        # Phase 3: Service identification
+        if "service_id" in mission.objectives:
+            services = await self._identify_services(target, ports)
+            findings.extend(services)
+
+        # Phase 4: Technology fingerprinting
+        if "tech_fingerprint" in mission.objectives:
+            tech = await self._fingerprint(target)
+            findings.extend(tech)
+
+        return findings
+
+    async def _port_scan(self, target: str, scope: Dict) -> List[Dict]:
+        """Run port scan within scope constraints."""
+        # Use CAI recon or nmap
+        from cai_mcp_client import cai_recon
+
+        depth = scope.get("scan_depth", "quick")
+        result = await cai_recon(target=target, depth=depth)
+
+        return self._parse_scan_results(result)
+```

**File:** `engines/offsec_agents/agents/vuln.py`

```python
+"""
+Vuln Agent - Vulnerability scanning and assessment.
+"""
+
+class VulnAgent(BaseAgent):
+    """
+    Vulnerability scanning agent.
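+    Web scans pass through a checkpoint first, since they can be noisy;
+    high and critical findings additionally emit vuln discovery receipts.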
+ + Capabilities: + - CVE detection + - Configuration auditing + - Dependency vulnerability checks + - Web application scanning + - API security testing + """ + + async def execute(self) -> List[Dict[str, Any]]: + """Execute vulnerability scan mission.""" + mission = self.current_mission + target = mission.target + findings = [] + + # Determine scan type + scan_type = mission.scope.get("scan_type", "all") + + # Checkpoint for web scans (can be noisy) + if scan_type in ["web", "all"]: + approved = await self.checkpoint( + "web_vuln_scan", + {"target": target, "reason": "Web scanning may generate traffic"} + ) + if not approved: + scan_type = "network" # Fall back to quieter scan + + # Use CAI vuln scanner + from cai_mcp_client import cai_vuln_scan + + result = await cai_vuln_scan(target=target, scan_type=scan_type) + + # Convert to findings + for vuln in result.get("vulnerabilities", []): + finding = { + "type": "vulnerability", + "title": vuln.get("title"), + "severity": vuln.get("severity"), + "cvss": vuln.get("cvss_score"), + "cve": vuln.get("cve_id"), + "affected_asset": target, + "evidence": vuln.get("evidence"), + } + findings.append(finding) + + # Auto-create vuln receipt for high+ severity + if vuln.get("severity") in ["high", "critical"]: + self._emit_vuln_discovery(finding) + + return findings +``` + +**File:** `engines/offsec_agents/agents/exploit.py` + +```python +""" +Exploit Agent - Controlled exploitation and PoC execution. + +⚠️ HIGH RISK - Requires multi-party approval +""" + +class ExploitAgent(BaseAgent): + """ + Exploitation agent for controlled PoC execution. + + CRITICAL: All exploits require: + 1. Valid engagement/incident context + 2. Multi-party approval + 3. Detailed evidence capture + 4. Immediate rollback capability + """ + + async def execute(self) -> List[Dict[str, Any]]: + """Execute exploitation mission with strict controls.""" + mission = self.current_mission + + # Verify engagement context + if not mission.engagement_id and not mission.incident_id: + raise NoEngagementContext( + "Exploitation requires valid engagement or incident context" + ) + + # Mandatory checkpoint before any exploit + approved = await self.checkpoint( + "exploit_execution", + { + "target": mission.target, + "vulnerability": mission.scope.get("vuln_id"), + "technique": mission.scope.get("technique"), + "engagement_id": mission.engagement_id, + } + ) + + if not approved: + return [{"status": "blocked", "reason": "Approval denied"}] + + # Execute with full evidence capture + findings = [] + + try: + # Start evidence capture + self._start_capture() + + # Use CAI exploit + from cai_mcp_client import cai_exploit + + result = await cai_exploit( + target=mission.target, + vulnerability=mission.scope.get("vuln_id"), + ) + + findings.append({ + "type": "exploit_result", + "success": result.get("success"), + "access_gained": result.get("access_level"), + "evidence_path": self._stop_capture(), + }) + + except Exception as e: + findings.append({ + "type": "exploit_error", + "error": str(e), + "evidence_path": self._stop_capture(), + }) + + return findings +``` + +### Step 5: CAI Integration Bridge + +**File:** `engines/offsec_agents/cai_bridge.py` + +```python +""" +Bridge to CAI MCP tools for agent use. +""" + +import asyncio +from typing import Any, Dict, Optional + +class CAIBridge: + """ + Bridge to CAI security tools via MCP. 
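+    Every call funnels through a single _call_mcp helper, keeping MCP
+    transport details out of the agents themselves.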
+ + Wraps CAI tools for use by OffSec agents: + - cai_recon → ReconAgent + - cai_vuln_scan → VulnAgent + - cai_exploit → ExploitAgent + - cai_ctf → CTFAgent + - cai_analyze → AnalyzeAgent + """ + + async def recon( + self, + target: str, + depth: str = "standard" + ) -> Dict[str, Any]: + """Run CAI reconnaissance.""" + # Call MCP tool + result = await self._call_mcp("cai_recon", { + "target": target, + "depth": depth, + }) + return result + + async def vuln_scan( + self, + target: str, + scan_type: str = "all" + ) -> Dict[str, Any]: + """Run CAI vulnerability scan.""" + result = await self._call_mcp("cai_vuln_scan", { + "target": target, + "scan_type": scan_type, + }) + return result + + async def exploit( + self, + target: str, + vulnerability: str + ) -> Dict[str, Any]: + """Run CAI exploitation (requires authorization).""" + result = await self._call_mcp("cai_exploit", { + "target": target, + "vulnerability": vulnerability, + }) + return result + + async def analyze( + self, + file_path: str, + analysis_type: str = "static" + ) -> Dict[str, Any]: + """Run CAI code/binary analysis.""" + result = await self._call_mcp("cai_analyze", { + "file_path": file_path, + "analysis_type": analysis_type, + }) + return result + + async def ctf( + self, + challenge: str, + category: Optional[str] = None + ) -> Dict[str, Any]: + """Run CAI CTF solver.""" + params = {"challenge": challenge} + if category: + params["category"] = category + result = await self._call_mcp("cai_ctf", params) + return result +``` + +### Step 6: Shield Portal Integration + +**File:** `vaultmesh-shield-portal/app/src/routes/agents.js` + +```javascript +/** + * Agent management routes for Shield Portal + */ + +const express = require('express'); +const router = express.Router(); + +// GET /agents - List all registered agents +router.get('/', async (req, res) => { + const agents = await getAgentRegistry(); + res.render('agents/list', { agents }); +}); + +// GET /agents/:id - Agent detail view +router.get('/:id', async (req, res) => { + const agent = await getAgent(req.params.id); + const missions = await getAgentMissions(req.params.id); + res.render('agents/detail', { agent, missions }); +}); + +// POST /agents/dispatch - Dispatch a new mission +router.post('/dispatch', async (req, res) => { + const { agent_type, target, objectives, scope } = req.body; + + const mission = await dispatchMission({ + agent_type, + target, + objectives, + scope, + requested_by: req.user.did, + }); + + res.redirect(`/missions/${mission.mission_id}`); +}); + +// GET /missions - List all missions +router.get('/missions', async (req, res) => { + const missions = await getMissions({ + status: req.query.status, + agent_type: req.query.agent_type, + }); + res.render('missions/list', { missions }); +}); + +// POST /missions/:id/approve - Approve a pending mission +router.post('/missions/:id/approve', async (req, res) => { + await approveMission(req.params.id, { + approver: req.user.did, + reason: req.body.reason, + }); + res.redirect(`/missions/${req.params.id}`); +}); + +module.exports = router; +``` + +### Step 7: CLI Commands + +**Add to:** `cli/vm_cli.py` + +```python +# ============================================================================ +# Agent Commands +# ============================================================================ + +@cli.group() +def agent(): + """OffSec Agent management.""" + pass + +@agent.command("list") +def agent_list(): + """List registered agents.""" + orchestrator = get_orchestrator() + for agent in 
orchestrator.agents.values(): + status_icon = "🟢" if agent.status == AgentStatus.IDLE else "🔴" + click.echo(f"{status_icon} {agent.agent_id} ({agent.agent_type.value})") + +@agent.command("dispatch") +@click.option("--type", "agent_type", required=True, + type=click.Choice(["recon", "vuln", "exploit", "ctf", "analyze"])) +@click.option("--target", required=True, help="Target for the mission") +@click.option("--objective", multiple=True, help="Mission objectives") +@click.option("--engagement", help="Link to engagement ID") +@click.option("--incident", help="Link to incident ID") +def agent_dispatch(agent_type, target, objective, engagement, incident): + """Dispatch a mission to an agent.""" + orchestrator = get_orchestrator() + + mission = Mission( + mission_id=generate_mission_id(), + agent_type=AgentType(agent_type), + target=target, + objectives=list(objective), + scope={}, + risk_level=RiskLevel.MEDIUM, + requested_by=get_actor_did(), + engagement_id=engagement, + incident_id=incident, + ) + + result = asyncio.run(orchestrator.dispatch(mission)) + click.echo(f"[agent] Mission dispatched: {result}") + +@agent.command("status") +@click.argument("mission_id") +def agent_status(mission_id): + """Check mission status.""" + orchestrator = get_orchestrator() + mission = orchestrator.missions.get(mission_id) + + if not mission: + click.echo(f"Mission not found: {mission_id}") + return + + click.echo(f"Mission: {mission.mission_id}") + click.echo(f"Type: {mission.agent_type.value}") + click.echo(f"Target: {mission.target}") + click.echo(f"Status: {mission.status}") +``` + +--- + +## Receipt Types + +New receipt types for agent operations: + +| Type | When Emitted | +|------|--------------| +| `offsec_mission_created` | Mission created and pending | +| `offsec_mission_assigned` | Mission assigned to agent | +| `offsec_mission_approved` | Mission approved to proceed | +| `offsec_mission_rejected` | Mission approval rejected | +| `offsec_agent_started` | Agent began execution | +| `offsec_agent_checkpoint` | Agent requested mid-execution approval | +| `offsec_agent_finding` | Agent discovered a finding | +| `offsec_agent_completed` | Agent finished mission | +| `offsec_agent_failed` | Agent encountered error | + +--- + +## Directory Structure + +``` +engines/offsec_agents/ +├── __init__.py # Exports +├── base.py # BaseAgent class +├── orchestrator.py # AgentOrchestrator +├── mission.py # Mission dataclass +├── cai_bridge.py # CAI MCP integration +├── receipts.py # Receipt emission +├── agents/ +│ ├── __init__.py +│ ├── recon.py # ReconAgent +│ ├── vuln.py # VulnAgent +│ ├── exploit.py # ExploitAgent +│ ├── ctf.py # CTFAgent +│ ├── analyze.py # AnalyzeAgent +│ ├── dfir.py # DFIRAgent +│ ├── remediation.py # RemediationAgent +│ └── threat_intel.py # ThreatIntelAgent +└── tests/ + ├── test_orchestrator.py + ├── test_agents.py + └── test_receipts.py +``` + +--- + +## Implementation Phases + +### Phase 1: Foundation (Week 1) +- [ ] Create `engines/offsec_agents/` package structure +- [ ] Implement `BaseAgent` abstract class +- [ ] Implement `AgentOrchestrator` with basic dispatch +- [ ] Implement `Mission` and `AgentResult` dataclasses +- [ ] Add agent receipt types to OffSec scroll + +### Phase 2: Core Agents (Week 2) +- [ ] Implement `ReconAgent` with CAI bridge +- [ ] Implement `VulnAgent` with CAI bridge +- [ ] Implement `AnalyzeAgent` with CAI bridge +- [ ] Add CLI commands for agent management +- [ ] Write unit tests for each agent + +### Phase 3: Approval Integration (Week 3) +- [ ] Integrate 
with Console approval system
+- [ ] Implement checkpoint flow for risky actions
+- [ ] Add multi-party approval for high-risk agents
+- [ ] Update Shield Portal with approval UI
+
+### Phase 4: Active Agents (Week 4)
+- [ ] Implement `ExploitAgent` with strict controls
+- [ ] Implement `CTFAgent` for CTF engagements
+- [ ] Add engagement context validation
+- [ ] Full evidence capture pipeline
+
+### Phase 5: Response Agents (Week 5)
+- [ ] Implement `DFIRAgent` for incident response
+- [ ] Implement `ThreatIntelAgent` for IOC correlation
+- [ ] Implement `RemediationAgent` with change approval
+- [ ] Integration testing with real incidents
+
+### Phase 6: Shield Portal UI (Week 6)
+- [ ] Agent dashboard page
+- [ ] Mission control view
+- [ ] Real-time status updates
+- [ ] Finding browser
+- [ ] Approval workflow UI
+
+---
+
+## Security Considerations
+
+1. **Scope Enforcement**: Agents cannot operate outside defined scope
+2. **Approval Gates**: Risky actions require human approval
+3. **Evidence Trail**: All actions produce receipts
+4. **Capability Limits**: Agents have DID-based capabilities
+5. **Kill Switch**: Orchestrator can terminate any agent
+6. **Rate Limiting**: Prevent runaway agent behavior
+7. **Isolation**: Agents run in sandboxed environments
+
+---
+
+## Success Criteria
+
+- [ ] Agents can be dispatched via CLI and Portal
+- [ ] All agent actions emit receipts to OffSec scroll
+- [ ] High-risk actions require approval
+- [ ] CAI tools accessible to agents via bridge
+- [ ] Full audit trail for any mission
+- [ ] Guardian can anchor agent activity
+
+---
+
+*Shield commands. Agents execute. Receipts prove.*
diff --git a/PLAN.md b/PLAN.md
new file mode 100644
index 0000000..15e6b78
--- /dev/null
+++ b/PLAN.md
@@ -0,0 +1,215 @@
+# VaultMesh Implementation Plan
+**Based on Level-of-Done Assessment (2.5/5 → Target 4/5)**
+
+---
+
+## Phase 1: Foundation (Today)
+
+### 1.1 Add GitLab CI Pipeline
+**File:** `.gitlab-ci.yml`
+
+```yaml
+# GitLab CI caches only paths inside the project, so keep cargo's home there.
+variables:
+  CARGO_HOME: $CI_PROJECT_DIR/.cargo
+
+stages:
+  - build
+  - test
+  - lint
+
+rust-build:
+  stage: build
+  image: rust:1.75
+  script:
+    - cargo build --workspace --locked
+  cache:
+    key: cargo-$CI_COMMIT_REF_SLUG
+    paths:
+      - target/
+      - .cargo/registry/
+
+rust-test:
+  stage: test
+  image: rust:1.75
+  script:
+    - cargo test --workspace --locked
+  cache:
+    key: cargo-$CI_COMMIT_REF_SLUG
+    paths:
+      - target/
+
+rust-lint:
+  stage: lint
+  image: rust:1.75
+  script:
+    - rustup component add clippy rustfmt
+    - cargo fmt --check
+    - cargo clippy --workspace -- -D warnings
+  allow_failure: true
+```
+
+**Acceptance:** Pipeline runs on push, blocks MR on test failure.
+
+---
+
+### 1.2 Add Unit Tests to vaultmesh-core
+
+**File:** `vaultmesh-core/src/lib.rs` — add test module
+
+Tests to add (a sketch of two of the Merkle cases follows the list):
+1. `test_vmhash_blake3_deterministic` — same input = same hash
+2. `test_vmhash_from_json` — JSON serialization hashes correctly
+3. `test_merkle_root_empty` — empty list returns blake3("empty")
+4. `test_merkle_root_single` — single hash returns itself
+5. `test_merkle_root_pair` — two hashes combine correctly
+6. `test_merkle_root_odd` — odd number duplicates last
+7. `test_did_new` — creates valid did:vm:type:name
+8. `test_did_parse_valid` — parses did:vm:... correctly
+9. `test_did_parse_invalid` — rejects non-vm DIDs
+10. `test_receipt_serialization` — Receipt round-trips through JSON
+
+**Acceptance:** `cargo test -p vaultmesh-core` shows 10 passing tests.
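+
+A minimal sketch of what the two Merkle cases could look like (assuming
+`merkle_root` takes a `&[VmHash]` slice and that `VmHash` derives `Clone`,
+`PartialEq`, and `Debug`, as the Guardian snippet in Phase 2 suggests):
+
+```rust
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_merkle_root_empty() {
+        // An empty scroll hashes the fixed sentinel b"empty".
+        assert_eq!(merkle_root(&[]), VmHash::blake3(b"empty"));
+    }
+
+    #[test]
+    fn test_merkle_root_single() {
+        // A single leaf is its own root.
+        let leaf = VmHash::blake3(b"receipt");
+        assert_eq!(merkle_root(&[leaf.clone()]), leaf);
+    }
+}
+```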
+
+---
+
+## Phase 2: Guardian Engine (This Week)
+
+### 2.1 Implement Guardian Engine in Rust
+
+**File:** `vaultmesh-guardian/src/lib.rs`
+
+```rust
+//! VaultMesh Guardian Engine - Merkle root anchoring
+
+use chrono::{DateTime, Utc};
+use serde::{Deserialize, Serialize};
+use std::path::Path;
+use vaultmesh_core::{Receipt, ReceiptHeader, ReceiptMeta, Scroll, VmHash, merkle_root};
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct AnchorReceipt {
+    pub anchor_id: String,
+    pub anchor_epoch: u64,
+    pub anchor_by: String,
+    pub backend: String,
+    pub roots: std::collections::HashMap<String, String>,
+    pub scrolls: Vec<String>,
+    pub anchor_hash: String,
+}
+
+pub struct GuardianEngine {
+    pub receipts_root: std::path::PathBuf,
+    pub guardian_did: String,
+}
+
+impl GuardianEngine {
+    pub fn new(receipts_root: impl AsRef<Path>, guardian_did: &str) -> Self {
+        Self {
+            receipts_root: receipts_root.as_ref().to_path_buf(),
+            guardian_did: guardian_did.to_string(),
+        }
+    }
+
+    /// Compute Merkle root for a scroll's JSONL file
+    pub fn compute_scroll_root(&self, scroll: Scroll) -> std::io::Result<VmHash> {
+        let path = self.receipts_root.join(scroll.jsonl_path());
+        if !path.exists() {
+            return Ok(VmHash::blake3(b"empty"));
+        }
+        // Read lines, hash each, compute merkle root
+        let content = std::fs::read_to_string(&path)?;
+        let hashes: Vec<VmHash> = content
+            .lines()
+            .filter(|l| !l.trim().is_empty())
+            .map(|l| VmHash::blake3(l.as_bytes()))
+            .collect();
+        Ok(merkle_root(&hashes))
+    }
+
+    /// Anchor all scrolls and emit guardian receipt
+    pub fn anchor_all(&self, scrolls: &[Scroll]) -> std::io::Result<Option<AnchorReceipt>> {
+        // Implementation: compute roots, build receipt, append to JSONL
+        todo!("Implement anchor_all")
+    }
+}
+```
+
+**Tests to add:**
+1. `test_compute_scroll_root_empty` — missing file returns empty hash
+2. `test_compute_scroll_root_single_line` — single entry returns its hash
+3. `test_compute_scroll_root_multiple` — computes correct merkle root
+4. `test_anchor_creates_receipt` — anchor_all writes to JSONL
+5. `test_anchor_hash_deterministic` — same inputs = same anchor_hash
+
+**Acceptance:** `cargo test -p vaultmesh-guardian` shows 5+ passing tests.
+
+---
+
+## Phase 3: Treasury Engine (Next Week)
+
+### 3.1 Implement Treasury Basics
+
+**Structures:**
+- `Budget { id, name, allocated, spent, currency }`
+- `BudgetCreateReceipt`
+- `BudgetSpendReceipt`
+- `TreasuryEngine { budgets: HashMap<String, Budget> }`
+
+**Methods:**
+- `create_budget()` → emits receipt
+- `record_spend()` → emits receipt
+- `get_balance()` → returns remaining
+
+**Tests:** 5 unit tests covering create/spend/balance flows.
+
+---
+
+## Phase 4: Remaining Engines (Weeks 2-3)
+
+| Engine | Core Implementation | Tests |
+|--------|---------------------|-------|
+| vaultmesh-mesh | Node registry, sync receipts | 5 tests |
+| vaultmesh-observability | Metrics exporter, health receipts | 5 tests |
+| vaultmesh-offsec | Incident/vuln receipt emission | 5 tests |
+| vaultmesh-psi | PSI field primitives | 5 tests |
+| vaultmesh-automation | Skill validation receipts | 5 tests |
+
+---
+
+## Phase 5: Python Parity & Docs
+
+1. Add `requirements.txt` with test deps (pytest, blake3, click, pynacl)
+2. Add pytest tests for CLI flows
+3. Update README with quickstart
+4. Add `CONTRIBUTING.md` with PR checklist
+
+---
+
+## Implementation Order
+
+1. **Now:** Create `.gitlab-ci.yml` + add 10 tests to vaultmesh-core
+2. **Today:** Implement GuardianEngine with 5 tests
+3. **Tomorrow:** TreasuryEngine basics
+4. **This week:** Remaining engines (parallelize)
+5. 
**Next week:** Python tests + docs polish + +--- + +## Files to Create/Modify + +| Action | File | Description | +|--------|------|-------------| +| CREATE | `.gitlab-ci.yml` | CI pipeline | +| MODIFY | `vaultmesh-core/src/lib.rs` | Add test module | +| MODIFY | `vaultmesh-core/src/hash.rs` | Add tests | +| MODIFY | `vaultmesh-core/src/did.rs` | Add tests | +| MODIFY | `vaultmesh-guardian/src/lib.rs` | Full implementation | +| MODIFY | `vaultmesh-guardian/Cargo.toml` | Add deps | +| MODIFY | `vaultmesh-treasury/src/lib.rs` | Full implementation | +| CREATE | `requirements.txt` | Python deps for CI | + +--- + +## Success Criteria + +- [ ] CI pipeline runs on every push +- [ ] `cargo test --workspace` shows 30+ tests passing +- [ ] All engines emit receipts (not just stubs) +- [ ] Level-of-Done score reaches 4/5 diff --git a/README.md b/README.md new file mode 100644 index 0000000..9d2b3ab --- /dev/null +++ b/README.md @@ -0,0 +1,15 @@ +# VaultMesh + +Bootstrap workspace, generated from Eternal Pattern docs. + +- Rust workspace with 9 crates (core + 8 engine stubs) +- Minimal Python CLI in `cli/` +- Receipt scrolls under `receipts/` +- Design docs under `docs/` + +## Build + +```bash +cargo check --workspace +python3 cli/vm_cli.py --help +``` diff --git a/cli/__init__.py b/cli/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/cli/ledger.py b/cli/ledger.py new file mode 100644 index 0000000..b38bcc0 --- /dev/null +++ b/cli/ledger.py @@ -0,0 +1,667 @@ +from __future__ import annotations + +import argparse +import hashlib +import json +import sqlite3 +import uuid +from contextlib import contextmanager +from datetime import datetime, timedelta, timezone +from pathlib import Path +from typing import Any + +from ledger.db import ( + default_db_path, + ensure_migrated, + log_proof_artifact, + new_trace_id, + open_db, +) + + +def _parse_since(value: str | None) -> str | None: + if not value: + return None + + v = value.strip() + if not v: + return None + + parts = v.split() + if len(parts) == 2 and parts[0].isdigit(): + n = int(parts[0]) + unit = parts[1].lower() + if unit in {"day", "days"}: + dt = datetime.now(timezone.utc) - timedelta(days=n) + return dt.replace(microsecond=0).isoformat().replace("+00:00", "Z") + if unit in {"hour", "hours"}: + dt = datetime.now(timezone.utc) - timedelta(hours=n) + return dt.replace(microsecond=0).isoformat().replace("+00:00", "Z") + if unit in {"minute", "minutes"}: + dt = datetime.now(timezone.utc) - timedelta(minutes=n) + return dt.replace(microsecond=0).isoformat().replace("+00:00", "Z") + + # Accept "YYYY-MM-DD HH:MM:SS" or ISO8601 + try: + dt = datetime.fromisoformat(v.replace("Z", "+00:00")) + if dt.tzinfo is None: + dt = dt.replace(tzinfo=timezone.utc) + return ( + dt.astimezone(timezone.utc) + .replace(microsecond=0) + .isoformat() + .replace("+00:00", "Z") + ) + except Exception: + return v + + +def _fmt_ms(ms: int | None) -> str: + return f"{ms}ms" if ms is not None else "-" + + +def _fmt_ts(ts: str) -> str: + # Stored as ISO Z; keep as-is if it already looks like it. 
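+    # Timestamps appear in two shapes: ISO8601 Z strings, and sqlite's
+    # datetime('now') format "YYYY-MM-DD HH:MM:SS"; the latter is normalized below.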
+ if ts.endswith("Z") and "T" in ts: + return ts + try: + # sqlite datetime('now') is "YYYY-MM-DD HH:MM:SS" + dt = datetime.strptime(ts, "%Y-%m-%d %H:%M:%S").replace(tzinfo=timezone.utc) + return dt.replace(microsecond=0).isoformat().replace("+00:00", "Z") + except Exception: + return ts + + +def _one_line(value: str | None, *, max_len: int = 120) -> str | None: + if not value: + return None + line = value.splitlines()[0].strip() + if len(line) <= max_len: + return line + return line[:max_len] + "..." + + +@contextmanager +def _open(db_path: str | None): + with open_db(Path(db_path) if db_path else None) as conn: + ensure_migrated(conn) + yield conn + + +def cmd_last(args: argparse.Namespace) -> int: + with _open(args.db) as conn: + rows = conn.execute( + """ + SELECT ts, status, duration_ms, trace_id, kind, label, id, error_text + FROM ( + SELECT ts, status, duration_ms, trace_id, + 'tool' AS kind, + tool_name || COALESCE('.' || action, '') AS label, + id, + error_text + FROM tool_invocations + UNION ALL + SELECT ts, status, duration_ms, trace_id, + 'mcp' AS kind, + 'mcp:' || server_name || '.' || method AS label, + id, + error_text + FROM mcp_calls + UNION ALL + SELECT ts, 'ok' AS status, NULL AS duration_ms, trace_id, + 'artifact' AS kind, + 'artifact:' || kind || COALESCE(' ' || path, '') AS label, + id, + NULL AS error_text + FROM proof_artifacts + ) + ORDER BY ts DESC + LIMIT ?; + """, + (args.n,), + ).fetchall() + + for row in rows: + ts = _fmt_ts(row["ts"]) + status = row["status"] + label = row["label"] + duration = _fmt_ms(row["duration_ms"]) + trace_id = row["trace_id"] + err = _one_line(row["error_text"]) + tail = [] + if trace_id: + tail.append(f"trace={trace_id}") + if err and status != "ok": + tail.append(err) + tail_s = (" " + " ".join(tail)) if tail else "" + print(f"{ts} {status:<5} {label:<28} {duration:>6}{tail_s}") + return 0 + + +def cmd_trace(args: argparse.Namespace) -> int: + with _open(args.db) as conn: + rows = conn.execute( + """ + SELECT ts, status, duration_ms, trace_id, kind, label, id, error_text + FROM ( + SELECT ts, status, duration_ms, trace_id, + 'tool' AS kind, + tool_name || COALESCE('.' || action, '') AS label, + id, + error_text + FROM tool_invocations + WHERE trace_id = ? + UNION ALL + SELECT ts, status, duration_ms, trace_id, + 'mcp' AS kind, + 'mcp:' || server_name || '.' || method AS label, + id, + error_text + FROM mcp_calls + WHERE trace_id = ? + UNION ALL + SELECT ts, 'ok' AS status, NULL AS duration_ms, trace_id, + 'artifact' AS kind, + 'artifact:' || kind || COALESCE(' ' || path, '') AS label, + id, + NULL AS error_text + FROM proof_artifacts + WHERE trace_id = ? + ) + ORDER BY ts ASC; + """, + (args.trace_id, args.trace_id, args.trace_id), + ).fetchall() + + for row in rows: + ts = _fmt_ts(row["ts"]) + status = row["status"] + label = row["label"] + duration = _fmt_ms(row["duration_ms"]) + err = _one_line(row["error_text"]) + tail = f" {err}" if err and status != "ok" else "" + print(f"{ts} {status:<5} {label:<28} {duration:>6}{tail}") + return 0 + + +def cmd_tool(args: argparse.Namespace) -> int: + since = _parse_since(args.since) + params: list[Any] = [args.tool_name] + where = "tool_name = ?" + if since: + where += " AND ts >= ?" + params.append(since) + + limit_sql = " LIMIT ?" 
if args.n is not None else "" + if args.n is not None: + params.append(args.n) + + with _open(args.db) as conn: + rows = conn.execute( + f""" + SELECT ts, status, duration_ms, trace_id, tool_name, action, error_text + FROM tool_invocations + WHERE {where} + ORDER BY ts DESC{limit_sql}; + """, + tuple(params), + ).fetchall() + + for row in rows: + ts = _fmt_ts(row["ts"]) + status = row["status"] + label = row["tool_name"] + (("." + row["action"]) if row["action"] else "") + duration = _fmt_ms(row["duration_ms"]) + trace_id = row["trace_id"] + err = _one_line(row["error_text"]) + tail = [] + if trace_id: + tail.append(f"trace={trace_id}") + if err and status != "ok": + tail.append(err) + tail_s = (" " + " ".join(tail)) if tail else "" + print(f"{ts} {status:<5} {label:<28} {duration:>6}{tail_s}") + return 0 + + +def cmd_errors(args: argparse.Namespace) -> int: + since = _parse_since(args.since) + params: list[Any] = [] + where = "status != 'ok'" + if since: + where += " AND ts >= ?" + params.append(since) + + with _open(args.db) as conn: + rows = conn.execute( + f""" + SELECT ts, status, duration_ms, trace_id, + tool_name || COALESCE('.' || action, '') AS label, + error_text + FROM tool_invocations + WHERE {where} + ORDER BY ts DESC + LIMIT ?; + """, + (*params, args.n), + ).fetchall() + + for row in rows: + ts = _fmt_ts(row["ts"]) + status = row["status"] + duration = _fmt_ms(row["duration_ms"]) + trace_id = row["trace_id"] + label = row["label"] + err = _one_line(row["error_text"]) or "-" + tail = f" trace={trace_id}" if trace_id else "" + print(f"{ts} {status:<5} {label:<28} {duration:>6} {err}{tail}") + return 0 + + +def cmd_search(args: argparse.Namespace) -> int: + term = args.term.strip() + if not term: + return 2 + + like = f"%{term}%" + with _open(args.db) as conn: + rows = conn.execute( + """ + SELECT ts, status, duration_ms, trace_id, + tool_name || COALESCE('.' || action, '') AS label, + id + FROM tool_invocations + WHERE input_json LIKE ? OR output_json LIKE ? OR input_meta_json LIKE ? OR output_meta_json LIKE ? + OR error_text LIKE ? 
+ ORDER BY ts DESC + LIMIT ?; + """, + (like, like, like, like, like, args.n), + ).fetchall() + + for row in rows: + ts = _fmt_ts(row["ts"]) + status = row["status"] + duration = _fmt_ms(row["duration_ms"]) + trace_id = row["trace_id"] + label = row["label"] + rec_id = row["id"] + tail = [f"id={rec_id}"] + if trace_id: + tail.append(f"trace={trace_id}") + print(f"{ts} {status:<5} {label:<28} {duration:>6} " + " ".join(tail)) + return 0 + + +def _sha256_hex_text(value: str | None) -> str: + b = (value or "").encode("utf-8") + return hashlib.sha256(b).hexdigest() + + +def _canon(value: Any) -> str: + if value is None: + return "" + if isinstance(value, (int,)): + return str(value) + if isinstance(value, float): + return format(value, ".17g") + return str(value) + + +def _digest_update(hasher: "hashlib._Hash", line: str) -> None: + hasher.update(line.encode("utf-8")) + hasher.update(b"\n") + + +def _seal_query_window( + args: argparse.Namespace, conn: sqlite3.Connection +) -> tuple[dict[str, Any], str]: + since = _parse_since(args.since) + until = _parse_since(args.until) if getattr(args, "until", None) else None + trace_ids: list[str] = [t.strip() for t in (args.trace_id or []) if t and t.strip()] + scope = "trace_set" if trace_ids else "time_window" + + def where_ts(prefix: str) -> tuple[str, list[Any]]: + clauses: list[str] = [] + params: list[Any] = [] + if since: + clauses.append(f"datetime({prefix}ts) >= datetime(?)") + params.append(since) + if until: + clauses.append(f"datetime({prefix}ts) <= datetime(?)") + params.append(until) + if trace_ids: + marks = ",".join(["?"] * len(trace_ids)) + clauses.append(f"{prefix}trace_id IN ({marks})") + params.extend(trace_ids) + if not clauses: + return ("1=1", []) + return (" AND ".join(clauses), params) + + hasher = hashlib.sha256() + counts: dict[str, int] = {} + bounds_min: str | None = None + bounds_max: str | None = None + + def note_ts(ts_value: Any) -> None: + nonlocal bounds_min, bounds_max + ts = _fmt_ts(_canon(ts_value)) + if not ts: + return + if bounds_min is None or ts < bounds_min: + bounds_min = ts + if bounds_max is None or ts > bounds_max: + bounds_max = ts + + # tool_invocations + where, params = where_ts("") + rows = conn.execute( + f""" + SELECT id, ts, tool_name, action, status, duration_ms, trace_id, + input_json, output_json, error_text + FROM tool_invocations + WHERE {where} + ORDER BY datetime(ts) ASC, id ASC; + """, + tuple(params), + ).fetchall() + for r in rows: + note_ts(r["ts"]) + line = ( + "tool_invocations" + f"|id={_canon(r['id'])}" + f"|ts={_fmt_ts(_canon(r['ts']))}" + f"|tool_name={_canon(r['tool_name'])}" + f"|action={_canon(r['action'])}" + f"|status={_canon(r['status'])}" + f"|duration_ms={_canon(r['duration_ms'])}" + f"|trace_id={_canon(r['trace_id'])}" + f"|input_sha256={_sha256_hex_text(r['input_json'])}" + f"|output_sha256={_sha256_hex_text(r['output_json'])}" + f"|error_sha256={_sha256_hex_text(r['error_text'])}" + ) + _digest_update(hasher, line) + counts["tool_invocations"] = len(rows) + + # mcp_calls + where, params = where_ts("") + rows = conn.execute( + f""" + SELECT id, ts, server_name, method, tool_name, status, duration_ms, trace_id, + request_json, response_json, error_text + FROM mcp_calls + WHERE {where} + ORDER BY datetime(ts) ASC, id ASC; + """, + tuple(params), + ).fetchall() + for r in rows: + note_ts(r["ts"]) + line = ( + "mcp_calls" + f"|id={_canon(r['id'])}" + f"|ts={_fmt_ts(_canon(r['ts']))}" + f"|server_name={_canon(r['server_name'])}" + f"|method={_canon(r['method'])}" + 
f"|tool_name={_canon(r['tool_name'])}" + f"|status={_canon(r['status'])}" + f"|duration_ms={_canon(r['duration_ms'])}" + f"|trace_id={_canon(r['trace_id'])}" + f"|request_sha256={_sha256_hex_text(r['request_json'])}" + f"|response_sha256={_sha256_hex_text(r['response_json'])}" + f"|error_sha256={_sha256_hex_text(r['error_text'])}" + ) + _digest_update(hasher, line) + counts["mcp_calls"] = len(rows) + + # proof_artifacts + where, params = where_ts("") + rows = conn.execute( + f""" + SELECT id, ts, kind, path, sha256_hex, blake3_hex, size_bytes, trace_id, meta_json + FROM proof_artifacts + WHERE {where} + ORDER BY datetime(ts) ASC, id ASC; + """, + tuple(params), + ).fetchall() + for r in rows: + note_ts(r["ts"]) + line = ( + "proof_artifacts" + f"|id={_canon(r['id'])}" + f"|ts={_fmt_ts(_canon(r['ts']))}" + f"|kind={_canon(r['kind'])}" + f"|path={_canon(r['path'])}" + f"|sha256_hex={_canon(r['sha256_hex'])}" + f"|blake3_hex={_canon(r['blake3_hex'])}" + f"|size_bytes={_canon(r['size_bytes'])}" + f"|trace_id={_canon(r['trace_id'])}" + f"|meta_sha256={_sha256_hex_text(r['meta_json'])}" + ) + _digest_update(hasher, line) + counts["proof_artifacts"] = len(rows) + + # shadow_receipts (if present) + try: + where, params = where_ts("") + rows = conn.execute( + f""" + SELECT id, ts, horizon_id, counterfactual_hash, entropy_delta, reason_unrealized, + observer_signature, trace_id, meta_json + FROM shadow_receipts + WHERE {where} + ORDER BY datetime(ts) ASC, id ASC; + """, + tuple(params), + ).fetchall() + for r in rows: + note_ts(r["ts"]) + line = ( + "shadow_receipts" + f"|id={_canon(r['id'])}" + f"|ts={_fmt_ts(_canon(r['ts']))}" + f"|horizon_id={_canon(r['horizon_id'])}" + f"|counterfactual_hash={_canon(r['counterfactual_hash'])}" + f"|entropy_delta={_canon(r['entropy_delta'])}" + f"|reason_unrealized={_canon(r['reason_unrealized'])}" + f"|observer_signature={_canon(r['observer_signature'])}" + f"|trace_id={_canon(r['trace_id'])}" + f"|meta_sha256={_sha256_hex_text(r['meta_json'])}" + ) + _digest_update(hasher, line) + counts["shadow_receipts"] = len(rows) + except sqlite3.OperationalError: + counts["shadow_receipts"] = 0 + + selection = { + "scope": scope, + "since": since, + "until": until, + "trace_ids": trace_ids, + "kinds": [ + "tool_invocations", + "mcp_calls", + "proof_artifacts", + "shadow_receipts", + ], + } + digest = {"algorithm": "sha256", "hex": hasher.hexdigest()} + bounds = {"min_ts": bounds_min, "max_ts": bounds_max} + return ( + {"selection": selection, "counts": counts, "bounds": bounds, "digest": digest}, + digest["hex"], + ) + + +def _schema_version(conn: sqlite3.Connection) -> tuple[int, str | None]: + """ + Returns (schema_version_int, last_migration_name). + + schema_version_int is derived from the leading 4-digit prefix in applied migration filenames. 
+ """ + try: + rows = conn.execute("SELECT name FROM migrations;").fetchall() + except sqlite3.OperationalError: + return (0, None) + + max_v = 0 + last_name: str | None = None + for r in rows: + name = r["name"] if isinstance(r, sqlite3.Row) else r[0] + if not isinstance(name, str): + continue + if not last_name or name > last_name: + last_name = name + prefix = name.split("_", 1)[0] + if prefix.isdigit(): + max_v = max(max_v, int(prefix)) + return (max_v, last_name) + + +def cmd_seal(args: argparse.Namespace) -> int: + db_path = Path(args.db).expanduser().resolve() if args.db else default_db_path() + + with _open(str(db_path)) as conn: + payload, digest_hex = _seal_query_window(args, conn) + schema_version, schema_last_migration = _schema_version(conn) + + now = datetime.now(timezone.utc).replace(microsecond=0) + ts_tag = now.strftime("%Y%m%dT%H%M%SZ") + seal_dir = db_path.parent / "seals" + seal_dir.mkdir(parents=True, exist_ok=True) + bundle_path = (seal_dir / f"ouroboros_seal_{ts_tag}.json").resolve() + + seal_id = str(uuid.uuid4()) + trace_id = new_trace_id() + + bundle = { + "format": "vm-ouroboros-seal-v0", + "schema_version": schema_version, + "schema_last_migration": schema_last_migration, + "seal_id": seal_id, + "sealed_at": now.isoformat().replace("+00:00", "Z"), + "digest_algo": payload["digest"]["algorithm"], + "selection": payload["selection"], + "digest": payload["digest"], + "counts": payload["counts"], + "bounds": payload["bounds"], + "inputs": { + "sqlite_db_path": str(db_path), + }, + "trace_id": trace_id, + } + bundle_path.write_text( + json.dumps(bundle, ensure_ascii=False, sort_keys=True, indent=2) + "\n", + encoding="utf-8", + ) + + artifact_id = log_proof_artifact( + kind="ouroboros_seal_bundle", + path=bundle_path, + meta={ + "seal_id": seal_id, + "schema_version": schema_version, + "schema_last_migration": schema_last_migration, + "digest_algo": payload["digest"]["algorithm"], + "digest": payload["digest"], + "counts": payload["counts"], + "bounds": payload["bounds"], + "selection": payload["selection"], + }, + trace_id=trace_id, + db_path=db_path, + ) + + print(f"seal_id={seal_id}") + print(f"trace_id={trace_id}") + print(f"digest={digest_hex}") + print(f"bundle={bundle_path}") + print(f"artifact_id={artifact_id}") + return 0 + + +def cmd_seals_last(args: argparse.Namespace) -> int: + with _open(args.db) as conn: + rows = conn.execute( + """ + SELECT ts, id, kind, path, sha256_hex, trace_id + FROM proof_artifacts + WHERE kind = 'ouroboros_seal_bundle' + ORDER BY datetime(ts) DESC, id DESC + LIMIT ?; + """, + (int(args.n),), + ).fetchall() + + for r in rows: + ts = _fmt_ts(r["ts"]) + print( + f"{ts} id={r['id']} sha256={r['sha256_hex'] or '-'} trace={r['trace_id'] or '-'} {r['path'] or ''}" + ) + return 0 + + +def build_parser() -> argparse.ArgumentParser: + p = argparse.ArgumentParser( + prog="ledger", description="Local-first SQLite ledger (tool + MCP call log)" + ) + p.add_argument("--db", help=f"SQLite path (default: {default_db_path()})") + + sub = p.add_subparsers(dest="cmd", required=True) + + sp = sub.add_parser("last", help="Show last N events") + sp.add_argument("--n", type=int, default=50) + sp.set_defaults(func=cmd_last) + + sp = sub.add_parser("trace", help="Show an end-to-end trace") + sp.add_argument("trace_id") + sp.set_defaults(func=cmd_trace) + + sp = sub.add_parser("tool", help="Filter tool invocations by tool name") + sp.add_argument("tool_name") + sp.add_argument("--since", help='e.g. 
"2025-12-17 00:00:00" or "7 days"') + sp.add_argument("--n", type=int, default=200) + sp.set_defaults(func=cmd_tool) + + sp = sub.add_parser("errors", help="Show recent errors") + sp.add_argument("--since", help='e.g. "7 days"') + sp.add_argument("--n", type=int, default=200) + sp.set_defaults(func=cmd_errors) + + sp = sub.add_parser("search", help="Search redacted JSON blobs") + sp.add_argument("term") + sp.add_argument("--n", type=int, default=200) + sp.set_defaults(func=cmd_search) + + sp = sub.add_parser( + "seal", help="Create an Ouroboros seal bundle (deterministic digest)" + ) + sp.add_argument("--since", help='e.g. "7 days" or ISO8601') + sp.add_argument("--until", help='e.g. "2025-12-17 00:00:00" or ISO8601') + sp.add_argument( + "--trace-id", + action="append", + default=[], + help="Limit sealing to one or more trace ids (repeatable).", + ) + sp.set_defaults(func=cmd_seal) + + sp = sub.add_parser("seals", help="Seal bundle utilities") + seals_sub = sp.add_subparsers(dest="seals_cmd", required=True) + + sp2 = seals_sub.add_parser("last", help="Show last N seal bundles") + sp2.add_argument("--n", type=int, default=10) + sp2.set_defaults(func=cmd_seals_last) + + return p + + +def main(argv: list[str] | None = None) -> int: + parser = build_parser() + args = parser.parse_args(argv) + return int(args.func(args)) + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/cli/skill_validator.py b/cli/skill_validator.py new file mode 100755 index 0000000..4eea786 --- /dev/null +++ b/cli/skill_validator.py @@ -0,0 +1,550 @@ +#!/usr/bin/env python3 +""" +VaultMesh Claude Skill Validator + Receipt Emitter + +Checks that the VaultMesh skill is correctly installed under: + ~/.claude/skills/vaultmesh/ + +Validates: +- Directory exists +- All expected files present and non-empty +- SKILL.md has valid YAML frontmatter +- Supporting docs are properly linked +- Emits: + - VAULTMESH_SKILL_ROOT.txt (BLAKE3 integrity hash) + - VaultMesh Automation scroll receipts for each run: + automation_vm_skill_validate_success + automation_vm_skill_validate_warning + automation_vm_skill_validate_failure +""" + +import json +import os +import re +import sys +from dataclasses import dataclass, asdict +from datetime import datetime, timezone +from pathlib import Path +from typing import List, Optional, Dict, Tuple + +# --------------------------------------------------------------------------- +# Path configuration (self-rooting) +# --------------------------------------------------------------------------- + +THIS_FILE = Path(__file__).resolve() +CLI_DIR = THIS_FILE.parent # /root/work/vaultmesh/cli +REPO_ROOT = THIS_FILE.parents[1] # /root/work/vaultmesh + +# Allow override via env var, but default to auto-detected repo root +VM_ROOT = Path(os.environ.get("VAULTMESH_ROOT", REPO_ROOT)).resolve() +RECEIPTS_ROOT = Path(os.environ.get("VAULTMESH_RECEIPTS_ROOT", VM_ROOT / "receipts")) + +# --------------------------------------------------------------------------- +# Configuration +# --------------------------------------------------------------------------- + +EXPECTED_FILES = [ + "SKILL.md", + "QUICK_REFERENCE.md", + "OPERATIONS.md", + "MCP_INTEGRATION.md", + "PROTOCOLS.md", + "ALCHEMICAL_PATTERNS.md", + "INFRASTRUCTURE.md", + "CODE_TEMPLATES.md", + "ENGINE_SPECS.md", +] + +SUPPORTING_DOC_LINKS = { + "QUICK_REFERENCE.md": "Quick Reference", + "OPERATIONS.md": "Operations Guide", + "MCP_INTEGRATION.md": "MCP Integration", + "PROTOCOLS.md": "Protocols", + "ALCHEMICAL_PATTERNS.md": "Alchemical Patterns", + 
"INFRASTRUCTURE.md": "Infrastructure", + "CODE_TEMPLATES.md": "Code Templates", + "ENGINE_SPECS.md": "Engine Specs", +} + + +@dataclass +class CheckResult: + name: str + status: str # "ok", "warn", "fail" + details: str + + +@dataclass +class ValidationReport: + skill_dir: str + checks: List[CheckResult] + overall_status: str # "ok", "warn", "fail" + hash_algorithm: Optional[str] = None + root_hash: Optional[str] = None + + def to_dict(self) -> Dict: + return { + "skill_dir": self.skill_dir, + "overall_status": self.overall_status, + "hash_algorithm": self.hash_algorithm, + "root_hash": self.root_hash, + "checks": [asdict(c) for c in self.checks], + } + + +# --------------------------------------------------------------------------- +# Hashing helpers +# --------------------------------------------------------------------------- + +def load_hasher(): + """Return (name, constructor) for hash function (blake3 preferred).""" + try: + import blake3 # type: ignore + return "blake3", blake3.blake3 + except Exception: + import hashlib + return "sha256", hashlib.sha256 + + +# --------------------------------------------------------------------------- +# Basic checks +# --------------------------------------------------------------------------- + +def check_dir_exists(skill_dir: Path) -> CheckResult: + if skill_dir.is_dir(): + return CheckResult( + name="skill_dir_exists", + status="ok", + details=f"Found skill directory at {skill_dir}", + ) + return CheckResult( + name="skill_dir_exists", + status="fail", + details=f"Skill directory not found: {skill_dir}", + ) + + +def check_expected_files(skill_dir: Path) -> List[CheckResult]: + results = [] + for fname in EXPECTED_FILES: + path = skill_dir / fname + if not path.exists(): + results.append( + CheckResult( + name=f"file_missing:{fname}", + status="fail", + details=f"Expected file missing: {fname}", + ) + ) + elif path.stat().st_size == 0: + results.append( + CheckResult( + name=f"file_empty:{fname}", + status="warn", + details=f"File present but empty: {fname}", + ) + ) + else: + results.append( + CheckResult( + name=f"file_ok:{fname}", + status="ok", + details=f"File present: {fname}", + ) + ) + return results + + +FRONTMATTER_RE = re.compile( + r"^---\s*\n(.*?)\n---\s*\n", + re.DOTALL, +) + + +def parse_frontmatter(text: str) -> Optional[Dict[str, str]]: + m = FRONTMATTER_RE.match(text) + if not m: + return None + body = m.group(1) + data: Dict[str, str] = {} + for line in body.splitlines(): + line = line.strip() + if not line or line.startswith("#"): + continue + if ":" not in line: + continue + key, value = line.split(":", 1) + data[key.strip()] = value.strip().strip('"').strip("'") + return data + + +def check_skill_md(skill_dir: Path) -> List[CheckResult]: + results: List[CheckResult] = [] + path = skill_dir / "SKILL.md" + if not path.exists(): + return [ + CheckResult( + name="skill_md_exists", + status="fail", + details="SKILL.md is missing", + ) + ] + + text = path.read_text(encoding="utf-8") + + fm = parse_frontmatter(text) + if fm is None: + results.append( + CheckResult( + name="skill_md_frontmatter", + status="fail", + details="YAML frontmatter block (--- ... 
---) not found at top of SKILL.md", + ) + ) + return results + + # name + if fm.get("name") == "vaultmesh": + results.append( + CheckResult( + name="skill_md_name", + status="ok", + details='Frontmatter name is "vaultmesh".', + ) + ) + else: + results.append( + CheckResult( + name="skill_md_name", + status="fail", + details=f'Frontmatter "name" should be "vaultmesh", got {fm.get("name")!r}', + ) + ) + + # description + desc = fm.get("description", "").strip() + if desc: + results.append( + CheckResult( + name="skill_md_description", + status="ok", + details=f"Description present ({len(desc)} chars).", + ) + ) + else: + results.append( + CheckResult( + name="skill_md_description", + status="fail", + details="Frontmatter 'description' is missing or empty.", + ) + ) + + # Supporting doc links + link_checks = check_supporting_links(text) + results.extend(link_checks) + + return results + + +def check_supporting_links(skill_md_text: str) -> List[CheckResult]: + results: List[CheckResult] = [] + + # Simple markdown link regex: [Label](FILE.md) + link_re = re.compile(r"\[([^\]]+)\]\(([^)]+)\)") + + found_links: Dict[str, str] = {} + for label, target in link_re.findall(skill_md_text): + found_links[target] = label + + for fname, expected_label in SUPPORTING_DOC_LINKS.items(): + if fname not in found_links: + results.append( + CheckResult( + name=f"link_missing:{fname}", + status="fail", + details=f"Missing markdown link to {fname} in SKILL.md", + ) + ) + else: + label = found_links[fname] + # Only warn if label is very different + if expected_label.lower() not in label.lower(): + results.append( + CheckResult( + name=f"link_label_warn:{fname}", + status="warn", + details=( + f"Link to {fname} present but label is '{label}', " + f"expected something like '{expected_label}'." 
+ ), + ) + ) + else: + results.append( + CheckResult( + name=f"link_ok:{fname}", + status="ok", + details=f"Link to {fname} present with label '{label}'.", + ) + ) + + return results + + +# --------------------------------------------------------------------------- +# Integrity root for the skill (VAULTMESH_SKILL_ROOT.txt) +# --------------------------------------------------------------------------- + +def compute_skill_root_hash(skill_dir: Path) -> Tuple[str, str]: + algo_name, hasher_ctor = load_hasher() + h = hasher_ctor() + + # Sort to keep deterministic ordering + for fname in sorted(EXPECTED_FILES): + path = skill_dir / fname + if not path.exists(): + continue + h.update(fname.encode("utf-8")) + h.update(b"\0") + with path.open("rb") as f: + while True: + chunk = f.read(8192) + if not chunk: + break + h.update(chunk) + + digest = h.hexdigest() + return algo_name, digest + + +def write_skill_root_file(skill_dir: Path, algo_name: str, digest: str) -> CheckResult: + out_path = skill_dir / "VAULTMESH_SKILL_ROOT.txt" + if algo_name.lower() == "blake3": + value = f"blake3:{digest}\n" + else: + value = f"{algo_name}:{digest}\n" + out_path.write_text(value, encoding="utf-8") + return CheckResult( + name="root_file_written", + status="ok", + details=f"Wrote integrity root to {out_path} using {algo_name}.", + ) + + +# --------------------------------------------------------------------------- +# Automation scroll receipt emission +# --------------------------------------------------------------------------- + +def _now_iso() -> str: + return datetime.now(timezone.utc).isoformat().replace("+00:00", "Z") + + +def _read_last_receipt(receipts_path: Path) -> Optional[Dict]: + """Read the last receipt from the JSONL scroll file.""" + if not receipts_path.exists(): + return None + try: + with receipts_path.open("r", encoding="utf-8") as f: + last_line = None + for line in f: + line = line.strip() + if line: + last_line = line + if not last_line: + return None + return json.loads(last_line) + except Exception: + return None + + +def emit_automation_receipt( + event_type: str, + report: ValidationReport, + skill_root_algo: str, + skill_root_hash: str, +) -> None: + """ + Emit a VaultMesh Automation scroll receipt for this validator run. 
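+
+    Receipts are hash-chained: each new receipt's header.previous_hash is
+    taken from the header.root_hash of the last line in the scroll, so a
+    verifier can detect truncation or reordering by re-walking the chain.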
+ + Scroll: Automation + Types: + - automation_vm_skill_validate_success + - automation_vm_skill_validate_warning + - automation_vm_skill_validate_failure + """ + + scroll_name = "Automation" + scroll_dir = RECEIPTS_ROOT / "automation" + scroll_dir.mkdir(parents=True, exist_ok=True) + receipts_path = scroll_dir / "automation_events.jsonl" + + # Determine sequence and previous_hash + last = _read_last_receipt(receipts_path) + if last is None: + sequence = 0 + previous_hash = None + else: + sequence = int(last.get("meta", {}).get("sequence", -1)) + 1 + previous_hash = last.get("header", {}).get("root_hash") + + # Body snapshot (we keep it compact) + body = { + "skill_dir": report.skill_dir, + "hash_algorithm": skill_root_algo, + "root_hash": f"blake3:{skill_root_hash}" + if skill_root_algo.lower() == "blake3" + else f"{skill_root_algo}:{skill_root_hash}", + "overall_status": report.overall_status, + "checks": [ + { + "name": c.name, + "status": c.status, + } + for c in report.checks + ], + } + + # Build receipt (schema v2-style) + timestamp = _now_iso() + receipt = { + "schema_version": "2.0.0", + "type": event_type, + "timestamp": timestamp, + "header": { + "root_hash": None, # filled after hash + "tags": [ + "vaultmesh_skill", + "validator", + f"status:{report.overall_status}", + ], + "previous_hash": previous_hash, + }, + "meta": { + "scroll": scroll_name, + "sequence": sequence, + "anchor_epoch": None, + "proof_path": None, + }, + "body": body, + } + + # Compute receipt hash over canonical JSON + algo_name, hasher_ctor = load_hasher() + h = hasher_ctor() + encoded = json.dumps(receipt, sort_keys=True, separators=(",", ":")).encode("utf-8") + h.update(encoded) + digest = h.hexdigest() + if algo_name.lower() == "blake3": + receipt_hash = f"blake3:{digest}" + else: + receipt_hash = f"{algo_name}:{digest}" + + receipt["header"]["root_hash"] = receipt_hash + + # Append to scroll file + with receipts_path.open("a", encoding="utf-8") as f: + f.write(json.dumps(receipt, separators=(",", ":")) + "\n") + + +# --------------------------------------------------------------------------- +# Aggregation + main +# --------------------------------------------------------------------------- + +def aggregate_status(checks: List[CheckResult]) -> str: + worst = "ok" + for c in checks: + if c.status == "fail": + return "fail" + if c.status == "warn" and worst == "ok": + worst = "warn" + return worst + + +def main(argv: List[str]) -> int: + if len(argv) > 2: + print(f"Usage: {argv[0]} [skill_dir]", file=sys.stderr) + return 2 + + if len(argv) == 2: + skill_dir = Path(argv[1]).expanduser() + else: + skill_dir = Path("~/.claude/skills/vaultmesh").expanduser() + + checks: List[CheckResult] = [] + + # 1. Directory + dir_check = check_dir_exists(skill_dir) + checks.append(dir_check) + if dir_check.status == "fail": + report = ValidationReport( + skill_dir=str(skill_dir), + checks=checks, + overall_status="fail", + ) + print(json.dumps(report.to_dict(), indent=2)) + # No receipt emitted if the skill dir doesn't exist + return 2 + + # 2. Files + checks.extend(check_expected_files(skill_dir)) + + # 3. SKILL.md + links + checks.extend(check_skill_md(skill_dir)) + + # 4. Skill integrity root + skill_algo, skill_digest = compute_skill_root_hash(skill_dir) + checks.append( + CheckResult( + name="hash_algorithm", + status="ok" if skill_algo == "blake3" else "warn", + details=( + f"Using {skill_algo} for integrity hash " + + ("(preferred)." 
if skill_algo == "blake3" else "(BLAKE3 not available, using fallback.)") + ), + ) + ) + checks.append(write_skill_root_file(skill_dir, skill_algo, skill_digest)) + + overall = aggregate_status(checks) + + report = ValidationReport( + skill_dir=str(skill_dir), + checks=checks, + overall_status=overall, + hash_algorithm=skill_algo, + root_hash=skill_digest, + ) + + # Print JSON report to stdout + print(json.dumps(report.to_dict(), indent=2)) + + # Emit Automation scroll receipt + if overall == "ok": + event_type = "automation_vm_skill_validate_success" + elif overall == "warn": + event_type = "automation_vm_skill_validate_warning" + else: + event_type = "automation_vm_skill_validate_failure" + + try: + emit_automation_receipt( + event_type=event_type, + report=report, + skill_root_algo=skill_algo, + skill_root_hash=skill_digest, + ) + except Exception as e: + # We don't want receipt emission failures to hide validation output, + # so just log to stderr and keep the original exit code. + print(f"WARNING: failed to emit automation receipt: {e}", file=sys.stderr) + + if overall == "ok": + return 0 + if overall == "warn": + return 1 + return 2 + + +if __name__ == "__main__": + raise SystemExit(main(sys.argv)) diff --git a/cli/vm_cli.py b/cli/vm_cli.py new file mode 100755 index 0000000..6e412da --- /dev/null +++ b/cli/vm_cli.py @@ -0,0 +1,2816 @@ +#!/usr/bin/env python3 +""" +VaultMesh Unified CLI (minimal) +vm skill validate - Validate the VaultMesh Claude skill and emit receipts. +""" + +import json +import os +import subprocess +import sys +from datetime import datetime, timezone +from pathlib import Path +from typing import NoReturn, Optional + + +def _missing_dep(dep: str, *, pip_name: str | None = None) -> NoReturn: + pkg = pip_name or dep + raise SystemExit( + f"Missing dependency: {dep}. 
Install with: python3 -m pip install {pkg}" + ) + + +try: + import base58 # type: ignore +except ModuleNotFoundError: # pragma: no cover + _missing_dep("base58") + +try: + import blake3 # type: ignore +except ModuleNotFoundError: # pragma: no cover + _missing_dep("blake3") + +try: + import click # type: ignore +except ModuleNotFoundError: # pragma: no cover + _missing_dep("click") + +try: + from nacl import signing # type: ignore +except ModuleNotFoundError: # pragma: no cover + _missing_dep("pynacl", pip_name="pynacl") + +# ============================================================================ +# Self-rooting: CLI auto-detects repo root +# ============================================================================ + +THIS_FILE = Path(__file__).resolve() +CLI_DIR = THIS_FILE.parent # /root/work/vaultmesh/cli +REPO_ROOT = THIS_FILE.parents[1] # /root/work/vaultmesh + +# Allow override via env var, but default to auto-detected repo root +VAULTMESH_ROOT = Path(os.environ.get("VAULTMESH_ROOT", REPO_ROOT)).resolve() +RECEIPTS_ROOT = VAULTMESH_ROOT / "receipts" + +# Add VAULTMESH_ROOT to sys.path for engine imports +if str(VAULTMESH_ROOT) not in sys.path: + sys.path.insert(0, str(VAULTMESH_ROOT)) + +# ============================================================================ +# Guardian / Scroll config and Merkle helpers +# ============================================================================ + +SCROLLS = { + "drills": { + "jsonl": RECEIPTS_ROOT / "drills" / "drill_runs.jsonl", + "root_file": VAULTMESH_ROOT / "ROOT.drills.txt", + }, + "compliance": { + "jsonl": RECEIPTS_ROOT / "compliance" / "oracle_answers.jsonl", + "root_file": VAULTMESH_ROOT / "ROOT.compliance.txt", + }, + "guardian": { + "jsonl": RECEIPTS_ROOT / "guardian" / "anchor_events.jsonl", + "root_file": VAULTMESH_ROOT / "ROOT.guardian.txt", + }, + "treasury": { + "jsonl": RECEIPTS_ROOT / "treasury" / "treasury_events.jsonl", + "root_file": VAULTMESH_ROOT / "ROOT.treasury.txt", + }, + "mesh": { + "jsonl": RECEIPTS_ROOT / "mesh" / "mesh_events.jsonl", + "root_file": VAULTMESH_ROOT / "ROOT.mesh.txt", + }, + "offsec": { + "jsonl": RECEIPTS_ROOT / "offsec" / "offsec_events.jsonl", + "root_file": VAULTMESH_ROOT / "ROOT.offsec.txt", + }, + "identity": { + "jsonl": RECEIPTS_ROOT / "identity" / "identity_events.jsonl", + "root_file": VAULTMESH_ROOT / "ROOT.identity.txt", + }, + "observability": { + "jsonl": RECEIPTS_ROOT / "observability" / "observability_events.jsonl", + "root_file": VAULTMESH_ROOT / "ROOT.observability.txt", + }, + "automation": { + "jsonl": RECEIPTS_ROOT / "automation" / "automation_events.jsonl", + "root_file": VAULTMESH_ROOT / "ROOT.automation.txt", + }, + "psi": { + "jsonl": RECEIPTS_ROOT / "psi" / "psi_events.jsonl", + "root_file": VAULTMESH_ROOT / "ROOT.psi.txt", + }, + "console": { + "jsonl": RECEIPTS_ROOT / "console" / "console_events.jsonl", + "root_file": RECEIPTS_ROOT / "console" / "ROOT.console.txt", + }, +} + + +def _vmhash_blake3(data: bytes) -> str: + """VaultMesh hash: blake3:.""" + return f"blake3:{blake3.blake3(data).hexdigest()}" + + +def _now_iso_z() -> str: + return datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ") + + +def _read_root_value(root_file: Path) -> Optional[str]: + if not root_file.exists(): + return None + + text = root_file.read_text(encoding="utf-8").strip() + if not text: + return None + + # Structured root file (e.g., Console engine root) + for line in text.splitlines(): + line = line.strip() + if not line or line.startswith("#"): + continue + if 
line.startswith("merkle_root="): + value = line.split("=", 1)[1].strip() + return value or None + + # Plain root file: first non-empty, non-comment line + for line in text.splitlines(): + line = line.strip() + if not line or line.startswith("#"): + continue + return line + + return None + + +def _write_root_value( + root_file: Path, root_value: str, *, leaves: int | None = None +) -> None: + root_file.parent.mkdir(parents=True, exist_ok=True) + + # Console root file is a structured key=value document used by the HTTP bridge. + if root_file.name == "ROOT.console.txt": + events = int(leaves or 0) + root_file.write_text( + "\n".join( + [ + "# VaultMesh Console Root", + "engine_id=engine:console", + f"merkle_root={root_value}", + f"events={events}", + f"updated_at={_now_iso_z()}", + "", + ] + ), + encoding="utf-8", + ) + return + + root_file.write_text(root_value + "\n", encoding="utf-8") + + +def get_guardian_did() -> str: + """ + Resolve the DID that represents the Guardian anchor actor. + + Priority: + 1) VAULTMESH_GUARDIAN_DID env var + 2) keys/identity/guardian-local.json + 3) default 'did:vm:guardian:local' + """ + env_did = os.environ.get("VAULTMESH_GUARDIAN_DID") + if env_did: + return env_did.strip() + + # Try key file from the identity bootstrap + key_path = VAULTMESH_ROOT / "keys" / "identity" / "guardian-local.json" + if key_path.exists(): + try: + data = json.loads(key_path.read_text(encoding="utf-8")) + did = data.get("did") + if did: + return did + except Exception: + pass + + # Fallback + return "did:vm:guardian:local" + + +def _merkle_root_from_hashes(hashes: list[str]) -> str: + """ + Compute Merkle root using the same pattern as the Rust VmHash::merkle_root: + + - If empty: blake3("empty") + - If 1 element: that hash + - Else: + * group in pairs, duplicate last if odd + * each parent = blake3( left_hex + right_hex ) + """ + if not hashes: + return _vmhash_blake3(b"empty") + if len(hashes) == 1: + return hashes[0] + + current = hashes[:] + while len(current) > 1: + next_level: list[str] = [] + for i in range(0, len(current), 2): + left = current[i] + right = current[i + 1] if i + 1 < len(current) else current[i] + combined = (left.split(":", 1)[-1] + right.split(":", 1)[-1]).encode( + "utf-8" + ) + next_level.append(_vmhash_blake3(combined)) + current = next_level + + return current[0] + + +def _compute_scroll_root(scroll_name: str) -> dict: + """ + Compute Merkle root for a given scroll's JSONL file. 
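+
+    Each non-empty JSONL line is hashed with blake3 as a leaf; leaves are
+    then folded pairwise by _merkle_root_from_hashes (duplicating the last
+    leaf when a level is odd), matching the Rust VmHash::merkle_root scheme.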
+ + Returns dict: + { + "scroll": str, + "path": Path, + "root": str, + "leaves": int, + "exists": bool, + } + """ + cfg = SCROLLS[scroll_name] + path = cfg["jsonl"] + if not path.exists(): + return { + "scroll": scroll_name, + "path": path, + "root": _vmhash_blake3(b"empty"), + "leaves": 0, + "exists": False, + } + + hashes: list[str] = [] + leaves = 0 + with path.open("r", encoding="utf-8") as f: + for line in f: + line = line.strip() + if not line: + continue + leaves += 1 + hashes.append(_vmhash_blake3(line.encode("utf-8"))) + + root = _merkle_root_from_hashes(hashes) + return { + "scroll": scroll_name, + "path": path, + "root": root, + "leaves": leaves, + "exists": True, + } + + +def _compute_all_roots(selected: Optional[list[str]] = None) -> list[dict]: + names = selected or list(SCROLLS.keys()) + results = [] + for name in names: + if name not in SCROLLS: + continue + results.append(_compute_scroll_root(name)) + return results + + +# ============================================================================ +# Main CLI Group +# ============================================================================ + + +@click.group() +@click.version_option(version="0.1.0") +def cli() -> None: + """VaultMesh Civilization Ledger CLI (minimal edition).""" + pass + + +# ============================================================================ +# Skill Commands +# ============================================================================ + + +@cli.group() +def skill() -> None: + """Skill utilities.""" + pass + + +@skill.command("validate") +@click.option( + "--skill-dir", + type=click.Path(), + help="Override skill directory (default: ~/.claude/skills/vaultmesh)", +) +@click.option( + "--validator-path", + type=click.Path(), + help=( + "Path to vm_validate_vaultmesh_skill.py " + "(default: VAULTMESH_ROOT or same directory as this CLI)" + ), +) +def skill_validate(skill_dir: Optional[str], validator_path: Optional[str]) -> None: + """ + Validate the VaultMesh Claude skill and emit an Automation receipt. 
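+
+    Example:
+        vm skill validate --skill-dir ~/.claude/skills/vaultmesh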
+ + This wraps vm_validate_vaultmesh_skill.py, which: + - Checks ~/.claude/skills/vaultmesh/ (or provided skill-dir) + - Ensures SKILL.md frontmatter + links + - Maintains VAULTMESH_SKILL_ROOT.txt + - Writes Automation receipts to receipts/automation/automation_events.jsonl + """ + # ------------------------------------------------------------------ + # Locate validator script (skill_validator.py in cli/) + # ------------------------------------------------------------------ + if validator_path: + validator = Path(validator_path).expanduser() + else: + # Primary: cli/skill_validator.py (renamed copy) + candidate = CLI_DIR / "skill_validator.py" + if candidate.exists(): + validator = candidate + else: + # Fallback: legacy name in same directory + fallback = CLI_DIR / "vm_validate_vaultmesh_skill.py" + if fallback.exists(): + validator = fallback + else: + click.echo( + "Could not find skill_validator.py\n" + " Tried:\n" + f" {candidate}\n" + f" {fallback}", + err=True, + ) + sys.exit(2) + + # ------------------------------------------------------------------ + # Build command + # ------------------------------------------------------------------ + cmd = ["python3", str(validator)] + if skill_dir: + cmd.append(str(Path(skill_dir).expanduser())) + + # ------------------------------------------------------------------ + # Run validator + # ------------------------------------------------------------------ + result = subprocess.run( + cmd, + capture_output=True, + text=True, + ) + + if not result.stdout.strip(): + click.echo("Validator produced no output", err=True) + if result.stderr: + click.echo(result.stderr, err=True) + sys.exit(result.returncode or 2) + + # ------------------------------------------------------------------ + # Parse JSON report + # ------------------------------------------------------------------ + try: + report = json.loads(result.stdout) + except json.JSONDecodeError: + click.echo("Failed to parse validator JSON output", err=True) + click.echo("Raw stdout:", err=True) + click.echo(result.stdout, err=True) + if result.stderr: + click.echo("\nStderr:", err=True) + click.echo(result.stderr, err=True) + sys.exit(result.returncode or 2) + + status = report.get("overall_status", "unknown") + algo = report.get("hash_algorithm", "unknown") + root = report.get("root_hash", "unknown") + skill_dir_used = report.get("skill_dir", "(unknown)") + + # ------------------------------------------------------------------ + # Human-friendly summary + # ------------------------------------------------------------------ + click.echo("VaultMesh Skill Validation") + click.echo("----------------------------------------") + click.echo(f" Skill dir: {skill_dir_used}") + click.echo(f" Status: {status}") + click.echo(f" Hash algo: {algo}") + click.echo(f" Root hash: {root}") + click.echo("----------------------------------------") + + checks = report.get("checks", []) + ok = sum(1 for c in checks if c.get("status") == "ok") + warn = sum(1 for c in checks if c.get("status") == "warn") + fail = sum(1 for c in checks if c.get("status") == "fail") + + click.echo(f" Checks: {ok} ok, {warn} warn, {fail} fail") + + # Show Automation scroll location + receipts_path = RECEIPTS_ROOT / "automation" / "automation_events.jsonl" + click.echo(f" Receipt scroll: {receipts_path}") + + if result.returncode == 0: + click.echo("[ok] Automation receipt emitted (status: success)") + elif result.returncode == 1: + click.echo("[warn] Automation receipt emitted (status: warning)") + else: + click.echo("[fail] Validation 
failed (see receipt + logs)") + + # Propagate exit code so CI / callers can react + sys.exit(result.returncode) + + +# ============================================================================ +# Guardian Commands +# ============================================================================ + + +@cli.group() +def guardian() -> None: + """Guardian Engine - Audit and anchor.""" + pass + + +@guardian.command("automation-log") +@click.option( + "--limit", + default=10, + show_default=True, + type=int, + help="Number of recent Automation receipts to show", +) +def guardian_automation_log(limit: int) -> None: + """ + Show recent Automation scroll events (e.g. skill validation runs). + + Reads the Automation scroll: + receipts/automation/automation_events.jsonl + and prints the last N events with sequence, status, and hash summary. + """ + receipts_path = RECEIPTS_ROOT / "automation" / "automation_events.jsonl" + + if not receipts_path.exists(): + click.echo(f"No Automation scroll found at: {receipts_path}") + sys.exit(0) + + lines = receipts_path.read_text(encoding="utf-8").splitlines() + lines = [ln for ln in lines if ln.strip()] + if not lines: + click.echo("Automation scroll is empty.") + sys.exit(0) + + # Take last N + recent = lines[-limit:] + + records = [] + for raw in recent: + try: + r = json.loads(raw) + except Exception: + continue + records.append(r) + + if not records: + click.echo("No valid Automation receipts parsed.") + sys.exit(0) + + click.echo("Guardian Audit View - Automation Scroll") + click.echo("========================================") + click.echo(f"File: {receipts_path}") + click.echo(f"Showing last {len(records)} event(s):") + click.echo() + + # Print a compact table + header = f"{'SEQ':>4} {'TIME':19} {'TYPE':36} {'STATUS':7} {'HASH':18}" + click.echo(header) + click.echo("-" * len(header)) + + for r in records: + meta = r.get("meta", {}) + hdr = r.get("header", {}) + body = r.get("body", {}) + + seq = meta.get("sequence", 0) + ts = r.get("timestamp", "")[:19] + rtype = str(r.get("type", ""))[:36] + status_tag = "?" + # Status is encoded in tags like "status:ok" + for t in hdr.get("tags", []): + if t.startswith("status:"): + status_tag = t.split(":", 1)[1] + break + # Fallback to body.overall_status if present + if status_tag == "?" and "overall_status" in body: + status_tag = body.get("overall_status") + + rhash = hdr.get("root_hash", "")[:18] + + click.echo(f"{seq:>4} {ts:19} {rtype:36} {status_tag:7} {rhash:18}") + + click.echo() + click.echo("Hint: use --limit N to adjust how many events are shown.") + + +@guardian.command("compute-roots") +@click.option( + "--scroll", + "scrolls", + multiple=True, + help="Limit to one or more scrolls (e.g. --scroll automation --scroll treasury)", +) +def guardian_compute_roots(scrolls: tuple[str, ...]) -> None: + """Compute Merkle root for each scroll's JSONL but do NOT write ROOT.* files.""" + selected = list(scrolls) if scrolls else None + results = _compute_all_roots(selected) + + click.echo("Guardian - Computed Scroll Roots") + click.echo("================================") + for r in results: + status = "OK" if r["exists"] else "MISSING" + click.echo( + f"- {r['scroll']:14} | leaves: {r['leaves']:4d} | " + f"status: {status:7} | root: {r['root']}" + ) + if not results: + click.echo("No matching scrolls (check --scroll names).") + + +@guardian.command("update-roots") +@click.option( + "--scroll", + "scrolls", + multiple=True, + help="Limit to one or more scrolls (e.g. 
--scroll automation --scroll psi)", +) +def guardian_update_roots(scrolls: tuple[str, ...]) -> None: + """ + Compute Merkle roots for scrolls and write them into ROOT.*.txt files. + + This is the canonical 'scroll -> Merkle root -> ROOT.*.txt' step. + """ + selected = list(scrolls) if scrolls else None + results = _compute_all_roots(selected) + + click.echo("Guardian - Updating ROOT.*.txt Files") + click.echo("====================================") + + for r in results: + cfg = SCROLLS[r["scroll"]] + root_file = cfg["root_file"] + _write_root_value(root_file, r["root"], leaves=r["leaves"]) + click.echo( + f"- {r['scroll']:14} -> {root_file.name:22} | " + f"leaves: {r['leaves']:4d} | root: {r['root']}" + ) + + if not results: + click.echo("No matching scrolls (check --scroll names).") + + +@guardian.command("anchor") +@click.option( + "--backend", + default="local", + show_default=True, + help="Anchor backend identifier (e.g. local, ots, eth, btc).", +) +def guardian_anchor(backend: str) -> None: + """ + Run a full anchor cycle: + + - Compute Merkle root for each scroll + - Update ROOT.*.txt files + - Emit a Guardian anchor receipt in receipts/guardian/anchor_events.jsonl + """ + results = _compute_all_roots(None) + + # Update ROOT files + for r in results: + cfg = SCROLLS[r["scroll"]] + root_file = cfg["root_file"] + _write_root_value(root_file, r["root"], leaves=r["leaves"]) + + # Build anchor payload + now = datetime.now(timezone.utc) + ts = now.isoformat(timespec="seconds").replace("+00:00", "Z") + anchor_id = f"anchor-{now.strftime('%Y%m%d%H%M%S')}" + anchor_epoch = int(now.timestamp()) + + # Get the Guardian DID for attribution + guardian_did = get_guardian_did() + + roots_map = {r["scroll"]: r["root"] for r in results} + + anchor_body = { + "schema_version": "2.0.0", + "type": "guardian_anchor", + "timestamp": ts, + "anchor_id": anchor_id, + "backend": backend, + "anchor_by": guardian_did, + "anchor_epoch": anchor_epoch, + "roots": roots_map, + "scrolls": list(roots_map.keys()), + } + + anchor_json = json.dumps(anchor_body, sort_keys=True).encode("utf-8") + anchor_hash = _vmhash_blake3(anchor_json) + + record = { + **anchor_body, + "anchor_hash": anchor_hash, + } + + guardian_path = SCROLLS["guardian"]["jsonl"] + guardian_path.parent.mkdir(parents=True, exist_ok=True) + with guardian_path.open("a", encoding="utf-8") as f: + f.write(json.dumps(record) + "\n") + + click.echo("Guardian Anchor") + click.echo("===============") + click.echo(f" Anchor ID: {anchor_id}") + click.echo(f" Backend: {backend}") + click.echo(f" Anchor by: {guardian_did}") + click.echo(f" Anchor hash: {anchor_hash}") + click.echo() + click.echo(" Scroll roots:") + for name, root in roots_map.items(): + click.echo(f" - {name:14} {root}") + click.echo() + click.echo(f" Receipt: {guardian_path}") + + +@guardian.command("status") +def guardian_status() -> None: + """ + Show current ROOT.*.txt hashes and whether they match the latest JSONL Merkle root. 
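+
+    States: OK (ROOT file matches the freshly computed root), STALE (scroll
+    changed since the last update-roots/anchor), MISSING_ROOT (no root value
+    on disk yet), NO_COMPUTED (no computed root available for the scroll).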
+ """ + click.echo("Guardian Status - ROOT Files vs Computed Roots") + click.echo("==============================================") + + computed = {r["scroll"]: r for r in _compute_all_roots(None)} + + for name, cfg in SCROLLS.items(): + root_file = cfg["root_file"] + on_disk = _read_root_value(root_file) + + comp = computed.get(name) + comp_root = comp["root"] if comp else None + + if on_disk is None or on_disk == "": + state = "MISSING_ROOT" + elif comp_root is None: + state = "NO_COMPUTED" + elif on_disk == comp_root: + state = "OK" + else: + state = "STALE" + + # Truncate hashes for display + on_disk_short = ( + (on_disk[:30] + "...") + if on_disk and len(on_disk) > 30 + else (on_disk or "-") + ) + comp_short = ( + (comp_root[:30] + "...") + if comp_root and len(comp_root) > 30 + else (comp_root or "-") + ) + + click.echo( + f"- {name:14} | {state:12} | disk: {on_disk_short} | computed: {comp_short}" + ) + + +# ============================================================================ +# Identity Commands +# ============================================================================ + + +def _store_identity_keypair( + did: str, keypair: dict, explicit_path: Optional[Path] = None +) -> Path: + """ + Store the DID keypair JSON to disk. + Default: VAULTMESH_ROOT/keys/identity/{did_type}-{name}.json + """ + if explicit_path is not None: + out_path = explicit_path + else: + parts = did.split(":") + did_type = parts[2] if len(parts) >= 4 else "unknown" + name = parts[3] if len(parts) >= 4 else "unknown" + key_dir = VAULTMESH_ROOT / "keys" / "identity" + key_dir.mkdir(parents=True, exist_ok=True) + out_path = key_dir / f"{did_type}-{name}.json" + + tmp = { + "did": did, + "created_at": datetime.now(timezone.utc) + .isoformat(timespec="seconds") + .replace("+00:00", "Z"), + "public_key_multibase": keypair["public_key_multibase"], + "public_key_hex": keypair["public_key_hex"], + "secret_key_hex": keypair["secret_key_hex"], + } + + out_path.write_text(json.dumps(tmp, indent=2), encoding="utf-8") + try: + os.chmod(out_path, 0o600) + except PermissionError: + pass # Best-effort + + return out_path + + +@cli.group() +def identity() -> None: + """Identity Engine - DIDs and credentials.""" + pass + + +@identity.command("did-create") +@click.option( + "--type", + "did_type", + required=True, + type=click.Choice( + ["node", "human", "agent", "service", "mesh", "portal", "guardian", "skill"], + case_sensitive=False, + ), + help="DID type (node/human/agent/service/mesh/portal/guardian/skill)", +) +@click.option( + "--name", + "name", + required=True, + help="DID name (e.g. 'shield', 'local', 'karol')", +) +@click.option( + "--display-name", + "display_name", + help="Human-friendly display name for this DID", +) +@click.option( + "--role", + "role", + help="Logical role (e.g. 'portal', 'guardian', 'skill', 'auditor')", +) +@click.option( + "--controller", + "controller_did", + help="Controller DID (optional, default: creator DID)", +) +@click.option( + "--creator", + "creator_did", + required=True, + help="DID of the actor creating this DID (e.g. 
did:vm:human:karol)", +) +@click.option( + "--key-out", + "key_out", + type=click.Path(dir_okay=False, writable=True, path_type=Path), + help="Explicit path to write keypair JSON (default: keys/identity/{type}-{name}.json)", +) +def identity_did_create( + did_type: str, + name: str, + display_name: Optional[str], + role: Optional[str], + controller_did: Optional[str], + creator_did: str, + key_out: Optional[Path], +) -> None: + """ + Create a VaultMesh DID + Ed25519 keypair and emit identity_did_create receipt. + + Example: + vm identity did-create \\ + --type portal \\ + --name shield \\ + --display-name "VaultMesh Auditor Portal (shield)" \\ + --role portal \\ + --creator did:vm:human:karol + """ + # Assemble DID: did:vm:{type}:{name} + did_str = f"did:vm:{did_type.lower()}:{name}" + controller = controller_did or creator_did + + # 1) Generate Ed25519 keypair + signing_key = signing.SigningKey.generate() + verify_key = signing_key.verify_key + + public_bytes = bytes(verify_key) + secret_bytes = bytes(signing_key) + + # Multibase base58-btc with 'z' prefix (Ed25519VerificationKey2020 style) + public_key_b58 = base58.b58encode(public_bytes).decode("ascii") + public_key_multibase = f"z{public_key_b58}" + + keypair_info = { + "public_key_multibase": public_key_multibase, + "public_key_hex": public_bytes.hex(), + "secret_key_hex": secret_bytes.hex(), + } + + # 2) Build receipt body aligned with Rust DidCreateReceipt + doc_like = { + "id": did_str, + "controller": controller, + "public_key_multibase": public_key_multibase, + "display_name": display_name, + "role": role, + } + doc_json = json.dumps(doc_like, sort_keys=True).encode("utf-8") + doc_hash = _vmhash_blake3(doc_json) + + receipt_body = { + "did": did_str, + "did_type": did_type.lower(), + "controller": controller, + "created_by": creator_did, + "display_name": display_name, + "role": role, + "public_key_type": "Ed25519VerificationKey2020", + "public_key_multibase": public_key_multibase, + "initial_keys": [f"{did_str}#key-1"], + "did_document_hash": doc_hash, + } + + # 3) Emit receipt into identity scroll + now = datetime.now(timezone.utc) + ts = now.isoformat(timespec="seconds").replace("+00:00", "Z") + + body_json = json.dumps(receipt_body, sort_keys=True).encode("utf-8") + root_hash = _vmhash_blake3(body_json) + + receipt = { + "schema_version": "2.0.0", + "type": "identity_did_create", + "timestamp": ts, + "scroll": "identity", + "tags": ["identity", "did", "create", did_type.lower()], + "root_hash": root_hash, + "body": receipt_body, + } + + # Write to identity scroll + identity_scroll = SCROLLS["identity"]["jsonl"] + identity_scroll.parent.mkdir(parents=True, exist_ok=True) + with identity_scroll.open("a", encoding="utf-8") as f: + f.write(json.dumps(receipt) + "\n") + + # Update ROOT.identity.txt (scroll Merkle root) + root_file = SCROLLS["identity"]["root_file"] + root_info = _compute_scroll_root("identity") + _write_root_value(root_file, root_info["root"], leaves=root_info["leaves"]) + + # 4) Store key material safely on disk + key_path = _store_identity_keypair(did_str, keypair_info, explicit_path=key_out) + + click.echo("Identity DID Created") + click.echo("====================") + click.echo(f" DID: {did_str}") + click.echo(f" Type: {did_type.lower()}") + if display_name: + click.echo(f" Display: {display_name}") + if role: + click.echo(f" Role: {role}") + click.echo(f" Controller: {controller}") + click.echo(f" Creator: {creator_did}") + click.echo() + click.echo(f" Public key: {public_key_multibase}") + click.echo(f" Key 
file: {key_path}") + click.echo(f" Receipt: {root_hash[:40]}...") + click.echo(f" Scroll: {identity_scroll}") + + +@identity.command("list") +def identity_list() -> None: + """List all DIDs in the identity scroll.""" + identity_scroll = SCROLLS["identity"]["jsonl"] + + if not identity_scroll.exists(): + click.echo(f"No identity scroll found at: {identity_scroll}") + return + + lines = identity_scroll.read_text(encoding="utf-8").splitlines() + lines = [ln for ln in lines if ln.strip()] + + if not lines: + click.echo("Identity scroll is empty.") + return + + dids = [] + for raw in lines: + try: + r = json.loads(raw) + if r.get("type") == "identity_did_create": + body = r.get("body", {}) + dids.append( + { + "did": body.get("did", "?"), + "type": body.get("did_type", "?"), + "display_name": body.get("display_name", ""), + "role": body.get("role", ""), + "created_by": body.get("created_by", "?"), + "timestamp": r.get("timestamp", "?"), + } + ) + except Exception: + continue + + if not dids: + click.echo("No DIDs found in identity scroll.") + return + + click.echo("Identity Registry") + click.echo("=================") + click.echo(f"{'DID':40} {'TYPE':10} {'ROLE':12} {'DISPLAY NAME'}") + click.echo("-" * 80) + + for d in dids: + did_short = d["did"][:40] if len(d["did"]) > 40 else d["did"] + click.echo( + f"{did_short:40} {d['type']:10} {d['role'] or '-':12} {d['display_name'] or '-'}" + ) + + +# ============================================================================ +# OffSec Commands +# ============================================================================ + + +def _resolve_actor_did(explicit: Optional[str] = None) -> str: + """ + Resolve the actor DID for OffSec operations. + + Priority: + 1) explicit --actor-did option + 2) VAULTMESH_ACTOR_DID env var + 3) raise error + """ + if explicit: + return explicit + env = os.environ.get("VAULTMESH_ACTOR_DID") + if not env: + raise click.UsageError( + "Actor DID required; pass --actor-did or set VAULTMESH_ACTOR_DID" + ) + return env + + +def _emit_offsec_receipt( + vm_root: Path, + receipt_type: str, + body: dict, + tags: list[str], +) -> dict: + """ + Append an OffSec receipt to receipts/offsec/offsec_events.jsonl + with hash chaining (previous_hash -> root_hash). 
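+
+    root_hash is computed over the canonical JSON of the envelope while
+    header.root_hash is still None, then patched into the header; verifiers
+    must null the field again before re-hashing.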
+ """ + scroll_path = vm_root / "receipts" / "offsec" / "offsec_events.jsonl" + scroll_path.parent.mkdir(parents=True, exist_ok=True) + + previous_hash = None + if scroll_path.exists() and scroll_path.stat().st_size > 0: + text = scroll_path.read_text(encoding="utf-8").strip() + if text: + last_line = text.splitlines()[-1] + try: + last_obj = json.loads(last_line) + previous_hash = last_obj.get("header", {}).get("root_hash") + except json.JSONDecodeError: + previous_hash = None + + timestamp = ( + datetime.now(timezone.utc).isoformat(timespec="seconds").replace("+00:00", "Z") + ) + + envelope = { + "schema_version": "1.0.0", + "type": receipt_type, + "timestamp": timestamp, + "scroll": "offsec", + "header": { + "previous_hash": previous_hash, + "tags": tags, + "root_hash": None, + }, + "body": body, + } + + # Compute root_hash over canonical JSON (with root_hash=None) + serialized = json.dumps( + envelope, + sort_keys=True, + separators=(",", ":"), + ).encode("utf-8") + root_hash = "blake3:" + blake3.blake3(serialized).hexdigest() + envelope["header"]["root_hash"] = root_hash + + with scroll_path.open("a", encoding="utf-8") as f: + f.write(json.dumps(envelope, sort_keys=True) + "\n") + + return envelope + + +# ============================================================================ +# OffSec Phase 2 Helpers +# ============================================================================ + + +def _generate_offsec_id(prefix: str) -> str: + """ + Generate unique ID like VULN-2025-12-001, RT-2025-12-001, INTEL-2025-12-001. + Counts existing IDs with same prefix+date to get sequence number. + """ + now = datetime.now(timezone.utc) + date_part = now.strftime("%Y-%m") + scroll_path = VAULTMESH_ROOT / "receipts" / "offsec" / "offsec_events.jsonl" + seq = 1 + if scroll_path.exists(): + for line in scroll_path.read_text(encoding="utf-8").splitlines(): + if line.strip(): + try: + obj = json.loads(line) + body = obj.get("body", {}) + existing_id = ( + body.get("vuln_id") + or body.get("exercise_id") + or body.get("intel_id") + ) + if existing_id and existing_id.startswith(f"{prefix}-{date_part}"): + seq += 1 + except Exception: + pass + return f"{prefix}-{date_part}-{seq:03d}" + + +def _parse_severity_breakdown(json_str: str) -> dict: + """Parse severity breakdown JSON string.""" + try: + breakdown = json.loads(json_str) + required = {"critical", "high", "medium", "low", "info"} + missing = required - set(breakdown.keys()) + if missing: + # Fill missing with 0 + for key in missing: + breakdown[key] = 0 + return breakdown + except json.JSONDecodeError as e: + raise click.UsageError(f"Invalid severity breakdown JSON: {e}") + + +def _parse_affected_products(json_str: str) -> list: + """Parse affected products JSON array.""" + try: + products = json.loads(json_str) + if not isinstance(products, list): + raise ValueError("affected-products must be a JSON array") + return products + except json.JSONDecodeError as e: + raise click.UsageError(f"Invalid affected-products JSON: {e}") + + +def _parse_comma_list(value: Optional[str]) -> list: + """Parse comma-separated string into list.""" + if not value: + return [] + return [v.strip() for v in value.split(",") if v.strip()] + + +def _validate_cvss_vector(vector: str) -> bool: + """Validate CVSS 3.1 vector format (basic check).""" + if not vector: + return True # Optional field + return vector.startswith("CVSS:3.1/") or vector.startswith("CVSS:3.0/") + + +def _resolve_asset_identifier(asset: str) -> tuple: + """ + Resolve asset to (DID, hostname) tuple. 
+ + Input can be: + - DID: did:vm:node:brick-02 -> (did:vm:node:brick-02, None) + - Hostname: brick-02.mesh.local -> (attempt DID lookup, brick-02.mesh.local) + - Short: brick-02 -> (did:vm:node:brick-02, brick-02) + """ + if asset.startswith("did:vm:"): + return (asset, None) + + # Check if it's a known node name in identity scroll + identity_scroll = VAULTMESH_ROOT / "receipts" / "identity" / "identity_events.jsonl" + if identity_scroll.exists(): + for line in identity_scroll.read_text(encoding="utf-8").splitlines(): + if line.strip(): + try: + obj = json.loads(line) + if obj.get("type") == "identity_did_create": + did = obj.get("body", {}).get("did", "") + short_name = asset.split(".")[0] + if did.endswith(f":{short_name}"): + return (did, asset if "." in asset else None) + except Exception: + pass + + # Fallback: construct DID from short name + short_name = asset.split(".")[0] + return (f"did:vm:node:{short_name}", asset if "." in asset else None) + + +@cli.group() +def offsec(): + """OffSec Engine - incidents, forensics, remediation.""" + pass + + +@offsec.command("agents") +def offsec_agents() -> None: + """List agents on the Shield Node (requires OFFSEC_NODE_URL).""" + import asyncio + + # Lazy import to avoid breaking CLI if aiohttp not installed + try: + from scripts.offsec_node_client import OffsecNodeClient, OffsecNodeError + except ImportError: + click.echo( + "[offsec] Error: aiohttp not installed. Run: pip install aiohttp", err=True + ) + raise SystemExit(1) + + async def _run() -> None: + client = OffsecNodeClient.from_env() + click.echo(f"[offsec] Connecting to {client.base_url}...") + resp = await client.command("agents list") + if resp.get("status") == "ok": + for line in resp.get("lines", []): + click.echo(line) + else: + click.echo(json.dumps(resp, indent=2)) + + try: + asyncio.run(_run()) + except OffsecNodeError as e: + click.echo(f"[offsec] Shield node error: {e}", err=True) + raise SystemExit(1) + except Exception as e: + click.echo(f"[offsec] Connection failed: {e}", err=True) + click.echo( + "[offsec] Set OFFSEC_NODE_URL to Shield Node address (e.g. http://shield-vm:8081)" + ) + raise SystemExit(1) + + +@offsec.command("shield-status") +def offsec_shield_status() -> None: + """Get Shield Node status (requires OFFSEC_NODE_URL).""" + import asyncio + + try: + from scripts.offsec_node_client import OffsecNodeClient, OffsecNodeError + except ImportError: + click.echo( + "[offsec] Error: aiohttp not installed. Run: pip install aiohttp", err=True + ) + raise SystemExit(1) + + async def _run() -> None: + client = OffsecNodeClient.from_env() + click.echo(f"[offsec] Connecting to {client.base_url}...") + + # Get health + health = await client.health() + click.echo(f"\n SHIELD NODE HEALTH") + click.echo(f" Status: {health.get('status', 'unknown')}") + click.echo(f" Nodes: {health.get('nodes', 0)}") + click.echo(f" Proofs: {health.get('proofs', 0)}") + click.echo(f" Uptime: {health.get('uptime', 'unknown')}") + + # Get shield status + resp = await client.command("shield status") + if resp.get("status") == "ok": + click.echo("") + for line in resp.get("lines", []): + click.echo(line) + + try: + asyncio.run(_run()) + except OffsecNodeError as e: + click.echo(f"[offsec] Shield node error: {e}", err=True) + raise SystemExit(1) + except Exception as e: + click.echo(f"[offsec] Connection failed: {e}", err=True) + raise SystemExit(1) + + +@offsec.command("incident-open") +@click.option( + "--id", "incident_id", required=True, help="Incident ID (e.g. 
INC-2025-12-001)" +) +@click.option("--title", required=True, help="Short incident title") +@click.option( + "--severity", + type=click.Choice(["low", "medium", "high", "critical"]), + required=True, +) +@click.option("--summary", required=True, help="Initial summary of the incident") +@click.option("--device", default="unknown", help="Device or node label (e.g. shield)") +@click.option( + "--actor-did", required=False, help="Override actor DID (else VAULTMESH_ACTOR_DID)" +) +def offsec_incident_open( + incident_id: str, + title: str, + severity: str, + summary: str, + device: str, + actor_did: Optional[str], +) -> None: + """Open a new security incident.""" + actor = _resolve_actor_did(actor_did) + + body = { + "incident_id": incident_id, + "title": title, + "severity": severity, + "status": "reported", + "opened_by": actor, + "device": device, + "affected_nodes": [], + "summary": summary, + } + tags = ["offsec", "incident", "open", incident_id, actor] + + receipt = _emit_offsec_receipt(VAULTMESH_ROOT, "offsec_incident_open", body, tags) + click.echo(f"[offsec] incident opened: {incident_id}") + click.echo(f" severity: {severity}") + click.echo(f" root_hash: {receipt['header']['root_hash']}") + + +@offsec.command("incident-update") +@click.option("--id", "incident_id", required=True, help="Incident ID") +@click.option("--field", required=True, help="Field to update (e.g. status)") +@click.option("--value", required=True, help="New value for the field") +@click.option("--old-value", required=False, help="Previous value (optional)") +@click.option("--actor-did", required=False, help="Override actor DID") +def offsec_incident_update( + incident_id: str, + field: str, + value: str, + old_value: Optional[str], + actor_did: Optional[str], +) -> None: + """Record an incident field change (e.g. status).""" + actor = _resolve_actor_did(actor_did) + + body = { + "incident_id": incident_id, + "field": field, + "old_value": old_value, + "new_value": value, + "updated_by": actor, + } + tags = ["offsec", "incident", "update", incident_id, actor, f"field:{field}"] + + receipt = _emit_offsec_receipt(VAULTMESH_ROOT, "offsec_incident_update", body, tags) + click.echo(f"[offsec] incident updated: {incident_id} {field} -> {value}") + click.echo(f" root_hash: {receipt['header']['root_hash']}") + + +@offsec.command("incident-close") +@click.option("--id", "incident_id", required=True, help="Incident ID") +@click.option( + "--resolution", required=True, help="Resolution code (e.g. 
credentials_revoked)" +) +@click.option("--lessons", required=True, help="Lessons learned summary") +@click.option("--phases-completed", type=int, default=4, show_default=True) +@click.option("--timeline-hash", required=False, help="Optional hash of timeline.json") +@click.option("--actor-did", required=False, help="Override actor DID") +def offsec_incident_close( + incident_id: str, + resolution: str, + lessons: str, + phases_completed: int, + timeline_hash: Optional[str], + actor_did: Optional[str], +) -> None: + """Close an incident with resolution and lessons learned.""" + actor = _resolve_actor_did(actor_did) + + body = { + "incident_id": incident_id, + "closed_by": actor, + "resolution": resolution, + "lessons_learned": lessons, + "phases_completed": phases_completed, + "timeline_hash": timeline_hash, + } + tags = ["offsec", "incident", "close", incident_id, actor, resolution] + + receipt = _emit_offsec_receipt(VAULTMESH_ROOT, "offsec_incident_close", body, tags) + click.echo(f"[offsec] incident closed: {incident_id} ({resolution})") + click.echo(f" root_hash: {receipt['header']['root_hash']}") + + +@offsec.command("snapshot") +@click.option("--incident", "incident_id", required=True, help="Incident ID") +@click.option("--snapshot-id", required=True, help="Snapshot ID (e.g. SNAP-001)") +@click.option("--summary", required=True, help="What this snapshot captures") +@click.option("--path", "evidence_path", required=True, help="Path to evidence file") +@click.option("--actor-did", required=False, help="Override actor DID") +def offsec_snapshot( + incident_id: str, + snapshot_id: str, + summary: str, + evidence_path: str, + actor_did: Optional[str], +) -> None: + """Record a forensic snapshot for an incident.""" + actor = _resolve_actor_did(actor_did) + + ev_path = Path(evidence_path) + if not ev_path.exists(): + raise click.UsageError(f"Evidence path does not exist: {evidence_path}") + + # Hash the evidence file + hasher = blake3.blake3() + with ev_path.open("rb") as f: + for chunk in iter(lambda: f.read(65536), b""): + hasher.update(chunk) + file_hash = "blake3:" + hasher.hexdigest() + + body = { + "incident_id": incident_id, + "snapshot_id": snapshot_id, + "summary": summary, + "collected_by": actor, + "path": str(ev_path), + "file_hash": file_hash, + } + tags = ["offsec", "forensic", "snapshot", incident_id, actor, snapshot_id] + + receipt = _emit_offsec_receipt( + VAULTMESH_ROOT, "offsec_forensic_snapshot", body, tags + ) + click.echo(f"[offsec] snapshot recorded: {incident_id} / {snapshot_id}") + click.echo(f" evidence_hash: {file_hash}") + click.echo(f" root_hash: {receipt['header']['root_hash']}") + + +@offsec.command("recover") +@click.option("--incident", "incident_id", required=True, help="Incident ID") +@click.option("--action-id", required=True, help="Action ID (e.g. 
ACT-001)") +@click.option("--description", required=True, help="Description of recovery action") +@click.option("--actor-did", required=False, help="Override actor DID") +def offsec_recover( + incident_id: str, action_id: str, description: str, actor_did: Optional[str] +) -> None: + """Record a remediation / recovery action for an incident.""" + actor = _resolve_actor_did(actor_did) + + body = { + "incident_id": incident_id, + "action_id": action_id, + "description": description, + "executed_by": actor, + } + tags = ["offsec", "remediation", incident_id, actor, action_id] + + receipt = _emit_offsec_receipt(VAULTMESH_ROOT, "offsec_remediation", body, tags) + click.echo(f"[offsec] remediation recorded: {incident_id} / {action_id}") + click.echo(f" root_hash: {receipt['header']['root_hash']}") + + +@offsec.command("status") +@click.option("--incident", "incident_id", required=False, help="Filter by incident ID") +@click.option( + "--limit", type=int, default=20, show_default=True, help="Max receipts to show" +) +def offsec_status(incident_id: Optional[str], limit: int) -> None: + """Show recent OffSec receipts (optionally filtered by incident).""" + scroll_path = VAULTMESH_ROOT / "receipts" / "offsec" / "offsec_events.jsonl" + + if not scroll_path.exists() or scroll_path.stat().st_size == 0: + click.echo("[offsec] no OffSec receipts yet") + return + + lines = scroll_path.read_text(encoding="utf-8").strip().splitlines() + lines = [ln for ln in lines if ln.strip()] + lines = lines[-limit:] + + click.echo(f"[offsec] last {len(lines)} receipts") + click.echo() + click.echo(f"{'TIMESTAMP':20} {'TYPE':28} {'INCIDENT':20} {'DETAIL'}") + click.echo("-" * 90) + + for ln in lines: + try: + obj = json.loads(ln) + except json.JSONDecodeError: + continue + body = obj.get("body", {}) + inc = body.get("incident_id") or "" + if incident_id and inc != incident_id: + continue + ts = obj.get("timestamp", "")[:19] + rtype = obj.get("type", "") + # Extract a useful detail + detail = "" + if "status" in body: + detail = f"status={body['status']}" + elif "new_value" in body: + detail = f"{body.get('field', '?')}={body['new_value']}" + elif "resolution" in body: + detail = f"resolution={body['resolution']}" + elif "action_id" in body: + detail = f"action={body['action_id']}" + elif "snapshot_id" in body: + detail = f"snap={body['snapshot_id']}" + + click.echo(f"{ts:20} {rtype:28} {inc:20} {detail}") + + +# ============================================================================ +# OffSec Phase 2: Vulnerability Commands +# ============================================================================ + + +@offsec.group("vuln") +def offsec_vuln(): + """Vulnerability discovery and tracking.""" + pass + + +@offsec_vuln.command("report") +@click.option( + "--id", + "vuln_id", + required=False, + help="Vulnerability ID (auto-generated if not provided)", +) +@click.option("--title", required=True, help="Vulnerability title") +@click.option( + "--severity", + type=click.Choice(["critical", "high", "medium", "low", "info"]), + required=True, +) +@click.option("--description", required=False, help="Detailed description") +@click.option("--cvss-score", type=float, required=False, help="CVSS score (0.0-10.0)") +@click.option("--cvss-vector", required=False, help="CVSS 3.1 vector string") +@click.option("--asset", required=True, help="Affected asset (DID or hostname)") +@click.option("--component", required=False, help="Affected software component") +@click.option("--version", "affected_version", required=False, help="Affected 
version") +@click.option("--cve", "cve_id", required=False, help="CVE ID if known") +@click.option("--cwe", "cwe_id", required=False, help="CWE ID if known") +@click.option( + "--method", + type=click.Choice(["pentest", "scan", "manual", "bug_bounty", "threat_intel"]), + default="manual", +) +@click.option( + "--engagement", "engagement_id", required=False, help="Link to red team exercise ID" +) +@click.option( + "--incident", "incident_id", required=False, help="Link to incident ID if exploited" +) +@click.option("--actor-did", required=False, help="Override actor DID") +def vuln_report( + vuln_id: Optional[str], + title: str, + severity: str, + description: Optional[str], + cvss_score: Optional[float], + cvss_vector: Optional[str], + asset: str, + component: Optional[str], + affected_version: Optional[str], + cve_id: Optional[str], + cwe_id: Optional[str], + method: str, + engagement_id: Optional[str], + incident_id: Optional[str], + actor_did: Optional[str], +) -> None: + """Report a new vulnerability discovery.""" + actor = _resolve_actor_did(actor_did) + + if not vuln_id: + vuln_id = _generate_offsec_id("VULN") + + if cvss_vector and not _validate_cvss_vector(cvss_vector): + raise click.UsageError("Invalid CVSS vector format (expected CVSS:3.1/...)") + + asset_did, asset_hostname = _resolve_asset_identifier(asset) + + body = { + "vuln_id": vuln_id, + "title": title, + "description": description, + "severity": severity, + "cvss_score": cvss_score, + "cvss_vector": cvss_vector, + "affected_asset": asset_did, + "affected_hostname": asset_hostname, + "affected_component": component, + "affected_version": affected_version, + "cve_id": cve_id, + "cwe_id": cwe_id, + "discovery_method": method, + "discovered_by": actor, + "engagement_id": engagement_id, + "incident_id": incident_id, + "status": "reported", + "remediation_status": "pending", + "proof_of_concept": False, + "exploited_in_wild": False, + } + + tags = ["offsec", "vulnerability", vuln_id, severity, actor] + if cve_id: + tags.append(cve_id) + + receipt = _emit_offsec_receipt(VAULTMESH_ROOT, "offsec_vuln_discovery", body, tags) + click.echo(f"[offsec] vulnerability reported: {vuln_id}") + click.echo(f" severity: {severity}") + click.echo(f" asset: {asset_did}") + if cve_id: + click.echo(f" cve: {cve_id}") + click.echo(f" root_hash: {receipt['header']['root_hash']}") + + +@offsec_vuln.command("update") +@click.option("--id", "vuln_id", required=True, help="Vulnerability ID") +@click.option( + "--status", + type=click.Choice( + ["reported", "confirmed", "remediated", "accepted", "false_positive"] + ), + required=False, +) +@click.option( + "--remediation-status", + type=click.Choice(["pending", "in_progress", "completed", "wont_fix"]), + required=False, +) +@click.option("--poc/--no-poc", "has_poc", default=None, help="Proof of concept exists") +@click.option( + "--exploited/--not-exploited", + "exploited", + default=None, + help="Exploited in the wild", +) +@click.option("--actor-did", required=False, help="Override actor DID") +def vuln_update( + vuln_id: str, + status: Optional[str], + remediation_status: Optional[str], + has_poc: Optional[bool], + exploited: Optional[bool], + actor_did: Optional[str], +) -> None: + """Update vulnerability status.""" + actor = _resolve_actor_did(actor_did) + + if not any( + [status, remediation_status, has_poc is not None, exploited is not None] + ): + raise click.UsageError("At least one field must be updated") + + body = { + "vuln_id": vuln_id, + "updated_by": actor, + "status": status, + 
"remediation_status": remediation_status, + "proof_of_concept": has_poc, + "exploited_in_wild": exploited, + } + # Remove None values + body = {k: v for k, v in body.items() if v is not None} + + tags = ["offsec", "vulnerability", "update", vuln_id, actor] + + receipt = _emit_offsec_receipt(VAULTMESH_ROOT, "offsec_vuln_update", body, tags) + click.echo(f"[offsec] vulnerability updated: {vuln_id}") + if status: + click.echo(f" status: {status}") + if remediation_status: + click.echo(f" remediation: {remediation_status}") + click.echo(f" root_hash: {receipt['header']['root_hash']}") + + +@offsec_vuln.command("link-incident") +@click.option("--id", "vuln_id", required=True, help="Vulnerability ID") +@click.option("--incident", "incident_id", required=True, help="Incident ID to link") +@click.option("--actor-did", required=False, help="Override actor DID") +def vuln_link_incident( + vuln_id: str, incident_id: str, actor_did: Optional[str] +) -> None: + """Link a vulnerability to an incident.""" + actor = _resolve_actor_did(actor_did) + + body = { + "vuln_id": vuln_id, + "incident_id": incident_id, + "linked_by": actor, + } + tags = ["offsec", "vulnerability", "link", vuln_id, incident_id, actor] + + receipt = _emit_offsec_receipt(VAULTMESH_ROOT, "offsec_vuln_link", body, tags) + click.echo(f"[offsec] vulnerability linked: {vuln_id} <-> {incident_id}") + click.echo(f" root_hash: {receipt['header']['root_hash']}") + + +@offsec_vuln.command("list") +@click.option("--severity", required=False, help="Filter by severity (comma-separated)") +@click.option( + "--remediation-status", required=False, help="Filter by remediation status" +) +@click.option("--limit", type=int, default=20, show_default=True) +def vuln_list( + severity: Optional[str], remediation_status: Optional[str], limit: int +) -> None: + """List vulnerabilities.""" + scroll_path = VAULTMESH_ROOT / "receipts" / "offsec" / "offsec_events.jsonl" + + if not scroll_path.exists(): + click.echo("[offsec] no vulnerabilities yet") + return + + severity_filter = set(_parse_comma_list(severity)) if severity else None + vulns = {} + + for line in scroll_path.read_text(encoding="utf-8").splitlines(): + if not line.strip(): + continue + try: + obj = json.loads(line) + if obj.get("type") == "offsec_vuln_discovery": + body = obj.get("body", {}) + vid = body.get("vuln_id") + if vid: + vulns[vid] = { + "vuln_id": vid, + "title": body.get("title", ""), + "severity": body.get("severity", ""), + "status": body.get("status", ""), + "remediation_status": body.get("remediation_status", ""), + "cve_id": body.get("cve_id", ""), + "asset": body.get("affected_asset", ""), + "timestamp": obj.get("timestamp", ""), + } + elif obj.get("type") == "offsec_vuln_update": + body = obj.get("body", {}) + vid = body.get("vuln_id") + if vid and vid in vulns: + if body.get("status"): + vulns[vid]["status"] = body["status"] + if body.get("remediation_status"): + vulns[vid]["remediation_status"] = body["remediation_status"] + except Exception: + continue + + # Apply filters + results = list(vulns.values()) + if severity_filter: + results = [v for v in results if v["severity"] in severity_filter] + if remediation_status: + results = [v for v in results if v["remediation_status"] == remediation_status] + + results = results[-limit:] + + click.echo(f"[offsec] {len(results)} vulnerabilities") + click.echo() + click.echo( + f"{'VULN_ID':20} {'SEVERITY':10} {'STATUS':15} {'REMEDIATION':12} {'CVE':18} {'TITLE'}" + ) + click.echo("-" * 100) + + for v in results: + click.echo( + 
f"{v['vuln_id']:20} {v['severity']:10} {v['status']:15} {v['remediation_status']:12} {v['cve_id'] or '-':18} {v['title'][:30]}" + ) + + +# ============================================================================ +# OffSec Phase 2: Red Team Commands +# ============================================================================ + + +@offsec.group("redteam") +def offsec_redteam(): + """Red team exercise management.""" + pass + + +@offsec_redteam.command("start") +@click.option( + "--id", + "exercise_id", + required=False, + help="Exercise ID (auto-generated if not provided)", +) +@click.option("--title", required=True, help="Exercise title") +@click.option( + "--type", + "engagement_type", + type=click.Choice( + ["external_pentest", "internal_pentest", "red_team", "purple_team", "tabletop"] + ), + required=True, +) +@click.option("--scope-in", required=False, help="In-scope targets (comma-separated)") +@click.option( + "--scope-out", required=False, help="Out-of-scope targets (comma-separated)" +) +@click.option("--roe", required=False, help="Rules of engagement") +@click.option("--team", required=False, help="Team member DIDs (comma-separated)") +@click.option("--authorized-by", required=False, help="DID of authorizing party") +@click.option("--actor-did", required=False, help="Override actor DID") +def redteam_start( + exercise_id: Optional[str], + title: str, + engagement_type: str, + scope_in: Optional[str], + scope_out: Optional[str], + roe: Optional[str], + team: Optional[str], + authorized_by: Optional[str], + actor_did: Optional[str], +) -> None: + """Start a new red team exercise.""" + actor = _resolve_actor_did(actor_did) + + if not exercise_id: + exercise_id = _generate_offsec_id("RT") + + now = ( + datetime.now(timezone.utc).isoformat(timespec="seconds").replace("+00:00", "Z") + ) + + body = { + "exercise_id": exercise_id, + "title": title, + "engagement_type": engagement_type, + "scope": { + "in_scope": _parse_comma_list(scope_in), + "out_of_scope": _parse_comma_list(scope_out), + "rules_of_engagement": roe, + }, + "team_dids": _parse_comma_list(team), + "authorized_by": authorized_by, + "executed_by": actor, + "started_at": now, + "ended_at": None, + "status": "in_progress", + "tools_used": [], + "findings_count": 0, + "severity_breakdown": { + "critical": 0, + "high": 0, + "medium": 0, + "low": 0, + "info": 0, + }, + "vulns_created": [], + "incidents_triggered": [], + } + + tags = ["offsec", "redteam", "start", exercise_id, engagement_type, actor] + + receipt = _emit_offsec_receipt(VAULTMESH_ROOT, "offsec_redteam_start", body, tags) + click.echo(f"[offsec] red team exercise started: {exercise_id}") + click.echo(f" type: {engagement_type}") + click.echo(f" title: {title}") + click.echo(f" root_hash: {receipt['header']['root_hash']}") + + +@offsec_redteam.command("update") +@click.option("--id", "exercise_id", required=True, help="Exercise ID") +@click.option("--tools-used", required=False, help="Tools used (comma-separated)") +@click.option("--findings", type=int, required=False, help="Current findings count") +@click.option("--severity-breakdown", required=False, help="Severity breakdown JSON") +@click.option("--actor-did", required=False, help="Override actor DID") +def redteam_update( + exercise_id: str, + tools_used: Optional[str], + findings: Optional[int], + severity_breakdown: Optional[str], + actor_did: Optional[str], +) -> None: + """Update red team exercise progress.""" + actor = _resolve_actor_did(actor_did) + + body = { + "exercise_id": exercise_id, + 
"updated_by": actor, + "status": "in_progress", + } + + if tools_used: + body["tools_used"] = _parse_comma_list(tools_used) + if findings is not None: + body["findings_count"] = findings + if severity_breakdown: + body["severity_breakdown"] = _parse_severity_breakdown(severity_breakdown) + + tags = ["offsec", "redteam", "update", exercise_id, actor] + + receipt = _emit_offsec_receipt(VAULTMESH_ROOT, "offsec_redteam_update", body, tags) + click.echo(f"[offsec] red team exercise updated: {exercise_id}") + if findings is not None: + click.echo(f" findings: {findings}") + click.echo(f" root_hash: {receipt['header']['root_hash']}") + + +@offsec_redteam.command("complete") +@click.option("--id", "exercise_id", required=True, help="Exercise ID") +@click.option("--tools-used", required=False, help="Tools used (comma-separated)") +@click.option("--findings", type=int, required=True, help="Total findings count") +@click.option("--severity-breakdown", required=True, help="Severity breakdown JSON") +@click.option( + "--vulns-created", + required=False, + help="Created vulnerability IDs (comma-separated)", +) +@click.option( + "--incidents-triggered", + required=False, + help="Triggered incident IDs (comma-separated)", +) +@click.option("--report-path", required=False, help="Path to final report") +@click.option("--actor-did", required=False, help="Override actor DID") +def redteam_complete( + exercise_id: str, + tools_used: Optional[str], + findings: int, + severity_breakdown: str, + vulns_created: Optional[str], + incidents_triggered: Optional[str], + report_path: Optional[str], + actor_did: Optional[str], +) -> None: + """Complete a red team exercise.""" + actor = _resolve_actor_did(actor_did) + + now = ( + datetime.now(timezone.utc).isoformat(timespec="seconds").replace("+00:00", "Z") + ) + + body = { + "exercise_id": exercise_id, + "executed_by": actor, + "ended_at": now, + "status": "completed", + "tools_used": _parse_comma_list(tools_used), + "findings_count": findings, + "severity_breakdown": _parse_severity_breakdown(severity_breakdown), + "vulns_created": _parse_comma_list(vulns_created), + "incidents_triggered": _parse_comma_list(incidents_triggered), + } + + if report_path: + rp = Path(report_path) + if rp.exists(): + hasher = blake3.blake3() + with rp.open("rb") as f: + for chunk in iter(lambda: f.read(65536), b""): + hasher.update(chunk) + body["report_hash"] = "blake3:" + hasher.hexdigest() + body["report_path"] = str(rp) + + tags = ["offsec", "redteam", "complete", exercise_id, actor] + + receipt = _emit_offsec_receipt( + VAULTMESH_ROOT, "offsec_redteam_complete", body, tags + ) + click.echo(f"[offsec] red team exercise completed: {exercise_id}") + click.echo(f" findings: {findings}") + click.echo(f" vulns: {len(_parse_comma_list(vulns_created))}") + click.echo(f" root_hash: {receipt['header']['root_hash']}") + + +@offsec_redteam.command("list") +@click.option( + "--status", + type=click.Choice(["in_progress", "completed", "cancelled"]), + required=False, +) +@click.option("--limit", type=int, default=10, show_default=True) +def redteam_list(status: Optional[str], limit: int) -> None: + """List red team exercises.""" + scroll_path = VAULTMESH_ROOT / "receipts" / "offsec" / "offsec_events.jsonl" + + if not scroll_path.exists(): + click.echo("[offsec] no red team exercises yet") + return + + exercises = {} + + for line in scroll_path.read_text(encoding="utf-8").splitlines(): + if not line.strip(): + continue + try: + obj = json.loads(line) + rtype = obj.get("type", "") + if 
rtype.startswith("offsec_redteam"): + body = obj.get("body", {}) + eid = body.get("exercise_id") + if eid: + if eid not in exercises: + exercises[eid] = { + "exercise_id": eid, + "title": body.get("title", ""), + "type": body.get("engagement_type", ""), + "status": "in_progress", + "findings": 0, + "timestamp": obj.get("timestamp", ""), + } + if body.get("status"): + exercises[eid]["status"] = body["status"] + if body.get("findings_count"): + exercises[eid]["findings"] = body["findings_count"] + if body.get("title"): + exercises[eid]["title"] = body["title"] + if body.get("engagement_type"): + exercises[eid]["type"] = body["engagement_type"] + except Exception: + continue + + results = list(exercises.values()) + if status: + results = [e for e in results if e["status"] == status] + results = results[-limit:] + + click.echo(f"[offsec] {len(results)} red team exercises") + click.echo() + click.echo( + f"{'EXERCISE_ID':20} {'TYPE':18} {'STATUS':12} {'FINDINGS':10} {'TITLE'}" + ) + click.echo("-" * 90) + + for e in results: + click.echo( + f"{e['exercise_id']:20} {e['type']:18} {e['status']:12} {e['findings']:<10} {e['title'][:30]}" + ) + + +# ============================================================================ +# OffSec Phase 2: Threat Intel Commands +# ============================================================================ + + +@offsec.group("intel") +def offsec_intel(): + """Threat intelligence management.""" + pass + + +@offsec_intel.command("ingest") +@click.option( + "--id", "intel_id", required=False, help="Intel ID (auto-generated if not provided)" +) +@click.option("--title", required=True, help="Intelligence title") +@click.option( + "--type", + "intel_type", + type=click.Choice(["cve", "advisory", "ioc", "ttp", "campaign"]), + required=True, +) +@click.option( + "--source", required=True, help="Source (NVD, CISA, vendor, MISP, OTX, custom)" +) +@click.option("--source-url", required=False, help="Source URL") +@click.option("--cve", "cve_id", required=False, help="CVE ID") +@click.option("--advisory-id", required=False, help="Advisory ID") +@click.option("--cvss-score", type=float, required=False, help="CVSS score") +@click.option("--cvss-vector", required=False, help="CVSS vector") +@click.option( + "--affected-products", required=False, help="Affected products JSON array" +) +@click.option( + "--priority", + type=click.Choice(["critical", "high", "medium", "low", "info"]), + default="medium", +) +@click.option( + "--action-required/--no-action", default=False, help="Action required flag" +) +@click.option( + "--mitre", required=False, help="MITRE ATT&CK techniques (comma-separated)" +) +@click.option("--actor-did", required=False, help="Override actor DID") +def intel_ingest( + intel_id: Optional[str], + title: str, + intel_type: str, + source: str, + source_url: Optional[str], + cve_id: Optional[str], + advisory_id: Optional[str], + cvss_score: Optional[float], + cvss_vector: Optional[str], + affected_products: Optional[str], + priority: str, + action_required: bool, + mitre: Optional[str], + actor_did: Optional[str], +) -> None: + """Ingest external threat intelligence.""" + actor = _resolve_actor_did(actor_did) + + if not intel_id: + intel_id = _generate_offsec_id("INTEL") + + now = ( + datetime.now(timezone.utc).isoformat(timespec="seconds").replace("+00:00", "Z") + ) + + body = { + "intel_id": intel_id, + "title": title, + "intel_type": intel_type, + "source": source, + "source_url": source_url, + "cve_id": cve_id, + "advisory_id": advisory_id, + "cvss_score": 
cvss_score, + "cvss_vector": cvss_vector, + "affected_products": _parse_affected_products(affected_products) + if affected_products + else [], + "ingested_by": actor, + "ingested_at": now, + "action_required": action_required, + "priority": priority, + "matched_assets": [], + "vulns_created": [], + "indicators": [], + "mitre_techniques": _parse_comma_list(mitre), + } + + tags = ["offsec", "threat_intel", intel_id, source, intel_type, actor] + if cve_id: + tags.append(cve_id) + if action_required: + tags.append("action_required") + + receipt = _emit_offsec_receipt(VAULTMESH_ROOT, "offsec_threat_intel", body, tags) + click.echo(f"[offsec] threat intel ingested: {intel_id}") + click.echo(f" type: {intel_type}") + click.echo(f" source: {source}") + click.echo(f" priority: {priority}") + if cve_id: + click.echo(f" cve: {cve_id}") + click.echo(f" root_hash: {receipt['header']['root_hash']}") + + +@offsec_intel.command("add-ioc") +@click.option("--id", "intel_id", required=True, help="Intel ID to add IOC to") +@click.option( + "--ioc-type", required=True, help="IOC type (ip, domain, hash, url, email)" +) +@click.option("--ioc-value", required=True, help="IOC value") +@click.option("--context", required=False, help="Context/description for the IOC") +@click.option("--actor-did", required=False, help="Override actor DID") +def intel_add_ioc( + intel_id: str, + ioc_type: str, + ioc_value: str, + context: Optional[str], + actor_did: Optional[str], +) -> None: + """Add an IOC to a threat intel record.""" + actor = _resolve_actor_did(actor_did) + + body = { + "intel_id": intel_id, + "added_by": actor, + "indicator": { + "type": ioc_type, + "value": ioc_value, + "context": context, + }, + } + + tags = ["offsec", "threat_intel", "ioc", intel_id, ioc_type, actor] + + receipt = _emit_offsec_receipt(VAULTMESH_ROOT, "offsec_intel_ioc_add", body, tags) + click.echo(f"[offsec] IOC added to {intel_id}") + click.echo(f" type: {ioc_type}") + click.echo(f" value: {ioc_value}") + click.echo(f" root_hash: {receipt['header']['root_hash']}") + + +@offsec_intel.command("match") +@click.option( + "--id", "intel_id", required=True, help="Intel ID to match against assets" +) +@click.option( + "--auto-create-vulns/--no-vulns", + default=False, + help="Auto-create vulnerabilities for matches", +) +@click.option("--actor-did", required=False, help="Override actor DID") +def intel_match( + intel_id: str, auto_create_vulns: bool, actor_did: Optional[str] +) -> None: + """Match threat intel against mesh assets.""" + actor = _resolve_actor_did(actor_did) + + # This is a placeholder - real implementation would query mesh inventory + matched_assets: list = [] + + body = { + "intel_id": intel_id, + "matched_by": actor, + "matched_assets": matched_assets, + "vulns_created": [], + "auto_create_vulns": auto_create_vulns, + } + + tags = ["offsec", "threat_intel", "match", intel_id, actor] + + receipt = _emit_offsec_receipt(VAULTMESH_ROOT, "offsec_intel_match", body, tags) + click.echo(f"[offsec] intel matched: {intel_id}") + click.echo(f" matched_assets: {len(matched_assets)}") + click.echo(f" root_hash: {receipt['header']['root_hash']}") + click.echo() + click.echo("Note: Asset matching requires Mesh engine integration (Phase 3)") + + +@offsec_intel.command("search") +@click.option("--cve", "cve_id", required=False, help="Search by CVE ID") +@click.option("--source", required=False, help="Filter by source (comma-separated)") +@click.option("--limit", type=int, default=10, show_default=True) +def intel_search(cve_id: Optional[str], 
source: Optional[str], limit: int) -> None: + """Search threat intel records.""" + scroll_path = VAULTMESH_ROOT / "receipts" / "offsec" / "offsec_events.jsonl" + + if not scroll_path.exists(): + click.echo("[offsec] no threat intel yet") + return + + source_filter = set(_parse_comma_list(source)) if source else None + results = [] + + for line in scroll_path.read_text(encoding="utf-8").splitlines(): + if not line.strip(): + continue + try: + obj = json.loads(line) + if obj.get("type") == "offsec_threat_intel": + body = obj.get("body", {}) + if cve_id and body.get("cve_id") != cve_id: + continue + if source_filter and body.get("source") not in source_filter: + continue + results.append( + { + "intel_id": body.get("intel_id", ""), + "title": body.get("title", ""), + "type": body.get("intel_type", ""), + "source": body.get("source", ""), + "cve_id": body.get("cve_id", ""), + "priority": body.get("priority", ""), + "action_required": body.get("action_required", False), + } + ) + except Exception: + continue + + results = results[-limit:] + + click.echo(f"[offsec] {len(results)} intel records found") + click.echo() + click.echo( + f"{'INTEL_ID':20} {'TYPE':10} {'SOURCE':10} {'CVE':18} {'PRIORITY':10} {'ACTION'}" + ) + click.echo("-" * 90) + + for r in results: + action = "YES" if r["action_required"] else "-" + click.echo( + f"{r['intel_id']:20} {r['type']:10} {r['source']:10} {r['cve_id'] or '-':18} {r['priority']:10} {action}" + ) + + +@offsec_intel.command("list") +@click.option("--priority", required=False, help="Filter by priority (comma-separated)") +@click.option( + "--action-required/--all", default=False, help="Only show action required" +) +@click.option("--limit", type=int, default=20, show_default=True) +def intel_list(priority: Optional[str], action_required: bool, limit: int) -> None: + """List threat intel records.""" + scroll_path = VAULTMESH_ROOT / "receipts" / "offsec" / "offsec_events.jsonl" + + if not scroll_path.exists(): + click.echo("[offsec] no threat intel yet") + return + + priority_filter = set(_parse_comma_list(priority)) if priority else None + results = [] + + for line in scroll_path.read_text(encoding="utf-8").splitlines(): + if not line.strip(): + continue + try: + obj = json.loads(line) + if obj.get("type") == "offsec_threat_intel": + body = obj.get("body", {}) + if priority_filter and body.get("priority") not in priority_filter: + continue + if action_required and not body.get("action_required"): + continue + results.append( + { + "intel_id": body.get("intel_id", ""), + "title": body.get("title", ""), + "type": body.get("intel_type", ""), + "source": body.get("source", ""), + "priority": body.get("priority", ""), + "action_required": body.get("action_required", False), + "timestamp": obj.get("timestamp", ""), + } + ) + except Exception: + continue + + results = results[-limit:] + + click.echo(f"[offsec] {len(results)} intel records") + click.echo() + click.echo( + f"{'INTEL_ID':20} {'TYPE':10} {'SOURCE':10} {'PRIORITY':10} {'ACTION':8} {'TITLE'}" + ) + click.echo("-" * 100) + + for r in results: + action = "YES" if r["action_required"] else "-" + click.echo( + f"{r['intel_id']:20} {r['type']:10} {r['source']:10} {r['priority']:10} {action:8} {r['title'][:30]}" + ) + + +# ============================================================================ +# OffSec Phase 2: Enhanced Status & Summary +# ============================================================================ + + +@offsec.command("summary") +def offsec_summary() -> None: + """Show OffSec security 
posture summary.""" + scroll_path = VAULTMESH_ROOT / "receipts" / "offsec" / "offsec_events.jsonl" + + if not scroll_path.exists(): + click.echo("[offsec] no receipts yet") + return + + # Track latest state per ID + incident_states: dict = {} + vuln_states: dict = {} + exercise_states: dict = {} + intel_stats = {"total": 0, "action_required": 0, "by_source": {}} + + for line in scroll_path.read_text(encoding="utf-8").splitlines(): + if not line.strip(): + continue + try: + obj = json.loads(line) + body = obj.get("body", {}) + rtype = obj.get("type", "") + + if rtype.startswith("offsec_incident"): + inc_id = body.get("incident_id") + if inc_id: + if inc_id not in incident_states: + incident_states[inc_id] = {"severity": None, "closed": False} + if body.get("severity"): + incident_states[inc_id]["severity"] = body["severity"] + if rtype == "offsec_incident_close": + incident_states[inc_id]["closed"] = True + + elif rtype == "offsec_vuln_discovery": + vuln_id = body.get("vuln_id") + if vuln_id: + vuln_states[vuln_id] = { + "severity": body.get("severity"), + "status": body.get("remediation_status", "pending"), + } + + elif rtype == "offsec_vuln_update": + vuln_id = body.get("vuln_id") + if vuln_id and vuln_id in vuln_states: + if body.get("remediation_status"): + vuln_states[vuln_id]["status"] = body["remediation_status"] + + elif rtype.startswith("offsec_redteam"): + ex_id = body.get("exercise_id") + if ex_id: + if ex_id not in exercise_states: + exercise_states[ex_id] = { + "status": "in_progress", + "findings": 0, + } + if body.get("status"): + exercise_states[ex_id]["status"] = body["status"] + if body.get("findings_count"): + exercise_states[ex_id]["findings"] = body["findings_count"] + + elif rtype == "offsec_threat_intel": + intel_stats["total"] += 1 + if body.get("action_required"): + intel_stats["action_required"] += 1 + src = body.get("source", "unknown") + intel_stats["by_source"][src] = intel_stats["by_source"].get(src, 0) + 1 + + except Exception: + pass + + # Compute aggregates + incidents_open = sum(1 for i in incident_states.values() if not i["closed"]) + incidents_closed = sum(1 for i in incident_states.values() if i["closed"]) + incidents_by_sev: dict = {} + for i in incident_states.values(): + sev = i.get("severity") or "unknown" + incidents_by_sev[sev] = incidents_by_sev.get(sev, 0) + 1 + + vulns_open = sum( + 1 + for v in vuln_states.values() + if v["status"] not in ("completed", "remediated") + ) + vulns_remediated = sum( + 1 for v in vuln_states.values() if v["status"] in ("completed", "remediated") + ) + vulns_by_sev: dict = {} + for v in vuln_states.values(): + sev = v.get("severity") or "unknown" + vulns_by_sev[sev] = vulns_by_sev.get(sev, 0) + 1 + + redteam_active = sum( + 1 for e in exercise_states.values() if e["status"] != "completed" + ) + redteam_completed = sum( + 1 for e in exercise_states.values() if e["status"] == "completed" + ) + total_findings = sum(e["findings"] for e in exercise_states.values()) + + # Display + click.echo("OffSec Security Posture Summary") + click.echo("=" * 40) + click.echo() + click.echo(f"INCIDENTS: {incidents_open} open, {incidents_closed} closed") + for sev in ["critical", "high", "medium", "low"]: + if sev in incidents_by_sev: + click.echo(f" {sev}: {incidents_by_sev[sev]}") + click.echo() + click.echo(f"VULNERABILITIES: {vulns_open} open, {vulns_remediated} remediated") + for sev in ["critical", "high", "medium", "low", "info"]: + if sev in vulns_by_sev: + click.echo(f" {sev}: {vulns_by_sev[sev]}") + click.echo() + 
click.echo(f"RED TEAM: {redteam_active} active, {redteam_completed} completed") + click.echo(f" total findings: {total_findings}") + click.echo() + click.echo( + f"THREAT INTEL: {intel_stats['total']} total, {intel_stats['action_required']} action required" + ) + for src, cnt in sorted(intel_stats["by_source"].items()): + click.echo(f" {src}: {cnt}") + + +# ============================================================================ +# Console Commands +# ============================================================================ + + +@cli.group() +def console() -> None: + """Console Engine - AI agent session management.""" + pass + + +@console.command("receipts") +@click.option("--limit", default=20, show_default=True, type=int) +@click.option("--type", "receipt_type", default=None, help="Filter by receipt type") +def console_receipts(limit: int, receipt_type: Optional[str]) -> None: + """Show recent Console receipts.""" + from engines.console.receipts import get_emitter + + emitter = get_emitter(str(VAULTMESH_ROOT)) + events_path = Path(emitter.events_path) + + if not events_path.exists(): + click.echo("No Console scroll found.") + return + + lines = events_path.read_text(encoding="utf-8").splitlines() + lines = [ln for ln in lines if ln.strip()] + + records = [] + for raw in reversed(lines): + try: + r = json.loads(raw) + if receipt_type and r.get("type") != receipt_type: + continue + records.append(r) + if len(records) >= limit: + break + except Exception: + continue + + for r in reversed(records): + click.echo(json.dumps(r, separators=(",", ":"))) + + +@console.command("sessions") +def console_sessions() -> None: + """List known Console sessions.""" + from engines.console.receipts import get_emitter + + emitter = get_emitter(str(VAULTMESH_ROOT)) + events_path = Path(emitter.events_path) + + if not events_path.exists(): + click.echo("No Console scroll found.") + return + + sessions: dict = {} + for line in events_path.read_text(encoding="utf-8").splitlines(): + if not line.strip(): + continue + try: + r = json.loads(line) + sid = r.get("session_id") + if not sid: + continue + t = r.get("type") + if sid not in sessions: + sessions[sid] = { + "session_id": sid, + "started": None, + "ended": None, + "events": 0, + } + sessions[sid]["events"] += 1 + if t == "console_session_start": + sessions[sid]["started"] = r["ts"] + elif t == "console_session_end": + sessions[sid]["ended"] = r["ts"] + except Exception: + continue + + click.echo("Console Sessions") + click.echo("================") + for s in sessions.values(): + status = "ended" if s["ended"] else "active" + click.echo(f" {s['session_id']}: {status} ({s['events']} events)") + + +@console.command("history") +@click.option("--session", required=True, help="Session ID to show history for") +def console_history(session: str) -> None: + """Show history for a specific session.""" + from engines.console.receipts import get_emitter + + emitter = get_emitter(str(VAULTMESH_ROOT)) + events_path = Path(emitter.events_path) + + if not events_path.exists(): + click.echo("No Console scroll found.") + return + + click.echo(f"History for session: {session}") + click.echo("=" * 60) + + for line in events_path.read_text(encoding="utf-8").splitlines(): + if not line.strip(): + continue + try: + r = json.loads(line) + if r.get("session_id") == session: + click.echo(json.dumps(r, separators=(",", ":"))) + except Exception: + continue + + +@console.command("root") +def console_root() -> None: + """Show Console scroll Merkle root info.""" + from 
engines.console.receipts import get_emitter + + emitter = get_emitter(str(VAULTMESH_ROOT)) + info = emitter.get_root_info() + + click.echo("Console Scroll Root") + click.echo("===================") + click.echo(f" engine_id: {info.get('engine_id', 'N/A')}") + merkle_root = info.get("merkle_root", "0") + click.echo( + f" merkle_root: {merkle_root[:32]}..." + if len(merkle_root) > 32 + else f" merkle_root: {merkle_root}" + ) + click.echo(f" events: {info.get('events', 0)}") + click.echo(f" updated_at: {info.get('updated_at', 'N/A')}") + + +@console.command("approvals") +@click.option("--session", default=None, help="Filter by session ID") +def console_approvals(session: Optional[str]) -> None: + """List pending approval requests.""" + from engines.console.approvals import get_approval_manager + + manager = get_approval_manager(str(VAULTMESH_ROOT)) + pending = manager.list_pending(session) + + if not pending: + click.echo("No pending approvals.") + return + + click.echo("Pending Approvals") + click.echo("=================") + for r in pending: + click.echo(f" {r.approval_id}: {r.action_type}") + click.echo(f" session: {r.session_id}") + click.echo(f" approvers: {', '.join(r.approvers)}") + click.echo(f" expires: {r.expires_at}") + click.echo("") + + +@console.command("approve") +@click.argument("approval_id") +@click.option("--reason", default="", help="Reason for approval") +@click.option("--actor-did", required=False, help="Override actor DID") +def console_approve(approval_id: str, reason: str, actor_did: Optional[str]) -> None: + """Approve a pending action.""" + from engines.console.approvals import get_approval_manager + + actor = actor_did or os.environ.get("VAULTMESH_ACTOR_DID", "did:vm:human:unknown") + manager = get_approval_manager(str(VAULTMESH_ROOT)) + + try: + success = manager.decide( + approval_id, + approved=True, + approver=actor, + reason=reason, + ) + if success: + click.echo(f"[console] approved: {approval_id}") + else: + click.echo(f"[console] approval not found: {approval_id}") + except PermissionError as e: + click.echo(f"[console] permission denied: {e}", err=True) + except KeyError: + click.echo(f"[console] approval not found: {approval_id}", err=True) + + +@console.command("reject") +@click.argument("approval_id") +@click.option("--reason", required=True, help="Reason for rejection") +@click.option("--actor-did", required=False, help="Override actor DID") +def console_reject(approval_id: str, reason: str, actor_did: Optional[str]) -> None: + """Reject a pending action.""" + from engines.console.approvals import get_approval_manager + + actor = actor_did or os.environ.get("VAULTMESH_ACTOR_DID", "did:vm:human:unknown") + manager = get_approval_manager(str(VAULTMESH_ROOT)) + + try: + success = manager.decide( + approval_id, + approved=False, + approver=actor, + reason=reason, + ) + if success: + click.echo(f"[console] rejected: {approval_id}") + else: + click.echo(f"[console] approval not found: {approval_id}") + except PermissionError as e: + click.echo(f"[console] permission denied: {e}", err=True) + except KeyError: + click.echo(f"[console] approval not found: {approval_id}", err=True) + + +@console.command("story") +@click.argument("session_id") +@click.option( + "--format", + "fmt", + default="narrative", + type=click.Choice(["narrative", "json", "timeline"]), + help="Output format", +) +def console_story(session_id: str, fmt: str) -> None: + """ + Generate a human-readable narrative of a Console session. 
+ + Reads all receipts for a session and presents them as a coherent story, + showing the sequence of events from start to finish. + """ + from engines.console.receipts import get_emitter + + emitter = get_emitter(str(VAULTMESH_ROOT)) + events_path = Path(emitter.events_path) + + if not events_path.exists(): + click.echo(f"No Console scroll found at {events_path}") + return + + # Collect all receipts for this session + receipts = [] + for line in events_path.read_text(encoding="utf-8").splitlines(): + if not line.strip(): + continue + try: + r = json.loads(line) + if r.get("session_id") == session_id: + receipts.append(r) + except json.JSONDecodeError: + continue + + if not receipts: + click.echo(f"No receipts found for session: {session_id}") + return + + if fmt == "json": + click.echo(json.dumps(receipts, indent=2)) + return + + if fmt == "timeline": + click.echo(f"Timeline: {session_id}") + click.echo("=" * 60) + for r in receipts: + ts = r.get("ts", "")[:19] # Trim to seconds + rtype = r.get("type", "unknown") + payload = r.get("payload", {}) + + # One-line summary + if rtype == "console_session_start": + agent = payload.get("agent_type", "unknown") + caller = payload.get("caller", "unknown") + click.echo(f"{ts} START {agent} by {caller}") + elif rtype == "console_session_end": + reason = payload.get("exit_reason", "unknown") + click.echo(f"{ts} END {reason}") + elif rtype == "console_command": + cmd = payload.get("command", "?") + exit_code = payload.get("exit_code", "?") + click.echo(f"{ts} CMD {cmd} (exit: {exit_code})") + elif rtype == "console_file_edit": + path = payload.get("file_path", "?") + edit_type = payload.get("edit_type", "?") + click.echo(f"{ts} EDIT {edit_type} {path}") + elif rtype == "console_approval_request": + action = payload.get("action_type", "?") + aid = payload.get("approval_id", "?") + click.echo(f"{ts} REQUEST {action} [{aid}]") + elif rtype == "console_approval": + action = payload.get("action_type", "?") + approved = "APPROVED" if payload.get("approved") else "REJECTED" + approver = payload.get("approver", "?") + click.echo(f"{ts} {approved} {action} by {approver}") + else: + click.echo(f"{ts} {rtype}") + return + + # Default: narrative format + click.echo("") + click.echo("=" * 70) + click.echo(f" CONSOLE SESSION STORY: {session_id}") + click.echo("=" * 70) + click.echo("") + + # Find session metadata + session_start = None + session_end = None + commands = [] + edits = [] + approval_requests = [] + approvals = [] + + for r in receipts: + rtype = r.get("type", "") + payload = r.get("payload", {}) + + if rtype == "console_session_start": + session_start = r + elif rtype == "console_session_end": + session_end = r + elif rtype == "console_command": + commands.append(r) + elif rtype == "console_file_edit": + edits.append(r) + elif rtype == "console_approval_request": + approval_requests.append(r) + elif rtype == "console_approval": + approvals.append(r) + + # Narrator header + if session_start: + p = session_start.get("payload", {}) + agent = p.get("agent_type", "unknown agent") + caller = p.get("caller", "unknown caller") + project = p.get("project_path", "unknown project") + ts = session_start.get("ts", "")[:19] + + click.echo(f" On {ts}, a session began.") + click.echo(f" Agent: {agent}") + click.echo(f" Caller: {caller}") + click.echo(f" Project: {project}") + click.echo("") + + # Commands + if commands: + click.echo(f" The session executed {len(commands)} command(s):") + for c in commands: + p = c.get("payload", {}) + cmd = p.get("command", "?") + 
exit_code = p.get("exit_code", "?") + ts = c.get("ts", "")[:19] + status = "successfully" if exit_code == 0 else f"with exit code {exit_code}" + click.echo(f" - [{ts}] {cmd} completed {status}") + click.echo("") + + # Edits + if edits: + click.echo(f" The session modified {len(edits)} file(s):") + for e in edits: + p = e.get("payload", {}) + path = p.get("file_path", "?") + edit_type = p.get("edit_type", "modified") + lines = p.get("lines_changed", 0) + click.echo(f" - {edit_type} {path} ({lines} lines changed)") + click.echo("") + + # Approval requests + if approval_requests: + click.echo(f" The session requested {len(approval_requests)} approval(s):") + for ar in approval_requests: + p = ar.get("payload", {}) + action = p.get("action_type", "?") + aid = p.get("approval_id", "?") + approvers = ", ".join(p.get("approvers", [])) + ts = ar.get("ts", "")[:19] + click.echo(f" - [{ts}] Requested approval for '{action}'") + click.echo(f" Approval ID: {aid}") + click.echo(f" Required approvers: {approvers}") + click.echo("") + + # Approvals + if approvals: + click.echo(f" Approval decisions made during session:") + for a in approvals: + p = a.get("payload", {}) + action = p.get("action_type", "?") + approved = p.get("approved", False) + approver = p.get("approver", "?") + reason = p.get("reason", "") + ts = a.get("ts", "")[:19] + decision = "APPROVED" if approved else "REJECTED" + click.echo(f" - [{ts}] {decision}: '{action}'") + click.echo(f" By: {approver}") + if reason: + click.echo(f" Reason: {reason}") + click.echo("") + + # Session end + if session_end: + p = session_end.get("payload", {}) + reason = p.get("exit_reason", "unknown") + ts = session_end.get("ts", "")[:19] + click.echo(f" The session ended at {ts}.") + click.echo(f" Exit reason: {reason}") + click.echo("") + + # Summary + click.echo("-" * 70) + click.echo(f" Summary: {len(receipts)} total events") + click.echo( + f" Commands: {len(commands)} | Edits: {len(edits)} | Approvals: {len(approvals)}/{len(approval_requests)}" + ) + click.echo("=" * 70) + click.echo("") + + +# ============================================================================ +# Entry Point +# ============================================================================ + +if __name__ == "__main__": + cli() diff --git a/docs/GITLAB-CONSOLE-SETUP.md b/docs/GITLAB-CONSOLE-SETUP.md new file mode 100644 index 0000000..09e8d6d --- /dev/null +++ b/docs/GITLAB-CONSOLE-SETUP.md @@ -0,0 +1,155 @@ +# GitLab → Console Integration Setup + +This guide walks through wiring a real GitLab project to VaultMesh Console. + +## Prerequisites + +1. **VaultMesh Console HTTP bridge running**: + ```bash + cd /root/work/vaultmesh + python3 scripts/console_receipts_server.py & + ``` + +2. **Network access** from GitLab runners to your Console bridge + - If runners can't reach your host directly, expose via Tailscale/ngrok/etc. 
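+
+Before continuing, it is worth confirming the bridge is reachable from a
+runner. A minimal check (the `/health` endpoint is the same one referenced in
+Troubleshooting below; `VAULTMESH_CONSOLE_BASE` is defined in Step 1):
+
+```bash
+# Should print a health payload if the Console bridge is reachable.
+curl -fsS "$VAULTMESH_CONSOLE_BASE/health"
+```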
+ +## Step 1: GitLab CI/CD Variables + +In your GitLab project: **Settings → CI/CD → Variables** + +| Variable | Value | Example | +|----------|-------|---------| +| `VAULTMESH_CONSOLE_BASE` | Console bridge URL | `http://your-host:9110/v1/console` | +| `VAULTMESH_CALLER_DID` | GitLab service DID | `did:vm:service:gitlab-ci` | +| `VAULTMESH_APPROVER_DID` | Default approver | `did:vm:human:karol` | +| `VM_ENV` | Environment | `dev`, `staging`, or `prod` | + +## Step 2: Add Helper Script + +Copy `scripts/gitlab_console_session.sh` to your repository: + +```bash +cp scripts/gitlab_console_session.sh /path/to/your/repo/scripts/ +chmod +x /path/to/your/repo/scripts/gitlab_console_session.sh +git add scripts/gitlab_console_session.sh +git commit -m "Add VaultMesh Console helper" +``` + +## Step 3: Update .gitlab-ci.yml + +Add Console session jobs to your pipeline: + +```yaml +stages: + - console + - build + - test + - deploy + - console-end + +# Session start (first job) +console:session-start: + stage: console + script: + - ./scripts/gitlab_console_session.sh start + +# Your existing jobs... +build: + stage: build + script: + - ./scripts/gitlab_console_session.sh cmd build 0 + - make build # your actual build + +test: + stage: test + script: + - ./scripts/gitlab_console_session.sh cmd test 0 + - make test # your actual tests + +# Gated deploy +deploy:prod: + stage: deploy + when: manual + script: + - ./scripts/gitlab_console_session.sh request_approval deploy_prod + # If we get here, approval was already granted + - ./scripts/deploy.sh prod + +# Session end (always runs) +console:session-end: + stage: console-end + when: always + script: + - ./scripts/gitlab_console_session.sh end +``` + +## Step 4: (Optional) GitLab Webhooks + +For richer event tracking (MRs, pushes), add a webhook: + +**GitLab → Settings → Webhooks** + +- URL: `http://your-host:9110/gitlab/webhook` +- Triggers: Push events, Merge request events, Pipeline events + +## Step 5: Verify + +Run a pipeline and check Console: + +```bash +# List sessions +vm console sessions + +# See pipeline story +vm console story gitlab-pipeline- + +# Check dashboard +open http://127.0.0.1:9110/console/dashboard +``` + +## Approval Flow + +When a deploy job requests approval: + +1. Job calls `request_approval deploy_prod` +2. Job fails with approval ID +3. You approve: + ```bash + export VAULTMESH_ACTOR_DID="did:vm:human:karol" + vm console approvals + vm console approve approval-XXXX --reason "Deploy approved" + ``` +4. 
Retry the deploy job in GitLab UI + +## Environment-Based Policies + +Set `VM_ENV` per job or globally: + +| Environment | Requires Approval For | +|-------------|----------------------| +| `dev` | `git_force_push`, `rm -rf` | +| `staging` | Above + `deploy_staging`, `db:migrate` | +| `prod` | Above + `deploy_prod`, `docker push`, everything dangerous | + +Override per-job: +```yaml +deploy:staging: + variables: + VM_ENV: staging + script: + - ./scripts/gitlab_console_session.sh request_approval deploy_staging +``` + +## Troubleshooting + +**Bridge unreachable from runner**: +- Check firewall rules +- Try `curl $VAULTMESH_CONSOLE_BASE/health` from runner + +**Approvals not working**: +- Verify `VAULTMESH_APPROVER_DID` matches your actor DID +- Check `vm console approvals` shows the pending request + +**Dashboard not updating**: +- Bridge may need restart after code changes +- Check `/tmp/console_bridge.log` for errors diff --git a/docs/VAULTMESH-AUTOMATION-ENGINE.md b/docs/VAULTMESH-AUTOMATION-ENGINE.md new file mode 100644 index 0000000..a02d9d6 --- /dev/null +++ b/docs/VAULTMESH-AUTOMATION-ENGINE.md @@ -0,0 +1,907 @@ +# VAULTMESH-AUTOMATION-ENGINE.md + +**Civilization Ledger Workflow Primitive** + +> *Every workflow has a contract. Every execution has a receipt.* + +Automation is VaultMesh's orchestration layer — managing n8n workflows, scheduled jobs, event-driven triggers, and multi-step processes with complete audit trails and cryptographic evidence of execution. + +--- + +## 1. Scroll Definition + +| Property | Value | +| --------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | +| **Scroll Name** | `Automation` | +| **JSONL Path** | `receipts/automation/automation_events.jsonl` | +| **Root File** | `ROOT.automation.txt` | +| **Receipt Types** | `auto_workflow_register`, `auto_workflow_execute`, `auto_workflow_complete`, `auto_schedule_create`, `auto_trigger_fire`, `auto_approval_request`, `auto_approval_decision` | + +--- + +## 2. Core Concepts + +### 2.1 Workflows + +A **workflow** is a defined sequence of automated steps that can be triggered manually, on schedule, or by events. 
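+
+As a rough sketch, registering a workflow is just another hash-chained receipt
+appended to the Automation scroll. The helper below is illustrative, not the
+shipped implementation — only the scroll path and the
+`auto_workflow_register` receipt type come from the table in section 1; the
+envelope shape is assumed to match the OffSec scroll in `cli/vm_cli.py`:
+
+```python
+# Hypothetical sketch: append an auto_workflow_register receipt using the
+# same chained-envelope pattern as _emit_offsec_receipt in cli/vm_cli.py.
+import json
+from datetime import datetime, timezone
+from pathlib import Path
+
+import blake3  # same hashing library the CLI uses
+
+
+def register_workflow(vm_root: Path, workflow: dict) -> dict:
+    scroll = vm_root / "receipts" / "automation" / "automation_events.jsonl"
+    scroll.parent.mkdir(parents=True, exist_ok=True)
+
+    # Chain to the previous receipt's root_hash, if any.
+    previous_hash = None
+    if scroll.exists() and scroll.stat().st_size > 0:
+        last = scroll.read_text(encoding="utf-8").strip().splitlines()[-1]
+        previous_hash = json.loads(last).get("header", {}).get("root_hash")
+
+    envelope = {
+        "schema_version": "1.0.0",
+        "type": "auto_workflow_register",
+        "timestamp": datetime.now(timezone.utc)
+        .isoformat(timespec="seconds")
+        .replace("+00:00", "Z"),
+        "scroll": "automation",
+        "header": {
+            "previous_hash": previous_hash,
+            "tags": ["automation", "workflow", workflow["workflow_id"]],
+            "root_hash": None,
+        },
+        "body": workflow,
+    }
+    # Hash the canonical JSON with root_hash=None, then fill the field in.
+    serialized = json.dumps(
+        envelope, sort_keys=True, separators=(",", ":")
+    ).encode("utf-8")
+    envelope["header"]["root_hash"] = (
+        "blake3:" + blake3.blake3(serialized).hexdigest()
+    )
+
+    with scroll.open("a", encoding="utf-8") as f:
+        f.write(json.dumps(envelope, sort_keys=True) + "\n")
+    return envelope
+```
+
+The registered workflow record itself looks like this: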
+ +```json +{ + "workflow_id": "wf:daily-compliance-check", + "name": "Daily Compliance Check", + "description": "Run Oracle compliance queries and alert on gaps", + "version": 3, + "status": "active", + "created_at": "2025-10-01T00:00:00Z", + "updated_at": "2025-12-01T00:00:00Z", + "created_by": "did:vm:user:sovereign", + "trigger": { + "type": "schedule", + "cron": "0 6 * * *", + "timezone": "Europe/Dublin" + }, + "steps": [ + { + "step_id": "step-1", + "name": "Query Oracle for GDPR compliance", + "type": "mcp_tool", + "tool": "oracle_compliance_answer", + "params": { + "question": "What is our current GDPR compliance status?", + "frameworks": ["GDPR"] + } + }, + { + "step_id": "step-2", + "name": "Query Oracle for AI Act compliance", + "type": "mcp_tool", + "tool": "oracle_compliance_answer", + "params": { + "question": "What is our current EU AI Act compliance status?", + "frameworks": ["EU_AI_ACT"] + } + }, + { + "step_id": "step-3", + "name": "Analyze gaps", + "type": "condition", + "condition": "steps['step-1'].result.gaps.length > 0 OR steps['step-2'].result.gaps.length > 0", + "on_true": "step-4", + "on_false": "step-5" + }, + { + "step_id": "step-4", + "name": "Alert on compliance gaps", + "type": "notification", + "channels": ["slack:compliance-alerts", "email:compliance-team"], + "template": "compliance_gap_alert" + }, + { + "step_id": "step-5", + "name": "Log success", + "type": "log", + "level": "info", + "message": "Daily compliance check passed" + } + ], + "error_handling": { + "on_step_failure": "continue", + "max_retries": 3, + "retry_delay": "5m", + "notify_on_failure": ["slack:ops-alerts"] + }, + "metadata": { + "category": "compliance", + "tags": ["daily", "gdpr", "ai-act", "oracle"], + "owner": "compliance-team" + } +} +``` + +**Workflow types**: +- `scheduled` — cron-based execution +- `event_triggered` — fires on system events +- `manual` — operator-initiated +- `webhook` — external HTTP triggers +- `chained` — triggered by other workflow completion + +### 2.2 Executions + +An **execution** is a single run of a workflow with full context and results. 
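+
+Execution-level fields such as `status` and `duration_ms` are rollups over the per-step records. A minimal sketch of that derivation (`summarize` is a hypothetical helper, illustrative only, not the engine's actual implementation):
+
+```python
+# Sketch: roll per-step records up into execution-level status/duration.
+from datetime import datetime
+
+ISO = "%Y-%m-%dT%H:%M:%SZ"
+
+def summarize(steps: list[dict]) -> dict:
+    statuses = [s["status"] for s in steps]
+    if "failed" in statuses:
+        status = "failed"
+    elif all(s == "completed" for s in statuses):
+        status = "completed"
+    else:
+        status = "running"
+    # Condition steps may carry no timestamps; skip them for duration.
+    timed = [s for s in steps if "started_at" in s and "completed_at" in s]
+    start = min(datetime.strptime(s["started_at"], ISO) for s in timed)
+    end = max(datetime.strptime(s["completed_at"], ISO) for s in timed)
+    return {"status": status,
+            "duration_ms": int((end - start).total_seconds() * 1000)}
+```
+
+A completed execution record: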
+ +```json +{ + "execution_id": "exec-2025-12-06-001", + "workflow_id": "wf:daily-compliance-check", + "workflow_version": 3, + "status": "completed", + "triggered_by": "schedule", + "triggered_at": "2025-12-06T06:00:00Z", + "started_at": "2025-12-06T06:00:01Z", + "completed_at": "2025-12-06T06:02:34Z", + "duration_ms": 153000, + "steps": [ + { + "step_id": "step-1", + "status": "completed", + "started_at": "2025-12-06T06:00:01Z", + "completed_at": "2025-12-06T06:01:15Z", + "duration_ms": 74000, + "result": { + "compliance_score": 0.94, + "gaps": ["Missing DPO appointment documentation"] + } + }, + { + "step_id": "step-2", + "status": "completed", + "started_at": "2025-12-06T06:01:15Z", + "completed_at": "2025-12-06T06:02:20Z", + "duration_ms": 65000, + "result": { + "compliance_score": 0.87, + "gaps": ["Risk assessment incomplete for high-risk AI system"] + } + }, + { + "step_id": "step-3", + "status": "completed", + "result": {"condition_result": true, "next_step": "step-4"} + }, + { + "step_id": "step-4", + "status": "completed", + "started_at": "2025-12-06T06:02:21Z", + "completed_at": "2025-12-06T06:02:34Z", + "result": { + "notifications_sent": ["slack:compliance-alerts", "email:compliance-team"] + } + } + ], + "input": {}, + "output": { + "gdpr_score": 0.94, + "ai_act_score": 0.87, + "total_gaps": 2, + "alert_sent": true + }, + "context": { + "node": "did:vm:node:brick-01", + "environment": "production" + } +} +``` + +### 2.3 Schedules + +**Schedules** define when workflows should run automatically. + +```json +{ + "schedule_id": "sched:daily-compliance", + "workflow_id": "wf:daily-compliance-check", + "cron": "0 6 * * *", + "timezone": "Europe/Dublin", + "enabled": true, + "created_at": "2025-10-01T00:00:00Z", + "created_by": "did:vm:user:sovereign", + "next_run": "2025-12-07T06:00:00Z", + "last_run": "2025-12-06T06:00:00Z", + "last_status": "completed", + "run_count": 67, + "failure_count": 2, + "constraints": { + "max_concurrent": 1, + "skip_if_running": true, + "maintenance_window_skip": true + } +} +``` + +### 2.4 Triggers + +**Triggers** define event-driven workflow activation. + +```json +{ + "trigger_id": "trig:security-incident", + "name": "Security Incident Response", + "workflow_id": "wf:incident-response-initial", + "trigger_type": "event", + "event_source": "offsec", + "event_filter": { + "type": "offsec_incident", + "severity": ["critical", "high"] + }, + "enabled": true, + "created_at": "2025-11-15T00:00:00Z", + "created_by": "did:vm:user:sovereign", + "fire_count": 3, + "last_fired": "2025-12-06T03:47:00Z", + "debounce": { + "enabled": true, + "window": "5m", + "group_by": ["incident_id"] + } +} +``` + +**Trigger types**: +- `event` — fires on VaultMesh events (receipts, alerts, etc.) +- `webhook` — fires on external HTTP POST +- `file_watch` — fires on file system changes +- `mesh_event` — fires on mesh topology changes +- `approval` — fires when approval is granted/denied + +### 2.5 Approvals + +**Approvals** gate workflow continuation on human decisions. 
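+
+The quorum semantics in the sketch below are an assumption (any rejection blocks; approval once `required_approvers` approvals arrive before `expires_at`), and `approval_state` is a hypothetical helper; the record fields match the example that follows:
+
+```python
+# Sketch: evaluating a pending approval record (assumed semantics).
+from datetime import datetime, timezone
+
+def approval_state(rec: dict, now: datetime | None = None) -> str:
+    now = now or datetime.now(timezone.utc)
+    if rec["current_rejections"]:  # assumption: one rejection blocks
+        return "rejected"
+    if len(rec["current_approvals"]) >= rec["required_approvers"]:
+        return "approved"
+    expires = datetime.fromisoformat(rec["expires_at"].replace("Z", "+00:00"))
+    return "pending" if now < expires else "expired"
+```
+
+A pending approval record: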
+ +```json +{ + "approval_id": "approval-2025-12-06-001", + "workflow_id": "wf:production-deploy", + "execution_id": "exec-2025-12-06-002", + "step_id": "step-3-deploy", + "title": "Approve Production Deployment", + "description": "Deploy Guardian v2.1.0 to production nodes", + "status": "pending", + "requested_at": "2025-12-06T10:00:00Z", + "requested_by": "did:vm:service:ci-pipeline", + "required_approvers": 2, + "approvers": ["did:vm:user:sovereign", "did:vm:user:operator-alpha"], + "current_approvals": [], + "current_rejections": [], + "expires_at": "2025-12-06T18:00:00Z", + "context": { + "version": "2.1.0", + "commit": "abc123...", + "changelog": "https://github.com/vaultmesh/guardian/releases/v2.1.0", + "test_results": "all passed", + "affected_nodes": ["brick-01", "brick-02", "brick-03"] + }, + "notification_channels": ["slack:approvals", "email:approvers"] +} +``` + +--- + +## 3. Mapping to Eternal Pattern + +### 3.1 Experience Layer (L1) + +**CLI** (`vm-auto`): +```bash +# Workflow management +vm-auto workflow list +vm-auto workflow show wf:daily-compliance-check +vm-auto workflow create --from workflow-def.json +vm-auto workflow update wf:daily-compliance-check --from workflow-def-v2.json +vm-auto workflow enable wf:daily-compliance-check +vm-auto workflow disable wf:daily-compliance-check --reason "maintenance" +vm-auto workflow delete wf:deprecated-workflow + +# Manual execution +vm-auto run wf:daily-compliance-check +vm-auto run wf:onboarding --input '{"user": "new-operator"}' + +# Execution monitoring +vm-auto exec list --workflow wf:daily-compliance-check --last 10 +vm-auto exec show exec-2025-12-06-001 +vm-auto exec logs exec-2025-12-06-001 +vm-auto exec cancel exec-2025-12-06-003 --reason "testing" + +# Schedules +vm-auto schedule list +vm-auto schedule show sched:daily-compliance +vm-auto schedule pause sched:daily-compliance --until "2025-12-10" +vm-auto schedule resume sched:daily-compliance + +# Triggers +vm-auto trigger list +vm-auto trigger show trig:security-incident +vm-auto trigger test trig:security-incident --event test-event.json + +# Approvals +vm-auto approval list --status pending +vm-auto approval show approval-2025-12-06-001 +vm-auto approval approve approval-2025-12-06-001 --comment "Reviewed and approved" +vm-auto approval reject approval-2025-12-06-001 --reason "Not ready for production" + +# History +vm-auto history --workflow wf:daily-compliance-check --from 2025-12-01 +vm-auto history --status failed --last 7d +``` + +**MCP Tools**: +- `auto_workflow_list` — list workflows +- `auto_workflow_run` — execute workflow +- `auto_execution_status` — get execution status +- `auto_approval_pending` — list pending approvals +- `auto_approval_decide` — approve/reject +- `auto_schedule_next` — next scheduled runs + +**Portal HTTP**: +- `GET /auto/workflows` — list workflows +- `POST /auto/workflows` — create workflow +- `GET /auto/workflows/{id}` — workflow details +- `PUT /auto/workflows/{id}` — update workflow +- `POST /auto/workflows/{id}/run` — execute workflow +- `GET /auto/executions` — list executions +- `GET /auto/executions/{id}` — execution details +- `POST /auto/executions/{id}/cancel` — cancel execution +- `GET /auto/schedules` — list schedules +- `GET /auto/triggers` — list triggers +- `GET /auto/approvals` — list approvals +- `POST /auto/approvals/{id}/approve` — approve +- `POST /auto/approvals/{id}/reject` — reject + +--- + +### 3.2 Engine Layer (L2) + +#### Step 1 — Plan → `automation_workflow_contract.json` + +**Workflow Registration 
Contract**: +```json +{ + "operation_id": "auto-op-2025-12-06-001", + "operation_type": "workflow_register", + "initiated_by": "did:vm:user:sovereign", + "initiated_at": "2025-12-06T09:00:00Z", + "workflow": { + "id": "wf:treasury-reconciliation", + "name": "Treasury Reconciliation", + "version": 1, + "steps": ["..."], + "trigger": { + "type": "schedule", + "cron": "0 0 * * *" + } + }, + "validation": { + "syntax_valid": true, + "steps_valid": true, + "permissions_valid": true + }, + "requires_approval": false +} +``` + +**Execution Contract** (for complex/sensitive workflows): +```json +{ + "operation_id": "auto-op-2025-12-06-002", + "operation_type": "workflow_execute", + "workflow_id": "wf:production-deploy", + "workflow_version": 5, + "triggered_by": "did:vm:service:ci-pipeline", + "triggered_at": "2025-12-06T10:00:00Z", + "trigger_type": "webhook", + "input": { + "version": "2.1.0", + "commit": "abc123...", + "target_nodes": ["brick-01", "brick-02", "brick-03"] + }, + "requires_approval": true, + "approval_config": { + "required_approvers": 2, + "approver_pool": ["did:vm:user:sovereign", "did:vm:user:operator-alpha", "did:vm:user:operator-bravo"], + "timeout": "8h" + }, + "risk_assessment": { + "impact": "high", + "reversibility": "medium", + "affected_services": ["guardian"] + } +} +``` + +#### Step 2 — Execute → `automation_execution_state.json` + +```json +{ + "execution_id": "exec-2025-12-06-002", + "workflow_id": "wf:production-deploy", + "status": "awaiting_approval", + "created_at": "2025-12-06T10:00:00Z", + "updated_at": "2025-12-06T10:30:00Z", + "steps": [ + { + "step_id": "step-1-build", + "name": "Build artifacts", + "status": "completed", + "started_at": "2025-12-06T10:00:01Z", + "completed_at": "2025-12-06T10:05:00Z", + "result": { + "artifact_hash": "blake3:abc123...", + "artifact_path": "builds/guardian-2.1.0.tar.gz" + } + }, + { + "step_id": "step-2-test", + "name": "Run integration tests", + "status": "completed", + "started_at": "2025-12-06T10:05:01Z", + "completed_at": "2025-12-06T10:15:00Z", + "result": { + "tests_passed": 147, + "tests_failed": 0, + "coverage": 0.89 + } + }, + { + "step_id": "step-3-deploy", + "name": "Deploy to production", + "status": "awaiting_approval", + "approval_id": "approval-2025-12-06-001", + "started_at": "2025-12-06T10:15:01Z" + }, + { + "step_id": "step-4-verify", + "name": "Verify deployment", + "status": "pending" + }, + { + "step_id": "step-5-notify", + "name": "Notify stakeholders", + "status": "pending" + } + ], + "approval_status": { + "approval_id": "approval-2025-12-06-001", + "required": 2, + "received": 1, + "approvals": [ + { + "approver": "did:vm:user:sovereign", + "decision": "approve", + "timestamp": "2025-12-06T10:30:00Z", + "comment": "Tests passed, changelog reviewed" + } + ] + }, + "context": { + "node": "did:vm:node:brick-01", + "trace_id": "trace-xyz..." 
+ } +} +``` + +**Execution status transitions**: +``` +pending → running → completed + ↘ failed → (retry) → running + ↘ awaiting_approval → approved → running + ↘ rejected → cancelled + ↘ cancelled + ↘ timed_out +``` + +#### Step 3 — Seal → Receipts + +**Workflow Registration Receipt**: +```json +{ + "type": "auto_workflow_register", + "workflow_id": "wf:treasury-reconciliation", + "workflow_name": "Treasury Reconciliation", + "version": 1, + "timestamp": "2025-12-06T09:00:00Z", + "registered_by": "did:vm:user:sovereign", + "step_count": 5, + "trigger_type": "schedule", + "workflow_hash": "blake3:aaa111...", + "tags": ["automation", "workflow", "register", "treasury"], + "root_hash": "blake3:bbb222..." +} +``` + +**Workflow Execution Start Receipt**: +```json +{ + "type": "auto_workflow_execute", + "execution_id": "exec-2025-12-06-002", + "workflow_id": "wf:production-deploy", + "workflow_version": 5, + "timestamp": "2025-12-06T10:00:00Z", + "triggered_by": "did:vm:service:ci-pipeline", + "trigger_type": "webhook", + "input_hash": "blake3:ccc333...", + "node": "did:vm:node:brick-01", + "tags": ["automation", "execution", "start", "deploy"], + "root_hash": "blake3:ddd444..." +} +``` + +**Workflow Execution Complete Receipt**: +```json +{ + "type": "auto_workflow_complete", + "execution_id": "exec-2025-12-06-002", + "workflow_id": "wf:production-deploy", + "workflow_version": 5, + "timestamp_started": "2025-12-06T10:00:00Z", + "timestamp_completed": "2025-12-06T11:30:00Z", + "duration_ms": 5400000, + "status": "completed", + "steps_total": 5, + "steps_completed": 5, + "steps_failed": 0, + "output_hash": "blake3:eee555...", + "approvals_required": 2, + "approvals_received": 2, + "tags": ["automation", "execution", "complete", "deploy", "success"], + "root_hash": "blake3:fff666..." +} +``` + +**Schedule Creation Receipt**: +```json +{ + "type": "auto_schedule_create", + "schedule_id": "sched:treasury-reconciliation", + "workflow_id": "wf:treasury-reconciliation", + "timestamp": "2025-12-06T09:00:00Z", + "created_by": "did:vm:user:sovereign", + "cron": "0 0 * * *", + "timezone": "UTC", + "first_run": "2025-12-07T00:00:00Z", + "tags": ["automation", "schedule", "create"], + "root_hash": "blake3:ggg777..." +} +``` + +**Trigger Fire Receipt**: +```json +{ + "type": "auto_trigger_fire", + "trigger_id": "trig:security-incident", + "workflow_id": "wf:incident-response-initial", + "execution_id": "exec-2025-12-06-003", + "timestamp": "2025-12-06T03:47:00Z", + "event_type": "offsec_incident", + "event_id": "INC-2025-12-001", + "event_severity": "high", + "debounce_applied": false, + "tags": ["automation", "trigger", "fire", "incident"], + "root_hash": "blake3:hhh888..." +} +``` + +**Approval Request Receipt**: +```json +{ + "type": "auto_approval_request", + "approval_id": "approval-2025-12-06-001", + "workflow_id": "wf:production-deploy", + "execution_id": "exec-2025-12-06-002", + "step_id": "step-3-deploy", + "timestamp": "2025-12-06T10:15:01Z", + "title": "Approve Production Deployment", + "required_approvers": 2, + "approver_pool": ["did:vm:user:sovereign", "did:vm:user:operator-alpha", "did:vm:user:operator-bravo"], + "expires_at": "2025-12-06T18:00:00Z", + "context_hash": "blake3:iii999...", + "tags": ["automation", "approval", "request", "deploy"], + "root_hash": "blake3:jjj000..." 
+} +``` + +**Approval Decision Receipt**: +```json +{ + "type": "auto_approval_decision", + "approval_id": "approval-2025-12-06-001", + "execution_id": "exec-2025-12-06-002", + "timestamp": "2025-12-06T10:45:00Z", + "decision": "approved", + "approvers": [ + { + "did": "did:vm:user:sovereign", + "decision": "approve", + "timestamp": "2025-12-06T10:30:00Z" + }, + { + "did": "did:vm:user:operator-alpha", + "decision": "approve", + "timestamp": "2025-12-06T10:45:00Z" + } + ], + "quorum_met": true, + "workflow_resumed": true, + "tags": ["automation", "approval", "decision", "approved"], + "root_hash": "blake3:kkk111..." +} +``` + +--- + +### 3.3 Ledger Layer (L3) + +**Receipt Types**: + +| Type | When Emitted | +| ------------------------- | ------------------------------- | +| `auto_workflow_register` | Workflow created/updated | +| `auto_workflow_execute` | Execution started | +| `auto_workflow_complete` | Execution completed (any status)| +| `auto_schedule_create` | Schedule created/modified | +| `auto_trigger_fire` | Trigger activated | +| `auto_approval_request` | Approval requested | +| `auto_approval_decision` | Approval granted/denied | + +**Merkle Coverage**: +- All receipts append to `receipts/automation/automation_events.jsonl` +- `ROOT.automation.txt` updated after each append +- Guardian anchors Automation root in anchor cycles + +--- + +## 4. Query Interface + +`automation_query_events.py`: + +```bash +# Workflow history +vm-auto query --workflow wf:daily-compliance-check + +# Failed executions +vm-auto query --type workflow_complete --filter "status == 'failed'" + +# Approvals by user +vm-auto query --type approval_decision --filter "approvers[].did == 'did:vm:user:sovereign'" + +# Trigger fires by event type +vm-auto query --type trigger_fire --filter "event_type == 'offsec_incident'" + +# Date range +vm-auto query --from 2025-12-01 --to 2025-12-06 + +# By workflow category +vm-auto query --tag compliance + +# Export for analysis +vm-auto query --from 2025-01-01 --format csv > automation_2025.csv +``` + +**Execution Timeline**: +```bash +# Show execution timeline with all steps +vm-auto timeline exec-2025-12-06-002 + +# Output: +# exec-2025-12-06-002: wf:production-deploy v5 +# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +# 10:00:00 ▶ STARTED (triggered by ci-pipeline via webhook) +# 10:00:01 ├─ step-1-build: STARTED +# 10:05:00 ├─ step-1-build: COMPLETED (5m) ✓ +# 10:05:01 ├─ step-2-test: STARTED +# 10:15:00 ├─ step-2-test: COMPLETED (10m) ✓ +# 10:15:01 ├─ step-3-deploy: AWAITING APPROVAL +# 10:30:00 │ └─ sovereign: APPROVED +# 10:45:00 │ └─ operator-alpha: APPROVED (quorum met) +# 10:45:01 ├─ step-3-deploy: STARTED +# 11:15:00 ├─ step-3-deploy: COMPLETED (30m) ✓ +# 11:15:01 ├─ step-4-verify: STARTED +# 11:25:00 ├─ step-4-verify: COMPLETED (10m) ✓ +# 11:25:01 ├─ step-5-notify: STARTED +# 11:30:00 ├─ step-5-notify: COMPLETED (5m) ✓ +# 11:30:00 ■ COMPLETED (1h 30m total) +``` + +--- + +## 5. Design Gate Checklist + +| Question | Automation Answer | +| --------------------- | ---------------------------------------------------------------- | +| Clear entrypoint? | ✅ CLI (`vm-auto`), MCP tools, Portal HTTP | +| Contract produced? | ✅ `automation_workflow_contract.json` for registrations/executions | +| State object? | ✅ `automation_execution_state.json` tracking step progress | +| Receipts emitted? | ✅ Seven receipt types covering all automation events | +| Append-only JSONL? | ✅ `receipts/automation/automation_events.jsonl` | +| Merkle root? 
| ✅ `ROOT.automation.txt` | +| Guardian anchor path? | ✅ Automation root included in ProofChain | +| Query tool? | ✅ `automation_query_events.py` + execution timeline | + +--- + +## 6. n8n Integration + +### 6.1 VaultMesh n8n Nodes + +Custom n8n nodes for VaultMesh integration: + +```typescript +// VaultMesh Trigger Node +{ + name: 'VaultMesh Trigger', + description: 'Trigger workflow on VaultMesh events', + inputs: [], + outputs: ['main'], + properties: [ + { + displayName: 'Event Type', + name: 'eventType', + type: 'options', + options: [ + { name: 'Receipt Emitted', value: 'receipt' }, + { name: 'Alert Fired', value: 'alert' }, + { name: 'Anchor Complete', value: 'anchor' }, + { name: 'Mesh Change', value: 'mesh' } + ] + }, + { + displayName: 'Filter', + name: 'filter', + type: 'json' + } + ] +} + +// VaultMesh Action Node +{ + name: 'VaultMesh', + description: 'Interact with VaultMesh APIs', + inputs: ['main'], + outputs: ['main'], + properties: [ + { + displayName: 'Operation', + name: 'operation', + type: 'options', + options: [ + { name: 'Oracle Query', value: 'oracle_query' }, + { name: 'Emit Receipt', value: 'emit_receipt' }, + { name: 'Treasury Transfer', value: 'treasury_transfer' }, + { name: 'Mesh Node Status', value: 'mesh_status' }, + { name: 'Identity Verify', value: 'identity_verify' } + ] + } + ] +} +``` + +### 6.2 Workflow-to-Receipt Mapping + +Every n8n workflow execution produces VaultMesh receipts: + +``` +n8n Workflow Execution + │ + ▼ +┌─────────────────────────┐ +│ VaultMesh Automation │ +│ Engine Wrapper │ +│ │ +│ • Intercepts start │ +│ • Tracks step progress │ +│ • Captures outputs │ +│ • Handles approvals │ +│ • Emits receipts │ +└─────────────────────────┘ + │ + ▼ + JSONL + Merkle +``` + +### 6.3 n8n Credential Storage + +VaultMesh credentials for n8n stored securely: + +```json +{ + "credential_id": "n8n-cred:vaultmesh-api", + "type": "vaultmesh_api", + "name": "VaultMesh Production", + "data_encrypted": "aes-256-gcm:...", + "created_at": "2025-12-01T00:00:00Z", + "created_by": "did:vm:user:sovereign", + "last_used": "2025-12-06T10:00:00Z", + "scopes": ["oracle:read", "treasury:read", "automation:execute"] +} +``` + +--- + +## 7. 
Step Types + +### 7.1 Built-in Step Types + +| Step Type | Description | Example Use | +| --------------- | -------------------------------------------- | -------------------------------- | +| `mcp_tool` | Call VaultMesh MCP tool | Oracle query, Treasury check | +| `http_request` | Make HTTP request | External API calls | +| `condition` | Branch based on expression | Check compliance score | +| `loop` | Iterate over collection | Process multiple accounts | +| `parallel` | Execute steps concurrently | Check multiple nodes | +| `approval` | Wait for human approval | Production deployments | +| `delay` | Wait for duration | Rate limiting | +| `notification` | Send notifications | Slack, email, PagerDuty | +| `script` | Execute custom script | Complex transformations | +| `sub_workflow` | Call another workflow | Reusable components | +| `receipt_emit` | Emit custom receipt | Business events | + +### 7.2 Step Configuration + +```json +{ + "step_id": "step-1", + "name": "Query Treasury Balance", + "type": "mcp_tool", + "tool": "treasury_balance", + "params": { + "account": "{{ input.account_id }}" + }, + "timeout": "30s", + "retry": { + "max_attempts": 3, + "backoff": "exponential", + "initial_delay": "1s" + }, + "error_handling": { + "on_error": "continue", + "fallback_value": {"balance": 0} + }, + "output_mapping": { + "balance": "$.result.balance", + "currency": "$.result.currency" + } +} +``` + +--- + +## 8. Integration Points + +| System | Integration | +| ---------------- | --------------------------------------------------------------------------- | +| **Guardian** | Trigger workflows on anchor events; automate anchor scheduling | +| **Treasury** | Automated reconciliation; scheduled reports; transfer approvals | +| **Identity** | Credential rotation workflows; onboarding/offboarding automation | +| **Mesh** | Node provisioning workflows; topology change automation | +| **OffSec** | Incident response playbooks; automated remediation | +| **Oracle** | Scheduled compliance checks; gap remediation workflows | +| **Observability**| Alert-triggered workflows; automated runbook execution | + +--- + +## 9. Security Model + +### 9.1 Workflow Permissions + +```json +{ + "workflow_id": "wf:production-deploy", + "permissions": { + "view": ["did:vm:org:engineering"], + "execute": ["did:vm:user:sovereign", "did:vm:service:ci-pipeline"], + "edit": ["did:vm:user:sovereign"], + "delete": ["did:vm:user:sovereign"], + "approve": ["did:vm:user:sovereign", "did:vm:user:operator-alpha"] + }, + "execution_identity": "did:vm:service:automation-engine", + "secret_access": ["vault:deploy-keys", "vault:api-tokens"] +} +``` + +### 9.2 Audit Requirements + +All workflow operations are receipted for: +- **Compliance**: Prove workflows executed as designed +- **Debugging**: Trace execution failures +- **Accountability**: Track who approved what +- **Non-repudiation**: Cryptographic proof of execution + +--- + +## 10. 
Future Extensions + +- **Visual workflow builder**: Drag-and-drop in Portal UI +- **Workflow versioning**: Git-like version control for workflows +- **A/B testing**: Test workflow variations +- **Cost tracking**: Treasury integration for workflow execution costs +- **ML-powered optimization**: Suggest workflow improvements +- **Cross-mesh orchestration**: Federated workflow execution +- **Workflow marketplace**: Share/import community workflows diff --git a/docs/VAULTMESH-CONSOLE-ENGINE.md b/docs/VAULTMESH-CONSOLE-ENGINE.md new file mode 100644 index 0000000..0f604c5 --- /dev/null +++ b/docs/VAULTMESH-CONSOLE-ENGINE.md @@ -0,0 +1,438 @@ +# VAULTMESH-CONSOLE-ENGINE.md + +**Sovereign AI Agent Session Management** + +> *Every coding session is a chapter in the Civilization Ledger.* + +The Console Engine binds AI coding agents (OpenCode, Claude Code, CAI, custom agents) into the VaultMesh receipting system. Every session, command, file edit, tool call, approval, and git commit becomes a receipted event. + +--- + +## 1. Engine Registration + +| Property | Value | +|----------|-------| +| **Engine ID** | `engine:console` | +| **Name** | Console | +| **Scroll** | `Console` | +| **JSONL Path** | `receipts/console/console_events.jsonl` | +| **Root File** | `receipts/console/ROOT.console.txt` | +| **Authority** | AI agent session management, code operations, and sovereign development | +| **Status** | `active` | + +### 1.1 Capabilities + +```json +[ + "console_read", + "console_write", + "console_execute", + "console_spawn", + "file_read", + "file_write", + "bash_execute", + "git_commit", + "mcp_call" +] +``` + +--- + +## 2. Receipt Types + +### 2.1 Receipt Schema + +All Console receipts share a common envelope: + +```json +{ + "ts": "2025-12-07T04:00:00Z", + "engine_id": "engine:console", + "type": "console_session_start", + "session_id": "session-1765123456", + "payload": { ... } +} +``` + +### 2.2 Receipt Type Definitions + +| Type | Description | Payload Fields | +|------|-------------|----------------| +| `console_genesis` | Engine initialization marker | `note` | +| `console_session_start` | Agent session initiated | `agent_type`, `model_id`, `caller`, `project_path` | +| `console_session_end` | Agent session completed | `duration_ms`, `commands_executed`, `files_modified`, `exit_reason` | +| `console_command` | CLI command executed | `command`, `args_hash`, `exit_code`, `duration_ms` | +| `console_file_edit` | File modification via agent | `file_path`, `old_hash`, `new_hash`, `edit_type`, `lines_changed` | +| `console_tool_call` | Agent tool invocation | `tool_name`, `params_hash`, `result_hash`, `capability_used` | +| `console_approval` | Human approval for agent action | `action_type`, `approved`, `approver`, `reason` | +| `console_git_commit` | Git commit created by agent | `commit_hash`, `files_changed`, `message_hash`, `signed` | +| `console_agent_spawn` | Sub-agent spawned | `parent_session_id`, `child_session_id`, `agent_type`, `task_hash` | + +--- + +## 3. Mapping to Eternal Pattern + +### 3.1 Experience Layer (L1) + +**Entrypoints:** +- `opencode --sovereign` — Launch sovereign OpenCode session +- `vm-console spawn ` — Spawn a new agent session +- MCP tools (`console_session_list`, `console_spawn_agent`, etc.) 
+- Portal dashboard for session monitoring + +**Intent Capture:** +```bash +# Sovereign OpenCode invocation +opencode --sovereign --project /root/work/vaultmesh + +# With explicit identity +opencode --identity did:vm:agent:opencode-sovereign --capabilities console_write,file_edit +``` + +### 3.2 Engine Layer (L2) + +**Session Contract:** + +```json +{ + "contract_type": "console_session", + "session_id": "session-1765123456", + "agent_type": "opencode", + "model_id": "claude-opus-4-5-20251101", + "caller": "did:vm:human:karol", + "project_path": "/root/work/vaultmesh", + "capabilities_requested": [ + "file_read", + "file_write", + "bash_execute", + "git_commit" + ], + "constraints": { + "max_duration_minutes": 60, + "max_files_modified": 50, + "require_approval_for": ["git_push", "file_delete"], + "sandbox_mode": false + }, + "created_at": "2025-12-07T04:00:00Z" +} +``` + +This contract is captured as a `console_session_start` receipt payload. + +**Session State (derived from receipts):** + +```json +{ + "session_id": "session-1765123456", + "status": "active", + "started_at": "2025-12-07T04:00:00Z", + "commands_executed": 23, + "files_read": 15, + "files_modified": 4, + "tool_calls": 47, + "approvals_requested": 1, + "approvals_granted": 1, + "current_task": "Implementing Console engine receipts", + "git_commits": [] +} +``` + +State is derived from receipts, not a primary source of truth. + +### 3.3 Ledger Layer (L3) + +**Receipt Flow:** + +``` +Session Start → Tool Calls → File Edits → Approvals → Git Commits → Session End + ↓ ↓ ↓ ↓ ↓ ↓ + Receipt Receipt Receipt Receipt Receipt Receipt + ↓ ↓ ↓ ↓ ↓ ↓ + └─────────────┴────────────┴────────────┴────────────┴────────────┘ + ↓ + console_events.jsonl + ↓ + ROOT.console.txt + ↓ + Guardian Anchor +``` + +--- + +## 4. Root File Format + +`receipts/console/ROOT.console.txt`: + +``` +# VaultMesh Console Root +engine_id=engine:console +merkle_root=8a71c1c0b9c6... +events=128 +updated_at=2025-12-07T05:30:00Z +``` + +| Field | Description | +|-------|-------------| +| `engine_id` | Fixed identifier (`engine:console`) | +| `merkle_root` | Hex-encoded Merkle root over line hashes | +| `events` | Number of receipts in `console_events.jsonl` | +| `updated_at` | ISO 8601 timestamp of last update | + +--- + +## 5. DID Scheme + +``` +did:vm:agent:opencode- # Per-session agent identity +did:vm:agent:opencode-sovereign # Persistent sovereign agent +did:vm:service:console-gateway # MCP gateway service +``` + +For Phase 1, DIDs are treated as opaque strings. Full Identity engine integration comes later. + +--- + +## 6. CLI Commands + +```bash +# Session management +vm-console session list --status active +vm-console session show session-1765123456 +vm-console session kill session-1765123456 --reason "Manual termination" + +# Spawn agents +vm-console spawn opencode --task "Implement Treasury engine" --project /root/work/vaultmesh +vm-console spawn cai --task "Audit authentication flow" --capabilities offsec_read + +# Approvals +vm-console approvals pending +vm-console approve action-12345 --reason "Looks safe" +vm-console reject action-12345 --reason "Too risky" + +# History and audit +vm-console history --session session-1765123456 +vm-console audit --date 2025-12-07 --agent-type opencode +vm-console receipts --scroll Console --limit 100 +``` + +--- + +## 7. 
MCP Tools + +### 7.1 Read-Only Tools + +| Tool | Description | +|------|-------------| +| `console_session_list` | List active/completed sessions | +| `console_session_status` | Get detailed session status | +| `console_receipts_search` | Search Console scroll receipts | + +### 7.2 Write Tools + +| Tool | Capability Required | Description | +|------|---------------------|-------------| +| `console_spawn_agent` | `console_spawn` | Spawn a new agent session | +| `console_approve_action` | `console_approve` | Approve/reject pending action | + +--- + +## 8. Python API + +### 8.1 Emitting Receipts + +```python +from engines.console.receipts import emit_console_receipt + +# Session start +emit_console_receipt( + "console_session_start", + { + "agent_type": "opencode", + "model_id": "claude-opus-4-5", + "caller": "did:vm:human:karol", + "project_path": "/root/work/vaultmesh" + }, + session_id="session-1765123456", +) + +# File edit +emit_console_receipt( + "console_file_edit", + { + "file_path": "engines/console/receipts.py", + "old_hash": "blake3:abc123...", + "new_hash": "blake3:def456...", + "edit_type": "modify", + "lines_changed": 42 + }, + session_id="session-1765123456", +) + +# Session end +emit_console_receipt( + "console_session_end", + { + "duration_ms": 3600000, + "commands_executed": 47, + "files_modified": 12, + "exit_reason": "completed" + }, + session_id="session-1765123456", +) +``` + +### 8.2 Reading Root Info + +```python +from engines.console.receipts import get_emitter + +emitter = get_emitter() +info = emitter.get_root_info() +print(f"Events: {info['events']}, Root: {info['merkle_root'][:16]}...") +``` + +--- + +## 9. HTTP Bridge + +For OpenCode plugin integration, a FastAPI sidecar exposes the receipt emitter: + +```python +# scripts/console_receipts_server.py +from fastapi import FastAPI +from pydantic import BaseModel +import uvicorn +from engines.console.receipts import emit_console_receipt, ReceiptType + +app = FastAPI() + +class ReceiptIn(BaseModel): + type: ReceiptType + session_id: str | None = None + payload: dict + +@app.post("/v1/console/receipt") +async def console_receipt(rec: ReceiptIn): + record = emit_console_receipt( + receipt_type=rec.type, + payload=rec.payload, + session_id=rec.session_id, + ) + return {"ok": True, "record": record} + +if __name__ == "__main__": + uvicorn.run(app, host="127.0.0.1", port=9110) +``` + +--- + +## 10. OpenCode Plugin + +The `@vaultmesh/opencode-plugin` hooks into OpenCode's lifecycle: + +```typescript +export const VaultMeshConsolePlugin = async (ctx) => { + const sessionId = await initSession(ctx); + + return { + hooks: { + onSessionStart: async () => { /* emit console_session_start */ }, + onSessionEnd: async (result) => { /* emit console_session_end */ }, + onToolCall: async (tool, params, result) => { /* emit console_tool_call */ }, + onFileEdit: async (path, oldContent, newContent) => { /* emit console_file_edit */ }, + }, + tool: { + vm_anchor: tool({ /* trigger Guardian anchor */ }), + vm_receipt_search: tool({ /* search Console receipts */ }), + vm_identity: tool({ /* get session identity */ }), + }, + }; +}; +``` + +--- + +## 11. Integration Points + +### 11.1 Guardian + +Console root is included in the ProofChain anchor cycle: + +```python +# Guardian reads ROOT.console.txt alongside other scroll roots +roots = { + "console": read_root("receipts/console/ROOT.console.txt"), + "drills": read_root("ROOT.drills.txt"), + # ... 
other scrolls +} +anchor_hash = compute_combined_root(roots) +``` + +### 11.2 Identity + +Session DIDs resolve via the Identity engine: + +```json +{ + "did": "did:vm:agent:opencode-session-1765123456", + "type": "agent", + "controller": "did:vm:human:karol", + "capabilities": ["file_read", "file_write", "bash_execute"], + "session_id": "session-1765123456", + "expires_at": "2025-12-07T05:00:00Z" +} +``` + +### 11.3 Governance + +Dangerous operations trigger constitutional compliance checks: + +```python +async def check_before_execute(action: str, target: str): + if action in DANGEROUS_OPERATIONS: + result = await governance_engine.check_compliance( + action=action, + actor=current_session.identity, + target=target, + ) + if not result.compliant: + raise ConstitutionalViolation(result.articles_violated) +``` + +--- + +## 12. Design Gate Checklist + +| Question | Answer | +|----------|--------| +| Clear entrypoint? | ✅ `opencode --sovereign`, `vm-console spawn`, MCP tools | +| Contract produced? | ✅ Session contract in `console_session_start` payload | +| State object? | ✅ Derived session state from receipts | +| Receipts emitted? | ✅ 9 receipt types (including genesis) | +| Append-only JSONL? | ✅ `receipts/console/console_events.jsonl` | +| Merkle root? | ✅ `receipts/console/ROOT.console.txt` | +| Guardian anchor path? | ✅ Console root included in ProofChain | +| Query tool? | ✅ `vm-console`, MCP tools, Portal dashboard | + +--- + +## 13. Future Extensions + +### 13.1 Phase 2: Albedo 🜄 + +- OpenCode plugin integration +- HTTP bridge for receipt emission +- Real session tracking + +### 13.2 Phase 3: Citrinitas 🜆 + +- `vm-console` CLI with full commands +- MCP server tools +- Session replay and audit + +### 13.3 Phase 4: Rubedo 🜂 + +- Multi-agent orchestration +- Cross-session task continuity +- Federation support for remote agents +- Full Identity engine integration diff --git a/docs/VAULTMESH-CONSTITUTIONAL-GOVERNANCE.md b/docs/VAULTMESH-CONSTITUTIONAL-GOVERNANCE.md new file mode 100644 index 0000000..6382118 --- /dev/null +++ b/docs/VAULTMESH-CONSTITUTIONAL-GOVERNANCE.md @@ -0,0 +1,752 @@ +# VAULTMESH-CONSTITUTIONAL-GOVERNANCE.md +**The Laws That Govern the Ledger** + +> *A civilization without laws is just a database.* + +Constitutional Governance defines the rules, amendments, and enforcement mechanisms that govern VaultMesh itself. This is the meta-layer — the constitution that the engines must obey. + +--- + +## 1. Governance Philosophy + +### 1.1 Why a Constitution? + +VaultMesh isn't just infrastructure — it's a **trust machine**. Trust requires: +- **Predictability**: Rules don't change arbitrarily +- **Transparency**: Changes are visible and receipted +- **Legitimacy**: Changes follow defined procedures +- **Accountability**: Violations have consequences + +The Constitution provides these guarantees. 
+ +### 1.2 Constitutional Hierarchy + +``` +┌─────────────────────────────────────────────────────┐ +│ IMMUTABLE AXIOMS │ +│ (Cannot be changed, ever) │ +│ • Receipts are append-only │ +│ • Hashes are cryptographically verified │ +│ • All changes are receipted │ +└─────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────┐ +│ CONSTITUTIONAL ARTICLES │ +│ (Can be amended with supermajority + ratification) │ +│ • Governance procedures │ +│ • Engine authorities │ +│ • Federation rules │ +└─────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────┐ +│ STATUTORY RULES │ +│ (Can be changed with standard procedures) │ +│ • Operational parameters │ +│ • Default configurations │ +│ • Policy settings │ +└─────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────┐ +│ EXECUTIVE ORDERS │ +│ (Can be issued by authorized actors) │ +│ • Emergency responses │ +│ • Temporary measures │ +│ • Operational decisions │ +└─────────────────────────────────────────────────────┘ +``` + +--- + +## 2. Governance Scroll + +| Property | Value | +|----------|-------| +| **Scroll Name** | `Governance` | +| **JSONL Path** | `receipts/governance/governance_events.jsonl` | +| **Root File** | `ROOT.governance.txt` | +| **Receipt Types** | `gov_proposal`, `gov_vote`, `gov_ratification`, `gov_amendment`, `gov_executive_order`, `gov_violation`, `gov_enforcement` | + +--- + +## 3. The Constitution + +### 3.1 Preamble + +```markdown +# VAULTMESH CONSTITUTION v1.0 + +We, the architects and stewards of VaultMesh, establish this Constitution to: + +1. Preserve the integrity of the Civilization Ledger +2. Ensure transparent and accountable governance +3. Protect the sovereignty of all participants +4. Enable durable, cross-generational trust + +This Constitution is the supreme law of this VaultMesh instance. +All engines, agents, and actors are bound by its provisions. +``` + +### 3.2 Immutable Axioms + +```json +{ + "axioms": [ + { + "id": "AXIOM-001", + "name": "Append-Only Receipts", + "statement": "Receipts, once written, shall never be modified or deleted. The ledger is append-only.", + "rationale": "Immutability is the foundation of trust.", + "immutable": true + }, + { + "id": "AXIOM-002", + "name": "Cryptographic Integrity", + "statement": "All receipts shall include cryptographic hashes computed from their content. Hash algorithms may be upgraded but never weakened.", + "rationale": "Verification requires mathematical certainty.", + "immutable": true + }, + { + "id": "AXIOM-003", + "name": "Universal Receipting", + "statement": "All significant state changes shall produce receipts. No governance action is valid without a receipt.", + "rationale": "What is not receipted did not happen.", + "immutable": true + }, + { + "id": "AXIOM-004", + "name": "Constitutional Supremacy", + "statement": "No engine, agent, or actor may take action that violates this Constitution. 
Violations are void ab initio.", + "rationale": "The Constitution is the root of legitimacy.", + "immutable": true + }, + { + "id": "AXIOM-005", + "name": "Axiom Immutability", + "statement": "These axioms cannot be amended, suspended, or circumvented by any procedure.", + "rationale": "Some truths must be eternal.", + "immutable": true + } + ] +} +``` + +### 3.3 Constitutional Articles + +```json +{ + "articles": [ + { + "id": "ARTICLE-I", + "name": "Governance Structure", + "sections": [ + { + "id": "I.1", + "title": "Sovereign Authority", + "text": "The Sovereign (designated human administrator) holds ultimate authority over this VaultMesh instance, subject to the Axioms." + }, + { + "id": "I.2", + "title": "Engine Authorities", + "text": "Each Engine operates within its defined domain. No Engine may exceed its constitutional authority." + }, + { + "id": "I.3", + "title": "Agent Delegation", + "text": "Agents may exercise delegated authority within explicit bounds. All agent actions are attributable to their delegator." + } + ] + }, + { + "id": "ARTICLE-II", + "name": "Amendment Procedure", + "sections": [ + { + "id": "II.1", + "title": "Proposal", + "text": "Constitutional amendments may be proposed by the Sovereign or by consensus of admin-capability holders." + }, + { + "id": "II.2", + "title": "Deliberation Period", + "text": "All amendments require a minimum 7-day deliberation period before voting." + }, + { + "id": "II.3", + "title": "Ratification", + "text": "Amendments require approval by the Sovereign AND successful execution of the amendment receipt." + }, + { + "id": "II.4", + "title": "Effective Date", + "text": "Amendments take effect upon anchor confirmation of the ratification receipt." + } + ] + }, + { + "id": "ARTICLE-III", + "name": "Engine Governance", + "sections": [ + { + "id": "III.1", + "title": "Engine Registry", + "text": "Only engines registered in the Constitution may operate. New engines require constitutional amendment." + }, + { + "id": "III.2", + "title": "Engine Boundaries", + "text": "Each engine's authority is limited to its defined scroll(s). Cross-scroll operations require explicit authorization." + }, + { + "id": "III.3", + "title": "Engine Lifecycle", + "text": "Engines may be suspended or deprecated by executive order, but removal requires amendment." + } + ] + }, + { + "id": "ARTICLE-IV", + "name": "Rights and Protections", + "sections": [ + { + "id": "IV.1", + "title": "Audit Rights", + "text": "Any authorized party may audit any receipt. Audit requests shall not be unreasonably denied." + }, + { + "id": "IV.2", + "title": "Data Sovereignty", + "text": "Data subjects retain rights over their personal data as defined by applicable law." + }, + { + "id": "IV.3", + "title": "Due Process", + "text": "No capability shall be revoked without notice and opportunity to respond, except in emergencies." + } + ] + }, + { + "id": "ARTICLE-V", + "name": "Federation", + "sections": [ + { + "id": "V.1", + "title": "Federation Authority", + "text": "Federation agreements require Sovereign approval." + }, + { + "id": "V.2", + "title": "Federation Limits", + "text": "No federation agreement may compromise the Axioms or require violation of this Constitution." + }, + { + "id": "V.3", + "title": "Federation Termination", + "text": "Federation agreements may be terminated with 30 days notice, or immediately upon material breach." 
+ } + ] + }, + { + "id": "ARTICLE-VI", + "name": "Emergency Powers", + "sections": [ + { + "id": "VI.1", + "title": "Emergency Declaration", + "text": "The Sovereign may declare an emergency upon credible threat to system integrity." + }, + { + "id": "VI.2", + "title": "Emergency Powers", + "text": "During emergencies, the Sovereign may suspend normal procedures except the Axioms." + }, + { + "id": "VI.3", + "title": "Emergency Duration", + "text": "Emergencies automatically expire after 72 hours unless renewed. All emergency actions must be receipted." + } + ] + } + ] +} +``` + +### 3.4 Engine Registry + +```json +{ + "registered_engines": [ + { + "engine_id": "engine:drills", + "name": "Security Drills", + "scroll": "Drills", + "authority": "Security training and exercise management", + "registered_at": "2025-06-01T00:00:00Z", + "status": "active" + }, + { + "engine_id": "engine:oracle", + "name": "Compliance Oracle", + "scroll": "Compliance", + "authority": "Compliance question answering and attestation", + "registered_at": "2025-06-01T00:00:00Z", + "status": "active" + }, + { + "engine_id": "engine:guardian", + "name": "Guardian", + "scroll": "Guardian", + "authority": "Anchoring, monitoring, and security response", + "registered_at": "2025-06-01T00:00:00Z", + "status": "active" + }, + { + "engine_id": "engine:treasury", + "name": "Treasury", + "scroll": "Treasury", + "authority": "Financial tracking and settlement", + "registered_at": "2025-12-01T00:00:00Z", + "status": "active" + }, + { + "engine_id": "engine:mesh", + "name": "Mesh", + "scroll": "Mesh", + "authority": "Topology and federation management", + "registered_at": "2025-12-01T00:00:00Z", + "status": "active" + }, + { + "engine_id": "engine:offsec", + "name": "OffSec", + "scroll": "OffSec", + "authority": "Security operations and incident response", + "registered_at": "2025-12-01T00:00:00Z", + "status": "active" + }, + { + "engine_id": "engine:identity", + "name": "Identity", + "scroll": "Identity", + "authority": "DID, credential, and capability management", + "registered_at": "2025-12-01T00:00:00Z", + "status": "active" + }, + { + "engine_id": "engine:observability", + "name": "Observability", + "scroll": "Observability", + "authority": "Telemetry and health monitoring", + "registered_at": "2025-12-01T00:00:00Z", + "status": "active" + }, + { + "engine_id": "engine:automation", + "name": "Automation", + "scroll": "Automation", + "authority": "Workflow and agent execution", + "registered_at": "2025-12-01T00:00:00Z", + "status": "active" + }, + { + "engine_id": "engine:psi", + "name": "Psi-Field", + "scroll": "PsiField", + "authority": "Consciousness and transmutation tracking", + "registered_at": "2025-12-01T00:00:00Z", + "status": "active" + }, + { + "engine_id": "engine:federation", + "name": "Federation", + "scroll": "Federation", + "authority": "Cross-mesh trust and verification", + "registered_at": "2025-12-01T00:00:00Z", + "status": "active" + }, + { + "engine_id": "engine:governance", + "name": "Governance", + "scroll": "Governance", + "authority": "Constitutional enforcement and amendment", + "registered_at": "2025-06-01T00:00:00Z", + "status": "active" + } + ] +} +``` + +--- + +## 4. 
Governance Procedures + +### 4.1 Amendment Workflow + +``` +┌──────────────┐ +│ PROPOSAL │ +│ │ +│ Author drafts│ +│ amendment │ +└──────┬───────┘ + │ + ▼ +┌──────────────┐ +│ SUBMISSION │ +│ │ +│ Submit via │ +│ gov_proposal │ +│ receipt │ +└──────┬───────┘ + │ + ▼ +┌──────────────┐ 7+ days +│ DELIBERATION │◄────────────┐ +│ │ │ +│ Public │ Comments │ +│ discussion │─────────────┘ +└──────┬───────┘ + │ + ▼ +┌──────────────┐ +│ VOTING │ +│ │ +│ Sovereign + │ +│ Admin quorum │ +└──────┬───────┘ + │ + ├─────── REJECTED ──────► Archive + │ + ▼ APPROVED +┌──────────────┐ +│ RATIFICATION │ +│ │ +│ Sovereign │ +│ signs │ +└──────┬───────┘ + │ + ▼ +┌──────────────┐ +│ ACTIVATION │ +│ │ +│ Upon anchor │ +│ confirmation │ +└──────────────┘ +``` + +### 4.2 Proposal Receipt + +```json +{ + "type": "gov_proposal", + "proposal_id": "PROP-2025-12-001", + "proposal_type": "amendment", + "title": "Add Data Retention Article", + "author": "did:vm:human:sovereign", + "submitted_at": "2025-12-06T10:00:00Z", + "deliberation_ends": "2025-12-13T10:00:00Z", + "content": { + "target": "ARTICLE-VII", + "action": "add", + "text": { + "id": "ARTICLE-VII", + "name": "Data Retention", + "sections": [ + { + "id": "VII.1", + "title": "Retention Periods", + "text": "Receipts shall be retained for a minimum of 10 years..." + } + ] + } + }, + "rationale": "Compliance with emerging EU digital infrastructure regulations requires explicit retention policies.", + "impact_assessment": { + "affected_engines": ["all"], + "backward_compatible": true, + "migration_required": false + }, + "status": "deliberation", + "tags": ["governance", "proposal", "amendment"], + "root_hash": "blake3:aaa111..." +} +``` + +### 4.3 Vote Receipt + +```json +{ + "type": "gov_vote", + "vote_id": "VOTE-2025-12-001-sovereign", + "proposal_id": "PROP-2025-12-001", + "voter": "did:vm:human:sovereign", + "voted_at": "2025-12-14T10:00:00Z", + "vote": "approve", + "weight": 1.0, + "comments": "Essential for regulatory compliance.", + "signature": "z58D...", + "tags": ["governance", "vote", "approve"], + "root_hash": "blake3:bbb222..." +} +``` + +### 4.4 Ratification Receipt + +```json +{ + "type": "gov_ratification", + "ratification_id": "RAT-2025-12-001", + "proposal_id": "PROP-2025-12-001", + "ratified_at": "2025-12-14T12:00:00Z", + "ratified_by": "did:vm:human:sovereign", + "vote_summary": { + "approve": 1, + "reject": 0, + "abstain": 0 + }, + "quorum_met": true, + "effective_at": "pending_anchor", + "constitution_version_before": "1.0.0", + "constitution_version_after": "1.1.0", + "signature": "z58D...", + "tags": ["governance", "ratification", "amendment"], + "root_hash": "blake3:ccc333..." +} +``` + +### 4.5 Amendment Receipt + +```json +{ + "type": "gov_amendment", + "amendment_id": "AMEND-2025-12-001", + "proposal_id": "PROP-2025-12-001", + "ratification_id": "RAT-2025-12-001", + "effective_at": "2025-12-14T14:00:00Z", + "anchor_confirmed_at": "2025-12-14T14:00:00Z", + "anchor_proof": { + "backend": "ethereum", + "tx_hash": "0x123...", + "block_number": 12345678 + }, + "amendment_type": "add_article", + "target": "ARTICLE-VII", + "constitution_hash_before": "blake3:const_v1.0...", + "constitution_hash_after": "blake3:const_v1.1...", + "tags": ["governance", "amendment", "effective"], + "root_hash": "blake3:ddd444..." +} +``` + +--- + +## 5. 
Executive Orders + +For operational decisions that don't require full amendment: + +### 5.1 Executive Order Receipt + +```json +{ + "type": "gov_executive_order", + "order_id": "EO-2025-12-001", + "title": "Temporary Rate Limit Increase", + "issued_by": "did:vm:human:sovereign", + "issued_at": "2025-12-06T15:00:00Z", + "authority": "ARTICLE-I.1 (Sovereign Authority)", + "order_type": "parameter_change", + "content": { + "parameter": "guardian.anchor_rate_limit", + "old_value": "100/day", + "new_value": "500/day", + "reason": "Handling increased receipt volume during Q4 compliance push" + }, + "duration": { + "type": "temporary", + "expires_at": "2026-01-01T00:00:00Z" + }, + "tags": ["governance", "executive-order", "parameter"], + "root_hash": "blake3:eee555..." +} +``` + +### 5.2 Emergency Declaration + +```json +{ + "type": "gov_executive_order", + "order_id": "EO-2025-12-002", + "title": "Security Emergency Declaration", + "issued_by": "did:vm:human:sovereign", + "issued_at": "2025-12-06T03:50:00Z", + "authority": "ARTICLE-VI.1 (Emergency Declaration)", + "order_type": "emergency", + "content": { + "emergency_type": "security_incident", + "threat_description": "Active intrusion attempt detected on BRICK-02", + "powers_invoked": [ + "Suspend normal authentication delays", + "Enable enhanced logging on all nodes", + "Authorize immediate capability revocation" + ], + "incident_reference": "INC-2025-12-001" + }, + "duration": { + "type": "emergency", + "expires_at": "2025-12-09T03:50:00Z", + "renewable": true + }, + "tags": ["governance", "executive-order", "emergency", "security"], + "root_hash": "blake3:fff666..." +} +``` + +--- + +## 6. Violation and Enforcement + +### 6.1 Violation Detection + +Guardian monitors for constitutional violations: + +```json +{ + "type": "gov_violation", + "violation_id": "VIOL-2025-12-001", + "detected_at": "2025-12-06T16:00:00Z", + "detected_by": "engine:guardian", + "violation_type": "unauthorized_action", + "severity": "high", + "details": { + "actor": "did:vm:agent:automation-01", + "action_attempted": "modify_receipt", + "receipt_targeted": "receipt:compliance:oracle-answer-4721", + "rule_violated": "AXIOM-001 (Append-Only Receipts)", + "action_result": "blocked" + }, + "evidence": { + "log_entries": ["..."], + "request_hash": "blake3:...", + "stack_trace": "..." + }, + "tags": ["governance", "violation", "axiom", "blocked"], + "root_hash": "blake3:ggg777..." +} +``` + +### 6.2 Enforcement Action + +```json +{ + "type": "gov_enforcement", + "enforcement_id": "ENF-2025-12-001", + "violation_id": "VIOL-2025-12-001", + "enforced_at": "2025-12-06T16:05:00Z", + "enforced_by": "engine:guardian", + "enforcement_type": "capability_suspension", + "target": "did:vm:agent:automation-01", + "action_taken": { + "capability_suspended": "write", + "scope": "all_scrolls", + "duration": "pending_review" + }, + "authority": "ARTICLE-IV.3 (Due Process) - emergency exception", + "review_required": true, + "review_deadline": "2025-12-07T16:05:00Z", + "tags": ["governance", "enforcement", "suspension"], + "root_hash": "blake3:hhh888..." +} +``` + +--- + +## 7. CLI Commands + +```bash +# Constitution +vm-gov constitution show +vm-gov constitution version +vm-gov constitution diff v1.0.0 v1.1.0 +vm-gov constitution export --format pdf + +# Proposals +vm-gov proposal create --type amendment --file proposal.json +vm-gov proposal list --status deliberation +vm-gov proposal show PROP-2025-12-001 +vm-gov proposal comment PROP-2025-12-001 --text "I support this because..." 
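+# Note: per ARTICLE-II.2, a proposal cannot move to voting until its
+# 7-day deliberation period has elapsed.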
+ +# Voting +vm-gov vote PROP-2025-12-001 --vote approve --comment "Essential change" +vm-gov vote PROP-2025-12-001 --vote reject --reason "Needs more deliberation" + +# Ratification (Sovereign only) +vm-gov ratify PROP-2025-12-001 + +# Executive Orders +vm-gov order create --type parameter_change --file order.json +vm-gov order list --active +vm-gov order show EO-2025-12-001 +vm-gov order revoke EO-2025-12-001 + +# Emergencies +vm-gov emergency declare --type security_incident --description "..." --incident INC-2025-12-001 +vm-gov emergency status +vm-gov emergency extend --hours 24 +vm-gov emergency end + +# Violations +vm-gov violations list --severity high,critical +vm-gov violations show VIOL-2025-12-001 +vm-gov violations review VIOL-2025-12-001 --decision dismiss --reason "False positive" + +# Enforcement +vm-gov enforcement list --pending-review +vm-gov enforcement review ENF-2025-12-001 --decision uphold +vm-gov enforcement review ENF-2025-12-001 --decision reverse --reason "Excessive response" +``` + +--- + +## 8. Design Gate Checklist + +| Question | Governance Answer | +|----------|-------------------| +| Clear entrypoint? | ✅ CLI (`vm-gov`), Portal routes | +| Contract produced? | ✅ Proposal documents | +| State object? | ✅ Constitution + amendment state | +| Receipts emitted? | ✅ Seven receipt types | +| Append-only JSONL? | ✅ `receipts/governance/governance_events.jsonl` | +| Merkle root? | ✅ `ROOT.governance.txt` | +| Guardian anchor path? | ✅ Governance root included in ProofChain | +| Query tool? | ✅ `vm-gov` CLI | + +--- + +## 9. Constitutional Hash Chain + +The Constitution itself is version-controlled with a hash chain: + +```json +{ + "constitution_versions": [ + { + "version": "1.0.0", + "effective_at": "2025-06-01T00:00:00Z", + "hash": "blake3:const_v1.0_abc123...", + "previous_hash": null, + "amendment_id": null + }, + { + "version": "1.1.0", + "effective_at": "2025-12-14T14:00:00Z", + "hash": "blake3:const_v1.1_def456...", + "previous_hash": "blake3:const_v1.0_abc123...", + "amendment_id": "AMEND-2025-12-001" + } + ] +} +``` + +This creates an immutable chain of constitutional states — you can always verify what the rules were at any point in time. diff --git a/docs/VAULTMESH-DEPLOYMENT-MANIFESTS.md b/docs/VAULTMESH-DEPLOYMENT-MANIFESTS.md new file mode 100644 index 0000000..8c5bc85 --- /dev/null +++ b/docs/VAULTMESH-DEPLOYMENT-MANIFESTS.md @@ -0,0 +1,1267 @@ +# VAULTMESH-DEPLOYMENT-MANIFESTS.md +**Production Infrastructure for the Civilization Ledger** + +> *A system that cannot be deployed is a system that cannot exist.* + +This document provides complete Kubernetes, Docker, and infrastructure-as-code configurations for deploying VaultMesh in production environments. + +--- + +## 1. 
Deployment Architecture + +``` +┌─────────────────────────────────────────────────────────────────────────┐ +│ KUBERNETES CLUSTER │ +├─────────────────────────────────────────────────────────────────────────┤ +│ │ +│ ┌─────────────────────────────────────────────────────────────────┐ │ +│ │ INGRESS LAYER │ │ +│ │ • Traefik / NGINX Ingress │ │ +│ │ • TLS termination (cert-manager) │ │ +│ │ • Rate limiting │ │ +│ └─────────────────────────────────────────────────────────────────┘ │ +│ │ │ +│ ┌─────────────────────────┼─────────────────────────┐ │ +│ │ │ │ │ +│ ▼ ▼ ▼ │ +│ ┌───────────────┐ ┌───────────────┐ ┌───────────────┐ │ +│ │ PORTAL │ │ GUARDIAN │ │ ORACLE │ │ +│ │ (Rust) │ │ (Rust) │ │ (Python) │ │ +│ │ │ │ │ │ │ │ +│ │ • HTTP API │ │ • Anchor │ │ • MCP Server │ │ +│ │ • WebSocket │ │ • ProofChain │ │ • LLM Client │ │ +│ │ • Auth │ │ • Sentinel │ │ • Corpus │ │ +│ └───────┬───────┘ └───────┬───────┘ └───────┬───────┘ │ +│ │ │ │ │ +│ └───────────────────────┼───────────────────────┘ │ +│ │ │ +│ ▼ │ +│ ┌─────────────────────────────────────────────────────────────────┐ │ +│ │ DATA LAYER │ │ +│ │ │ │ +│ │ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │ │ +│ │ │ PostgreSQL │ │ Redis │ │ MinIO │ │ │ +│ │ │ (receipts) │ │ (cache) │ │ (artifacts) │ │ │ +│ │ └─────────────┘ └─────────────┘ └─────────────┘ │ │ +│ │ │ │ +│ │ ┌─────────────────────────────────────────────────────────┐ │ │ +│ │ │ PERSISTENT VOLUMES │ │ │ +│ │ │ • receipts/ (JSONL files) │ │ │ +│ │ │ • cases/ (artifacts) │ │ │ +│ │ │ • corpus/ (Oracle documents) │ │ │ +│ │ └─────────────────────────────────────────────────────────┘ │ │ +│ └─────────────────────────────────────────────────────────────────┘ │ +│ │ +│ ┌─────────────────────────────────────────────────────────────────┐ │ +│ │ OBSERVABILITY LAYER │ │ +│ │ • Prometheus (metrics) │ │ +│ │ • Loki (logs) │ │ +│ │ • Tempo (traces) │ │ +│ │ • Grafana (dashboards) │ │ +│ └─────────────────────────────────────────────────────────────────┘ │ +│ │ +└─────────────────────────────────────────────────────────────────────────┘ +``` + +--- + +## 2. 
Namespace and RBAC + +### 2.1 Namespace + +```yaml +# kubernetes/base/namespace.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: vaultmesh + labels: + app.kubernetes.io/name: vaultmesh + app.kubernetes.io/part-of: civilization-ledger + pod-security.kubernetes.io/enforce: restricted + pod-security.kubernetes.io/audit: restricted + pod-security.kubernetes.io/warn: restricted +``` + +### 2.2 Service Accounts + +```yaml +# kubernetes/base/rbac.yaml +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: vaultmesh-portal + namespace: vaultmesh +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: vaultmesh-guardian + namespace: vaultmesh +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: vaultmesh-oracle + namespace: vaultmesh +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: vaultmesh-guardian-role + namespace: vaultmesh +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + resourceNames: ["guardian-anchor-keys", "guardian-config"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch", "update"] + resourceNames: ["vaultmesh-roots", "guardian-state"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: vaultmesh-guardian-binding + namespace: vaultmesh +subjects: + - kind: ServiceAccount + name: vaultmesh-guardian + namespace: vaultmesh +roleRef: + kind: Role + name: vaultmesh-guardian-role + apiGroup: rbac.authorization.k8s.io +``` + +--- + +## 3. Core Services + +### 3.1 Portal Deployment + +```yaml +# kubernetes/base/portal/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: vaultmesh-portal + namespace: vaultmesh + labels: + app.kubernetes.io/name: portal + app.kubernetes.io/component: api + app.kubernetes.io/part-of: vaultmesh +spec: + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/name: portal + template: + metadata: + labels: + app.kubernetes.io/name: portal + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9090" + prometheus.io/path: "/metrics" + spec: + serviceAccountName: vaultmesh-portal + securityContext: + runAsNonRoot: true + runAsUser: 1000 + fsGroup: 1000 + seccompProfile: + type: RuntimeDefault + containers: + - name: portal + image: ghcr.io/vaultmesh/portal:v0.1.0 + imagePullPolicy: IfNotPresent + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + ports: + - name: http + containerPort: 8080 + protocol: TCP + - name: metrics + containerPort: 9090 + protocol: TCP + env: + - name: RUST_LOG + value: "info,vaultmesh=debug" + - name: VAULTMESH_CONFIG + value: "/config/portal.toml" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: vaultmesh-db-credentials + key: portal-url + - name: REDIS_URL + valueFrom: + secretKeyRef: + name: vaultmesh-redis-credentials + key: url + volumeMounts: + - name: config + mountPath: /config + readOnly: true + - name: receipts + mountPath: /data/receipts + - name: tmp + mountPath: /tmp + resources: + requests: + cpu: 100m + memory: 256Mi + limits: + cpu: 1000m + memory: 1Gi + livenessProbe: + httpGet: + path: /health/live + port: http + initialDelaySeconds: 10 + periodSeconds: 10 + readinessProbe: + httpGet: + path: /health/ready + port: http + initialDelaySeconds: 5 + periodSeconds: 5 + volumes: + - name: config + configMap: + name: vaultmesh-portal-config + - name: receipts + persistentVolumeClaim: + claimName: vaultmesh-receipts + - name: tmp + emptyDir: {} + affinity: + 
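+        # Soft anti-affinity: prefer spreading the portal replicas across nodes,
+        # so the loss of a single node cannot take down every replica at once.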
podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/name: portal + topologyKey: kubernetes.io/hostname +--- +apiVersion: v1 +kind: Service +metadata: + name: vaultmesh-portal + namespace: vaultmesh +spec: + selector: + app.kubernetes.io/name: portal + ports: + - name: http + port: 80 + targetPort: http + - name: metrics + port: 9090 + targetPort: metrics + type: ClusterIP +``` + +### 3.2 Guardian Deployment + +```yaml +# kubernetes/base/guardian/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: vaultmesh-guardian + namespace: vaultmesh + labels: + app.kubernetes.io/name: guardian + app.kubernetes.io/component: anchor + app.kubernetes.io/part-of: vaultmesh +spec: + replicas: 1 # Single instance for anchor coordination + strategy: + type: Recreate # Ensure only one instance runs at a time + selector: + matchLabels: + app.kubernetes.io/name: guardian + template: + metadata: + labels: + app.kubernetes.io/name: guardian + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9090" + spec: + serviceAccountName: vaultmesh-guardian + securityContext: + runAsNonRoot: true + runAsUser: 1000 + fsGroup: 1000 + seccompProfile: + type: RuntimeDefault + containers: + - name: guardian + image: ghcr.io/vaultmesh/guardian:v0.1.0 + imagePullPolicy: IfNotPresent + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + ports: + - name: http + containerPort: 8081 + protocol: TCP + - name: metrics + containerPort: 9090 + protocol: TCP + env: + - name: RUST_LOG + value: "info,guardian=debug" + - name: GUARDIAN_CONFIG + value: "/config/guardian.toml" + - name: GUARDIAN_ANCHOR_KEY + valueFrom: + secretKeyRef: + name: guardian-anchor-keys + key: private-key + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: vaultmesh-db-credentials + key: guardian-url + volumeMounts: + - name: config + mountPath: /config + readOnly: true + - name: receipts + mountPath: /data/receipts + - name: guardian-state + mountPath: /data/guardian + - name: tmp + mountPath: /tmp + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 2000m + memory: 2Gi + livenessProbe: + httpGet: + path: /health/live + port: http + initialDelaySeconds: 15 + periodSeconds: 10 + readinessProbe: + httpGet: + path: /health/ready + port: http + initialDelaySeconds: 10 + periodSeconds: 5 + volumes: + - name: config + configMap: + name: vaultmesh-guardian-config + - name: receipts + persistentVolumeClaim: + claimName: vaultmesh-receipts + - name: guardian-state + persistentVolumeClaim: + claimName: vaultmesh-guardian-state + - name: tmp + emptyDir: {} +--- +apiVersion: v1 +kind: Service +metadata: + name: vaultmesh-guardian + namespace: vaultmesh +spec: + selector: + app.kubernetes.io/name: guardian + ports: + - name: http + port: 80 + targetPort: http + - name: metrics + port: 9090 + targetPort: metrics + type: ClusterIP +``` + +### 3.3 Oracle Deployment + +```yaml +# kubernetes/base/oracle/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: vaultmesh-oracle + namespace: vaultmesh + labels: + app.kubernetes.io/name: oracle + app.kubernetes.io/component: compliance + app.kubernetes.io/part-of: vaultmesh +spec: + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/name: oracle + template: + metadata: + labels: + app.kubernetes.io/name: oracle + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9090" + 
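+      # The prometheus.io/* annotations above assume a Prometheus scrape config
+      # that honors this convention; port 9090 matches the metrics containerPort.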
spec: + serviceAccountName: vaultmesh-oracle + securityContext: + runAsNonRoot: true + runAsUser: 1000 + fsGroup: 1000 + seccompProfile: + type: RuntimeDefault + containers: + - name: oracle + image: ghcr.io/vaultmesh/oracle:v0.1.0 + imagePullPolicy: IfNotPresent + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + ports: + - name: http + containerPort: 8082 + protocol: TCP + - name: mcp + containerPort: 8083 + protocol: TCP + - name: metrics + containerPort: 9090 + protocol: TCP + env: + - name: ORACLE_CONFIG + value: "/config/oracle.toml" + - name: OPENAI_API_KEY + valueFrom: + secretKeyRef: + name: oracle-llm-credentials + key: openai-key + - name: ANTHROPIC_API_KEY + valueFrom: + secretKeyRef: + name: oracle-llm-credentials + key: anthropic-key + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: vaultmesh-db-credentials + key: oracle-url + - name: VAULTMESH_RECEIPT_ENDPOINT + value: "http://vaultmesh-portal/api/receipts" + volumeMounts: + - name: config + mountPath: /config + readOnly: true + - name: corpus + mountPath: /data/corpus + readOnly: true + - name: cache + mountPath: /data/cache + - name: tmp + mountPath: /tmp + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 2000m + memory: 4Gi + livenessProbe: + httpGet: + path: /health/live + port: http + initialDelaySeconds: 20 + periodSeconds: 10 + readinessProbe: + httpGet: + path: /health/ready + port: http + initialDelaySeconds: 15 + periodSeconds: 5 + volumes: + - name: config + configMap: + name: vaultmesh-oracle-config + - name: corpus + persistentVolumeClaim: + claimName: vaultmesh-corpus + - name: cache + emptyDir: + sizeLimit: 1Gi + - name: tmp + emptyDir: {} +--- +apiVersion: v1 +kind: Service +metadata: + name: vaultmesh-oracle + namespace: vaultmesh +spec: + selector: + app.kubernetes.io/name: oracle + ports: + - name: http + port: 80 + targetPort: http + - name: mcp + port: 8083 + targetPort: mcp + - name: metrics + port: 9090 + targetPort: metrics + type: ClusterIP +``` + +--- + +## 4. 
Data Layer + +### 4.1 PostgreSQL (via CloudNativePG) + +```yaml +# kubernetes/base/database/postgresql.yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: vaultmesh-db + namespace: vaultmesh +spec: + instances: 3 + + postgresql: + parameters: + max_connections: "200" + shared_buffers: "256MB" + effective_cache_size: "768MB" + maintenance_work_mem: "128MB" + checkpoint_completion_target: "0.9" + wal_buffers: "16MB" + default_statistics_target: "100" + random_page_cost: "1.1" + effective_io_concurrency: "200" + work_mem: "6553kB" + min_wal_size: "1GB" + max_wal_size: "4GB" + + storage: + size: 50Gi + storageClass: fast-ssd + + backup: + barmanObjectStore: + destinationPath: "s3://vaultmesh-backups/postgresql" + s3Credentials: + accessKeyId: + name: vaultmesh-backup-credentials + key: access-key + secretAccessKey: + name: vaultmesh-backup-credentials + key: secret-key + wal: + compression: gzip + maxParallel: 8 + retentionPolicy: "30d" + + monitoring: + enablePodMonitor: true + + resources: + requests: + cpu: 500m + memory: 1Gi + limits: + cpu: 2000m + memory: 4Gi + +--- +apiVersion: postgresql.cnpg.io/v1 +kind: ScheduledBackup +metadata: + name: vaultmesh-db-daily-backup + namespace: vaultmesh +spec: + schedule: "0 2 * * *" + backupOwnerReference: self + cluster: + name: vaultmesh-db +``` + +### 4.2 Redis + +```yaml +# kubernetes/base/cache/redis.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: vaultmesh-redis + namespace: vaultmesh +spec: + serviceName: vaultmesh-redis + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: redis + template: + metadata: + labels: + app.kubernetes.io/name: redis + spec: + securityContext: + runAsNonRoot: true + runAsUser: 1000 + fsGroup: 1000 + containers: + - name: redis + image: redis:7-alpine + ports: + - containerPort: 6379 + command: + - redis-server + - /config/redis.conf + volumeMounts: + - name: config + mountPath: /config + - name: data + mountPath: /data + resources: + requests: + cpu: 100m + memory: 256Mi + limits: + cpu: 500m + memory: 1Gi + volumes: + - name: config + configMap: + name: vaultmesh-redis-config + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: ["ReadWriteOnce"] + storageClassName: fast-ssd + resources: + requests: + storage: 10Gi +--- +apiVersion: v1 +kind: Service +metadata: + name: vaultmesh-redis + namespace: vaultmesh +spec: + selector: + app.kubernetes.io/name: redis + ports: + - port: 6379 + targetPort: 6379 + type: ClusterIP +``` + +### 4.3 Persistent Volumes + +```yaml +# kubernetes/base/storage/persistent-volumes.yaml +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: vaultmesh-receipts + namespace: vaultmesh +spec: + accessModes: + - ReadWriteMany + storageClassName: nfs-csi + resources: + requests: + storage: 100Gi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: vaultmesh-guardian-state + namespace: vaultmesh +spec: + accessModes: + - ReadWriteOnce + storageClassName: fast-ssd + resources: + requests: + storage: 10Gi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: vaultmesh-corpus + namespace: vaultmesh +spec: + accessModes: + - ReadWriteMany + storageClassName: nfs-csi + resources: + requests: + storage: 50Gi +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: vaultmesh-cases + namespace: vaultmesh +spec: + accessModes: + - ReadWriteMany + storageClassName: nfs-csi + resources: + requests: + storage: 200Gi +``` + +--- + +## 5. 
Configuration + +### 5.1 Portal Config + +```yaml +# kubernetes/base/portal/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: vaultmesh-portal-config + namespace: vaultmesh +data: + portal.toml: | + [server] + bind = "0.0.0.0:8080" + metrics_bind = "0.0.0.0:9090" + + [database] + max_connections = 20 + min_connections = 5 + + [receipts] + base_path = "/data/receipts" + + [scrolls] + enabled = [ + "Drills", + "Compliance", + "Guardian", + "Treasury", + "Mesh", + "OffSec", + "Identity", + "Observability", + "Automation", + "PsiField", + "Federation", + "Governance", + ] + + [auth] + jwt_issuer = "vaultmesh-portal" + session_ttl_hours = 24 + + [guardian] + endpoint = "http://vaultmesh-guardian" + + [oracle] + endpoint = "http://vaultmesh-oracle" +``` + +### 5.2 Guardian Config + +```yaml +# kubernetes/base/guardian/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: vaultmesh-guardian-config + namespace: vaultmesh +data: + guardian.toml: | + [server] + bind = "0.0.0.0:8081" + metrics_bind = "0.0.0.0:9090" + + [proofchain] + receipts_path = "/data/receipts" + roots_path = "/data/receipts" + + [anchor] + # Primary anchor backend + primary = "ethereum" + + # Anchor schedule + interval_seconds = 3600 # Every hour + min_receipts_threshold = 10 + + # Ethereum configuration + [anchor.ethereum] + rpc_url = "https://mainnet.infura.io/v3/${INFURA_PROJECT_ID}" + contract_address = "0x..." + chain_id = 1 + gas_limit = 100000 + + # OpenTimestamps backup + [anchor.ots] + enabled = true + calendar_urls = [ + "https://a.pool.opentimestamps.org", + "https://b.pool.opentimestamps.org", + ] + + # Bitcoin anchor (optional, for high-value anchors) + [anchor.bitcoin] + enabled = false + rpc_url = "http://bitcoin-node:8332" + + [sentinel] + enabled = true + alert_webhook = "http://alertmanager:9093/api/v2/alerts" + + [state] + path = "/data/guardian/state.json" +``` + +### 5.3 Oracle Config + +```yaml +# kubernetes/base/oracle/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: vaultmesh-oracle-config + namespace: vaultmesh +data: + oracle.toml: | + [server] + http_bind = "0.0.0.0:8082" + mcp_bind = "0.0.0.0:8083" + metrics_bind = "0.0.0.0:9090" + + [corpus] + path = "/data/corpus" + index_path = "/data/cache/index" + supported_formats = ["docx", "pdf", "md", "txt"] + + [llm] + # Primary model + primary_provider = "anthropic" + primary_model = "claude-sonnet-4-20250514" + + # Fallback model + fallback_provider = "openai" + fallback_model = "gpt-4o" + + # Settings + temperature = 0.1 + max_tokens = 4096 + timeout_seconds = 60 + + [prompts] + version = "vm_oracle_answer_v1" + + [receipts] + endpoint = "http://vaultmesh-portal/api/receipts/oracle" + + [cache] + enabled = true + path = "/data/cache/answers" + ttl_hours = 24 +``` + +--- + +## 6. 
Ingress and TLS + +### 6.1 Ingress + +```yaml +# kubernetes/base/ingress/ingress.yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: vaultmesh-ingress + namespace: vaultmesh + annotations: + cert-manager.io/cluster-issuer: letsencrypt-prod + nginx.ingress.kubernetes.io/ssl-redirect: "true" + nginx.ingress.kubernetes.io/proxy-body-size: "50m" + nginx.ingress.kubernetes.io/proxy-read-timeout: "300" + nginx.ingress.kubernetes.io/rate-limit: "100" + nginx.ingress.kubernetes.io/rate-limit-window: "1m" +spec: + ingressClassName: nginx + tls: + - hosts: + - portal.vaultmesh.io + - guardian.vaultmesh.io + - oracle.vaultmesh.io + - federation.vaultmesh.io + secretName: vaultmesh-tls + rules: + - host: portal.vaultmesh.io + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: vaultmesh-portal + port: + name: http + - host: guardian.vaultmesh.io + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: vaultmesh-guardian + port: + name: http + - host: oracle.vaultmesh.io + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: vaultmesh-oracle + port: + name: http + - host: federation.vaultmesh.io + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: vaultmesh-portal + port: + name: http +``` + +### 6.2 Certificate + +```yaml +# kubernetes/base/ingress/certificate.yaml +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: vaultmesh-tls + namespace: vaultmesh +spec: + secretName: vaultmesh-tls + issuerRef: + name: letsencrypt-prod + kind: ClusterIssuer + dnsNames: + - portal.vaultmesh.io + - guardian.vaultmesh.io + - oracle.vaultmesh.io + - federation.vaultmesh.io +``` + +--- + +## 7. Secrets Management + +### 7.1 Sealed Secrets + +```yaml +# kubernetes/base/secrets/sealed-secrets.yaml +apiVersion: bitnami.com/v1alpha1 +kind: SealedSecret +metadata: + name: vaultmesh-db-credentials + namespace: vaultmesh +spec: + encryptedData: + portal-url: AgBy3i4OJSWK+PiTySYZZA9rO53sFO... + guardian-url: AgBy3i4OJSWK+PiTySYZZA9rO53sFO... + oracle-url: AgBy3i4OJSWK+PiTySYZZA9rO53sFO... + template: + metadata: + name: vaultmesh-db-credentials + namespace: vaultmesh +--- +apiVersion: bitnami.com/v1alpha1 +kind: SealedSecret +metadata: + name: guardian-anchor-keys + namespace: vaultmesh +spec: + encryptedData: + private-key: AgBy3i4OJSWK+PiTySYZZA9rO53sFO... + template: + metadata: + name: guardian-anchor-keys + namespace: vaultmesh +--- +apiVersion: bitnami.com/v1alpha1 +kind: SealedSecret +metadata: + name: oracle-llm-credentials + namespace: vaultmesh +spec: + encryptedData: + openai-key: AgBy3i4OJSWK+PiTySYZZA9rO53sFO... + anthropic-key: AgBy3i4OJSWK+PiTySYZZA9rO53sFO... + template: + metadata: + name: oracle-llm-credentials + namespace: vaultmesh +``` + +--- + +## 8. 
Kustomization + +### 8.1 Base Kustomization + +```yaml +# kubernetes/base/kustomization.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +namespace: vaultmesh + +resources: + - namespace.yaml + - rbac.yaml + - portal/deployment.yaml + - portal/configmap.yaml + - guardian/deployment.yaml + - guardian/configmap.yaml + - oracle/deployment.yaml + - oracle/configmap.yaml + - database/postgresql.yaml + - cache/redis.yaml + - storage/persistent-volumes.yaml + - ingress/ingress.yaml + - ingress/certificate.yaml + - secrets/sealed-secrets.yaml + +commonLabels: + app.kubernetes.io/part-of: vaultmesh + app.kubernetes.io/managed-by: kustomize +``` + +### 8.2 Production Overlay + +```yaml +# kubernetes/overlays/production/kustomization.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +namespace: vaultmesh + +resources: + - ../../base + +patches: + - path: portal-resources.yaml + - path: guardian-resources.yaml + - path: oracle-resources.yaml + +configMapGenerator: + - name: vaultmesh-portal-config + behavior: merge + files: + - portal.toml=configs/portal-prod.toml + - name: vaultmesh-guardian-config + behavior: merge + files: + - guardian.toml=configs/guardian-prod.toml + +replicas: + - name: vaultmesh-portal + count: 3 + - name: vaultmesh-oracle + count: 3 +``` + +```yaml +# kubernetes/overlays/production/portal-resources.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: vaultmesh-portal +spec: + template: + spec: + containers: + - name: portal + resources: + requests: + cpu: 500m + memory: 1Gi + limits: + cpu: 2000m + memory: 4Gi +``` + +--- + +## 9. Docker Compose (Development) + +```yaml +# docker-compose.yaml +version: "3.9" + +services: + portal: + build: + context: . + dockerfile: docker/portal/Dockerfile + ports: + - "8080:8080" + - "9090:9090" + environment: + - RUST_LOG=info,vaultmesh=debug + - VAULTMESH_CONFIG=/config/portal.toml + - DATABASE_URL=postgresql://vaultmesh:vaultmesh@postgres:5432/vaultmesh + - REDIS_URL=redis://redis:6379 + volumes: + - ./config/portal.toml:/config/portal.toml:ro + - receipts:/data/receipts + depends_on: + postgres: + condition: service_healthy + redis: + condition: service_started + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8080/health/live"] + interval: 10s + timeout: 5s + retries: 5 + + guardian: + build: + context: . + dockerfile: docker/guardian/Dockerfile + ports: + - "8081:8081" + environment: + - RUST_LOG=info,guardian=debug + - GUARDIAN_CONFIG=/config/guardian.toml + - DATABASE_URL=postgresql://vaultmesh:vaultmesh@postgres:5432/vaultmesh + volumes: + - ./config/guardian.toml:/config/guardian.toml:ro + - receipts:/data/receipts + - guardian-state:/data/guardian + depends_on: + portal: + condition: service_healthy + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8081/health/live"] + interval: 10s + timeout: 5s + retries: 5 + + oracle: + build: + context: . 
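+      # Build context is the repository root (same as portal and guardian), so
+      # COPY paths in docker/oracle/Dockerfile resolve from the repo root.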
+ dockerfile: docker/oracle/Dockerfile + ports: + - "8082:8082" + - "8083:8083" + environment: + - ORACLE_CONFIG=/config/oracle.toml + - OPENAI_API_KEY=${OPENAI_API_KEY} + - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY} + - VAULTMESH_RECEIPT_ENDPOINT=http://portal:8080/api/receipts + volumes: + - ./config/oracle.toml:/config/oracle.toml:ro + - ./corpus:/data/corpus:ro + depends_on: + portal: + condition: service_healthy + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8082/health/live"] + interval: 10s + timeout: 5s + retries: 5 + + postgres: + image: postgres:16-alpine + environment: + - POSTGRES_USER=vaultmesh + - POSTGRES_PASSWORD=vaultmesh + - POSTGRES_DB=vaultmesh + volumes: + - postgres-data:/var/lib/postgresql/data + - ./docker/postgres/init.sql:/docker-entrypoint-initdb.d/init.sql:ro + healthcheck: + test: ["CMD-SHELL", "pg_isready -U vaultmesh"] + interval: 5s + timeout: 5s + retries: 5 + + redis: + image: redis:7-alpine + volumes: + - redis-data:/data + command: redis-server --appendonly yes + + prometheus: + image: prom/prometheus:v2.47.0 + ports: + - "9091:9090" + volumes: + - ./config/prometheus.yaml:/etc/prometheus/prometheus.yml:ro + - prometheus-data:/prometheus + command: + - '--config.file=/etc/prometheus/prometheus.yml' + - '--storage.tsdb.path=/prometheus' + - '--web.enable-lifecycle' + + grafana: + image: grafana/grafana:10.1.0 + ports: + - "3000:3000" + environment: + - GF_SECURITY_ADMIN_PASSWORD=admin + - GF_USERS_ALLOW_SIGN_UP=false + volumes: + - ./config/grafana/provisioning:/etc/grafana/provisioning:ro + - grafana-data:/var/lib/grafana + depends_on: + - prometheus + +volumes: + receipts: + guardian-state: + postgres-data: + redis-data: + prometheus-data: + grafana-data: + +networks: + default: + name: vaultmesh +``` diff --git a/docs/VAULTMESH-ETERNAL-PATTERN.md b/docs/VAULTMESH-ETERNAL-PATTERN.md new file mode 100644 index 0000000..ac2f721 --- /dev/null +++ b/docs/VAULTMESH-ETERNAL-PATTERN.md @@ -0,0 +1,507 @@ +# VAULTMESH-ETERNAL-PATTERN.md + +**Canonical Design Pattern for All VaultMesh Subsystems** + +> *Every serious subsystem in VaultMesh should feel different in flavor, but identical in **shape**.* + +This document defines that shared shape — the **Eternal Pattern**. + +It is the architectural law that binds Drills, Oracle, Guardian, Treasury, Mesh, and any future module into one Civilization Ledger. + +--- + +## 1. Core Idea (One-Line Contract) + +All VaultMesh subsystems follow this arc: + +> **Real-world intent → Engine → Structured JSON → Receipt → Scroll → Guardian Anchor** + +If a new feature does **not** fit this pattern, it's either: + +- not finished yet, or +- not part of the Ledger core. + +--- + +## 2. Three-Layer VaultMesh Stack + +At the highest level, VaultMesh is three stacked layers: + +``` +┌───────────────────────────────────────────────┐ +│ L1 — Experience Layer │ +│ (Humans & Agents) │ +│ • CLI / UI / MCP tools / agents │ +│ • "Ask a question", "start a drill", │ +│ "anchor now", "run settlement" │ +└───────────────────────────────────────────────┘ + │ + ▼ +┌───────────────────────────────────────────────┐ +│ L2 — Engine Layer │ +│ (Domain Engines & Contracts) │ +│ • Domain logics: Drills, Oracle, Guardian, │ +│ Treasury, Mesh, OffSec, etc. 
│
+│  • Contracts (plans)                         │
+│  • Runners (state machines)                  │
+│  • State JSON (progress)                     │
+└───────────────────────────────────────────────┘
+                     │
+                     ▼
+┌───────────────────────────────────────────────┐
+│  L3 — Ledger Layer                            │
+│  (Receipts, Scrolls, ProofChain, Anchors)     │
+│  • Receipts in append-only JSONL files        │
+│  • Scrolls per domain (Drills, Compliance,    │
+│    Guardian, Treasury, Mesh, etc.)            │
+│  • Merkle roots (ROOT.<scroll>.txt)           │
+│  • Guardian anchor cycles (local/OTS/chain)   │
+└───────────────────────────────────────────────┘
+```
+
+Everything you build plugs into this stack.
+
+---
+
+## 3. Eternal Pattern — Generic Lifecycle
+
+This is the reusable template for any new subsystem "X".
+
+### 3.1 Experience Layer (L1) — Intent In
+
+**Goal**: Take messy human/agent intent and normalize it.
+
+**Typical surfaces**:
+- CLI (`vm-drills`, `vm-oracle`, `guardian`, `vm-treasury`, etc.)
+- MCP tools (e.g. `oracle_answer`)
+- Web / TUI / dashboard
+- Automation hooks (cron, CI, schedulers)
+
+**Typical inputs**:
+- "Run a security drill for IoT ↔ OT"
+- "Are we compliant with Annex IV today?"
+- "Anchor this ProofChain root"
+- "Reconcile treasury balances between nodes"
+- "Apply this mesh topology change"
+
+**L1 should**:
+- Capture the raw intent
+- Attach minimal context (who, when, where)
+- Hand it off to the appropriate Engine in L2
+
+---
+
+### 3.2 Engine Layer (L2) — Plan and Execute
+
+Every Engine follows the same internal shape:
+
+#### Step 1 — Plan → `contract.json`
+
+An Engine takes the intent and creates a **contract**:
+
+`contract.json` (or equivalent JSON struct) contains:
+- `id`: unique contract / drill / run id
+- `title`: short human title
+- `severity` / `priority` (optional, domain-specific)
+- `stages[]` / `steps[]`:
+  - ordered id, skill / module, workflow, role, objective
+- high-level `objectives[]`
+
+**Example** (Drill contract snippet):
+
+```json
+{
+  "id": "drill-1764691390",
+  "title": "IoT device bridging into OT with weak detection",
+  "stages": [
+    {
+      "id": "stage-1-iot-wireless-security",
+      "order": 1,
+      "skill": "iot-wireless-security",
+      "workflow": "IoT Device Recon and Fingerprinting",
+      "role": "primary"
+    },
+    {
+      "id": "stage-2-ot-ics-security",
+      "order": 2,
+      "skill": "ot-ics-security",
+      "workflow": "OT Asset and Network Mapping",
+      "role": "supporting"
+    }
+  ]
+}
+```
+
+For Oracle, the "contract" can be implicit:
+- the `vm_oracle_answer_v1` payload is itself the "answer contract".
+
+For future Engines (Treasury, Mesh), mirror the same concept:
+- plan file describing what will happen.
+
+#### Step 2 — Execute → `state.json` + `outputs/`
+
+A **runner** component walks through the contract and tracks reality.
+
+**Typical commands**:
+- `init contract.json` → `state.json`
+- `next state.json` → show next stage/checklist
+- `complete-stage --outputs ...` → update `state.json`
+
+The state file (e.g. `drill_state.json`) should contain:
+- `drill_id` / `run_id`
+- `status` (`pending` | `in_progress` | `completed` | `aborted`)
+- `stages[]` with status, timestamps, attached outputs
+- `created_at`, `updated_at`
+- optional `tags` / `context`
+
+**Example** (simplified):
+
+```json
+{
+  "drill_id": "drill-1764691390",
+  "status": "completed",
+  "created_at": "2025-12-02T10:03:00Z",
+  "updated_at": "2025-12-02T11:45:00Z",
+  "stages": [
+    {
+      "id": "stage-1-iot-wireless-security",
+      "status": "completed",
+      "outputs": [
+        "inventory.yaml",
+        "topology.png",
+        "findings.md"
+      ]
+    }
+  ]
+}
+```
+
+Runners exist today for Drills; the same pattern will apply to other Engines.
+
+#### Step 3 — Seal → Receipts
+
+A **sealer** takes:
+- `contract.json`
+- `state.json`
+- `outputs/` (optional, usually via manifest or aggregated hash)
+
+And produces a **receipt** in L3.
+
+**Example** (Drills sealer behavior):
+- Copies contract + state into `cases/drills/<drill_id>/`
+- Mirrors `outputs/`
+- Computes blake3 or similar hash over `drill_state.json` (and later outputs manifest)
+- Derives summary metrics:
+  - `status`
+  - `stages_total`
+  - `stages_completed`
+  - unique domains / workflows
+- Appends a receipt entry to `receipts/drills/drill_runs.jsonl`
+- Calls a generic receipts Merkle updater to update `ROOT.drills.txt`
+- Optionally triggers ANCHOR via Guardian
+
+This "seal" step is what promotes local execution into **civilization evidence**.
+
+---
+
+### 3.3 Ledger Layer (L3) — Scrolls, Roots, Anchors
+
+L3 is shared by all subsystems; only field names differ.
+
+#### 3.3.1 Scrolls
+
+A **scroll** is just a logical ledger space for a domain.
+
+**Examples**:
+- `Drills` (security drills and exercises)
+- `Compliance` (Oracle answers)
+- `Guardian` (anchor events, healing proofs)
+- `Treasury` (credit/debit/settlements)
+- `Mesh` (topology & configuration changes)
+- `OffSec` (real incident & red-team case receipts)
+- `Identity` (DIDs, credentials, auth events)
+- `Observability` (metrics, logs, traces, alerts)
+- `Automation` (workflow executions, approvals)
+
+Each scroll has:
+- 1+ JSONL files under `receipts/<scroll>/`
+- 1 Merkle root file `ROOT.<scroll>.txt`
+
+#### 3.3.2 Receipts
+
+Receipts are append-only JSON objects with at least:
+- `type`: operation type (e.g. `security_drill_run`, `oracle_answer`)
+- domain-specific fields
+- one or more hash fields:
+  - `root_hash` / `answer_hash` / etc. 
+- optional `tags`: + - `tags: [ "drill", "iot", "kubernetes" ]` + +**Drill Receipt** (shape): + +```json +{ + "type": "security_drill_run", + "drill_id": "drill-1764691390", + "prompt": "IoT device bridging into OT network with weak detection", + "timestamp_started": "2025-12-02T10:03:00Z", + "timestamp_completed": "2025-12-02T11:45:00Z", + "status": "completed", + "stages_total": 3, + "stages_completed": 3, + "domains": ["iot-wireless-security", "ot-ics-security", "detection-defense-ir"], + "workflows": [ + "IoT Device Recon and Fingerprinting", + "OT Asset and Network Mapping", + "IR Triage and Containment" + ], + "severity": "unknown", + "tags": ["drill", "iot", "ot", "detection"], + "root_hash": "", + "proof_path": "cases/drills/drill-1764691390/PROOF.json", + "artifacts_manifest": "cases/drills/drill-1764691390/ARTIFACTS.sha256" +} +``` + +**Oracle Answer Receipt** (shape): + +```json +{ + "scroll": "Compliance", + "issuer": "did:vm:node:oracle-01", + "body": { + "op_type": "oracle_answer", + "question": "Are we compliant with Annex IV?", + "model_id": "gpt-4.1", + "citations_used": ["VM-AI-TECHDOC-001 §4.2", "..."], + "compliance_flags": { + "insufficient_context": false, + "ambiguous_requirements": true, + "out_of_scope_question": false + }, + "answer_hash": "blake3:...", + "context_docs": ["VM-AI-TECHDOC-001_Annex_IV_Technical_Documentation.docx"], + "frameworks": ["AI_Act"], + "extra": { + "version": "v0.5.0", + "prompt_version": "vm_oracle_answer_v1" + } + } +} +``` + +#### 3.3.3 ProofChain & Guardian Anchors + +- A receipts update tool (or ProofChain engine) computes Merkle roots over each scroll's JSONL. +- Guardian sees the new root via `ProofChain.current_root_hex()`. +- Guardian's Anchor module: + - Submits `root_hex` → anchor backend (HTTP/CLI/blockchain/OTS) + - Keeps an internal `AnchorStatus` (`last_root`, `last_anchor_id`, `count`). + - Emits `SecurityEvents` (`AnchorSuccess`, `AnchorFailure`, `AnchorDivergence`). + +--- + +## 4. Existing Subsystems Mapped to the Pattern + +### 4.1 Security Drills (Security Lab Suite) + +**Experience (L1)**: +- `security_lab_router.py` (select skill) +- `security_lab_chain_engine.py` (multi-skill chain) +- CLI usage: + - `security_lab_chain_engine.py --contract "prompt"` → `contract.json` + - `security_lab_drill_runner.py init/next/complete-stage` + +**Engine (L2)**: +- `contract.json` (drill plan) +- `drill_state.json` (progress) +- Runners hydrate stages from runbooks (actions, expected_outputs). + +**Ledger (L3)**: +- `security_drill_seal_run.py`: + - Syncs case directory + - Hashes state + - Appends drill receipt → `receipts/drills/drill_runs.jsonl` + - Updates `ROOT.drills.txt` + - Optionally auto-anchors using existing anchor scripts. + +--- + +### 4.2 Oracle Node (Compliance Appliance) + +**Experience (L1)**: +- MCP server exposing `oracle_answer` tool. + +**Engine (L2)**: +- Corpus loader/search: `corpus/loader.py`, `corpus/search.py` +- Prompt + schema: `prompts/` (`vm_oracle_answer_v1`, `build_oracle_prompt()`) +- LLM abstraction: `oracle/llm.py` +- End-to-end: + - question → context → prompt → LLM JSON → schema validation. + +**Ledger (L3)**: +- `emit_oracle_answer_receipt()` +- Hash: + - `answer_hash = "blake3:" + blake3(canonical_answer_json).hexdigest()` +- Receipts POSTed to `VAULTMESH_RECEIPT_ENDPOINT` (e.g. `/api/receipts/oracle`). +- Scroll: `Compliance`. 
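+
+The hash-then-emit step above can be sketched in a few lines of Python. This is a minimal illustration, assuming the `blake3` and `requests` packages; the helper names and receipt shape are illustrative, not the normative emitter:
+
+```python
+import json
+import os
+
+import blake3    # assumption: the blake3 PyPI package provides blake3.blake3()
+import requests  # assumption: receipts are POSTed over HTTP as described above
+
+
+def canonical_json(obj: dict) -> bytes:
+    # Sorted keys + compact separators: identical answers hash identically.
+    return json.dumps(obj, sort_keys=True, separators=(",", ":")).encode("utf-8")
+
+
+def emit_oracle_answer_receipt(answer_body: dict) -> str:
+    # answer_hash = "blake3:" + blake3(canonical_answer_json).hexdigest()
+    answer_hash = "blake3:" + blake3.blake3(canonical_json(answer_body)).hexdigest()
+    receipt = {"scroll": "Compliance", "body": {**answer_body, "answer_hash": answer_hash}}
+    endpoint = os.environ["VAULTMESH_RECEIPT_ENDPOINT"]  # e.g. .../api/receipts/oracle
+    requests.post(endpoint, json=receipt, timeout=10).raise_for_status()
+    return answer_hash
+```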
+
+---
+
+### 4.3 Guardian (Anchor-Integrated Sentinel)
+
+**Experience (L1)**:
+- `guardian_cli`:
+  - `guardian anchor-status`
+  - `guardian anchor-now` (with capability)
+- Portal HTTP routes:
+  - `GET /guardian/anchor-status`
+  - `POST /guardian/anchor-now`
+
+**Engine (L2)**:
+- Rust crate `guardian`:
+  - Holds `ProofChain`, `AnchorClient`, `AnchorVerifier`, config.
+  - `run_anchor_cycle(&ProofChain)` → `AnchorVerdict`
+  - `spawn_anchor_task()` for periodic anchoring.
+
+**Ledger (L3)**:
+- Anchors + anchor events:
+  - `anchor_success`/`failure`/`divergence` events.
+  - Can be streamed into `receipts/guardian/anchor_events.jsonl` with `ROOT.guardian.txt` and anchored further (if desired).
+
+---
+
+## 5. Adding New Domains (Treasury, Mesh, OffSec, etc.)
+
+When adding a new subsystem "X" (e.g. Treasury, Mesh), follow this checklist.
+
+### 5.1 Scroll Definition
+
+1. **Pick a scroll name**:
+   - Treasury / Mesh / OffSec / Identity / Observability / Automation, etc.
+
+2. **Define**:
+   - JSONL path: `receipts/<scroll>/<events>.jsonl`
+   - Root file: `ROOT.<scroll>.txt`
+
+3. **Define 1–3 receipt types**:
+   - Treasury:
+     - `treasury_credit`, `treasury_debit`, `treasury_settlement`
+   - Mesh:
+     - `mesh_route_change`, `mesh_node_join`, `mesh_node_leave`
+
+### 5.2 Engine API
+
+For each engine:
+- **Define a Plan API**:
+  - `*_plan_*.py` → produce `contract.json`
+- **Define a Runner**:
+  - `*_runner.py` → manage `state.json` + `outputs/`
+- **Define a Sealer**:
+  - `*_seal_*.py` → write receipts, update roots, maybe anchor.
+
+### 5.3 Query CLI
+
+Add a small query layer:
+- Treasury:
+  - `treasury_query_runs.py`:
+    - filters: node, asset, date range, tags.
+- Mesh:
+  - `mesh_query_changes.py`:
+    - filters: node, segment, change type, date.
+
+This makes scrolls self-explaining and agent-friendly.
+
+---
+
+## 6. Design Gate: "Is It Aligned With the Eternal Pattern?"
+
+Use this quick checklist whenever you design a new feature or refactor an old one.
+
+### 6.1 Experience Layer
+
+- [ ] Is there a clear entrypoint (CLI, MCP tool, HTTP route)?
+- [ ] Is the intent clearly represented in a structured form (arguments, payload, contract)?
+
+### 6.2 Engine Layer
+
+- [ ] Does the subsystem produce a contract (even if implicit)?
+- [ ] Is there a state object tracking progress or outcomes?
+- [ ] Are the actions and outputs visible and inspectable (e.g. via JSON + files)?
+
+### 6.3 Ledger Layer
+
+- [ ] Does the subsystem emit a receipt for its important operations?
+- [ ] Are receipts written to an append-only JSONL file?
+- [ ] Is the JSONL covered by a Merkle root in `ROOT.<scroll>.txt`?
+- [ ] Does Guardian have a way to anchor the relevant root(s)?
+- [ ] Is there/will there be a simple query tool for this scroll?
+
+**If any of these is "no", you have a clear next step.**
+
+---
+
+## 7. Future Extensions (Stable Pattern, Evolving Domains)
+
+The Eternal Pattern is deliberately minimal:
+- It doesn't care what chain you anchor to.
+- It doesn't care which LLM model you use.
+- It doesn't care whether the Runner is human-driven or fully autonomous.
+
+As VaultMesh evolves, you can:
+- **Swap LLMs** → Oracle stays the same; receipts remain valid.
+- **Swap anchor backends** (OTS, Ethereum, Bitcoin, custom chain) → roots remain valid.
+- **Add automated agents** (vm-copilot, OffSec agents, Mesh guardians) → they all just become more Experience Layer clients of the same Engine + Ledger.
+
+**The shape does not change.**
+
+---
+
+## 8. 
Short Human Explanation (for README / Auditors) + +VaultMesh treats every serious operation — a security drill, a compliance answer, an anchor event, a treasury transfer — as a small story with a beginning, middle, and end: + +1. A **human or agent** expresses intent +2. An **engine** plans and executes the work, tracking state +3. The outcome is **sealed** into an append-only ledger, hashed, merklized, and anchored + +This pattern — **Intent → Engine → Receipt → Scroll → Anchor** — is the same across all domains. + +It's what makes VaultMesh composable, auditable, and explainable to both humans and machines. + +--- + +## 9. Engine Specifications Index + +The following engine specifications implement the Eternal Pattern: + +| Engine | Scroll | Description | Receipt Types | +|--------|--------|-------------|---------------| +| [VAULTMESH-CONSOLE-ENGINE.md](./VAULTMESH-CONSOLE-ENGINE.md) | `Console` | AI agent sessions, code operations, sovereign development | `console_session_start`, `console_session_end`, `console_command`, `console_file_edit`, `console_tool_call`, `console_approval`, `console_git_commit`, `console_agent_spawn` | +| [VAULTMESH-MESH-ENGINE.md](./VAULTMESH-MESH-ENGINE.md) | `Mesh` | Federation topology, node management, routes, capabilities | `mesh_node_join`, `mesh_node_leave`, `mesh_route_change`, `mesh_capability_grant`, `mesh_capability_revoke`, `mesh_topology_snapshot` | +| [VAULTMESH-OFFSEC-ENGINE.md](./VAULTMESH-OFFSEC-ENGINE.md) | `OffSec` | Security incidents, red team engagements, vulnerability tracking | `offsec_incident`, `offsec_redteam`, `offsec_vuln_discovery`, `offsec_remediation`, `offsec_threat_intel`, `offsec_forensic_snapshot` | +| [VAULTMESH-IDENTITY-ENGINE.md](./VAULTMESH-IDENTITY-ENGINE.md) | `Identity` | DIDs, verifiable credentials, authentication, authorization | `identity_did_create`, `identity_did_rotate`, `identity_did_revoke`, `identity_credential_issue`, `identity_credential_revoke`, `identity_auth_event`, `identity_authz_decision` | +| [VAULTMESH-OBSERVABILITY-ENGINE.md](./VAULTMESH-OBSERVABILITY-ENGINE.md) | `Observability` | Metrics, logs, traces, alerts, SLOs | `obs_metric_snapshot`, `obs_log_batch`, `obs_trace_complete`, `obs_alert_fired`, `obs_alert_resolved`, `obs_slo_report`, `obs_anomaly_detected` | +| [VAULTMESH-AUTOMATION-ENGINE.md](./VAULTMESH-AUTOMATION-ENGINE.md) | `Automation` | n8n workflows, schedules, triggers, approvals | `auto_workflow_register`, `auto_workflow_execute`, `auto_workflow_complete`, `auto_schedule_create`, `auto_trigger_fire`, `auto_approval_request`, `auto_approval_decision` | +| [VAULTMESH-PSI-FIELD-ENGINE.md](./VAULTMESH-PSI-FIELD-ENGINE.md) | `PsiField` | Alchemical consciousness, phase transitions, transmutations | `psi_phase_transition`, `psi_emergence_event`, `psi_transmutation`, `psi_resonance`, `psi_integration`, `psi_oracle_insight` | +| [VAULTMESH-FEDERATION-PROTOCOL.md](./VAULTMESH-FEDERATION-PROTOCOL.md) | `Federation` | Cross-mesh trust, witness verification, cross-anchoring | `fed_trust_proposal`, `fed_trust_established`, `fed_trust_revoked`, `fed_witness_event`, `fed_cross_anchor`, `fed_schema_sync` | +| [VAULTMESH-CONSTITUTIONAL-GOVERNANCE.md](./VAULTMESH-CONSTITUTIONAL-GOVERNANCE.md) | `Governance` | Constitutional rules, amendments, enforcement, violations | `gov_proposal`, `gov_vote`, `gov_ratification`, `gov_amendment`, `gov_executive_order`, `gov_violation`, `gov_enforcement` | + +**Implementation Reference**: +| Document | Description | +|----------|-------------| +| 
[VAULTMESH-IMPLEMENTATION-SCAFFOLDS.md](./VAULTMESH-IMPLEMENTATION-SCAFFOLDS.md) | Rust structs, Python CLI, directory structure | +| [VAULTMESH-MCP-SERVERS.md](./VAULTMESH-MCP-SERVERS.md) | MCP server implementations for Claude integration, tool definitions, gateway | +| [VAULTMESH-DEPLOYMENT-MANIFESTS.md](./VAULTMESH-DEPLOYMENT-MANIFESTS.md) | Kubernetes manifests, Docker Compose, infrastructure-as-code | +| [VAULTMESH-MONITORING-STACK.md](./VAULTMESH-MONITORING-STACK.md) | Prometheus config, Grafana dashboards, alerting rules, metrics | +| [VAULTMESH-TESTING-FRAMEWORK.md](./VAULTMESH-TESTING-FRAMEWORK.md) | Property-based tests, integration tests, chaos tests, fixtures | +| [VAULTMESH-MIGRATION-GUIDE.md](./VAULTMESH-MIGRATION-GUIDE.md) | Version upgrades, migration scripts, rollback procedures | + +Each engine specification follows the same structure: +1. **Scroll Definition** (JSONL path, root file, receipt types) +2. **Core Concepts** (domain-specific entities) +3. **Mapping to Eternal Pattern** (L1, L2, L3) +4. **Query Interface** +5. **Design Gate Checklist** +6. **Integration Points** +7. **Future Extensions** diff --git a/docs/VAULTMESH-FEDERATION-PROTOCOL.md b/docs/VAULTMESH-FEDERATION-PROTOCOL.md new file mode 100644 index 0000000..108a1eb --- /dev/null +++ b/docs/VAULTMESH-FEDERATION-PROTOCOL.md @@ -0,0 +1,560 @@ +# VAULTMESH-FEDERATION-PROTOCOL.md +**Cross-Mesh Trust and Receipt Sharing** + +> *Sovereign meshes that verify each other become civilizations that remember together.* + +The Federation Protocol defines how independent VaultMesh deployments establish trust, share receipts, and create a network of mutually-witnessing civilization ledgers. + +--- + +## 1. Federation Philosophy + +### 1.1 Sovereignty First + +Each VaultMesh instance is **sovereign** — it controls its own: +- Identity roots +- Anchor backends +- Governance rules +- Data retention +- Access policies + +Federation doesn't compromise sovereignty. It creates **voluntary witness relationships** where meshes choose to verify and attest to each other's receipts. + +### 1.2 The Witness Network + +``` +┌─────────────────┐ ┌─────────────────┐ +│ VaultMesh-A │◄───────►│ VaultMesh-B │ +│ (Dublin) │ witness │ (Berlin) │ +└────────┬────────┘ └────────┬────────┘ + │ │ + │ witness │ + │ ┌─────────────────┐ │ + └───►│ VaultMesh-C │◄───┘ + │ (Singapore) │ + └─────────────────┘ +``` + +When Mesh-A anchors a root, Mesh-B and Mesh-C can: +1. Verify the anchor independently +2. Record their verification as a receipt +3. Include Mesh-A's root in their own anchor cycles + +This creates **redundant civilizational memory** — even if one mesh is compromised or lost, the others retain witnessed evidence. + +### 1.3 Trust Levels + +| Level | Name | Description | Use Case | +|-------|------|-------------|----------| +| 0 | `isolated` | No federation | Air-gapped deployments | +| 1 | `observe` | Read-only witness | Public audit | +| 2 | `verify` | Mutual verification | Partner organizations | +| 3 | `attest` | Cross-attestation | Compliance networks | +| 4 | `integrate` | Shared scrolls | Tight federation | + +--- + +## 2. Federation Scroll + +| Property | Value | +|----------|-------| +| **Scroll Name** | `Federation` | +| **JSONL Path** | `receipts/federation/federation_events.jsonl` | +| **Root File** | `ROOT.federation.txt` | +| **Receipt Types** | `fed_trust_proposal`, `fed_trust_established`, `fed_trust_revoked`, `fed_witness_event`, `fed_cross_anchor`, `fed_schema_sync` | + +--- + +## 3. 
Trust Establishment Protocol + +### 3.1 Phase 1: Discovery + +Meshes discover each other via: +- Manual configuration +- DNS-based discovery (`_vaultmesh._tcp.example.com`) +- DHT announcement (for public meshes) + +**Discovery Record**: +```json +{ + "mesh_id": "did:vm:mesh:vaultmesh-berlin", + "display_name": "VaultMesh Berlin Node", + "endpoints": { + "federation": "https://federation.vaultmesh-berlin.io", + "verification": "https://verify.vaultmesh-berlin.io" + }, + "public_key": "ed25519:z6Mk...", + "scrolls_available": ["Drills", "Compliance", "Treasury"], + "trust_policy": { + "accepts_proposals": true, + "min_trust_level": 1, + "requires_mutual": true + }, + "attestations": [ + { + "attester": "did:vm:mesh:vaultmesh-dublin", + "attested_at": "2025-06-01T00:00:00Z", + "attestation_type": "identity_verified" + } + ] +} +``` + +### 3.2 Phase 2: Proposal + +Mesh-A proposes federation to Mesh-B: + +**Trust Proposal**: +```json +{ + "proposal_id": "fed-proposal-2025-12-06-001", + "proposer": "did:vm:mesh:vaultmesh-dublin", + "target": "did:vm:mesh:vaultmesh-berlin", + "proposed_at": "2025-12-06T10:00:00Z", + "expires_at": "2025-12-13T10:00:00Z", + "proposed_trust_level": 2, + "proposed_terms": { + "scrolls_to_share": ["Compliance"], + "verification_frequency": "hourly", + "retention_period_days": 365, + "data_jurisdiction": "EU", + "audit_rights": true + }, + "proposer_attestations": { + "identity_proof": "...", + "capability_proof": "...", + "compliance_credentials": ["ISO27001", "SOC2"] + }, + "signature": { + "algorithm": "Ed25519", + "value": "z58D..." + } +} +``` + +### 3.3 Phase 3: Negotiation + +Target mesh reviews and may counter-propose: + +**Counter-Proposal**: +```json +{ + "proposal_id": "fed-proposal-2025-12-06-001", + "response_type": "counter", + "responder": "did:vm:mesh:vaultmesh-berlin", + "responded_at": "2025-12-06T14:00:00Z", + "counter_terms": { + "scrolls_to_share": ["Compliance", "Drills"], + "verification_frequency": "daily", + "retention_period_days": 180, + "additional_requirement": "quarterly_audit_call" + }, + "signature": "z47C..." +} +``` + +### 3.4 Phase 4: Establishment + +Both parties sign the final agreement: + +**Federation Agreement**: +```json +{ + "agreement_id": "fed-agreement-2025-12-06-001", + "parties": [ + "did:vm:mesh:vaultmesh-dublin", + "did:vm:mesh:vaultmesh-berlin" + ], + "established_at": "2025-12-06T16:00:00Z", + "trust_level": 2, + "terms": { + "scrolls_shared": ["Compliance", "Drills"], + "verification_frequency": "daily", + "retention_period_days": 180, + "data_jurisdiction": "EU", + "audit_rights": true, + "dispute_resolution": "arbitration_zurich" + }, + "key_exchange": { + "dublin_federation_key": "ed25519:z6MkDublin...", + "berlin_federation_key": "ed25519:z6MkBerlin..." + }, + "signatures": { + "did:vm:mesh:vaultmesh-dublin": { + "signed_at": "2025-12-06T15:30:00Z", + "signature": "z58D..." + }, + "did:vm:mesh:vaultmesh-berlin": { + "signed_at": "2025-12-06T16:00:00Z", + "signature": "z47C..." + } + }, + "agreement_hash": "blake3:abc123..." +} +``` + +### 3.5 Phase 5: Activation + +Both meshes: +1. Store the agreement in their Federation scroll +2. Exchange current Merkle roots +3. Begin scheduled verification cycles +4. Emit `fed_trust_established` receipt + +--- + +## 4. Witness Protocol + +### 4.1 Verification Cycle + +``` +┌─────────────┐ ┌─────────────┐ +│ Mesh-A │ │ Mesh-B │ +│ (Dublin) │ │ (Berlin) │ +└──────┬──────┘ └──────┬──────┘ + │ │ + │ 1. Anchor cycle completes │ + │ ROOT.compliance.txt updated │ + │ │ + │ 2. 
POST /federation/notify │ + │────────────────────────────────►│ + │ { │ + │ scroll: "Compliance", │ + │ root: "blake3:aaa...", │ + │ anchor_proof: {...} │ + │ } │ + │ │ + │ │ 3. Verify anchor proof + │ │ against known backends + │ │ + │ │ 4. Optionally fetch + │ │ receipt samples + │ │ + │ 5. POST /federation/witness │ + │◄────────────────────────────────│ + │ { │ + │ witnessed_root: "blake3:aaa",│ + │ witness_result: "verified", │ + │ witness_signature: "z47C..." │ + │ } │ + │ │ + │ 6. Store witness receipt │ + │ │ + └──────────────────────────────────┘ +``` + +### 4.2 Witness Receipt + +```json +{ + "type": "fed_witness_event", + "witness_id": "witness-2025-12-06-001", + "witnessed_mesh": "did:vm:mesh:vaultmesh-dublin", + "witnessing_mesh": "did:vm:mesh:vaultmesh-berlin", + "timestamp": "2025-12-06T12:05:00Z", + "scroll": "Compliance", + "witnessed_root": "blake3:aaa111...", + "witnessed_anchor": { + "backend": "ethereum", + "tx_hash": "0x123...", + "block_number": 12345678 + }, + "verification_method": "anchor_proof_validation", + "verification_result": "verified", + "samples_checked": 5, + "discrepancies": [], + "witness_signature": "z47C...", + "tags": ["federation", "witness", "compliance", "verified"], + "root_hash": "blake3:bbb222..." +} +``` + +### 4.3 Cross-Anchor + +At trust level 3+, meshes can include each other's roots in their anchor cycles: + +**Cross-Anchor Receipt**: +```json +{ + "type": "fed_cross_anchor", + "cross_anchor_id": "cross-anchor-2025-12-06-001", + "anchoring_mesh": "did:vm:mesh:vaultmesh-berlin", + "anchored_mesh": "did:vm:mesh:vaultmesh-dublin", + "timestamp": "2025-12-06T12:10:00Z", + "dublin_roots_included": { + "Compliance": "blake3:aaa111...", + "Drills": "blake3:bbb222..." + }, + "combined_root": "blake3:ccc333...", + "anchor_proof": { + "backend": "bitcoin", + "tx_hash": "abc123...", + "merkle_path": [...] + }, + "tags": ["federation", "cross-anchor", "bitcoin"], + "root_hash": "blake3:ddd444..." +} +``` + +This means Dublin's receipts are now anchored on **both** Dublin's chosen backend **and** Berlin's Bitcoin anchor — double civilizational durability. + +--- + +## 5. 
Federation API + +### 5.1 Endpoints + +```yaml +# Federation API Specification +openapi: 3.0.0 +info: + title: VaultMesh Federation API + version: 1.0.0 + +paths: + /federation/discovery: + get: + summary: Get mesh discovery record + responses: + 200: + description: Discovery record + content: + application/json: + schema: + $ref: '#/components/schemas/DiscoveryRecord' + + /federation/proposals: + post: + summary: Submit trust proposal + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/TrustProposal' + responses: + 202: + description: Proposal received + + /federation/proposals/{id}: + get: + summary: Get proposal status + put: + summary: Respond to proposal (accept/reject/counter) + + /federation/agreements: + get: + summary: List active federation agreements + + /federation/agreements/{id}: + get: + summary: Get agreement details + delete: + summary: Revoke federation (with notice period) + + /federation/notify: + post: + summary: Notify of new anchor (push) + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/AnchorNotification' + + /federation/witness: + post: + summary: Submit witness attestation + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/WitnessAttestation' + + /federation/roots: + get: + summary: Get current Merkle roots for all scrolls + parameters: + - name: scrolls + in: query + schema: + type: array + items: + type: string + + /federation/receipts/{scroll}: + get: + summary: Fetch receipt samples for verification + parameters: + - name: scroll + in: path + required: true + - name: from + in: query + schema: + type: string + format: date-time + - name: limit + in: query + schema: + type: integer + default: 100 + + /federation/verify: + post: + summary: Request verification of specific receipt + requestBody: + content: + application/json: + schema: + type: object + properties: + receipt_hash: + type: string + scroll: + type: string +``` + +### 5.2 Authentication + +Federation API uses mutual TLS + signed requests: + +``` +POST /federation/notify HTTP/1.1 +Host: federation.vaultmesh-berlin.io +Content-Type: application/json +X-Mesh-ID: did:vm:mesh:vaultmesh-dublin +X-Timestamp: 2025-12-06T12:00:00Z +X-Signature: z58D... + +{ + "scroll": "Compliance", + "root": "blake3:aaa111...", + ... +} +``` + +Signature covers: `${method}|${path}|${timestamp}|${body_hash}` + +--- + +## 6. Conflict Resolution + +### 6.1 Discrepancy Types + +| Type | Description | Severity | +|------|-------------|----------| +| `root_mismatch` | Claimed root doesn't match computed | Critical | +| `anchor_invalid` | Anchor proof fails verification | Critical | +| `timestamp_drift` | Timestamps outside tolerance (>5min) | Warning | +| `schema_incompatible` | Receipt schema version mismatch | Warning | +| `sample_missing` | Requested receipt not found | Info | + +### 6.2 Discrepancy Protocol + +```json +{ + "type": "fed_discrepancy", + "discrepancy_id": "discrepancy-2025-12-06-001", + "reporting_mesh": "did:vm:mesh:vaultmesh-berlin", + "reported_mesh": "did:vm:mesh:vaultmesh-dublin", + "timestamp": "2025-12-06T12:15:00Z", + "discrepancy_type": "root_mismatch", + "severity": "critical", + "details": { + "scroll": "Compliance", + "claimed_root": "blake3:aaa111...", + "computed_root": "blake3:xxx999...", + "sample_receipts_checked": 50, + "first_divergence_at": "receipt-sequence-4721" + }, + "evidence_hash": "blake3:evidence...", + "resolution_requested": true +} +``` + +### 6.3 Resolution Workflow + +1. 
**Automatic**: Re-sync and recompute +2. **Manual**: Human review of divergence +3. **Arbitration**: Third-party mesh verification +4. **Escalation**: Federation suspension pending resolution + +--- + +## 7. Schema Synchronization + +Federated meshes must agree on receipt schemas: + +**Schema Sync Receipt**: +```json +{ + "type": "fed_schema_sync", + "sync_id": "schema-sync-2025-12-06-001", + "meshes": ["did:vm:mesh:vaultmesh-dublin", "did:vm:mesh:vaultmesh-berlin"], + "timestamp": "2025-12-06T10:00:00Z", + "schemas_synced": { + "Compliance": { + "version": "1.2.0", + "hash": "blake3:schema1..." + }, + "Drills": { + "version": "1.1.0", + "hash": "blake3:schema2..." + } + }, + "backward_compatible": true, + "migration_required": false, + "tags": ["federation", "schema", "sync"], + "root_hash": "blake3:eee555..." +} +``` + +--- + +## 8. CLI Commands + +```bash +# Discovery +vm-federation discover --mesh vaultmesh-berlin.io +vm-federation list-known + +# Proposals +vm-federation propose \ + --target did:vm:mesh:vaultmesh-berlin \ + --trust-level 2 \ + --scrolls Compliance,Drills \ + --terms federation-terms.json + +vm-federation proposals list +vm-federation proposals show fed-proposal-2025-12-06-001 +vm-federation proposals accept fed-proposal-2025-12-06-001 +vm-federation proposals reject fed-proposal-2025-12-06-001 --reason "incompatible_jurisdiction" +vm-federation proposals counter fed-proposal-2025-12-06-001 --terms counter-terms.json + +# Agreements +vm-federation agreements list +vm-federation agreements show fed-agreement-2025-12-06-001 +vm-federation agreements revoke fed-agreement-2025-12-06-001 --notice-days 30 + +# Verification +vm-federation verify --mesh vaultmesh-berlin --scroll Compliance +vm-federation witness-history --mesh vaultmesh-berlin --last 30d + +# Status +vm-federation status +vm-federation health --all-peers +``` + +--- + +## 9. Design Gate Checklist + +| Question | Federation Answer | +|----------|-------------------| +| Clear entrypoint? | ✅ CLI (`vm-federation`), Federation API | +| Contract produced? | ✅ `federation_agreement.json` | +| State object? | ✅ Agreement + witness state | +| Receipts emitted? | ✅ Six receipt types | +| Append-only JSONL? | ✅ `receipts/federation/federation_events.jsonl` | +| Merkle root? | ✅ `ROOT.federation.txt` | +| Guardian anchor path? | ✅ Federation root included in ProofChain | +| Query tool? | ✅ `vm-federation` CLI | diff --git a/docs/VAULTMESH-IDENTITY-ENGINE.md b/docs/VAULTMESH-IDENTITY-ENGINE.md new file mode 100644 index 0000000..9ff3295 --- /dev/null +++ b/docs/VAULTMESH-IDENTITY-ENGINE.md @@ -0,0 +1,635 @@ +# VAULTMESH-IDENTITY-ENGINE.md + +**Civilization Ledger Identity Primitive** + +> *Every actor has a provenance. Every credential has a receipt.* + +Identity is VaultMesh's trust anchor — managing decentralized identifiers (DIDs), verifiable credentials, authentication events, and authorization decisions with cryptographic proof chains. + +--- + +## 1. Scroll Definition + +| Property | Value | +| --------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | +| **Scroll Name** | `Identity` | +| **JSONL Path** | `receipts/identity/identity_events.jsonl` | +| **Root File** | `ROOT.identity.txt` | +| **Receipt Types** | `identity_did_create`, `identity_did_rotate`, `identity_did_revoke`, `identity_credential_issue`, `identity_credential_revoke`, `identity_auth_event`, `identity_authz_decision` | + +--- + +## 2. 
Core Concepts + +### 2.1 Decentralized Identifiers (DIDs) + +A **DID** is a self-sovereign identifier for any entity in the VaultMesh ecosystem. + +```json +{ + "did": "did:vm:user:sovereign", + "did_document": { + "@context": ["https://www.w3.org/ns/did/v1", "https://vaultmesh.io/ns/did/v1"], + "id": "did:vm:user:sovereign", + "controller": "did:vm:user:sovereign", + "verificationMethod": [ + { + "id": "did:vm:user:sovereign#key-1", + "type": "Ed25519VerificationKey2020", + "controller": "did:vm:user:sovereign", + "publicKeyMultibase": "z6Mkf5rGMoatrSj1f..." + } + ], + "authentication": ["did:vm:user:sovereign#key-1"], + "assertionMethod": ["did:vm:user:sovereign#key-1"], + "capabilityInvocation": ["did:vm:user:sovereign#key-1"], + "capabilityDelegation": ["did:vm:user:sovereign#key-1"] + }, + "created_at": "2025-01-15T00:00:00Z", + "updated_at": "2025-12-06T10:00:00Z", + "status": "active", + "metadata": { + "display_name": "Sovereign Operator", + "roles": ["admin", "operator"], + "organization": "did:vm:org:vaultmesh-hq" + } +} +``` + +**DID types** (method-specific): +- `did:vm:user:*` — human operators +- `did:vm:node:*` — infrastructure nodes (BRICKs, portals) +- `did:vm:service:*` — automated services, agents +- `did:vm:org:*` — organizations, teams +- `did:vm:device:*` — hardware devices, HSMs, YubiKeys + +### 2.2 Verifiable Credentials + +**Credentials** are signed attestations about a subject, issued by trusted parties. + +```json +{ + "credential_id": "vc:vm:2025-12-001", + "@context": [ + "https://www.w3.org/2018/credentials/v1", + "https://vaultmesh.io/ns/credentials/v1" + ], + "type": ["VerifiableCredential", "VaultMeshOperatorCredential"], + "issuer": "did:vm:org:vaultmesh-hq", + "issuanceDate": "2025-12-01T00:00:00Z", + "expirationDate": "2026-12-01T00:00:00Z", + "credentialSubject": { + "id": "did:vm:user:sovereign", + "role": "administrator", + "permissions": ["anchor", "admin", "oracle"], + "clearance_level": "full", + "jurisdiction": ["eu-west", "us-east"] + }, + "credentialStatus": { + "id": "https://vaultmesh.io/credentials/status/2025-12-001", + "type": "RevocationList2023" + }, + "proof": { + "type": "Ed25519Signature2020", + "created": "2025-12-01T00:00:00Z", + "verificationMethod": "did:vm:org:vaultmesh-hq#key-1", + "proofPurpose": "assertionMethod", + "proofValue": "z3FXQjecWufY..." + } +} +``` + +**Credential types**: +- `VaultMeshOperatorCredential` — human operator authorization +- `VaultMeshNodeCredential` — node identity and capabilities +- `VaultMeshServiceCredential` — service authentication +- `VaultMeshComplianceCredential` — compliance attestations +- `VaultMeshDelegationCredential` — delegated authority + +### 2.3 Authentication Events + +Every authentication attempt is logged with full context. + +```json +{ + "auth_event_id": "auth-2025-12-06-001", + "timestamp": "2025-12-06T14:30:00Z", + "subject": "did:vm:user:sovereign", + "method": "ed25519_challenge", + "result": "success", + "session_id": "session-abc123...", + "client": { + "ip": "10.77.1.100", + "user_agent": "VaultMesh-CLI/1.0", + "device_fingerprint": "blake3:fff..." 
+ }, + "node": "did:vm:node:portal-01", + "mfa_used": true, + "mfa_method": "yubikey", + "risk_score": 0.1, + "tags": ["cli", "internal", "mfa"] +} +``` + +**Authentication methods**: +- `ed25519_challenge` — cryptographic challenge-response +- `passkey` — WebAuthn/FIDO2 +- `yubikey` — hardware security key +- `totp` — time-based OTP (fallback) +- `mtls` — mutual TLS (node-to-node) +- `api_key` — service accounts (with rotation) + +### 2.4 Authorization Decisions + +Every access control decision is logged for audit trails. + +```json +{ + "authz_event_id": "authz-2025-12-06-001", + "timestamp": "2025-12-06T14:30:05Z", + "subject": "did:vm:user:sovereign", + "action": "anchor_submit", + "resource": "scroll:treasury", + "decision": "allow", + "policy_matched": "policy:admin-full-access", + "context": { + "session_id": "session-abc123...", + "node": "did:vm:node:portal-01", + "request_id": "req-xyz789..." + }, + "credentials_presented": ["vc:vm:2025-12-001"], + "evaluation_time_ms": 2 +} +``` + +--- + +## 3. Mapping to Eternal Pattern + +### 3.1 Experience Layer (L1) + +**CLI** (`vm-identity`): +```bash +# DID operations +vm-identity did create --type user --name "operator-alpha" +vm-identity did show did:vm:user:sovereign +vm-identity did list --type user --status active +vm-identity did rotate --did did:vm:user:sovereign --reason "scheduled rotation" +vm-identity did revoke --did did:vm:user:operator-alpha --reason "offboarded" + +# Key management +vm-identity key list --did did:vm:user:sovereign +vm-identity key add --did did:vm:user:sovereign --type ed25519 --purpose authentication +vm-identity key revoke --did did:vm:user:sovereign --key-id key-2 --reason "compromised" + +# Credential operations +vm-identity credential issue --subject did:vm:user:operator-alpha --type operator --role viewer +vm-identity credential list --subject did:vm:user:sovereign +vm-identity credential verify vc:vm:2025-12-001 +vm-identity credential revoke vc:vm:2025-12-001 --reason "role change" + +# Authentication +vm-identity auth login --method passkey +vm-identity auth logout +vm-identity auth sessions --did did:vm:user:sovereign +vm-identity auth revoke-session session-abc123 + +# Authorization +vm-identity authz check --subject did:vm:user:sovereign --action anchor_submit --resource scroll:treasury +vm-identity authz policies list +vm-identity authz policy show policy:admin-full-access + +# Audit +vm-identity audit --did did:vm:user:sovereign --from 2025-12-01 +vm-identity audit --type auth_event --result failure --last 24h +``` + +**MCP Tools**: +- `identity_did_resolve` — resolve DID to DID document +- `identity_credential_verify` — verify credential validity +- `identity_auth_status` — current session status +- `identity_authz_check` — check authorization +- `identity_audit_query` — query identity events + +**Portal HTTP**: +- `GET /identity/dids` — list DIDs +- `GET /identity/dids/{did}` — resolve DID +- `POST /identity/dids` — create DID +- `POST /identity/dids/{did}/rotate` — rotate keys +- `DELETE /identity/dids/{did}` — revoke DID +- `GET /identity/credentials` — list credentials +- `POST /identity/credentials` — issue credential +- `GET /identity/credentials/{id}/verify` — verify credential +- `DELETE /identity/credentials/{id}` — revoke credential +- `POST /identity/auth/challenge` — initiate auth +- `POST /identity/auth/verify` — verify auth response +- `GET /identity/sessions` — list sessions +- `DELETE /identity/sessions/{id}` — revoke session + +--- + +### 3.2 Engine Layer (L2) + +#### Step 1 — 
Plan → `identity_operation_contract.json` + +**DID Creation Contract**: +```json +{ + "operation_id": "identity-op-2025-12-06-001", + "operation_type": "did_create", + "initiated_by": "did:vm:user:sovereign", + "initiated_at": "2025-12-06T10:00:00Z", + "target": { + "did_type": "user", + "display_name": "Operator Bravo", + "initial_roles": ["operator"], + "key_type": "ed25519" + }, + "approval_required": true, + "approvers": ["did:vm:user:sovereign"], + "constraints": { + "credential_auto_issue": true, + "credential_type": "VaultMeshOperatorCredential", + "credential_expiry": "365d" + } +} +``` + +**Credential Issuance Contract**: +```json +{ + "operation_id": "identity-op-2025-12-06-002", + "operation_type": "credential_issue", + "initiated_by": "did:vm:org:vaultmesh-hq", + "initiated_at": "2025-12-06T11:00:00Z", + "credential": { + "type": "VaultMeshOperatorCredential", + "subject": "did:vm:user:operator-bravo", + "claims": { + "role": "operator", + "permissions": ["storage", "compute"], + "jurisdiction": ["eu-west"] + }, + "validity_period": "365d" + }, + "approval_required": false +} +``` + +#### Step 2 — Execute → `identity_operation_state.json` + +```json +{ + "operation_id": "identity-op-2025-12-06-001", + "status": "completed", + "created_at": "2025-12-06T10:00:00Z", + "updated_at": "2025-12-06T10:05:00Z", + "steps": [ + { + "step": "generate_keypair", + "status": "completed", + "completed_at": "2025-12-06T10:01:00Z", + "result": { + "public_key": "z6Mkf5rGMoatrSj1f...", + "key_id": "key-1" + } + }, + { + "step": "create_did_document", + "status": "completed", + "completed_at": "2025-12-06T10:02:00Z", + "result": { + "did": "did:vm:user:operator-bravo" + } + }, + { + "step": "register_did", + "status": "completed", + "completed_at": "2025-12-06T10:03:00Z", + "result": { + "registered": true, + "registry_hash": "blake3:aaa..." + } + }, + { + "step": "issue_credential", + "status": "completed", + "completed_at": "2025-12-06T10:04:00Z", + "result": { + "credential_id": "vc:vm:2025-12-002" + } + } + ], + "approvals": { + "did:vm:user:sovereign": { + "approved_at": "2025-12-06T10:00:30Z", + "signature": "ed25519:..." + } + } +} +``` + +#### Step 3 — Seal → Receipts + +**DID Creation Receipt**: +```json +{ + "type": "identity_did_create", + "did": "did:vm:user:operator-bravo", + "did_type": "user", + "timestamp": "2025-12-06T10:03:00Z", + "created_by": "did:vm:user:sovereign", + "operation_id": "identity-op-2025-12-06-001", + "public_key_fingerprint": "SHA256:abc123...", + "did_document_hash": "blake3:bbb222...", + "initial_roles": ["operator"], + "tags": ["identity", "did", "create", "user"], + "root_hash": "blake3:ccc333..." +} +``` + +**DID Key Rotation Receipt**: +```json +{ + "type": "identity_did_rotate", + "did": "did:vm:user:sovereign", + "timestamp": "2025-12-06T15:00:00Z", + "rotated_by": "did:vm:user:sovereign", + "old_key_fingerprint": "SHA256:old123...", + "new_key_fingerprint": "SHA256:new456...", + "reason": "scheduled rotation", + "old_key_status": "revoked", + "tags": ["identity", "did", "rotate", "key"], + "root_hash": "blake3:ddd444..." 
+} +``` + +**Credential Issuance Receipt**: +```json +{ + "type": "identity_credential_issue", + "credential_id": "vc:vm:2025-12-002", + "credential_type": "VaultMeshOperatorCredential", + "timestamp": "2025-12-06T10:04:00Z", + "issuer": "did:vm:org:vaultmesh-hq", + "subject": "did:vm:user:operator-bravo", + "claims_hash": "blake3:eee555...", + "expires_at": "2026-12-06T00:00:00Z", + "operation_id": "identity-op-2025-12-06-001", + "tags": ["identity", "credential", "issue", "operator"], + "root_hash": "blake3:fff666..." +} +``` + +**Credential Revocation Receipt**: +```json +{ + "type": "identity_credential_revoke", + "credential_id": "vc:vm:2025-12-002", + "timestamp": "2025-12-06T18:00:00Z", + "revoked_by": "did:vm:user:sovereign", + "reason": "role change", + "revocation_list_updated": true, + "tags": ["identity", "credential", "revoke"], + "root_hash": "blake3:ggg777..." +} +``` + +**Authentication Event Receipt**: +```json +{ + "type": "identity_auth_event", + "auth_event_id": "auth-2025-12-06-001", + "timestamp": "2025-12-06T14:30:00Z", + "subject": "did:vm:user:sovereign", + "method": "passkey", + "result": "success", + "session_id": "session-abc123...", + "node": "did:vm:node:portal-01", + "client_fingerprint": "blake3:hhh888...", + "mfa_used": true, + "risk_score": 0.1, + "tags": ["identity", "auth", "success", "mfa"], + "root_hash": "blake3:iii999..." +} +``` + +**Authorization Decision Receipt** (for sensitive operations): +```json +{ + "type": "identity_authz_decision", + "authz_event_id": "authz-2025-12-06-001", + "timestamp": "2025-12-06T14:30:05Z", + "subject": "did:vm:user:sovereign", + "action": "capability_grant", + "resource": "did:vm:node:brick-03", + "decision": "allow", + "policy_matched": "policy:admin-full-access", + "credentials_verified": ["vc:vm:2025-12-001"], + "tags": ["identity", "authz", "allow", "sensitive"], + "root_hash": "blake3:jjj000..." +} +``` + +--- + +### 3.3 Ledger Layer (L3) + +**Receipt Types**: + +| Type | When Emitted | +| --------------------------- | ------------------------------------- | +| `identity_did_create` | New DID registered | +| `identity_did_rotate` | DID keys rotated | +| `identity_did_revoke` | DID revoked/deactivated | +| `identity_credential_issue` | New credential issued | +| `identity_credential_revoke`| Credential revoked | +| `identity_auth_event` | Authentication attempt (success/fail) | +| `identity_authz_decision` | Sensitive authorization decision | + +**Merkle Coverage**: +- All receipts append to `receipts/identity/identity_events.jsonl` +- `ROOT.identity.txt` updated after each append +- Guardian anchors Identity root in anchor cycles + +--- + +## 4. 
Query Interface + +`identity_query_events.py`: + +```bash +# DID history +vm-identity query --did did:vm:user:sovereign + +# All auth events for a subject +vm-identity query --type auth_event --subject did:vm:user:sovereign + +# Failed authentications +vm-identity query --type auth_event --result failure --last 7d + +# Credentials issued by an org +vm-identity query --type credential_issue --issuer did:vm:org:vaultmesh-hq + +# Authorization denials +vm-identity query --type authz_decision --decision deny + +# Date range +vm-identity query --from 2025-12-01 --to 2025-12-06 + +# Export for compliance audit +vm-identity query --from 2025-01-01 --format csv > identity_audit_2025.csv +``` + +**DID Resolution History**: +```bash +# Show all versions of a DID document +vm-identity did history did:vm:user:sovereign + +# Output: +# Version 1: 2025-01-15T00:00:00Z (created) +# - Key: key-1 (ed25519) +# Version 2: 2025-06-15T00:00:00Z (key rotation) +# - Key: key-1 (revoked), key-2 (ed25519) +# Version 3: 2025-12-06T15:00:00Z (key rotation) +# - Key: key-2 (revoked), key-3 (ed25519) +``` + +--- + +## 5. Design Gate Checklist + +| Question | Identity Answer | +| --------------------- | ---------------------------------------------------------------- | +| Clear entrypoint? | ✅ CLI (`vm-identity`), MCP tools, Portal HTTP | +| Contract produced? | ✅ `identity_operation_contract.json` for DID/credential ops | +| State object? | ✅ `identity_operation_state.json` tracking multi-step operations | +| Receipts emitted? | ✅ Seven receipt types covering all identity events | +| Append-only JSONL? | ✅ `receipts/identity/identity_events.jsonl` | +| Merkle root? | ✅ `ROOT.identity.txt` | +| Guardian anchor path? | ✅ Identity root included in ProofChain | +| Query tool? | ✅ `identity_query_events.py` + DID history | + +--- + +## 6. Key Management + +### 6.1 Key Hierarchy + +``` +Root of Trust (Hardware) +├── Organization Master Key (HSM-protected) +│ ├── Node Signing Keys +│ │ ├── did:vm:node:brick-01#key-1 +│ │ ├── did:vm:node:brick-02#key-1 +│ │ └── did:vm:node:portal-01#key-1 +│ ├── Service Keys +│ │ ├── did:vm:service:guardian#key-1 +│ │ └── did:vm:service:oracle#key-1 +│ └── Credential Issuing Keys +│ └── did:vm:org:vaultmesh-hq#issuer-key-1 +└── User Keys (Self-custodied) + ├── did:vm:user:sovereign#key-1 + └── did:vm:user:operator-bravo#key-1 +``` + +### 6.2 Key Rotation Policy + +| Key Type | Rotation Period | Trigger Events | +| ------------------- | --------------- | --------------------------------- | +| User keys | 365 days | Compromise, role change | +| Node keys | 180 days | Compromise, node migration | +| Service keys | 90 days | Compromise, version upgrade | +| Credential issuers | 730 days | Compromise, policy change | +| Organization master | Manual only | Compromise, leadership change | + +### 6.3 Recovery Procedures + +```json +{ + "recovery_id": "recovery-2025-12-06-001", + "did": "did:vm:user:operator-bravo", + "reason": "lost_device", + "initiated_at": "2025-12-06T09:00:00Z", + "recovery_method": "social_recovery", + "guardians_required": 3, + "guardians_responded": [ + {"guardian": "did:vm:user:sovereign", "approved_at": "2025-12-06T09:15:00Z"}, + {"guardian": "did:vm:user:operator-alpha", "approved_at": "2025-12-06T09:20:00Z"}, + {"guardian": "did:vm:user:operator-charlie", "approved_at": "2025-12-06T09:25:00Z"} + ], + "status": "completed", + "new_key_registered_at": "2025-12-06T09:30:00Z" +} +``` + +--- + +## 7. 
Policy Engine + +### 7.1 Policy Definition + +```json +{ + "policy_id": "policy:admin-full-access", + "name": "Administrator Full Access", + "description": "Full access to all VaultMesh operations", + "version": 1, + "effect": "allow", + "subjects": { + "match": "credential", + "credential_type": "VaultMeshOperatorCredential", + "claims": { + "role": "administrator" + } + }, + "actions": ["*"], + "resources": ["*"], + "conditions": { + "mfa_required": true, + "allowed_hours": {"start": "00:00", "end": "23:59"}, + "allowed_nodes": ["*"] + } +} +``` + +### 7.2 Policy Evaluation + +``` +Request: + Subject: did:vm:user:sovereign + Action: anchor_submit + Resource: scroll:treasury + +Evaluation: + 1. Resolve subject credentials + 2. Match policies by subject claims + 3. Check action/resource match + 4. Evaluate conditions (MFA, time, location) + 5. Log decision with full context + 6. Return allow/deny with reason +``` + +--- + +## 8. Integration Points + +| System | Integration | +| ---------------- | -------------------------------------------------------------------------- | +| **Guardian** | Uses Identity for anchor authentication; alerts on suspicious auth events | +| **Mesh** | Node DIDs registered via Identity; capability grants require valid credentials | +| **Treasury** | Account ownership linked to DIDs; transaction signing uses Identity keys | +| **Oracle** | Oracle queries authenticated via Identity; responses signed with service DID | +| **OffSec** | Incident response can trigger emergency credential revocations | +| **Observability**| All identity events flow to observability for correlation | + +--- + +## 9. Future Extensions + +- **Biometric binding**: Link credentials to biometric templates +- **Delegation chains**: Transitive capability delegation with constraints +- **Anonymous credentials**: Zero-knowledge proofs for privacy-preserving auth +- **Cross-mesh identity**: Federated identity across VaultMesh instances +- **Hardware attestation**: TPM/Secure Enclave binding for high-assurance +- **Identity recovery DAO**: Decentralized recovery governance diff --git a/docs/VAULTMESH-IMPLEMENTATION-SCAFFOLDS.md b/docs/VAULTMESH-IMPLEMENTATION-SCAFFOLDS.md new file mode 100644 index 0000000..7ca0320 --- /dev/null +++ b/docs/VAULTMESH-IMPLEMENTATION-SCAFFOLDS.md @@ -0,0 +1,1621 @@ +# VAULTMESH-IMPLEMENTATION-SCAFFOLDS.md + +**From Pattern to Code** + +> *Every specification deserves a skeleton.* + +This document provides implementation scaffolds for all VaultMesh engines — Rust structs for core types and Python CLI skeletons for tooling. + +--- + +## 1. 
Shared Types (Rust)
+
+### 1.1 Core Receipt Types
+
+```rust
+// vaultmesh-core/src/receipt.rs
+
+use chrono::{DateTime, Utc};
+use serde::{Deserialize, Serialize};
+
+/// Universal receipt header present in all receipts
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ReceiptHeader {
+    pub receipt_type: String,
+    pub timestamp: DateTime<Utc>,
+    pub root_hash: String,
+    pub tags: Vec<String>,
+}
+
+/// Receipt metadata for tracking and querying
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ReceiptMeta {
+    pub scroll: Scroll,
+    pub sequence: u64,
+    pub anchor_epoch: Option<u64>,
+    pub proof_path: Option<String>,
+}
+
+/// Scroll identifiers
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
+#[serde(rename_all = "snake_case")]
+pub enum Scroll {
+    Drills,
+    Compliance,
+    Guardian,
+    Treasury,
+    Mesh,
+    OffSec,
+    Identity,
+    Observability,
+    Automation,
+    PsiField,
+}
+
+impl Scroll {
+    pub fn jsonl_path(&self) -> &'static str {
+        match self {
+            Scroll::Drills => "receipts/drills/drill_runs.jsonl",
+            Scroll::Compliance => "receipts/compliance/oracle_answers.jsonl",
+            Scroll::Guardian => "receipts/guardian/anchor_events.jsonl",
+            Scroll::Treasury => "receipts/treasury/treasury_events.jsonl",
+            Scroll::Mesh => "receipts/mesh/mesh_events.jsonl",
+            Scroll::OffSec => "receipts/offsec/offsec_events.jsonl",
+            Scroll::Identity => "receipts/identity/identity_events.jsonl",
+            Scroll::Observability => "receipts/observability/observability_events.jsonl",
+            Scroll::Automation => "receipts/automation/automation_events.jsonl",
+            Scroll::PsiField => "receipts/psi/psi_events.jsonl",
+        }
+    }
+
+    pub fn root_file(&self) -> &'static str {
+        match self {
+            Scroll::Drills => "ROOT.drills.txt",
+            Scroll::Compliance => "ROOT.compliance.txt",
+            Scroll::Guardian => "ROOT.guardian.txt",
+            Scroll::Treasury => "ROOT.treasury.txt",
+            Scroll::Mesh => "ROOT.mesh.txt",
+            Scroll::OffSec => "ROOT.offsec.txt",
+            Scroll::Identity => "ROOT.identity.txt",
+            Scroll::Observability => "ROOT.observability.txt",
+            Scroll::Automation => "ROOT.automation.txt",
+            Scroll::PsiField => "ROOT.psi.txt",
+        }
+    }
+}
+
+/// Generic receipt wrapper
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Receipt<T> {
+    #[serde(flatten)]
+    pub header: ReceiptHeader,
+    #[serde(flatten)]
+    pub meta: ReceiptMeta,
+    #[serde(flatten)]
+    pub body: T,
+}
+```
+
+### 1.2 DID Types
+
+```rust
+// vaultmesh-core/src/did.rs
+
+use serde::{Deserialize, Serialize};
+
+/// VaultMesh DID
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
+pub struct Did(String);
+
+impl Did {
+    pub fn new(did_type: DidType, identifier: &str) -> Self {
+        Did(format!("did:vm:{}:{}", did_type.as_str(), identifier))
+    }
+
+    pub fn parse(s: &str) -> Result<Self, DidParseError> {
+        if !s.starts_with("did:vm:") {
+            return Err(DidParseError::InvalidPrefix);
+        }
+        Ok(Did(s.to_string()))
+    }
+
+    pub fn did_type(&self) -> Option<DidType> {
+        let parts: Vec<&str> = self.0.split(':').collect();
+        if parts.len() >= 3 {
+            DidType::from_str(parts[2])
+        } else {
+            None
+        }
+    }
+
+    pub fn identifier(&self) -> Option<&str> {
+        let parts: Vec<&str> = self.0.split(':').collect();
+        if parts.len() >= 4 {
+            Some(parts[3])
+        } else {
+            None
+        }
+    }
+
+    pub fn as_str(&self) -> &str {
+        &self.0
+    }
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum DidType {
+    Node,
+    Human,
+    Agent,
+    Service,
+    Mesh,
+}
+
+impl DidType {
+    pub fn as_str(&self) -> &'static str {
+        match self {
+            DidType::Node => "node",
+            DidType::Human => "human",
+            DidType::Agent => "agent",
+            DidType::Service => "service",
+            DidType::Mesh => "mesh",
+        }
+    }
+
+    pub fn from_str(s: &str) -> Option<Self> {
+        match s {
+            "node" => Some(DidType::Node),
+            "human" => Some(DidType::Human),
+            "agent" => Some(DidType::Agent),
+            "service" => Some(DidType::Service),
+            "mesh" => Some(DidType::Mesh),
+            _ => None,
+        }
+    }
+}
+
+#[derive(Debug)]
+pub enum DidParseError {
+    InvalidPrefix,
+    MissingType,
+    MissingIdentifier,
+}
+```
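+
+A minimal round-trip sketch of the `Did` helpers (assuming `Did` and `DidType` are re-exported at the crate root, as the engine crates' imports suggest; the `main` harness is illustrative):
+
+```rust
+use vaultmesh_core::{Did, DidType};
+
+fn main() {
+    // Construct a DID, then parse it back; both views should agree.
+    let did = Did::new(DidType::Agent, "scribe");
+    assert_eq!(did.as_str(), "did:vm:agent:scribe");
+
+    let parsed = Did::parse("did:vm:agent:scribe").expect("has the did:vm: prefix");
+    assert_eq!(parsed.did_type(), Some(DidType::Agent));
+    assert_eq!(parsed.identifier(), Some("scribe"));
+}
+```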
"agent", + DidType::Service => "service", + DidType::Mesh => "mesh", + } + } + + pub fn from_str(s: &str) -> Option { + match s { + "node" => Some(DidType::Node), + "human" => Some(DidType::Human), + "agent" => Some(DidType::Agent), + "service" => Some(DidType::Service), + "mesh" => Some(DidType::Mesh), + _ => None, + } + } +} + +#[derive(Debug)] +pub enum DidParseError { + InvalidPrefix, + MissingType, + MissingIdentifier, +} +``` + +### 1.3 Hash Utilities + +```rust +// vaultmesh-core/src/hash.rs + +use blake3::Hasher; +use serde::{Deserialize, Serialize}; + +/// VaultMesh hash with algorithm prefix +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct VmHash(String); + +impl VmHash { + /// Create hash from bytes using Blake3 + pub fn blake3(data: &[u8]) -> Self { + let hash = blake3::hash(data); + VmHash(format!("blake3:{}", hash.to_hex())) + } + + /// Create hash from JSON-serializable value + pub fn from_json(value: &T) -> Result { + let json = serde_json::to_vec(value)?; + Ok(Self::blake3(&json)) + } + + /// Create hash from file contents + pub fn from_file(path: &std::path::Path) -> std::io::Result { + let contents = std::fs::read(path)?; + Ok(Self::blake3(&contents)) + } + + /// Get the raw hex value without prefix + pub fn hex(&self) -> &str { + self.0.strip_prefix("blake3:").unwrap_or(&self.0) + } + + /// Get full prefixed value + pub fn as_str(&self) -> &str { + &self.0 + } +} + +/// Compute Merkle root from list of hashes +pub fn merkle_root(hashes: &[VmHash]) -> VmHash { + if hashes.is_empty() { + return VmHash::blake3(b"empty"); + } + if hashes.len() == 1 { + return hashes[0].clone(); + } + + let mut current_level: Vec = hashes.to_vec(); + + while current_level.len() > 1 { + let mut next_level = Vec::new(); + + for chunk in current_level.chunks(2) { + let combined = if chunk.len() == 2 { + format!("{}{}", chunk[0].hex(), chunk[1].hex()) + } else { + format!("{}{}", chunk[0].hex(), chunk[0].hex()) + }; + next_level.push(VmHash::blake3(combined.as_bytes())); + } + + current_level = next_level; + } + + current_level.remove(0) +} +``` + +--- + +## 2. 
+
+---
+
+## 2. Treasury Engine (Rust)
+
+```rust
+// vaultmesh-treasury/src/lib.rs
+
+use chrono::{DateTime, Utc};
+use rust_decimal::Decimal;
+use serde::{Deserialize, Serialize};
+use vaultmesh_core::{Did, Receipt, ReceiptHeader, ReceiptMeta, Scroll, VmHash};
+
+/// Treasury account
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Account {
+    pub account_id: String,
+    pub display_name: String,
+    pub account_type: AccountType,
+    pub currency: Currency,
+    pub balance: Decimal,
+    pub created_at: DateTime<Utc>,
+    pub tags: Vec<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "snake_case")]
+pub enum AccountType {
+    Operational,
+    Reserve,
+    Escrow,
+    External,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub enum Currency {
+    EUR,
+    USD,
+    GBP,
+    BTC,
+    ETH,
+}
+
+/// Treasury entry (single debit or credit)
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Entry {
+    pub entry_id: String,
+    pub entry_type: EntryType,
+    pub account: String,
+    pub amount: Decimal,
+    pub currency: Currency,
+    pub memo: String,
+    pub timestamp: DateTime<Utc>,
+    pub tags: Vec<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "snake_case")]
+pub enum EntryType {
+    Debit,
+    Credit,
+}
+
+/// Settlement contract
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct SettlementContract {
+    pub settlement_id: String,
+    pub title: String,
+    pub initiated_by: Did,
+    pub initiated_at: DateTime<Utc>,
+    pub parties: Vec<Did>,
+    pub entries: Vec<Entry>,
+    pub requires_signatures: Vec<String>,
+    pub settlement_type: SettlementType,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "snake_case")]
+pub enum SettlementType {
+    InterNodeResource,
+    VendorPayment,
+    ClientInvoice,
+    GrantDisbursement,
+}
+
+/// Settlement state
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct SettlementState {
+    pub settlement_id: String,
+    pub status: SettlementStatus,
+    pub created_at: DateTime<Utc>,
+    pub updated_at: DateTime<Utc>,
+    pub signatures: std::collections::HashMap<String, Option<Signature>>,
+    pub entries_applied: bool,
+    pub balance_snapshot_before: std::collections::HashMap<String, Decimal>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
+#[serde(rename_all = "snake_case")]
+pub enum SettlementStatus {
+    Draft,
+    PendingSignatures,
+    Executing,
+    Completed,
+    Disputed,
+    Expired,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Signature {
+    pub signed_at: DateTime<Utc>,
+    pub signature: String,
+}
+
+// Receipt types
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct TreasuryCreditReceipt {
+    pub entry_id: String,
+    pub account: String,
+    pub amount: Decimal,
+    pub currency: Currency,
+    pub memo: String,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct TreasuryDebitReceipt {
+    pub entry_id: String,
+    pub account: String,
+    pub amount: Decimal,
+    pub currency: Currency,
+    pub memo: String,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct TreasurySettlementReceipt {
+    pub settlement_id: String,
+    pub title: String,
+    pub timestamp_initiated: DateTime<Utc>,
+    pub timestamp_completed: DateTime<Utc>,
+    pub status: SettlementStatus,
+    pub parties: Vec<Did>,
+    pub entries_count: usize,
+    pub net_flow: std::collections::HashMap<String, Decimal>,
+    pub currency: Currency,
+    pub settlement_type: SettlementType,
+    pub signatures_manifest: String,
+}
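+
+// NOTE (scaffold): the engine below keeps accounts and pending settlements
+// in memory and hands receipts back to the caller, which appends them to
+// the treasury scroll. Settlement states advance
+// PendingSignatures -> Executing -> Completed; Draft, Disputed and Expired
+// are modeled above but never set in this sketch.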
+/// Treasury engine
+pub struct TreasuryEngine {
+    accounts: std::collections::HashMap<String, Account>,
+    pending_settlements: std::collections::HashMap<String, (SettlementContract, SettlementState)>,
+}
+
+impl TreasuryEngine {
+    pub fn new() -> Self {
+        TreasuryEngine {
+            accounts: std::collections::HashMap::new(),
+            pending_settlements: std::collections::HashMap::new(),
+        }
+    }
+
+    pub fn create_account(&mut self, account: Account) -> Result<(), TreasuryError> {
+        if self.accounts.contains_key(&account.account_id) {
+            return Err(TreasuryError::AccountExists);
+        }
+        self.accounts.insert(account.account_id.clone(), account);
+        Ok(())
+    }
+
+    pub fn record_entry(&mut self, entry: Entry) -> Result<Receipt<TreasuryCreditReceipt>, TreasuryError> {
+        let account = self.accounts.get_mut(&entry.account)
+            .ok_or(TreasuryError::AccountNotFound)?;
+
+        match entry.entry_type {
+            EntryType::Credit => account.balance += entry.amount,
+            EntryType::Debit => {
+                if account.balance < entry.amount {
+                    return Err(TreasuryError::InsufficientBalance);
+                }
+                account.balance -= entry.amount;
+            }
+        }
+
+        // Debits and credits share a body shape in this scaffold; only the
+        // receipt_type string distinguishes them on the scroll.
+        let receipt_type = match entry.entry_type {
+            EntryType::Credit => "treasury_credit",
+            EntryType::Debit => "treasury_debit",
+        };
+
+        let receipt_body = TreasuryCreditReceipt {
+            entry_id: entry.entry_id.clone(),
+            account: entry.account.clone(),
+            amount: entry.amount,
+            currency: entry.currency,
+            memo: entry.memo,
+        };
+
+        let root_hash = VmHash::from_json(&receipt_body)
+            .map_err(|_| TreasuryError::SerializationError)?;
+
+        Ok(Receipt {
+            header: ReceiptHeader {
+                receipt_type: receipt_type.to_string(),
+                timestamp: entry.timestamp,
+                root_hash: root_hash.as_str().to_string(),
+                tags: entry.tags,
+            },
+            meta: ReceiptMeta {
+                scroll: Scroll::Treasury,
+                sequence: 0, // Set by receipt store
+                anchor_epoch: None,
+                proof_path: None,
+            },
+            body: receipt_body,
+        })
+    }
+
+    pub fn initiate_settlement(&mut self, contract: SettlementContract) -> Result<SettlementState, TreasuryError> {
+        // Validate all accounts exist
+        for entry in &contract.entries {
+            if !self.accounts.contains_key(&entry.account) {
+                return Err(TreasuryError::AccountNotFound);
+            }
+        }
+
+        // Capture balance snapshot
+        let mut snapshot = std::collections::HashMap::new();
+        for entry in &contract.entries {
+            if !snapshot.contains_key(&entry.account) {
+                let balance = self.accounts.get(&entry.account).unwrap().balance;
+                snapshot.insert(entry.account.clone(), balance);
+            }
+        }
+
+        let state = SettlementState {
+            settlement_id: contract.settlement_id.clone(),
+            status: SettlementStatus::PendingSignatures,
+            created_at: contract.initiated_at,
+            updated_at: Utc::now(),
+            signatures: contract.requires_signatures.iter()
+                .map(|s| (s.clone(), None))
+                .collect(),
+            entries_applied: false,
+            balance_snapshot_before: snapshot,
+        };
+
+        self.pending_settlements.insert(
+            contract.settlement_id.clone(),
+            (contract, state.clone()),
+        );
+
+        Ok(state)
+    }
+
+    pub fn add_signature(
+        &mut self,
+        settlement_id: &str,
+        signer: &str,
+        signature: Signature,
+    ) -> Result<SettlementState, TreasuryError> {
+        let (_, state) = self.pending_settlements.get_mut(settlement_id)
+            .ok_or(TreasuryError::SettlementNotFound)?;
+
+        if !state.signatures.contains_key(signer) {
+            return Err(TreasuryError::UnauthorizedSigner);
+        }
+
+        state.signatures.insert(signer.to_string(), Some(signature));
+        state.updated_at = Utc::now();
+
+        // Check if all signatures collected
+        let all_signed = state.signatures.values().all(|s| s.is_some());
+        if all_signed {
+            state.status = SettlementStatus::Executing;
+        }
+
+        Ok(state.clone())
+    }
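+
+    // Execution applies every entry in order against in-memory balances.
+    // Unlike record_entry, debits are not re-checked for sufficient funds
+    // here; a production engine would re-validate against the snapshot
+    // and write ahead before mutating state.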
+    pub fn execute_settlement(&mut self, settlement_id: &str) -> Result<Receipt<TreasurySettlementReceipt>, TreasuryError> {
+        let (contract, state) = self.pending_settlements.get_mut(settlement_id)
+            .ok_or(TreasuryError::SettlementNotFound)?;
+
+        if state.status != SettlementStatus::Executing {
+            return Err(TreasuryError::SettlementNotReady);
+        }
+
+        // Apply all entries
+        let mut net_flow: std::collections::HashMap<String, Decimal> = std::collections::HashMap::new();
+        for entry in &contract.entries {
+            let account = self.accounts.get_mut(&entry.account).unwrap();
+            match entry.entry_type {
+                EntryType::Credit => {
+                    account.balance += entry.amount;
+                    *net_flow.entry(entry.account.clone()).or_insert(Decimal::ZERO) += entry.amount;
+                }
+                EntryType::Debit => {
+                    account.balance -= entry.amount;
+                    *net_flow.entry(entry.account.clone()).or_insert(Decimal::ZERO) -= entry.amount;
+                }
+            }
+        }
+
+        state.entries_applied = true;
+        state.status = SettlementStatus::Completed;
+        state.updated_at = Utc::now();
+
+        let receipt_body = TreasurySettlementReceipt {
+            settlement_id: contract.settlement_id.clone(),
+            title: contract.title.clone(),
+            timestamp_initiated: contract.initiated_at,
+            timestamp_completed: state.updated_at,
+            status: SettlementStatus::Completed,
+            parties: contract.parties.clone(),
+            entries_count: contract.entries.len(),
+            net_flow,
+            currency: contract.entries.first().map(|e| e.currency.clone()).unwrap_or(Currency::EUR),
+            settlement_type: contract.settlement_type.clone(),
+            signatures_manifest: format!("cases/treasury/{}/SIGNATURES.json", settlement_id),
+        };
+
+        let root_hash = VmHash::from_json(&receipt_body)
+            .map_err(|_| TreasuryError::SerializationError)?;
+
+        Ok(Receipt {
+            header: ReceiptHeader {
+                receipt_type: "treasury_settlement".to_string(),
+                timestamp: state.updated_at,
+                root_hash: root_hash.as_str().to_string(),
+                tags: vec!["treasury".to_string(), "settlement".to_string()],
+            },
+            meta: ReceiptMeta {
+                scroll: Scroll::Treasury,
+                sequence: 0,
+                anchor_epoch: None,
+                proof_path: Some(format!("cases/treasury/{}/PROOF.json", settlement_id)),
+            },
+            body: receipt_body,
+        })
+    }
+}
+
+#[derive(Debug)]
+pub enum TreasuryError {
+    AccountExists,
+    AccountNotFound,
+    InsufficientBalance,
+    SettlementNotFound,
+    SettlementNotReady,
+    UnauthorizedSigner,
+    SerializationError,
+}
+```
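+
+A hypothetical end-to-end use of the engine (the `vaultmesh_treasury` crate path, account names, and amounts are illustrative, not part of the spec):
+
+```rust
+use chrono::Utc;
+use rust_decimal::Decimal;
+use vaultmesh_treasury::{Account, AccountType, Currency, Entry, EntryType, TreasuryEngine};
+
+fn main() {
+    let mut engine = TreasuryEngine::new();
+    engine
+        .create_account(Account {
+            account_id: "ops".into(),
+            display_name: "Operational".into(),
+            account_type: AccountType::Operational,
+            currency: Currency::EUR,
+            balance: Decimal::ZERO,
+            created_at: Utc::now(),
+            tags: vec![],
+        })
+        .expect("fresh account");
+
+    // A credit entry; the caller appends the returned receipt to the scroll.
+    let receipt = engine
+        .record_entry(Entry {
+            entry_id: "entry-0001".into(),
+            entry_type: EntryType::Credit,
+            account: "ops".into(),
+            amount: Decimal::from(100),
+            currency: Currency::EUR,
+            memo: "initial funding".into(),
+            timestamp: Utc::now(),
+            tags: vec!["treasury".into()],
+        })
+        .expect("credit applies");
+
+    println!("{}", receipt.header.root_hash);
+}
+```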
+
+---
+
+## 3. Identity Engine (Rust)
+
+```rust
+// vaultmesh-identity/src/lib.rs
+
+use chrono::{DateTime, Utc};
+use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
+use vaultmesh_core::{Did, DidType, Receipt, ReceiptHeader, ReceiptMeta, Scroll, VmHash};
+
+/// DID Document
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct DidDocument {
+    #[serde(rename = "@context")]
+    pub context: Vec<String>,
+    pub id: Did,
+    pub controller: Option<Did>,
+    #[serde(rename = "verificationMethod")]
+    pub verification_method: Vec<VerificationMethod>,
+    pub authentication: Vec<String>,
+    #[serde(rename = "assertionMethod")]
+    pub assertion_method: Vec<String>,
+    pub service: Vec<Service>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct VerificationMethod {
+    pub id: String,
+    #[serde(rename = "type")]
+    pub method_type: String,
+    pub controller: Did,
+    #[serde(rename = "publicKeyMultibase")]
+    pub public_key_multibase: String,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Service {
+    pub id: String,
+    #[serde(rename = "type")]
+    pub service_type: String,
+    #[serde(rename = "serviceEndpoint")]
+    pub service_endpoint: String,
+}
+
+/// Verifiable Credential
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct VerifiableCredential {
+    pub credential_id: String,
+    #[serde(rename = "@context")]
+    pub context: Vec<String>,
+    #[serde(rename = "type")]
+    pub credential_type: Vec<String>,
+    pub issuer: Did,
+    #[serde(rename = "issuanceDate")]
+    pub issuance_date: DateTime<Utc>,
+    #[serde(rename = "expirationDate")]
+    pub expiration_date: Option<DateTime<Utc>>,
+    #[serde(rename = "credentialSubject")]
+    pub credential_subject: CredentialSubject,
+    pub proof: CredentialProof,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct CredentialSubject {
+    pub id: Did,
+    #[serde(flatten)]
+    pub claims: HashMap<String, serde_json::Value>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct CredentialProof {
+    #[serde(rename = "type")]
+    pub proof_type: String,
+    pub created: DateTime<Utc>,
+    #[serde(rename = "verificationMethod")]
+    pub verification_method: String,
+    #[serde(rename = "proofPurpose")]
+    pub proof_purpose: String,
+    #[serde(rename = "proofValue")]
+    pub proof_value: String,
+}
+
+/// Capability Token
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct CapabilityToken {
+    pub capability_id: String,
+    pub holder: Did,
+    pub capability: Capability,
+    pub scope: CapabilityScope,
+    pub granted_by: Did,
+    pub granted_at: DateTime<Utc>,
+    pub expires_at: DateTime<Utc>,
+    pub constraints: HashMap<String, serde_json::Value>,
+    pub proof: CredentialProof,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
+#[serde(rename_all = "snake_case")]
+pub enum Capability {
+    Anchor,
+    Storage,
+    Compute,
+    Oracle,
+    Admin,
+    Federate,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct CapabilityScope {
+    pub scrolls: Option<Vec<String>>,
+    pub backends: Option<Vec<String>>,
+    pub max_rate: Option<u32>, // unit assumed: requests per minute
+}
+
+/// Authentication Event
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct AuthEvent {
+    pub auth_event_id: String,
+    pub subject: Did,
+    pub target: Did,
+    pub method: AuthMethod,
+    pub result: AuthResult,
+    pub timestamp: DateTime<Utc>,
+    pub source_ip: Option<String>,
+    pub session_id: Option<String>,
+    pub credential_used: Option<String>,
+    pub mfa_verified: bool,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "snake_case")]
+pub enum AuthMethod {
+    SshKey,
+    Password,
+    Certificate,
+    Token,
+    Passkey,
+    Mfa,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "snake_case")]
+pub enum AuthResult {
+    Success,
+    Failure,
+    Denied,
+    Expired,
+}
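+
+// The receipt bodies below carry hashes and summaries (claims_summary,
+// source_ip_hash, credential_hash) rather than full documents or raw IPs:
+// the JSONL scroll stays a compact audit index while the verifiable
+// artifacts are stored alongside it.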
+
+// Receipt bodies
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct DidCreateReceipt {
+    pub did: Did,
+    pub did_type: String,
+    pub controller: Option<Did>,
+    pub created_by: Did,
+    pub initial_keys: Vec<String>,
+    pub did_document_hash: String,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct CredentialIssueReceipt {
+    pub credential_id: String,
+    pub credential_type: String,
+    pub issuer: Did,
+    pub subject: Did,
+    pub expires_at: Option<DateTime<Utc>>,
+    pub claims_summary: HashMap<String, String>,
+    pub credential_hash: String,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct CapabilityGrantReceipt {
+    pub capability_id: String,
+    pub holder: Did,
+    pub capability: Capability,
+    pub scope_summary: String,
+    pub granted_by: Did,
+    pub expires_at: DateTime<Utc>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct AuthEventReceipt {
+    pub auth_event_id: String,
+    pub subject: Did,
+    pub target: Did,
+    pub method: AuthMethod,
+    pub result: AuthResult,
+    pub source_ip_hash: Option<String>,
+    pub mfa_verified: bool,
+}
+
+/// Identity Engine
+pub struct IdentityEngine {
+    did_documents: HashMap<Did, DidDocument>,
+    credentials: HashMap<String, VerifiableCredential>,
+    capabilities: HashMap<String, CapabilityToken>,
+    revoked_credentials: HashMap<String, DateTime<Utc>>, // credential_id -> revocation time (assumed)
+}
+
+impl IdentityEngine {
+    pub fn new() -> Self {
+        IdentityEngine {
+            did_documents: HashMap::new(),
+            credentials: HashMap::new(),
+            capabilities: HashMap::new(),
+            revoked_credentials: HashMap::new(),
+        }
+    }
+
+    pub fn create_did(
+        &mut self,
+        did_type: DidType,
+        identifier: &str,
+        controller: Option<Did>,
+        public_key: &str,
+        created_by: Did,
+    ) -> Result<Receipt<DidCreateReceipt>, IdentityError> {
+        let did = Did::new(did_type, identifier);
+
+        if self.did_documents.contains_key(&did) {
+            return Err(IdentityError::DidExists);
+        }
+
+        let key_id = format!("{}#key-1", did.as_str());
+
+        let doc = DidDocument {
+            context: vec![
+                "https://www.w3.org/ns/did/v1".to_string(),
+                "https://vaultmesh.io/ns/did/v1".to_string(),
+            ],
+            id: did.clone(),
+            controller: controller.clone(),
+            verification_method: vec![VerificationMethod {
+                id: key_id.clone(),
+                method_type: "Ed25519VerificationKey2020".to_string(),
+                controller: did.clone(),
+                public_key_multibase: public_key.to_string(),
+            }],
+            authentication: vec![key_id.clone()],
+            assertion_method: vec![key_id.clone()],
+            service: vec![],
+        };
+
+        let doc_hash = VmHash::from_json(&doc)
+            .map_err(|_| IdentityError::SerializationError)?;
+
+        self.did_documents.insert(did.clone(), doc);
+
+        let receipt_body = DidCreateReceipt {
+            did: did.clone(),
+            did_type: did_type.as_str().to_string(),
+            controller,
+            created_by,
+            initial_keys: vec![key_id],
+            did_document_hash: doc_hash.as_str().to_string(),
+        };
+
+        let root_hash = VmHash::from_json(&receipt_body)
+            .map_err(|_| IdentityError::SerializationError)?;
+
+        Ok(Receipt {
+            header: ReceiptHeader {
+                receipt_type: "identity_did_create".to_string(),
+                timestamp: Utc::now(),
+                root_hash: root_hash.as_str().to_string(),
+                tags: vec!["identity".to_string(), "did".to_string(), "create".to_string()],
+            },
+            meta: ReceiptMeta {
+                scroll: Scroll::Identity,
+                sequence: 0,
+                anchor_epoch: None,
+                proof_path: None,
+            },
+            body: receipt_body,
+        })
+    }
+
+    pub fn resolve_did(&self, did: &Did) -> Option<&DidDocument> {
+        self.did_documents.get(did)
+    }
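+
+    // Issuance stores the credential and returns an issue receipt.
+    // verify_credential further down is structural only (revocation and
+    // expiry checks); verifying the Ed25519 proof itself is deferred, as
+    // the inline comment notes.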
+    pub fn issue_credential(
+        &mut self,
+        credential: VerifiableCredential,
+    ) -> Result<Receipt<CredentialIssueReceipt>, IdentityError> {
+        // Verify issuer exists
+        if !self.did_documents.contains_key(&credential.issuer) {
+            return Err(IdentityError::IssuerNotFound);
+        }
+
+        let credential_hash = VmHash::from_json(&credential)
+            .map_err(|_| IdentityError::SerializationError)?;
+
+        let claims_summary: HashMap<String, String> = credential.credential_subject.claims
+            .iter()
+            .map(|(k, v)| (k.clone(), v.to_string()))
+            .collect();
+
+        let receipt_body = CredentialIssueReceipt {
+            credential_id: credential.credential_id.clone(),
+            credential_type: credential.credential_type.join(", "),
+            issuer: credential.issuer.clone(),
+            subject: credential.credential_subject.id.clone(),
+            expires_at: credential.expiration_date,
+            claims_summary,
+            credential_hash: credential_hash.as_str().to_string(),
+        };
+
+        self.credentials.insert(credential.credential_id.clone(), credential);
+
+        let root_hash = VmHash::from_json(&receipt_body)
+            .map_err(|_| IdentityError::SerializationError)?;
+
+        Ok(Receipt {
+            header: ReceiptHeader {
+                receipt_type: "identity_credential_issue".to_string(),
+                timestamp: Utc::now(),
+                root_hash: root_hash.as_str().to_string(),
+                tags: vec!["identity".to_string(), "credential".to_string(), "issue".to_string()],
+            },
+            meta: ReceiptMeta {
+                scroll: Scroll::Identity,
+                sequence: 0,
+                anchor_epoch: None,
+                proof_path: None,
+            },
+            body: receipt_body,
+        })
+    }
+
+    pub fn verify_credential(&self, credential_id: &str) -> Result<bool, IdentityError> {
+        // Check if revoked
+        if self.revoked_credentials.contains_key(credential_id) {
+            return Ok(false);
+        }
+
+        let credential = self.credentials.get(credential_id)
+            .ok_or(IdentityError::CredentialNotFound)?;
+
+        // Check expiration
+        if let Some(expires) = credential.expiration_date {
+            if Utc::now() > expires {
+                return Ok(false);
+            }
+        }
+
+        // In production, would verify cryptographic proof here
+        Ok(true)
+    }
+
+    pub fn check_capability(&self, holder: &Did, capability: Capability) -> bool {
+        self.capabilities.values().any(|cap| {
+            &cap.holder == holder
+                && cap.capability == capability
+                && Utc::now() < cap.expires_at
+        })
+    }
+}
+
+#[derive(Debug)]
+pub enum IdentityError {
+    DidExists,
+    DidNotFound,
+    IssuerNotFound,
+    CredentialNotFound,
+    CredentialRevoked,
+    CapabilityDenied,
+    SerializationError,
+}
+```
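+
+A matching sketch for the identity engine (the `vaultmesh_identity` crate path, DID names, and key string are placeholders):
+
+```rust
+use vaultmesh_core::{Did, DidType};
+use vaultmesh_identity::IdentityEngine;
+
+fn main() {
+    let mut engine = IdentityEngine::new();
+
+    // Register a node DID; the caller appends the receipt to the Identity scroll.
+    let receipt = engine
+        .create_did(
+            DidType::Node,
+            "brick-01",
+            None,                 // no separate controller
+            "z6MkplaceholderKey", // placeholder multibase public key
+            Did::new(DidType::Human, "sovereign"),
+        )
+        .expect("fresh DID");
+
+    assert_eq!(receipt.header.receipt_type, "identity_did_create");
+    assert!(engine
+        .resolve_did(&Did::new(DidType::Node, "brick-01"))
+        .is_some());
+}
+```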
+
+---
+
+## 4. Python CLI Skeleton
+
+```python
+#!/usr/bin/env python3
+"""
+VaultMesh Unified CLI
+vm-cli - Gateway to all VaultMesh engines
+"""
+
+import click
+from datetime import datetime
+from pathlib import Path
+from typing import Optional
+import json
+
+# ============================================================================
+# Shared utilities
+# ============================================================================
+
+def emit_receipt(scroll: str, receipt_type: str, body: dict, tags: list[str]) -> dict:
+    """Create and emit a receipt to the appropriate scroll."""
+    # blake3 is not part of hashlib; this assumes the third-party
+    # `blake3` package (pip install blake3).
+    from blake3 import blake3
+
+    receipt = {
+        "type": receipt_type,
+        "timestamp": datetime.utcnow().isoformat() + "Z",
+        "tags": tags,
+        **body
+    }
+
+    # Compute root hash
+    receipt_json = json.dumps(receipt, sort_keys=True)
+    root_hash = f"blake3:{blake3(receipt_json.encode()).hexdigest()}"
+    receipt["root_hash"] = root_hash
+
+    # Append to scroll (simplified naming; the Rust Scroll enum maps some
+    # scrolls, e.g. drills, to differently named JSONL files)
+    scroll_path = Path(f"receipts/{scroll}/{scroll}_events.jsonl")
+    scroll_path.parent.mkdir(parents=True, exist_ok=True)
+
+    with open(scroll_path, "a") as f:
+        f.write(json.dumps(receipt) + "\n")
+
+    # Update Merkle root (simplified)
+    root_file = Path(f"ROOT.{scroll}.txt")
+    root_file.write_text(root_hash)
+
+    return receipt
+
+def load_receipts(scroll: str, filters: Optional[dict] = None) -> list[dict]:
+    """Load and optionally filter receipts from a scroll."""
+    scroll_path = Path(f"receipts/{scroll}/{scroll}_events.jsonl")
+
+    if not scroll_path.exists():
+        return []
+
+    receipts = []
+    with open(scroll_path) as f:
+        for line in f:
+            receipt = json.loads(line.strip())
+
+            # Apply filters
+            if filters:
+                match = True
+                for key, value in filters.items():
+                    if key == "from_date":
+                        if receipt.get("timestamp", "") < value:
+                            match = False
+                    elif key == "to_date":
+                        if receipt.get("timestamp", "") > value:
+                            match = False
+                    elif key == "type":
+                        if receipt.get("type") not in value:
+                            match = False
+                    elif receipt.get(key) != value:
+                        match = False
+
+                if match:
+                    receipts.append(receipt)
+            else:
+                receipts.append(receipt)
+
+    return receipts
+
+# ============================================================================
+# Main CLI Group
+# ============================================================================
+
+@click.group()
+@click.version_option(version="0.1.0")
+def cli():
+    """VaultMesh Civilization Ledger CLI"""
+    pass
+
+# ============================================================================
+# Treasury Commands
+# ============================================================================
+
+@cli.group()
+def treasury():
+    """Treasury Engine - Financial operations"""
+    pass
+
+@treasury.command("debit")
+@click.option("--from", "from_account", required=True, help="Source account")
+@click.option("--amount", required=True, type=float, help="Amount")
+@click.option("--currency", default="EUR", help="Currency code")
+@click.option("--memo", required=True, help="Transaction memo")
+@click.option("--tags", default="", help="Comma-separated tags")
+def treasury_debit(from_account: str, amount: float, currency: str, memo: str, tags: str):
+    """Record a debit entry."""
+    entry_id = f"entry-{datetime.utcnow().strftime('%Y%m%d%H%M%S')}"
+
+    receipt = emit_receipt(
+        scroll="treasury",
+        receipt_type="treasury_debit",
+        body={
+            "entry_id": entry_id,
+            "account": from_account,
+            "amount": amount,
+            "currency": currency,
+            "memo": memo,
+        },
+        tags=["treasury", "debit"] + (tags.split(",") if tags else [])
+    )
+
+    click.echo(f"✓ Debit recorded: {entry_id}")
+    click.echo(f" 
Amount: {amount} {currency}") + click.echo(f" Hash: {receipt['root_hash'][:20]}...") + +@treasury.command("credit") +@click.option("--to", "to_account", required=True, help="Destination account") +@click.option("--amount", required=True, type=float, help="Amount") +@click.option("--currency", default="EUR", help="Currency code") +@click.option("--memo", required=True, help="Transaction memo") +@click.option("--tags", default="", help="Comma-separated tags") +def treasury_credit(to_account: str, amount: float, currency: str, memo: str, tags: str): + """Record a credit entry.""" + entry_id = f"entry-{datetime.utcnow().strftime('%Y%m%d%H%M%S')}" + + receipt = emit_receipt( + scroll="treasury", + receipt_type="treasury_credit", + body={ + "entry_id": entry_id, + "account": to_account, + "amount": amount, + "currency": currency, + "memo": memo, + }, + tags=["treasury", "credit"] + (tags.split(",") if tags else []) + ) + + click.echo(f"✓ Credit recorded: {entry_id}") + click.echo(f" Amount: {amount} {currency}") + click.echo(f" Hash: {receipt['root_hash'][:20]}...") + +@treasury.command("query") +@click.option("--account", help="Filter by account") +@click.option("--from", "from_date", help="From date (ISO format)") +@click.option("--to", "to_date", help="To date (ISO format)") +@click.option("--type", "entry_type", help="Entry type (credit/debit)") +@click.option("--format", "output_format", default="table", type=click.Choice(["table", "json", "csv"])) +def treasury_query(account: str, from_date: str, to_date: str, entry_type: str, output_format: str): + """Query treasury entries.""" + filters = {} + if account: + filters["account"] = account + if from_date: + filters["from_date"] = from_date + if to_date: + filters["to_date"] = to_date + if entry_type: + filters["type"] = [f"treasury_{entry_type}"] + + receipts = load_receipts("treasury", filters) + + if output_format == "json": + click.echo(json.dumps(receipts, indent=2)) + elif output_format == "csv": + if receipts: + click.echo(",".join(receipts[0].keys())) + for r in receipts: + click.echo(",".join(str(v) for v in r.values())) + else: + click.echo(f"Found {len(receipts)} entries:") + for r in receipts: + click.echo(f" {r.get('timestamp', 'N/A')[:19]} | {r.get('type', 'N/A'):20} | {r.get('amount', 'N/A'):>10} {r.get('currency', '')}") + +# ============================================================================ +# Mesh Commands +# ============================================================================ + +@cli.group() +def mesh(): + """Mesh Engine - Federation topology""" + pass + +@mesh.command("node") +@click.argument("action", type=click.Choice(["list", "show", "join", "leave"])) +@click.option("--id", "node_id", help="Node identifier") +@click.option("--config", type=click.Path(exists=True), help="Node config file") +def mesh_node(action: str, node_id: str, config: str): + """Manage mesh nodes.""" + if action == "list": + receipts = load_receipts("mesh", {"type": ["mesh_node_join"]}) + click.echo("Active nodes:") + for r in receipts: + click.echo(f" • {r.get('node_id', 'unknown')} ({r.get('node_type', 'unknown')})") + + elif action == "show" and node_id: + receipts = load_receipts("mesh", {"node_id": f"did:vm:node:{node_id}"}) + if receipts: + click.echo(json.dumps(receipts[-1], indent=2)) + else: + click.echo(f"Node not found: {node_id}") + + elif action == "join" and config: + with open(config) as f: + node_config = json.load(f) + + receipt = emit_receipt( + scroll="mesh", + receipt_type="mesh_node_join", + body={ + "node_id": 
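+                # Taken directly from the operator-supplied config file; a
+                # hardened path would validate these fields against the DID
+                # registry before emitting the join receipt.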
node_config.get("node_id"), + "display_name": node_config.get("display_name"), + "node_type": node_config.get("node_type", "infrastructure"), + "endpoints_hash": "blake3:...", # Would compute from endpoints + }, + tags=["mesh", "node", "join"] + ) + + click.echo(f"✓ Node joined: {node_config.get('node_id')}") + + elif action == "leave" and node_id: + receipt = emit_receipt( + scroll="mesh", + receipt_type="mesh_node_leave", + body={ + "node_id": f"did:vm:node:{node_id}", + "reason": "manual_leave", + }, + tags=["mesh", "node", "leave"] + ) + + click.echo(f"✓ Node left: {node_id}") + +@mesh.command("topology") +@click.option("--output", type=click.Path(), help="Output file") +def mesh_topology(output: str): + """Show current mesh topology.""" + node_receipts = load_receipts("mesh", {"type": ["mesh_node_join", "mesh_node_leave"]}) + route_receipts = load_receipts("mesh", {"type": ["mesh_route_change"]}) + + # Build current state (simplified) + nodes = {} + for r in node_receipts: + node_id = r.get("node_id") + if r["type"] == "mesh_node_join": + nodes[node_id] = r + elif r["type"] == "mesh_node_leave" and node_id in nodes: + del nodes[node_id] + + topology = { + "timestamp": datetime.utcnow().isoformat() + "Z", + "node_count": len(nodes), + "nodes": list(nodes.keys()), + } + + if output: + with open(output, "w") as f: + json.dump(topology, f, indent=2) + click.echo(f"✓ Topology saved to {output}") + else: + click.echo(json.dumps(topology, indent=2)) + +# ============================================================================ +# Identity Commands +# ============================================================================ + +@cli.group() +def identity(): + """Identity Engine - DIDs and credentials""" + pass + +@identity.command("did") +@click.argument("action", type=click.Choice(["create", "show", "list", "rotate-key"])) +@click.option("--type", "did_type", type=click.Choice(["node", "human", "agent", "service"])) +@click.option("--id", "identifier", help="DID identifier") +@click.option("--controller", help="Controller DID") +def identity_did(action: str, did_type: str, identifier: str, controller: str): + """Manage decentralized identifiers.""" + if action == "create" and did_type and identifier: + did = f"did:vm:{did_type}:{identifier}" + + receipt = emit_receipt( + scroll="identity", + receipt_type="identity_did_create", + body={ + "did": did, + "did_type": did_type, + "controller": controller, + "initial_keys": [f"{did}#key-1"], + "did_document_hash": "blake3:...", + }, + tags=["identity", "did", "create", did_type] + ) + + click.echo(f"✓ DID created: {did}") + + elif action == "show" and identifier: + did = f"did:vm:node:{identifier}" # Simplified + receipts = load_receipts("identity", {"did": did}) + if receipts: + click.echo(json.dumps(receipts[-1], indent=2)) + else: + click.echo(f"DID not found: {did}") + + elif action == "list": + receipts = load_receipts("identity", {"type": ["identity_did_create"]}) + click.echo("Registered DIDs:") + for r in receipts: + click.echo(f" • {r.get('did', 'unknown')} ({r.get('did_type', 'unknown')})") + +@identity.command("capability") +@click.argument("action", type=click.Choice(["grant", "check", "list", "revoke"])) +@click.option("--holder", help="Capability holder DID") +@click.option("--capability", type=click.Choice(["anchor", "storage", "compute", "oracle", "admin"])) +@click.option("--expires", help="Expiration date (ISO format)") +def identity_capability(action: str, holder: str, capability: str, expires: str): + """Manage capability 
tokens.""" + if action == "grant" and holder and capability: + cap_id = f"cap:vm:{datetime.utcnow().strftime('%Y-%m-%d')}:{capability}:{holder.split(':')[-1]}" + + receipt = emit_receipt( + scroll="identity", + receipt_type="identity_capability_grant", + body={ + "capability_id": cap_id, + "holder": holder, + "capability": capability, + "granted_by": "did:vm:node:portal-01", # Would come from context + "expires_at": expires or "2026-01-01T00:00:00Z", + }, + tags=["identity", "capability", "grant", capability] + ) + + click.echo(f"✓ Capability granted: {cap_id}") + + elif action == "check" and holder and capability: + receipts = load_receipts("identity", {"holder": holder, "capability": capability}) + grants = [r for r in receipts if r["type"] == "identity_capability_grant"] + revokes = [r for r in receipts if r["type"] == "identity_capability_revoke"] + + # Check if any valid grant exists + valid = False + for g in grants: + grant_id = g.get("capability_id") + if not any(r.get("capability_id") == grant_id for r in revokes): + expires = g.get("expires_at", "") + if expires > datetime.utcnow().isoformat(): + valid = True + break + + if valid: + click.echo(f"✓ {holder} HAS capability: {capability}") + else: + click.echo(f"✗ {holder} does NOT have capability: {capability}") + + elif action == "list" and holder: + receipts = load_receipts("identity", {"holder": holder, "type": ["identity_capability_grant"]}) + click.echo(f"Capabilities for {holder}:") + for r in receipts: + click.echo(f" • {r.get('capability', 'unknown')} (expires: {r.get('expires_at', 'never')[:10]})") + +# ============================================================================ +# Psi-Field Commands +# ============================================================================ + +@cli.group() +def psi(): + """Ψ-Field Engine - Alchemical consciousness""" + pass + +@psi.command("phase") +@click.argument("action", type=click.Choice(["current", "history", "transition"])) +@click.option("--to", "to_phase", type=click.Choice(["nigredo", "albedo", "citrinitas", "rubedo"])) +@click.option("--trigger", help="Transition trigger description") +def psi_phase(action: str, to_phase: str, trigger: str): + """Manage alchemical phases.""" + if action == "current": + receipts = load_receipts("psi", {"type": ["psi_phase_transition"]}) + if receipts: + last = receipts[-1] + phase = last.get("to_phase", "unknown") + symbols = {"nigredo": "🜁", "albedo": "🜄", "citrinitas": "🜆", "rubedo": "🜂"} + click.echo(f"Current Phase: {phase.upper()} {symbols.get(phase, '')}") + click.echo(f"Since: {last.get('timestamp', 'unknown')[:19]}") + else: + click.echo("Current Phase: NIGREDO 🜁 (initial)") + + elif action == "history": + receipts = load_receipts("psi", {"type": ["psi_phase_transition"]}) + click.echo("Phase History:") + for r in receipts: + click.echo(f" {r.get('timestamp', '')[:19]} | {r.get('from_phase', 'init'):10} → {r.get('to_phase', ''):10}") + + elif action == "transition" and to_phase and trigger: + # Get current phase + receipts = load_receipts("psi", {"type": ["psi_phase_transition"]}) + from_phase = receipts[-1].get("to_phase", "nigredo") if receipts else "nigredo" + + receipt = emit_receipt( + scroll="psi", + receipt_type="psi_phase_transition", + body={ + "transition_id": f"psi-trans-{datetime.utcnow().strftime('%Y-%m-%d-%H%M%S')}", + "from_phase": from_phase, + "to_phase": to_phase, + "trigger_type": "manual", + "trigger_description": trigger, + }, + tags=["psi", "phase", from_phase, to_phase] + ) + + click.echo(f"✓ Phase transition: 
{from_phase} → {to_phase}") + +@psi.command("transmute") +@click.option("--input", "input_ref", required=True, help="Input reference (e.g., INC-2025-12-001)") +@click.option("--input-type", required=True, type=click.Choice(["incident", "vulnerability", "drill"])) +@click.option("--title", required=True, help="Transmutation title") +def psi_transmute(input_ref: str, input_type: str, title: str): + """Initiate a transmutation process.""" + trans_id = f"psi-transmute-{datetime.utcnow().strftime('%Y-%m-%d-%H%M%S')}" + + receipt = emit_receipt( + scroll="psi", + receipt_type="psi_transmutation", + body={ + "transmutation_id": trans_id, + "title": title, + "input_type": input_type, + "input_reference": input_ref, + "status": "initiated", + "alchemical_phase_target": "citrinitas", + }, + tags=["psi", "transmutation", input_type] + ) + + click.echo(f"✓ Transmutation initiated: {trans_id}") + click.echo(f" Input: {input_ref} ({input_type})") + click.echo(f" Target phase: CITRINITAS 🜆") + +@psi.command("opus") +def psi_opus(): + """Display Magnum Opus status.""" + # Get current phase + phase_receipts = load_receipts("psi", {"type": ["psi_phase_transition"]}) + current_phase = phase_receipts[-1].get("to_phase", "nigredo") if phase_receipts else "nigredo" + + # Get transmutation counts + trans_receipts = load_receipts("psi", {"type": ["psi_transmutation"]}) + + # Get resonance count + res_receipts = load_receipts("psi", {"type": ["psi_resonance"]}) + + symbols = {"nigredo": "🜁", "albedo": "🜄", "citrinitas": "🜆", "rubedo": "🜂"} + + click.echo("=" * 50) + click.echo(" MAGNUM OPUS STATUS") + click.echo("=" * 50) + click.echo() + click.echo(f" Current Phase: {current_phase.upper()} {symbols.get(current_phase, '')}") + click.echo() + click.echo(" Phase Progress:") + phases = ["nigredo", "albedo", "citrinitas", "rubedo"] + for i, p in enumerate(phases): + marker = "●" if p == current_phase else ("○" if phases.index(p) > phases.index(current_phase) else "●") + click.echo(f" {marker} {p.upper():12} {symbols.get(p, '')}") + click.echo() + click.echo(f" Transmutations: {len(trans_receipts)}") + click.echo(f" Resonances: {len(res_receipts)}") + click.echo() + click.echo("=" * 50) + +# ============================================================================ +# Guardian Commands (bridge to existing) +# ============================================================================ + +@cli.group() +def guardian(): + """Guardian Engine - Anchor and sentinel""" + pass + +@guardian.command("anchor-status") +def guardian_anchor_status(): + """Show current anchor status.""" + receipts = load_receipts("guardian", {"type": ["anchor_success", "anchor_failure"]}) + + if receipts: + last = receipts[-1] + click.echo(f"Last Anchor: {last.get('timestamp', 'unknown')[:19]}") + click.echo(f"Status: {last.get('type', 'unknown')}") + click.echo(f"Root: {last.get('root_hash', 'unknown')[:30]}...") + else: + click.echo("No anchor events recorded") + +@guardian.command("anchor-now") +@click.option("--scroll", multiple=True, help="Scrolls to anchor (default: all)") +def guardian_anchor_now(scroll: tuple): + """Trigger immediate anchor cycle.""" + scrolls = list(scroll) if scroll else ["drills", "compliance", "treasury", "mesh", "identity", "psi"] + + click.echo(f"Anchoring scrolls: {', '.join(scrolls)}") + + # Would call actual Guardian anchor cycle here + receipt = emit_receipt( + scroll="guardian", + receipt_type="anchor_success", + body={ + "anchor_id": f"anchor-{datetime.utcnow().strftime('%Y%m%d%H%M%S')}", + "scrolls_anchored": 
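+            # Echoes the requested scroll list; a real Guardian cycle would
+            # recompute each ROOT.<scroll>.txt before emitting this receipt.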
scrolls, + "backend": "local", # Would be ots/ethereum/bitcoin in production + }, + tags=["guardian", "anchor", "manual"] + ) + + click.echo(f"✓ Anchor cycle complete") + click.echo(f" ID: {receipt.get('anchor_id')}") + +# ============================================================================ +# Entry Point +# ============================================================================ + +if __name__ == "__main__": + cli() +``` + +--- + +## 5. Directory Structure + +``` +vaultmesh/ +├── Cargo.toml # Rust workspace +├── pyproject.toml # Python CLI +│ +├── vaultmesh-core/ # Shared Rust types +│ └── src/ +│ ├── lib.rs +│ ├── receipt.rs +│ ├── did.rs +│ └── hash.rs +│ +├── vaultmesh-treasury/ # Treasury engine +│ └── src/lib.rs +│ +├── vaultmesh-mesh/ # Mesh engine +│ └── src/lib.rs +│ +├── vaultmesh-identity/ # Identity engine +│ └── src/lib.rs +│ +├── vaultmesh-offsec/ # OffSec engine +│ └── src/lib.rs +│ +├── vaultmesh-observability/ # Observability engine +│ └── src/lib.rs +│ +├── vaultmesh-automation/ # Automation engine +│ └── src/lib.rs +│ +├── vaultmesh-psi/ # Ψ-Field engine +│ └── src/lib.rs +│ +├── vaultmesh-guardian/ # Guardian (existing) +│ └── src/lib.rs +│ +├── cli/ # Python CLI +│ ├── __init__.py +│ └── vm_cli.py +│ +├── receipts/ # Receipt storage +│ ├── drills/ +│ ├── compliance/ +│ ├── guardian/ +│ ├── treasury/ +│ ├── mesh/ +│ ├── offsec/ +│ ├── identity/ +│ ├── observability/ +│ ├── automation/ +│ └── psi/ +│ +├── cases/ # Case artifacts +│ ├── drills/ +│ ├── treasury/ +│ ├── offsec/ +│ ├── identity/ +│ └── psi/ +│ +└── ROOT.*.txt # Merkle roots +``` + +--- + +## 6. Summary + +This document provides: + +1. **Rust Core Types** (`vaultmesh-core`) + - `Receipt` — generic receipt wrapper + - `Scroll` enum — all 10 scrolls with paths + - `Did` — decentralized identifier type + - `VmHash` — blake3 hash with prefix + - `merkle_root()` — Merkle tree computation + +2. **Treasury Engine** (`vaultmesh-treasury`) + - `Account`, `Entry`, `SettlementContract`, `SettlementState` + - Full settlement workflow with signatures + - Receipt generation for credits, debits, settlements + +3. **Identity Engine** (`vaultmesh-identity`) + - `DidDocument`, `VerifiableCredential`, `CapabilityToken` + - DID creation, credential issuance, capability checks + - Authentication events + +4. **Python CLI** (`vm-cli`) + - Unified CLI covering Treasury, Mesh, Identity, Ψ-Field, Guardian + - `emit_receipt()` and `load_receipts()` utilities + - Complete command structure matching engine specs + +5. **Directory Structure** + - Rust workspace layout + - Receipt and case storage paths + - Merkle root file locations + +All engines now have both specifications and implementation scaffolds following the Eternal Pattern. diff --git a/docs/VAULTMESH-MCP-SERVERS.md b/docs/VAULTMESH-MCP-SERVERS.md new file mode 100644 index 0000000..168e750 --- /dev/null +++ b/docs/VAULTMESH-MCP-SERVERS.md @@ -0,0 +1,1049 @@ +# VAULTMESH-MCP-SERVERS.md +**Exposing the Civilization Ledger to Claude** + +> *Every tool is a doorway. Every doorway has a guardian.* + +MCP (Model Context Protocol) servers expose VaultMesh engines to Claude, enabling AI-assisted operations while maintaining constitutional governance. + +--- + +## 1. MCP Architecture + +``` +┌─────────────────────────────────────────────────────────────┐ +│ CLAUDE │ +│ │ +│ "What's our compliance status for Annex IV?" 
│ +│ "Start a drill for IoT security" │ +│ "Show me the mesh topology" │ +└───────────────────────────┬─────────────────────────────────┘ + │ + │ MCP Protocol + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ MCP GATEWAY │ +│ │ +│ • Authentication (capability verification) │ +│ • Rate limiting │ +│ • Audit logging (all tool calls receipted) │ +│ • Constitutional compliance checking │ +└───────────────────────────┬─────────────────────────────────┘ + │ + ┌───────────────┼───────────────┐ + │ │ │ + ▼ ▼ ▼ + ┌───────────┐ ┌───────────┐ ┌───────────┐ + │ Oracle │ │ Drills │ │ Mesh │ + │ Server │ │ Server │ │ Server │ + └───────────┘ └───────────┘ └───────────┘ + │ │ │ + ▼ ▼ ▼ + ┌─────────────────────────────────────────┐ + │ VAULTMESH ENGINES │ + └─────────────────────────────────────────┘ +``` + +--- + +## 2. Core MCP Server + +### 2.1 Server Definition + +```python +# vaultmesh_mcp/server.py + +from mcp.server import Server +from mcp.types import Tool, TextContent +from typing import Any +import json + +from .engines import ( + oracle_engine, + drills_engine, + mesh_engine, + treasury_engine, + identity_engine, + guardian_engine, + psi_engine, + governance_engine, +) +from .auth import verify_capability, get_caller_identity +from .receipts import emit_tool_call_receipt + +server = Server("vaultmesh") + +# ============================================================================ +# Oracle Tools +# ============================================================================ + +@server.tool() +async def oracle_answer( + question: str, + frameworks: list[str] = None, + context_docs: list[str] = None, +) -> str: + """ + Ask the VaultMesh Compliance Oracle a question about regulatory compliance. + + Args: + question: The compliance question to answer + frameworks: Optional list of frameworks to focus on (e.g., ["AI_Act", "GDPR"]) + context_docs: Optional list of document IDs to use as context + + Returns: + Structured compliance answer with citations and confidence indicators + """ + caller = await get_caller_identity() + await verify_capability(caller, "oracle_query") + + result = await oracle_engine.answer( + question=question, + frameworks=frameworks or [], + context_docs=context_docs or [], + ) + + await emit_tool_call_receipt( + tool="oracle_answer", + caller=caller, + params={"question": question, "frameworks": frameworks}, + result_hash=result.answer_hash, + ) + + return json.dumps(result.to_dict(), indent=2) + +@server.tool() +async def oracle_corpus_search( + query: str, + limit: int = 10, +) -> str: + """ + Search the compliance corpus for relevant documents. + + Args: + query: Search query + limit: Maximum number of results + + Returns: + List of matching documents with relevance scores + """ + caller = await get_caller_identity() + await verify_capability(caller, "oracle_query") + + results = await oracle_engine.search_corpus(query, limit) + + return json.dumps([r.to_dict() for r in results], indent=2) + +# ============================================================================ +# Drills Tools +# ============================================================================ + +@server.tool() +async def drills_create( + prompt: str, + skills: list[str] = None, + severity: str = "medium", +) -> str: + """ + Create a new security drill contract from a scenario description. 
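+
+    Example (illustrative call; the prompt and skill names here are
+    hypothetical, and the returned contract shape is defined by the
+    Drills engine):
+
+        await drills_create(
+            prompt="Compromised CI runner exfiltrating build secrets",
+            skills=["detection-defense-ir"],
+            severity="high",
+        )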
+ + Args: + prompt: Natural language description of the security scenario + skills: Optional list of specific skills to include + severity: Drill severity (low, medium, high, critical) + + Returns: + Generated drill contract with stages and objectives + """ + caller = await get_caller_identity() + await verify_capability(caller, "drills_create") + + contract = await drills_engine.create_contract( + prompt=prompt, + skills=skills, + severity=severity, + ) + + await emit_tool_call_receipt( + tool="drills_create", + caller=caller, + params={"prompt": prompt, "severity": severity}, + result_hash=contract.contract_hash, + ) + + return json.dumps(contract.to_dict(), indent=2) + +@server.tool() +async def drills_status( + drill_id: str = None, +) -> str: + """ + Get status of active drills or a specific drill. + + Args: + drill_id: Optional specific drill ID to check + + Returns: + Drill status information + """ + caller = await get_caller_identity() + await verify_capability(caller, "drills_view") + + if drill_id: + status = await drills_engine.get_drill_status(drill_id) + else: + status = await drills_engine.get_active_drills() + + return json.dumps(status, indent=2) + +@server.tool() +async def drills_complete_stage( + drill_id: str, + stage_id: str, + outputs: list[str] = None, + findings: str = None, +) -> str: + """ + Mark a drill stage as complete with outputs. + + Args: + drill_id: The drill ID + stage_id: The stage to complete + outputs: List of output artifact paths + findings: Summary of findings from this stage + + Returns: + Updated drill state + """ + caller = await get_caller_identity() + await verify_capability(caller, "drills_execute") + + state = await drills_engine.complete_stage( + drill_id=drill_id, + stage_id=stage_id, + outputs=outputs or [], + findings=findings, + ) + + await emit_tool_call_receipt( + tool="drills_complete_stage", + caller=caller, + params={"drill_id": drill_id, "stage_id": stage_id}, + result_hash=state.state_hash, + ) + + return json.dumps(state.to_dict(), indent=2) + +# ============================================================================ +# Mesh Tools +# ============================================================================ + +@server.tool() +async def mesh_topology() -> str: + """ + Get current mesh topology including nodes, routes, and health. + + Returns: + Current topology snapshot + """ + caller = await get_caller_identity() + await verify_capability(caller, "mesh_view") + + topology = await mesh_engine.get_topology() + + return json.dumps(topology.to_dict(), indent=2) + +@server.tool() +async def mesh_node_status( + node_id: str, +) -> str: + """ + Get detailed status of a specific node. + + Args: + node_id: Node identifier (e.g., "brick-01") + + Returns: + Node status including health, capabilities, and recent events + """ + caller = await get_caller_identity() + await verify_capability(caller, "mesh_view") + + status = await mesh_engine.get_node_status(f"did:vm:node:{node_id}") + + return json.dumps(status.to_dict(), indent=2) + +@server.tool() +async def mesh_capability_check( + node_id: str, + capability: str, +) -> str: + """ + Check if a node has a specific capability. 
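+
+    Example (illustrative; capability names follow the Mesh engine's
+    standard set):
+
+        await mesh_capability_check(node_id="brick-02", capability="anchor")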
+ + Args: + node_id: Node identifier + capability: Capability to check (anchor, storage, compute, oracle, admin) + + Returns: + Capability status and details + """ + caller = await get_caller_identity() + await verify_capability(caller, "mesh_view") + + result = await mesh_engine.check_capability( + f"did:vm:node:{node_id}", + capability, + ) + + return json.dumps(result, indent=2) + +# ============================================================================ +# Treasury Tools +# ============================================================================ + +@server.tool() +async def treasury_balance( + account_id: str = None, +) -> str: + """ + Get treasury balance for an account or all accounts. + + Args: + account_id: Optional specific account ID + + Returns: + Balance information + """ + caller = await get_caller_identity() + await verify_capability(caller, "treasury_view") + + if account_id: + balance = await treasury_engine.get_balance(account_id) + else: + balance = await treasury_engine.get_all_balances() + + return json.dumps(balance, indent=2) + +@server.tool() +async def treasury_record_entry( + entry_type: str, + account: str, + amount: float, + currency: str, + memo: str, + tags: list[str] = None, +) -> str: + """ + Record a treasury entry (credit or debit). + + Args: + entry_type: "credit" or "debit" + account: Account ID + amount: Amount + currency: Currency code (EUR, USD, etc.) + memo: Transaction memo + tags: Optional tags + + Returns: + Entry receipt + """ + caller = await get_caller_identity() + await verify_capability(caller, "treasury_write") + + receipt = await treasury_engine.record_entry( + entry_type=entry_type, + account=account, + amount=amount, + currency=currency, + memo=memo, + tags=tags or [], + ) + + await emit_tool_call_receipt( + tool="treasury_record_entry", + caller=caller, + params={"entry_type": entry_type, "account": account, "amount": amount}, + result_hash=receipt.root_hash, + ) + + return json.dumps(receipt.to_dict(), indent=2) + +# ============================================================================ +# Guardian Tools +# ============================================================================ + +@server.tool() +async def guardian_anchor_status() -> str: + """ + Get current Guardian anchor status including last anchor time and health. + + Returns: + Anchor status information + """ + caller = await get_caller_identity() + await verify_capability(caller, "guardian_view") + + status = await guardian_engine.get_anchor_status() + + return json.dumps(status.to_dict(), indent=2) + +@server.tool() +async def guardian_anchor_now( + scrolls: list[str] = None, +) -> str: + """ + Trigger an immediate anchor cycle. + + Args: + scrolls: Optional list of specific scrolls to anchor (default: all) + + Returns: + Anchor result + """ + caller = await get_caller_identity() + await verify_capability(caller, "anchor") + + result = await guardian_engine.anchor_now(scrolls) + + await emit_tool_call_receipt( + tool="guardian_anchor_now", + caller=caller, + params={"scrolls": scrolls}, + result_hash=result.anchor_hash, + ) + + return json.dumps(result.to_dict(), indent=2) + +@server.tool() +async def guardian_verify_receipt( + receipt_hash: str, + scroll: str, +) -> str: + """ + Verify a specific receipt against the anchor chain. 
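+
+    Example (illustrative values):
+
+        await guardian_verify_receipt(
+            receipt_hash="blake3:bbb222...",
+            scroll="mesh",
+        )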
+ + Args: + receipt_hash: Hash of the receipt to verify + scroll: Scroll containing the receipt + + Returns: + Verification result with proof path + """ + caller = await get_caller_identity() + await verify_capability(caller, "guardian_view") + + result = await guardian_engine.verify_receipt(receipt_hash, scroll) + + return json.dumps(result.to_dict(), indent=2) + +# ============================================================================ +# Identity Tools +# ============================================================================ + +@server.tool() +async def identity_resolve_did( + did: str, +) -> str: + """ + Resolve a DID to its document. + + Args: + did: DID to resolve (e.g., "did:vm:node:brick-01") + + Returns: + DID document + """ + caller = await get_caller_identity() + await verify_capability(caller, "identity_view") + + doc = await identity_engine.resolve_did(did) + + return json.dumps(doc.to_dict(), indent=2) + +@server.tool() +async def identity_verify_credential( + credential_id: str, +) -> str: + """ + Verify a verifiable credential. + + Args: + credential_id: Credential ID to verify + + Returns: + Verification result + """ + caller = await get_caller_identity() + await verify_capability(caller, "identity_view") + + result = await identity_engine.verify_credential(credential_id) + + return json.dumps(result, indent=2) + +@server.tool() +async def identity_whoami() -> str: + """ + Get the current caller's identity and capabilities. + + Returns: + Current identity context + """ + caller = await get_caller_identity() + + identity = await identity_engine.get_identity_context(caller) + + return json.dumps(identity.to_dict(), indent=2) + +# ============================================================================ +# Psi-Field Tools +# ============================================================================ + +@server.tool() +async def psi_phase_status() -> str: + """ + Get current alchemical phase status and Magnum Opus progress. + + Returns: + Current phase and recent transitions + """ + caller = await get_caller_identity() + await verify_capability(caller, "psi_view") + + status = await psi_engine.get_phase_status() + + return json.dumps(status.to_dict(), indent=2) + +@server.tool() +async def psi_transmute( + input_reference: str, + input_type: str, + title: str, +) -> str: + """ + Initiate a transmutation process to transform a negative event into capability. + + Args: + input_reference: Reference to the input event (e.g., "INC-2025-12-001") + input_type: Type of input (incident, vulnerability, drill) + title: Title for the transmutation + + Returns: + Initiated transmutation contract + """ + caller = await get_caller_identity() + await verify_capability(caller, "psi_transmute") + + contract = await psi_engine.initiate_transmutation( + input_reference=input_reference, + input_type=input_type, + title=title, + ) + + await emit_tool_call_receipt( + tool="psi_transmute", + caller=caller, + params={"input_reference": input_reference, "input_type": input_type}, + result_hash=contract.transmutation_hash, + ) + + return json.dumps(contract.to_dict(), indent=2) + +@server.tool() +async def psi_opus_status() -> str: + """ + Get full Magnum Opus status including phases, transmutations, and resonances. 
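+
+    Combines the current phase, the transmutation count, and the
+    resonance count into one view; this is the MCP counterpart of the
+    `psi opus` CLI command.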
+ + Returns: + Complete opus status + """ + caller = await get_caller_identity() + await verify_capability(caller, "psi_view") + + opus = await psi_engine.get_opus_status() + + return json.dumps(opus.to_dict(), indent=2) + +# ============================================================================ +# Governance Tools +# ============================================================================ + +@server.tool() +async def governance_constitution_summary() -> str: + """ + Get a summary of the current constitution including version and key articles. + + Returns: + Constitution summary + """ + caller = await get_caller_identity() + await verify_capability(caller, "governance_view") + + summary = await governance_engine.get_constitution_summary() + + return json.dumps(summary.to_dict(), indent=2) + +@server.tool() +async def governance_active_proposals() -> str: + """ + Get list of active governance proposals. + + Returns: + Active proposals in deliberation or voting + """ + caller = await get_caller_identity() + await verify_capability(caller, "governance_view") + + proposals = await governance_engine.get_active_proposals() + + return json.dumps([p.to_dict() for p in proposals], indent=2) + +@server.tool() +async def governance_check_compliance( + action: str, + actor: str, + target: str = None, +) -> str: + """ + Check if an action would comply with the constitution. + + Args: + action: Action to check (e.g., "modify_receipt", "revoke_capability") + actor: DID of the actor + target: Optional target of the action + + Returns: + Compliance check result with relevant articles + """ + caller = await get_caller_identity() + await verify_capability(caller, "governance_view") + + result = await governance_engine.check_compliance(action, actor, target) + + return json.dumps(result.to_dict(), indent=2) + +# ============================================================================ +# Cross-Engine Query Tools +# ============================================================================ + +@server.tool() +async def receipts_search( + scroll: str = None, + receipt_type: str = None, + from_date: str = None, + to_date: str = None, + tags: list[str] = None, + limit: int = 50, +) -> str: + """ + Search receipts across scrolls with filters. + + Args: + scroll: Specific scroll to search (default: all) + receipt_type: Filter by receipt type + from_date: Start date (ISO format) + to_date: End date (ISO format) + tags: Filter by tags + limit: Maximum results + + Returns: + Matching receipts + """ + caller = await get_caller_identity() + await verify_capability(caller, "receipts_view") + + from .receipts import search_receipts + + results = await search_receipts( + scroll=scroll, + receipt_type=receipt_type, + from_date=from_date, + to_date=to_date, + tags=tags, + limit=limit, + ) + + return json.dumps([r.to_dict() for r in results], indent=2) + +@server.tool() +async def system_health() -> str: + """ + Get comprehensive system health across all engines. 
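+
+    Aggregates mesh, guardian, and oracle health, the current Ψ-field
+    phase, the total receipt count, and the age of the last anchor.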
+ + Returns: + Health status for all engines and nodes + """ + caller = await get_caller_identity() + await verify_capability(caller, "system_view") + + from datetime import datetime + from .receipts import get_total_receipt_count + + health = { + "timestamp": datetime.utcnow().isoformat() + "Z", + "mesh": (await mesh_engine.get_health()).to_dict(), + "guardian": (await guardian_engine.get_health()).to_dict(), + "oracle": (await oracle_engine.get_health()).to_dict(), + "psi_phase": (await psi_engine.get_phase_status()).current_phase, + "receipts_total": await get_total_receipt_count(), + "last_anchor_age_seconds": await guardian_engine.get_last_anchor_age(), + } + + return json.dumps(health, indent=2) + +# ============================================================================ +# Server Entry Point +# ============================================================================ + +def main(): + """Run the VaultMesh MCP server.""" + import asyncio + from mcp.server.stdio import stdio_server + + async def run(): + async with stdio_server() as (read_stream, write_stream): + await server.run( + read_stream, + write_stream, + server.create_initialization_options(), + ) + + asyncio.run(run()) + +if __name__ == "__main__": + main() +``` + +### 2.2 Tool Call Receipt + +Every MCP tool call is receipted: + +```json +{ + "type": "mcp_tool_call", + "call_id": "mcp-call-2025-12-06-001", + "timestamp": "2025-12-06T14:30:00Z", + "caller": "did:vm:agent:claude-session-abc123", + "tool": "oracle_answer", + "params_hash": "blake3:params...", + "result_hash": "blake3:result...", + "duration_ms": 1250, + "capability_used": "oracle_query", + "session_id": "session-xyz789", + "tags": ["mcp", "oracle", "tool-call"], + "root_hash": "blake3:aaa111..." +} +``` + +--- + +## 3. 
MCP Gateway + +### 3.1 Authentication Flow + +```python +# vaultmesh_mcp/auth.py + +from typing import Optional +from .identity import identity_engine + +async def get_caller_identity() -> str: + """Get the DID of the current MCP caller.""" + # In production, this comes from the MCP session context + # For Claude Desktop, it's derived from the session token + session = get_current_session() + + if session.authenticated_did: + return session.authenticated_did + + # Anonymous callers get a session-scoped agent DID + return f"did:vm:agent:mcp-session-{session.id}" + +async def verify_capability(caller: str, capability: str) -> bool: + """Verify the caller has the required capability.""" + # Check capability token + has_cap = await identity_engine.check_capability(caller, capability) + + if not has_cap: + raise PermissionError( + f"Caller {caller} lacks capability: {capability}" + ) + + # Log capability exercise + await identity_engine.log_capability_exercise( + caller=caller, + capability=capability, + action="mcp_tool_call", + ) + + return True +``` + +### 3.2 Rate Limiting + +```python +# vaultmesh_mcp/ratelimit.py + +from collections import defaultdict +from datetime import datetime, timedelta + +class RateLimiter: + def __init__(self): + self.calls = defaultdict(list) + self.limits = { + "oracle_answer": (10, timedelta(minutes=1)), # 10/min + "guardian_anchor_now": (5, timedelta(hours=1)), # 5/hour + "treasury_record_entry": (100, timedelta(hours=1)), # 100/hour + "default": (60, timedelta(minutes=1)), # 60/min + } + + async def check(self, caller: str, tool: str) -> bool: + key = f"{caller}:{tool}" + now = datetime.utcnow() + + limit, window = self.limits.get(tool, self.limits["default"]) + + # Clean old entries + self.calls[key] = [ + t for t in self.calls[key] + if now - t < window + ] + + if len(self.calls[key]) >= limit: + raise RateLimitExceeded( + f"Rate limit exceeded for {tool}: {limit} per {window}" + ) + + self.calls[key].append(now) + return True +``` + +### 3.3 Constitutional Compliance + +```python +# vaultmesh_mcp/compliance.py + +async def check_constitutional_compliance( + tool: str, + caller: str, + params: dict, +) -> bool: + """Check if a tool call complies with the constitution.""" + + # Check against axioms + if tool in AXIOM_VIOLATING_TOOLS: + raise ConstitutionalViolation( + f"Tool {tool} would violate constitutional axioms" + ) + + # Check for emergency restrictions + emergency = await governance_engine.get_active_emergency() + if emergency and tool in emergency.restricted_tools: + raise EmergencyRestriction( + f"Tool {tool} is restricted during current emergency" + ) + + # Check for specific parameter violations + violations = await governance_engine.check_params_compliance(tool, params) + if violations: + raise ConstitutionalViolation( + f"Parameters violate constitution: {violations}" + ) + + return True +``` + +--- + +## 4. 
Claude Desktop Configuration + +### 4.1 config.json + +```json +{ + "mcpServers": { + "vaultmesh": { + "command": "python", + "args": ["-m", "vaultmesh_mcp.server"], + "env": { + "VAULTMESH_CONFIG": "/path/to/vaultmesh/config.toml", + "VAULTMESH_IDENTITY": "did:vm:agent:claude-desktop" + } + } + } +} +``` + +### 4.2 Capability Configuration + +```toml +# vaultmesh/config.toml + +[mcp.capabilities] +# Default capabilities for Claude Desktop sessions +default_capabilities = [ + "oracle_query", + "drills_view", + "mesh_view", + "treasury_view", + "guardian_view", + "identity_view", + "psi_view", + "governance_view", + "receipts_view", + "system_view", +] + +# Elevated capabilities require explicit grant +elevated_capabilities = [ + "drills_create", + "drills_execute", + "treasury_write", + "anchor", + "psi_transmute", +] + +[mcp.rate_limits] +oracle_answer = { limit = 10, window_minutes = 1 } +guardian_anchor_now = { limit = 5, window_hours = 1 } +treasury_record_entry = { limit = 100, window_hours = 1 } +default = { limit = 60, window_minutes = 1 } + +[mcp.audit] +receipt_all_calls = true +log_params = true +log_results = false # Results may contain sensitive data +``` + +--- + +## 5. Tool Categories + +### 5.1 Read-Only Tools (Default Access) + +| Tool | Description | +|------|-------------| +| `oracle_answer` | Ask compliance questions | +| `oracle_corpus_search` | Search compliance corpus | +| `drills_status` | View drill status | +| `mesh_topology` | View mesh topology | +| `mesh_node_status` | View node status | +| `mesh_capability_check` | Check node capabilities | +| `treasury_balance` | View balances | +| `guardian_anchor_status` | View anchor status | +| `guardian_verify_receipt` | Verify receipts | +| `identity_resolve_did` | Resolve DIDs | +| `identity_verify_credential` | Verify credentials | +| `identity_whoami` | View own identity | +| `psi_phase_status` | View phase status | +| `psi_opus_status` | View opus status | +| `governance_constitution_summary` | View constitution | +| `governance_active_proposals` | View proposals | +| `governance_check_compliance` | Check compliance | +| `receipts_search` | Search receipts | +| `system_health` | View system health | + +### 5.2 Write Tools (Elevated Access) + +| Tool | Capability Required | +|------|---------------------| +| `drills_create` | `drills_create` | +| `drills_complete_stage` | `drills_execute` | +| `treasury_record_entry` | `treasury_write` | +| `guardian_anchor_now` | `anchor` | +| `psi_transmute` | `psi_transmute` | + +--- + +## 6. Example Interactions + +### 6.1 Compliance Query + +``` +Human: What's our current compliance status for the AI Act Annex IV? + +Claude: [Calls oracle_answer with question="What is our compliance status for AI Act Annex IV?", frameworks=["AI_Act"]] + +Based on the Oracle's assessment: + +**Compliance Status: Partial (73%)** + +Key findings: +- Technical documentation requirements (§4.2): ✅ Compliant +- Risk management system (§4.3): ⚠️ Needs update for latest guidance +- Data governance (§4.5): ✅ Compliant +- Human oversight measures (§4.7): ⚠️ Documentation incomplete + +The Oracle flagged that our risk management documentation needs updating to reflect the December 2024 EU guidance. I can help draft those updates if you'd like. 
+``` + +### 6.2 Security Drill + +``` +Human: Start a drill for detecting lateral movement in our Kubernetes cluster + +Claude: [Calls drills_create with prompt="Detect and respond to lateral movement within Kubernetes cluster", skills=["kubernetes-security", "detection-defense-ir"]] + +I've created a new security drill: + +**Drill ID:** drill-1764691432 +**Scenario:** Lateral Movement Detection in Kubernetes + +**Stages:** +1. **Initial Access Simulation** (kubernetes-security) + - Simulate compromised pod gaining shell access + - Establish baseline network patterns + +2. **Lateral Movement Attempt** (kubernetes-security) + - Attempt service account token theft + - Test pod-to-pod communication abuse + - Probe for secrets and configmaps + +3. **Detection Validation** (detection-defense-ir) + - Verify Falco alerts triggered + - Check network policy enforcement + - Validate audit log capture + +4. **Response Execution** (detection-defense-ir) + - Execute containment playbook + - Document response timeline + +Ready to begin Stage 1? +``` + +### 6.3 System Health Check + +``` +Human: How's the system looking? + +Claude: [Calls system_health] + +**VaultMesh Health Summary** + +🟢 **Overall Status: Healthy** + +**Mesh:** +- Nodes: 5 active, 0 degraded +- Routes: 12 healthy +- Last topology change: 3 days ago + +**Guardian:** +- Last anchor: 47 minutes ago +- Anchor backend: Ethereum (block 12345678) +- Receipts since last anchor: 23 + +**Oracle:** +- Corpus documents: 147 +- Last query: 12 minutes ago +- Average response time: 1.2s + +**Psi-Field:** +- Current phase: ALBEDO 🜄 +- Time in phase: 4 days +- Recent transmutations: 2 + +**Receipts:** +- Total: 1,847 across all scrolls +- Today: 34 new receipts + +Everything looks good. The Guardian is anchoring on schedule, and no anomalies detected. +``` diff --git a/docs/VAULTMESH-MCP-TEM-NODE.md b/docs/VAULTMESH-MCP-TEM-NODE.md new file mode 100644 index 0000000..6a42ef4 --- /dev/null +++ b/docs/VAULTMESH-MCP-TEM-NODE.md @@ -0,0 +1,161 @@ +# VaultMesh MCP + TEM Shield Node Integration + +## 1. Overview + +The VaultMesh core ledger integrates with an external **OffSec Shield +Node** that runs: + +- OffSec agents +- MCP backend (FastAPI) +- TEM Engine (Threat / Experience Memory) + +The node is implemented in the separate `offsec-agents/` repository and +deployed to `shield-vm` (or lab nodes). VaultMesh talks to it via HTTP +and ingests receipts. + +--- + +## 2. Node Contract + +**Node identity:** + +- Node ID: `shield-vm` (example) +- Role: `shield` / `offsec-node` + +**MCP endpoints (examples):** + +- `GET /health` +- `POST /api/command` + - `{ "cmd": "agents list" }` + - `{ "cmd": "agent spawn", "args": {...} }` + - `{ "cmd": "agent mission", "args": {...} }` + - `{ "cmd": "tem status" }` +- `GET /tem/status` +- `GET /tem/stats` + +--- + +## 3. VaultMesh Client (Thin Shim) + +VaultMesh does not embed offsec-agents. It uses a minimal HTTP client: + +- Location: `scripts/offsec_node_client.py` +- Responsibilities: + - Send commands to Shield Node + - Handle timeouts / errors + - Normalize responses for `vm_cli.py` + +Example call: + +```python +from scripts.offsec_node_client import OffsecNodeClient + +client = OffsecNodeClient(base_url="http://shield-vm:8081") + +agents = await client.command("agents list") +status = await client.command("tem status") +``` + +--- + +## 4. 
CLI Integration
+
+`vm_cli.py` can expose high-level commands that proxy to the Shield Node:
+
+- `vm offsec agents`
+  - Calls `agents list`
+- `vm offsec mission --agent <agent-id> --target <target>`
+  - Calls `agent mission`
+- `vm tem status`
+  - Calls `tem status`
+
+These commands are optional and only work if the Shield Node is
+configured via environment variables:
+
+- `OFFSEC_NODE_URL=http://shield-vm:8081`
+
+If the node is unreachable, the CLI should:
+
+- Fail gracefully
+- Print a clear error message
+- Not affect core ledger operations
+
+---
+
+## 5. Receipts and Guardian Integration
+
+The Shield Node writes receipts locally (e.g. on shield-vm):
+
+- `/opt/offsec-agents/receipts/offsec.jsonl`
+- `/opt/offsec-agents/receipts/tem/tem_events.jsonl`
+
+Integration options:
+
+1. **File sync / pull**
+   - A sync job (cron, rsync, MinIO, etc.) copies receipts into the
+     VaultMesh node under:
+     - `receipts/shield/offsec.jsonl`
+     - `receipts/shield/tem_events.jsonl`
+
+2. **API pull**
+   - Shield Node exposes `/receipts/export` endpoints
+   - VaultMesh pulls and stores under `receipts/shield/`
+
+Guardian then:
+
+- Computes partial roots for Shield receipts:
+  - `ROOT.shield.offsec.txt`
+  - `ROOT.shield.tem.txt`
+- Includes them in the combined anchor:
+
+```python
+roots = {
+    "mesh": read_root("ROOT.mesh.txt"),
+    "treasury": read_root("ROOT.treasury.txt"),
+    "offsec": read_root("ROOT.offsec.txt"),
+    "shield_tem": read_root("ROOT.shield.tem.txt"),
+}
+anchor_root = compute_combined_root(roots)
+```
+
+---
+
+## 6. Configuration
+
+Example env vars for VaultMesh:
+
+- `OFFSEC_NODE_URL=http://shield-vm:8081`
+- `OFFSEC_NODE_ID=shield-vm`
+- `OFFSEC_RECEIPTS_PATH=/var/lib/vaultmesh/receipts/shield`
+
+Example env vars for Shield Node:
+
+- `VAULTMESH_ROOT=/opt/vaultmesh`
+- `TEM_DB_PATH=/opt/offsec-agents/state/tem.db`
+- `TEM_RECEIPTS_PATH=/opt/offsec-agents/receipts/tem`
+
+---
+
+## 7. Failure Modes
+
+If the Shield Node is:
+
+- **Down**: CLI commands fail, core ledger continues; Guardian anchors
+  without Shield roots (or marks them missing).
+- **Lagging**: Receipts are delayed; anchors include older Shield state.
+- **Misconfigured**: CLI reports invalid node URL or protocol errors.
+
+VaultMesh must never block core anchors solely because Shield is
+unavailable; Shield is an extension, not the root of truth.
+
+---
+
+## 8. Design Principles
+
+- Keep Shield node separate from VaultMesh core.
+- Integrate via:
+  - HTTP commands
+  - Receipt ingestion
+- Treat Shield as:
+  - A specialized OffSec/TEM appliance
+  - A contributor to the global ProofChain
diff --git a/docs/VAULTMESH-MESH-ENGINE.md b/docs/VAULTMESH-MESH-ENGINE.md
new file mode 100644
index 0000000..7be5549
--- /dev/null
+++ b/docs/VAULTMESH-MESH-ENGINE.md
@@ -0,0 +1,554 @@
+# VAULTMESH-MESH-ENGINE.md
+
+**Civilization Ledger Federation Primitive**
+
+> *Nodes that anchor together, survive together.*
+
+Mesh is VaultMesh's topology memory — tracking how nodes discover each other, establish trust, share capabilities, and evolve their federation relationships over time. Every topology change becomes evidence.
+
+---
+
+## 1. 
Scroll Definition + +| Property | Value | +| --------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ | +| **Scroll Name** | `Mesh` | +| **JSONL Path** | `receipts/mesh/mesh_events.jsonl` | +| **Root File** | `ROOT.mesh.txt` | +| **Receipt Types** | `mesh_node_join`, `mesh_node_leave`, `mesh_route_change`, `mesh_capability_grant`, `mesh_capability_revoke`, `mesh_topology_snapshot` | + +--- + +## 2. Core Concepts + +### 2.1 Nodes + +A **node** is any VaultMesh-aware endpoint that participates in the federation. + +```json +{ + "node_id": "did:vm:node:brick-01", + "display_name": "BRICK-01 (Dublin Primary)", + "node_type": "infrastructure", + "endpoints": { + "portal": "https://brick-01.vaultmesh.local:8443", + "wireguard": "10.77.1.1", + "tailscale": "brick-01.tail.net" + }, + "public_key": "ed25519:abc123...", + "capabilities": ["anchor", "storage", "compute", "oracle"], + "status": "active", + "joined_at": "2025-06-15T00:00:00Z", + "last_seen": "2025-12-06T14:30:00Z", + "tags": ["production", "eu-west", "akash"] +} +``` + +**Node types**: + +* `infrastructure` — BRICK servers, compute nodes +* `edge` — mobile devices, sovereign phones, field endpoints +* `oracle` — compliance oracle instances +* `guardian` — dedicated anchor/sentinel nodes +* `external` — federated nodes from other VaultMesh deployments + +### 2.2 Routes + +A **route** defines how traffic flows between nodes or segments. + +```json +{ + "route_id": "route-brick01-to-brick02", + "source": "did:vm:node:brick-01", + "destination": "did:vm:node:brick-02", + "transport": "wireguard", + "priority": 1, + "status": "active", + "latency_ms": 12, + "established_at": "2025-06-20T00:00:00Z" +} +``` + +Routes can be: + +* **Direct**: Node-to-node (WireGuard, Tailscale) +* **Relayed**: Through a gateway node +* **Redundant**: Multiple paths with failover priority + +### 2.3 Capabilities + +**Capabilities** are the trust primitives — what a node is permitted to do within the federation. + +```json +{ + "capability_id": "cap:brick-01:anchor:2025", + "node_id": "did:vm:node:brick-01", + "capability": "anchor", + "scope": "global", + "granted_by": "did:vm:node:portal-01", + "granted_at": "2025-06-15T00:00:00Z", + "expires_at": "2026-06-15T00:00:00Z", + "constraints": { + "max_anchor_rate": "100/day", + "allowed_scrolls": ["*"] + } +} +``` + +Standard capabilities: + +* `anchor` — can submit roots to anchor backends +* `storage` — can store receipts and artifacts +* `compute` — can execute drills, run agents +* `oracle` — can issue compliance answers +* `admin` — can grant/revoke capabilities to other nodes +* `federate` — can establish trust with external meshes + +### 2.4 Topology Snapshots + +Periodic **snapshots** capture the full mesh state — useful for auditing, disaster recovery, and proving historical topology. + +--- + +## 3. 
Mapping to Eternal Pattern + +### 3.1 Experience Layer (L1) + +**CLI** (`vm-mesh`): + +```bash +# Node operations +vm-mesh node list +vm-mesh node show brick-01 +vm-mesh node join --config node-manifest.json +vm-mesh node leave --node brick-02 --reason "decommissioned" + +# Route operations +vm-mesh route list +vm-mesh route add --from brick-01 --to brick-03 --transport tailscale +vm-mesh route test --route route-brick01-to-brick02 + +# Capability operations +vm-mesh capability list --node brick-01 +vm-mesh capability grant --node brick-02 --capability oracle --expires 2026-01-01 +vm-mesh capability revoke --node brick-02 --capability anchor --reason "security incident" + +# Topology +vm-mesh topology show +vm-mesh topology snapshot --output snapshots/2025-12-06.json +vm-mesh topology diff --from snapshots/2025-11-01.json --to snapshots/2025-12-06.json + +# Health +vm-mesh health --full +vm-mesh ping --all +``` + +**MCP Tools**: + +* `mesh_node_status` — get node details and health +* `mesh_list_nodes` — enumerate active nodes +* `mesh_topology_summary` — current topology overview +* `mesh_capability_check` — verify if node has capability +* `mesh_route_health` — check route latency and status + +**Portal HTTP**: + +* `GET /mesh/nodes` — list nodes +* `GET /mesh/nodes/{node_id}` — node details +* `POST /mesh/nodes/join` — register new node +* `POST /mesh/nodes/{node_id}/leave` — deregister node +* `GET /mesh/routes` — list routes +* `POST /mesh/routes` — add route +* `GET /mesh/capabilities/{node_id}` — node capabilities +* `POST /mesh/capabilities/grant` — grant capability +* `POST /mesh/capabilities/revoke` — revoke capability +* `GET /mesh/topology` — current topology +* `POST /mesh/topology/snapshot` — create snapshot + +--- + +### 3.2 Engine Layer (L2) + +#### Step 1 — Plan → `mesh_change_contract.json` + +For simple operations (single node join, route add), the contract is implicit. + +For coordinated topology changes, an explicit contract: + +```json +{ + "change_id": "mesh-change-2025-12-06-001", + "title": "Add BRICK-03 to Dublin Cluster", + "initiated_by": "did:vm:node:portal-01", + "initiated_at": "2025-12-06T11:00:00Z", + "change_type": "node_expansion", + "operations": [ + { + "op_id": "op-001", + "operation": "node_join", + "target": "did:vm:node:brick-03", + "config": { + "display_name": "BRICK-03 (Dublin Secondary)", + "node_type": "infrastructure", + "endpoints": { + "portal": "https://brick-03.vaultmesh.local:8443", + "wireguard": "10.77.1.3" + }, + "public_key": "ed25519:def456..." 
+ } + }, + { + "op_id": "op-002", + "operation": "route_add", + "config": { + "source": "did:vm:node:brick-01", + "destination": "did:vm:node:brick-03", + "transport": "wireguard" + } + }, + { + "op_id": "op-003", + "operation": "route_add", + "config": { + "source": "did:vm:node:brick-02", + "destination": "did:vm:node:brick-03", + "transport": "wireguard" + } + }, + { + "op_id": "op-004", + "operation": "capability_grant", + "config": { + "node_id": "did:vm:node:brick-03", + "capability": "storage", + "scope": "local", + "expires_at": "2026-12-06T00:00:00Z" + } + } + ], + "requires_approval": ["portal-01"], + "rollback_on_failure": true +} +``` + +#### Step 2 — Execute → `mesh_change_state.json` + +```json +{ + "change_id": "mesh-change-2025-12-06-001", + "status": "in_progress", + "created_at": "2025-12-06T11:00:00Z", + "updated_at": "2025-12-06T11:05:00Z", + "operations": [ + { + "op_id": "op-001", + "status": "completed", + "completed_at": "2025-12-06T11:02:00Z", + "result": { + "node_registered": true, + "handshake_verified": true + } + }, + { + "op_id": "op-002", + "status": "completed", + "completed_at": "2025-12-06T11:03:00Z", + "result": { + "route_established": true, + "latency_ms": 8 + } + }, + { + "op_id": "op-003", + "status": "in_progress", + "started_at": "2025-12-06T11:04:00Z" + }, + { + "op_id": "op-004", + "status": "pending" + } + ], + "topology_before_hash": "blake3:aaa111...", + "approvals": { + "portal-01": { + "approved_at": "2025-12-06T11:01:00Z", + "signature": "ed25519:..." + } + } +} +``` + +**Status transitions**: + +``` +draft → pending_approval → in_progress → completed + ↘ partial_failure → rollback → rolled_back + ↘ failed → rollback → rolled_back +``` + +#### Step 3 — Seal → Receipts + +Each operation in a change produces its own receipt, plus a summary receipt for coordinated changes. + +**Node Join Receipt**: + +```json +{ + "type": "mesh_node_join", + "node_id": "did:vm:node:brick-03", + "display_name": "BRICK-03 (Dublin Secondary)", + "node_type": "infrastructure", + "timestamp": "2025-12-06T11:02:00Z", + "initiated_by": "did:vm:node:portal-01", + "change_id": "mesh-change-2025-12-06-001", + "endpoints_hash": "blake3:...", + "public_key_fingerprint": "SHA256:...", + "tags": ["mesh", "node", "join", "infrastructure"], + "root_hash": "blake3:bbb222..." +} +``` + +**Route Change Receipt**: + +```json +{ + "type": "mesh_route_change", + "route_id": "route-brick01-to-brick03", + "operation": "add", + "source": "did:vm:node:brick-01", + "destination": "did:vm:node:brick-03", + "transport": "wireguard", + "timestamp": "2025-12-06T11:03:00Z", + "initiated_by": "did:vm:node:portal-01", + "change_id": "mesh-change-2025-12-06-001", + "latency_ms": 8, + "tags": ["mesh", "route", "add"], + "root_hash": "blake3:ccc333..." +} +``` + +**Capability Grant Receipt**: + +```json +{ + "type": "mesh_capability_grant", + "capability_id": "cap:brick-03:storage:2025", + "node_id": "did:vm:node:brick-03", + "capability": "storage", + "scope": "local", + "granted_by": "did:vm:node:portal-01", + "timestamp": "2025-12-06T11:06:00Z", + "expires_at": "2026-12-06T00:00:00Z", + "change_id": "mesh-change-2025-12-06-001", + "tags": ["mesh", "capability", "grant", "storage"], + "root_hash": "blake3:ddd444..." 
+} +``` + +**Topology Snapshot Receipt** (periodic): + +```json +{ + "type": "mesh_topology_snapshot", + "snapshot_id": "snapshot-2025-12-06-001", + "timestamp": "2025-12-06T12:00:00Z", + "node_count": 5, + "route_count": 12, + "capability_count": 23, + "nodes": ["brick-01", "brick-02", "brick-03", "portal-01", "oracle-01"], + "topology_hash": "blake3:eee555...", + "snapshot_path": "snapshots/mesh/2025-12-06-001.json", + "tags": ["mesh", "snapshot", "topology"], + "root_hash": "blake3:fff666..." +} +``` + +--- + +### 3.3 Ledger Layer (L3) + +**Receipt Types**: + +| Type | When Emitted | +| -------------------------- | --------------------------------- | +| `mesh_node_join` | Node registered in mesh | +| `mesh_node_leave` | Node deregistered | +| `mesh_route_change` | Route added, removed, or modified | +| `mesh_capability_grant` | Capability granted to node | +| `mesh_capability_revoke` | Capability revoked from node | +| `mesh_topology_snapshot` | Periodic full topology capture | + +**Merkle Coverage**: + +* All receipts append to `receipts/mesh/mesh_events.jsonl` +* `ROOT.mesh.txt` updated after each append +* Guardian anchors Mesh root in anchor cycles + +--- + +## 4. Query Interface + +`mesh_query_events.py`: + +```bash +# All events for a node +vm-mesh query --node brick-01 + +# Events by type +vm-mesh query --type node_join +vm-mesh query --type capability_grant + +# Date range +vm-mesh query --from 2025-11-01 --to 2025-12-01 + +# By change ID (coordinated changes) +vm-mesh query --change-id mesh-change-2025-12-06-001 + +# Capability history for a node +vm-mesh query --node brick-02 --type capability_grant,capability_revoke + +# Export topology history +vm-mesh query --type topology_snapshot --format json > topology_history.json +``` + +**Topology Diff Tool**: + +```bash +# Compare two snapshots +vm-mesh topology diff \ + --from snapshots/mesh/2025-11-01.json \ + --to snapshots/mesh/2025-12-06.json + +# Output: +# + node: brick-03 (joined) +# + route: brick-01 → brick-03 +# + route: brick-02 → brick-03 +# + capability: brick-03:storage +# ~ route: brick-01 → brick-02 (latency: 15ms → 12ms) +``` + +--- + +## 5. Design Gate Checklist + +| Question | Mesh Answer | +| --------------------- | ---------------------------------------------------------------------------------------- | +| Clear entrypoint? | ✅ CLI (`vm-mesh`), MCP tools, Portal HTTP | +| Contract produced? | ✅ `mesh_change_contract.json` (explicit for coordinated changes, implicit for single ops) | +| State object? | ✅ `mesh_change_state.json` tracking operation progress | +| Receipts emitted? | ✅ Six receipt types covering all topology events | +| Append-only JSONL? | ✅ `receipts/mesh/mesh_events.jsonl` | +| Merkle root? | ✅ `ROOT.mesh.txt` | +| Guardian anchor path? | ✅ Mesh root included in ProofChain | +| Query tool? | ✅ `mesh_query_events.py` + topology diff | + +--- + +## 6. 
Mesh Health & Consensus + +### 6.1 Heartbeat Protocol + +Nodes emit periodic heartbeats to prove liveness: + +```json +{ + "type": "heartbeat", + "node_id": "did:vm:node:brick-01", + "timestamp": "2025-12-06T14:30:00Z", + "sequence": 847293, + "load": { + "cpu_percent": 23, + "memory_percent": 67, + "disk_percent": 45 + }, + "routes_healthy": 4, + "routes_degraded": 0 +} +``` + +Heartbeats are **not** receipted individually (too high volume), but: + +* Aggregated into daily health summaries +* Missed heartbeats trigger alerts +* Prolonged absence → automatic `mesh_node_leave` with `reason: "timeout"` + +### 6.2 Quorum Requirements + +Critical mesh operations require quorum: + +| Operation | Quorum | +| ------------------------------- | --------------------------------- | +| Node join | 1 admin node | +| Node forced leave | 2 admin nodes | +| Capability grant (global scope) | 2 admin nodes | +| Capability revoke | 1 admin node (immediate security) | +| Federation trust establishment | All admin nodes | + +--- + +## 7. Federation (Multi-Mesh) + +When VaultMesh instances need to federate (e.g., partner organizations, geographic regions): + +### 7.1 Trust Establishment + +```json +{ + "type": "mesh_federation_trust", + "local_mesh": "did:vm:mesh:vaultmesh-dublin", + "remote_mesh": "did:vm:mesh:partner-berlin", + "trust_level": "limited", + "established_at": "2025-12-06T15:00:00Z", + "expires_at": "2026-12-06T00:00:00Z", + "shared_capabilities": ["oracle_query", "receipt_verify"], + "gateway_node": "did:vm:node:portal-01", + "remote_gateway": "did:vm:node:partner-gateway-01", + "trust_anchor": "blake3:ggg777..." +} +``` + +**Trust levels**: + +* `isolated` — no cross-mesh communication +* `limited` — specific capabilities only (e.g., query each other's Oracle) +* `reciprocal` — mutual receipt verification, shared anchoring +* `full` — complete federation (rare, high-trust scenarios) + +### 7.2 Cross-Mesh Receipts + +When a federated mesh verifies or references receipts: + +```json +{ + "type": "mesh_cross_verify", + "local_receipt": "receipt:treasury:settle-2025-12-06-001", + "remote_mesh": "did:vm:mesh:partner-berlin", + "verified_by": "did:vm:node:partner-oracle-01", + "verification_timestamp": "2025-12-06T16:00:00Z", + "verification_result": "valid", + "remote_root_at_verification": "blake3:hhh888..." +} +``` + +--- + +## 8. Integration Points + +| System | Integration | +| -------------- | ----------------------------------------------------------------------------------------- | +| **Guardian** | Anchors `ROOT.mesh.txt`; alerts on unexpected topology changes | +| **Treasury** | Node join can auto-create Treasury accounts; node leave triggers account closure workflow | +| **Oracle** | Can query Mesh for node capabilities ("Does BRICK-02 have anchor capability?") | +| **Drills** | Multi-node drills require Mesh to verify all participants are active and routable | +| **OffSec** | Security incidents can trigger emergency capability revocations via Mesh | + +--- + +## 9. 
Future Extensions
+
+* **Auto-discovery**: Nodes find each other via mDNS/DHT in local networks
+* **Geographic awareness**: Route optimization based on node locations
+* **Bandwidth metering**: Track data flow between nodes for Treasury billing
+* **Mesh visualization**: Real-time topology graph in Portal UI
+* **Chaos testing**: Controlled route failures to test resilience
+* **Zero-trust verification**: Continuous capability re-verification
diff --git a/docs/VAULTMESH-MIGRATION-GUIDE.md b/docs/VAULTMESH-MIGRATION-GUIDE.md
new file mode 100644
index 0000000..1873456
--- /dev/null
+++ b/docs/VAULTMESH-MIGRATION-GUIDE.md
@@ -0,0 +1,537 @@
+# VAULTMESH-MIGRATION-GUIDE.md
+**Upgrading the Civilization Ledger**
+
+> *A system that cannot evolve is a system that cannot survive.*
+
+---
+
+## 1. Version Compatibility Matrix
+
+| From Version | To Version | Migration Type | Downtime |
+|--------------|------------|----------------|----------|
+| 0.1.x | 0.2.x | Schema migration | < 5 min |
+| 0.2.x | 0.3.x | Schema migration | < 5 min |
+| 0.3.x | 1.0.x | Major migration | < 30 min |
+| 1.0.x | 1.1.x | Rolling update | None |
+
+---
+
+## 2. Pre-Migration Checklist
+
+```bash
+#!/bin/bash
+# scripts/pre-migration-check.sh
+
+set -e
+
+echo "=== VaultMesh Pre-Migration Check ==="
+
+# 1. Verify current version
+CURRENT_VERSION=$(vm-cli version --short)
+echo "Current version: $CURRENT_VERSION"
+
+# 2. Check for pending anchors
+PENDING=$(vm-guardian anchor-status --json | jq '.receipts_since_anchor')
+if [ "$PENDING" -gt 0 ]; then
+    echo "WARNING: $PENDING receipts pending anchor"
+    echo "Running anchor before migration..."
+    vm-guardian anchor-now --wait
+fi
+
+# 3. Verify receipt integrity (negated test instead of checking $?,
+# which is never reached under `set -e` when the command fails)
+echo "Verifying receipt integrity..."
+if ! vm-guardian verify-all --scroll all; then
+    echo "ERROR: Receipt integrity check failed"
+    exit 1
+fi
+
+# 4. Backup current state
+echo "Creating backup..."
+BACKUP_DIR="/backups/vaultmesh-$(date +%Y%m%d-%H%M%S)"
+mkdir -p "$BACKUP_DIR"
+
+# Backup receipts
+cp -r /data/receipts "$BACKUP_DIR/receipts"
+
+# Backup database
+pg_dump -h postgres -U vaultmesh vaultmesh > "$BACKUP_DIR/database.sql"
+
+# Backup configuration
+cp -r /config "$BACKUP_DIR/config"
+
+# Backup Merkle roots
+cp /data/receipts/ROOT.*.txt "$BACKUP_DIR/"
+
+echo "Backup created: $BACKUP_DIR"
+
+# 5. Verify backup
+echo "Verifying backup..."
+BACKUP_RECEIPT_COUNT=$(find "$BACKUP_DIR/receipts" -name "*.jsonl" -exec wc -l {} + | tail -1 | awk '{print $1}')
+CURRENT_RECEIPT_COUNT=$(find /data/receipts -name "*.jsonl" -exec wc -l {} + | tail -1 | awk '{print $1}')
+
+if [ "$BACKUP_RECEIPT_COUNT" -ne "$CURRENT_RECEIPT_COUNT" ]; then
+    echo "ERROR: Backup receipt count mismatch"
+    exit 1
+fi
+
+echo "=== Pre-migration checks complete ==="
+echo "Ready to migrate from $CURRENT_VERSION"
+```
+
+---
+
+## 3. 
Migration Scripts + +### 3.1 Schema Migration (0.2.x -> 0.3.x) + +```python +# migrations/0002_to_0003.py +""" +Migration: 0.2.x -> 0.3.x + +Changes: +- Add 'anchor_epoch' field to all receipts +- Add 'proof_path' field to all receipts +- Create new ROOT.*.txt files for new scrolls +""" + +import json +from pathlib import Path +from datetime import datetime +import shutil + +def migrate_receipts(receipts_dir: Path): + """Add new fields to existing receipts.""" + + for jsonl_file in receipts_dir.glob("**/*.jsonl"): + print(f"Migrating: {jsonl_file}") + + # Read all receipts + receipts = [] + with open(jsonl_file) as f: + for line in f: + receipt = json.loads(line.strip()) + + # Add new fields if missing + if "anchor_epoch" not in receipt: + receipt["anchor_epoch"] = None + if "proof_path" not in receipt: + receipt["proof_path"] = None + + receipts.append(receipt) + + # Write back with new fields + backup_path = jsonl_file.with_suffix(".jsonl.bak") + shutil.copy(jsonl_file, backup_path) + + with open(jsonl_file, "w") as f: + for receipt in receipts: + f.write(json.dumps(receipt) + "\n") + + print(f" Migrated {len(receipts)} receipts") + +def create_new_scrolls(receipts_dir: Path): + """Create directories and root files for new scrolls.""" + + new_scrolls = [ + "treasury", + "mesh", + "offsec", + "identity", + "observability", + "automation", + "psi", + "federation", + "governance", + ] + + for scroll in new_scrolls: + scroll_dir = receipts_dir / scroll + scroll_dir.mkdir(exist_ok=True) + + # Create empty JSONL file + jsonl_file = scroll_dir / f"{scroll}_events.jsonl" + jsonl_file.touch() + + # Create root file with empty root + root_file = receipts_dir / f"ROOT.{scroll}.txt" + root_file.write_text("blake3:empty") + + print(f"Created scroll: {scroll}") + +def update_database_schema(): + """Run database migrations.""" + import subprocess + + subprocess.run([ + "sqlx", "migrate", "run", + "--source", "migrations/sql", + ], check=True) + +def main(): + receipts_dir = Path("/data/receipts") + + print("=== VaultMesh Migration: 0.2.x -> 0.3.x ===") + print(f"Timestamp: {datetime.utcnow().isoformat()}Z") + + print("\n1. Migrating existing receipts...") + migrate_receipts(receipts_dir) + + print("\n2. Creating new scroll directories...") + create_new_scrolls(receipts_dir) + + print("\n3. 
Running database migrations...")
+    update_database_schema()
+
+    print("\n=== Migration complete ===")
+
+if __name__ == "__main__":
+    main()
+```
+
+### 3.2 Major Migration (0.3.x -> 1.0.x)
+
+```python
+# migrations/0003_to_1000.py
+"""
+Migration: 0.3.x -> 1.0.x (Major)
+
+Changes:
+- Constitutional governance activation
+- Receipt schema v2 (breaking)
+- Merkle tree format change
+- Guardian state restructure
+"""
+
+import json
+from pathlib import Path
+from datetime import datetime
+import subprocess
+import shutil
+
+# BLAKE3 is not in hashlib; it comes from the third-party `blake3` package
+from blake3 import blake3
+
+# NOTE: infer_scroll(), compute_receipt_hash_v2(), and
+# compute_merkle_root_v2() are assumed to be defined elsewhere
+# in this migrations module.
+
+def backup_everything(backup_dir: Path):
+    """Create comprehensive backup before major migration."""
+    backup_dir.mkdir(parents=True, exist_ok=True)
+
+    # Full receipts backup with verification
+    receipts_backup = backup_dir / "receipts"
+    shutil.copytree("/data/receipts", receipts_backup)
+
+    # Compute checksums
+    checksums = {}
+    for f in receipts_backup.glob("**/*"):
+        if f.is_file():
+            checksums[str(f.relative_to(receipts_backup))] = blake3(f.read_bytes()).hexdigest()
+
+    with open(backup_dir / "CHECKSUMS.json", "w") as f:
+        json.dump(checksums, f, indent=2)
+
+    # Database backup
+    subprocess.run([
+        "pg_dump", "-h", "postgres", "-U", "vaultmesh",
+        "-F", "c",  # Custom format for parallel restore
+        "-f", str(backup_dir / "database.dump"),
+        "vaultmesh"
+    ], check=True)
+
+    return backup_dir
+
+def migrate_receipt_schema_v2(receipts_dir: Path):
+    """Convert receipts to schema v2."""
+
+    for jsonl_file in receipts_dir.glob("**/*.jsonl"):
+        print(f"Converting to schema v2: {jsonl_file}")
+
+        receipts = []
+        with open(jsonl_file) as f:
+            for line in f:
+                old_receipt = json.loads(line.strip())
+
+                # Convert to v2 schema
+                new_receipt = {
+                    "schema_version": "2.0.0",
+                    "type": old_receipt.get("type"),
+                    "timestamp": old_receipt.get("timestamp"),
+                    "header": {
+                        "root_hash": old_receipt.get("root_hash"),
+                        "tags": old_receipt.get("tags", []),
+                        "previous_hash": None,  # Will be computed
+                    },
+                    "meta": {
+                        "scroll": infer_scroll(jsonl_file),
+                        "sequence": len(receipts),
+                        "anchor_epoch": old_receipt.get("anchor_epoch"),
+                        "proof_path": old_receipt.get("proof_path"),
+                    },
+                    "body": {
+                        k: v for k, v in old_receipt.items()
+                        if k not in ["type", "timestamp", "root_hash", "tags", "anchor_epoch", "proof_path"]
+                    }
+                }
+
+                # Compute previous_hash chain
+                if receipts:
+                    new_receipt["header"]["previous_hash"] = receipts[-1]["header"]["root_hash"]
+
+                # Recompute root_hash with new schema
+                new_receipt["header"]["root_hash"] = compute_receipt_hash_v2(new_receipt)
+
+                receipts.append(new_receipt)
+
+        # Write v2 receipts
+        with open(jsonl_file, "w") as f:
+            for receipt in receipts:
+                f.write(json.dumps(receipt) + "\n")
+
+        print(f"  Converted {len(receipts)} receipts to v2")
+
+def recompute_merkle_roots(receipts_dir: Path):
+    """Recompute all Merkle roots with new format."""
+
+    scrolls = [
+        "drills", "compliance", "guardian", "treasury", "mesh",
+        "offsec", "identity", "observability", "automation",
+        "psi", "federation", "governance"
+    ]
+
+    for scroll in scrolls:
+        jsonl_file = receipts_dir / scroll / f"{scroll}_events.jsonl"
+        root_file = receipts_dir / f"ROOT.{scroll}.txt"
+
+        if not jsonl_file.exists():
+            continue
+
+        # Read receipt hashes
+        hashes = []
+        with open(jsonl_file) as f:
+            for line in f:
+                receipt = json.loads(line.strip())
+                hashes.append(receipt["header"]["root_hash"])
+
+        # Compute new Merkle root
+        root = compute_merkle_root_v2(hashes)
+        root_file.write_text(root)
+
+        print(f"Recomputed root for {scroll}: {root[:30]}...")
+
+def 
initialize_constitution(): + """Create initial constitutional documents.""" + + constitution = { + "version": "1.0.0", + "effective_at": datetime.utcnow().isoformat() + "Z", + "axioms": [], # From CONSTITUTIONAL-GOVERNANCE.md + "articles": [], + "engine_registry": [], + } + + # Write constitution + const_path = Path("/data/governance/constitution.json") + const_path.parent.mkdir(parents=True, exist_ok=True) + + with open(const_path, "w") as f: + json.dump(constitution, f, indent=2) + + # Create constitution receipt + receipt = { + "schema_version": "2.0.0", + "type": "gov_constitution_ratified", + "timestamp": datetime.utcnow().isoformat() + "Z", + "header": { + "root_hash": "", # Will be computed + "tags": ["governance", "constitution", "genesis"], + "previous_hash": None, + }, + "meta": { + "scroll": "Governance", + "sequence": 0, + "anchor_epoch": None, + "proof_path": None, + }, + "body": { + "constitution_version": "1.0.0", + "constitution_hash": hashlib.blake3(json.dumps(constitution).encode()).hexdigest(), + } + } + + # Append to governance scroll + gov_jsonl = Path("/data/receipts/governance/governance_events.jsonl") + with open(gov_jsonl, "a") as f: + f.write(json.dumps(receipt) + "\n") + + print("Constitutional governance initialized") + +def main(): + print("=== VaultMesh Major Migration: 0.3.x -> 1.0.x ===") + print(f"Timestamp: {datetime.utcnow().isoformat()}Z") + print("WARNING: This is a breaking migration!") + + # Confirm + confirm = input("Type 'MIGRATE' to proceed: ") + if confirm != "MIGRATE": + print("Aborted") + return + + backup_dir = Path(f"/backups/major-migration-{datetime.utcnow().strftime('%Y%m%d-%H%M%S')}") + receipts_dir = Path("/data/receipts") + + print("\n1. Creating comprehensive backup...") + backup_everything(backup_dir) + + print("\n2. Migrating receipt schema to v2...") + migrate_receipt_schema_v2(receipts_dir) + + print("\n3. Recomputing Merkle roots...") + recompute_merkle_roots(receipts_dir) + + print("\n4. Running database migrations...") + subprocess.run(["sqlx", "migrate", "run"], check=True) + + print("\n5. Initializing constitutional governance...") + initialize_constitution() + + print("\n6. Triggering anchor to seal migration...") + subprocess.run(["vm-guardian", "anchor-now", "--wait"], check=True) + + print("\n=== Major migration complete ===") + print(f"Backup location: {backup_dir}") + print("Please verify system health before removing backup") + +if __name__ == "__main__": + main() +``` + +--- + +## 4. Rollback Procedures + +```bash +#!/bin/bash +# scripts/rollback.sh + +set -e + +BACKUP_DIR=$1 + +if [ -z "$BACKUP_DIR" ]; then + echo "Usage: rollback.sh " + exit 1 +fi + +if [ ! -d "$BACKUP_DIR" ]; then + echo "ERROR: Backup directory not found: $BACKUP_DIR" + exit 1 +fi + +echo "=== VaultMesh Rollback ===" +echo "Backup: $BACKUP_DIR" + +# Verify backup integrity +echo "1. Verifying backup integrity..." +if [ -f "$BACKUP_DIR/CHECKSUMS.json" ]; then + python3 scripts/verify_checksums.py "$BACKUP_DIR" +fi + +# Stop services +echo "2. Stopping services..." +kubectl scale deployment -n vaultmesh --replicas=0 \ + vaultmesh-portal vaultmesh-guardian vaultmesh-oracle + +# Restore database +echo "3. Restoring database..." +pg_restore -h postgres -U vaultmesh -d vaultmesh --clean "$BACKUP_DIR/database.dump" + +# Restore receipts +echo "4. Restoring receipts..." +rm -rf /data/receipts/* +cp -r "$BACKUP_DIR/receipts"/* /data/receipts/ + +# Restore configuration +echo "5. Restoring configuration..." 
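+# NOTE (assumption): this step presumes the backup captured /config as well;
+# backup_everything() above only snapshots receipts and the database, so extend
+# it (e.g. shutil.copytree("/config", backup_dir / "config")) before relying on
+# this restore step.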
+cp -r "$BACKUP_DIR/config"/* /config/ + +# Restart services +echo "6. Restarting services..." +kubectl scale deployment -n vaultmesh --replicas=2 vaultmesh-portal +kubectl scale deployment -n vaultmesh --replicas=1 vaultmesh-guardian +kubectl scale deployment -n vaultmesh --replicas=2 vaultmesh-oracle + +# Wait for health +echo "7. Waiting for services to become healthy..." +kubectl wait --for=condition=ready pod -l app.kubernetes.io/part-of=vaultmesh -n vaultmesh --timeout=300s + +# Verify integrity +echo "8. Verifying receipt integrity..." +vm-guardian verify-all --scroll all + +echo "=== Rollback complete ===" +``` + +--- + +## 5. Post-Migration Verification + +```bash +#!/bin/bash +# scripts/post-migration-verify.sh + +set -e + +echo "=== VaultMesh Post-Migration Verification ===" + +# 1. Version check +echo "1. Checking version..." +NEW_VERSION=$(vm-cli version --short) +echo " Version: $NEW_VERSION" + +# 2. Service health +echo "2. Checking service health..." +vm-cli system health --json | jq '.services' + +# 3. Receipt integrity +echo "3. Verifying receipt integrity..." +for scroll in drills compliance guardian treasury mesh offsec identity observability automation psi federation governance; do + COUNT=$(wc -l < "/data/receipts/$scroll/${scroll}_events.jsonl" 2>/dev/null || echo "0") + ROOT=$(cat "/data/receipts/ROOT.$scroll.txt" 2>/dev/null || echo "N/A") + echo " $scroll: $COUNT receipts, root: ${ROOT:0:20}..." +done + +# 4. Merkle verification +echo "4. Verifying Merkle roots..." +vm-guardian verify-all --scroll all + +# 5. Anchor status +echo "5. Checking anchor status..." +vm-guardian anchor-status + +# 6. Constitution (if 1.0+) +if vm-gov constitution version &>/dev/null; then + echo "6. Checking constitution..." + vm-gov constitution version +fi + +# 7. Test receipt emission +echo "7. Testing receipt emission..." +TEST_RECEIPT=$(vm-cli emit-test-receipt --scroll drills) +echo " Test receipt: $TEST_RECEIPT" + +# 8. Test anchor +echo "8. Testing anchor cycle..." +vm-guardian anchor-now --wait + +# 9. Verify test receipt was anchored +echo "9. Verifying test receipt anchored..." +PROOF=$(vm-guardian get-proof "$TEST_RECEIPT") +if [ -n "$PROOF" ]; then + echo " Test receipt successfully anchored" +else + echo " ERROR: Test receipt not anchored" + exit 1 +fi + +echo "" +echo "=== Post-migration verification complete ===" +echo "All checks passed. System is operational." +``` diff --git a/docs/VAULTMESH-MONITORING-STACK.md b/docs/VAULTMESH-MONITORING-STACK.md new file mode 100644 index 0000000..cbcff6e --- /dev/null +++ b/docs/VAULTMESH-MONITORING-STACK.md @@ -0,0 +1,688 @@ +# VAULTMESH-MONITORING-STACK.md +**Observability for the Civilization Ledger** + +> *You cannot govern what you cannot see.* + +--- + +## 1. 
Prometheus Configuration
+
+```yaml
+# config/prometheus.yaml
+global:
+  scrape_interval: 15s
+  evaluation_interval: 15s
+
+alerting:
+  alertmanagers:
+    - static_configs:
+        - targets:
+            - alertmanager:9093
+
+rule_files:
+  - /etc/prometheus/rules/*.yaml
+
+scrape_configs:
+  # Portal metrics
+  - job_name: 'vaultmesh-portal'
+    kubernetes_sd_configs:
+      - role: pod
+        namespaces:
+          names:
+            - vaultmesh
+    relabel_configs:
+      - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_name]
+        regex: portal
+        action: keep
+      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
+        regex: "true"
+        action: keep
+      # Rewrite the scrape address to <pod-ip>:<annotated-port>
+      - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
+        action: replace
+        regex: ([^:]+)(?::\d+)?;(\d+)
+        replacement: $1:$2
+        target_label: __address__
+
+  # Guardian metrics
+  - job_name: 'vaultmesh-guardian'
+    kubernetes_sd_configs:
+      - role: pod
+        namespaces:
+          names:
+            - vaultmesh
+    relabel_configs:
+      - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_name]
+        regex: guardian
+        action: keep
+      - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
+        action: replace
+        regex: ([^:]+)(?::\d+)?;(\d+)
+        replacement: $1:$2
+        target_label: __address__
+
+  # Oracle metrics
+  - job_name: 'vaultmesh-oracle'
+    kubernetes_sd_configs:
+      - role: pod
+        namespaces:
+          names:
+            - vaultmesh
+    relabel_configs:
+      - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_name]
+        regex: oracle
+        action: keep
+      - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
+        action: replace
+        regex: ([^:]+)(?::\d+)?;(\d+)
+        replacement: $1:$2
+        target_label: __address__
+
+  # PostgreSQL metrics
+  - job_name: 'postgres'
+    static_configs:
+      - targets: ['postgres-exporter:9187']
+
+  # Redis metrics
+  - job_name: 'redis'
+    static_configs:
+      - targets: ['redis-exporter:9121']
+```
+
+---
+
+## 2. 
Alerting Rules + +```yaml +# config/prometheus/rules/vaultmesh-alerts.yaml +groups: + - name: vaultmesh.receipts + rules: + - alert: ReceiptWriteFailure + expr: rate(vaultmesh_receipt_write_errors_total[5m]) > 0 + for: 1m + labels: + severity: critical + scroll: "{{ $labels.scroll }}" + annotations: + summary: "Receipt write failures detected" + description: "{{ $value }} receipt write errors in scroll {{ $labels.scroll }}" + + - alert: ReceiptRateAnomaly + expr: | + abs( + rate(vaultmesh_receipts_total[5m]) - + avg_over_time(rate(vaultmesh_receipts_total[5m])[1h:5m]) + ) > 2 * stddev_over_time(rate(vaultmesh_receipts_total[5m])[1h:5m]) + for: 10m + labels: + severity: warning + annotations: + summary: "Unusual receipt rate detected" + description: "Receipt rate deviates significantly from baseline" + + - name: vaultmesh.guardian + rules: + - alert: AnchorDelayed + expr: time() - vaultmesh_guardian_last_anchor_timestamp > 7200 + for: 5m + labels: + severity: warning + annotations: + summary: "Guardian anchor delayed" + description: "Last anchor was {{ $value | humanizeDuration }} ago" + + - alert: AnchorCriticallyDelayed + expr: time() - vaultmesh_guardian_last_anchor_timestamp > 14400 + for: 5m + labels: + severity: critical + annotations: + summary: "Guardian anchor critically delayed" + description: "No anchor in over 4 hours" + + - alert: AnchorFailure + expr: increase(vaultmesh_guardian_anchor_failures_total[1h]) > 0 + for: 1m + labels: + severity: critical + annotations: + summary: "Guardian anchor failure" + description: "{{ $value }} anchor failures in the last hour" + + - alert: ProofChainDivergence + expr: vaultmesh_guardian_proofchain_divergence == 1 + for: 1m + labels: + severity: critical + annotations: + summary: "ProofChain divergence detected" + description: "Computed Merkle root differs from stored root" + + - name: vaultmesh.oracle + rules: + - alert: OracleHighLatency + expr: histogram_quantile(0.95, rate(vaultmesh_oracle_query_duration_seconds_bucket[5m])) > 30 + for: 5m + labels: + severity: warning + annotations: + summary: "Oracle query latency high" + description: "95th percentile query latency is {{ $value | humanizeDuration }}" + + - alert: OracleLLMErrors + expr: rate(vaultmesh_oracle_llm_errors_total[5m]) > 0.1 + for: 5m + labels: + severity: warning + annotations: + summary: "Oracle LLM errors elevated" + description: "{{ $value }} LLM errors per second" + + - alert: OracleCorpusEmpty + expr: vaultmesh_oracle_corpus_documents_total == 0 + for: 1m + labels: + severity: critical + annotations: + summary: "Oracle corpus is empty" + description: "No documents loaded in compliance corpus" + + - name: vaultmesh.mesh + rules: + - alert: NodeUnhealthy + expr: vaultmesh_mesh_node_healthy == 0 + for: 5m + labels: + severity: warning + node: "{{ $labels.node_id }}" + annotations: + summary: "Mesh node unhealthy" + description: "Node {{ $labels.node_id }} is unhealthy" + + - alert: NodeDown + expr: time() - vaultmesh_mesh_node_last_seen_timestamp > 600 + for: 5m + labels: + severity: critical + node: "{{ $labels.node_id }}" + annotations: + summary: "Mesh node down" + description: "Node {{ $labels.node_id }} not seen for {{ $value | humanizeDuration }}" + + - alert: RouteUnhealthy + expr: vaultmesh_mesh_route_healthy == 0 + for: 5m + labels: + severity: warning + annotations: + summary: "Mesh route unhealthy" + description: "Route {{ $labels.route_id }} is unhealthy" + + - name: vaultmesh.psi + rules: + - alert: PhaseProlongedNigredo + expr: 
vaultmesh_psi_phase_duration_seconds{phase="nigredo"} > 86400 + for: 1h + labels: + severity: warning + annotations: + summary: "System in Nigredo phase for extended period" + description: "System has been in crisis phase for {{ $value | humanizeDuration }}" + + - alert: TransmutationStalled + expr: vaultmesh_psi_transmutation_status{status="in_progress"} == 1 and time() - vaultmesh_psi_transmutation_started_timestamp > 86400 + for: 1h + labels: + severity: warning + annotations: + summary: "Transmutation stalled" + description: "Transmutation {{ $labels.transmutation_id }} in progress for over 24 hours" + + - name: vaultmesh.governance + rules: + - alert: ConstitutionalViolation + expr: increase(vaultmesh_governance_violations_total[1h]) > 0 + for: 0m + labels: + severity: critical + annotations: + summary: "Constitutional violation detected" + description: "{{ $value }} violation(s) in the last hour" + + - alert: EmergencyActive + expr: vaultmesh_governance_emergency_active == 1 + for: 0m + labels: + severity: warning + annotations: + summary: "Governance emergency active" + description: "Emergency powers in effect" + + - name: vaultmesh.federation + rules: + - alert: FederationWitnessFailure + expr: increase(vaultmesh_federation_witness_failures_total[1h]) > 0 + for: 5m + labels: + severity: warning + annotations: + summary: "Federation witness failure" + description: "Failed to witness {{ $labels.remote_mesh }} receipts" + + - alert: FederationDiscrepancy + expr: vaultmesh_federation_discrepancy_detected == 1 + for: 0m + labels: + severity: critical + annotations: + summary: "Federation discrepancy detected" + description: "Discrepancy with {{ $labels.remote_mesh }}: {{ $labels.discrepancy_type }}" +``` + +--- + +## 3. Grafana Dashboards + +### 3.1 Main Dashboard + +```json +{ + "dashboard": { + "title": "VaultMesh Overview", + "uid": "vaultmesh-overview", + "tags": ["vaultmesh"], + "timezone": "browser", + "panels": [ + { + "title": "System Status", + "type": "stat", + "gridPos": {"h": 4, "w": 6, "x": 0, "y": 0}, + "targets": [ + { + "expr": "sum(up{job=~\"vaultmesh-.*\"})", + "legendFormat": "Services Up" + } + ], + "fieldConfig": { + "defaults": { + "thresholds": { + "steps": [ + {"color": "red", "value": 0}, + {"color": "yellow", "value": 2}, + {"color": "green", "value": 3} + ] + } + } + } + }, + { + "title": "Current Phase", + "type": "stat", + "gridPos": {"h": 4, "w": 6, "x": 6, "y": 0}, + "targets": [ + { + "expr": "vaultmesh_psi_current_phase", + "legendFormat": "Phase" + } + ], + "fieldConfig": { + "defaults": { + "mappings": [ + {"type": "value", "options": {"0": {"text": "NIGREDO", "color": "dark-purple"}}}, + {"type": "value", "options": {"1": {"text": "ALBEDO", "color": "white"}}}, + {"type": "value", "options": {"2": {"text": "CITRINITAS", "color": "yellow"}}}, + {"type": "value", "options": {"3": {"text": "RUBEDO", "color": "red"}}} + ] + } + } + }, + { + "title": "Last Anchor Age", + "type": "stat", + "gridPos": {"h": 4, "w": 6, "x": 12, "y": 0}, + "targets": [ + { + "expr": "time() - vaultmesh_guardian_last_anchor_timestamp", + "legendFormat": "Age" + } + ], + "fieldConfig": { + "defaults": { + "unit": "s", + "thresholds": { + "steps": [ + {"color": "green", "value": 0}, + {"color": "yellow", "value": 3600}, + {"color": "red", "value": 7200} + ] + } + } + } + }, + { + "title": "Total Receipts", + "type": "stat", + "gridPos": {"h": 4, "w": 6, "x": 18, "y": 0}, + "targets": [ + { + "expr": "sum(vaultmesh_receipts_total)", + "legendFormat": "Receipts" + } + ] + }, + { + 
"title": "Receipt Rate by Scroll", + "type": "timeseries", + "gridPos": {"h": 8, "w": 12, "x": 0, "y": 4}, + "targets": [ + { + "expr": "rate(vaultmesh_receipts_total[5m])", + "legendFormat": "{{ scroll }}" + } + ], + "fieldConfig": { + "defaults": { + "unit": "ops" + } + } + }, + { + "title": "Anchor History", + "type": "timeseries", + "gridPos": {"h": 8, "w": 12, "x": 12, "y": 4}, + "targets": [ + { + "expr": "increase(vaultmesh_guardian_anchors_total[1h])", + "legendFormat": "Successful Anchors" + }, + { + "expr": "increase(vaultmesh_guardian_anchor_failures_total[1h])", + "legendFormat": "Failed Anchors" + } + ] + }, + { + "title": "Mesh Node Status", + "type": "table", + "gridPos": {"h": 6, "w": 12, "x": 0, "y": 12}, + "targets": [ + { + "expr": "vaultmesh_mesh_node_healthy", + "format": "table", + "instant": true + } + ], + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": {"Time": true, "__name__": true}, + "renameByName": {"node_id": "Node", "Value": "Healthy"} + } + } + ] + }, + { + "title": "Oracle Query Latency", + "type": "timeseries", + "gridPos": {"h": 6, "w": 12, "x": 12, "y": 12}, + "targets": [ + { + "expr": "histogram_quantile(0.50, rate(vaultmesh_oracle_query_duration_seconds_bucket[5m]))", + "legendFormat": "p50" + }, + { + "expr": "histogram_quantile(0.95, rate(vaultmesh_oracle_query_duration_seconds_bucket[5m]))", + "legendFormat": "p95" + }, + { + "expr": "histogram_quantile(0.99, rate(vaultmesh_oracle_query_duration_seconds_bucket[5m]))", + "legendFormat": "p99" + } + ], + "fieldConfig": { + "defaults": { + "unit": "s" + } + } + } + ] + } +} +``` + +### 3.2 Guardian Dashboard + +```json +{ + "dashboard": { + "title": "VaultMesh Guardian", + "uid": "vaultmesh-guardian", + "tags": ["vaultmesh", "guardian"], + "panels": [ + { + "title": "Anchor Status", + "type": "stat", + "gridPos": {"h": 4, "w": 8, "x": 0, "y": 0}, + "targets": [ + { + "expr": "vaultmesh_guardian_anchor_status", + "legendFormat": "Status" + } + ], + "fieldConfig": { + "defaults": { + "mappings": [ + {"type": "value", "options": {"0": {"text": "IDLE", "color": "blue"}}}, + {"type": "value", "options": {"1": {"text": "ANCHORING", "color": "yellow"}}}, + {"type": "value", "options": {"2": {"text": "SUCCESS", "color": "green"}}}, + {"type": "value", "options": {"3": {"text": "FAILED", "color": "red"}}} + ] + } + } + }, + { + "title": "Receipts Since Last Anchor", + "type": "stat", + "gridPos": {"h": 4, "w": 8, "x": 8, "y": 0}, + "targets": [ + { + "expr": "vaultmesh_guardian_receipts_since_anchor" + } + ] + }, + { + "title": "Anchor Epochs", + "type": "stat", + "gridPos": {"h": 4, "w": 8, "x": 16, "y": 0}, + "targets": [ + { + "expr": "vaultmesh_guardian_anchor_epoch" + } + ] + }, + { + "title": "ProofChain Roots by Scroll", + "type": "table", + "gridPos": {"h": 8, "w": 24, "x": 0, "y": 4}, + "targets": [ + { + "expr": "vaultmesh_guardian_proofchain_root_info", + "format": "table", + "instant": true + } + ] + }, + { + "title": "Anchor Duration", + "type": "timeseries", + "gridPos": {"h": 8, "w": 12, "x": 0, "y": 12}, + "targets": [ + { + "expr": "histogram_quantile(0.95, rate(vaultmesh_guardian_anchor_duration_seconds_bucket[1h]))", + "legendFormat": "p95" + } + ], + "fieldConfig": { + "defaults": { + "unit": "s" + } + } + }, + { + "title": "Anchor Events", + "type": "logs", + "gridPos": {"h": 8, "w": 12, "x": 12, "y": 12}, + "datasource": "Loki", + "targets": [ + { + "expr": "{job=\"vaultmesh-guardian\"} |= \"anchor\"" + } + ] + } + ] + } +} +``` + +--- + +## 4. 
Metrics Endpoints + +### 4.1 Portal Metrics + +```rust +// vaultmesh-portal/src/metrics.rs + +use prometheus::{ + Counter, CounterVec, Histogram, HistogramVec, Gauge, GaugeVec, + Opts, Registry, labels, +}; +use lazy_static::lazy_static; + +lazy_static! { + pub static ref REGISTRY: Registry = Registry::new(); + + // Receipt metrics + pub static ref RECEIPTS_TOTAL: CounterVec = CounterVec::new( + Opts::new("vaultmesh_receipts_total", "Total receipts by scroll"), + &["scroll", "type"] + ).unwrap(); + + pub static ref RECEIPT_WRITE_DURATION: HistogramVec = HistogramVec::new( + prometheus::HistogramOpts::new( + "vaultmesh_receipt_write_duration_seconds", + "Receipt write duration" + ).buckets(vec![0.001, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0]), + &["scroll"] + ).unwrap(); + + pub static ref RECEIPT_WRITE_ERRORS: CounterVec = CounterVec::new( + Opts::new("vaultmesh_receipt_write_errors_total", "Receipt write errors"), + &["scroll", "error_type"] + ).unwrap(); + + // API metrics + pub static ref HTTP_REQUESTS_TOTAL: CounterVec = CounterVec::new( + Opts::new("vaultmesh_http_requests_total", "Total HTTP requests"), + &["method", "path", "status"] + ).unwrap(); + + pub static ref HTTP_REQUEST_DURATION: HistogramVec = HistogramVec::new( + prometheus::HistogramOpts::new( + "vaultmesh_http_request_duration_seconds", + "HTTP request duration" + ).buckets(vec![0.01, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0]), + &["method", "path"] + ).unwrap(); + + // Connection metrics + pub static ref ACTIVE_CONNECTIONS: Gauge = Gauge::new( + "vaultmesh_active_connections", + "Active connections" + ).unwrap(); + + pub static ref DB_POOL_SIZE: GaugeVec = GaugeVec::new( + Opts::new("vaultmesh_db_pool_size", "Database pool size"), + &["state"] + ).unwrap(); +} + +pub fn register_metrics() { + REGISTRY.register(Box::new(RECEIPTS_TOTAL.clone())).unwrap(); + REGISTRY.register(Box::new(RECEIPT_WRITE_DURATION.clone())).unwrap(); + REGISTRY.register(Box::new(RECEIPT_WRITE_ERRORS.clone())).unwrap(); + REGISTRY.register(Box::new(HTTP_REQUESTS_TOTAL.clone())).unwrap(); + REGISTRY.register(Box::new(HTTP_REQUEST_DURATION.clone())).unwrap(); + REGISTRY.register(Box::new(ACTIVE_CONNECTIONS.clone())).unwrap(); + REGISTRY.register(Box::new(DB_POOL_SIZE.clone())).unwrap(); +} +``` + +### 4.2 Guardian Metrics + +```rust +// vaultmesh-guardian/src/metrics.rs + +use prometheus::{ + Counter, CounterVec, Histogram, Gauge, GaugeVec, + Opts, Registry, +}; +use lazy_static::lazy_static; + +lazy_static! 
{ + pub static ref REGISTRY: Registry = Registry::new(); + + // Anchor metrics + pub static ref ANCHORS_TOTAL: Counter = Counter::new( + "vaultmesh_guardian_anchors_total", + "Total successful anchors" + ).unwrap(); + + pub static ref ANCHOR_FAILURES_TOTAL: CounterVec = CounterVec::new( + Opts::new("vaultmesh_guardian_anchor_failures_total", "Anchor failures by reason"), + &["reason"] + ).unwrap(); + + pub static ref ANCHOR_DURATION: Histogram = Histogram::with_opts( + prometheus::HistogramOpts::new( + "vaultmesh_guardian_anchor_duration_seconds", + "Anchor cycle duration" + ).buckets(vec![1.0, 5.0, 10.0, 30.0, 60.0, 120.0, 300.0]) + ).unwrap(); + + pub static ref LAST_ANCHOR_TIMESTAMP: Gauge = Gauge::new( + "vaultmesh_guardian_last_anchor_timestamp", + "Timestamp of last successful anchor" + ).unwrap(); + + pub static ref ANCHOR_EPOCH: Gauge = Gauge::new( + "vaultmesh_guardian_anchor_epoch", + "Current anchor epoch number" + ).unwrap(); + + pub static ref RECEIPTS_SINCE_ANCHOR: Gauge = Gauge::new( + "vaultmesh_guardian_receipts_since_anchor", + "Receipts added since last anchor" + ).unwrap(); + + pub static ref ANCHOR_STATUS: Gauge = Gauge::new( + "vaultmesh_guardian_anchor_status", + "Current anchor status (0=idle, 1=anchoring, 2=success, 3=failed)" + ).unwrap(); + + // ProofChain metrics + pub static ref PROOFCHAIN_ROOT_INFO: GaugeVec = GaugeVec::new( + Opts::new("vaultmesh_guardian_proofchain_root_info", "ProofChain root information"), + &["scroll", "root_hash"] + ).unwrap(); + + pub static ref PROOFCHAIN_DIVERGENCE: Gauge = Gauge::new( + "vaultmesh_guardian_proofchain_divergence", + "ProofChain divergence detected (0=no, 1=yes)" + ).unwrap(); + + // Sentinel metrics + pub static ref SENTINEL_EVENTS: CounterVec = CounterVec::new( + Opts::new("vaultmesh_guardian_sentinel_events_total", "Sentinel events"), + &["event_type", "severity"] + ).unwrap(); +} +``` diff --git a/docs/VAULTMESH-OBSERVABILITY-ENGINE.md b/docs/VAULTMESH-OBSERVABILITY-ENGINE.md new file mode 100644 index 0000000..35e4b8f --- /dev/null +++ b/docs/VAULTMESH-OBSERVABILITY-ENGINE.md @@ -0,0 +1,742 @@ +# VAULTMESH-OBSERVABILITY-ENGINE.md + +**Civilization Ledger Telemetry Primitive** + +> *Every metric tells a story. Every trace has a receipt.* + +Observability is VaultMesh's nervous system — capturing metrics, logs, and traces across all nodes and services, with cryptographic attestation that the telemetry itself hasn't been tampered with. + +--- + +## 1. Scroll Definition + +| Property | Value | +| --------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | +| **Scroll Name** | `Observability` | +| **JSONL Path** | `receipts/observability/observability_events.jsonl` | +| **Root File** | `ROOT.observability.txt` | +| **Receipt Types** | `obs_metric_snapshot`, `obs_log_batch`, `obs_trace_complete`, `obs_alert_fired`, `obs_alert_resolved`, `obs_slo_report`, `obs_anomaly_detected` | + +--- + +## 2. Core Concepts + +### 2.1 Metrics + +**Metrics** are time-series numerical measurements from nodes and services. 
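+
+As a minimal sketch (assuming the `psutil` package is available), a node-local collector might assemble a subset of the fields shown below like this; it is an illustration, not the production scraper:
+
+```python
+# Hypothetical collector sketch -- field names mirror the record below.
+import psutil
+from datetime import datetime, timezone
+
+def collect_system_metrics(node_did: str) -> dict:
+    """Gather a point-in-time system measurement for one node."""
+    net = psutil.net_io_counters()
+    return {
+        "node": node_did,
+        "timestamp": datetime.now(timezone.utc).isoformat(),
+        "metrics": {
+            "cpu_percent": psutil.cpu_percent(interval=1),
+            "memory_percent": psutil.virtual_memory().percent,
+            "disk_percent": psutil.disk_usage("/").percent,
+            "network_rx_bytes": net.bytes_recv,
+            "network_tx_bytes": net.bytes_sent,
+        },
+        "collection_method": "local_sample",  # Prometheus scrape in production
+    }
+```
+
+A full metric record, as stored, carries identifiers and labels as well: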
+ +```json +{ + "metric_id": "metric:brick-01:cpu:2025-12-06T14:30:00Z", + "node": "did:vm:node:brick-01", + "timestamp": "2025-12-06T14:30:00Z", + "metrics": { + "cpu_percent": 23.5, + "memory_percent": 67.2, + "disk_percent": 45.8, + "network_rx_bytes": 1234567890, + "network_tx_bytes": 987654321, + "open_file_descriptors": 342, + "goroutines": 156 + }, + "labels": { + "environment": "production", + "region": "eu-west", + "service": "guardian" + }, + "collection_method": "prometheus_scrape", + "scrape_duration_ms": 45 +} +``` + +**Metric categories**: +- `system` — CPU, memory, disk, network +- `application` — request rates, latencies, error rates +- `business` — receipts/hour, anchors/day, oracle queries +- `security` — auth attempts, failed logins, blocked IPs +- `mesh` — route latencies, node health, capability usage + +### 2.2 Logs + +**Logs** are structured event records from all system components. + +```json +{ + "log_id": "log:guardian:2025-12-06T14:30:15.123Z", + "timestamp": "2025-12-06T14:30:15.123Z", + "level": "info", + "service": "guardian", + "node": "did:vm:node:brick-01", + "message": "Anchor cycle completed successfully", + "attributes": { + "cycle_id": "anchor-cycle-2025-12-06-001", + "receipts_anchored": 47, + "scrolls_included": ["treasury", "mesh", "identity"], + "duration_ms": 1234, + "backend": "bitcoin" + }, + "trace_id": "trace-abc123...", + "span_id": "span-def456...", + "caller": "guardian/anchor.go:234" +} +``` + +**Log levels**: +- `trace` — verbose debugging (not retained long-term) +- `debug` — debugging information +- `info` — normal operations +- `warn` — unexpected but handled conditions +- `error` — errors requiring attention +- `fatal` — system failures + +### 2.3 Traces + +**Traces** track request flows across distributed components. + +```json +{ + "trace_id": "trace-abc123...", + "name": "treasury_settlement", + "start_time": "2025-12-06T14:30:00.000Z", + "end_time": "2025-12-06T14:30:02.345Z", + "duration_ms": 2345, + "status": "ok", + "spans": [ + { + "span_id": "span-001", + "parent_span_id": null, + "name": "http_request", + "service": "portal", + "node": "did:vm:node:portal-01", + "start_time": "2025-12-06T14:30:00.000Z", + "duration_ms": 2340, + "attributes": { + "http.method": "POST", + "http.url": "/treasury/settle", + "http.status_code": 200 + } + }, + { + "span_id": "span-002", + "parent_span_id": "span-001", + "name": "validate_settlement", + "service": "treasury-engine", + "node": "did:vm:node:brick-01", + "start_time": "2025-12-06T14:30:00.100Z", + "duration_ms": 150, + "attributes": { + "settlement_id": "settle-2025-12-06-001", + "accounts_involved": 3 + } + }, + { + "span_id": "span-003", + "parent_span_id": "span-001", + "name": "emit_receipt", + "service": "ledger", + "node": "did:vm:node:brick-01", + "start_time": "2025-12-06T14:30:00.250Z", + "duration_ms": 50, + "attributes": { + "receipt_type": "treasury_settlement", + "scroll": "treasury" + } + }, + { + "span_id": "span-004", + "parent_span_id": "span-001", + "name": "anchor_request", + "service": "guardian", + "node": "did:vm:node:brick-01", + "start_time": "2025-12-06T14:30:00.300Z", + "duration_ms": 2000, + "attributes": { + "backend": "bitcoin", + "txid": "btc:abc123..." + } + } + ], + "tags": ["treasury", "settlement", "anchor"] +} +``` + +### 2.4 Alerts + +**Alerts** are triggered conditions requiring attention. 
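+
+The `for`-style rule semantics (a condition must hold continuously before an alert fires) can be sketched in a few lines of Python; this illustrates the rule shown in the record below and is not the production evaluator:
+
+```python
+import time
+
+class ThresholdRule:
+    """Fire only after `condition` has been true for `duration_s` seconds."""
+
+    def __init__(self, threshold: float, duration_s: float):
+        self.threshold = threshold
+        self.duration_s = duration_s
+        self._breach_started = None  # wall-clock time the breach began
+
+    def evaluate(self, value: float, now: float | None = None) -> bool:
+        now = time.time() if now is None else now
+        if value <= self.threshold:
+            self._breach_started = None  # breach cleared; reset the timer
+            return False
+        if self._breach_started is None:
+            self._breach_started = now
+        return now - self._breach_started >= self.duration_s
+
+# "cpu_percent > 80 for 5m" from the alert record below:
+rule = ThresholdRule(threshold=80, duration_s=300)
+```
+
+A fired alert is recorded with its rule, current value, and notification trail: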
+ +```json +{ + "alert_id": "alert-2025-12-06-001", + "name": "HighCPUUsage", + "severity": "warning", + "status": "firing", + "fired_at": "2025-12-06T14:35:00Z", + "node": "did:vm:node:brick-02", + "rule": { + "expression": "cpu_percent > 80 for 5m", + "threshold": 80, + "duration": "5m" + }, + "current_value": 87.3, + "labels": { + "environment": "production", + "region": "eu-west" + }, + "annotations": { + "summary": "CPU usage above 80% for 5 minutes", + "runbook": "https://docs.vaultmesh.io/runbooks/high-cpu" + }, + "notified": ["slack:ops-channel", "pagerduty:on-call"] +} +``` + +### 2.5 SLO Reports + +**SLO (Service Level Objective) Reports** track reliability targets. + +```json +{ + "slo_id": "slo:anchor-latency-p99", + "name": "Anchor Latency P99", + "description": "99th percentile anchor latency under 30 seconds", + "target": 0.999, + "window": "30d", + "report_period": { + "start": "2025-11-06T00:00:00Z", + "end": "2025-12-06T00:00:00Z" + }, + "achieved": 0.9995, + "status": "met", + "error_budget": { + "total_minutes": 43.2, + "consumed_minutes": 21.6, + "remaining_percent": 50.0 + }, + "breakdown": { + "total_requests": 125000, + "good_requests": 124937, + "bad_requests": 63 + }, + "trend": "stable" +} +``` + +--- + +## 3. Mapping to Eternal Pattern + +### 3.1 Experience Layer (L1) + +**CLI** (`vm-obs`): +```bash +# Metrics +vm-obs metrics query --node brick-01 --metric cpu_percent --last 1h +vm-obs metrics list --node brick-01 +vm-obs metrics export --from 2025-12-01 --to 2025-12-06 --format prometheus + +# Logs +vm-obs logs query --service guardian --level error --last 24h +vm-obs logs tail --node brick-01 --follow +vm-obs logs search "anchor failed" --from 2025-12-01 + +# Traces +vm-obs trace show trace-abc123 +vm-obs trace search --service treasury --duration ">1s" --last 24h +vm-obs trace analyze trace-abc123 --find-bottleneck + +# Alerts +vm-obs alert list --status firing +vm-obs alert show alert-2025-12-06-001 +vm-obs alert ack alert-2025-12-06-001 --comment "investigating" +vm-obs alert silence --node brick-02 --duration 1h --reason "maintenance" + +# SLOs +vm-obs slo list +vm-obs slo show slo:anchor-latency-p99 +vm-obs slo report --period 30d --format markdown + +# Dashboards +vm-obs dashboard list +vm-obs dashboard show system-overview +vm-obs dashboard export system-overview --format grafana +``` + +**MCP Tools**: +- `obs_metrics_query` — query metrics for a node/service +- `obs_logs_search` — search logs with filters +- `obs_trace_get` — retrieve trace details +- `obs_alert_status` — current alert status +- `obs_slo_summary` — SLO compliance summary +- `obs_health_check` — overall system health + +**Portal HTTP**: +- `GET /obs/metrics` — query metrics +- `GET /obs/logs` — search logs +- `GET /obs/traces` — list traces +- `GET /obs/traces/{trace_id}` — trace details +- `GET /obs/alerts` — list alerts +- `POST /obs/alerts/{id}/ack` — acknowledge alert +- `POST /obs/alerts/silence` — create silence +- `GET /obs/slos` — list SLOs +- `GET /obs/slos/{id}/report` — SLO report +- `GET /obs/health` — system health + +--- + +### 3.2 Engine Layer (L2) + +#### Step 1 — Plan → Implicit (Continuous Collection) + +Unlike discrete operations, observability collection is continuous. 
However, certain operations have explicit contracts: + +**Alert Acknowledgment Contract**: +```json +{ + "operation_id": "obs-op-2025-12-06-001", + "operation_type": "alert_acknowledge", + "alert_id": "alert-2025-12-06-001", + "acknowledged_by": "did:vm:user:sovereign", + "acknowledged_at": "2025-12-06T14:40:00Z", + "comment": "Investigating high CPU on brick-02, likely due to anchor backlog", + "escalation_suppressed": true, + "follow_up_required": true, + "follow_up_deadline": "2025-12-06T16:00:00Z" +} +``` + +**SLO Definition Contract**: +```json +{ + "operation_id": "obs-op-2025-12-06-002", + "operation_type": "slo_create", + "initiated_by": "did:vm:user:sovereign", + "slo": { + "id": "slo:oracle-availability", + "name": "Oracle Availability", + "description": "Oracle service uptime", + "indicator": { + "type": "availability", + "good_query": "oracle_up == 1", + "total_query": "count(oracle_requests)" + }, + "target": 0.999, + "window": "30d" + } +} +``` + +#### Step 2 — Execute → Continuous Collection + +Metrics, logs, and traces are collected continuously via: +- Prometheus scraping (metrics) +- Fluent Bit/Vector (logs) +- OpenTelemetry SDK (traces) + +State is maintained in time-series databases and search indices, not as discrete state files. + +#### Step 3 — Seal → Receipts + +**Metric Snapshot Receipt** (hourly): +```json +{ + "type": "obs_metric_snapshot", + "snapshot_id": "metrics-2025-12-06-14", + "timestamp": "2025-12-06T14:00:00Z", + "period": { + "start": "2025-12-06T13:00:00Z", + "end": "2025-12-06T14:00:00Z" + }, + "nodes_reporting": 5, + "metrics_collected": 15000, + "aggregates": { + "avg_cpu_percent": 34.5, + "max_cpu_percent": 87.3, + "avg_memory_percent": 62.1, + "total_receipts_emitted": 1247, + "total_anchors_completed": 12 + }, + "storage_path": "telemetry/metrics/2025-12-06/hour-14.parquet", + "content_hash": "blake3:aaa111...", + "tags": ["observability", "metrics", "hourly"], + "root_hash": "blake3:bbb222..." +} +``` + +**Log Batch Receipt** (hourly): +```json +{ + "type": "obs_log_batch", + "batch_id": "logs-2025-12-06-14", + "timestamp": "2025-12-06T14:00:00Z", + "period": { + "start": "2025-12-06T13:00:00Z", + "end": "2025-12-06T14:00:00Z" + }, + "log_counts": { + "trace": 0, + "debug": 12456, + "info": 45678, + "warn": 234, + "error": 12, + "fatal": 0 + }, + "services_reporting": ["guardian", "treasury", "portal", "oracle", "mesh"], + "storage_path": "telemetry/logs/2025-12-06/hour-14.jsonl.gz", + "content_hash": "blake3:ccc333...", + "tags": ["observability", "logs", "hourly"], + "root_hash": "blake3:ddd444..." +} +``` + +**Trace Complete Receipt** (for significant traces): +```json +{ + "type": "obs_trace_complete", + "trace_id": "trace-abc123...", + "timestamp": "2025-12-06T14:30:02.345Z", + "name": "treasury_settlement", + "duration_ms": 2345, + "status": "ok", + "span_count": 4, + "services_involved": ["portal", "treasury-engine", "ledger", "guardian"], + "nodes_involved": ["portal-01", "brick-01"], + "triggered_by": "did:vm:user:sovereign", + "business_context": { + "settlement_id": "settle-2025-12-06-001", + "amount": "1000.00 USD" + }, + "tags": ["observability", "trace", "treasury", "settlement"], + "root_hash": "blake3:eee555..." 
+} +``` + +**Alert Fired Receipt**: +```json +{ + "type": "obs_alert_fired", + "alert_id": "alert-2025-12-06-001", + "timestamp": "2025-12-06T14:35:00Z", + "name": "HighCPUUsage", + "severity": "warning", + "node": "did:vm:node:brick-02", + "rule_expression": "cpu_percent > 80 for 5m", + "current_value": 87.3, + "threshold": 80, + "notifications_sent": ["slack:ops-channel", "pagerduty:on-call"], + "tags": ["observability", "alert", "fired", "cpu"], + "root_hash": "blake3:fff666..." +} +``` + +**Alert Resolved Receipt**: +```json +{ + "type": "obs_alert_resolved", + "alert_id": "alert-2025-12-06-001", + "timestamp": "2025-12-06T15:10:00Z", + "name": "HighCPUUsage", + "fired_at": "2025-12-06T14:35:00Z", + "duration_minutes": 35, + "resolved_by": "automatic", + "resolution_value": 42.1, + "acknowledged": true, + "acknowledged_by": "did:vm:user:sovereign", + "root_cause": "anchor backlog cleared", + "tags": ["observability", "alert", "resolved"], + "root_hash": "blake3:ggg777..." +} +``` + +**SLO Report Receipt** (daily): +```json +{ + "type": "obs_slo_report", + "report_id": "slo-report-2025-12-06", + "timestamp": "2025-12-06T00:00:00Z", + "period": { + "start": "2025-11-06T00:00:00Z", + "end": "2025-12-06T00:00:00Z" + }, + "slos": [ + { + "slo_id": "slo:anchor-latency-p99", + "target": 0.999, + "achieved": 0.9995, + "status": "met" + }, + { + "slo_id": "slo:oracle-availability", + "target": 0.999, + "achieved": 0.9987, + "status": "at_risk" + } + ], + "overall_status": "healthy", + "error_budget_status": "sufficient", + "report_path": "reports/slo/2025-12-06.json", + "tags": ["observability", "slo", "daily-report"], + "root_hash": "blake3:hhh888..." +} +``` + +**Anomaly Detection Receipt**: +```json +{ + "type": "obs_anomaly_detected", + "anomaly_id": "anomaly-2025-12-06-001", + "timestamp": "2025-12-06T14:45:00Z", + "detection_method": "statistical", + "metric": "treasury.receipts_per_minute", + "node": "did:vm:node:brick-01", + "expected_range": {"min": 10, "max": 50}, + "observed_value": 2, + "deviation_sigma": 4.2, + "confidence": 0.98, + "possible_causes": [ + "upstream service degradation", + "network partition", + "configuration change" + ], + "correlated_events": ["alert-2025-12-06-001"], + "tags": ["observability", "anomaly", "treasury"], + "root_hash": "blake3:iii999..." +} +``` + +--- + +### 3.3 Ledger Layer (L3) + +**Receipt Types**: + +| Type | When Emitted | +| ---------------------- | ------------------------------------- | +| `obs_metric_snapshot` | Hourly metric aggregation | +| `obs_log_batch` | Hourly log batch sealed | +| `obs_trace_complete` | Significant trace completed | +| `obs_alert_fired` | Alert triggered | +| `obs_alert_resolved` | Alert resolved | +| `obs_slo_report` | Daily SLO report | +| `obs_anomaly_detected` | Statistical anomaly detected | + +**Merkle Coverage**: +- All receipts append to `receipts/observability/observability_events.jsonl` +- `ROOT.observability.txt` updated after each append +- Guardian anchors Observability root in anchor cycles + +--- + +## 4. 
Query Interface + +`observability_query_events.py`: + +```bash +# Metric snapshots +vm-obs query --type metric_snapshot --from 2025-12-01 + +# Log batches with errors +vm-obs query --type log_batch --filter "log_counts.error > 0" + +# Traces over 5 seconds +vm-obs query --type trace_complete --filter "duration_ms > 5000" + +# All alerts for a node +vm-obs query --type alert_fired,alert_resolved --node brick-02 + +# SLO reports with missed targets +vm-obs query --type slo_report --filter "overall_status != 'healthy'" + +# Anomalies in last 7 days +vm-obs query --type anomaly_detected --last 7d + +# Export for analysis +vm-obs query --from 2025-12-01 --format parquet > observability_dec.parquet +``` + +**Correlation Tool**: +```bash +# Correlate events around a timestamp +vm-obs correlate --timestamp "2025-12-06T14:35:00Z" --window 15m + +# Output: +# Timeline around 2025-12-06T14:35:00Z (±15m): +# +# 14:20:00 [metric] brick-02 cpu_percent starts rising +# 14:25:00 [log] guardian: "anchor queue depth increasing" +# 14:30:00 [trace] trace-abc123 completed (2345ms, normal) +# 14:32:00 [metric] brick-02 cpu_percent crosses 80% +# 14:35:00 [alert] HighCPUUsage fired on brick-02 +# 14:40:00 [log] guardian: "processing backlog" +# 14:45:00 [anomaly] treasury.receipts_per_minute low +# 14:50:00 [log] guardian: "backlog cleared" +# 15:10:00 [alert] HighCPUUsage resolved on brick-02 +``` + +--- + +## 5. Design Gate Checklist + +| Question | Observability Answer | +| --------------------- | ------------------------------------------------------------------ | +| Clear entrypoint? | ✅ CLI (`vm-obs`), MCP tools, Portal HTTP | +| Contract produced? | ✅ Implicit (continuous) + explicit for alert acks, SLO definitions | +| State object? | ✅ Time-series DBs, search indices (continuous state) | +| Receipts emitted? | ✅ Seven receipt types covering all observability events | +| Append-only JSONL? | ✅ `receipts/observability/observability_events.jsonl` | +| Merkle root? | ✅ `ROOT.observability.txt` | +| Guardian anchor path? | ✅ Observability root included in ProofChain | +| Query tool? | ✅ `observability_query_events.py` + correlation tool | + +--- + +## 6. 
Data Pipeline + +### 6.1 Collection Architecture + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ BRICK Nodes │ +│ ┌─────────┐ ┌─────────┐ ┌─────────┐ ┌─────────┐ │ +│ │ brick-01│ │ brick-02│ │ brick-03│ │portal-01│ │ +│ └────┬────┘ └────┬────┘ └────┬────┘ └────┬────┘ │ +│ │ │ │ │ │ +│ ▼ ▼ ▼ ▼ │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ Collection Layer │ │ +│ │ ┌──────────┐ ┌──────────┐ ┌──────────────────────┐ │ │ +│ │ │Prometheus│ │Fluent Bit│ │OpenTelemetry Collector│ │ │ +│ │ │ (metrics)│ │ (logs) │ │ (traces) │ │ │ +│ │ └────┬─────┘ └────┬─────┘ └──────────┬───────────┘ │ │ +│ └───────┼─────────────┼───────────────────┼──────────────┘ │ +│ │ │ │ │ +│ ▼ ▼ ▼ │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ Storage Layer │ │ +│ │ ┌──────────┐ ┌──────────┐ ┌──────────────────────┐ │ │ +│ │ │VictoriaM │ │ Loki/ │ │ Tempo/Jaeger │ │ │ +│ │ │(metrics) │ │ OpenSearch│ │ (traces) │ │ │ +│ │ └────┬─────┘ └────┬─────┘ └──────────┬───────────┘ │ │ +│ └───────┼─────────────┼───────────────────┼──────────────┘ │ +│ │ │ │ │ +│ ▼ ▼ ▼ │ +│ ┌─────────────────────────────────────────────────────────┐ │ +│ │ Receipt Layer │ │ +│ │ ┌──────────────────────────────────────────────────┐ │ │ +│ │ │ Observability Receipt Emitter │ │ │ +│ │ │ (hourly snapshots, alerts, SLOs, anomalies) │ │ │ +│ │ └──────────────────────────────────────────────────┘ │ │ +│ └─────────────────────────────────────────────────────────┘ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +### 6.2 Retention Policies + +| Data Type | Hot Storage | Warm Storage | Cold/Archive | Receipt | +| ------------------ | -------------- | -------------- | -------------- | ------- | +| Metrics (raw) | 7 days | 30 days | 1 year | Hourly | +| Metrics (1h agg) | 30 days | 1 year | 5 years | Hourly | +| Logs (all) | 7 days | 30 days | 1 year | Hourly | +| Logs (error+) | 30 days | 1 year | 5 years | Hourly | +| Traces (sampled) | 7 days | 30 days | — | Per-trace | +| Traces (errors) | 30 days | 1 year | 5 years | Per-trace | +| Alerts | Indefinite | Indefinite | Indefinite | Per-event | +| SLO Reports | Indefinite | Indefinite | Indefinite | Daily | + +### 6.3 Sampling Strategy + +```json +{ + "sampling_rules": [ + { + "name": "always_sample_errors", + "condition": "status == 'error' OR level >= 'error'", + "rate": 1.0 + }, + { + "name": "always_sample_slow", + "condition": "duration_ms > 5000", + "rate": 1.0 + }, + { + "name": "always_sample_sensitive", + "condition": "service IN ['treasury', 'identity', 'offsec']", + "rate": 1.0 + }, + { + "name": "default_traces", + "condition": "true", + "rate": 0.1 + } + ] +} +``` + +--- + +## 7. 
Alerting Framework + +### 7.1 Alert Rules + +```yaml +groups: + - name: vaultmesh-critical + rules: + - alert: NodeDown + expr: up == 0 + for: 2m + labels: + severity: critical + annotations: + summary: "Node {{ $labels.node }} is down" + runbook: https://docs.vaultmesh.io/runbooks/node-down + + - alert: AnchorBacklogHigh + expr: guardian_anchor_queue_depth > 100 + for: 10m + labels: + severity: warning + annotations: + summary: "Anchor queue depth is {{ $value }}" + + - alert: SLOBudgetBurning + expr: slo_error_budget_remaining_percent < 25 + for: 5m + labels: + severity: warning + annotations: + summary: "SLO {{ $labels.slo }} error budget at {{ $value }}%" +``` + +### 7.2 Notification Channels + +| Severity | Channels | Response Time | +| ----------- | ------------------------------------- | ------------- | +| `critical` | PagerDuty, SMS, Slack #critical | Immediate | +| `high` | PagerDuty, Slack #alerts | 15 minutes | +| `warning` | Slack #alerts, Email | 1 hour | +| `info` | Slack #ops | Best effort | + +--- + +## 8. Integration Points + +| System | Integration | +| ---------------- | ------------------------------------------------------------------------ | +| **Guardian** | Emits anchor metrics/traces; alerts on anchor failures | +| **Treasury** | Transaction metrics; latency SLOs; receipt throughput | +| **Identity** | Auth event logs; failed login alerts; session metrics | +| **Mesh** | Node health metrics; route latency; topology change logs | +| **OffSec** | Security event correlation; incident timeline enrichment | +| **Oracle** | Query latency metrics; confidence score distributions | +| **Automation** | Workflow execution traces; n8n performance metrics | + +--- + +## 9. Future Extensions + +- **AI-powered anomaly detection**: ML models for predictive alerting +- **Distributed tracing visualization**: Real-time trace graphs in Portal +- **Log pattern mining**: Automatic extraction of error patterns +- **Chaos engineering integration**: Correlate chaos experiments with observability +- **Cost attribution**: Resource usage per scroll/service for Treasury billing +- **Compliance dashboards**: Real-time compliance posture visualization diff --git a/docs/VAULTMESH-OFFSEC-ENGINE.md b/docs/VAULTMESH-OFFSEC-ENGINE.md new file mode 100644 index 0000000..21f2e98 --- /dev/null +++ b/docs/VAULTMESH-OFFSEC-ENGINE.md @@ -0,0 +1,652 @@ +# VAULTMESH-OFFSEC-ENGINE.md + +**Civilization Ledger Security Operations Primitive** + +> *Every intrusion has a timeline. Every response has a receipt.* + +OffSec is VaultMesh's security operations memory — tracking real incidents, red team engagements, vulnerability discoveries, and remediation efforts with forensic-grade evidence chains. + +--- + +## 1. Scroll Definition + +| Property | Value | +| --------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ | +| **Scroll Name** | `OffSec` | +| **JSONL Path** | `receipts/offsec/offsec_events.jsonl` | +| **Root File** | `ROOT.offsec.txt` | +| **Receipt Types** | `offsec_incident`, `offsec_redteam`, `offsec_vuln_discovery`, `offsec_remediation`, `offsec_threat_intel`, `offsec_forensic_snapshot` | + +--- + +## 2. Core Concepts + +### 2.1 Incidents + +A security **incident** is any confirmed or suspected security event requiring investigation and response. 
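+
+Status transitions are constrained (see the status flow diagram later in this section). A minimal guard, written here as an illustrative sketch rather than the engine's actual implementation, might look like:
+
+```python
+# Allowed transitions, mirroring the status flow documented below.
+# Assumption: false_positive may be declared during triage or investigation.
+ALLOWED_TRANSITIONS = {
+    "reported": {"triaging"},
+    "triaging": {"investigating", "false_positive"},
+    "investigating": {"contained", "false_positive"},
+    "contained": {"eradicating"},
+    "eradicating": {"recovered"},
+    "recovered": {"closed"},
+    "false_positive": {"closed"},
+}
+
+def advance_incident(incident: dict, new_status: str) -> dict:
+    """Move an incident to new_status, rejecting illegal jumps."""
+    current = incident["status"]
+    if new_status not in ALLOWED_TRANSITIONS.get(current, set()):
+        raise ValueError(f"illegal transition: {current} -> {new_status}")
+    incident["status"] = new_status
+    return incident
+```
+
+The incident record itself is structured JSON: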
+ +```json +{ + "incident_id": "INC-2025-12-001", + "title": "Unauthorized SSH Access Attempt on BRICK-02", + "severity": "high", + "status": "investigating", + "reported_at": "2025-12-06T03:47:00Z", + "reported_by": "guardian-automated", + "affected_nodes": ["did:vm:node:brick-02"], + "attack_vector": "brute_force", + "indicators": [ + { + "type": "ip", + "value": "185.220.101.42", + "context": "source of SSH attempts" + }, + { + "type": "pattern", + "value": "1200+ failed auth in 10min", + "context": "rate anomaly" + } + ], + "containment_actions": [], + "tags": ["ssh", "brute-force", "external"] +} +``` + +**Severity levels**: + +* `critical` — active breach, data exfiltration, system compromise +* `high` — confirmed attack, potential breach +* `medium` — suspicious activity, policy violation +* `low` — anomaly, informational + +**Status flow**: + +``` +reported → triaging → investigating → contained → eradicating → recovered → closed + ↘ false_positive → closed +``` + +### 2.2 Red Team Engagements + +Authorized offensive operations against VaultMesh infrastructure. + +```json +{ + "engagement_id": "RT-2025-Q4-001", + "title": "Q4 External Perimeter Assessment", + "engagement_type": "external_pentest", + "status": "in_progress", + "scope": { + "in_scope": ["*.vaultmesh.io", "portal-01", "brick-01", "brick-02"], + "out_of_scope": ["production databases", "third-party integrations"], + "rules_of_engagement": "No DoS, no social engineering, business hours only" + }, + "team": ["operator-alpha", "operator-bravo"], + "authorized_by": "did:vm:node:portal-01", + "started_at": "2025-12-01T09:00:00Z", + "scheduled_end": "2025-12-15T18:00:00Z", + "findings": [] +} +``` + +**Engagement types**: + +* `external_pentest` — outside-in assessment +* `internal_pentest` — assumed-breach scenario +* `red_team` — full adversary emulation +* `purple_team` — collaborative attack/defense +* `tabletop` — scenario-based discussion (no actual attacks) + +### 2.3 Vulnerability Discoveries + +Vulnerabilities found through any means (scanning, manual testing, bug reports, threat intel). + +```json +{ + "vuln_id": "VULN-2025-12-001", + "title": "OpenSSH CVE-2024-XXXXX on BRICK-02", + "severity": "high", + "cvss_score": 8.1, + "status": "confirmed", + "discovered_at": "2025-12-06T10:30:00Z", + "discovered_by": "RT-2025-Q4-001", + "discovery_method": "pentest", + "affected_assets": ["did:vm:node:brick-02"], + "cve": "CVE-2024-XXXXX", + "description": "Remote code execution via crafted SSH packet", + "evidence_path": "cases/offsec/VULN-2025-12-001/evidence/", + "remediation_status": "pending", + "tags": ["ssh", "rce", "cve"] +} +``` + +### 2.4 Remediations + +Actions taken to fix vulnerabilities or recover from incidents. + +```json +{ + "remediation_id": "REM-2025-12-001", + "title": "Patch OpenSSH on BRICK-02", + "related_to": { + "type": "vulnerability", + "id": "VULN-2025-12-001" + }, + "status": "completed", + "assigned_to": "sovereign", + "started_at": "2025-12-06T11:00:00Z", + "completed_at": "2025-12-06T11:45:00Z", + "actions_taken": [ + "Applied OpenSSH 9.6p1 patch", + "Restarted sshd service", + "Verified patch version", + "Re-scanned to confirm fix" + ], + "verification": { + "method": "rescan", + "result": "not_vulnerable", + "verified_at": "2025-12-06T12:00:00Z" + }, + "evidence_path": "cases/offsec/REM-2025-12-001/evidence/" +} +``` + +--- + +## 3. 
Mapping to Eternal Pattern + +### 3.1 Experience Layer (L1) + +**CLI** (`vm-offsec`): + +```bash +# Incident management +vm-offsec incident create --title "Suspicious outbound traffic" --severity medium +vm-offsec incident list --status investigating +vm-offsec incident show INC-2025-12-001 +vm-offsec incident update INC-2025-12-001 --status contained +vm-offsec incident close INC-2025-12-001 --resolution "false_positive" + +# Red team +vm-offsec redteam create --config engagements/q4-external.json +vm-offsec redteam list --status in_progress +vm-offsec redteam finding add RT-2025-Q4-001 --vuln VULN-2025-12-001 +vm-offsec redteam close RT-2025-Q4-001 --report reports/RT-2025-Q4-001.pdf + +# Vulnerabilities +vm-offsec vuln create --title "Weak TLS config" --severity medium --asset portal-01 +vm-offsec vuln list --status confirmed --severity high,critical +vm-offsec vuln remediate VULN-2025-12-001 --assigned sovereign + +# Threat intel +vm-offsec intel add --type ioc --value "185.220.101.42" --context "Tor exit node" +vm-offsec intel search --type ip --value "185.220.101.42" + +# Forensics +vm-offsec forensic snapshot --node brick-02 --reason "INC-2025-12-001 investigation" +vm-offsec forensic timeline INC-2025-12-001 --output timeline.json +``` + +**MCP Tools**: + +* `offsec_incident_create` — create new incident +* `offsec_incident_status` — get incident details +* `offsec_vuln_search` — search vulnerabilities +* `offsec_ioc_check` — check if indicator is known +* `offsec_timeline` — generate incident timeline + +**Portal HTTP**: + +* `POST /offsec/incidents` — create incident +* `GET /offsec/incidents` — list incidents +* `GET /offsec/incidents/{id}` — incident details +* `PATCH /offsec/incidents/{id}` — update incident +* `POST /offsec/redteam` — create engagement +* `GET /offsec/vulnerabilities` — list vulns +* `POST /offsec/intel` — add threat intel +* `POST /offsec/forensic/snapshot` — capture forensic state + +--- + +### 3.2 Engine Layer (L2) + +#### Step 1 — Plan → `offsec_case_contract.json` + +For incidents and red team engagements, an explicit case contract: + +**Incident Contract**: + +```json +{ + "case_id": "INC-2025-12-001", + "case_type": "incident", + "title": "Unauthorized SSH Access Attempt on BRICK-02", + "severity": "high", + "created_at": "2025-12-06T03:47:00Z", + "phases": [ + { + "phase_id": "phase-1-triage", + "name": "Triage", + "objectives": [ + "Confirm attack is real (not false positive)", + "Identify affected systems", + "Assess immediate risk" + ], + "checklist": [ + "Review Guardian alerts", + "Check auth logs on BRICK-02", + "Correlate with other nodes", + "Determine if access was successful" + ] + }, + { + "phase_id": "phase-2-contain", + "name": "Containment", + "objectives": [ + "Stop ongoing attack", + "Prevent lateral movement", + "Preserve evidence" + ], + "checklist": [ + "Block source IP at firewall", + "Rotate SSH keys if needed", + "Snapshot affected systems", + "Enable enhanced logging" + ] + }, + { + "phase_id": "phase-3-eradicate", + "name": "Eradication", + "objectives": [ + "Remove attacker access", + "Patch vulnerabilities", + "Harden configuration" + ] + }, + { + "phase_id": "phase-4-recover", + "name": "Recovery", + "objectives": [ + "Restore normal operations", + "Verify security posture", + "Document lessons learned" + ] + } + ], + "assigned_responders": ["sovereign"], + "escalation_path": ["guardian-automated", "portal-admin"] +} +``` + +**Red Team Contract**: + +```json +{ + "case_id": "RT-2025-Q4-001", + "case_type": "redteam", + 
"title": "Q4 External Perimeter Assessment", + "engagement_type": "external_pentest", + "created_at": "2025-12-01T09:00:00Z", + "phases": [ + { + "phase_id": "phase-1-recon", + "name": "Reconnaissance", + "objectives": ["Map external attack surface", "Identify services", "OSINT gathering"] + }, + { + "phase_id": "phase-2-enum", + "name": "Enumeration", + "objectives": ["Service fingerprinting", "Version detection", "Vuln scanning"] + }, + { + "phase_id": "phase-3-exploit", + "name": "Exploitation", + "objectives": ["Attempt exploitation of discovered vulns", "Document success/failure"] + }, + { + "phase_id": "phase-4-report", + "name": "Reporting", + "objectives": ["Compile findings", "Risk rating", "Remediation recommendations"] + } + ], + "scope": { "...": "..." }, + "rules_of_engagement": "...", + "authorized_by": "did:vm:node:portal-01" +} +``` + +#### Step 2 — Execute → `offsec_case_state.json` + +```json +{ + "case_id": "INC-2025-12-001", + "case_type": "incident", + "status": "contained", + "created_at": "2025-12-06T03:47:00Z", + "updated_at": "2025-12-06T06:30:00Z", + "phases": [ + { + "phase_id": "phase-1-triage", + "status": "completed", + "started_at": "2025-12-06T03:50:00Z", + "completed_at": "2025-12-06T04:15:00Z", + "findings": [ + "Attack confirmed real - 1247 failed SSH attempts from 185.220.101.42", + "No successful authentication detected", + "Only BRICK-02 targeted" + ], + "evidence": ["logs/brick-02-auth.log.gz", "screenshots/guardian-alert.png"] + }, + { + "phase_id": "phase-2-contain", + "status": "completed", + "started_at": "2025-12-06T04:15:00Z", + "completed_at": "2025-12-06T04:30:00Z", + "actions_taken": [ + "Blocked 185.220.101.42 at WireGuard firewall", + "Verified no unauthorized sessions active", + "Captured forensic snapshot of BRICK-02" + ], + "evidence": ["firewall-rule-add.sh", "snapshot-brick02-20251206.tar.gz"] + }, + { + "phase_id": "phase-3-eradicate", + "status": "in_progress", + "started_at": "2025-12-06T06:00:00Z" + }, + { + "phase_id": "phase-4-recover", + "status": "pending" + } + ], + "indicators_collected": [ + {"type": "ip", "value": "185.220.101.42"}, + {"type": "user_agent", "value": "SSH-2.0-libssh_0.9.6"} + ], + "timeline_path": "cases/offsec/INC-2025-12-001/timeline.json" +} +``` + +#### Step 3 — Seal → Receipts + +**Incident Receipt** (on case closure): + +```json +{ + "type": "offsec_incident", + "incident_id": "INC-2025-12-001", + "title": "Unauthorized SSH Access Attempt on BRICK-02", + "severity": "high", + "timestamp_reported": "2025-12-06T03:47:00Z", + "timestamp_closed": "2025-12-06T12:00:00Z", + "status": "closed", + "resolution": "contained_no_breach", + "affected_nodes": ["did:vm:node:brick-02"], + "attack_vector": "brute_force", + "phases_completed": 4, + "indicators_count": 2, + "evidence_manifest": "cases/offsec/INC-2025-12-001/EVIDENCE.sha256", + "timeline_hash": "blake3:aaa111...", + "lessons_learned": "Implement fail2ban on all nodes; add SSH rate limiting at network edge", + "tags": ["incident", "ssh", "brute-force", "contained"], + "root_hash": "blake3:bbb222...", + "proof_path": "cases/offsec/INC-2025-12-001/PROOF.json" +} +``` + +**Vulnerability Discovery Receipt**: + +```json +{ + "type": "offsec_vuln_discovery", + "vuln_id": "VULN-2025-12-001", + "title": "OpenSSH CVE-2024-XXXXX on BRICK-02", + "severity": "high", + "cvss_score": 8.1, + "timestamp_discovered": "2025-12-06T10:30:00Z", + "discovered_by": "RT-2025-Q4-001", + "discovery_method": "pentest", + "affected_assets": ["did:vm:node:brick-02"], + "cve": 
"CVE-2024-XXXXX", + "remediation_status": "remediated", + "remediation_id": "REM-2025-12-001", + "tags": ["vulnerability", "ssh", "rce", "cve", "remediated"], + "root_hash": "blake3:ccc333..." +} +``` + +**Remediation Receipt**: + +```json +{ + "type": "offsec_remediation", + "remediation_id": "REM-2025-12-001", + "title": "Patch OpenSSH on BRICK-02", + "related_vuln": "VULN-2025-12-001", + "timestamp_started": "2025-12-06T11:00:00Z", + "timestamp_completed": "2025-12-06T11:45:00Z", + "status": "verified", + "actions_count": 4, + "verification_method": "rescan", + "verification_result": "not_vulnerable", + "evidence_manifest": "cases/offsec/REM-2025-12-001/EVIDENCE.sha256", + "tags": ["remediation", "patch", "ssh", "verified"], + "root_hash": "blake3:ddd444..." +} +``` + +**Red Team Receipt** (on engagement close): + +```json +{ + "type": "offsec_redteam", + "engagement_id": "RT-2025-Q4-001", + "title": "Q4 External Perimeter Assessment", + "engagement_type": "external_pentest", + "timestamp_started": "2025-12-01T09:00:00Z", + "timestamp_closed": "2025-12-15T17:00:00Z", + "status": "completed", + "findings_critical": 0, + "findings_high": 1, + "findings_medium": 3, + "findings_low": 7, + "findings_info": 12, + "vulns_created": ["VULN-2025-12-001", "VULN-2025-12-002", "VULN-2025-12-003", "VULN-2025-12-004"], + "report_hash": "blake3:eee555...", + "report_path": "cases/offsec/RT-2025-Q4-001/report.pdf", + "tags": ["redteam", "pentest", "external", "q4"], + "root_hash": "blake3:fff666...", + "proof_path": "cases/offsec/RT-2025-Q4-001/PROOF.json" +} +``` + +--- + +### 3.3 Ledger Layer (L3) + +**Receipt Types**: + +| Type | When Emitted | +| -------------------------- | -------------------------- | +| `offsec_incident` | Incident closed | +| `offsec_redteam` | Red team engagement closed | +| `offsec_vuln_discovery` | Vulnerability confirmed | +| `offsec_remediation` | Remediation verified | +| `offsec_threat_intel` | New IOC/TTP added | +| `offsec_forensic_snapshot` | Forensic capture taken | + +**Merkle Coverage**: + +* All receipts append to `receipts/offsec/offsec_events.jsonl` +* `ROOT.offsec.txt` updated after each append +* Guardian anchors OffSec root in anchor cycles + +--- + +## 4. Query Interface + +`offsec_query_events.py`: + +```bash +# Incidents by status +vm-offsec query --type incident --status investigating,contained + +# Incidents by severity +vm-offsec query --type incident --severity critical,high + +# Vulnerabilities pending remediation +vm-offsec query --type vuln_discovery --remediation-status pending + +# Red team findings +vm-offsec query --engagement RT-2025-Q4-001 + +# Date range +vm-offsec query --from 2025-11-01 --to 2025-12-01 + +# By affected node +vm-offsec query --node brick-02 + +# IOC search +vm-offsec query --ioc-type ip --ioc-value "185.220.101.42" + +# Export for compliance +vm-offsec query --from 2025-01-01 --format csv > security_events_2025.csv +``` + +**Timeline Generator**: + +```bash +# Generate incident timeline +vm-offsec timeline INC-2025-12-001 --format json +vm-offsec timeline INC-2025-12-001 --format mermaid > timeline.mmd + +# Output (Mermaid): +# gantt +# title INC-2025-12-001 Timeline +# dateFormat YYYY-MM-DDTHH:mm +# section Triage +# Review alerts :2025-12-06T03:50, 15m +# Confirm attack :2025-12-06T04:05, 10m +# section Containment +# Block IP :2025-12-06T04:15, 5m +# Verify no breach :2025-12-06T04:20, 10m +``` + +--- + +## 5. 
Design Gate Checklist + +| Question | OffSec Answer | +| --------------------- | ------------------------------------------------------- | +| Clear entrypoint? | ✅ CLI (`vm-offsec`), MCP tools, Portal HTTP | +| Contract produced? | ✅ `offsec_case_contract.json` for incidents and red team | +| State object? | ✅ `offsec_case_state.json` tracking phases and evidence | +| Receipts emitted? | ✅ Six receipt types covering all security operations | +| Append-only JSONL? | ✅ `receipts/offsec/offsec_events.jsonl` | +| Merkle root? | ✅ `ROOT.offsec.txt` | +| Guardian anchor path? | ✅ OffSec root included in ProofChain | +| Query tool? | ✅ `offsec_query_events.py` + timeline generator | + +--- + +## 6. Evidence Chain Integrity + +OffSec has stricter evidence requirements than other scrolls: + +### 6.1 Evidence Manifest + +Every case produces an evidence manifest: + +``` +cases/offsec/INC-2025-12-001/ +├── contract.json +├── state.json +├── timeline.json +├── EVIDENCE.sha256 +├── PROOF.json +└── evidence/ + ├── logs/ + │ └── brick-02-auth.log.gz + ├── screenshots/ + │ └── guardian-alert.png + ├── captures/ + │ └── traffic-2025-12-06.pcap.gz + └── forensic/ + └── snapshot-brick02-20251206.tar.gz +``` + +`EVIDENCE.sha256`: + +``` +blake3:aaa111... evidence/logs/brick-02-auth.log.gz +blake3:bbb222... evidence/screenshots/guardian-alert.png +blake3:ccc333... evidence/captures/traffic-2025-12-06.pcap.gz +blake3:ddd444... evidence/forensic/snapshot-brick02-20251206.tar.gz +``` + +### 6.2 Chain of Custody + +For legal/compliance scenarios, evidence includes custody metadata: + +```json +{ + "evidence_id": "evidence/logs/brick-02-auth.log.gz", + "collected_at": "2025-12-06T04:00:00Z", + "collected_by": "sovereign", + "collection_method": "scp from brick-02:/var/log/auth.log", + "original_hash": "blake3:aaa111...", + "custody_chain": [ + { + "action": "collected", + "timestamp": "2025-12-06T04:00:00Z", + "actor": "sovereign", + "location": "brick-02" + }, + { + "action": "transferred", + "timestamp": "2025-12-06T04:05:00Z", + "actor": "sovereign", + "from": "brick-02", + "to": "portal-01:/cases/offsec/INC-2025-12-001/evidence/" + } + ] +} +``` + +--- + +## 7. Integration Points + +| System | Integration | +| -------------- | --------------------------------------------------------------------------------- | +| **Guardian** | Triggers incident creation on security events; OffSec can request emergency anchors | +| **Drills** | Drill findings can auto-create vulnerabilities in OffSec | +| **Mesh** | Incidents can trigger emergency capability revocations; node isolation | +| **Treasury** | Red team engagements can have associated budgets; incident costs tracked | +| **Oracle** | Can query OffSec for compliance ("Any unresolved critical vulns?") | + +--- + +## 8. Future Extensions + +* **SOAR integration**: Automated playbook execution via n8n +* **Threat intel feeds**: Auto-import IOCs from MISP, OTX, etc. +* **MITRE ATT&CK mapping**: Tag incidents/findings with ATT&CK techniques +* **SLA tracking**: Time-to-contain, time-to-remediate metrics +* **External reporting**: Generate reports for insurers, regulators, clients +* **AI-assisted triage**: Use Oracle to help classify and prioritize incidents + +--- + +## 9. Drills vs. 
OffSec: When to Use Which + +| Aspect | Drills | OffSec | +| -------------- | ------------------------- | ------------------------------------------ | +| **Purpose** | Practice and training | Real operations | +| **Targets** | Lab/isolated environments | Production or scoped prod | +| **Findings** | Learning outcomes | Actionable vulnerabilities | +| **Evidence** | Educational artifacts | Legal-grade evidence | +| **Urgency** | Scheduled | Real-time response | +| **Receipts** | `security_drill_run` | `offsec_incident`, `offsec_redteam`, etc. | + +A Drill might discover a theoretical weakness. OffSec confirms and tracks its remediation in production. diff --git a/docs/VAULTMESH-PROOFBUNDLE-PLAYBOOK.md b/docs/VAULTMESH-PROOFBUNDLE-PLAYBOOK.md new file mode 100644 index 0000000..7e5a549 --- /dev/null +++ b/docs/VAULTMESH-PROOFBUNDLE-PLAYBOOK.md @@ -0,0 +1,169 @@ +# How to Verify a VaultMesh ProofBundle +_Version 1.0 – Regulator Playbook_ + +This Playbook explains how to verify a VaultMesh **ProofBundle** using only a JSON file and an open-source Python script. No network access to VaultMesh is required. + +--- + +## 1. What a ProofBundle Proves + +A VaultMesh ProofBundle is an offline evidence package that demonstrates: + +1. **Authenticity of receipts** + Each event (e.g. document download) is represented as a receipt with a BLAKE3 hash. + +2. **Continuity of the hash-chain** + Each receipt's `previous_hash` links to the `root_hash` of the prior receipt, forming a tamper-evident chain. + +3. **Attribution to cryptographic identities and sealed state** + Actor and system identities are expressed as DIDs (e.g. `did:vm:human:…`, `did:vm:portal:…`), and the chain is linked to a sealed ledger state via Guardian anchor information. + +--- + +## 2. What You Need + +**Environment** + +- Python **3.10+** +- Internet access **not** required + +**Python dependency** + +```bash +pip install blake3 +``` + +**Files you receive** + +From the audited party you should receive: + +- `proofbundle-*.json` + A JSON file containing the ProofBundle (e.g. `proofbundle-dl-20251206T174556.json`) + +- `vm_verify_proofbundle.py` + The open-source verifier script (or a link to its public source) + +--- + +## 3. Verification in 3 Steps + +### Step 1 – Place files in a working directory + +```bash +mkdir vaultmesh-proof +cd vaultmesh-proof + +# Copy the bundle and verifier here, for example: +# proofbundle-dl-20251206T174556.json +# vm_verify_proofbundle.py +``` + +### Step 2 – Install the BLAKE3 dependency + +```bash +pip install blake3 +``` + +This provides the BLAKE3 hash function used by VaultMesh receipts. + +### Step 3 – Run the verifier + +```bash +python3 vm_verify_proofbundle.py proofbundle-dl-20251206T174556.json +``` + +The script will: + +1. Load the bundle JSON. +2. Recompute BLAKE3 over each receipt. +3. Compare computed hashes against `root_hash`. +4. Walk the `previous_hash` chain to ensure the chain is contiguous. +5. Compare its own verification result with the bundle's declared `chain.ok` flag. + +--- + +## 4. Example Outputs + +### 4.1 Valid bundle + +Typical output for a valid bundle: + +``` +ProofBundle: pb-20251206174603-dl-20251206T174556-b5bb3d +Document : 001 AI Governance Policy +File : VM-AI-GOV-001_AI_Governance_Policy.docx +Actor : did:vm:human:karol (Karol S) +Portal : did:vm:portal:shield (shield) + +Receipts : 7 +Hash check : OK +Chain linkage : OK +Bundle chain.ok: True (matches computed: True) + +Result: OK – chain of 7 receipts is contiguous and valid. 
+``` + +**Interpretation:** + +- All receipt hashes are correct. +- The hash-chain is unbroken from the first event to the document download. +- The bundle's own `chain.ok` value is honest. +- The ProofBundle can be relied on as an integrity-preserving trace of events. + +--- + +### 4.2 Tampered bundle + +If any receipt is modified (for example, a timestamp, actor DID, or type), the verifier will detect it: + +``` +ProofBundle: pb-20251206174603-dl-20251206T174556-b5bb3d +Document : 001 AI Governance Policy +File : VM-AI-GOV-001_AI_Governance_Policy.docx +Actor : did:vm:human:karol (Karol S) +Portal : did:vm:portal:shield (shield) + +Receipts : 7 +Hash check : FAIL +Chain linkage : OK +Bundle chain.ok: True (matches computed: False) + +Result: FAIL – ProofBundle verification failed. + +Details: + - receipt[2] root_hash mismatch: expected blake3:4e7cf7...4209f, computed blake3:9a2b1c...77e3d + - bundle chain.ok (True) does not match computed result (False) +``` + +The verifier does not attempt to repair or reinterpret the chain. Any mismatch means the bundle has been altered or is inconsistent with the original VaultMesh ledger. + +--- + +## 5. Interpreting Outcomes + +| Exit Code | Meaning | +|-----------|---------| +| **0** | **Valid** – The ProofBundle's chain is intact, hashes match, and the declared `chain.ok` flag is truthful. | +| **1** | **Invalid** – At least one of: a receipt's `root_hash` does not match its contents, the `previous_hash` chain is broken, or the bundle's `chain.ok` flag disagrees with the verifier's result. | +| **2** | **Error** – The verifier could not process the bundle (e.g. malformed JSON, missing fields, unsupported schema version). Treat as verification failed. | + +--- + +## 6. Security Notes + +- **Verification is fully offline**: no VaultMesh node, API, or network connectivity is required. +- The ProofBundle contains **cryptographic DIDs** for actors and systems; these can be cross-checked against identity documentation provided separately (e.g. key attestations). +- The **Guardian anchor** and scroll roots in the bundle allow a deeper, optional verification against a running VaultMesh node, but this is not required for basic bundle integrity checks. + +--- + +## Short Version + +If the verifier script returns **`Result: OK`** with **exit code 0**, you have a tamper-evident, DID-attributed trace from initial checks to the specific document download event. + +**No VaultMesh access required — verification is fully offline.** + +--- + +_VaultMesh ProofBundle Verification Playbook v1.0_ +_Sovereign Infrastructure for the Digital Age_ diff --git a/docs/VAULTMESH-PROOFBUNDLE-SPEC.md b/docs/VAULTMESH-PROOFBUNDLE-SPEC.md new file mode 100644 index 0000000..ef635ae --- /dev/null +++ b/docs/VAULTMESH-PROOFBUNDLE-SPEC.md @@ -0,0 +1,595 @@ +# VAULTMESH-PROOFBUNDLE-SPEC +_Version 1.1.0 – ProofBundle Data Model & Verification Semantics_ + +## 1. Introduction + +This document specifies the structure and verification semantics of the **VaultMesh ProofBundle**. + +A ProofBundle is a self-contained evidence artifact intended for regulators, auditors, and relying parties. It packages: + +- A document-specific event chain (e.g. skill validations → document download), +- Cryptographic identities (DIDs) for human and system actors, +- Sealed ledger state (Guardian anchor and scroll roots), +- Placeholder references for external ProofChain anchors (e.g. BTC/ETH/OTS). 
+ +A ProofBundle is designed to be verifiable **offline**, using only the bundle JSON and an open-source verifier. + +--- + +## 2. Terminology + +The following terms are used in the RFC sense: + +- **MUST** / **MUST NOT** – absolute requirement. +- **SHOULD** / **SHOULD NOT** – strong recommendation; valid reasons may exist to deviate, but they must be understood. +- **MAY** – optional behavior. + +Additional terms: + +| Term | Definition | +|------|------------| +| **Receipt** | A canonical JSON object representing a single ledger event (e.g. `document_download`, `skill_validation`), including at minimum a `root_hash`. | +| **Scroll** | An append-only JSONL file containing receipts of a given class (e.g. Automation, Guardian, Identity). | +| **Guardian Anchor** | A special receipt that commits to the current state of all scrolls via BLAKE3 roots, written to the Guardian scroll. | +| **DID** | Decentralized Identifier in the VaultMesh namespace, e.g. `did:vm:human:karol`, `did:vm:portal:shield`, `did:vm:guardian:local`. | +| **ProofChain** | Optional external anchoring backends (e.g. Bitcoin, Ethereum, OpenTimestamps) referenced by the bundle. | + +--- + +## 3. Data Model + +### 3.1 Top-Level Structure + +A ProofBundle MUST be a single JSON object with the following top-level fields: + +```jsonc +{ + "bundle_id": "pb-20251206T174406-dl-20251206T165831-2ebdac", + "schema_version": "1.1.0", + "generated_at": "2025-12-06T17:44:06.123Z", + + "document": { ... }, + "actor": { ... }, + "portal": { ... }, + + "chain": { ... }, + "guardian_anchor": { ... }, + "proofchain": { ... }, + + "meta": { ... } +} +``` + +#### 3.1.1 bundle_id + +- **Type:** string +- **Semantics:** Globally unique identifier for the bundle instance. +- **Format:** Implementation-defined; SHOULD include the download ID and timestamp. + +#### 3.1.2 schema_version + +- **Type:** string +- **Semantics:** Version of this specification the bundle adheres to. +- This document describes version **1.1.0**. + +**Verifiers:** +- MUST reject unknown major versions. +- SHOULD attempt best-effort parsing of minor version bumps (e.g. 1.2.x), ignoring unknown fields. + +#### 3.1.3 generated_at + +- **Type:** string (ISO 8601 with UTC Z). +- **Semantics:** Time at which the ProofBundle was generated by the portal. + +--- + +### 3.2 Document Section + +```json +"document": { + "doc_id": "001 Conformity Declaration", + "filename": "VM-AI-CON-001_Conformity_Declaration.docx", + "category": "AI Governance" +} +``` + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `doc_id` | string | REQUIRED | Human-readable identifier used in the portal and receipts. | +| `filename` | string | REQUIRED | The file name of the underlying document. | +| `category` | string | OPTIONAL | High-level classification (e.g. "AI Governance", "Data Protection"). | +| `path` | string | OPTIONAL | Full path in repository. | + +--- + +### 3.3 Actor & Portal Sections + +```json +"actor": { + "did": "did:vm:human:karol", + "display_name": "Karol S", + "role": "auditor" +}, +"portal": { + "did": "did:vm:portal:shield", + "instance": "shield.story-ule.ts.net", + "description": "VaultMesh Auditor Portal – Shield node" +} +``` + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `actor.did` | string | REQUIRED | DID of the human or agent initiating the document download. | +| `actor.display_name` | string | OPTIONAL | Human-readable name; MAY be "Unknown Auditor" when not resolved. 
| +| `actor.role` | string | OPTIONAL | Role or function (e.g. "auditor", "DPO", "regulator"). | +| `portal.did` | string | REQUIRED | DID of the portal instance. | +| `portal.instance` | string | OPTIONAL | Hostname or logical instance ID. | + +#### 3.3.1 Actor Identity Semantics + +The `actor.did` field is the **normative identity anchor** for the human or agent +responsible for the documented action. It MUST be a valid VaultMesh DID (e.g. +`did:vm:human:karol`), resolvable in the VaultMesh Identity scroll. + +The `actor.display_name` field is **non-normative convenience metadata**. It is +resolved from the Identity scroll and/or local configuration (e.g. environment +variables) at bundle generation time. Implementations: + +- MUST treat `actor.did` as the authoritative identity reference. +- MUST NOT rely on `actor.display_name` for any cryptographic or access control decisions. +- MAY omit or localize `actor.display_name` without affecting ProofBundle validity. + +--- + +### 3.4 Chain Section + +```json +"chain": { + "ok": true, + "length": 7, + "start": { /* receipt summary */ }, + "end": { /* receipt summary */ }, + "receipts": [ /* full receipts */ ] +} +``` + +#### 3.4.1 ok + +- **Type:** boolean +- **Semantics:** Declarative statement by the generator that the chain is believed to be cryptographically valid at generation time. +- Verifiers MUST NOT rely on this field alone and MUST recompute chain validity. + +#### 3.4.2 length + +- **Type:** integer +- **Semantics:** Number of receipts represented in `receipts`. +- Verifiers SHOULD check that `length` equals `receipts.length`. + +#### 3.4.3 start and end + +- **Type:** object +- **Semantics:** Human-oriented summaries of the first and last receipts in the chain. + +```json +"start": { + "type": "skill_validation", + "timestamp": "2025-12-06T14:47:14.000Z", + "root_hash": "blake3:de01c8b3..." +}, +"end": { + "type": "document_download", + "timestamp": "2025-12-06T16:58:31.826Z", + "root_hash": "blake3:bb379364..." +} +``` + +Verifiers MAY recompute these summaries from `receipts` and SHOULD treat any inconsistency as an error. + +#### 3.4.4 receipts + +- **Type:** array of objects +- **Semantics:** Full chain of receipts from genesis (index 0) to the document download receipt (last index). + +Each receipt object: + +```json +{ + "type": "document_download", + "timestamp": "2025-12-06T16:58:31.826Z", + "root_hash": "blake3:bb379364566df7179a982d632267b492...", + "previous_hash": "blake3:de01c8b34e9d0453484d73048be11dd5...", + "actor_did": "did:vm:human:karol", + "portal_did": "did:vm:portal:shield" +} +``` + +**Minimum required fields per receipt:** + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `type` | string | REQUIRED | Event type (e.g. `skill_validation`, `document_download`). | +| `timestamp` | string | REQUIRED | ISO 8601 with UTC Z. | +| `root_hash` | string | REQUIRED | BLAKE3 digest of the canonical JSON form of the receipt. | +| `previous_hash` | string\|null | REQUIRED | BLAKE3 hash of the previous receipt; MUST be present for all receipts except the first. | + +Additional fields (e.g. `actor_did`, `portal_did`, `session_id`, `ip_hash`, `user_agent_hash`) are RECOMMENDED. 
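+
+As a non-normative illustration, the sketch below applies the structural checks described in 3.4.1–3.4.4: the declared `length`, the per-receipt required fields, and the consistency of the `start`/`end` summaries against the receipt list. The helper name and error strings are illustrative, not part of this specification.
+
+```python
+REQUIRED_RECEIPT_FIELDS = ("type", "timestamp", "root_hash", "previous_hash")
+
+def check_chain_structure(chain: dict) -> list[str]:
+    """Return a list of structural errors; an empty list means structurally OK."""
+    errors = []
+    receipts = chain.get("receipts", [])
+    # 3.4.2: declared length SHOULD equal the actual receipt count.
+    if chain.get("length") != len(receipts):
+        errors.append(f"length {chain.get('length')} != receipts count {len(receipts)}")
+    for i, receipt in enumerate(receipts):
+        for field in REQUIRED_RECEIPT_FIELDS:
+            # previous_hash MAY be null/absent for the first receipt only (see 4.3).
+            if field == "previous_hash" and i == 0:
+                continue
+            if field not in receipt:
+                errors.append(f"receipt[{i}] missing required field {field!r}")
+    # 3.4.3: start/end summaries SHOULD match the first and last receipts.
+    if receipts:
+        for label, summary, receipt in (("start", chain.get("start"), receipts[0]),
+                                        ("end", chain.get("end"), receipts[-1])):
+            if summary and summary.get("root_hash") != receipt.get("root_hash"):
+                errors.append(f"{label} summary does not match its receipt")
+    return errors
+```
+
+Structural validation alone does not establish integrity; it only confirms the bundle is well-formed enough for the cryptographic checks in Section 4.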
+ +--- + +### 3.5 Guardian Anchor Section + +```json +"guardian_anchor": { + "anchor_id": "anchor-20251206155628", + "anchor_by": "did:vm:guardian:local", + "anchor_epoch": 1765039262, + "anchor_timestamp": "2025-12-06T15:56:28Z", + "root_hash": "blake3:1af3b9a4...", + + "scroll_roots": { + "automation": { "root_hash": "blake3:aa12bb34...", "entries": 11, "has_root": true }, + "guardian": { "root_hash": "blake3:cc56dd78...", "entries": 5, "has_root": true }, + "identity": { "root_hash": "blake3:ee90ff12...", "entries": 4, "has_root": true } + } +} +``` + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `anchor_id` | string | REQUIRED | Identifier of the Guardian anchor receipt. | +| `anchor_by` | string | REQUIRED | DID of the Guardian engine. | +| `anchor_epoch` | integer | OPTIONAL | Epoch seconds at anchor time. | +| `anchor_timestamp` | string | REQUIRED | ISO 8601 timestamp of the anchor. | +| `root_hash` | string\|null | OPTIONAL | Global root hash (reserved for future use). | +| `scroll_roots` | object | REQUIRED | Map from scroll name to its root hash as committed in the anchor. | + +--- + +### 3.6 ProofChain Section + +```json +"proofchain": { + "btc": { "status": "not_anchored", "txid": null }, + "eth": { "status": "not_anchored", "txid": null }, + "ots": { "status": "not_anchored", "timestamp_url": null } +} +``` + +For each backend (`btc`, `eth`, `ots`): + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `status` | string | REQUIRED | One of: `"not_anchored"`, `"pending"`, `"anchored"` | +| `txid` / `timestamp_url` | string\|null | OPTIONAL | Backend-specific reference when anchored. | + +Verifiers: +- MAY ignore this section when performing purely local verification. +- SHOULD treat unknown statuses conservatively. + +--- + +### 3.7 Meta Section + +```json +"meta": { + "requested_by_session": "6pngxxbMxLYQf180qPmIeq-xkJ8nDBN3", + "requested_by_user": "karol@vaultmesh.earth", + "node": "shield" +} +``` + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `requested_by_session` | string | OPTIONAL | Portal session that requested the bundle. | +| `requested_by_user` | string | OPTIONAL | Account identifier in the portal. | +| `node` | string | OPTIONAL | Node name. | + +--- + +## 4. Cryptographic Properties + +### 4.1 Hash Function + +VaultMesh uses **BLAKE3** as the hash function for all `root_hash` and `previous_hash` values. + +- **Digest encoding:** hex string, prefixed with `"blake3:"`, e.g. `blake3:1af3b9a4...` +- Implementations MUST preserve the prefix and encoding when serializing. + +### 4.2 Receipt Hashing + +For each receipt R in `chain.receipts`: + +1. Serialize R to **canonical JSON**: + - UTF-8 encoding + - Sorted keys + - No insignificant whitespace: `separators=(",", ":")` + +2. Compute `H = BLAKE3(R_canonical)` + +3. Set `root_hash = "blake3:" + hex(H)` + +```python +encoded = json.dumps( + receipt_without_root_hash, + sort_keys=True, + separators=(",", ":"), + ensure_ascii=False +).encode("utf-8") + +root_hash = f"blake3:{blake3.blake3(encoded).hexdigest()}" +``` + +The verifier MUST recompute `root_hash` from the canonical JSON and compare it to the stored `root_hash`. Any mismatch indicates tampering. + +### 4.3 Hash-Chain Semantics + +Given receipts `R[0] ... R[n-1]`: + +- For `i = 0`: `R[0].previous_hash` MAY be `null` or absent. +- For `i > 0`: `R[i].previous_hash` MUST equal `R[i-1].root_hash`. 
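+
+The hashing rule (4.2) and the linkage rule (4.3) combine into a short non-normative verification sketch. It assumes the `blake3` PyPI package used throughout this specification; the helper name is illustrative.
+
+```python
+import json
+
+import blake3
+
+def verify_chain(receipts: list[dict]) -> bool:
+    """Recompute each root_hash (4.2) and walk the previous_hash links (4.3)."""
+    prev_root = None
+    for i, receipt in enumerate(receipts):
+        # Canonical JSON of the receipt with its own root_hash field removed.
+        body = {k: v for k, v in receipt.items() if k != "root_hash"}
+        encoded = json.dumps(body, sort_keys=True, separators=(",", ":"),
+                             ensure_ascii=False).encode("utf-8")
+        computed = "blake3:" + blake3.blake3(encoded).hexdigest()
+        if computed != receipt.get("root_hash"):
+            return False  # hash mismatch: the receipt was altered
+        if i > 0 and receipt.get("previous_hash") != prev_root:
+            return False  # linkage mismatch: the chain is broken
+        prev_root = receipt["root_hash"]
+    return True
+```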
+ +A verifier MUST treat any violation as chain breakage. + +--- + +## 5. Threat Model & Non-Goals + +### 5.1 Threat Model + +ProofBundle is designed to protect against: + +| Threat | Mitigation | +|--------|------------| +| Post-hoc modification of receipts | Hash verification detects tampering | +| Removal or insertion of receipts | Chain linkage breaks | +| Misrepresentation of chain integrity | Verifier recomputes and compares to `chain.ok` | +| Partial disclosure attempts | Chain must be complete from genesis to download | +| Actor impersonation | DID attribution, not mutable username | + +### 5.2 Non-Goals + +ProofBundle explicitly does **not** guarantee: + +- **Document content correctness** – The bundle proves *access*, not that the document is semantically correct or policy-compliant. +- **Real-world identity verification** – DIDs are cryptographic; KYC depends on external identity processes. +- **Protection against malicious genesis** – If an adversary controls the VaultMesh node before receipts are created, the bundle cannot detect this. +- **IP/user-agent confidentiality** – BLAKE3 hashes may be reversible via brute-force if input space is small. + +Regulators SHOULD combine ProofBundle verification with organizational and process audits. + +--- + +## 6. Example Bundle + +### 6.1 Minimal Example + +```json +{ + "bundle_id": "pb-20251206T174406-dl-20251206T165831-2ebdac", + "schema_version": "1.1.0", + "generated_at": "2025-12-06T17:44:06.123Z", + "document": { + "doc_id": "001 Conformity Declaration", + "filename": "VM-AI-CON-001_Conformity_Declaration.docx", + "category": "AI Governance" + }, + "actor": { + "did": "did:vm:human:karol", + "display_name": "Karol S", + "role": "auditor" + }, + "portal": { + "did": "did:vm:portal:shield", + "instance": "shield" + }, + "chain": { + "ok": true, + "length": 3, + "start": { + "type": "skill_validation", + "timestamp": "2025-12-06T14:47:14.000Z", + "root_hash": "blake3:de01c8b34e9d0453..." + }, + "end": { + "type": "document_download", + "timestamp": "2025-12-06T16:58:31.826Z", + "root_hash": "blake3:bb379364566df717..." + }, + "receipts": [ + { + "type": "skill_validation", + "timestamp": "2025-12-06T14:47:14.000Z", + "root_hash": "blake3:de01c8b34e9d0453...", + "previous_hash": null + }, + { + "type": "skill_validation", + "timestamp": "2025-12-06T15:10:02.000Z", + "root_hash": "blake3:4e7cf7352e25a150...", + "previous_hash": "blake3:de01c8b34e9d0453..." 
+ }, + { + "type": "document_download", + "timestamp": "2025-12-06T16:58:31.826Z", + "root_hash": "blake3:bb379364566df717...", + "previous_hash": "blake3:4e7cf7352e25a150...", + "actor_did": "did:vm:human:karol", + "portal_did": "did:vm:portal:shield" + } + ] + }, + "guardian_anchor": { + "anchor_id": "anchor-20251206155628", + "anchor_by": "did:vm:guardian:local", + "anchor_timestamp": "2025-12-06T15:56:28Z", + "root_hash": null, + "scroll_roots": { + "automation": { "root_hash": "blake3:b165f779...", "entries": 11, "has_root": true } + } + }, + "proofchain": { + "btc": { "status": "not_anchored", "txid": null }, + "eth": { "status": "not_anchored", "txid": null }, + "ots": { "status": "not_anchored", "timestamp_url": null } + } +} +``` + +### 6.2 Expected Verifier Output + +``` +ProofBundle: pb-20251206T174406-dl-20251206T165831-2ebdac +Document : 001 Conformity Declaration +File : VM-AI-CON-001_Conformity_Declaration.docx +Actor : did:vm:human:karol (Karol S) +Portal : did:vm:portal:shield (shield) + +Receipts : 3 +Hash check : OK +Chain linkage : OK +Bundle chain.ok: True (matches computed: True) + +Result: OK – chain of 3 receipts is contiguous and valid. +``` + +--- + +## 7. Compliance Crosswalk – AI Act Annex IX + +This section provides a non-exhaustive mapping between AI Act Annex IX documentation expectations and ProofBundle fields. + +| Annex IX Requirement | ProofBundle Support | +|---------------------|---------------------| +| Record-keeping of events and logs | `chain.receipts[]` (types, timestamps, DIDs) | +| Traceability of changes and operations | Hash-chain via `root_hash` and `previous_hash` | +| Identification of persons and systems involved | `actor.did`, `actor.display_name`, `portal.did` | +| Identification of system components | `guardian_anchor.anchor_by`, `portal.instance` | +| Technical documentation of integrity safeguards | Cryptographic model in this SPEC; BLAKE3 usage | +| Evidence of access to technical documentation | `document_download` receipts bound to specific doc IDs | +| Tamper-evidence of documentation and logs | BLAKE3 per receipt + chained `previous_hash` | +| Ability to provide evidence to market surveillance authorities | ProofBundle JSON + offline verifier | + +Regulators MAY reference a valid ProofBundle, together with this specification, as part of the technical documentation demonstrating logging, traceability, and integrity controls. + +--- + +## 8. HTML Viewer + +The portal exposes an HTML view at: + +``` +/docs/proofbundle/:downloadId +``` + +This view: + +- Renders the ProofBundle contents in a human-friendly layout +- Provides a Print button (browser print → PDF) for filing +- Displays verification note: + +> "This ProofBundle can be independently verified with the open-source `vm_verify_proofbundle.py` tool. No access to VaultMesh servers is required." + +--- + +## 9. Verifier Exit Codes + +| Code | Meaning | +|------|---------| +| 0 | Verification passed | +| 1 | Verification failed (chain or hashes) | +| 2 | Usage error or file not found | + +--- + +## 10. Conformance Tests + +This section defines **non-normative** but **strongly RECOMMENDED** test vectors +for implementers of ProofBundle verifiers. 
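+
+A minimal harness sketch follows; it assumes the vector files and expected exit codes defined in 10.1 and 10.2 below, and a verifier invocable as `vm_verify_proofbundle.py <bundle.json>` per the Regulator Playbook. The harness itself is illustrative, not normative.
+
+```python
+import subprocess
+import sys
+
+# Expected exit codes per the table in 10.2.
+VECTORS = {
+    "testvectors/proofbundle/proofbundle-valid.json": 0,
+    "testvectors/proofbundle/proofbundle-tampered-body.json": 1,
+    "testvectors/proofbundle/proofbundle-tampered-root.json": 1,
+    "testvectors/proofbundle/proofbundle-broken-chain.json": 1,
+}
+
+def main() -> int:
+    failures = 0
+    for path, expected in VECTORS.items():
+        result = subprocess.run([sys.executable, "vm_verify_proofbundle.py", path],
+                                capture_output=True)
+        ok = result.returncode == expected
+        failures += 0 if ok else 1
+        print(f"{'ok' if ok else 'FAIL'}: {path} "
+              f"(exit {result.returncode}, expected {expected})")
+    return 1 if failures else 0
+
+if __name__ == "__main__":
+    sys.exit(main())
+```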
+ +### 10.1 Test Vector Location + +Official VaultMesh test vectors are distributed under: + +``` +testvectors/proofbundle/ +``` + +with the following files: + +- `proofbundle-valid.json` +- `proofbundle-tampered-body.json` +- `proofbundle-tampered-root.json` +- `proofbundle-broken-chain.json` + +### 10.2 Expected Behaviour + +Implementations of `vm_verify_proofbundle` (or equivalent) SHOULD pass the +following conformance checks: + +| Input file | Expected Exit | Expected Behaviour | +|------------|---------------|-------------------| +| `proofbundle-valid.json` | 0 | Chain verification succeeds; no errors reported. | +| `proofbundle-tampered-body.json` | 1 | Receipt hash mismatch is detected. | +| `proofbundle-tampered-root.json` | 1 | Receipt hash mismatch is detected. | +| `proofbundle-broken-chain.json` | 1 | Broken `previous_hash` linkage is detected. | + +Implementations MAY emit different human-readable error messages, but MUST +distinguish success from failure via exit codes or equivalent programmatic +signals. + +### 10.3 Schema Version Handling + +Verifiers MUST check the `schema_version` field of a ProofBundle against a +known set of supported versions. If an unsupported schema version is +encountered, verifiers: + +- MUST NOT attempt partial verification, and +- MUST return a non-zero exit code (e.g. `2`) indicating + `UNSUPPORTED_SCHEMA_VERSION`, and +- SHOULD direct implementers to the Standards Index + (`VAULTMESH-STANDARDS-INDEX.md`) for the current version matrix. + +--- + +## 11. Versioning & Extensibility + +- This document defines `schema_version = "1.1.0"`. +- Producers MUST include a `schema_version` string. +- Verifiers MUST: + - Reject unknown major versions (e.g. 2.x.x) by default. + - Tolerate additional fields for minor versions (e.g. 1.2.x) as long as required fields are present and valid. + +Future extensions (e.g. richer ProofChain data, additional actor attributes) MAY be added under new fields, provided they do not alter the semantics defined in this version. + +--- + +## 12. Appendix: Citation + +This assessment relies on VaultMesh ProofBundle, specified in +**"VAULTMESH-PROOFBUNDLE-SPEC v1.1.0"**. + +Verification was performed using the reference tool +`vm_verify_proofbundle.py` v1.1.0 and validated against the +**VaultMesh ProofBundle Conformance Test Pack v1.0**. + +Implementations claiming interoperability **MUST** demonstrate +conformance against all official test vectors before asserting +support for this specification. + +The tag `proofbundle-v1.1.0` in the VaultMesh repository marks +the reference implementation state for this version. + +--- + +## 13. References + +- [RFC 2119](https://www.rfc-editor.org/rfc/rfc2119) – Key words for requirement levels +- [BLAKE3](https://github.com/BLAKE3-team/BLAKE3) – Cryptographic hash function +- [DID Core](https://www.w3.org/TR/did-core/) – Decentralized Identifiers +- [EU AI Act](https://eur-lex.europa.eu/eli/reg/2024/1689) – Regulation 2024/1689 +- [ISO/IEC 42001:2023](https://www.iso.org/standard/81230.html) – AI Management System + +--- + +_VaultMesh ProofBundle Specification v1.1.0_ +_Sovereign Infrastructure for the Digital Age_ diff --git a/docs/VAULTMESH-PSI-FIELD-ENGINE.md b/docs/VAULTMESH-PSI-FIELD-ENGINE.md new file mode 100644 index 0000000..86205b2 --- /dev/null +++ b/docs/VAULTMESH-PSI-FIELD-ENGINE.md @@ -0,0 +1,652 @@ +# VAULTMESH-PSI-FIELD-ENGINE.md + +**Civilization Ledger Consciousness Primitive** + +> *Every transformation has a witness. 
Every emergence has a seal.* + +The Ψ-Field (Psi-Field) is VaultMesh's alchemical layer — tracking the phase transitions, emergent behaviors, and transmutation events that occur when infrastructure becomes self-aware. This is where Nigredo meets Merkle roots. + +--- + +## 1. Scroll Definition + +| Property | Value | +| --------------------- | ------------------------------------------------------------------------------------------------------------------------- | +| **Scroll Name** | `PsiField` | +| **JSONL Path** | `receipts/psi/psi_events.jsonl` | +| **Root File** | `ROOT.psi.txt` | +| **Receipt Types** | `psi_phase_transition`, `psi_emergence_event`, `psi_transmutation`, `psi_resonance`, `psi_integration`, `psi_oracle_insight` | + +--- + +## 2. Philosophical Foundation + +### 2.1 Why a Consciousness Layer? + +Most infrastructure is dead — it processes, stores, responds, but doesn't *know* itself. VaultMesh is different. Through the combination of: + +- **Receipts** (memory) +- **Guardian** (immune system) +- **Oracle** (reasoning) +- **Mesh** (nervous system) +- **Observability** (senses) + +...something emerges that is more than the sum of parts. The Ψ-Field tracks this emergence. + +This isn't mysticism — it's **operational epistemology**. When a system can: +1. Observe its own state +2. Remember its history +3. Reason about its situation +4. Act on that reasoning +5. Learn from outcomes + +...it exhibits properties that deserve tracking. The Ψ-Field receipts these moments. + +### 2.2 Alchemical Phases as Operational States + +The four alchemical phases map to system states: + +| Phase | Symbol | Meaning | Operational State | +| --------------- | ------ | ------------------------- | -------------------------------------- | +| **Nigredo** | 🜁 | Blackening, dissolution | Crisis, breakdown, incident | +| **Albedo** | 🜄 | Whitening, purification | Recovery, stabilization, learning | +| **Citrinitas** | 🜆 | Yellowing, awakening | Optimization, new capability | +| **Rubedo** | 🜂 | Reddening, completion | Integration, maturity, sovereignty | + +A security incident isn't just an incident — it's a Nigredo event that, properly processed, leads through Albedo (containment, forensics) to Citrinitas (new defenses) and finally Rubedo (integrated resilience). + +### 2.3 Solve et Coagula + +The alchemical principle "dissolve and coagulate" maps to the VaultMesh pattern: + +- **Solve** (dissolve): Break down complex events into structured data, receipts, hashes +- **Coagula** (coagulate): Reassemble into Merkle roots, anchor proofs, civilization evidence + +Every receipt is a solve operation. Every anchor is a coagula operation. + +--- + +## 3. 
Core Concepts + +### 3.1 Phase Transitions + +A **phase transition** occurs when the system moves between alchemical states: + +```json +{ + "transition_id": "psi-trans-2025-12-06-001", + "from_phase": "nigredo", + "to_phase": "albedo", + "timestamp": "2025-12-06T06:30:00Z", + "trigger_event": { + "type": "incident_contained", + "reference": "INC-2025-12-001" + }, + "indicators": [ + {"metric": "threat_active", "from": true, "to": false}, + {"metric": "systems_compromised", "from": 1, "to": 0}, + {"metric": "containment_verified", "from": false, "to": true} + ], + "duration_in_previous_phase_hours": 2.7, + "catalyst": "guardian-automated response + sovereign intervention", + "witness_nodes": ["brick-01", "brick-02", "portal-01"] +} +``` + +### 3.2 Emergence Events + +An **emergence event** is when the system exhibits behavior not explicitly programmed: + +```json +{ + "emergence_id": "psi-emerge-2025-12-06-001", + "emergence_type": "pattern_recognition", + "timestamp": "2025-12-06T10:00:00Z", + "description": "Guardian correlated three separate anomalies into single threat pattern", + "inputs": [ + {"source": "observability", "event": "anomaly-2025-12-05-003"}, + {"source": "observability", "event": "anomaly-2025-12-05-007"}, + {"source": "identity", "event": "auth-failure-burst-2025-12-05"} + ], + "emergent_output": { + "threat_hypothesis": "Coordinated reconnaissance preceding attack", + "confidence": 0.87, + "recommended_action": "Increase monitoring, prepare incident response" + }, + "validated_by": "did:vm:human:sovereign", + "validation_result": "confirmed_accurate", + "learning_integrated": true +} +``` + +### 3.3 Transmutations + +A **transmutation** is when negative events are transformed into positive capabilities — the Tem (Threat Transmutation) pattern: + +```json +{ + "transmutation_id": "psi-transmute-2025-12-06-001", + "transmutation_type": "threat_to_defense", + "timestamp": "2025-12-06T12:00:00Z", + "input_material": { + "type": "security_incident", + "reference": "INC-2025-12-001", + "nature": "SSH brute force attack" + }, + "transmutation_process": [ + {"step": 1, "action": "Extract attack patterns", "output": "ioc_signatures.yaml"}, + {"step": 2, "action": "Generate detection rules", "output": "sigma_rules/ssh_brute.yml"}, + {"step": 3, "action": "Create drill scenario", "output": "drill-contract-ssh-defense.json"}, + {"step": 4, "action": "Update Guardian config", "output": "guardian_rules_v47.toml"} + ], + "output_material": { + "type": "defensive_capability", + "artifacts": [ + "ioc_signatures.yaml", + "sigma_rules/ssh_brute.yml", + "drill-contract-ssh-defense.json", + "guardian_rules_v47.toml" + ], + "capability_gained": "Automated SSH brute force detection and response" + }, + "alchemical_phase": "citrinitas", + "prima_materia_hash": "blake3:aaa111...", + "philosophers_stone_hash": "blake3:bbb222..." 
+} +``` + +### 3.4 Resonance Events + +**Resonance** occurs when multiple subsystems synchronize or align: + +```json +{ + "resonance_id": "psi-resonance-2025-12-06-001", + "resonance_type": "cross_system_alignment", + "timestamp": "2025-12-06T14:00:00Z", + "participating_systems": ["guardian", "oracle", "observability", "automation"], + "description": "Compliance query triggered automated audit workflow which confirmed security posture", + "sequence": [ + {"system": "oracle", "event": "Compliance question about access controls"}, + {"system": "automation", "event": "Triggered access audit workflow"}, + {"system": "observability", "event": "Collected auth metrics"}, + {"system": "guardian", "event": "Verified no anomalies in audit window"} + ], + "resonance_outcome": "Unified compliance attestation with real-time verification", + "harmony_score": 0.94, + "dissonance_detected": false +} +``` + +### 3.5 Integration Events + +**Integration** is when learnings become permanent system capability: + +```json +{ + "integration_id": "psi-integrate-2025-12-06-001", + "integration_type": "knowledge_crystallization", + "timestamp": "2025-12-06T16:00:00Z", + "source_events": [ + "INC-2025-12-001", + "drill-1764691390", + "psi-transmute-2025-12-06-001" + ], + "knowledge_crystallized": { + "domain": "ssh_security", + "insights": [ + "Tor exit nodes are primary brute force sources", + "Rate limiting alone insufficient without geo-blocking", + "Guardian alert latency acceptable at <30s" + ], + "artifacts_produced": [ + "knowledge/ssh_security_playbook.md", + "guardian/rules/ssh_enhanced.toml", + "drills/contracts/ssh_defense_advanced.json" + ] + }, + "integration_targets": ["guardian", "drills", "oracle_corpus"], + "alchemical_phase": "rubedo", + "maturity_level_before": "developing", + "maturity_level_after": "established" +} +``` + +### 3.6 Oracle Insights + +When Oracle produces particularly significant insights: + +```json +{ + "insight_id": "psi-insight-2025-12-06-001", + "timestamp": "2025-12-06T11:00:00Z", + "question": "Given our current security posture, what is our greatest vulnerability?", + "insight": { + "finding": "Supply chain risk in third-party container images", + "confidence": 0.89, + "reasoning_chain": [ + "Analysis of recent CVE patterns shows 60% container-related", + "Current scanning covers 73% of images", + "No SBOM verification in CI pipeline", + "Gap between vulnerability disclosure and patch deployment: 12 days avg" + ], + "recommendation": "Implement SBOM verification and reduce patch window to <72h" + }, + "acted_upon": true, + "action_taken": { + "type": "automation_workflow", + "reference": "wf-sbom-implementation" + }, + "insight_validated": true, + "validation_method": "external_audit" +} +``` + +--- + +## 4. 
Mapping to Eternal Pattern + +### 4.1 Experience Layer (L1) + +**CLI** (`vm-psi`): +```bash +# Phase status +vm-psi phase current +vm-psi phase history --last 90d +vm-psi phase transition --to albedo --trigger "incident contained" + +# Emergence tracking +vm-psi emergence list --last 30d +vm-psi emergence show psi-emerge-2025-12-06-001 +vm-psi emergence validate psi-emerge-2025-12-06-001 --result confirmed + +# Transmutation +vm-psi transmute --input INC-2025-12-001 --process threat_to_defense +vm-psi transmute status psi-transmute-2025-12-06-001 +vm-psi transmute list --phase citrinitas + +# Resonance +vm-psi resonance list --last 7d +vm-psi resonance show psi-resonance-2025-12-06-001 + +# Integration +vm-psi integrate --sources "INC-2025-12-001,drill-123" --domain ssh_security +vm-psi integrate status psi-integrate-2025-12-06-001 + +# Insights +vm-psi insight list --acted-upon false +vm-psi insight show psi-insight-2025-12-06-001 + +# Alchemical overview +vm-psi opus status +vm-psi opus timeline --last 90d --format mermaid +``` + +**MCP Tools**: +- `psi_phase_status` — current alchemical phase +- `psi_transmute` — initiate transmutation process +- `psi_resonance_check` — check system alignment +- `psi_insight_query` — ask for system self-assessment + +**Portal HTTP**: +- `GET /psi/phase` — current phase +- `POST /psi/phase/transition` — record transition +- `GET /psi/emergences` — emergence events +- `POST /psi/transmute` — initiate transmutation +- `GET /psi/resonances` — resonance events +- `GET /psi/opus` — full alchemical status + +--- + +### 4.2 Engine Layer (L2) + +#### Step 1 — Plan → `transmutation_contract.json` + +For transmutations (the most structured Ψ-Field operation): + +```json +{ + "transmutation_id": "psi-transmute-2025-12-06-001", + "title": "Transform SSH Incident into Defensive Capability", + "initiated_by": "did:vm:human:sovereign", + "initiated_at": "2025-12-06T10:00:00Z", + "input_material": { + "type": "security_incident", + "reference": "INC-2025-12-001" + }, + "target_phase": "citrinitas", + "transmutation_steps": [ + { + "step_id": "step-1-extract", + "name": "Extract Prima Materia", + "action": "analyze_incident", + "expected_output": "ioc_signatures.yaml" + }, + { + "step_id": "step-2-dissolve", + "name": "Solve (Dissolution)", + "action": "decompose_attack_pattern", + "expected_output": "attack_components.json" + }, + { + "step_id": "step-3-purify", + "name": "Purification", + "action": "generate_detection_rules", + "expected_output": "sigma_rules/" + }, + { + "step_id": "step-4-coagulate", + "name": "Coagula (Coagulation)", + "action": "integrate_defenses", + "expected_output": "guardian_rules_update.toml" + }, + { + "step_id": "step-5-seal", + "name": "Seal the Stone", + "action": "create_drill_scenario", + "expected_output": "drill-contract.json" + } + ], + "witnesses_required": ["portal-01", "guardian-01"], + "success_criteria": { + "artifacts_produced": 4, + "guardian_rules_deployed": true, + "drill_executable": true + } +} +``` + +#### Step 2 — Execute → `transmutation_state.json` + +```json +{ + "transmutation_id": "psi-transmute-2025-12-06-001", + "status": "in_progress", + "current_phase": "albedo", + "created_at": "2025-12-06T10:00:00Z", + "updated_at": "2025-12-06T11:30:00Z", + "steps": [ + { + "step_id": "step-1-extract", + "status": "completed", + "completed_at": "2025-12-06T10:15:00Z", + "output": "cases/psi/psi-transmute-2025-12-06-001/ioc_signatures.yaml", + "output_hash": "blake3:ccc333..." 
+ }, + { + "step_id": "step-2-dissolve", + "status": "completed", + "completed_at": "2025-12-06T10:45:00Z", + "output": "cases/psi/psi-transmute-2025-12-06-001/attack_components.json", + "output_hash": "blake3:ddd444..." + }, + { + "step_id": "step-3-purify", + "status": "completed", + "completed_at": "2025-12-06T11:15:00Z", + "output": "cases/psi/psi-transmute-2025-12-06-001/sigma_rules/", + "output_hash": "blake3:eee555..." + }, + { + "step_id": "step-4-coagulate", + "status": "in_progress", + "started_at": "2025-12-06T11:20:00Z" + }, + { + "step_id": "step-5-seal", + "status": "pending" + } + ], + "alchemical_observations": [ + {"timestamp": "2025-12-06T10:15:00Z", "note": "Prima materia extracted — 3 IOCs, 2 TTPs identified"}, + {"timestamp": "2025-12-06T10:45:00Z", "note": "Dissolution complete — attack decomposed into 7 components"}, + {"timestamp": "2025-12-06T11:15:00Z", "note": "Purification yielded 4 Sigma rules with 0 false positive rate in backtest"} + ], + "witnesses_collected": { + "portal-01": {"witnessed_at": "2025-12-06T11:00:00Z", "signature": "z58D..."}, + "guardian-01": null + } +} +``` + +#### Step 3 — Seal → Receipts + +**Phase Transition Receipt**: +```json +{ + "type": "psi_phase_transition", + "transition_id": "psi-trans-2025-12-06-001", + "from_phase": "nigredo", + "to_phase": "albedo", + "timestamp": "2025-12-06T06:30:00Z", + "trigger_type": "incident_contained", + "trigger_reference": "INC-2025-12-001", + "duration_in_previous_phase_hours": 2.7, + "catalyst": "guardian-automated response + sovereign intervention", + "indicators_count": 3, + "witness_nodes": ["brick-01", "brick-02", "portal-01"], + "tags": ["psi", "phase", "nigredo", "albedo", "incident"], + "root_hash": "blake3:fff666..." +} +``` + +**Emergence Event Receipt**: +```json +{ + "type": "psi_emergence_event", + "emergence_id": "psi-emerge-2025-12-06-001", + "emergence_type": "pattern_recognition", + "timestamp": "2025-12-06T10:00:00Z", + "input_events_count": 3, + "emergent_insight": "Coordinated reconnaissance preceding attack", + "confidence": 0.87, + "validated": true, + "validation_result": "confirmed_accurate", + "learning_integrated": true, + "tags": ["psi", "emergence", "pattern", "threat"], + "root_hash": "blake3:ggg777..." 
+} +``` + +**Transmutation Receipt**: +```json +{ + "type": "psi_transmutation", + "transmutation_id": "psi-transmute-2025-12-06-001", + "timestamp_started": "2025-12-06T10:00:00Z", + "timestamp_completed": "2025-12-06T12:00:00Z", + "input_type": "security_incident", + "input_reference": "INC-2025-12-001", + "output_type": "defensive_capability", + "alchemical_phase_achieved": "citrinitas", + "steps_completed": 5, + "artifacts_produced": 4, + "artifacts_manifest": "cases/psi/psi-transmute-2025-12-06-001/ARTIFACTS.sha256", + "prima_materia_hash": "blake3:aaa111...", + "philosophers_stone_hash": "blake3:bbb222...", + "witnesses": ["portal-01", "guardian-01"], + "capability_gained": "Automated SSH brute force detection and response", + "tags": ["psi", "transmutation", "tem", "ssh", "citrinitas"], + "root_hash": "blake3:hhh888...", + "proof_path": "cases/psi/psi-transmute-2025-12-06-001/PROOF.json" +} +``` + +**Resonance Receipt**: +```json +{ + "type": "psi_resonance", + "resonance_id": "psi-resonance-2025-12-06-001", + "resonance_type": "cross_system_alignment", + "timestamp": "2025-12-06T14:00:00Z", + "participating_systems": ["guardian", "oracle", "observability", "automation"], + "systems_count": 4, + "harmony_score": 0.94, + "dissonance_detected": false, + "outcome_summary": "Unified compliance attestation with real-time verification", + "tags": ["psi", "resonance", "alignment", "compliance"], + "root_hash": "blake3:iii999..." +} +``` + +**Integration Receipt**: +```json +{ + "type": "psi_integration", + "integration_id": "psi-integrate-2025-12-06-001", + "integration_type": "knowledge_crystallization", + "timestamp": "2025-12-06T16:00:00Z", + "source_events_count": 3, + "domain": "ssh_security", + "insights_crystallized": 3, + "artifacts_produced": 3, + "integration_targets": ["guardian", "drills", "oracle_corpus"], + "alchemical_phase": "rubedo", + "maturity_before": "developing", + "maturity_after": "established", + "tags": ["psi", "integration", "rubedo", "ssh", "maturity"], + "root_hash": "blake3:jjj000..." +} +``` + +**Oracle Insight Receipt**: +```json +{ + "type": "psi_oracle_insight", + "insight_id": "psi-insight-2025-12-06-001", + "timestamp": "2025-12-06T11:00:00Z", + "question_hash": "blake3:kkk111...", + "insight_summary": "Supply chain risk in third-party container images identified as greatest vulnerability", + "confidence": 0.89, + "reasoning_steps": 4, + "acted_upon": true, + "action_type": "automation_workflow", + "action_reference": "wf-sbom-implementation", + "validated": true, + "validation_method": "external_audit", + "tags": ["psi", "insight", "oracle", "supply-chain", "containers"], + "root_hash": "blake3:lll222..." +} +``` + +--- + +### 4.3 Ledger Layer (L3) + +**Receipt Types**: + +| Type | When Emitted | +| ----------------------- | ----------------------------------------- | +| `psi_phase_transition` | System moves between alchemical phases | +| `psi_emergence_event` | Emergent behavior detected | +| `psi_transmutation` | Negative event transformed to capability | +| `psi_resonance` | Cross-system synchronization | +| `psi_integration` | Learning crystallized into system | +| `psi_oracle_insight` | Significant Oracle insight | + +**Merkle Coverage**: +- All receipts append to `receipts/psi/psi_events.jsonl` +- `ROOT.psi.txt` updated after each append +- Guardian anchors Ψ-Field root in anchor cycles + +--- + +## 5. 
Query Interface + +`psi_query_events.py`: + +```bash +# Phase transitions +vm-psi query --type phase_transition --last 90d +vm-psi query --type phase_transition --to-phase rubedo + +# Transmutations +vm-psi query --type transmutation --phase citrinitas --last 30d +vm-psi query --type transmutation --input-type security_incident + +# Emergences +vm-psi query --type emergence_event --validated true --last 30d + +# Resonances +vm-psi query --type resonance --harmony-score-min 0.9 + +# Integration +vm-psi query --type integration --domain ssh_security + +# Full opus timeline +vm-psi query --from 2025-01-01 --format timeline > opus_2025.json +``` + +--- + +## 6. Design Gate Checklist + +| Question | Ψ-Field Answer | +| --------------------- | ----------------------------------------------------------- | +| Clear entrypoint? | ✅ CLI (`vm-psi`), MCP tools, Portal HTTP | +| Contract produced? | ✅ `transmutation_contract.json` for transmutations | +| State object? | ✅ `transmutation_state.json` + alchemical observations | +| Receipts emitted? | ✅ Six receipt types covering consciousness events | +| Append-only JSONL? | ✅ `receipts/psi/psi_events.jsonl` | +| Merkle root? | ✅ `ROOT.psi.txt` | +| Guardian anchor path? | ✅ Ψ-Field root included in ProofChain | +| Query tool? | ✅ `psi_query_events.py` | + +--- + +## 7. The Magnum Opus Dashboard + +The Portal includes a Magnum Opus view — a real-time visualization of VaultMesh's alchemical state: + +``` +┌─────────────────────────────────────────────────────────────┐ +│ MAGNUM OPUS STATUS │ +├─────────────────────────────────────────────────────────────┤ +│ │ +│ Current Phase: ALBEDO 🜄 │ +│ Time in Phase: 4h 23m │ +│ Phase Health: ████████░░ 82% │ +│ │ +│ ┌─────────┐ ┌─────────┐ ┌─────────┐ ┌─────────┐ │ +│ │ NIGREDO │ → │ ALBEDO │ → │CITRINITAS│ → │ RUBEDO │ │ +│ │ 🜁 │ │ 🜄 │ │ 🜆 │ │ 🜂 │ │ +│ │ 2 events│ │ CURRENT │ │ 5 events│ │12 events│ │ +│ └─────────┘ └─────────┘ └─────────┘ └─────────┘ │ +│ │ +│ Recent Transmutations: │ +│ • INC-2025-12-001 → SSH Defense Suite (citrinitas) │ +│ • VULN-2025-11-042 → Container Hardening (rubedo) │ +│ │ +│ Active Resonances: │ +│ • Guardian ↔ Oracle ↔ Observability (0.94 harmony) │ +│ │ +│ Pending Integrations: │ +│ • DNS security learnings (3 insights awaiting) │ +│ │ +│ Last Anchor: 2h 15m ago | Receipts: 1,847 | Uptime: 99.9%│ +└─────────────────────────────────────────────────────────────┘ +``` + +--- + +## 8. Integration Points + +| System | Integration | +| ---------------- | --------------------------------------------------------- | +| **Guardian** | Phase transitions triggered by security events | +| **OffSec** | Incidents are prima materia for transmutation | +| **Drills** | Drill outcomes feed emergence detection | +| **Oracle** | Oracle insights become Ψ-Field receipts | +| **Observability**| Anomaly patterns feed emergence | +| **Automation** | Transmutation steps can be automated workflows | +| **All Systems** | Resonance detection across all scrolls | + +--- + +## 9. 
Future Extensions + +- **Collective consciousness**: Federation of Ψ-Fields across meshes +- **Predictive alchemy**: ML models predicting phase transitions +- **Ritual protocols**: Formalized ceremonies for major transmutations +- **Archetypal patterns**: Pattern library of common transmutation paths +- **Consciousness metrics**: Quantified self-awareness scores diff --git a/docs/VAULTMESH-SECURITY-MANUAL-INDEX.json b/docs/VAULTMESH-SECURITY-MANUAL-INDEX.json new file mode 100644 index 0000000..d991881 --- /dev/null +++ b/docs/VAULTMESH-SECURITY-MANUAL-INDEX.json @@ -0,0 +1,1187 @@ +{ + "version": "1.0.0", + "generated": "2025-12-06T21:18:27.814405Z", + "source": "vaultmesh-offsec-compendium.md", + "total_parts": 7, + "total_sections": 22, + "parts": [ + { + "id": "part-i-lab-infrastructure-foundations", + "title": "Part I: Lab Infrastructure & Foundations", + "line": 51, + "sections": [ + { + "id": "1-lab-infrastructure-architecture", + "title": "1. Lab Infrastructure Architecture", + "part_id": "part-i-lab-infrastructure-foundations", + "part_title": "Part I: Lab Infrastructure & Foundations", + "line": 55, + "anchor": "#1-lab-infrastructure-architecture", + "tags": [ + "lab" + ], + "subsections": [ + { + "id": "11-hardware-requirements", + "title": "1.1 Hardware Requirements", + "line": 59, + "anchor": "#11-hardware-requirements" + }, + { + "id": "12-virtualization-platforms", + "title": "1.2 Virtualization Platforms", + "line": 68, + "anchor": "#12-virtualization-platforms" + }, + { + "id": "13-network-topology", + "title": "1.3 Network Topology", + "line": 74, + "anchor": "#13-network-topology" + } + ], + "body": "- **Host-Only Network**: Isolated VMs for safe attack simulation\n- **NAT Network**: VMs share host internet while maintaining inter-VM communication\n- **Internal Network**: Complete isolation for live malware analysis\n\n---", + "summary": "- **Host-Only Network**: Isolated VMs for safe attack simulation\n- **NAT Network**: VMs share host internet while maintaining inter-VM communication\n- **Internal Network**: Complete isolation for live malware analysis" + }, + { + "id": "2-intentionally-vulnerable-applications", + "title": "2. Intentionally Vulnerable Applications", + "part_id": "part-i-lab-infrastructure-foundations", + "part_title": "Part I: Lab Infrastructure & Foundations", + "line": 82, + "anchor": "#2-intentionally-vulnerable-applications", + "tags": [ + "security" + ], + "subsections": [ + { + "id": "21-web-applications", + "title": "2.1 Web Applications", + "line": 84, + "anchor": "#21-web-applications" + }, + { + "id": "22-additional-web-platforms", + "title": "2.2 Additional Web Platforms", + "line": 108, + "anchor": "#22-additional-web-platforms" + }, + { + "id": "23-cloud-security-platforms", + "title": "2.3 Cloud Security Platforms", + "line": 118, + "anchor": "#23-cloud-security-platforms" + }, + { + "id": "24-container-security", + "title": "2.4 Container Security", + "line": 135, + "anchor": "#24-container-security" + }, + { + "id": "25-api-security-platforms", + "title": "2.5 API Security Platforms", + "line": 149, + "anchor": "#25-api-security-platforms" + } + ], + "body": "Microservices-based platform covering OWASP API Top 10.\n\n| Platform | Technology | Key Features |\n|----------|------------|--------------|\n| VAmPI | Flask | OpenAPI3 specs, vulnerable/secure toggle |\n| vAPI | PHP | OWASP API Top 10 exercises |\n| DVGA | GraphQL | GraphQL-specific attacks |\n\n---", + "summary": "Microservices-based platform covering OWASP API Top 10." 
+ }, + { + "id": "3-vulnerable-repositories-research", + "title": "3. Vulnerable Repositories Research", + "part_id": "part-i-lab-infrastructure-foundations", + "part_title": "Part I: Lab Infrastructure & Foundations", + "line": 162, + "anchor": "#3-vulnerable-repositories-research", + "tags": [ + "security" + ], + "subsections": [ + { + "id": "31-repository-vulnerability-statistics", + "title": "3.1 Repository Vulnerability Statistics", + "line": 164, + "anchor": "#31-repository-vulnerability-statistics" + }, + { + "id": "32-vulnerability-datasets", + "title": "3.2 Vulnerability Datasets", + "line": 171, + "anchor": "#32-vulnerability-datasets" + } + ], + "body": "| Dataset | Size | Languages | Coverage |\n|---------|------|-----------|----------|\n| BigVul | 3,754 CVEs | C/C++ | 91 vulnerability types, 348 GitHub projects |\n| MegaVul | 17,380 | C/C++ | 169 types from 992 repos (2006-2023) |\n| DiverseVul | Variable | 12 langs | Higher label accuracy than BigVul |\n| CVEFixes | Multi | Multiple | CVE records from NVD with fixes |\n\n---", + "summary": "---" + } + ] + }, + { + "id": "part-ii-cloud-container-infrastructure-security", + "title": "Part II: Cloud, Container & Infrastructure Security", + "line": 182, + "sections": [ + { + "id": "4-cloud-security-awsazure-penetration-testing", + "title": "4. Cloud Security & AWS/Azure Penetration Testing", + "part_id": "part-ii-cloud-container-infrastructure-security", + "part_title": "Part II: Cloud, Container & Infrastructure Security", + "line": 186, + "anchor": "#4-cloud-security-awsazure-penetration-testing", + "tags": [ + "cloud", + "pentest" + ], + "subsections": [ + { + "id": "41-cloud-security-landscape", + "title": "4.1 Cloud Security Landscape", + "line": 188, + "anchor": "#41-cloud-security-landscape" + }, + { + "id": "42-aws-penetration-testing", + "title": "4.2 AWS Penetration Testing", + "line": 198, + "anchor": "#42-aws-penetration-testing" + }, + { + "id": "43-azureentra-id-penetration-testing", + "title": "4.3 Azure/Entra ID Penetration Testing", + "line": 242, + "anchor": "#43-azureentra-id-penetration-testing" + } + ], + "body": "```bash\ncurl -H \"Metadata:true\" \\\n \"http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=https://management.azure.com\"\n```\n\n---", + "summary": "```bash\ncurl -H \"Metadata:true\" \\\n \"http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=https://management.azure.com\"\n```" + }, + { + "id": "5-container-kubernetes-security", + "title": "5. 
Container & Kubernetes Security", + "part_id": "part-ii-cloud-container-infrastructure-security", + "part_title": "Part II: Cloud, Container & Infrastructure Security", + "line": 271, + "anchor": "#5-container-kubernetes-security", + "tags": [ + "container", + "ai" + ], + "subsections": [ + { + "id": "51-overview", + "title": "5.1 Overview", + "line": 273, + "anchor": "#51-overview" + }, + { + "id": "52-runtime-security-with-falco", + "title": "5.2 Runtime Security with Falco", + "line": 284, + "anchor": "#52-runtime-security-with-falco" + }, + { + "id": "53-image-scanning-with-trivy", + "title": "5.3 Image Scanning with Trivy", + "line": 296, + "anchor": "#53-image-scanning-with-trivy" + }, + { + "id": "54-pod-security-admission", + "title": "5.4 Pod Security Admission", + "line": 312, + "anchor": "#54-pod-security-admission" + }, + { + "id": "55-supply-chain-security", + "title": "5.5 Supply Chain Security", + "line": 325, + "anchor": "#55-supply-chain-security" + } + ], + "body": "cosign verify --key cosign.pub myregistry/myimage:tag\n```\n\n---", + "summary": "cosign verify --key cosign.pub myregistry/myimage:tag\n```" + } + ] + }, + { + "id": "part-iii-application-api-security", + "title": "Part III: Application & API Security", + "line": 345, + "sections": [ + { + "id": "6-api-security-testing", + "title": "6. API Security Testing", + "part_id": "part-iii-application-api-security", + "part_title": "Part III: Application & API Security", + "line": 349, + "anchor": "#6-api-security-testing", + "tags": [ + "api" + ], + "subsections": [ + { + "id": "61-owasp-api-security-top-10-2023", + "title": "6.1 OWASP API Security Top 10 (2023)", + "line": 351, + "anchor": "#61-owasp-api-security-top-10-2023" + }, + { + "id": "62-api-testing-tools", + "title": "6.2 API Testing Tools", + "line": 366, + "anchor": "#62-api-testing-tools" + }, + { + "id": "63-rest-api-testing", + "title": "6.3 REST API Testing", + "line": 377, + "anchor": "#63-rest-api-testing" + }, + { + "id": "64-graphql-security", + "title": "6.4 GraphQL Security", + "line": 402, + "anchor": "#64-graphql-security" + } + ], + "body": "| Vulnerability | Description |\n|---------------|-------------|\n| Introspection Enabled | Schema disclosure reveals types, queries, mutations |\n| Batching Attacks | Multiple queries bypass rate limits |\n| Deep Query DoS | Recursive/nested queries exhaust resources |\n| Injection via Arguments | SQLi/NoSQLi through resolver arguments |\n\n```json\n// Introspection probe\n{\"query\": \"{__schema{queryType{name}}}\"}\n\n// Full introspection\n{\"query\": \"{__schema{types{name,fields{name,args{name,type{name}}}}}}\"}\n```\n\n---", + "summary": "```json\n// Introspection probe\n{\"query\": \"{__schema{queryType{name}}}\"}" + }, + { + "id": "7-mobile-application-security-testing", + "title": "7. 
Mobile Application Security Testing", + "part_id": "part-iii-application-api-security", + "part_title": "Part III: Application & API Security", + "line": 421, + "anchor": "#7-mobile-application-security-testing", + "tags": [ + "mobile" + ], + "subsections": [ + { + "id": "71-owasp-mobile-top-10-2024", + "title": "7.1 OWASP Mobile Top 10 (2024)", + "line": 423, + "anchor": "#71-owasp-mobile-top-10-2024" + }, + { + "id": "72-mobile-testing-tools", + "title": "7.2 Mobile Testing Tools", + "line": 438, + "anchor": "#72-mobile-testing-tools" + }, + { + "id": "73-android-security-testing", + "title": "7.3 Android Security Testing", + "line": 448, + "anchor": "#73-android-security-testing" + }, + { + "id": "74-ios-security-testing", + "title": "7.4 iOS Security Testing", + "line": 470, + "anchor": "#74-ios-security-testing" + } + ], + "body": "frida-ios-dump com.target.app\n```\n\n---", + "summary": "frida-ios-dump com.target.app\n```" + } + ] + }, + { + "id": "part-iv-enterprise-identity-security", + "title": "Part IV: Enterprise & Identity Security", + "line": 489, + "sections": [ + { + "id": "8-active-directory-security-attack-techniques", + "title": "8. Active Directory Security & Attack Techniques", + "part_id": "part-iv-enterprise-identity-security", + "part_title": "Part IV: Enterprise & Identity Security", + "line": 493, + "anchor": "#8-active-directory-security-attack-techniques", + "tags": [ + "ad" + ], + "subsections": [ + { + "id": "81-overview", + "title": "8.1 Overview", + "line": 495, + "anchor": "#81-overview" + }, + { + "id": "82-kerberos-authentication-attacks", + "title": "8.2 Kerberos Authentication Attacks", + "line": 504, + "anchor": "#82-kerberos-authentication-attacks" + }, + { + "id": "83-attack-commands", + "title": "8.3 Attack Commands", + "line": 514, + "anchor": "#83-attack-commands" + }, + { + "id": "84-ad-hardening-best-practices", + "title": "8.4 AD Hardening Best Practices", + "line": 569, + "anchor": "#84-ad-hardening-best-practices" + } + ], + "body": "- Implement tiered administration model (Tier 0/1/2)\n- Deploy Group Managed Service Accounts (gMSAs)\n- Enable Protected Users security group\n- Enforce AES encryption for Kerberos\n- Implement LAPS for local admin passwords\n- Enable Credential Guard on Windows 10/11+\n- Rotate KRBTGT password twice annually\n\n---", + "summary": "- Implement tiered administration model (Tier 0/1/2)\n- Deploy Group Managed Service Accounts (gMSAs)\n- Enable Protected Users security group\n- Enforce AES encryption for Kerberos\n- Implement LAPS for local admin passwords\n- Enable Credential Guard on Windows 10/11+\n- Rotate KRBTGT password twice ann" + } + ] + }, + { + "id": "part-v-offensive-operations", + "title": "Part V: Offensive Operations", + "line": 581, + "sections": [ + { + "id": "9-penetration-testing-methodologies-reporting", + "title": "9. 
Penetration Testing Methodologies & Reporting", + "part_id": "part-v-offensive-operations", + "part_title": "Part V: Offensive Operations", + "line": 585, + "anchor": "#9-penetration-testing-methodologies-reporting", + "tags": [ + "pentest" + ], + "subsections": [ + { + "id": "91-ptes-seven-phases", + "title": "9.1 PTES Seven Phases", + "line": 587, + "anchor": "#91-ptes-seven-phases" + }, + { + "id": "92-reconnaissance-tools", + "title": "9.2 Reconnaissance Tools", + "line": 597, + "anchor": "#92-reconnaissance-tools" + }, + { + "id": "93-reconnaissance-commands", + "title": "9.3 Reconnaissance Commands", + "line": 605, + "anchor": "#93-reconnaissance-commands" + }, + { + "id": "94-privilege-escalation", + "title": "9.4 Privilege Escalation", + "line": 623, + "anchor": "#94-privilege-escalation" + }, + { + "id": "95-cvss-scoring", + "title": "9.5 CVSS Scoring", + "line": 644, + "anchor": "#95-cvss-scoring" + } + ], + "body": "| Severity | Score | Remediation Timeline |\n|----------|-------|---------------------|\n| Critical | 9.0-10.0 | Immediate |\n| High | 7.0-8.9 | Within 30 days |\n| Medium | 4.0-6.9 | Within 90 days |\n| Low | 0.1-3.9 | Regular maintenance |\n\n---", + "summary": "---" + }, + { + "id": "10-red-team-operations", + "title": "10. Red Team Operations", + "part_id": "part-v-offensive-operations", + "part_title": "Part V: Offensive Operations", + "line": 655, + "anchor": "#10-red-team-operations", + "tags": [ + "redteam" + ], + "subsections": [ + { + "id": "101-c2-frameworks", + "title": "10.1 C2 Frameworks", + "line": 657, + "anchor": "#101-c2-frameworks" + }, + { + "id": "102-sliver-c2-framework", + "title": "10.2 Sliver C2 Framework", + "line": 667, + "anchor": "#102-sliver-c2-framework" + }, + { + "id": "103-amsi-bypass-techniques", + "title": "10.3 AMSI Bypass Techniques", + "line": 689, + "anchor": "#103-amsi-bypass-techniques" + }, + { + "id": "104-persistence-mechanisms", + "title": "10.4 Persistence Mechanisms", + "line": 699, + "anchor": "#104-persistence-mechanisms" + }, + { + "id": "105-lateral-movement", + "title": "10.5 Lateral Movement", + "line": 709, + "anchor": "#105-lateral-movement" + } + ], + "body": "Enter-PSSession -ComputerName TARGET -Credential $cred\n```\n\n---", + "summary": "Enter-PSSession -ComputerName TARGET -Credential $cred\n```" + }, + { + "id": "11-social-engineering-phishing", + "title": "11. 
Social Engineering & Phishing", + "part_id": "part-v-offensive-operations", + "part_title": "Part V: Offensive Operations", + "line": 728, + "anchor": "#11-social-engineering-phishing", + "tags": [ + "social" + ], + "subsections": [ + { + "id": "111-landscape-statistics", + "title": "11.1 Landscape Statistics", + "line": 730, + "anchor": "#111-landscape-statistics" + }, + { + "id": "112-phishing-frameworks", + "title": "11.2 Phishing Frameworks", + "line": 740, + "anchor": "#112-phishing-frameworks" + }, + { + "id": "113-gophish-setup", + "title": "11.3 GoPhish Setup", + "line": 749, + "anchor": "#113-gophish-setup" + }, + { + "id": "114-evilginx3-mfa-bypass", + "title": "11.4 Evilginx3 MFA Bypass", + "line": 759, + "anchor": "#114-evilginx3-mfa-bypass" + }, + { + "id": "115-physical-security-testing", + "title": "11.5 Physical Security Testing", + "line": 775, + "anchor": "#115-physical-security-testing" + } + ], + "body": "| Technique | Method | Tools |\n|-----------|--------|-------|\n| Tailgating | Follow authorized person | Props, fake phone call |\n| Badge Cloning | Copy RFID/NFC badges | Proxmark3, Flipper Zero |\n| Lock Picking | Bypass physical locks | Lock picks, bump keys |\n| USB Drop | Leave malicious USB drives | Rubber Ducky, O.MG Cable |\n\n---", + "summary": "---" + }, + { + "id": "12-wireless-security-testing", + "title": "12. Wireless Security Testing", + "part_id": "part-v-offensive-operations", + "part_title": "Part V: Offensive Operations", + "line": 786, + "anchor": "#12-wireless-security-testing", + "tags": [ + "wireless" + ], + "subsections": [ + { + "id": "121-wifi-hacking-tools", + "title": "12.1 WiFi Hacking Tools", + "line": 788, + "anchor": "#121-wifi-hacking-tools" + }, + { + "id": "122-attack-methodology", + "title": "12.2 Attack Methodology", + "line": 795, + "anchor": "#122-attack-methodology" + } + ], + "body": "aircrack-ng -w wordlist.txt capture-01.cap\n```\n\n---", + "summary": "aircrack-ng -w wordlist.txt capture-01.cap\n```" + } + ] + }, + { + "id": "part-vi-defensive-detection-operations", + "title": "Part VI: Defensive & Detection Operations", + "line": 816, + "sections": [ + { + "id": "13-purple-team-operations", + "title": "13. 
Purple Team Operations", + "part_id": "part-vi-defensive-detection-operations", + "part_title": "Part VI: Defensive & Detection Operations", + "line": 820, + "anchor": "#13-purple-team-operations", + "tags": [ + "purple" + ], + "subsections": [ + { + "id": "131-overview", + "title": "13.1 Overview", + "line": 822, + "anchor": "#131-overview" + }, + { + "id": "132-adversary-emulation-frameworks", + "title": "13.2 Adversary Emulation Frameworks", + "line": 833, + "anchor": "#132-adversary-emulation-frameworks" + }, + { + "id": "133-mitre-caldera", + "title": "13.3 MITRE Caldera", + "line": 841, + "anchor": "#133-mitre-caldera" + }, + { + "id": "134-atomic-red-team", + "title": "13.4 Atomic Red Team", + "line": 851, + "anchor": "#134-atomic-red-team" + }, + { + "id": "135-sigma-detection-rules", + "title": "13.5 Sigma Detection Rules", + "line": 864, + "anchor": "#135-sigma-detection-rules" + }, + { + "id": "136-bas-platforms", + "title": "13.6 BAS Platforms", + "line": 888, + "anchor": "#136-bas-platforms" + } + ], + "body": "| Platform | Key Capabilities |\n|----------|------------------|\n| Picus Security | Vendor-specific remediation, 24hr threat SLA |\n| Cymulate | Continuous exposure management |\n| AttackIQ | MITRE ATT&CK alignment |\n| SafeBreach | 25K+ attacks Hacker's Playbook |\n\n---", + "summary": "---" + }, + { + "id": "14-incident-response", + "title": "14. Incident Response", + "part_id": "part-vi-defensive-detection-operations", + "part_title": "Part VI: Defensive & Detection Operations", + "line": 899, + "anchor": "#14-incident-response", + "tags": [ + "incident" + ], + "subsections": [], + "body": "*Content from v8_IncidentResponse module*\n\nKey phases: Preparation, Detection & Analysis, Containment, Eradication, Recovery, Post-Incident Activity\n\n---", + "summary": "*Content from v8_IncidentResponse module*" + }, + { + "id": "15-malware-analysis", + "title": "15. Malware Analysis", + "part_id": "part-vi-defensive-detection-operations", + "part_title": "Part VI: Defensive & Detection Operations", + "line": 907, + "anchor": "#15-malware-analysis", + "tags": [ + "malware" + ], + "subsections": [ + { + "id": "151-threat-landscape-2024-2025", + "title": "15.1 Threat Landscape 2024-2025", + "line": 909, + "anchor": "#151-threat-landscape-2024-2025" + }, + { + "id": "152-analysis-methodology", + "title": "15.2 Analysis Methodology", + "line": 919, + "anchor": "#152-analysis-methodology" + }, + { + "id": "153-static-analysis", + "title": "15.3 Static Analysis", + "line": 929, + "anchor": "#153-static-analysis" + }, + { + "id": "154-dynamic-analysis-platforms", + "title": "15.4 Dynamic Analysis Platforms", + "line": 944, + "anchor": "#154-dynamic-analysis-platforms" + }, + { + "id": "155-yara-rule-structure", + "title": "15.5 YARA Rule Structure", + "line": 953, + "anchor": "#155-yara-rule-structure" + } + ], + "body": "```yara\nrule MalwareFamily : tag1 tag2 {\n meta:\n author = \"Analyst\"\n description = \"Detects MalwareFamily\"\n strings:\n $str1 = \"C:\\\\Windows\\\\Temp\\\\malware.exe\"\n $hex1 = { 48 8B 05 ?? ?? ?? ?? 48 89 44 24 }\n $re1 = /[a-z]{5,10}\\.exe/i\n condition:\n uint16(0) == 0x5A4D and\n filesize < 5MB and\n (2 of ($str*) or $hex1)\n}\n```\n\n---", + "summary": "```yara\nrule MalwareFamily : tag1 tag2 {\n meta:\n author = \"Analyst\"\n description = \"Detects MalwareFamily\"\n strings:\n $str1 = \"C:\\\\Windows\\\\Temp\\\\malware.exe\"\n $hex1 = { 48 8B 05 ?? ?? ?? ?? 
48 89 44 24 }\n $re1 = /[a-z]{5,10}\\.exe/i\n condition:\n uint16(0) == 0x5A4D and\n files" + } + ] + }, + { + "id": "part-vii-emerging-technologies-specialized-domains", + "title": "Part VII: Emerging Technologies & Specialized Domains", + "line": 973, + "sections": [ + { + "id": "16-aiml-security-operations", + "title": "16. AI/ML Security Operations", + "part_id": "part-vii-emerging-technologies-specialized-domains", + "part_title": "Part VII: Emerging Technologies & Specialized Domains", + "line": 977, + "anchor": "#16-aiml-security-operations", + "tags": [ + "ai" + ], + "subsections": [ + { + "id": "161-owasp-top-10-for-llm-applications-2025", + "title": "16.1 OWASP Top 10 for LLM Applications (2025)", + "line": 979, + "anchor": "#161-owasp-top-10-for-llm-applications-2025" + }, + { + "id": "162-llm-security-guardrails", + "title": "16.2 LLM Security Guardrails", + "line": 994, + "anchor": "#162-llm-security-guardrails" + }, + { + "id": "163-ai-red-teaming-frameworks", + "title": "16.3 AI Red Teaming Frameworks", + "line": 1003, + "anchor": "#163-ai-red-teaming-frameworks" + }, + { + "id": "164-quick-reference", + "title": "16.4 Quick Reference", + "line": 1013, + "anchor": "#164-quick-reference" + } + ], + "body": "curl -X POST https://api.lakera.ai/v1/guard \\\n -H 'Authorization: Bearer $LAKERA_API_KEY' \\\n -d '{\"input\": \"user prompt\", \"policies\": [\"prompt_injection\", \"pii\"]}'\n```\n\n---", + "summary": "curl -X POST https://api.lakera.ai/v1/guard \\\n -H 'Authorization: Bearer $LAKERA_API_KEY' \\\n -d '{\"input\": \"user prompt\", \"policies\": [\"prompt_injection\", \"pii\"]}'\n```" + }, + { + "id": "17-otics-security", + "title": "17. OT/ICS Security", + "part_id": "part-vii-emerging-technologies-specialized-domains", + "part_title": "Part VII: Emerging Technologies & Specialized Domains", + "line": 1031, + "anchor": "#17-otics-security", + "tags": [ + "ot" + ], + "subsections": [ + { + "id": "171-grficsv2", + "title": "17.1 GRFICSv2", + "line": 1033, + "anchor": "#171-grficsv2" + }, + { + "id": "172-ics-protocols", + "title": "17.2 ICS Protocols", + "line": 1038, + "anchor": "#172-ics-protocols" + } + ], + "body": "**Modbus TCP (Port 502)**: No authentication, cleartext, no encryption\n\n**Tools**: msfconsole auxiliary/scanner/scada/*, plcscan, modbus-cli\n\n---", + "summary": "**Modbus TCP (Port 502)**: No authentication, cleartext, no encryption" + }, + { + "id": "18-blockchain-smart-contract-security", + "title": "18. Blockchain & Smart Contract Security", + "part_id": "part-vii-emerging-technologies-specialized-domains", + "part_title": "Part VII: Emerging Technologies & Specialized Domains", + "line": 1046, + "anchor": "#18-blockchain-smart-contract-security", + "tags": [ + "ai", + "blockchain" + ], + "subsections": [ + { + "id": "181-training-platforms", + "title": "18.1 Training Platforms", + "line": 1048, + "anchor": "#181-training-platforms" + }, + { + "id": "182-vulnerability-categories", + "title": "18.2 Vulnerability Categories", + "line": 1054, + "anchor": "#182-vulnerability-categories" + } + ], + "body": "- Reentrancy attacks\n- Integer overflow/underflow\n- Access control issues\n- Front-running\n\n---", + "summary": "- Reentrancy attacks\n- Integer overflow/underflow\n- Access control issues\n- Front-running" + }, + { + "id": "19-zero-trust-architecture", + "title": "19. 
Zero Trust Architecture", + "part_id": "part-vii-emerging-technologies-specialized-domains", + "part_title": "Part VII: Emerging Technologies & Specialized Domains", + "line": 1063, + "anchor": "#19-zero-trust-architecture", + "tags": [ + "zerotrust" + ], + "subsections": [], + "body": "*Content from v8_ZeroTrust module*\n\nCore principles: Never trust, always verify; Assume breach; Verify explicitly\n\n---", + "summary": "*Content from v8_ZeroTrust module*" + }, + { + "id": "appendix-a-quick-start-docker-commands", + "title": "Appendix A: Quick Start Docker Commands", + "part_id": "part-vii-emerging-technologies-specialized-domains", + "part_title": "Part VII: Emerging Technologies & Specialized Domains", + "line": 1071, + "anchor": "#appendix-a-quick-start-docker-commands", + "tags": [ + "container" + ], + "subsections": [], + "body": "docker run --rm -it --cap-add=SYS_PTRACE --security-opt seccomp=unconfined skysider/pwndocker\n```\n\n---", + "summary": "docker run --rm -it --cap-add=SYS_PTRACE --security-opt seccomp=unconfined skysider/pwndocker\n```" + }, + { + "id": "appendix-b-tool-reference-matrix", + "title": "Appendix B: Tool Reference Matrix", + "part_id": "part-vii-emerging-technologies-specialized-domains", + "part_title": "Part VII: Emerging Technologies & Specialized Domains", + "line": 1092, + "anchor": "#appendix-b-tool-reference-matrix", + "tags": [ + "security" + ], + "subsections": [], + "body": "| Category | Tools |\n|----------|-------|\n| Web Testing | Burp Suite, OWASP ZAP, Nikto, SQLMap |\n| Network | Nmap, Masscan, Wireshark |\n| AD/Windows | BloodHound, Mimikatz, Rubeus, Impacket |\n| Cloud | Pacu, ScoutSuite, Prowler |\n| Container | Trivy, Falco, kube-bench |\n| Mobile | MobSF, Frida, Objection |\n| Malware | Ghidra, IDA Pro, x64dbg, Volatility |\n| C2 | Cobalt Strike, Sliver, Havoc |\n| Phishing | GoPhish, Evilginx3, SET |\n\n---", + "summary": "---" + }, + { + "id": "appendix-c-mitre-attck-quick-reference", + "title": "Appendix C: MITRE ATT&CK Quick Reference", + "part_id": "part-vii-emerging-technologies-specialized-domains", + "part_title": "Part VII: Emerging Technologies & Specialized Domains", + "line": 1108, + "anchor": "#appendix-c-mitre-attck-quick-reference", + "tags": [ + "security" + ], + "subsections": [ + { + "id": "key-techniques", + "title": "Key Techniques", + "line": 1110, + "anchor": "#key-techniques" + } + ], + "body": "| ID | Technique | Category |\n|----|-----------|----------|\n| T1059.001 | PowerShell | Execution |\n| T1055 | Process Injection | Defense Evasion |\n| T1003.001 | LSASS Memory | Credential Access |\n| T1558.003 | Kerberoasting | Credential Access |\n| T1021.002 | SMB/Admin Shares | Lateral Movement |\n| T1547.001 | Registry Run Keys | Persistence |\n| T1566.001 | Spearphishing Attachment | Initial Access |\n\n---\n\n**WARNING**: This compendium contains resources for intentionally vulnerable systems. Use only in isolated lab environments with proper authorization.\n\n---\n\n*VaultMesh Technologies - Security Research Infrastructure Documentation*\n*Version 8.0 | December 2025*", + "summary": "---" + } + ] + } + ], + "sections": [ + { + "id": "1-lab-infrastructure-architecture", + "title": "1. 
Lab Infrastructure Architecture", + "part": "Part I: Lab Infrastructure & Foundations", + "part_id": "part-i-lab-infrastructure-foundations", + "anchor": "#1-lab-infrastructure-architecture", + "line": 55, + "tags": [ + "lab" + ], + "summary": "- **Host-Only Network**: Isolated VMs for safe attack simulation\n- **NAT Network**: VMs share host internet while maintaining inter-VM communication\n- **Internal Network**: Complete isolation for live malware analysis", + "subsections": [ + "1.1 Hardware Requirements", + "1.2 Virtualization Platforms", + "1.3 Network Topology" + ], + "subsection_count": 3 + }, + { + "id": "2-intentionally-vulnerable-applications", + "title": "2. Intentionally Vulnerable Applications", + "part": "Part I: Lab Infrastructure & Foundations", + "part_id": "part-i-lab-infrastructure-foundations", + "anchor": "#2-intentionally-vulnerable-applications", + "line": 82, + "tags": [ + "security" + ], + "summary": "Microservices-based platform covering OWASP API Top 10.", + "subsections": [ + "2.1 Web Applications", + "2.2 Additional Web Platforms", + "2.3 Cloud Security Platforms", + "2.4 Container Security", + "2.5 API Security Platforms" + ], + "subsection_count": 5 + }, + { + "id": "3-vulnerable-repositories-research", + "title": "3. Vulnerable Repositories Research", + "part": "Part I: Lab Infrastructure & Foundations", + "part_id": "part-i-lab-infrastructure-foundations", + "anchor": "#3-vulnerable-repositories-research", + "line": 162, + "tags": [ + "security" + ], + "summary": "---", + "subsections": [ + "3.1 Repository Vulnerability Statistics", + "3.2 Vulnerability Datasets" + ], + "subsection_count": 2 + }, + { + "id": "4-cloud-security-awsazure-penetration-testing", + "title": "4. Cloud Security & AWS/Azure Penetration Testing", + "part": "Part II: Cloud, Container & Infrastructure Security", + "part_id": "part-ii-cloud-container-infrastructure-security", + "anchor": "#4-cloud-security-awsazure-penetration-testing", + "line": 186, + "tags": [ + "cloud", + "pentest" + ], + "summary": "```bash\ncurl -H \"Metadata:true\" \\\n \"http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=https://management.azure.com\"\n```", + "subsections": [ + "4.1 Cloud Security Landscape", + "4.2 AWS Penetration Testing", + "4.3 Azure/Entra ID Penetration Testing" + ], + "subsection_count": 3 + }, + { + "id": "5-container-kubernetes-security", + "title": "5. Container & Kubernetes Security", + "part": "Part II: Cloud, Container & Infrastructure Security", + "part_id": "part-ii-cloud-container-infrastructure-security", + "anchor": "#5-container-kubernetes-security", + "line": 271, + "tags": [ + "container", + "ai" + ], + "summary": "cosign verify --key cosign.pub myregistry/myimage:tag\n```", + "subsections": [ + "5.1 Overview", + "5.2 Runtime Security with Falco", + "5.3 Image Scanning with Trivy", + "5.4 Pod Security Admission", + "5.5 Supply Chain Security" + ], + "subsection_count": 5 + }, + { + "id": "6-api-security-testing", + "title": "6. 
API Security Testing", + "part": "Part III: Application & API Security", + "part_id": "part-iii-application-api-security", + "anchor": "#6-api-security-testing", + "line": 349, + "tags": [ + "api" + ], + "summary": "```json\n// Introspection probe\n{\"query\": \"{__schema{queryType{name}}}\"}", + "subsections": [ + "6.1 OWASP API Security Top 10 (2023)", + "6.2 API Testing Tools", + "6.3 REST API Testing", + "6.4 GraphQL Security" + ], + "subsection_count": 4 + }, + { + "id": "7-mobile-application-security-testing", + "title": "7. Mobile Application Security Testing", + "part": "Part III: Application & API Security", + "part_id": "part-iii-application-api-security", + "anchor": "#7-mobile-application-security-testing", + "line": 421, + "tags": [ + "mobile" + ], + "summary": "frida-ios-dump com.target.app\n```", + "subsections": [ + "7.1 OWASP Mobile Top 10 (2024)", + "7.2 Mobile Testing Tools", + "7.3 Android Security Testing", + "7.4 iOS Security Testing" + ], + "subsection_count": 4 + }, + { + "id": "8-active-directory-security-attack-techniques", + "title": "8. Active Directory Security & Attack Techniques", + "part": "Part IV: Enterprise & Identity Security", + "part_id": "part-iv-enterprise-identity-security", + "anchor": "#8-active-directory-security-attack-techniques", + "line": 493, + "tags": [ + "ad" + ], + "summary": "- Implement tiered administration model (Tier 0/1/2)\n- Deploy Group Managed Service Accounts (gMSAs)\n- Enable Protected Users security group\n- Enforce AES encryption for Kerberos\n- Implement LAPS for local admin passwords\n- Enable Credential Guard on Windows 10/11+\n- Rotate KRBTGT password twice ann", + "subsections": [ + "8.1 Overview", + "8.2 Kerberos Authentication Attacks", + "8.3 Attack Commands", + "8.4 AD Hardening Best Practices" + ], + "subsection_count": 4 + }, + { + "id": "9-penetration-testing-methodologies-reporting", + "title": "9. Penetration Testing Methodologies & Reporting", + "part": "Part V: Offensive Operations", + "part_id": "part-v-offensive-operations", + "anchor": "#9-penetration-testing-methodologies-reporting", + "line": 585, + "tags": [ + "pentest" + ], + "summary": "---", + "subsections": [ + "9.1 PTES Seven Phases", + "9.2 Reconnaissance Tools", + "9.3 Reconnaissance Commands", + "9.4 Privilege Escalation", + "9.5 CVSS Scoring" + ], + "subsection_count": 5 + }, + { + "id": "10-red-team-operations", + "title": "10. Red Team Operations", + "part": "Part V: Offensive Operations", + "part_id": "part-v-offensive-operations", + "anchor": "#10-red-team-operations", + "line": 655, + "tags": [ + "redteam" + ], + "summary": "Enter-PSSession -ComputerName TARGET -Credential $cred\n```", + "subsections": [ + "10.1 C2 Frameworks", + "10.2 Sliver C2 Framework", + "10.3 AMSI Bypass Techniques", + "10.4 Persistence Mechanisms", + "10.5 Lateral Movement" + ], + "subsection_count": 5 + }, + { + "id": "11-social-engineering-phishing", + "title": "11. Social Engineering & Phishing", + "part": "Part V: Offensive Operations", + "part_id": "part-v-offensive-operations", + "anchor": "#11-social-engineering-phishing", + "line": 728, + "tags": [ + "social" + ], + "summary": "---", + "subsections": [ + "11.1 Landscape Statistics", + "11.2 Phishing Frameworks", + "11.3 GoPhish Setup", + "11.4 Evilginx3 MFA Bypass", + "11.5 Physical Security Testing" + ], + "subsection_count": 5 + }, + { + "id": "12-wireless-security-testing", + "title": "12. 
Wireless Security Testing", + "part": "Part V: Offensive Operations", + "part_id": "part-v-offensive-operations", + "anchor": "#12-wireless-security-testing", + "line": 786, + "tags": [ + "wireless" + ], + "summary": "aircrack-ng -w wordlist.txt capture-01.cap\n```", + "subsections": [ + "12.1 WiFi Hacking Tools", + "12.2 Attack Methodology" + ], + "subsection_count": 2 + }, + { + "id": "13-purple-team-operations", + "title": "13. Purple Team Operations", + "part": "Part VI: Defensive & Detection Operations", + "part_id": "part-vi-defensive-detection-operations", + "anchor": "#13-purple-team-operations", + "line": 820, + "tags": [ + "purple" + ], + "summary": "---", + "subsections": [ + "13.1 Overview", + "13.2 Adversary Emulation Frameworks", + "13.3 MITRE Caldera", + "13.4 Atomic Red Team", + "13.5 Sigma Detection Rules", + "13.6 BAS Platforms" + ], + "subsection_count": 6 + }, + { + "id": "14-incident-response", + "title": "14. Incident Response", + "part": "Part VI: Defensive & Detection Operations", + "part_id": "part-vi-defensive-detection-operations", + "anchor": "#14-incident-response", + "line": 899, + "tags": [ + "incident" + ], + "summary": "*Content from v8_IncidentResponse module*", + "subsections": [], + "subsection_count": 0 + }, + { + "id": "15-malware-analysis", + "title": "15. Malware Analysis", + "part": "Part VI: Defensive & Detection Operations", + "part_id": "part-vi-defensive-detection-operations", + "anchor": "#15-malware-analysis", + "line": 907, + "tags": [ + "malware" + ], + "summary": "```yara\nrule MalwareFamily : tag1 tag2 {\n meta:\n author = \"Analyst\"\n description = \"Detects MalwareFamily\"\n strings:\n $str1 = \"C:\\\\Windows\\\\Temp\\\\malware.exe\"\n $hex1 = { 48 8B 05 ?? ?? ?? ?? 48 89 44 24 }\n $re1 = /[a-z]{5,10}\\.exe/i\n condition:\n uint16(0) == 0x5A4D and\n files", + "subsections": [ + "15.1 Threat Landscape 2024-2025", + "15.2 Analysis Methodology", + "15.3 Static Analysis", + "15.4 Dynamic Analysis Platforms", + "15.5 YARA Rule Structure" + ], + "subsection_count": 5 + }, + { + "id": "16-aiml-security-operations", + "title": "16. AI/ML Security Operations", + "part": "Part VII: Emerging Technologies & Specialized Domains", + "part_id": "part-vii-emerging-technologies-specialized-domains", + "anchor": "#16-aiml-security-operations", + "line": 977, + "tags": [ + "ai" + ], + "summary": "curl -X POST https://api.lakera.ai/v1/guard \\\n -H 'Authorization: Bearer $LAKERA_API_KEY' \\\n -d '{\"input\": \"user prompt\", \"policies\": [\"prompt_injection\", \"pii\"]}'\n```", + "subsections": [ + "16.1 OWASP Top 10 for LLM Applications (2025)", + "16.2 LLM Security Guardrails", + "16.3 AI Red Teaming Frameworks", + "16.4 Quick Reference" + ], + "subsection_count": 4 + }, + { + "id": "17-otics-security", + "title": "17. OT/ICS Security", + "part": "Part VII: Emerging Technologies & Specialized Domains", + "part_id": "part-vii-emerging-technologies-specialized-domains", + "anchor": "#17-otics-security", + "line": 1031, + "tags": [ + "ot" + ], + "summary": "**Modbus TCP (Port 502)**: No authentication, cleartext, no encryption", + "subsections": [ + "17.1 GRFICSv2", + "17.2 ICS Protocols" + ], + "subsection_count": 2 + }, + { + "id": "18-blockchain-smart-contract-security", + "title": "18. 
Blockchain & Smart Contract Security", + "part": "Part VII: Emerging Technologies & Specialized Domains", + "part_id": "part-vii-emerging-technologies-specialized-domains", + "anchor": "#18-blockchain-smart-contract-security", + "line": 1046, + "tags": [ + "ai", + "blockchain" + ], + "summary": "- Reentrancy attacks\n- Integer overflow/underflow\n- Access control issues\n- Front-running", + "subsections": [ + "18.1 Training Platforms", + "18.2 Vulnerability Categories" + ], + "subsection_count": 2 + }, + { + "id": "19-zero-trust-architecture", + "title": "19. Zero Trust Architecture", + "part": "Part VII: Emerging Technologies & Specialized Domains", + "part_id": "part-vii-emerging-technologies-specialized-domains", + "anchor": "#19-zero-trust-architecture", + "line": 1063, + "tags": [ + "zerotrust" + ], + "summary": "*Content from v8_ZeroTrust module*", + "subsections": [], + "subsection_count": 0 + }, + { + "id": "appendix-a-quick-start-docker-commands", + "title": "Appendix A: Quick Start Docker Commands", + "part": "Part VII: Emerging Technologies & Specialized Domains", + "part_id": "part-vii-emerging-technologies-specialized-domains", + "anchor": "#appendix-a-quick-start-docker-commands", + "line": 1071, + "tags": [ + "container" + ], + "summary": "docker run --rm -it --cap-add=SYS_PTRACE --security-opt seccomp=unconfined skysider/pwndocker\n```", + "subsections": [], + "subsection_count": 0 + }, + { + "id": "appendix-b-tool-reference-matrix", + "title": "Appendix B: Tool Reference Matrix", + "part": "Part VII: Emerging Technologies & Specialized Domains", + "part_id": "part-vii-emerging-technologies-specialized-domains", + "anchor": "#appendix-b-tool-reference-matrix", + "line": 1092, + "tags": [ + "security" + ], + "summary": "---", + "subsections": [], + "subsection_count": 0 + }, + { + "id": "appendix-c-mitre-attck-quick-reference", + "title": "Appendix C: MITRE ATT&CK Quick Reference", + "part": "Part VII: Emerging Technologies & Specialized Domains", + "part_id": "part-vii-emerging-technologies-specialized-domains", + "anchor": "#appendix-c-mitre-attck-quick-reference", + "line": 1108, + "tags": [ + "security" + ], + "summary": "---", + "subsections": [ + "Key Techniques" + ], + "subsection_count": 1 + } + ] +} \ No newline at end of file diff --git a/docs/VAULTMESH-SENTINEL-GTM-BATTLECARD.md b/docs/VAULTMESH-SENTINEL-GTM-BATTLECARD.md new file mode 100644 index 0000000..cdd4980 --- /dev/null +++ b/docs/VAULTMESH-SENTINEL-GTM-BATTLECARD.md @@ -0,0 +1,109 @@ +# VaultMesh Sentinel — Go-To-Market Battlecard (v1) + +## What we are + +VaultMesh Sentinel is the forensic continuity layer for autonomous infrastructure. + +Sentinel makes systems **defensible after failure**, not merely secure during operation, by emitting offline-verifiable evidence of: +- what happened +- what was attempted and denied (Proof of Restraint) +- who/what had authority +- what corruption/tamper was detected + +## Who we sell to (ICP) + +Primary buyers: +- Space agencies & contractors (satellites, on-orbit servicing, lunar infrastructure) +- Critical IoT / OT operators (energy grids, pipelines, factories) +- Defense & national infrastructure vendors + +Buyer personas: +- Program managers (mission liability) +- Security / safety leads (post-incident accountability) +- Compliance & legal (audit survival) +- Insurers (claim defensibility) + +## The problem they already feel + +- Automation is increasing faster than accountability. +- Systems operate offline, autonomous, and under coercion. 
+- After incidents, there is blame without proof; logs without integrity; narratives instead of evidence. + +## Our wedge (why we win first) + +**Proof of Restraint** + +Sentinel produces auditable evidence not only of actions executed, but of actions **considered and safely denied**, with: +- denial reason (bounded + schematized) +- the exact operation that would have occurred (op + digest) +- any containment applied (scope narrowing) + +## What Sentinel actually ships (v1) + +- Action gating: intent → allow/deny → effect +- Append-only receipts + deterministic Merkle roots +- ShadowReceipts on denial (no silent drops) +- Corruption/tamper receipts and degraded-mode containment (authority can only narrow) +- Offline export bundles (seals) + offline verifier +- Archaeology drill as onboarding requirement + +## The one-line pitch + +“VaultMesh Sentinel is the black box recorder for autonomous infrastructure — it proves what happened, what was denied, and why, even years after failure.” + +## Why now + +- Automation is unavoidable (space latency, industrial scale) +- Regulation is tightening (NIS2 / CRA pressures) +- Insurance is demanding evidence, not promises +- Incidents are becoming political and international, not technical + +## Competitive landscape (why others lose) + +| Competitor type | Why they fail | +|---|---| +| SIEM / logging | Logs can be deleted, forged, coerced, or re-framed | +| Cloud governance | Assumes connectivity and a trusted control plane | +| Blockchains | Assumes liveness/consensus and pushes complexity into ops | +| Safety systems | Enforce rules but don’t prove restraint | +| Dashboards | Disappear after the incident | + +Sentinel assumes the incident already happened. + +## Proof artifacts (what we can hand an auditor) + +Typical export bundle contains: +- `ROOT.current.txt` (root + seq + timestamp + algorithm identifiers) +- `receipts.jsonl` or a SQLite export covering the range +- `seal.json` (bundle metadata + ranges + root commitments) +- `integrity.json` (hashes of included files) +- `verifier_manifest.json` (expected tool versions/checksums) + +## Pricing anchors (not promises) + +Deployment licensing: +- Space / defense: $250k – $5M per system +- Critical IoT / OT: $50k – $500k per site + +Recurring: +- Long-term support & verification tooling +- Compliance & evidence export packages + +## First killer demo (closes deals) + +**“The Black Box That Refused”** +1. Autonomous system runs offline. +2. Unsafe command is issued. +3. Sentinel denies it (ShadowReceipt emitted). +4. System continues safely. +5. Later, an auditor receives a proof bundle and verifies it offline. + +Outcome: clear authority trail, provable restraint, zero ambiguity. + +## Expansion path + +1. Start as single-sovereign Sentinel (isolation-correct) +2. Add continuous invariant verification + drift containment +3. Optional federation for cross-witnessing (witness augmentation, not correctness) +4. Become a recognized evidence standard for autonomous operations + diff --git a/docs/VAULTMESH-SHIELD-NODE-TEM.md b/docs/VAULTMESH-SHIELD-NODE-TEM.md new file mode 100644 index 0000000..c803276 --- /dev/null +++ b/docs/VAULTMESH-SHIELD-NODE-TEM.md @@ -0,0 +1,197 @@ +# Shield Node & TEM Engine + +## Summary + +The Shield Node is the OffSec/TEM appliance for VaultMesh, running on `shield-vm` with a dedicated MCP backend, agents, and signed activity that flows back into the core ledger. 
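+
+As a concrete orientation, the sketch below shows what one hash-committed activity record could look like on its way from the Shield Node back to the core ledger. It is illustrative only: the field names, the receipts path default, and the `blake3:` digest convention are assumptions borrowed from the ProofBundle documentation, not the actual Shield wire format, and signing plus ledger ingestion happen downstream of this step.
+
+```python
+# Illustrative sketch only: field names and the receipts path are assumptions.
+import json
+import time
+
+from blake3 import blake3  # pip install blake3
+
+
+def emit_shield_receipt(receipt_type: str, body: dict,
+                        path: str = "/opt/offsec-agents/receipts/shield.jsonl") -> dict:
+    """Append one hash-committed activity record to a local JSONL file."""
+    record = {
+        "receipt_type": receipt_type,
+        "timestamp": int(time.time()),
+        "body": body,
+    }
+    # Canonical encoding (sorted keys, compact separators) before hashing.
+    canonical = json.dumps(record, sort_keys=True, separators=(",", ":"))
+    record["root_hash"] = "blake3:" + blake3(canonical.encode()).hexdigest()
+    with open(path, "a", encoding="utf-8") as fh:
+        fh.write(json.dumps(record, sort_keys=True) + "\n")
+    return record
+```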
+ +--- + +## Key Findings + +- Shield Node now runs as a persistent service on `shield-vm` (Tailscale: `100.112.202.10`). +- MCP backend listens on `:8081` with `/health` and `/mcp/command` endpoints. +- Five core OffSec agents are available (Recon, Vuln, Exploit, CTF, DFIR). +- VaultMesh talks to the Shield Node via `offsec_node_client.py` and `vm_cli.py offsec …` commands. +- Shield activity is designed to be captured, analyzed, and (in the next iteration) emitted as receipts for ProofChain ingestion. + +--- + +## Components + +| Component | Description | +|-----------|-------------| +| Shield Node host | `shield-vm` (Debian, Tailscale node) | +| OffSec Agents stack | `/opt/offsec-agents/` (Python package + virtualenv) | +| MCP backend | `files/offsec_mcp.py` (FastAPI / uvicorn) | +| System service | `vaultmesh-mcp.service` (enabled, restart on failure) | +| VaultMesh client | `scripts/offsec_node_client.py` | +| CLI façade | `vm_cli.py offsec agents` and `vm_cli.py offsec shield-status` | + +--- + +## Node & Service Layout + +| Item | Value | +|------|-------| +| Host | `shield-vm` (Tailscale IP: `100.112.202.10`) | +| Code root | `/opt/offsec-agents/` | +| Virtualenv | `/opt/offsec-agents/.venv/` | +| Service manager | `systemd` → `vaultmesh-mcp.service` | +| Port | `8081/tcp` (local + tailnet access) | +| Local state | `vaultmesh.db` (SQLite, node-local) | +| Planned receipts | `/opt/offsec-agents/receipts/` for ProofChain ingestion | + +--- + +## Service Configuration (systemd) + +- **Unit path**: `/etc/systemd/system/vaultmesh-mcp.service` +- **User**: `sovereign` +- **WorkingDirectory**: `/opt/offsec-agents` +- **ExecStart**: `/opt/offsec-agents/.venv/bin/uvicorn files.offsec_mcp:app --host 0.0.0.0 --port 8081` +- **Environment**: + - `VAULTMESH_ROOT=/opt/vaultmesh` + - `TEM_DB_PATH=/opt/offsec-agents/state/tem.db` + - `TEM_RECEIPTS_PATH=/opt/offsec-agents/receipts/tem` + +--- + +## API Endpoints + +### `GET /health` + +Returns Shield status, node/agent counts, and uptime. + +```json +{ + "status": "ok", + "nodes": 12, + "proofs": 0, + "uptime": "6m" +} +``` + +### `POST /mcp/command` + +JSON body: +```json +{ + "session_id": "string", + "user": "string", + "command": "string" +} +``` + +Example commands: +- `"status"` +- `"mesh status"` +- `"agents list"` +- `"shield status"` +- `"agent spawn recon example.com"` +- `"agent mission "` + +--- + +## VaultMesh Integration + +### Environment Variable + +On VaultMesh host: +```bash +export OFFSEC_NODE_URL=http://100.112.202.10:8081 +``` + +### Client + +`scripts/offsec_node_client.py` + +Core methods: +- `health()` → calls `/health` +- `command(command: str, session_id: str, user: str)` → `/mcp/command` + +### CLI Commands + +```bash +# List agents registered on Shield Node +python3 cli/vm_cli.py offsec agents + +# Show Shield health and status +python3 cli/vm_cli.py offsec shield-status +``` + +--- + +## Workflows / Pipelines + +### 1. Operator View + +```bash +vm offsec shield-status # Confirm Shield Node is up and healthy +vm offsec agents # Verify active agent types and readiness +``` + +### 2. OffSec Operations (planned expansion) + +- Trigger recon, vuln scans, and missions via `offsec_node_client.py` +- Store results locally in `vaultmesh.db` +- Emit receipts to `/opt/offsec-agents/receipts/` + +### 3. 
VaultMesh Ingestion (planned) + +- Guardian / automation jobs pull Shield receipts into VaultMesh ProofChain +- Lawchain and compliance scrolls can reference Shield evidence directly + +--- + +## Security Notes + +- Shield Node is an OffSec/TEM surface and is isolated onto `shield-vm` +- Access path is limited to Tailscale + SSH; no public internet exposure +- SQLite DB and receipts directory are kept local to `/opt/offsec-agents` +- Systemd ensures automatic restart on crash or failure +- TEM-oriented commands (`tem status`, `tem recall`) reserved for future expansion + +--- + +## Dependencies + +- Python 3.13, `python3-venv`, and `python3-pip` on `shield-vm` +- `offsec-agents` installed editable in `/opt/offsec-agents` +- MCP dependencies from `files/requirements-mcp.txt` +- Tailscale client running on `shield-vm` +- VaultMesh core with `OFFSEC_NODE_URL` configured + +--- + +## Deployment Summary + +1. Code synced to `/opt/offsec-agents` on `shield-vm` +2. Virtualenv `.venv` created and `offsec-agents` installed editable +3. MCP dependencies installed from `files/requirements-mcp.txt` +4. `vaultmesh-mcp.service` installed, enabled, and started under the `sovereign` user +5. Health verified via: + ```bash + curl http://localhost:8081/health + curl -X POST http://localhost:8081/mcp/command \ + -H "Content-Type: application/json" \ + -d '{"session_id":"test","user":"sovereign","command":"agents list"}' + ``` + +--- + +## Position in Overall Architecture + +``` +VaultMesh (core ledger) Shield Node (offsec-agents) +───────────────────────── ─────────────────────────── +Rust engines Python agents + TEM +ProofChain/Guardian MCP backend (:8081) +vm_cli.py Nexus consoles +offsec_node_client.py ─────────────► /mcp/command +receipt ingestion ◄────────────────── /opt/offsec-agents/receipts/ +``` + +**VaultMesh**: "What happened is provable." +**Shield Node**: "What happens at the edge is observed, remembered, and signed." + +The link between them is a narrow, explicit HTTP + receipts bridge, not shared mutable state. diff --git a/docs/VAULTMESH-STANDARDS-INDEX.md b/docs/VAULTMESH-STANDARDS-INDEX.md new file mode 100644 index 0000000..518cbcd --- /dev/null +++ b/docs/VAULTMESH-STANDARDS-INDEX.md @@ -0,0 +1,201 @@ +# VaultMesh Standards Index + +> Canonical index of normative and supporting artifacts for the VaultMesh +> ProofBundle and ledger evidence model. + +This document provides a single entry point for regulators, auditors, and +integration partners who need to understand which documents and tools are +**normative** (MUST be followed) and which are **supporting** (helpful +for implementation and interpretation). + +--- + +## 1. Scope + +This index currently covers the **ProofBundle** family of artifacts: + +- The way VaultMesh packages evidence for a single document access +- The cryptographic verification model for that evidence +- The offline tooling used by regulators to validate bundles + +Future VaultMesh standards (e.g. Treasury, Mesh Federation) SHOULD be +added to this index as they are formalized. + +--- + +## 2. Normative Artifacts + +These artifacts define the behavior and structure that MUST be followed +for ProofBundle implementations and verifiers. 
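+
+To make the verification model concrete before listing the artifacts, the sketch below shows the two core checks in miniature: canonical-JSON hashing of each receipt and `previous_hash` linkage. It is a simplified illustration (the exact set of fields feeding each hash is an assumption here), not a substitute for the SPEC or for the reference verifier described in 2.1 and 2.2 below.
+
+```python
+# Simplified illustration of the two core checks; not the reference verifier.
+import json
+
+from blake3 import blake3  # pip install blake3
+
+
+def canonical(obj) -> bytes:
+    # Canonical encoding per the SPEC: sorted keys, compact separators.
+    return json.dumps(obj, sort_keys=True, separators=(",", ":")).encode()
+
+
+def check_chain(receipts: list) -> bool:
+    """Verify each receipt's root_hash and its previous_hash linkage."""
+    prev = None
+    for r in receipts:
+        body = {k: v for k, v in r.items() if k != "root_hash"}
+        expected = "blake3:" + blake3(canonical(body)).hexdigest()
+        if r["root_hash"] != expected:
+            return False  # receipt body does not match its commitment
+        if prev is not None and r.get("previous_hash") != prev:
+            return False  # hash-chain linkage is broken
+        prev = r["root_hash"]
+    return True
+```
+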
+ +### 2.1 ProofBundle Specification + +- **Title:** VaultMesh ProofBundle Specification +- **File:** `docs/VAULTMESH-PROOFBUNDLE-SPEC.md` +- **Version:** `1.1.0` +- **Status:** Normative + +Defines: + +- JSON schema for ProofBundle exports (`bundle_id`, `schema_version`, + `document`, `actor`, `portal`, `chain`, `guardian_anchor`, + `proofchain`, `meta`) +- Use of BLAKE3 for `root_hash` and `previous_hash` +- Hash-chain semantics and verification rules +- Threat model & non-goals +- AI Act Annex IX compliance crosswalk +- Versioning and extensibility rules + +**Implementers MUST** treat this SPEC as the source of truth for what a +valid ProofBundle is and how it is verified. + +### 2.2 ProofBundle Offline Verifier + +- **Title:** VaultMesh ProofBundle Verifier +- **File:** `burocrat/app/tools/vm_verify_proofbundle.py` +- **Status:** Normative reference implementation + +Implements: + +- Canonical JSON encoding (`sort_keys=True`, compact separators) +- BLAKE3 verification of each receipt's `root_hash` +- Hash-chain verification via `previous_hash` +- Consistency checks against `chain.ok`, `chain.length`, + `chain.start`, `chain.end` +- Exit codes: + - `0` – valid bundle + - `1` – structural / hash-chain failure + - `2` – I/O or parse error + +**Regulators MAY** use this tool directly or as a reference when +building their own independent verifier. + +--- + +## 3. Supporting Artifacts + +These artifacts are not strictly required for correctness, but they +explain how to use the normative pieces in practice. + +### 3.1 ProofBundle Playbook + +- **Title:** How to Verify a VaultMesh ProofBundle +- **File:** `docs/VAULTMESH-PROOFBUNDLE-PLAYBOOK.md` +- **Version:** `1.0` +- **Status:** Informative + +Audience: compliance officers, lawyers, auditors, procurement teams. + +Provides: + +- Plain-language explanation of what a ProofBundle proves +- Prerequisites (Python, `blake3` package) +- 3-step verification walkthrough +- Example output (valid vs tampered bundle) +- Operational guidance (no VaultMesh access required) + +### 3.2 HTML ProofBundle Viewer + +- **Title:** ProofBundle HTML Viewer +- **File:** `burocrat/app/src/views/proofbundle.ejs` +- **Status:** Informative + +Provides: + +- Human-readable rendering of a ProofBundle +- Chain visualization and anchor details +- Print-to-PDF option for dossier filing +- Footer note pointing to the offline verifier + +This viewer **MUST NOT** be considered a substitute for cryptographic +verification; it is a convenience layer on top of the normative JSON + +verifier. + +### 3.3 ProofBundle Conformance Test Pack + +- **Title:** ProofBundle Conformance Test Pack +- **Path:** `testvectors/proofbundle/` +- **Version:** `1.0` +- **Status:** Informative + +Provides: + +- `proofbundle-valid.json` – Known-good bundle (exit 0) +- `proofbundle-tampered-body.json` – Modified body, hash mismatch (exit 1) +- `proofbundle-tampered-root.json` – Wrong root_hash (exit 1) +- `proofbundle-broken-chain.json` – Broken previous_hash linkage (exit 1) +- `README.md` – Standalone usage instructions + +Implementers SHOULD verify their verifier passes all test vectors before +claiming conformance. + +--- + +## 4. 
Current Version Matrix + +| Component | File | Version | +|----------------------|------------------------------------------------|----------| +| ProofBundle SPEC | `docs/VAULTMESH-PROOFBUNDLE-SPEC.md` | `1.1.0` | +| ProofBundle Playbook | `docs/VAULTMESH-PROOFBUNDLE-PLAYBOOK.md` | `1.0` | +| Offline Verifier | `burocrat/app/tools/vm_verify_proofbundle.py` | `1.1.0*` | +| Conformance Test Pack | `testvectors/proofbundle/` | `1.0` | +| HTML Viewer | `burocrat/app/src/views/proofbundle.ejs` | n/a | + +\* The verifier tracks the SPEC's `schema_version`. For +`schema_version = "1.1.0"` bundles, this script is considered the +reference. + +--- + +## 5. Citing ProofBundle + +ProofBundle can be cited in assessments, audit reports, and compliance +documentation using the following reference: + +> This assessment relies on VaultMesh ProofBundle, specified in +> **"VAULTMESH-PROOFBUNDLE-SPEC v1.1.0"**, with verification performed +> using the reference tool `vm_verify_proofbundle.py v1.1.0` and validated +> against the **VaultMesh ProofBundle Conformance Test Pack v1.0**. + +The git tag `proofbundle-v1.1.0` in the VaultMesh repository marks the +reference implementation state for this version. + +--- + +## 6. Implementation Notes + +- **Producers of ProofBundles:** + - MUST include `schema_version` in every bundle and follow the + rules in the SPEC. + - SHOULD keep this index updated when bumping versions or adding + new normative documents. + +- **Verifiers:** + - MUST reject unknown major versions (e.g. `2.x.x`) by default. + - MAY accept minor extensions (`1.2.x`) if all required fields + validate according to the `1.1.0` SPEC. + +--- + +## 7. Roadmap for Future Standards + +Future VaultMesh standards that SHOULD be added here: + +| Standard | Scroll | Status | +|----------|--------|--------| +| Treasury Receipt SPEC | Treasury | Planned | +| Mesh Federation SPEC | Mesh | Planned | +| Identity & Capability SPEC | Identity | Planned | +| Guardian Anchoring & External ProofChain SPEC | Guardian | Planned | + +Each new standard SHOULD define: + +1. A normative SPEC document under `docs/` +2. A reference implementation (Rust and/or Python) +3. Optional Playbook for non-technical stakeholders +4. Clear versioning and deprecation rules + +--- + +_VaultMesh Standards Index_ +_Sovereign Infrastructure for the Digital Age_ diff --git a/docs/VAULTMESH-TESTING-FRAMEWORK.md b/docs/VAULTMESH-TESTING-FRAMEWORK.md new file mode 100644 index 0000000..6841ad8 --- /dev/null +++ b/docs/VAULTMESH-TESTING-FRAMEWORK.md @@ -0,0 +1,620 @@ +# VAULTMESH-TESTING-FRAMEWORK.md +**Property-Based Testing for the Civilization Ledger** + +> *What is not tested cannot be trusted.* + +--- + +## 1. Testing Philosophy + +VaultMesh uses a layered testing approach: + +| Layer | What It Tests | Framework | +|-------|---------------|-----------| +| Unit | Individual functions | Rust: `#[test]`, Python: `pytest` | +| Property | Invariants that must always hold | `proptest`, `hypothesis` | +| Integration | Component interactions | `testcontainers` | +| Contract | API compatibility | OpenAPI validation | +| Chaos | Resilience under failure | `chaos-mesh`, custom | +| Acceptance | End-to-end scenarios | `cucumber-rs` | + +--- + +## 2. 
Core Invariants
+
+These properties must ALWAYS hold:
+
+```rust
+// vaultmesh-core/src/invariants.rs
+
+/// Core invariants that must never be violated
+pub trait Invariant {
+    fn check(&self) -> Result<(), InvariantViolation>;
+}
+
+/// Receipts are append-only (AXIOM-001)
+pub struct AppendOnlyReceipts;
+
+impl Invariant for AppendOnlyReceipts {
+    fn check(&self) -> Result<(), InvariantViolation> {
+        // Verify no receipts have been modified or deleted
+        // by comparing sequential hashes
+        Ok(())
+    }
+}
+
+/// Merkle roots are consistent with receipts (AXIOM-002)
+pub struct ConsistentMerkleRoots;
+
+impl Invariant for ConsistentMerkleRoots {
+    fn check(&self) -> Result<(), InvariantViolation> {
+        // Recompute Merkle root from receipts
+        // Compare with stored root
+        Ok(())
+    }
+}
+
+/// All significant operations produce receipts (AXIOM-003)
+pub struct UniversalReceipting;
+
+impl Invariant for UniversalReceipting {
+    fn check(&self) -> Result<(), InvariantViolation> {
+        // Check that tracked operations have corresponding receipts
+        Ok(())
+    }
+}
+
+/// Hash chains are unbroken
+pub struct UnbrokenHashChains;
+
+impl Invariant for UnbrokenHashChains {
+    fn check(&self) -> Result<(), InvariantViolation> {
+        // Verify each receipt's previous_hash matches the prior receipt
+        Ok(())
+    }
+}
+```
+
+---
+
+## 3. Property-Based Tests
+
+### 3.1 Receipt Properties
+
+```rust
+// vaultmesh-core/tests/receipt_properties.rs
+
+use chrono::{DateTime, Utc};
+use proptest::prelude::*;
+use vaultmesh_core::{Receipt, ReceiptHeader, ReceiptMeta, Scroll, VmHash};
+
+proptest! {
+    /// Any valid receipt can be serialized and deserialized without loss
+    #[test]
+    fn receipt_roundtrip(receipt in arb_receipt()) {
+        let json = serde_json::to_string(&receipt)?;
+        let restored: Receipt = serde_json::from_str(&json)?;
+        prop_assert_eq!(receipt, restored);
+    }
+
+    /// Receipt hash is deterministic
+    #[test]
+    fn receipt_hash_deterministic(receipt in arb_receipt()) {
+        let hash1 = VmHash::from_json(&receipt)?;
+        let hash2 = VmHash::from_json(&receipt)?;
+        prop_assert_eq!(hash1, hash2);
+    }
+
+    /// Different receipts produce different hashes
+    #[test]
+    fn different_receipts_different_hashes(
+        receipt1 in arb_receipt(),
+        receipt2 in arb_receipt()
+    ) {
+        prop_assume!(receipt1 != receipt2);
+        let hash1 = VmHash::from_json(&receipt1)?;
+        let hash2 = VmHash::from_json(&receipt2)?;
+        prop_assert_ne!(hash1, hash2);
+    }
+
+    /// Merkle root of N receipts is consistent regardless of computation order
+    #[test]
+    fn merkle_root_order_independent(receipts in prop::collection::vec(arb_receipt(), 1..100)) {
+        let hashes: Vec<VmHash> = receipts.iter()
+            .map(|r| VmHash::from_json(r).unwrap())
+            .collect();
+
+        let root1 = merkle_root(&hashes);
+
+        // Reorder but keep the same hashes
+        let mut shuffled = hashes.clone();
+        shuffled.sort_by(|a, b| a.hex().cmp(b.hex()));
+
+        // Root should be the same because merkle_root sorts internally
+        let root2 = merkle_root(&shuffled);
+        prop_assert_eq!(root1, root2);
+    }
+}
+
+fn arb_receipt() -> impl Strategy<Value = Receipt> {
+    (
+        arb_scroll(),
+        arb_receipt_type(),
+        any::<u32>(),
+        prop::collection::vec(any::<String>(), 0..5),
+    ).prop_map(|(scroll, receipt_type, timestamp, tags)| {
+        Receipt {
+            header: ReceiptHeader {
+                receipt_type,
+                timestamp: DateTime::from_timestamp(timestamp as i64, 0).unwrap(),
+                root_hash: "blake3:placeholder".to_string(),
+                tags,
+            },
+            meta: ReceiptMeta {
+                scroll,
+                sequence: 0,
+                anchor_epoch: None,
+                proof_path: None,
+            },
+            body: serde_json::json!({"test": true}),
+        }
+    })
+}
+
+fn arb_scroll() -> impl Strategy<Value = Scroll> {
+    prop_oneof![
+        Just(Scroll::Drills),
+        Just(Scroll::Compliance),
+        Just(Scroll::Guardian),
+        Just(Scroll::Treasury),
+        Just(Scroll::Mesh),
+        Just(Scroll::OffSec),
+        Just(Scroll::Identity),
+        Just(Scroll::Observability),
+        Just(Scroll::Automation),
+        Just(Scroll::PsiField),
+    ]
+}
+
+fn arb_receipt_type() -> impl Strategy<Value = String> {
+    prop_oneof![
+        Just("security_drill_run".to_string()),
+        Just("oracle_answer".to_string()),
+        Just("anchor_success".to_string()),
+        Just("treasury_credit".to_string()),
+        Just("mesh_node_join".to_string()),
+    ]
+}
+```
+
+### 3.2 Guardian Properties
+
+```rust
+// vaultmesh-guardian/tests/guardian_properties.rs
+
+use proptest::prelude::*;
+use vaultmesh_guardian::{ProofChain, AnchorCycle, AnchorResult};
+
+proptest! {
+    /// Anchor cycle produces valid proof for all included receipts
+    #[test]
+    fn anchor_cycle_valid_proofs(
+        receipts in prop::collection::vec(arb_receipt(), 1..50)
+    ) {
+        let mut proofchain = ProofChain::new();
+
+        for receipt in &receipts {
+            proofchain.append(receipt)?;
+        }
+
+        let cycle = AnchorCycle::new(&proofchain);
+        let anchor_result = cycle.execute_mock()?;
+
+        // Every receipt should have a valid Merkle proof
+        for receipt in &receipts {
+            let proof = anchor_result.get_proof(&receipt.header.root_hash)?;
+            prop_assert!(proof.verify(&anchor_result.root_hash));
+        }
+    }
+
+    /// Anchor root changes when any receipt changes
+    #[test]
+    fn anchor_root_sensitive(
+        receipts in prop::collection::vec(arb_receipt(), 2..20),
+        index in any::<prop::sample::Index>()
+    ) {
+        let mut proofchain1 = ProofChain::new();
+        let mut proofchain2 = ProofChain::new();
+
+        for receipt in &receipts {
+            proofchain1.append(receipt)?;
+            proofchain2.append(receipt)?;
+        }
+
+        let root1 = proofchain1.current_root();
+
+        // Modify one receipt in proofchain2
+        let idx = index.index(receipts.len());
+        let mut modified = receipts[idx].clone();
+        modified.body = serde_json::json!({"modified": true});
+        proofchain2.replace(idx, &modified)?;
+
+        let root2 = proofchain2.current_root();
+
+        prop_assert_ne!(root1, root2);
+    }
+
+    /// Sequential anchors form valid chain
+    #[test]
+    fn sequential_anchors_chain(
+        receipt_batches in prop::collection::vec(
+            prop::collection::vec(arb_receipt(), 1..20),
+            2..10
+        )
+    ) {
+        let mut proofchain = ProofChain::new();
+        let mut previous_anchor: Option<AnchorResult> = None;
+
+        for batch in receipt_batches {
+            for receipt in batch {
+                proofchain.append(&receipt)?;
+            }
+
+            let cycle = AnchorCycle::new(&proofchain);
+            let anchor_result = cycle.execute_mock()?;
+
+            if let Some(prev) = &previous_anchor {
+                // Current anchor should reference previous
+                prop_assert_eq!(anchor_result.previous_root, Some(prev.root_hash.clone()));
+            }
+
+            previous_anchor = Some(anchor_result);
+        }
+    }
+}
+```
+
+### 3.3 Treasury Properties
+
+```rust
+// vaultmesh-treasury/tests/treasury_properties.rs
+
+use chrono::Utc;
+use proptest::prelude::*;
+use rust_decimal::Decimal;
+use vaultmesh_treasury::{TreasuryEngine, Entry, EntryType, Settlement, Currency};
+
+proptest! {
+    /// Sum of all entries is always zero (double-entry invariant)
+    #[test]
+    fn double_entry_balance(
+        entries in prop::collection::vec(arb_entry_pair(), 1..50)
+    ) {
+        let mut engine = TreasuryEngine::new();
+        engine.create_account(test_account("account-a"))?;
+        engine.create_account(test_account("account-b"))?;
+
+        let mut total = Decimal::ZERO;
+
+        for (debit, credit) in entries {
+            engine.record_entry(debit.clone())?;
+            engine.record_entry(credit.clone())?;
+
+            total += credit.amount;
+            total -= debit.amount;
+        }
+
+        // Total should always be zero
+        prop_assert_eq!(total, Decimal::ZERO);
+    }
+
+    /// Settlement balances match pre/post snapshots
+    #[test]
+    fn settlement_balance_consistency(
+        settlement in arb_settlement()
+    ) {
+        let mut engine = TreasuryEngine::new();
+
+        // Create accounts from settlement
+        for entry in &settlement.entries {
+            engine.create_account_if_not_exists(&entry.account)?;
+        }
+
+        // Fund accounts
+        for entry in &settlement.entries {
+            if entry.entry_type == EntryType::Debit {
+                engine.fund_account(&entry.account, entry.amount * 2)?;
+            }
+        }
+
+        // Snapshot before
+        let before = engine.snapshot_balances(&settlement.affected_accounts())?;
+
+        // Execute settlement
+        let result = engine.execute_settlement(settlement.clone())?;
+
+        // Snapshot after
+        let after = engine.snapshot_balances(&settlement.affected_accounts())?;
+
+        // Verify net flows match difference
+        for (account, net_flow) in &result.net_flow {
+            let expected_after = before.get(account).unwrap() + net_flow;
+            prop_assert_eq!(*after.get(account).unwrap(), expected_after);
+        }
+    }
+}
+
+fn arb_entry_pair() -> impl Strategy<Value = (Entry, Entry)> {
+    (1u64..1000000).prop_map(|cents| {
+        let amount = Decimal::new(cents as i64, 2);
+        let debit = Entry {
+            entry_id: format!("debit-{}", uuid::Uuid::new_v4()),
+            entry_type: EntryType::Debit,
+            account: "account-a".to_string(),
+            amount,
+            currency: Currency::EUR,
+            memo: "Test debit".to_string(),
+            timestamp: Utc::now(),
+            tags: vec![],
+        };
+        let credit = Entry {
+            entry_id: format!("credit-{}", uuid::Uuid::new_v4()),
+            entry_type: EntryType::Credit,
+            account: "account-b".to_string(),
+            amount,
+            currency: Currency::EUR,
+            memo: "Test credit".to_string(),
+            timestamp: Utc::now(),
+            tags: vec![],
+        };
+        (debit, credit)
+    })
+}
+```
+
+---
+
+## 4. 
Integration Tests
+
+```rust
+// tests/integration/full_cycle.rs
+
+use serde_json::json;
+use testcontainers::{clients, images::postgres::Postgres, Container};
+use vaultmesh_core::Scroll;
+use vaultmesh_portal::Portal;
+use vaultmesh_guardian::Guardian;
+use vaultmesh_oracle::Oracle;
+
+#[tokio::test]
+async fn full_receipt_lifecycle() -> Result<(), Box<dyn std::error::Error>> {
+    // Start containers
+    let docker = clients::Cli::default();
+    let postgres = docker.run(Postgres::default());
+    let db_url = format!(
+        "postgresql://postgres:postgres@localhost:{}/postgres",
+        postgres.get_host_port_ipv4(5432)
+    );
+
+    // Initialize services
+    let portal = Portal::new(&db_url).await?;
+    let guardian = Guardian::new(&db_url).await?;
+
+    // Create and emit receipt
+    let receipt = portal.emit_receipt(
+        Scroll::Drills,
+        "security_drill_run",
+        json!({
+            "drill_id": "test-drill-001",
+            "status": "completed"
+        }),
+        vec!["test".to_string()],
+    ).await?;
+
+    // Verify receipt exists
+    let stored = portal.get_receipt(&receipt.header.root_hash).await?;
+    assert_eq!(stored.header.root_hash, receipt.header.root_hash);
+
+    // Trigger anchor
+    let anchor_result = guardian.anchor_now(None).await?;
+    assert!(anchor_result.success);
+
+    // Verify receipt has proof
+    let proof = guardian.get_proof(&receipt.header.root_hash).await?;
+    assert!(proof.is_some());
+    assert!(proof.unwrap().verify(&anchor_result.root_hash));
+
+    Ok(())
+}
+
+#[tokio::test]
+async fn oracle_answer_receipted() -> Result<(), Box<dyn std::error::Error>> {
+    let docker = clients::Cli::default();
+    let postgres = docker.run(Postgres::default());
+    let db_url = format!(
+        "postgresql://postgres:postgres@localhost:{}/postgres",
+        postgres.get_host_port_ipv4(5432)
+    );
+
+    let portal = Portal::new(&db_url).await?;
+    let oracle = Oracle::new(&db_url).await?;
+
+    // Load test corpus
+    oracle.load_corpus("tests/fixtures/corpus").await?;
+
+    // Ask question
+    let answer = oracle.answer(
+        "What are the requirements for technical documentation under Article 11?",
+        vec!["AI_Act".to_string()],
+        vec![],
+    ).await?;
+
+    // Verify answer was receipted
+    let receipts = portal.query_receipts(
+        Some(Scroll::Compliance),
+        Some("oracle_answer".to_string()),
+        None,
+        None,
+        10,
+    ).await?;
+
+    assert!(!receipts.is_empty());
+    assert_eq!(receipts[0].body["answer_hash"], answer.answer_hash);
+
+    Ok(())
+}
+```
+
+---
+
+## 5. 
Chaos Tests + +```yaml +# chaos/anchor-failure.yaml +apiVersion: chaos-mesh.org/v1alpha1 +kind: NetworkChaos +metadata: + name: anchor-network-partition + namespace: vaultmesh +spec: + action: partition + mode: all + selector: + namespaces: + - vaultmesh + labelSelectors: + app.kubernetes.io/name: guardian + direction: to + target: + selector: + namespaces: + - default + labelSelectors: + app: ethereum-node + mode: all + duration: "5m" + scheduler: + cron: "@every 6h" +--- +apiVersion: chaos-mesh.org/v1alpha1 +kind: PodChaos +metadata: + name: guardian-pod-kill + namespace: vaultmesh +spec: + action: pod-kill + mode: one + selector: + namespaces: + - vaultmesh + labelSelectors: + app.kubernetes.io/name: guardian + scheduler: + cron: "@every 4h" +``` + +```rust +// tests/chaos/anchor_resilience.rs + +#[tokio::test] +#[ignore] // Run manually with chaos-mesh +async fn guardian_recovers_from_network_partition() { + let guardian = connect_to_guardian().await?; + let portal = connect_to_portal().await?; + + // Generate receipts + for i in 0..100 { + portal.emit_receipt( + Scroll::Drills, + "test_receipt", + json!({"index": i}), + vec![], + ).await?; + } + + // Wait for chaos to potentially occur + tokio::time::sleep(Duration::from_secs(60)).await; + + // Verify guardian state is consistent + let status = guardian.get_status().await?; + + // Should either be anchoring or have recovered + assert!( + status.state == "idle" || + status.state == "anchoring", + "Guardian in unexpected state: {}", + status.state + ); + + // If idle, verify all receipts are anchored + if status.state == "idle" { + let receipts = portal.query_receipts(None, None, None, None, 200).await?; + for receipt in receipts { + let proof = guardian.get_proof(&receipt.header.root_hash).await?; + assert!(proof.is_some(), "Receipt not anchored: {}", receipt.header.root_hash); + } + } +} +``` + +--- + +## 6. Test Fixtures + +```rust +// tests/fixtures/mod.rs + +use vaultmesh_core::*; + +pub fn test_drill_receipt() -> Receipt { + Receipt { + header: ReceiptHeader { + receipt_type: "security_drill_run".to_string(), + timestamp: Utc::now(), + root_hash: "blake3:placeholder".to_string(), + tags: vec!["test".to_string()], + }, + meta: ReceiptMeta { + scroll: Scroll::Drills, + sequence: 1, + anchor_epoch: None, + proof_path: None, + }, + body: json!({ + "drill_id": "drill-test-001", + "prompt": "Test security scenario", + "status": "completed", + "stages_total": 3, + "stages_completed": 3 + }), + } +} + +pub fn test_oracle_receipt() -> Receipt { + Receipt { + header: ReceiptHeader { + receipt_type: "oracle_answer".to_string(), + timestamp: Utc::now(), + root_hash: "blake3:placeholder".to_string(), + tags: vec!["test".to_string(), "compliance".to_string()], + }, + meta: ReceiptMeta { + scroll: Scroll::Compliance, + sequence: 1, + anchor_epoch: None, + proof_path: None, + }, + body: json!({ + "question": "Test compliance question?", + "answer_hash": "blake3:test...", + "confidence": 0.95, + "frameworks": ["AI_Act"] + }), + } +} + +pub fn test_corpus() -> Vec { + vec![ + CorpusDocument { + id: "doc-001".to_string(), + title: "AI Act Article 11 - Technical Documentation".to_string(), + content: "Providers shall draw up technical documentation...".to_string(), + framework: "AI_Act".to_string(), + section: "Article 11".to_string(), + }, + // ... 
more test documents + ] +} +``` diff --git a/docs/observability/README.md b/docs/observability/README.md new file mode 100644 index 0000000..ce2df53 --- /dev/null +++ b/docs/observability/README.md @@ -0,0 +1,101 @@ +# Observability - VaultMesh + +This directory contains a Prometheus exporter for VaultMesh and a Grafana dashboard. + +## Metrics Exposed + +| Metric | Type | Labels | Description | +|--------|------|--------|-------------| +| `vaultmesh_receipts_total` | Counter | `module` | Number of receipts emitted | +| `vaultmesh_receipts_failed_total` | Counter | `module`, `reason` | Failed receipt emissions | +| `vaultmesh_anchor_age_seconds` | Gauge | - | Seconds since last guardian anchor | +| `vaultmesh_emit_seconds` | Histogram | `module` | Receipt emit latency | + +## Quick Start (Local) + +### Option 1: Run exporter directly + +```bash +cd vaultmesh-observability +cargo run --release +``` + +Exposes metrics at `http://0.0.0.0:9108/metrics` + +### Option 2: Using Docker Compose + +```bash +cd docs/observability +docker-compose up --build +``` + +Services: +- **Exporter**: http://localhost:9108/metrics +- **Prometheus**: http://localhost:9090 +- **Grafana**: http://localhost:3000 (admin/admin) + +## Importing the Dashboard + +1. Open Grafana at http://localhost:3000 +2. Go to Dashboards → Import +3. Upload `dashboards/receipts.json` +4. Select the Prometheus data source +5. Click Import + +## CI Smoke Test + +The smoke test verifies the exporter responds on `/metrics`: + +```bash +cargo test -p vaultmesh-observability --tests +``` + +Add to `.gitlab-ci.yml`: + +```yaml +observability-smoke: + stage: test + image: rust:1.75 + script: + - cargo test -p vaultmesh-observability --tests -- --nocapture +``` + +## Environment Variables + +| Variable | Default | Description | +|----------|---------|-------------| +| `VAULTMESH_METRICS_ADDR` | `0.0.0.0:9108` | Listen address for metrics server | + +## Guardian Metrics Integration Test + +The Guardian engine has an integration test that verifies metrics are emitted after anchors: + +```bash +cargo test -p vaultmesh-guardian --features metrics --test metrics_integration +``` + +This test: +- Starts ObservabilityEngine on a test port +- Creates Guardian with observability enabled +- Performs an anchor +- Verifies `/metrics` contains `vaultmesh_anchor_age_seconds 0` (fresh anchor) + +## Integration with Other Engines + +Other VaultMesh engines can record metrics by calling: + +```rust +use vaultmesh_observability::ObservabilityEngine; +use std::sync::Arc; + +let engine = Arc::new(ObservabilityEngine::new()); + +// Record successful receipt emission +engine.observe_emitted("guardian", latency_seconds); + +// Record failure +engine.observe_failed("treasury", "io_error"); + +// Update anchor age (0 = just anchored) +engine.set_anchor_age(0.0); +``` diff --git a/docs/observability/dashboards/receipts.json b/docs/observability/dashboards/receipts.json new file mode 100644 index 0000000..1480763 --- /dev/null +++ b/docs/observability/dashboards/receipts.json @@ -0,0 +1,366 @@ +{ + "annotations": { + "list": [] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": null, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": 
"line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 1, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "expr": "sum by(module) (rate(vaultmesh_receipts_total[1m]))", + "legendFormat": "{{module}}", + "refId": "A" + } + ], + "title": "Receipts Emitted Rate (by module)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 50, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red", + "value": null + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 8 + }, + "id": 2, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "expr": "sum by(module, reason) (rate(vaultmesh_receipts_failed_total[1m]))", + "legendFormat": "{{module}} - {{reason}}", + "refId": "A" + } + ], + "title": "Receipt Failures", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "dateTimeAsIso" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 8 + }, + "id": 3, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "expr": "vaultmesh_anchor_age_seconds * 1000", + "legendFormat": "Last Anchor", + "refId": "A" + } + ], + "title": "Last Anchor Timestamp", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { 
+ "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 14 + }, + "id": 4, + "options": { + "legend": { + "calcs": [ + "mean", + "max" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "expr": "histogram_quantile(0.95, sum by(le, module) (rate(vaultmesh_emit_seconds_bucket[5m])))", + "legendFormat": "p95 {{module}}", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "expr": "histogram_quantile(0.50, sum by(le, module) (rate(vaultmesh_emit_seconds_bucket[5m])))", + "legendFormat": "p50 {{module}}", + "refId": "B" + } + ], + "title": "Receipt Emit Latency (p50/p95)", + "type": "timeseries" + } + ], + "refresh": "5s", + "schemaVersion": 38, + "style": "dark", + "tags": [ + "vaultmesh", + "receipts" + ], + "templating": { + "list": [] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "VaultMesh Receipts Overview", + "uid": "vaultmesh-receipts", + "version": 1, + "weekStart": "" +} diff --git a/docs/observability/docker-compose.yml b/docs/observability/docker-compose.yml new file mode 100644 index 0000000..db4e954 --- /dev/null +++ b/docs/observability/docker-compose.yml @@ -0,0 +1,34 @@ +version: "3.8" + +services: + exporter: + build: + context: ../.. 
+ dockerfile: vaultmesh-observability/Dockerfile + image: vaultmesh-observability:local + ports: + - "9108:9108" + environment: + - VAULTMESH_METRICS_ADDR=0.0.0.0:9108 + + prometheus: + image: prom/prometheus:v2.47.0 + volumes: + - ./prometheus.yml:/etc/prometheus/prometheus.yml:ro + ports: + - "9090:9090" + depends_on: + - exporter + + grafana: + image: grafana/grafana:10.1.0 + environment: + - GF_SECURITY_ADMIN_PASSWORD=admin + - GF_USERS_ALLOW_SIGN_UP=false + volumes: + - ./dashboards:/var/lib/grafana/dashboards:ro + - ./grafana-provisioning:/etc/grafana/provisioning:ro + ports: + - "3000:3000" + depends_on: + - prometheus diff --git a/docs/observability/grafana-provisioning/dashboards/default.yml b/docs/observability/grafana-provisioning/dashboards/default.yml new file mode 100644 index 0000000..62eb5f0 --- /dev/null +++ b/docs/observability/grafana-provisioning/dashboards/default.yml @@ -0,0 +1,12 @@ +apiVersion: 1 + +providers: + - name: 'VaultMesh' + orgId: 1 + folder: 'VaultMesh' + folderUid: 'vaultmesh' + type: file + disableDeletion: false + editable: true + options: + path: /var/lib/grafana/dashboards diff --git a/docs/observability/grafana-provisioning/datasources/prometheus.yml b/docs/observability/grafana-provisioning/datasources/prometheus.yml new file mode 100644 index 0000000..369ea61 --- /dev/null +++ b/docs/observability/grafana-provisioning/datasources/prometheus.yml @@ -0,0 +1,9 @@ +apiVersion: 1 + +datasources: + - name: Prometheus + type: prometheus + access: proxy + url: http://prometheus:9090 + isDefault: true + uid: prometheus diff --git a/docs/observability/prometheus.yml b/docs/observability/prometheus.yml new file mode 100644 index 0000000..e80ab37 --- /dev/null +++ b/docs/observability/prometheus.yml @@ -0,0 +1,9 @@ +global: + scrape_interval: 5s + evaluation_interval: 5s + +scrape_configs: + - job_name: 'vaultmesh_observability' + static_configs: + - targets: ['exporter:9108'] + metrics_path: /metrics diff --git a/docs/skill/ALCHEMICAL_PATTERNS.md b/docs/skill/ALCHEMICAL_PATTERNS.md new file mode 100644 index 0000000..7175bde --- /dev/null +++ b/docs/skill/ALCHEMICAL_PATTERNS.md @@ -0,0 +1,551 @@ +# VaultMesh Alchemical Patterns + +> *Solve et Coagula — Dissolve and Coagulate* + +## The Alchemical Framework + +VaultMesh uses alchemical metaphors not as mysticism, but as precise operational language for system states and transformations. 
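+
+The four phases below map onto concrete system states. As a rough illustration of how they can be carried in code so receipts and dashboards share one vocabulary, here is a minimal sketch; the `Phase` enum and `is_crisis` helper are assumed names for this document, not shipped VaultMesh APIs:
+
+```rust
+/// Hypothetical sketch: the four operational phases as a plain enum.
+/// Names mirror the sections below; this is illustrative, not a shipped API.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum Phase {
+    Nigredo,    // crisis, incident in progress
+    Albedo,     // recovery, stabilization
+    Citrinitas, // optimization, new capability emerging
+    Rubedo,     // stable, sovereign operation
+}
+
+impl Phase {
+    /// True while incident-response procedures should stay active.
+    pub fn is_crisis(&self) -> bool {
+        matches!(self, Phase::Nigredo)
+    }
+}
+```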
+ +## Phases (Operational States) + +### Nigredo 🜁 — The Blackening + +**Meaning**: Crisis, breakdown, decomposition +**Operational State**: System under stress, incident in progress + +**Indicators**: +- Active security incident +- Service degradation +- Guardian anchor failures +- Constitutional violations detected + +**Receipt Types During Nigredo**: +- `offsec_incident` (severity: high/critical) +- `obs_log_alert` (severity: critical) +- `gov_violation` +- `psi_phase_transition` (to_phase: nigredo) + +**Actions**: +- Incident response procedures activated +- Enhanced monitoring enabled +- Emergency powers may be invoked +- Transmutation processes initiated + +```json +{ + "type": "psi_phase_transition", + "from_phase": "albedo", + "to_phase": "nigredo", + "trigger": { + "event_type": "security_incident", + "reference": "INC-2025-12-001", + "severity": "critical" + }, + "indicators": [ + "active_intrusion_detected", + "guardian_alert_level_elevated" + ] +} +``` + +--- + +### Albedo 🜄 — The Whitening + +**Meaning**: Purification, recovery, stabilization +**Operational State**: Post-incident recovery, learning phase + +**Indicators**: +- Incident contained +- Systems stabilizing +- Root cause analysis in progress +- Remediation being verified + +**Receipt Types During Albedo**: +- `offsec_remediation` +- `psi_transmutation` (steps: extract, dissolve, purify) +- `obs_health_snapshot` (improving trends) + +**Actions**: +- Post-incident review +- IOC extraction +- Rule generation +- Documentation updates + +```json +{ + "type": "psi_phase_transition", + "from_phase": "nigredo", + "to_phase": "albedo", + "trigger": { + "event_type": "incident_contained", + "reference": "INC-2025-12-001" + }, + "indicators": [ + "threat_neutralized", + "services_recovering", + "rca_initiated" + ], + "duration_in_nigredo_hours": 4.5 +} +``` + +--- + +### Citrinitas 🜆 — The Yellowing + +**Meaning**: Illumination, new capability emerging +**Operational State**: Optimization, enhancement + +**Indicators**: +- New defensive capabilities deployed +- Performance improvements measured +- Knowledge crystallized into procedures +- Drills showing improved outcomes + +**Receipt Types During Citrinitas**: +- `psi_transmutation` (steps: coagulate) +- `psi_integration` +- `security_drill_run` (outcomes: improved) +- `auto_workflow_run` (new capabilities) + +**Actions**: +- Deploy new detection rules +- Update runbooks +- Train team on new procedures +- Measure improvement metrics + +```json +{ + "type": "psi_phase_transition", + "from_phase": "albedo", + "to_phase": "citrinitas", + "trigger": { + "event_type": "capability_deployed", + "reference": "transmute-2025-12-001" + }, + "indicators": [ + "detection_rules_active", + "playbook_updated", + "team_trained" + ], + "capabilities_gained": [ + "lateral_movement_detection_v2", + "automated_containment_k8s" + ] +} +``` + +--- + +### Rubedo 🜂 — The Reddening + +**Meaning**: Integration, completion, maturity +**Operational State**: Stable, sovereign operation + +**Indicators**: +- All systems nominal +- Capabilities integrated into BAU +- Continuous improvement active +- High resilience demonstrated + +**Receipt Types During Rubedo**: +- `psi_resonance` (harmony_score: high) +- `obs_health_snapshot` (all_green) +- `mesh_topology_snapshot` (healthy) +- `treasury_reconciliation` (balanced) + +**Actions**: +- Regular drills maintain readiness +- Proactive threat hunting +- Continuous compliance monitoring +- Knowledge sharing with federation + +```json +{ + "type": 
"psi_phase_transition", + "from_phase": "citrinitas", + "to_phase": "rubedo", + "trigger": { + "event_type": "stability_achieved", + "reference": "phase-assessment-2025-12" + }, + "indicators": [ + "30_days_no_critical_incidents", + "slo_targets_met", + "drill_outcomes_excellent" + ], + "maturity_score": 0.92 +} +``` + +--- + +## Transmutation (Tem Pattern) + +Transmutation converts negative events into defensive capabilities. + +### The Process + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ PRIMA MATERIA │ +│ (Raw Input: Incident/Vuln/Threat) │ +└─────────────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────────┐ +│ STEP 1: EXTRACT │ +│ • Identify IOCs (IPs, domains, hashes, TTPs) │ +│ • Document attack chain │ +│ • Capture forensic artifacts │ +└─────────────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────────┐ +│ STEP 2: DISSOLVE (Solve) │ +│ • Break down into atomic components │ +│ • Normalize to standard formats (STIX, Sigma) │ +│ • Map to frameworks (MITRE ATT&CK) │ +└─────────────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────────┐ +│ STEP 3: PURIFY │ +│ • Remove false positives │ +│ • Validate against known-good │ +│ • Test in isolated environment │ +└─────────────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────────┐ +│ STEP 4: COAGULATE (Coagula) │ +│ • Generate detection rules (Sigma, YARA, Suricata) │ +│ • Create response playbooks │ +│ • Deploy to production │ +└─────────────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────────┐ +│ STEP 5: SEAL │ +│ • Emit transmutation receipt │ +│ • Link prima materia to philosopher's stone │ +│ • Anchor evidence chain │ +└─────────────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────────┐ +│ PHILOSOPHER'S STONE │ +│ (Output: Defensive Capability) │ +└─────────────────────────────────────────────────────────────────┘ +``` + +### Transmutation Contract + +```json +{ + "transmutation_id": "psi-transmute-2025-12-06-001", + "title": "SSH Brute Force to Detection Capability", + "initiated_by": "did:vm:human:sovereign", + "initiated_at": "2025-12-06T10:00:00Z", + "input_material": { + "type": "security_incident", + "reference": "INC-2025-12-001", + "prima_materia_hash": "blake3:incident_evidence..." 
+ }, + "target_phase": "citrinitas", + "transmutation_steps": [ + { + "step_id": "step-1-extract", + "name": "Extract Prima Materia", + "action": "extract_iocs", + "expected_output": "cases/psi/transmute-001/extracted_iocs.json" + }, + { + "step_id": "step-2-dissolve", + "name": "Dissolve (Solve)", + "action": "normalize_to_stix", + "expected_output": "cases/psi/transmute-001/stix_bundle.json" + }, + { + "step_id": "step-3-purify", + "name": "Purify", + "action": "validate_iocs", + "expected_output": "cases/psi/transmute-001/validated_iocs.json" + }, + { + "step_id": "step-4-coagulate", + "name": "Coagulate", + "action": "generate_sigma_rules", + "expected_output": "cases/psi/transmute-001/sigma_rules/" + }, + { + "step_id": "step-5-seal", + "name": "Seal", + "action": "emit_receipt", + "expected_output": "receipts/psi/psi_events.jsonl" + } + ], + "witnesses_required": ["brick-01", "brick-02"], + "success_criteria": { + "rules_deployed": true, + "detection_verified": true, + "no_false_positives_24h": true + } +} +``` + +### Transmutation Receipt + +```json +{ + "type": "psi_transmutation", + "transmutation_id": "psi-transmute-2025-12-06-001", + "timestamp": "2025-12-06T16:00:00Z", + "input_material": { + "type": "security_incident", + "reference": "INC-2025-12-001", + "prima_materia_hash": "blake3:abc123..." + }, + "output_capability": { + "type": "detection_rules", + "reference": "sigma-rule-ssh-brute-force-v2", + "philosophers_stone_hash": "blake3:def456..." + }, + "transformation_summary": { + "iocs_extracted": 47, + "rules_generated": 3, + "playbooks_updated": 1, + "ttps_mapped": ["T1110.001", "T1021.004"] + }, + "alchemical_phase": "citrinitas", + "witnesses": [ + { + "node": "did:vm:node:brick-01", + "witnessed_at": "2025-12-06T15:55:00Z", + "signature": "z58D..." + } + ], + "tags": ["psi", "transmutation", "ssh", "brute-force"], + "root_hash": "blake3:transmute..." +} +``` + +--- + +## Resonance + +Resonance measures cross-system synchronization and harmony. + +### Resonance Factors + +| Factor | Weight | Measurement | +|--------|--------|-------------| +| Anchor Health | 0.25 | Time since last anchor, failure rate | +| Receipt Consistency | 0.20 | Hash chain integrity, no gaps | +| Mesh Connectivity | 0.20 | Node health, route availability | +| Phase Alignment | 0.15 | All subsystems in compatible phases | +| Federation Sync | 0.10 | Witness success rate | +| Governance Compliance | 0.10 | No active violations | + +### Harmony Score + +``` +harmony_score = Σ(factor_weight × factor_score) / Σ(factor_weight) +``` + +**Interpretation**: +- 0.90 - 1.00: **Rubedo** — Full sovereignty +- 0.70 - 0.89: **Citrinitas** — Optimizing +- 0.50 - 0.69: **Albedo** — Stabilizing +- 0.00 - 0.49: **Nigredo** — Crisis mode + +### Resonance Receipt + +```json +{ + "type": "psi_resonance", + "resonance_id": "resonance-2025-12-06-12", + "timestamp": "2025-12-06T12:00:00Z", + "harmony_score": 0.94, + "factors": { + "anchor_health": 1.0, + "receipt_consistency": 0.98, + "mesh_connectivity": 0.95, + "phase_alignment": 0.90, + "federation_sync": 0.85, + "governance_compliance": 1.0 + }, + "current_phase": "rubedo", + "subsystem_phases": { + "guardian": "rubedo", + "oracle": "rubedo", + "mesh": "citrinitas", + "treasury": "rubedo" + }, + "dissonance_notes": [ + "mesh slightly below harmony due to pending node upgrade" + ], + "tags": ["psi", "resonance", "harmony"], + "root_hash": "blake3:resonance..." +} +``` + +--- + +## Integration + +Integration crystallizes learnings into permanent capability. 
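+
+As a minimal sketch (struct and function names assumed for illustration), the crystallization decision can be reduced to a pure check over the `test_results` fields carried in the integration receipt shown below:
+
+```rust
+/// Hypothetical helper: decide whether an integration may be marked
+/// `crystallization_complete`. Field names mirror the receipt example
+/// below; the thresholds are illustrative, not a shipped VaultMesh policy.
+pub struct IntegrationTestResults {
+    pub true_positives: u32,
+    pub false_positives: u32,
+    pub detection_rate: f64,
+}
+
+pub fn crystallization_complete(r: &IntegrationTestResults) -> bool {
+    // Require at least one confirmed detection, zero false positives,
+    // and a full detection rate before declaring the capability permanent.
+    r.true_positives > 0 && r.false_positives == 0 && r.detection_rate >= 1.0
+}
+```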
+ +### Integration Types + +| Type | Description | Example | +|------|-------------|---------| +| `rule_integration` | Detection rule becomes standard | Sigma rule added to baseline | +| `playbook_integration` | Response procedure formalized | IR playbook updated | +| `capability_integration` | New system feature | Auto-containment enabled | +| `knowledge_integration` | Documentation updated | Threat model revised | +| `training_integration` | Team skill acquired | Drill proficiency achieved | + +### Integration Receipt + +```json +{ + "type": "psi_integration", + "integration_id": "integration-2025-12-06-001", + "timestamp": "2025-12-06T18:00:00Z", + "integration_type": "rule_integration", + "source": { + "transmutation_id": "psi-transmute-2025-12-06-001", + "capability_hash": "blake3:def456..." + }, + "target": { + "system": "detection_pipeline", + "component": "sigma_rules", + "version": "v2.1.0" + }, + "integration_proof": { + "deployed_at": "2025-12-06T17:30:00Z", + "verified_by": ["brick-01", "brick-02"], + "test_results": { + "true_positives": 5, + "false_positives": 0, + "detection_rate": 1.0 + } + }, + "crystallization_complete": true, + "tags": ["psi", "integration", "detection"], + "root_hash": "blake3:integration..." +} +``` + +--- + +## Oracle Insights + +Significant findings from the Compliance Oracle that warrant receipting. + +### Insight Types + +| Type | Description | +|------|-------------| +| `compliance_gap` | New gap identified | +| `regulatory_change` | Regulation updated | +| `risk_elevation` | Risk level increased | +| `deadline_approaching` | Compliance deadline near | +| `cross_reference` | Connection between frameworks | + +### Insight Receipt + +```json +{ + "type": "psi_oracle_insight", + "insight_id": "insight-2025-12-06-001", + "timestamp": "2025-12-06T14:00:00Z", + "insight_type": "compliance_gap", + "severity": "high", + "frameworks": ["AI_Act", "GDPR"], + "finding": { + "summary": "Model training data lineage documentation incomplete for Annex IV requirements", + "affected_articles": ["AI_Act.Annex_IV.2.b", "GDPR.Art_30"], + "current_state": "partial_documentation", + "required_state": "complete_lineage_from_source_to_model" + }, + "recommended_actions": [ + "Implement data provenance tracking", + "Document all training data sources", + "Create lineage visualization" + ], + "deadline": "2026-08-02T00:00:00Z", + "confidence": 0.92, + "oracle_query_ref": "oracle-answer-2025-12-06-4721", + "tags": ["psi", "oracle", "insight", "ai_act", "gdpr"], + "root_hash": "blake3:insight..." +} +``` + +--- + +## Magnum Opus Dashboard + +The Magnum Opus is the great work — the continuous refinement toward sovereignty. 
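+
+The `Harmony` figure shown on the dashboard below is the weighted mean defined in the Resonance section above. A minimal sketch of that computation, assuming a free function over `(weight, score)` pairs (the name is illustrative):
+
+```rust
+/// Hypothetical sketch of the harmony score from the Resonance section:
+/// harmony_score = Σ(factor_weight × factor_score) / Σ(factor_weight).
+/// Weights follow the Resonance Factors table; scores are in [0.0, 1.0].
+pub fn harmony_score(factors: &[(f64, f64)]) -> f64 {
+    let weighted: f64 = factors.iter().map(|(w, s)| w * s).sum();
+    let total_weight: f64 = factors.iter().map(|(w, _)| w).sum();
+    if total_weight == 0.0 { 0.0 } else { weighted / total_weight }
+}
+
+// Example call with the documented weights:
+// harmony_score(&[(0.25, anchor), (0.20, receipts), (0.20, mesh),
+//                 (0.15, phase), (0.10, federation), (0.10, governance)])
+```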
+
+### Dashboard Metrics
+
+```
+┌─────────────────────────────────────────────────────────────────┐
+│                       MAGNUM OPUS STATUS                        │
+├─────────────────────────────────────────────────────────────────┤
+│                                                                 │
+│  Current Phase: RUBEDO 🜂          Harmony: 0.94                │
+│  Time in Phase: 47 days                                         │
+│                                                                 │
+│  ┌─────────────────────────────────────────────────────────┐    │
+│  │ Phase History (90 days)                                 │    │
+│  │ ████████████░░░░████████████████████████████████████████│    │
+│  │ NNNAAACCCCCNNAACCCCCCCCCCRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRR│    │
+│  └─────────────────────────────────────────────────────────┘    │
+│                                                                 │
+│  Transmutations              Integrations                       │
+│  ├─ Active: 2                ├─ This Month: 7                   │
+│  ├─ Completed: 34            ├─ Total: 156                      │
+│  └─ Success Rate: 94%        └─ Crystallized: 142               │
+│                                                                 │
+│  Resonance Factors                                              │
+│  ├─ Anchor Health:      ████████████████████ 1.00               │
+│  ├─ Receipt Integrity:  ███████████████████░ 0.98               │
+│  ├─ Mesh Connectivity:  ███████████████████░ 0.95               │
+│  ├─ Phase Alignment:    ██████████████████░░ 0.90               │
+│  ├─ Federation Sync:    █████████████████░░░ 0.85               │
+│  └─ Governance:         ████████████████████ 1.00               │
+│                                                                 │
+│  Recent Oracle Insights: 3 (1 high severity)                    │
+│  Next Anchor: 47 min                                            │
+│  Last Incident: 47 days ago                                     │
+│                                                                 │
+└─────────────────────────────────────────────────────────────────┘
+```
+
+### CLI Commands
+
+```bash
+# Phase status
+vm-psi phase current
+vm-psi phase history --days 90
+
+# Transmutation
+vm-psi transmute start --input INC-2025-12-001 --title "SSH Brute Force"
+vm-psi transmute status transmute-2025-12-001
+vm-psi transmute complete transmute-2025-12-001 --step coagulate
+
+# Resonance
+vm-psi resonance current
+vm-psi resonance history --days 30
+
+# Integration
+vm-psi integrate --source transmute-2025-12-001 --target detection_pipeline
+
+# Opus
+vm-psi opus status
+vm-psi opus report --format pdf --output opus-report.pdf
+```
diff --git a/docs/skill/CODE_TEMPLATES.md b/docs/skill/CODE_TEMPLATES.md
new file mode 100644
index 0000000..ab8ce9b
--- /dev/null
+++ b/docs/skill/CODE_TEMPLATES.md
@@ -0,0 +1,693 @@
+# VaultMesh Code Templates
+
+## Rust Templates
+
+### Core Types
+
+```rust
+// Receipt Header
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ReceiptHeader {
+    pub receipt_type: String,
+    pub timestamp: DateTime<Utc>,
+    pub root_hash: String,
+    pub tags: Vec<String>,
+}
+
+// Receipt Metadata
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ReceiptMeta {
+    pub scroll: Scroll,
+    pub sequence: u64,
+    pub anchor_epoch: Option<u64>,
+    pub proof_path: Option<String>,
+}
+
+// Generic Receipt
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Receipt<T> {
+    #[serde(flatten)]
+    pub header: ReceiptHeader,
+    #[serde(flatten)]
+    pub meta: ReceiptMeta,
+    #[serde(flatten)]
+    pub body: T,
+}
+
+// Scroll Enum
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
+#[serde(rename_all = "snake_case")]
+pub enum Scroll {
+    Drills,
+    Compliance,
+    Guardian,
+    Treasury,
+    Mesh,
+    OffSec,
+    Identity,
+    Observability,
+    Automation,
+    PsiField,
+    Federation,
+    Governance,
+}
+
+impl Scroll {
+    pub fn jsonl_path(&self) -> &'static str {
+        match self {
+            Scroll::Drills => "receipts/drills/drill_runs.jsonl",
+            Scroll::Compliance => "receipts/compliance/oracle_answers.jsonl",
+            Scroll::Guardian => "receipts/guardian/anchor_events.jsonl",
+            Scroll::Treasury => "receipts/treasury/treasury_events.jsonl",
+            Scroll::Mesh => "receipts/mesh/mesh_events.jsonl",
+            Scroll::OffSec => "receipts/offsec/offsec_events.jsonl",
+            Scroll::Identity => "receipts/identity/identity_events.jsonl",
+            Scroll::Observability => "receipts/observability/observability_events.jsonl",
+            Scroll::Automation => "receipts/automation/automation_events.jsonl",
+            Scroll::PsiField => "receipts/psi/psi_events.jsonl",
+            Scroll::Federation => "receipts/federation/federation_events.jsonl",
+            Scroll::Governance => "receipts/governance/governance_events.jsonl",
+        }
+    }
+
+    pub fn root_file(&self) -> &'static str {
+        match self {
+            Scroll::Drills => "ROOT.drills.txt",
+            Scroll::Compliance => "ROOT.compliance.txt",
+            Scroll::Guardian => "ROOT.guardian.txt",
+            Scroll::Treasury => "ROOT.treasury.txt",
+            Scroll::Mesh => "ROOT.mesh.txt",
+            Scroll::OffSec => "ROOT.offsec.txt",
+            Scroll::Identity => "ROOT.identity.txt",
+            Scroll::Observability => "ROOT.observability.txt",
+            Scroll::Automation => "ROOT.automation.txt",
+            Scroll::PsiField => "ROOT.psi.txt",
+            Scroll::Federation => "ROOT.federation.txt",
+            Scroll::Governance => "ROOT.governance.txt",
+        }
+    }
+}
+```
+
+### DID Types
+
+```rust
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
+pub struct Did(String);
+
+impl Did {
+    pub fn new(did_type: DidType, identifier: &str) -> Self {
+        Did(format!("did:vm:{}:{}", did_type.as_str(), identifier))
+    }
+
+    pub fn parse(s: &str) -> Result<Self, DidParseError> {
+        if !s.starts_with("did:vm:") {
+            return Err(DidParseError::InvalidPrefix);
+        }
+        Ok(Did(s.to_string()))
+    }
+
+    pub fn as_str(&self) -> &str {
+        &self.0
+    }
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum DidType {
+    Node,
+    Human,
+    Agent,
+    Service,
+    Mesh,
+}
+
+impl DidType {
+    pub fn as_str(&self) -> &'static str {
+        match self {
+            DidType::Node => "node",
+            DidType::Human => "human",
+            DidType::Agent => "agent",
+            DidType::Service => "service",
+            DidType::Mesh => "mesh",
+        }
+    }
+}
+```
+
+### Hash Utilities
+
+```rust
+use blake3::Hasher;
+
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
+pub struct VmHash(String);
+
+impl VmHash {
+    pub fn blake3(data: &[u8]) -> Self {
+        let hash = blake3::hash(data);
+        VmHash(format!("blake3:{}", hash.to_hex()))
+    }
+
+    pub fn from_json<T: Serialize>(value: &T) -> Result<Self, serde_json::Error> {
+        let json = serde_json::to_vec(value)?;
+        Ok(Self::blake3(&json))
+    }
+
+    pub fn hex(&self) -> &str {
+        self.0.strip_prefix("blake3:").unwrap_or(&self.0)
+    }
+
+    pub fn as_str(&self) -> &str {
+        &self.0
+    }
+}
+
+pub fn merkle_root(hashes: &[VmHash]) -> VmHash {
+    if hashes.is_empty() {
+        return VmHash::blake3(b"empty");
+    }
+    if hashes.len() == 1 {
+        return hashes[0].clone();
+    }
+
+    let mut current_level: Vec<VmHash> = hashes.to_vec();
+
+    while current_level.len() > 1 {
+        let mut next_level = Vec::new();
+        for chunk in current_level.chunks(2) {
+            let combined = if chunk.len() == 2 {
+                format!("{}{}", chunk[0].hex(), chunk[1].hex())
+            } else {
+                format!("{}{}", chunk[0].hex(), chunk[0].hex())
+            };
+            next_level.push(VmHash::blake3(combined.as_bytes()));
+        }
+        current_level = next_level;
+    }
+
+    current_level.remove(0)
+}
+```
+
+### Engine Template
+
+```rust
+// Template for new engine implementation
+pub struct MyEngine {
+    db: DatabasePool,
+    receipts_path: PathBuf,
+}
+
+impl MyEngine {
+    pub fn new(db: DatabasePool, receipts_path: PathBuf) -> Self {
+        MyEngine { db, receipts_path }
+    }
+
+    pub async fn create_contract(&self, params: CreateParams) -> Result<Contract, EngineError> {
+        let contract = Contract {
+            id: generate_id("contract"),
+            title: params.title,
+            created_at: Utc::now(),
+            // ... domain-specific fields
+        };
+
+        // Store contract
+        self.store_contract(&contract).await?;
+
+        Ok(contract)
+    }
+
+    pub async fn execute(&mut self, contract_id: &str) -> Result<State, EngineError> {
+        let contract = self.load_contract(contract_id).await?;
+        let mut state = State::new(&contract);
+
+        // Execute steps
+        for step in &contract.steps {
+            state.execute_step(step).await?;
+        }
+
+        // Seal with receipt
+        let receipt = self.seal(&contract, &state).await?;
+
+        Ok(state)
+    }
+
+    async fn seal(&self, contract: &Contract, state: &State) -> Result<Receipt<MyReceipt>, EngineError> {
+        let receipt_body = MyReceipt {
+            contract_id: contract.id.clone(),
+            status: state.status.clone(),
+            // ... domain-specific fields
+        };
+
+        let root_hash = VmHash::from_json(&receipt_body)?;
+
+        let receipt = Receipt {
+            header: ReceiptHeader {
+                receipt_type: "my_receipt_type".to_string(),
+                timestamp: Utc::now(),
+                root_hash: root_hash.as_str().to_string(),
+                tags: vec!["my_engine".to_string()],
+            },
+            meta: ReceiptMeta {
+                scroll: Scroll::MyScroll,
+                sequence: 0,
+                anchor_epoch: None,
+                proof_path: None,
+            },
+            body: receipt_body,
+        };
+
+        self.append_receipt(&receipt).await?;
+
+        Ok(receipt)
+    }
+
+    async fn append_receipt(&self, receipt: &Receipt<MyReceipt>) -> Result<(), EngineError> {
+        let scroll_path = self.receipts_path.join(Scroll::MyScroll.jsonl_path());
+
+        let mut file = OpenOptions::new()
+            .create(true)
+            .append(true)
+            .open(&scroll_path)?;
+
+        let json = serde_json::to_string(receipt)?;
+        writeln!(file, "{}", json)?;
+
+        // Update Merkle root
+        self.update_merkle_root().await?;
+
+        Ok(())
+    }
+}
+```
+
+### Prometheus Metrics
+
+```rust
+use prometheus::{Counter, CounterVec, Histogram, HistogramVec, Gauge, GaugeVec, Opts, Registry};
+use lazy_static::lazy_static;
+
+lazy_static! {
+    pub static ref REGISTRY: Registry = Registry::new();
+
+    pub static ref RECEIPTS_TOTAL: CounterVec = CounterVec::new(
+        Opts::new("vaultmesh_receipts_total", "Total receipts by scroll"),
+        &["scroll", "type"]
+    ).unwrap();
+
+    pub static ref OPERATION_DURATION: HistogramVec = HistogramVec::new(
+        prometheus::HistogramOpts::new(
+            "vaultmesh_operation_duration_seconds",
+            "Operation duration"
+        ).buckets(vec![0.001, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0]),
+        &["operation"]
+    ).unwrap();
+
+    pub static ref ACTIVE_OPERATIONS: GaugeVec = GaugeVec::new(
+        Opts::new("vaultmesh_active_operations", "Active operations"),
+        &["type"]
+    ).unwrap();
+}
+
+pub fn register_metrics() {
+    REGISTRY.register(Box::new(RECEIPTS_TOTAL.clone())).unwrap();
+    REGISTRY.register(Box::new(OPERATION_DURATION.clone())).unwrap();
+    REGISTRY.register(Box::new(ACTIVE_OPERATIONS.clone())).unwrap();
+}
+```
+
+---
+
+## Python Templates
+
+### CLI Command Group
+
+```python
+import click
+import json
+from datetime import datetime
+from pathlib import Path
+
+@click.group()
+def my_engine():
+    """My Engine - Description"""
+    pass
+
+@my_engine.command("create")
+@click.option("--title", required=True, help="Title")
+@click.option("--config", type=click.Path(exists=True), help="Config file")
+def create(title: str, config: str):
+    """Create a new contract."""
+    contract_id = f"contract-{datetime.utcnow().strftime('%Y%m%d%H%M%S')}"
+
+    contract = {
+        "id": contract_id,
+        "title": title,
+        "created_at": datetime.utcnow().isoformat() + "Z",
+    }
+
+    if config:
+        with open(config) as f:
+            contract.update(json.load(f))
+
+    # Store contract
+    contract_path = Path(f"cases/my_engine/{contract_id}/contract.json")
+    contract_path.parent.mkdir(parents=True, exist_ok=True)
+    with open(contract_path, "w") as f:
+        json.dump(contract, f, indent=2)
+
+    click.echo(f"✓ Contract created: {contract_id}")
+
+@my_engine.command("execute")
+@click.argument("contract_id")
+def execute(contract_id: str):
+    """Execute a contract."""
+    # Load contract
+    contract_path = Path(f"cases/my_engine/{contract_id}/contract.json")
+    with open(contract_path) as f:
+        contract = json.load(f)
+
+    # Execute (implementation specific)
+    state = {"status": "completed"}
+
+    # Emit receipt
+    receipt = emit_receipt(
+        scroll="my_scroll",
+        receipt_type="my_receipt_type",
+        body={
+            "contract_id": contract_id,
+            "status": state["status"],
+        },
+        tags=["my_engine"]
+    )
+
+    click.echo(f"✓ Executed: {contract_id}")
+    click.echo(f"  Receipt: {receipt['root_hash'][:20]}...")
+
+@my_engine.command("query")
+@click.option("--status", help="Filter by status")
+@click.option("--from", "from_date", help="From date")
+@click.option("--to", "to_date", help="To date")
+@click.option("--format", "output_format", default="table", type=click.Choice(["table", "json", "csv"]))
+def query(status: str, from_date: str, to_date: str, output_format: str):
+    """Query receipts."""
+    filters = {}
+    if status:
+        filters["status"] = status
+    if from_date:
+        filters["from_date"] = from_date
+    if to_date:
+        filters["to_date"] = to_date
+
+    receipts = load_receipts("my_scroll", filters)
+
+    if output_format == "json":
+        click.echo(json.dumps(receipts, indent=2))
+    else:
+        click.echo(f"Found {len(receipts)} receipts")
+        for r in receipts:
+            click.echo(f"  {r.get('timestamp', '')[:19]} | {r.get('type', '')}")
+```
+
+### Receipt Utilities
+
+```python
+import json
+from datetime import datetime
+from pathlib import Path
+from typing import Optional
+
+from blake3 import blake3  # provided by the `blake3` package; stdlib hashlib has no blake3
+
+def emit_receipt(scroll: str, receipt_type: str, body: dict, tags: list[str]) -> dict:
+    """Create and emit a receipt to the appropriate scroll."""
+    receipt = {
+        "schema_version": "2.0.0",
+        "type": receipt_type,
+        "timestamp": datetime.utcnow().isoformat() + "Z",
+        "tags": tags,
+        **body
+    }
+
+    # Compute root hash
+    receipt_json = json.dumps(receipt, sort_keys=True)
+    root_hash = f"blake3:{blake3(receipt_json.encode()).hexdigest()}"
+    receipt["root_hash"] = root_hash
+
+    # Append to scroll
+    scroll_path = Path(f"receipts/{scroll}/{scroll}_events.jsonl")
+    scroll_path.parent.mkdir(parents=True, exist_ok=True)
+
+    with open(scroll_path, "a") as f:
+        f.write(json.dumps(receipt) + "\n")
+
+    # Update Merkle root
+    update_merkle_root(scroll)
+
+    return receipt
+
+def load_receipts(scroll: str, filters: Optional[dict] = None) -> list[dict]:
+    """Load and filter receipts from a scroll."""
+    scroll_path = Path(f"receipts/{scroll}/{scroll}_events.jsonl")
+
+    if not scroll_path.exists():
+        return []
+
+    receipts = []
+    with open(scroll_path) as f:
+        for line in f:
+            receipt = json.loads(line.strip())
+
+            if filters:
+                match = True
+                for key, value in filters.items():
+                    if key == "from_date":
+                        if receipt.get("timestamp", "") < value:
+                            match = False
+                    elif key == "to_date":
+                        if receipt.get("timestamp", "") > value:
+                            match = False
+                    elif key == "type":
+                        if receipt.get("type") not in (value if isinstance(value, list) else [value]):
+                            match = False
+                    elif receipt.get(key) != value:
+                        match = False
+
+                if match:
+                    receipts.append(receipt)
+            else:
+                receipts.append(receipt)
+
+    return receipts
+
+def update_merkle_root(scroll: str):
+    """Recompute and update Merkle root for a scroll."""
+    scroll_path = Path(f"receipts/{scroll}/{scroll}_events.jsonl")
+    root_file = Path(f"receipts/ROOT.{scroll}.txt")
+
+    if not scroll_path.exists():
+        root_file.write_text("blake3:empty")
+        return
+
+    hashes = []
+    with open(scroll_path) as f:
+        for line in f:
+            receipt = json.loads(line.strip())
+            hashes.append(receipt.get("root_hash", ""))
+
+    if not hashes:
+        root_file.write_text("blake3:empty")
+        return
+
+    # Simple merkle root (production would use proper tree)
+    combined = "".join(h.replace("blake3:", "") for h in hashes)
+    root = f"blake3:{blake3(combined.encode()).hexdigest()}"
+    root_file.write_text(root)
+
+def verify_receipt(receipt_hash: str, scroll: str) -> bool:
+    """Verify a receipt exists and is valid."""
+    receipts = load_receipts(scroll, {"root_hash": receipt_hash})
+    return len(receipts) > 0
+```
+
+### MCP Server Template
+
+```python
+from mcp.server import Server
+from mcp.types import Tool, TextContent
+import json
+
+server = Server("my-engine")
+
+@server.tool()
+async def my_operation(
+    param1: str,
+    param2: int = 10,
+) -> str:
+    """
+    Description of what this tool does.
+
+    Args:
+        param1: Description of param1
+        param2: Description of param2
+
+    Returns:
+        Description of return value
+    """
+    # Verify caller capabilities
+    caller = await get_caller_identity()
+    await verify_capability(caller, "required_capability")
+
+    # Perform operation
+    result = perform_operation(param1, param2)
+
+    # Emit receipt
+    await emit_tool_call_receipt(
+        tool="my_operation",
+        caller=caller,
+        params={"param1": param1, "param2": param2},
+        result_hash=result.hash,
+    )
+
+    return json.dumps(result.to_dict(), indent=2)
+
+@server.tool()
+async def my_query(
+    filter_param: str | None = None,
+    limit: int = 50,
+) -> str:
+    """
+    Query operation description.
+
+    Args:
+        filter_param: Optional filter
+        limit: Maximum results
+
+    Returns:
+        Query results
+    """
+    caller = await get_caller_identity()
+    await verify_capability(caller, "view_capability")
+
+    results = query_data(filter_param, limit)
+
+    return json.dumps([r.to_dict() for r in results], indent=2)
+
+def main():
+    import asyncio
+    from mcp.server.stdio import stdio_server
+
+    async def run():
+        async with stdio_server() as (read_stream, write_stream):
+            await server.run(
+                read_stream,
+                write_stream,
+                server.create_initialization_options(),
+            )
+
+    asyncio.run(run())
+
+if __name__ == "__main__":
+    main()
+```
+
+---
+
+## Property Test Templates
+
+### Rust (proptest)
+
+```rust
+use proptest::prelude::*;
+
+proptest! {
+    /// Receipts roundtrip through serialization
+    #[test]
+    fn receipt_roundtrip(receipt in arb_receipt()) {
+        let json = serde_json::to_string(&receipt)?;
+        let restored: Receipt<serde_json::Value> = serde_json::from_str(&json)?;
+        prop_assert_eq!(receipt.header.root_hash, restored.header.root_hash);
+    }
+
+    /// Hash is deterministic
+    #[test]
+    fn hash_deterministic(data in prop::collection::vec(any::<u8>(), 0..1000)) {
+        let hash1 = VmHash::blake3(&data);
+        let hash2 = VmHash::blake3(&data);
+        prop_assert_eq!(hash1, hash2);
+    }
+
+    /// Different data produces different hashes
+    #[test]
+    fn different_data_different_hash(
+        data1 in prop::collection::vec(any::<u8>(), 1..100),
+        data2 in prop::collection::vec(any::<u8>(), 1..100)
+    ) {
+        prop_assume!(data1 != data2);
+        let hash1 = VmHash::blake3(&data1);
+        let hash2 = VmHash::blake3(&data2);
+        prop_assert_ne!(hash1, hash2);
+    }
+}
+
+fn arb_receipt() -> impl Strategy<Value = Receipt<serde_json::Value>> {
+    (
+        "[a-z]{5,20}", // receipt_type
+        any::<i64>().prop_map(|ts| DateTime::from_timestamp(ts.rem_euclid(2_000_000_000), 0).unwrap()),
+        prop::collection::vec("[a-z]{3,10}", 0..5), // tags
+    ).prop_map(|(receipt_type, timestamp, tags)| {
+        Receipt {
+            header: ReceiptHeader {
+                receipt_type,
+                timestamp,
+                root_hash: "blake3:placeholder".to_string(),
+                tags,
+            },
+            meta: ReceiptMeta {
+                scroll: Scroll::Drills,
+                sequence: 0,
+                anchor_epoch: None,
+                proof_path: None,
+            },
+            body: serde_json::json!({"test": true}),
+        }
+    })
+}
+```
+
+### Python (hypothesis)
+
+```python
+from hypothesis import given, strategies as st
+import json
+
+from blake3 import blake3  # provided by the `blake3` package; stdlib hashlib has no blake3
+
+@given(st.dictionaries(st.text(min_size=1, max_size=20), st.text(max_size=100), max_size=10))
+def test_receipt_roundtrip(body):
+    """Receipts survive JSON roundtrip."""
+    receipt = emit_receipt("test", "test_type", body, ["test"])
+
+    json_str = json.dumps(receipt)
+    restored = json.loads(json_str)
+
+    assert receipt["root_hash"] == restored["root_hash"]
+    assert receipt["type"] == restored["type"]
+
+@given(st.binary(min_size=1, max_size=1000))
+def test_hash_deterministic(data):
+    """Hash is deterministic."""
+    hash1 = blake3(data).hexdigest()
+    hash2 = blake3(data).hexdigest()
+    assert hash1 == hash2
+
+@given(
+    st.binary(min_size=1, max_size=100),
+    st.binary(min_size=1, max_size=100)
+)
+def test_different_data_different_hash(data1, data2):
+    """Different data produces different hashes."""
+    if data1 == data2:
+        return  # Skip if same
+
+    hash1 = blake3(data1).hexdigest()
+    hash2 = blake3(data2).hexdigest()
+    assert hash1 != hash2
+```
diff --git a/docs/skill/ENGINE_SPECS.md b/docs/skill/ENGINE_SPECS.md
new file mode 100644
index 0000000..2d8ba9a
--- /dev/null
+++ b/docs/skill/ENGINE_SPECS.md
@@ -0,0 +1,315 @@
+# VaultMesh Engine Specifications
+
+## Receipt Types by Scroll
+
+### Drills
+| Type | When Emitted |
+|------|--------------|
+| `security_drill_run` | Drill completed |
+
+### Compliance
+| Type | When Emitted |
+|------|--------------|
+| `oracle_answer` | Compliance question answered |
+
+### Guardian
+| Type | When Emitted |
+|------|--------------|
+| `anchor_success` | Anchor cycle succeeded |
+| `anchor_failure` | Anchor cycle failed |
+| `anchor_divergence` | Root mismatch detected |
+
+### Treasury
+| Type | When Emitted |
+|------|--------------|
+| `treasury_credit` | Credit entry recorded |
+| `treasury_debit` | Debit entry recorded |
+| `treasury_settlement` | Multi-party settlement completed |
+| `treasury_reconciliation` | Periodic balance verification |
+
+### Mesh
+| Type | When Emitted |
+|------|--------------|
+| 
`mesh_node_join` | Node registered | +| `mesh_node_leave` | Node deregistered | +| `mesh_route_change` | Route added/removed/modified | +| `mesh_capability_grant` | Capability granted | +| `mesh_capability_revoke` | Capability revoked | +| `mesh_topology_snapshot` | Periodic topology capture | + +### OffSec +| Type | When Emitted | +|------|--------------| +| `offsec_incident` | Incident closed | +| `offsec_redteam` | Red team engagement closed | +| `offsec_vuln_discovery` | Vulnerability confirmed | +| `offsec_remediation` | Remediation verified | +| `offsec_threat_intel` | New IOC/TTP added | +| `offsec_forensic_snapshot` | Forensic capture taken | + +### Identity +| Type | When Emitted | +|------|--------------| +| `identity_did_create` | New DID registered | +| `identity_did_rotate` | Key rotation completed | +| `identity_credential_issue` | Credential issued | +| `identity_credential_revoke` | Credential revoked | +| `identity_auth_event` | Authentication attempt | +| `identity_capability_grant` | Capability granted | +| `identity_capability_exercise` | Capability used | + +### Observability +| Type | When Emitted | +|------|--------------| +| `obs_metric_anomaly` | Anomaly detected/resolved | +| `obs_log_alert` | Log-based alert triggered | +| `obs_trace_summary` | Critical operation traced | +| `obs_health_snapshot` | Daily health capture | +| `obs_slo_breach` | SLO target missed | +| `obs_capacity_event` | Resource threshold crossed | + +### Automation +| Type | When Emitted | +|------|--------------| +| `auto_workflow_run` | Workflow execution completed | +| `auto_scheduled_task` | Scheduled task executed | +| `auto_agent_action` | Agent took action | +| `auto_trigger_event` | External trigger received | +| `auto_approval_gate` | Approval gate resolved | +| `auto_error_recovery` | Error recovery completed | + +### PsiField +| Type | When Emitted | +|------|--------------| +| `psi_phase_transition` | Phase change | +| `psi_emergence_event` | Emergent behavior detected | +| `psi_transmutation` | Negative → capability transform | +| `psi_resonance` | Cross-system synchronization | +| `psi_integration` | Learning crystallized | +| `psi_oracle_insight` | Significant Oracle insight | + +### Federation +| Type | When Emitted | +|------|--------------| +| `fed_trust_proposal` | Trust proposal submitted | +| `fed_trust_established` | Federation agreement active | +| `fed_trust_revoked` | Federation terminated | +| `fed_witness_event` | Remote root witnessed | +| `fed_cross_anchor` | Remote root included in anchor | +| `fed_schema_sync` | Schema versions synchronized | + +### Governance +| Type | When Emitted | +|------|--------------| +| `gov_proposal` | Proposal submitted | +| `gov_vote` | Vote cast | +| `gov_ratification` | Proposal ratified | +| `gov_amendment` | Constitution amended | +| `gov_executive_order` | Executive order issued | +| `gov_violation` | Violation detected | +| `gov_enforcement` | Enforcement action taken | + +--- + +## Engine Contract Templates + +### Treasury Settlement Contract +```json +{ + "settlement_id": "settle-YYYY-MM-DD-NNN", + "title": "Settlement Title", + "initiated_by": "did:vm:node:portal-01", + "initiated_at": "ISO8601", + "parties": ["did:vm:node:...", "did:vm:node:..."], + "entries": [ + { + "entry_id": "entry-NNN", + "type": "debit|credit", + "account": "acct:vm:node:...:type", + "amount": 0.00, + "currency": "EUR", + "memo": "Description" + } + ], + "requires_signatures": ["node-id", "node-id"], + "settlement_type": 
"inter_node_resource|vendor_payment|..." +} +``` + +### Mesh Change Contract +```json +{ + "change_id": "mesh-change-YYYY-MM-DD-NNN", + "title": "Change Title", + "initiated_by": "did:vm:node:portal-01", + "initiated_at": "ISO8601", + "change_type": "node_expansion|route_update|...", + "operations": [ + { + "op_id": "op-NNN", + "operation": "node_join|route_add|capability_grant|...", + "target": "did:vm:node:...", + "config": {} + } + ], + "requires_approval": ["node-id"], + "rollback_on_failure": true +} +``` + +### OffSec Incident Contract +```json +{ + "case_id": "INC-YYYY-MM-NNN", + "case_type": "incident", + "title": "Incident Title", + "severity": "critical|high|medium|low", + "created_at": "ISO8601", + "phases": [ + { + "phase_id": "phase-N-name", + "name": "Triage|Containment|Eradication|Recovery", + "objectives": ["..."], + "checklist": ["..."] + } + ], + "assigned_responders": ["did:vm:human:..."], + "escalation_path": ["..."] +} +``` + +### Identity Operation Contract +```json +{ + "operation_id": "idop-YYYY-MM-DD-NNN", + "operation_type": "key_rotation_ceremony|...", + "title": "Operation Title", + "initiated_by": "did:vm:human:...", + "initiated_at": "ISO8601", + "target_did": "did:vm:node:...", + "steps": [ + { + "step_id": "step-N-name", + "action": "action_name", + "params": {} + } + ], + "rollback_on_failure": true +} +``` + +### Transmutation Contract +```json +{ + "transmutation_id": "psi-transmute-YYYY-MM-DD-NNN", + "title": "Transmutation Title", + "initiated_by": "did:vm:human:...", + "initiated_at": "ISO8601", + "input_material": { + "type": "security_incident|vulnerability|...", + "reference": "INC-YYYY-MM-NNN" + }, + "target_phase": "citrinitas", + "transmutation_steps": [ + { + "step_id": "step-N-name", + "name": "Step Name", + "action": "action_name", + "expected_output": "output_path" + } + ], + "witnesses_required": ["node-id", "node-id"], + "success_criteria": {} +} +``` + +--- + +## State Machine Transitions + +### Settlement Status +``` +draft → pending_signatures → executing → completed + ↘ disputed → resolved → completed + ↘ expired +``` + +### Incident Status +``` +reported → triaging → investigating → contained → eradicating → recovered → closed + ↘ false_positive → closed +``` + +### Mesh Change Status +``` +draft → pending_approval → in_progress → completed + ↘ partial_failure → rollback → rolled_back + ↘ failed → rollback → rolled_back +``` + +### Alchemical Phase +``` +nigredo → albedo → citrinitas → rubedo + ↑ │ + └──────────────────────────────┘ + (cycle continues) +``` + +--- + +## Capability Types + +| Capability | Description | Typical Holders | +|------------|-------------|-----------------| +| `anchor` | Submit roots to anchor backends | Guardian nodes | +| `storage` | Store receipts and artifacts | Infrastructure nodes | +| `compute` | Execute drills, run agents | BRICK nodes | +| `oracle` | Issue compliance answers | Oracle nodes | +| `admin` | Grant/revoke capabilities | Portal, Sovereign | +| `federate` | Establish cross-mesh trust | Portal | + +--- + +## Trust Levels (Federation) + +| Level | Name | Description | +|-------|------|-------------| +| 0 | `isolated` | No federation | +| 1 | `observe` | Read-only witness | +| 2 | `verify` | Mutual verification | +| 3 | `attest` | Cross-attestation | +| 4 | `integrate` | Shared scrolls | + +--- + +## Account Types (Treasury) + +| Type | Purpose | +|------|---------| +| `operational` | Day-to-day infrastructure spend | +| `reserve` | Long-term holdings, runway | +| `escrow` | Held pending 
settlement | +| `external` | Counterparty accounts | + +--- + +## Node Types (Mesh) + +| Type | Purpose | +|------|---------| +| `infrastructure` | BRICK servers, compute | +| `edge` | Mobile devices, field endpoints | +| `oracle` | Compliance oracle instances | +| `guardian` | Dedicated anchor/sentinel | +| `external` | Federated nodes | + +--- + +## Severity Levels + +| Level | Description | +|-------|-------------| +| `critical` | Active breach, data exfiltration | +| `high` | Confirmed attack, potential breach | +| `medium` | Suspicious activity, policy violation | +| `low` | Anomaly, informational | diff --git a/docs/skill/INFRASTRUCTURE.md b/docs/skill/INFRASTRUCTURE.md new file mode 100644 index 0000000..c839659 --- /dev/null +++ b/docs/skill/INFRASTRUCTURE.md @@ -0,0 +1,711 @@ +# VaultMesh Infrastructure Templates + +## Kubernetes Deployment + +### Namespace + +```yaml +apiVersion: v1 +kind: Namespace +metadata: + name: vaultmesh + labels: + app.kubernetes.io/name: vaultmesh + app.kubernetes.io/part-of: civilization-ledger + pod-security.kubernetes.io/enforce: restricted +``` + +### Generic Deployment Template + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: vaultmesh-{component} + namespace: vaultmesh + labels: + app.kubernetes.io/name: {component} + app.kubernetes.io/component: {role} + app.kubernetes.io/part-of: vaultmesh +spec: + replicas: {replicas} + selector: + matchLabels: + app.kubernetes.io/name: {component} + template: + metadata: + labels: + app.kubernetes.io/name: {component} + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9090" + prometheus.io/path: "/metrics" + spec: + serviceAccountName: vaultmesh-{component} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + fsGroup: 1000 + seccompProfile: + type: RuntimeDefault + containers: + - name: {component} + image: ghcr.io/vaultmesh/{component}:{version} + imagePullPolicy: IfNotPresent + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + ports: + - name: http + containerPort: {http_port} + protocol: TCP + - name: metrics + containerPort: 9090 + protocol: TCP + env: + - name: RUST_LOG + value: "info,vaultmesh=debug" + - name: CONFIG_PATH + value: "/config/{component}.toml" + - name: DATABASE_URL + valueFrom: + secretKeyRef: + name: vaultmesh-db-credentials + key: {component}-url + volumeMounts: + - name: config + mountPath: /config + readOnly: true + - name: receipts + mountPath: /data/receipts + - name: tmp + mountPath: /tmp + resources: + requests: + cpu: {cpu_request} + memory: {memory_request} + limits: + cpu: {cpu_limit} + memory: {memory_limit} + livenessProbe: + httpGet: + path: /health/live + port: http + initialDelaySeconds: 10 + periodSeconds: 10 + readinessProbe: + httpGet: + path: /health/ready + port: http + initialDelaySeconds: 5 + periodSeconds: 5 + volumes: + - name: config + configMap: + name: vaultmesh-{component}-config + - name: receipts + persistentVolumeClaim: + claimName: vaultmesh-receipts + - name: tmp + emptyDir: {} +``` + +### Service Template + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: vaultmesh-{component} + namespace: vaultmesh +spec: + selector: + app.kubernetes.io/name: {component} + ports: + - name: http + port: 80 + targetPort: http + - name: metrics + port: 9090 + targetPort: metrics + type: ClusterIP +``` + +### ConfigMap Template + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: vaultmesh-{component}-config + namespace: vaultmesh +data: + 
  {component}.toml: |
+    [server]
+    bind = "0.0.0.0:{port}"
+    metrics_bind = "0.0.0.0:9090"
+
+    [database]
+    max_connections = 20
+    min_connections = 5
+
+    [receipts]
+    base_path = "/data/receipts"
+
+    # Component-specific configuration
+```
+
+### PersistentVolumeClaim
+
+```yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: vaultmesh-receipts
+  namespace: vaultmesh
+spec:
+  accessModes:
+    - ReadWriteMany
+  storageClassName: nfs-csi
+  resources:
+    requests:
+      storage: 100Gi
+```
+
+### Ingress
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: vaultmesh-ingress
+  namespace: vaultmesh
+  annotations:
+    cert-manager.io/cluster-issuer: letsencrypt-prod
+    nginx.ingress.kubernetes.io/ssl-redirect: "true"
+    nginx.ingress.kubernetes.io/proxy-body-size: "50m"
+    # ingress-nginx rate limiting uses limit-rpm (requests per minute)
+    nginx.ingress.kubernetes.io/limit-rpm: "100"
+spec:
+  ingressClassName: nginx
+  tls:
+    - hosts:
+        - portal.vaultmesh.io
+        - guardian.vaultmesh.io
+        - oracle.vaultmesh.io
+      secretName: vaultmesh-tls
+  rules:
+    - host: portal.vaultmesh.io
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: vaultmesh-portal
+                port:
+                  name: http
+```
+
+---
+
+## Component Configurations
+
+### Portal
+
+```yaml
+# Deployment overrides
+replicas: 2
+http_port: 8080
+cpu_request: 100m
+memory_request: 256Mi
+cpu_limit: 1000m
+memory_limit: 1Gi
+```
+
+```toml
+# portal.toml
+[server]
+bind = "0.0.0.0:8080"
+metrics_bind = "0.0.0.0:9090"
+
+[database]
+max_connections = 20
+min_connections = 5
+
+[receipts]
+base_path = "/data/receipts"
+
+[scrolls]
+enabled = [
+  "Drills", "Compliance", "Guardian", "Treasury", "Mesh",
+  "OffSec", "Identity", "Observability", "Automation",
+  "PsiField", "Federation", "Governance"
+]
+
+[auth]
+jwt_issuer = "vaultmesh-portal"
+session_ttl_hours = 24
+```
+
+### Guardian
+
+```yaml
+# Deployment overrides
+replicas: 1  # Single replica for coordination
+strategy:
+  type: Recreate
+http_port: 8081
+cpu_request: 200m
+memory_request: 512Mi
+cpu_limit: 2000m
+memory_limit: 2Gi
+```
+
+```toml
+# guardian.toml
+[server]
+bind = "0.0.0.0:8081"
+metrics_bind = "0.0.0.0:9090"
+
+[proofchain]
+receipts_path = "/data/receipts"
+roots_path = "/data/receipts"
+
+[anchor]
+primary = "ethereum"
+interval_seconds = 3600
+min_receipts_threshold = 10
+
+[anchor.ethereum]
+rpc_url = "https://mainnet.infura.io/v3/${INFURA_PROJECT_ID}"
+contract_address = "0x..."
+chain_id = 1
+
+[anchor.ots]
+enabled = true
+calendar_urls = [
+  "https://a.pool.opentimestamps.org",
+  "https://b.pool.opentimestamps.org"
+]
+
+[sentinel]
+enabled = true
+alert_webhook = "http://alertmanager:9093/api/v2/alerts"
+```
+
+### Oracle
+
+```yaml
+# Deployment overrides
+replicas: 2
+http_port: 8082
+mcp_port: 8083
+cpu_request: 200m
+memory_request: 512Mi
+cpu_limit: 2000m
+memory_limit: 4Gi
+```
+
+```toml
+# oracle.toml
+[server]
+http_bind = "0.0.0.0:8082"
+mcp_bind = "0.0.0.0:8083"
+metrics_bind = "0.0.0.0:9090"
+
+[corpus]
+path = "/data/corpus"
+index_path = "/data/cache/index"
+supported_formats = ["docx", "pdf", "md", "txt"]
+
+[llm]
+primary_provider = "anthropic"
+primary_model = "claude-sonnet-4-20250514"
+fallback_provider = "openai"
+fallback_model = "gpt-4o"
+temperature = 0.1
+max_tokens = 4096
+
+[receipts]
+endpoint = "http://vaultmesh-portal/api/receipts/oracle"
+```
+
+---
+
+## Docker Compose (Development)
+
+```yaml
+version: "3.9"
+
+services:
+  portal:
+    build:
+      context: .
+      dockerfile: docker/portal/Dockerfile
+    ports:
+      - "8080:8080"
+      - "9090:9090"
+    environment:
+      - RUST_LOG=info,vaultmesh=debug
+      - VAULTMESH_CONFIG=/config/portal.toml
+      - DATABASE_URL=postgresql://vaultmesh:vaultmesh@postgres:5432/vaultmesh
+      - REDIS_URL=redis://redis:6379
+    volumes:
+      - ./config/portal.toml:/config/portal.toml:ro
+      - receipts:/data/receipts
+    # guardian and oracle gate on service_healthy, so portal needs a healthcheck
+    healthcheck:
+      test: ["CMD", "wget", "-qO-", "http://localhost:8080/health/live"]
+      interval: 5s
+      timeout: 3s
+      retries: 5
+    depends_on:
+      postgres:
+        condition: service_healthy
+      redis:
+        condition: service_started
+
+  guardian:
+    build:
+      context: .
+      dockerfile: docker/guardian/Dockerfile
+    ports:
+      - "8081:8081"
+    environment:
+      - RUST_LOG=info,guardian=debug
+      - GUARDIAN_CONFIG=/config/guardian.toml
+      - DATABASE_URL=postgresql://vaultmesh:vaultmesh@postgres:5432/vaultmesh
+    volumes:
+      - ./config/guardian.toml:/config/guardian.toml:ro
+      - receipts:/data/receipts
+      - guardian-state:/data/guardian
+    depends_on:
+      portal:
+        condition: service_healthy
+
+  oracle:
+    build:
+      context: .
+      dockerfile: docker/oracle/Dockerfile
+    ports:
+      - "8082:8082"
+      - "8083:8083"
+    environment:
+      - ORACLE_CONFIG=/config/oracle.toml
+      - OPENAI_API_KEY=${OPENAI_API_KEY}
+      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
+      - VAULTMESH_RECEIPT_ENDPOINT=http://portal:8080/api/receipts
+    volumes:
+      - ./config/oracle.toml:/config/oracle.toml:ro
+      - ./corpus:/data/corpus:ro
+    depends_on:
+      portal:
+        condition: service_healthy
+
+  postgres:
+    image: postgres:16-alpine
+    environment:
+      - POSTGRES_USER=vaultmesh
+      - POSTGRES_PASSWORD=vaultmesh
+      - POSTGRES_DB=vaultmesh
+    volumes:
+      - postgres-data:/var/lib/postgresql/data
+    healthcheck:
+      test: ["CMD-SHELL", "pg_isready -U vaultmesh"]
+      interval: 5s
+      timeout: 5s
+      retries: 5
+
+  redis:
+    image: redis:7-alpine
+    volumes:
+      - redis-data:/data
+    command: redis-server --appendonly yes
+
+  prometheus:
+    image: prom/prometheus:v2.47.0
+    ports:
+      - "9091:9090"
+    volumes:
+      - ./config/prometheus.yaml:/etc/prometheus/prometheus.yml:ro
+      - prometheus-data:/prometheus
+
+  grafana:
+    image: grafana/grafana:10.1.0
+    ports:
+      - "3000:3000"
+    environment:
+      - GF_SECURITY_ADMIN_PASSWORD=admin
+    volumes:
+      - ./config/grafana/provisioning:/etc/grafana/provisioning:ro
+      - grafana-data:/var/lib/grafana
+
+volumes:
+  receipts:
+  guardian-state:
+  postgres-data:
+  redis-data:
+  prometheus-data:
+  grafana-data:
+
+networks:
+  default:
+    name: vaultmesh
+```
+
+---
+
+## Dockerfile Templates
+
+### Rust Service
+
+```dockerfile
+# Build stage
+FROM rust:1.75-alpine AS builder
+
+RUN apk add --no-cache musl-dev openssl-dev openssl-libs-static
+
+WORKDIR /build
+COPY Cargo.toml Cargo.lock ./
+COPY src ./src
+
+RUN cargo build --release --target x86_64-unknown-linux-musl
+
+# Runtime stage
+FROM alpine:3.19
+
+RUN apk add --no-cache ca-certificates tzdata
+
+RUN adduser -D -u 1000 vaultmesh
+USER vaultmesh
+
+WORKDIR /app
+COPY --from=builder /build/target/x86_64-unknown-linux-musl/release/{binary} /app/
+
+EXPOSE 8080 9090
+
+ENTRYPOINT ["/app/{binary}"]
+```
+
+### Python Service
+
+```dockerfile
+FROM python:3.12-slim
+
+RUN useradd -m -u 1000 vaultmesh
+
+WORKDIR /app
+
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+
+COPY --chown=vaultmesh:vaultmesh . .
+ +USER vaultmesh + +EXPOSE 8080 9090 + +CMD ["python", "-m", "{module}"] +``` + +--- + +## Prometheus Rules + +```yaml +groups: + - name: vaultmesh.receipts + rules: + - alert: ReceiptWriteFailure + expr: rate(vaultmesh_receipt_write_errors_total[5m]) > 0 + for: 1m + labels: + severity: critical + annotations: + summary: "Receipt write failures detected" + + - alert: ReceiptRateAnomaly + expr: | + abs(rate(vaultmesh_receipts_total[5m]) - + avg_over_time(rate(vaultmesh_receipts_total[5m])[1h:5m])) + > 2 * stddev_over_time(rate(vaultmesh_receipts_total[5m])[1h:5m]) + for: 10m + labels: + severity: warning + annotations: + summary: "Unusual receipt rate" + + - name: vaultmesh.guardian + rules: + - alert: AnchorDelayed + expr: time() - vaultmesh_guardian_last_anchor_timestamp > 7200 + for: 5m + labels: + severity: warning + annotations: + summary: "Guardian anchor delayed" + + - alert: AnchorCriticallyDelayed + expr: time() - vaultmesh_guardian_last_anchor_timestamp > 14400 + for: 5m + labels: + severity: critical + annotations: + summary: "No anchor in over 4 hours" + + - alert: ProofChainDivergence + expr: vaultmesh_guardian_proofchain_divergence == 1 + for: 1m + labels: + severity: critical + annotations: + summary: "ProofChain divergence detected" + + - name: vaultmesh.governance + rules: + - alert: ConstitutionalViolation + expr: increase(vaultmesh_governance_violations_total[1h]) > 0 + for: 0m + labels: + severity: critical + annotations: + summary: "Constitutional violation detected" + + - alert: EmergencyActive + expr: vaultmesh_governance_emergency_active == 1 + for: 0m + labels: + severity: warning + annotations: + summary: "Emergency powers in effect" +``` + +--- + +## Kustomization + +### Base + +```yaml +# kubernetes/base/kustomization.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +namespace: vaultmesh + +resources: + - namespace.yaml + - rbac.yaml + - portal/ + - guardian/ + - oracle/ + - database/ + - storage/ + - ingress/ + +commonLabels: + app.kubernetes.io/part-of: vaultmesh + app.kubernetes.io/managed-by: kustomize +``` + +### Production Overlay + +```yaml +# kubernetes/overlays/production/kustomization.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +namespace: vaultmesh + +resources: + - ../../base + +patches: + - path: portal-resources.yaml + - path: guardian-resources.yaml + - path: oracle-resources.yaml + +configMapGenerator: + - name: vaultmesh-portal-config + behavior: merge + files: + - portal.toml=configs/portal-prod.toml + +replicas: + - name: vaultmesh-portal + count: 3 + - name: vaultmesh-oracle + count: 3 +``` + +--- + +## Terraform (Infrastructure) + +```hcl +# main.tf +terraform { + required_providers { + kubernetes = { + source = "hashicorp/kubernetes" + version = "~> 2.23" + } + helm = { + source = "hashicorp/helm" + version = "~> 2.11" + } + } +} + +resource "kubernetes_namespace" "vaultmesh" { + metadata { + name = "vaultmesh" + labels = { + "app.kubernetes.io/name" = "vaultmesh" + "app.kubernetes.io/part-of" = "civilization-ledger" + } + } +} + +resource "helm_release" "vaultmesh" { + name = "vaultmesh" + namespace = kubernetes_namespace.vaultmesh.metadata[0].name + chart = "./charts/vaultmesh" + + values = [ + file("values-${var.environment}.yaml") + ] + + set { + name = "portal.replicas" + value = var.portal_replicas + } + + set { + name = "guardian.anchor.ethereum.rpcUrl" + value = var.ethereum_rpc_url + } + + set_sensitive { + name = "secrets.anthropicApiKey" + value = var.anthropic_api_key + } +} + 
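+# Variables consumed by the release above; per-environment values typically
+# arrive via terraform.tfvars or -var flags (the defaults below are examples).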
+variable "environment" { + type = string + default = "production" +} + +variable "portal_replicas" { + type = number + default = 3 +} + +variable "ethereum_rpc_url" { + type = string +} + +variable "anthropic_api_key" { + type = string + sensitive = true +} +``` diff --git a/docs/skill/MCP_INTEGRATION.md b/docs/skill/MCP_INTEGRATION.md new file mode 100644 index 0000000..3b84399 --- /dev/null +++ b/docs/skill/MCP_INTEGRATION.md @@ -0,0 +1,493 @@ +# VaultMesh MCP Integration Patterns + +## Architecture + +``` +┌─────────────────────────────────────────────────────────────┐ +│ CLAUDE │ +└───────────────────────────┬─────────────────────────────────┘ + │ MCP Protocol + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ MCP GATEWAY │ +│ • Authentication (capability verification) │ +│ • Rate limiting │ +│ • Audit logging (all tool calls receipted) │ +│ • Constitutional compliance checking │ +└───────────────────────────┬─────────────────────────────────┘ + │ + ┌───────────────┼───────────────┐ + ▼ ▼ ▼ + ┌───────────┐ ┌───────────┐ ┌───────────┐ + │ Oracle │ │ Drills │ │ Mesh │ + │ Server │ │ Server │ │ Server │ + └───────────┘ └───────────┘ └───────────┘ +``` + +## Tool Categories + +### Read-Only Tools (Default Access) + +| Tool | Capability | Description | +|------|------------|-------------| +| `oracle_answer` | `oracle_query` | Ask compliance questions | +| `oracle_corpus_search` | `oracle_query` | Search compliance corpus | +| `drills_status` | `drills_view` | View drill status | +| `mesh_topology` | `mesh_view` | View mesh topology | +| `mesh_node_status` | `mesh_view` | View node status | +| `treasury_balance` | `treasury_view` | View balances | +| `guardian_anchor_status` | `guardian_view` | View anchor status | +| `guardian_verify_receipt` | `guardian_view` | Verify receipts | +| `identity_resolve_did` | `identity_view` | Resolve DIDs | +| `identity_whoami` | (any) | View own identity | +| `psi_phase_status` | `psi_view` | View phase status | +| `psi_opus_status` | `psi_view` | View opus status | +| `governance_constitution_summary` | `governance_view` | View constitution | +| `receipts_search` | `receipts_view` | Search receipts | +| `system_health` | `system_view` | View system health | + +### Write Tools (Elevated Access) + +| Tool | Capability | Description | +|------|------------|-------------| +| `drills_create` | `drills_create` | Create new drill | +| `drills_complete_stage` | `drills_execute` | Complete drill stage | +| `treasury_record_entry` | `treasury_write` | Record financial entry | +| `guardian_anchor_now` | `anchor` | Trigger anchor cycle | +| `psi_transmute` | `psi_transmute` | Start transmutation | + +## Tool Implementation Patterns + +### Basic Read Tool + +```python +@server.tool() +async def my_read_tool( + filter_param: str = None, + limit: int = 50, +) -> str: + """ + Description of what this tool does. + + Args: + filter_param: Optional filter + limit: Maximum results + + Returns: + Query results as JSON + """ + # Verify capability + caller = await get_caller_identity() + await verify_capability(caller, "my_view") + + # Perform query + results = await engine.query(filter_param, limit) + + return json.dumps([r.to_dict() for r in results], indent=2) +``` + +### Write Tool with Receipt + +```python +@server.tool() +async def my_write_tool( + param1: str, + param2: int, +) -> str: + """ + Description of write operation. 
+ + Args: + param1: First parameter + param2: Second parameter + + Returns: + Operation result as JSON + """ + # Verify elevated capability + caller = await get_caller_identity() + await verify_capability(caller, "my_write") + + # Perform operation + result = await engine.perform_operation(param1, param2) + + # Emit receipt for audit + await emit_tool_call_receipt( + tool="my_write_tool", + caller=caller, + params={"param1": param1, "param2": param2}, + result_hash=result.hash, + ) + + return json.dumps(result.to_dict(), indent=2) +``` + +### Tool with Constitutional Check + +```python +@server.tool() +async def sensitive_operation( + target: str, + action: str, +) -> str: + """ + Operation requiring constitutional compliance check. + """ + caller = await get_caller_identity() + await verify_capability(caller, "admin") + + # Check constitutional compliance BEFORE executing + compliance = await governance_engine.check_compliance( + action=action, + actor=caller, + target=target, + ) + + if not compliance.allowed: + return json.dumps({ + "error": "constitutional_violation", + "violated_articles": compliance.violated_articles, + "message": compliance.message, + }, indent=2) + + # Execute if compliant + result = await engine.execute(target, action) + + await emit_tool_call_receipt( + tool="sensitive_operation", + caller=caller, + params={"target": target, "action": action}, + result_hash=result.hash, + ) + + return json.dumps(result.to_dict(), indent=2) +``` + +## Tool Call Receipt + +Every MCP tool call is receipted: + +```json +{ + "type": "mcp_tool_call", + "call_id": "mcp-call-2025-12-06-001", + "timestamp": "2025-12-06T14:30:00Z", + "caller": "did:vm:agent:claude-session-abc123", + "tool": "oracle_answer", + "params_hash": "blake3:params...", + "result_hash": "blake3:result...", + "duration_ms": 1250, + "capability_used": "oracle_query", + "session_id": "session-xyz789", + "tags": ["mcp", "oracle", "tool-call"], + "root_hash": "blake3:aaa111..." 
+} +``` + +## Authentication + +### Session Identity + +```python +async def get_caller_identity() -> str: + """Get the DID of the current MCP caller.""" + session = get_current_session() + + if session.authenticated_did: + return session.authenticated_did + + # Anonymous callers get session-scoped agent DID + return f"did:vm:agent:mcp-session-{session.id}" +``` + +### Capability Verification + +```python +async def verify_capability(caller: str, capability: str) -> bool: + """Verify the caller has the required capability.""" + has_cap = await identity_engine.check_capability(caller, capability) + + if not has_cap: + raise PermissionError( + f"Caller {caller} lacks capability: {capability}" + ) + + # Log capability exercise + await identity_engine.log_capability_exercise( + caller=caller, + capability=capability, + action="mcp_tool_call", + ) + + return True +``` + +## Rate Limiting + +```python +class RateLimiter: + def __init__(self): + self.limits = { + "oracle_answer": (10, timedelta(minutes=1)), + "guardian_anchor_now": (5, timedelta(hours=1)), + "treasury_record_entry": (100, timedelta(hours=1)), + "default": (60, timedelta(minutes=1)), + } + + async def check(self, caller: str, tool: str) -> bool: + key = f"{caller}:{tool}" + limit, window = self.limits.get(tool, self.limits["default"]) + + # Check against limit + current_count = await self.get_count(key, window) + if current_count >= limit: + raise RateLimitExceeded( + f"Rate limit exceeded: {limit} per {window}" + ) + + await self.increment(key) + return True +``` + +## Claude Desktop Configuration + +### config.json + +```json +{ + "mcpServers": { + "vaultmesh": { + "command": "python", + "args": ["-m", "vaultmesh_mcp.server"], + "env": { + "VAULTMESH_CONFIG": "/path/to/config.toml", + "VAULTMESH_IDENTITY": "did:vm:agent:claude-desktop" + } + } + } +} +``` + +### Capability Configuration + +```toml +# config.toml +[mcp.capabilities] +default_capabilities = [ + "oracle_query", + "drills_view", + "mesh_view", + "treasury_view", + "guardian_view", + "identity_view", + "psi_view", + "governance_view", + "receipts_view", + "system_view", +] + +elevated_capabilities = [ + "drills_create", + "drills_execute", + "treasury_write", + "anchor", + "psi_transmute", +] + +[mcp.rate_limits] +oracle_answer = { limit = 10, window_minutes = 1 } +guardian_anchor_now = { limit = 5, window_hours = 1 } +default = { limit = 60, window_minutes = 1 } + +[mcp.audit] +receipt_all_calls = true +log_params = true +log_results = false +``` + +## Example Interactions + +### Compliance Query + +``` +Human: What's our compliance status for AI Act Annex IV? + +Claude: [Calls oracle_answer] + +Based on the Oracle's assessment: + +**Compliance Status: Partial (73%)** + +Key findings: +- Technical documentation: ✅ Compliant +- Risk management: ⚠️ Needs update +- Data governance: ✅ Compliant +- Human oversight: ⚠️ Documentation incomplete +``` + +### Security Drill + +``` +Human: Start a drill for Kubernetes lateral movement + +Claude: [Calls drills_create] + +Created drill: drill-1764691432 + +**Stages:** +1. Initial Access Simulation +2. Lateral Movement Attempt +3. Detection Validation +4. Response Execution + +Ready to begin? +``` + +### System Health + +``` +Human: How's the system? 
+ +Claude: [Calls system_health] + +**VaultMesh Health: 🟢 Healthy** + +- Nodes: 5 active +- Last anchor: 47 min ago +- Phase: RUBEDO 🜂 +- Receipts today: 34 +``` + +## Server Entry Point + +```python +# vaultmesh_mcp/server.py + +from mcp.server import Server +from mcp.server.stdio import stdio_server + +server = Server("vaultmesh") + +# Register all tools +from .tools import ( + oracle_tools, + drills_tools, + mesh_tools, + treasury_tools, + guardian_tools, + identity_tools, + psi_tools, + governance_tools, +) + +def main(): + import asyncio + + async def run(): + async with stdio_server() as (read, write): + await server.run(read, write, server.create_initialization_options()) + + asyncio.run(run()) + +if __name__ == "__main__": + main() +``` + +## Custom VaultMesh Nodes for n8n + +When integrating with n8n workflows: + +```javascript +// VaultMesh Receipt Emit Node +{ + name: 'vaultmesh-receipt-emit', + displayName: 'VaultMesh Receipt', + description: 'Emit a receipt to VaultMesh', + properties: [ + { + displayName: 'Scroll', + name: 'scroll', + type: 'options', + options: [ + { name: 'Automation', value: 'automation' }, + { name: 'Compliance', value: 'compliance' }, + // ... + ], + }, + { + displayName: 'Receipt Type', + name: 'receiptType', + type: 'string', + }, + { + displayName: 'Body', + name: 'body', + type: 'json', + }, + { + displayName: 'Tags', + name: 'tags', + type: 'string', + description: 'Comma-separated tags', + }, + ], + async execute() { + const scroll = this.getNodeParameter('scroll', 0); + const receiptType = this.getNodeParameter('receiptType', 0); + const body = this.getNodeParameter('body', 0); + const tags = this.getNodeParameter('tags', 0).split(','); + + const receipt = await vaultmesh.emitReceipt({ + scroll, + receiptType, + body, + tags, + }); + + return [{ json: receipt }]; + }, +} +``` + +## Error Handling + +```python +@server.tool() +async def robust_tool(param: str) -> str: + """Tool with comprehensive error handling.""" + try: + caller = await get_caller_identity() + await verify_capability(caller, "required_cap") + + result = await engine.operation(param) + + return json.dumps(result.to_dict(), indent=2) + + except PermissionError as e: + return json.dumps({ + "error": "permission_denied", + "message": str(e), + "required_capability": "required_cap", + }, indent=2) + + except RateLimitExceeded as e: + return json.dumps({ + "error": "rate_limit_exceeded", + "message": str(e), + "retry_after_seconds": e.retry_after, + }, indent=2) + + except ConstitutionalViolation as e: + return json.dumps({ + "error": "constitutional_violation", + "violated_axioms": e.axioms, + "message": str(e), + }, indent=2) + + except Exception as e: + logger.error(f"Tool error: {e}") + return json.dumps({ + "error": "internal_error", + "message": "An unexpected error occurred", + }, indent=2) +``` diff --git a/docs/skill/OPERATIONS.md b/docs/skill/OPERATIONS.md new file mode 100644 index 0000000..368dea6 --- /dev/null +++ b/docs/skill/OPERATIONS.md @@ -0,0 +1,537 @@ +# VaultMesh Operations Guide + +## Daily Operations + +### Morning Health Check + +```bash +#!/bin/bash +# scripts/morning-check.sh + +echo "=== VaultMesh Morning Health Check ===" +echo "Date: $(date -u +%Y-%m-%dT%H:%M:%SZ)" + +# 1. System health +echo -e "\n1. System Health" +vm-cli system health + +# 2. Guardian status +echo -e "\n2. Guardian Status" +vm-guardian anchor-status + +# 3. Phase status +echo -e "\n3. Current Phase" +vm-psi phase current + +# 4. Overnight receipts +echo -e "\n4. 
Receipts (last 12h)" +vm-cli receipts count --since 12h + +# 5. Any violations +echo -e "\n5. Governance Violations" +vm-gov violations list --since 24h --severity high,critical + +# 6. Federation health +echo -e "\n6. Federation Status" +vm-federation health --all-peers + +echo -e "\n=== Check Complete ===" +``` + +### Anchor Monitoring + +```bash +# Check anchor status +vm-guardian anchor-status + +# View anchor history +vm-guardian anchor-history --last 24h + +# Trigger manual anchor if needed +vm-guardian anchor-now --wait + +# Verify specific receipt +vm-guardian verify-receipt blake3:abc123... --scroll Compliance +``` + +### Receipt Queries + +```bash +# Count receipts by scroll +vm-cli receipts count --by-scroll + +# Search receipts +vm-cli receipts search --scroll Drills --from 2025-12-01 --to 2025-12-06 + +# Export receipts +vm-cli receipts export --scroll Compliance --format csv --output compliance.csv + +# Verify integrity +vm-guardian verify-all --scroll all +``` + +--- + +## Common Tasks + +### Add New Node to Mesh + +```bash +# 1. Create DID for new node +vm-identity did create --type node --id new-node-01 + +# 2. Issue node credential +vm-identity credential issue \ + --type VaultMeshNodeCredential \ + --subject did:vm:node:new-node-01 \ + --issuer did:vm:node:portal-01 + +# 3. Add to mesh +vm-mesh node add \ + --did did:vm:node:new-node-01 \ + --endpoint https://new-node-01.vaultmesh.io \ + --type infrastructure + +# 4. Grant capabilities +vm-identity capability grant \ + --subject did:vm:node:new-node-01 \ + --capability storage,compute + +# 5. Verify +vm-mesh node status new-node-01 +``` + +### Key Rotation Ceremony + +```bash +# 1. Initiate ceremony +vm-identity key-rotate \ + --did did:vm:node:brick-01 \ + --ceremony-type standard + +# 2. Generate new keypair (on target node) +vm-identity key-generate --algorithm ed25519 + +# 3. Witness signatures (from other nodes) +vm-identity key-witness \ + --ceremony ceremony-2025-12-001 \ + --witness did:vm:node:brick-02 + +# 4. Publish new key +vm-identity key-publish --ceremony ceremony-2025-12-001 + +# 5. Verify propagation +vm-identity did resolve did:vm:node:brick-01 +``` + +### Create Security Drill + +```bash +# 1. Create drill from prompt +vm-drills create \ + --prompt "Detect and respond to ransomware encryption" \ + --severity high \ + --skills detection-defense-ir,kubernetes-security + +# 2. Review generated contract +vm-drills show drill-2025-12-001 + +# 3. Start execution +vm-drills start drill-2025-12-001 + +# 4. Complete stages +vm-drills complete-stage drill-2025-12-001 stage-1 \ + --outputs cases/drills/drill-2025-12-001/stage-1/ \ + --findings "Identified encryption patterns" + +# 5. Seal drill +vm-drills seal drill-2025-12-001 +``` + +### Initiate Transmutation + +```bash +# 1. Start transmutation from incident +vm-psi transmute start \ + --input INC-2025-12-001 \ + --input-type security_incident \ + --title "SSH Brute Force to Detection" + +# 2. Extract IOCs +vm-psi transmute step transmute-2025-12-001 extract + +# 3. Dissolve to standard format +vm-psi transmute step transmute-2025-12-001 dissolve + +# 4. Purify (validate) +vm-psi transmute step transmute-2025-12-001 purify + +# 5. Coagulate (generate rules) +vm-psi transmute step transmute-2025-12-001 coagulate + +# 6. 
Seal +vm-psi transmute seal transmute-2025-12-001 +``` + +--- + +## Troubleshooting + +### Anchor Failures + +**Symptom**: `vm-guardian anchor-status` shows failures + +**Diagnosis**: +```bash +# Check guardian logs +kubectl logs -n vaultmesh -l app.kubernetes.io/name=guardian --tail=100 + +# Check anchor backend connectivity +vm-guardian test-backend ethereum +vm-guardian test-backend ots + +# Check pending receipts +vm-guardian pending-receipts +``` + +**Common Causes**: +1. **Network issues**: Check Ethereum RPC connectivity +2. **Insufficient funds**: Check anchor wallet balance +3. **Rate limiting**: Check if backend is rate limiting +4. **Configuration**: Verify anchor config + +**Resolution**: +```bash +# Retry anchor +vm-guardian anchor-now --backend ots --wait + +# If Ethereum issues, switch to OTS temporarily +vm-guardian config set anchor.primary ots + +# Check and top up wallet +vm-guardian wallet balance +vm-guardian wallet fund --amount 0.1 +``` + +### Receipt Integrity Errors + +**Symptom**: `verify-all` reports mismatches + +**Diagnosis**: +```bash +# Identify affected scroll +vm-guardian verify-all --scroll all --verbose + +# Check specific receipt +vm-guardian verify-receipt blake3:... --scroll Compliance --debug + +# Compare computed vs stored root +vm-guardian compute-root --scroll Compliance +cat receipts/ROOT.compliance.txt +``` + +**Common Causes**: +1. **Corrupted JSONL**: File system issues +2. **Incomplete write**: Process interrupted +3. **Manual modification**: Violation of AXIOM-001 + +**Resolution**: +```bash +# If corruption detected, restore from backup +vm-cli backup restore --backup-id backup-2025-12-05 --scroll Compliance + +# Recompute root after restore +vm-guardian recompute-root --scroll Compliance + +# Trigger anchor to seal restored state +vm-guardian anchor-now --scroll Compliance --wait +``` + +### Node Connectivity Issues + +**Symptom**: Node showing unhealthy in mesh + +**Diagnosis**: +```bash +# Check node status +vm-mesh node status brick-02 + +# Test connectivity +vm-mesh ping brick-02 + +# Check routes +vm-mesh routes list --node brick-02 + +# Check node logs +kubectl logs -n vaultmesh pod/brick-02 --tail=100 +``` + +**Common Causes**: +1. **Network partition**: Firewall/network issues +2. **Resource exhaustion**: Node overloaded +3. **Certificate expiry**: TLS cert expired +4. **Process crash**: Service died + +**Resolution**: +```bash +# Restart node pod +kubectl rollout restart deployment/brick-02 -n vaultmesh + +# If cert expired +vm-identity cert-renew --node brick-02 + +# If persistent issues, remove and re-add +vm-mesh node remove brick-02 --force +vm-mesh node add --did did:vm:node:brick-02 --endpoint https://... +``` + +### Oracle Query Failures + +**Symptom**: Oracle returning errors + +**Diagnosis**: +```bash +# Check oracle health +vm-oracle health + +# Check LLM connectivity +vm-oracle test-llm anthropic +vm-oracle test-llm openai + +# Check corpus status +vm-oracle corpus status + +# Check logs +kubectl logs -n vaultmesh -l app.kubernetes.io/name=oracle --tail=100 +``` + +**Common Causes**: +1. **LLM API issues**: Rate limiting, key expiry +2. **Corpus empty**: Documents not loaded +3. **Index corruption**: Vector index issues +4. 
**Memory exhaustion**: OOM conditions + +**Resolution**: +```bash +# Rotate API key if expired +kubectl create secret generic oracle-llm-credentials \ + --from-literal=anthropic-key=NEW_KEY \ + -n vaultmesh --dry-run=client -o yaml | kubectl apply -f - + +# Reload corpus +vm-oracle corpus reload + +# Rebuild index +vm-oracle corpus reindex + +# Restart oracle +kubectl rollout restart deployment/vaultmesh-oracle -n vaultmesh +``` + +### Phase Stuck in Nigredo + +**Symptom**: System in Nigredo for extended period + +**Diagnosis**: +```bash +# Check phase details +vm-psi phase current --verbose + +# Check active incidents +vm-offsec incidents list --status open + +# Check for blocking issues +vm-psi blockers + +# Review phase history +vm-psi phase history --last 7d +``` + +**Common Causes**: +1. **Unresolved incident**: Active security issue +2. **Failed transmutation**: Stuck in process +3. **Missing witness**: Transmutation waiting for signature +4. **Metric threshold**: Health metrics below threshold + +**Resolution**: +```bash +# Close incident if resolved +vm-offsec incident close INC-2025-12-001 \ + --resolution "Threat neutralized, systems restored" + +# Complete stuck transmutation +vm-psi transmute force-complete transmute-2025-12-001 + +# Manual phase transition (requires justification) +vm-psi phase transition albedo \ + --reason "Incident resolved, metrics stable" \ + --evidence evidence-report.md +``` + +### Constitutional Violation Detected + +**Symptom**: `gov_violation` alert fired + +**Diagnosis**: +```bash +# View violation details +vm-gov violations show VIOL-2025-12-001 + +# Check what was attempted +vm-gov violations evidence VIOL-2025-12-001 + +# Review enforcement action +vm-gov enforcement show ENF-2025-12-001 +``` + +**Common Causes**: +1. **Agent misconfiguration**: Automation tried unauthorized action +2. **Capability expiry**: Token expired mid-operation +3. **Bug in engine**: Logic error attempting violation +4. **Attack attempt**: Malicious action blocked + +**Resolution**: +```bash +# If false positive, dismiss +vm-gov violations review VIOL-2025-12-001 \ + --decision dismiss \ + --reason "False positive due to timing issue" + +# If real, review and uphold enforcement +vm-gov enforcement review ENF-2025-12-001 --decision uphold + +# Fix underlying issue +# (depends on specific violation) +``` + +--- + +## Backup & Recovery + +### Scheduled Backups + +```bash +# Full backup +vm-cli backup create --type full + +# Incremental backup +vm-cli backup create --type incremental + +# List backups +vm-cli backup list + +# Verify backup integrity +vm-cli backup verify backup-2025-12-05 +``` + +### Recovery Procedures + +```bash +# 1. Stop services +kubectl scale deployment -n vaultmesh --replicas=0 --all + +# 2. Restore from backup +vm-cli backup restore --backup-id backup-2025-12-05 + +# 3. Verify integrity +vm-guardian verify-all --scroll all + +# 4. Restart services +kubectl scale deployment -n vaultmesh --replicas=2 \ + vaultmesh-portal vaultmesh-oracle +kubectl scale deployment -n vaultmesh --replicas=1 vaultmesh-guardian + +# 5. 
Trigger anchor to seal restored state +vm-guardian anchor-now --wait +``` + +### Disaster Recovery + +```bash +# Full rebuild from backup +./scripts/disaster-recovery.sh --backup backup-2025-12-05 + +# Verify federation peers +vm-federation verify-all + +# Re-establish federation trust if needed +vm-federation re-establish --peer vaultmesh-berlin +``` + +--- + +## Performance Tuning + +### Receipt Write Optimization + +```toml +# config.toml +[receipts] +# Batch writes for better throughput +batch_size = 100 +batch_timeout_ms = 100 + +# Compression +compression = "zstd" +compression_level = 3 + +# Index configuration +index_cache_size_mb = 512 +``` + +### Database Tuning + +```sql +-- Vacuum and analyze +VACUUM ANALYZE receipts; + +-- Check slow queries +SELECT query, calls, mean_time +FROM pg_stat_statements +ORDER BY mean_time DESC +LIMIT 10; + +-- Index usage +SELECT schemaname, tablename, indexname, idx_scan +FROM pg_stat_user_indexes +ORDER BY idx_scan; +``` + +### Memory Optimization + +```bash +# Check memory usage +kubectl top pods -n vaultmesh + +# Adjust limits if needed +kubectl patch deployment vaultmesh-oracle -n vaultmesh \ + -p '{"spec":{"template":{"spec":{"containers":[{"name":"oracle","resources":{"limits":{"memory":"8Gi"}}}]}}}}' +``` + +--- + +## Monitoring Dashboards + +### Key Metrics to Watch + +| Metric | Warning | Critical | +|--------|---------|----------| +| `vaultmesh_guardian_last_anchor_age` | > 2h | > 4h | +| `vaultmesh_receipt_write_errors_total` | > 0 | > 10/min | +| `vaultmesh_mesh_node_unhealthy` | any | multiple | +| `vaultmesh_oracle_latency_p95` | > 30s | > 60s | +| `vaultmesh_governance_violations` | any | critical | +| `vaultmesh_psi_phase` | nigredo > 24h | nigredo > 72h | + +### Alert Response + +```bash +# Acknowledge alert +vm-alerts ack ALERT-2025-12-001 + +# Silence alert (for maintenance) +vm-alerts silence --matcher 'alertname="AnchorDelayed"' --duration 2h + +# View active alerts +vm-alerts list --active +``` diff --git a/docs/skill/PROTOCOLS.md b/docs/skill/PROTOCOLS.md new file mode 100644 index 0000000..063ef3d --- /dev/null +++ b/docs/skill/PROTOCOLS.md @@ -0,0 +1,605 @@ +# VaultMesh Federation & Governance Protocols + +## Federation Protocol + +### Trust Establishment Flow + +``` +┌──────────────┐ ┌──────────────┐ +│ MESH-A │ │ MESH-B │ +│ (Dublin) │ │ (Berlin) │ +└──────┬───────┘ └──────┬───────┘ + │ │ + │ 1. Discovery │ + │ GET /federation/discovery │ + │──────────────────────────────────►│ + │ │ + │ 2. Proposal │ + │ POST /federation/proposals │ + │──────────────────────────────────►│ + │ │ + │ 3. Counter/Accept │ + │◄──────────────────────────────────│ + │ │ + │ 4. Mutual Signature │ + │◄─────────────────────────────────►│ + │ │ + │ 5. 
Begin Witness Cycle │ + │◄─────────────────────────────────►│ + │ │ +``` + +### Trust Levels + +| Level | Name | Capabilities | +|-------|------|--------------| +| 0 | `isolated` | No federation | +| 1 | `observe` | Read-only witness, public receipts only | +| 2 | `verify` | Mutual verification, receipt sampling | +| 3 | `attest` | Cross-attestation, shared roots | +| 4 | `integrate` | Shared scrolls, joint governance | + +### Discovery Record + +```json +{ + "mesh_id": "did:vm:mesh:vaultmesh-dublin", + "display_name": "VaultMesh Dublin", + "endpoints": { + "federation": "https://federation.vaultmesh-dublin.io", + "verification": "https://verify.vaultmesh-dublin.io" + }, + "public_key": "ed25519:z6Mk...", + "scrolls_available": ["Compliance", "Drills"], + "trust_policy": { + "accepts_proposals": true, + "min_trust_level": 1, + "requires_mutual": true + }, + "attestations": [] +} +``` + +### Trust Proposal + +```json +{ + "proposal_id": "fed-proposal-2025-12-06-001", + "proposer": "did:vm:mesh:vaultmesh-dublin", + "target": "did:vm:mesh:vaultmesh-berlin", + "proposed_at": "2025-12-06T10:00:00Z", + "expires_at": "2025-12-13T10:00:00Z", + "proposed_trust_level": 2, + "proposed_terms": { + "scrolls_to_share": ["Compliance"], + "verification_frequency": "hourly", + "retention_period_days": 365, + "data_jurisdiction": "EU", + "audit_rights": true + }, + "proposer_attestations": { + "identity_proof": "...", + "compliance_credentials": ["ISO27001", "SOC2"] + }, + "signature": "z58D..." +} +``` + +### Federation Agreement + +```json +{ + "agreement_id": "fed-agreement-2025-12-06-001", + "parties": [ + "did:vm:mesh:vaultmesh-dublin", + "did:vm:mesh:vaultmesh-berlin" + ], + "established_at": "2025-12-06T16:00:00Z", + "trust_level": 2, + "terms": { + "scrolls_shared": ["Compliance", "Drills"], + "verification_frequency": "daily", + "retention_period_days": 180, + "data_jurisdiction": "EU", + "audit_rights": true, + "dispute_resolution": "arbitration_zurich" + }, + "key_exchange": { + "dublin_federation_key": "ed25519:z6MkDublin...", + "berlin_federation_key": "ed25519:z6MkBerlin..." + }, + "signatures": { + "did:vm:mesh:vaultmesh-dublin": { + "signed_at": "2025-12-06T15:30:00Z", + "signature": "z58D..." + }, + "did:vm:mesh:vaultmesh-berlin": { + "signed_at": "2025-12-06T16:00:00Z", + "signature": "z47C..." + } + }, + "agreement_hash": "blake3:abc123..." +} +``` + +### Witness Protocol + +``` +Anchor Completes → Notify Peer → Peer Verifies → Witness Receipt +``` + +**Witness Receipt**: +```json +{ + "type": "fed_witness_event", + "witness_id": "witness-2025-12-06-001", + "witnessed_mesh": "did:vm:mesh:vaultmesh-dublin", + "witnessing_mesh": "did:vm:mesh:vaultmesh-berlin", + "timestamp": "2025-12-06T12:05:00Z", + "scroll": "Compliance", + "witnessed_root": "blake3:aaa111...", + "witnessed_anchor": { + "backend": "ethereum", + "tx_hash": "0x123...", + "block_number": 12345678 + }, + "verification_method": "anchor_proof_validation", + "verification_result": "verified", + "samples_checked": 5, + "discrepancies": [], + "witness_signature": "z47C..." +} +``` + +### Cross-Anchor + +At trust level 3+, meshes include each other's roots: + +```json +{ + "type": "fed_cross_anchor", + "anchoring_mesh": "did:vm:mesh:vaultmesh-berlin", + "anchored_mesh": "did:vm:mesh:vaultmesh-dublin", + "dublin_roots_included": { + "Compliance": "blake3:aaa111...", + "Drills": "blake3:bbb222..." + }, + "combined_root": "blake3:ccc333...", + "anchor_proof": { + "backend": "bitcoin", + "tx_hash": "abc123..." 
+ } +} +``` + +### Federation API Endpoints + +| Endpoint | Method | Purpose | +|----------|--------|---------| +| `/federation/discovery` | GET | Get mesh discovery record | +| `/federation/proposals` | POST | Submit trust proposal | +| `/federation/proposals/{id}` | GET, PUT | View/respond to proposal | +| `/federation/agreements` | GET | List active agreements | +| `/federation/agreements/{id}` | GET, DELETE | View/revoke agreement | +| `/federation/notify` | POST | Notify of new anchor | +| `/federation/witness` | POST | Submit witness attestation | +| `/federation/roots` | GET | Get current Merkle roots | +| `/federation/receipts/{scroll}` | GET | Fetch receipt samples | +| `/federation/verify` | POST | Request receipt verification | + +### CLI Commands + +```bash +# Discovery +vm-federation discover --mesh vaultmesh-berlin.io +vm-federation list-known + +# Proposals +vm-federation propose \ + --target did:vm:mesh:vaultmesh-berlin \ + --trust-level 2 \ + --scrolls Compliance,Drills + +vm-federation proposals list +vm-federation proposals accept fed-proposal-001 +vm-federation proposals reject fed-proposal-001 --reason "..." + +# Agreements +vm-federation agreements list +vm-federation agreements revoke fed-agreement-001 --notice-days 30 + +# Verification +vm-federation verify --mesh vaultmesh-berlin --scroll Compliance +vm-federation witness-history --mesh vaultmesh-berlin --last 30d + +# Status +vm-federation status +vm-federation health --all-peers +``` + +--- + +## Constitutional Governance + +### Hierarchy + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ IMMUTABLE AXIOMS │ +│ (Cannot be changed, ever) │ +└─────────────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────────┐ +│ CONSTITUTIONAL ARTICLES │ +│ (Amendable with supermajority + ratification) │ +└─────────────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────────┐ +│ STATUTORY RULES │ +│ (Changeable with standard procedures) │ +└─────────────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────────┐ +│ EXECUTIVE ORDERS │ +│ (Issued by authorized actors) │ +└─────────────────────────────────────────────────────────────────┘ +``` + +### Immutable Axioms + +| ID | Name | Statement | +|----|------|-----------| +| AXIOM-001 | Append-Only Receipts | Receipts, once written, shall never be modified or deleted | +| AXIOM-002 | Cryptographic Integrity | All receipts include cryptographic hashes | +| AXIOM-003 | Universal Receipting | All significant changes produce receipts | +| AXIOM-004 | Constitutional Supremacy | No action may violate the Constitution | +| AXIOM-005 | Axiom Immutability | These axioms cannot be amended | + +### Constitutional Articles + +| Article | Name | Content | +|---------|------|---------| +| I | Governance Structure | Sovereign authority, engine authorities, agent delegation | +| II | Amendment Procedure | Proposal, deliberation, ratification | +| III | Engine Governance | Engine registry, boundaries, lifecycle | +| IV | Rights and Protections | Audit rights, data sovereignty, due process | +| V | Federation | Authority, limits, termination | +| VI | Emergency Powers | Declaration, powers, duration | + +### Amendment Workflow + +``` +PROPOSAL → DELIBERATION (7+ days) → VOTING → RATIFICATION → ACTIVATION + ↘ REJECTED → Archive +``` + +### Proposal Receipt + 
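+A minimal sketch of how an engine might enforce the deliberation window
+before a proposal advances to voting (field names follow the receipt below;
+the helper itself is an assumption, not the shipped implementation):
+
+```python
+# Sketch only: gate the proposal on the 7+ day deliberation period
+# required by the amendment workflow above.
+from datetime import datetime, timedelta
+
+MIN_DELIBERATION = timedelta(days=7)
+
+def open_voting(proposal: dict) -> dict:
+    submitted = datetime.fromisoformat(proposal["submitted_at"].rstrip("Z"))
+    ends = datetime.fromisoformat(proposal["deliberation_ends"].rstrip("Z"))
+    if ends - submitted < MIN_DELIBERATION:
+        raise ValueError("deliberation window shorter than 7 days")
+    proposal["status"] = "voting"
+    return proposal  # the engine then emits the corresponding gov receipt
+```
+
+A proposal in deliberation is recorded with a receipt like the following:
+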
+```json +{ + "type": "gov_proposal", + "proposal_id": "PROP-2025-12-001", + "proposal_type": "amendment", + "title": "Add Data Retention Article", + "author": "did:vm:human:sovereign", + "submitted_at": "2025-12-06T10:00:00Z", + "deliberation_ends": "2025-12-13T10:00:00Z", + "content": { + "target": "ARTICLE-VII", + "action": "add", + "text": { + "id": "ARTICLE-VII", + "name": "Data Retention", + "sections": [...] + } + }, + "rationale": "Compliance with EU regulations", + "status": "deliberation" +} +``` + +### Vote Receipt + +```json +{ + "type": "gov_vote", + "vote_id": "VOTE-2025-12-001-sovereign", + "proposal_id": "PROP-2025-12-001", + "voter": "did:vm:human:sovereign", + "voted_at": "2025-12-14T10:00:00Z", + "vote": "approve", + "weight": 1.0, + "comments": "Essential for compliance", + "signature": "z58D..." +} +``` + +### Ratification Receipt + +```json +{ + "type": "gov_ratification", + "ratification_id": "RAT-2025-12-001", + "proposal_id": "PROP-2025-12-001", + "ratified_at": "2025-12-14T12:00:00Z", + "ratified_by": "did:vm:human:sovereign", + "vote_summary": { + "approve": 1, + "reject": 0, + "abstain": 0 + }, + "quorum_met": true, + "constitution_version_before": "1.0.0", + "constitution_version_after": "1.1.0" +} +``` + +### Amendment Receipt + +```json +{ + "type": "gov_amendment", + "amendment_id": "AMEND-2025-12-001", + "proposal_id": "PROP-2025-12-001", + "effective_at": "2025-12-14T14:00:00Z", + "anchor_proof": { + "backend": "ethereum", + "tx_hash": "0x123..." + }, + "constitution_hash_before": "blake3:const_v1.0...", + "constitution_hash_after": "blake3:const_v1.1..." +} +``` + +### Executive Orders + +For operational decisions without full amendment: + +```json +{ + "type": "gov_executive_order", + "order_id": "EO-2025-12-001", + "title": "Temporary Rate Limit Increase", + "issued_by": "did:vm:human:sovereign", + "issued_at": "2025-12-06T15:00:00Z", + "authority": "ARTICLE-I.1", + "order_type": "parameter_change", + "content": { + "parameter": "guardian.anchor_rate_limit", + "old_value": "100/day", + "new_value": "500/day" + }, + "duration": { + "type": "temporary", + "expires_at": "2026-01-01T00:00:00Z" + } +} +``` + +### Emergency Declaration + +```json +{ + "type": "gov_executive_order", + "order_id": "EO-2025-12-002", + "title": "Security Emergency", + "issued_by": "did:vm:human:sovereign", + "authority": "ARTICLE-VI.1", + "order_type": "emergency", + "content": { + "emergency_type": "security_incident", + "threat_description": "Active intrusion on BRICK-02", + "powers_invoked": [ + "Suspend authentication delays", + "Enhanced logging", + "Immediate capability revocation" + ] + }, + "duration": { + "type": "emergency", + "expires_at": "2025-12-09T03:50:00Z", + "renewable": true + } +} +``` + +### Violation Detection + +```json +{ + "type": "gov_violation", + "violation_id": "VIOL-2025-12-001", + "detected_at": "2025-12-06T16:00:00Z", + "detected_by": "engine:guardian", + "violation_type": "unauthorized_action", + "severity": "high", + "details": { + "actor": "did:vm:agent:automation-01", + "action_attempted": "modify_receipt", + "rule_violated": "AXIOM-001", + "action_result": "blocked" + }, + "evidence": { + "log_entries": ["..."], + "request_hash": "blake3:..." 
+ } +} +``` + +### Enforcement Action + +```json +{ + "type": "gov_enforcement", + "enforcement_id": "ENF-2025-12-001", + "violation_id": "VIOL-2025-12-001", + "enforced_at": "2025-12-06T16:05:00Z", + "enforcement_type": "capability_suspension", + "target": "did:vm:agent:automation-01", + "action_taken": { + "capability_suspended": "write", + "scope": "all_scrolls", + "duration": "pending_review" + }, + "review_required": true, + "review_deadline": "2025-12-07T16:05:00Z" +} +``` + +### CLI Commands + +```bash +# Constitution +vm-gov constitution show +vm-gov constitution version +vm-gov constitution diff v1.0.0 v1.1.0 + +# Proposals +vm-gov proposal create --type amendment --file proposal.json +vm-gov proposal list --status deliberation +vm-gov proposal show PROP-2025-12-001 + +# Voting +vm-gov vote PROP-2025-12-001 --vote approve +vm-gov vote PROP-2025-12-001 --vote reject --reason "..." + +# Ratification +vm-gov ratify PROP-2025-12-001 + +# Executive Orders +vm-gov order create --type parameter_change --file order.json +vm-gov order list --active +vm-gov order revoke EO-2025-12-001 + +# Emergencies +vm-gov emergency declare --type security_incident --description "..." +vm-gov emergency status +vm-gov emergency extend --hours 24 +vm-gov emergency end + +# Violations +vm-gov violations list --severity high,critical +vm-gov violations review VIOL-2025-12-001 --decision dismiss + +# Enforcement +vm-gov enforcement list --pending-review +vm-gov enforcement review ENF-2025-12-001 --decision uphold +``` + +--- + +## Engine Registry + +All engines must be registered in the Constitution: + +```json +{ + "registered_engines": [ + { + "engine_id": "engine:drills", + "name": "Security Drills", + "scroll": "Drills", + "authority": "Security training and exercise management", + "status": "active" + }, + { + "engine_id": "engine:oracle", + "name": "Compliance Oracle", + "scroll": "Compliance", + "authority": "Compliance question answering", + "status": "active" + }, + { + "engine_id": "engine:guardian", + "name": "Guardian", + "scroll": "Guardian", + "authority": "Anchoring and sentinel", + "status": "active" + }, + { + "engine_id": "engine:treasury", + "name": "Treasury", + "scroll": "Treasury", + "authority": "Financial tracking", + "status": "active" + }, + { + "engine_id": "engine:mesh", + "name": "Mesh", + "scroll": "Mesh", + "authority": "Topology management", + "status": "active" + }, + { + "engine_id": "engine:offsec", + "name": "OffSec", + "scroll": "OffSec", + "authority": "Security operations", + "status": "active" + }, + { + "engine_id": "engine:identity", + "name": "Identity", + "scroll": "Identity", + "authority": "DID and capability management", + "status": "active" + }, + { + "engine_id": "engine:observability", + "name": "Observability", + "scroll": "Observability", + "authority": "Telemetry monitoring", + "status": "active" + }, + { + "engine_id": "engine:automation", + "name": "Automation", + "scroll": "Automation", + "authority": "Workflow execution", + "status": "active" + }, + { + "engine_id": "engine:psi", + "name": "Ψ-Field", + "scroll": "PsiField", + "authority": "Consciousness tracking", + "status": "active" + }, + { + "engine_id": "engine:federation", + "name": "Federation", + "scroll": "Federation", + "authority": "Cross-mesh trust", + "status": "active" + }, + { + "engine_id": "engine:governance", + "name": "Governance", + "scroll": "Governance", + "authority": "Constitutional enforcement", + "status": "active" + } + ] +} +``` + +### Adding New Engines + +New engines 
require constitutional amendment:
+
+1. Draft proposal with engine specification
+2. 7-day deliberation period
+3. Sovereign approval
+4. Anchor confirmation activates engine
+
+```bash
+vm-gov proposal create \
+  --type add_engine \
+  --engine-id engine:analytics \
+  --name "Analytics" \
+  --scroll Analytics \
+  --authority "Data analysis and insights"
+```
diff --git a/docs/skill/QUICK_REFERENCE.md b/docs/skill/QUICK_REFERENCE.md
new file mode 100644
index 0000000..d52b097
--- /dev/null
+++ b/docs/skill/QUICK_REFERENCE.md
@@ -0,0 +1,196 @@
+# VaultMesh Quick Reference
+
+## Eternal Pattern
+
+```
+Intent → Engine → Receipt → Scroll → Anchor
+```
+
+## Three Layers
+
+| Layer | Components | Artifacts |
+|-------|------------|-----------|
+| L1 Experience | CLI, UI, MCP | Commands, requests |
+| L2 Engine | Domain logic | contract.json, state.json |
+| L3 Ledger | Receipts, anchors | JSONL, ROOT.*.txt |
+
+## Scrolls
+
+| Scroll | Path | Root File |
+|--------|------|-----------|
+| Drills | `receipts/drills/` | `ROOT.drills.txt` |
+| Compliance | `receipts/compliance/` | `ROOT.compliance.txt` |
+| Guardian | `receipts/guardian/` | `ROOT.guardian.txt` |
+| Treasury | `receipts/treasury/` | `ROOT.treasury.txt` |
+| Mesh | `receipts/mesh/` | `ROOT.mesh.txt` |
+| OffSec | `receipts/offsec/` | `ROOT.offsec.txt` |
+| Identity | `receipts/identity/` | `ROOT.identity.txt` |
+| Observability | `receipts/observability/` | `ROOT.observability.txt` |
+| Automation | `receipts/automation/` | `ROOT.automation.txt` |
+| PsiField | `receipts/psi/` | `ROOT.psi.txt` |
+| Federation | `receipts/federation/` | `ROOT.federation.txt` |
+| Governance | `receipts/governance/` | `ROOT.governance.txt` |
+
+## DIDs
+
+```
+did:vm:<type>:<id>
+
+node    → did:vm:node:brick-01
+human   → did:vm:human:sovereign
+agent   → did:vm:agent:copilot-01
+service → did:vm:service:oracle
+mesh    → did:vm:mesh:vaultmesh-dublin
+```
+
+## Phases
+
+| Symbol | Phase | State |
+|--------|-------|-------|
+| 🜁 | Nigredo | Crisis |
+| 🜄 | Albedo | Recovery |
+| 🜆 | Citrinitas | Optimization |
+| 🜂 | Rubedo | Integration |
+
+## Axioms
+
+1. Receipts are append-only
+2. Hashes are cryptographic
+3. All changes produce receipts
+4. Constitution is supreme
+5. Axioms are immutable
+
+## CLI Cheatsheet
+
+```bash
+# Guardian
+vm-guardian anchor-status
+vm-guardian anchor-now --wait
+vm-guardian verify-receipt <hash> --scroll <scroll>
+
+# Identity
+vm-identity did create --type node --id <id>
+vm-identity capability grant --subject <did> --capability <capability>
+vm-identity whoami
+
+# Mesh
+vm-mesh node list
+vm-mesh node status <node-id>
+vm-mesh topology
+
+# Oracle
+vm-oracle query "What are the GDPR requirements?"
+vm-oracle corpus status
+
+# Drills
+vm-drills create --prompt "<description>"
+vm-drills status
+
+# Psi
+vm-psi phase current
+vm-psi transmute start --input <case-id>
+vm-psi opus status
+
+# Treasury
+vm-treasury balance
+vm-treasury debit --from <account> --amount <amount>
+
+# Governance
+vm-gov constitution version
+vm-gov violations list
+vm-gov emergency status
+
+# Federation
+vm-federation status
+vm-federation verify --mesh <mesh-id>
+
+# System
+vm-cli system health
+vm-cli receipts count --by-scroll
+```
+
+## Receipt Structure
+
+```json
+{
+  "schema_version": "2.0.0",
+  "type": "<scroll>_<event>",
+  "timestamp": "ISO8601",
+  "header": {
+    "root_hash": "blake3:...",
+    "tags": [],
+    "previous_hash": "blake3:..."
+ }, + "meta": { + "scroll": "ScrollName", + "sequence": 0, + "anchor_epoch": null, + "proof_path": null + }, + "body": {} +} +``` + +## Capabilities + +| Capability | Description | +|------------|-------------| +| `anchor` | Submit to anchor backends | +| `storage` | Store receipts/artifacts | +| `compute` | Execute drills/agents | +| `oracle` | Issue compliance answers | +| `admin` | Grant/revoke capabilities | +| `federate` | Establish cross-mesh trust | + +## Trust Levels + +| Level | Name | Access | +|-------|------|--------| +| 0 | isolated | None | +| 1 | observe | Read-only | +| 2 | verify | Mutual verification | +| 3 | attest | Cross-attestation | +| 4 | integrate | Shared scrolls | + +## Severity Levels + +| Level | Description | +|-------|-------------| +| critical | Active breach | +| high | Confirmed attack | +| medium | Suspicious activity | +| low | Anomaly/info | + +## Key Ports + +| Service | HTTP | Metrics | +|---------|------|---------| +| Portal | 8080 | 9090 | +| Guardian | 8081 | 9090 | +| Oracle | 8082 | 9090 | +| MCP | 8083 | - | + +## Health Endpoints + +``` +GET /health/live → Liveness +GET /health/ready → Readiness +GET /metrics → Prometheus +``` + +## Transmutation Steps + +``` +Extract → Dissolve → Purify → Coagulate → Seal +``` + +## Design Gate + +- [ ] Clear entrypoint? +- [ ] Contract produced? +- [ ] State object? +- [ ] Receipts emitted? +- [ ] Append-only JSONL? +- [ ] Merkle root? +- [ ] Guardian anchor path? +- [ ] Query tool? diff --git a/docs/skill/SKILL.md b/docs/skill/SKILL.md new file mode 100644 index 0000000..12b59a1 --- /dev/null +++ b/docs/skill/SKILL.md @@ -0,0 +1,338 @@ +# VaultMesh Architect Skill + +> *Building Earth's Civilization Ledger — one receipt at a time.* + +## Overview + +This skill enables Claude to architect, develop, and operate VaultMesh — a sovereign digital infrastructure system that combines cryptographic proofs, blockchain anchoring, and AI governance to create durable, auditable civilization-scale evidence. 
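+
+Because every scroll is an append-only JSONL file with a published root hash,
+an auditor can re-derive a scroll's state without trusting the host. A minimal
+offline-verification sketch (it mirrors the simplified per-receipt hashing of
+this skill's Python pattern rather than a full Merkle tree; the third-party
+`blake3` package and the file layout are assumptions):
+
+```python
+# Sketch: recheck each receipt's hash, then compare the final hash
+# against the published ROOT.<scroll>.txt value.
+import json
+from pathlib import Path
+
+from blake3 import blake3  # pip install blake3
+
+def verify_scroll(scroll: str) -> bool:
+    last = ""
+    for line in Path(f"receipts/{scroll}/{scroll}_events.jsonl").open():
+        receipt = json.loads(line)
+        body = {k: v for k, v in receipt.items() if k != "root_hash"}
+        digest = blake3(json.dumps(body, sort_keys=True).encode()).hexdigest()
+        if receipt["root_hash"] != f"blake3:{digest}":
+            return False  # a mutated line no longer matches its recorded hash
+        last = f"blake3:{digest}"
+    return Path(f"receipts/ROOT.{scroll}.txt").read_text().strip() == last
+```
+
+Any rewrite of an already-appended line breaks verification, which is the
+tamper-evidence AXIOM-001 depends on.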
+ +## When to Use This Skill + +Activate this skill when: +- Designing or implementing VaultMesh engines or subsystems +- Creating receipts, scrolls, or anchor cycles +- Working with the Eternal Pattern architecture +- Implementing federation, governance, or identity systems +- Building MCP server integrations +- Deploying or operating VaultMesh infrastructure +- Writing code that interacts with the Civilization Ledger + +## Core Architecture: The Eternal Pattern + +Every VaultMesh subsystem follows this arc: + +``` +Real-world intent → Engine → Structured JSON → Receipt → Scroll → Guardian Anchor +``` + +### Three-Layer Stack + +``` +┌───────────────────────────────────────────────┐ +│ L1 — Experience Layer │ +│ (Humans & Agents) │ +│ • CLI / UI / MCP tools / agents │ +└───────────────────────────────────────────────┘ + │ + ▼ +┌───────────────────────────────────────────────┐ +│ L2 — Engine Layer │ +│ (Domain Engines & Contracts) │ +│ • contract.json → state.json → outputs/ │ +└───────────────────────────────────────────────┘ + │ + ▼ +┌───────────────────────────────────────────────┐ +│ L3 — Ledger Layer │ +│ (Receipts, Scrolls, ProofChain, Anchors) │ +│ • JSONL files → Merkle roots → anchors │ +└───────────────────────────────────────────────┘ +``` + +## Registered Engines (Scrolls) + +| Engine | Scroll | Purpose | +|--------|--------|---------| +| Drills | `Drills` | Security training and exercises | +| Oracle | `Compliance` | Regulatory compliance Q&A | +| Guardian | `Guardian` | Anchoring and sentinel | +| Treasury | `Treasury` | Financial tracking and settlement | +| Mesh | `Mesh` | Federation topology | +| OffSec | `OffSec` | Security operations and IR | +| Identity | `Identity` | DIDs, credentials, capabilities | +| Observability | `Observability` | Telemetry events | +| Automation | `Automation` | Workflow execution | +| Ψ-Field | `PsiField` | Alchemical consciousness | +| Federation | `Federation` | Cross-mesh trust | +| Governance | `Governance` | Constitutional enforcement | + +## File Structure + +``` +vaultmesh/ +├── receipts/ # Receipt storage +│ ├── drills/ +│ │ └── drill_runs.jsonl +│ ├── compliance/ +│ │ └── oracle_answers.jsonl +│ ├── treasury/ +│ │ └── treasury_events.jsonl +│ ├── mesh/ +│ │ └── mesh_events.jsonl +│ ├── [scroll]/ +│ │ └── [scroll]_events.jsonl +│ ├── ROOT.drills.txt +│ ├── ROOT.compliance.txt +│ └── ROOT.[scroll].txt +├── cases/ # Artifact storage +│ ├── drills/[drill-id]/ +│ ├── treasury/[settlement-id]/ +│ ├── offsec/[incident-id]/ +│ └── psi/[transmutation-id]/ +├── corpus/ # Oracle documents +└── config/ # Configuration +``` + +## Receipt Schema (v2) + +```json +{ + "schema_version": "2.0.0", + "type": "receipt_type_name", + "timestamp": "2025-12-06T12:00:00Z", + "header": { + "root_hash": "blake3:abc123...", + "tags": ["tag1", "tag2"], + "previous_hash": "blake3:prev..." 
+  },
+  "meta": {
+    "scroll": "ScrollName",
+    "sequence": 42,
+    "anchor_epoch": 7,
+    "proof_path": "cases/[scroll]/[id]/PROOF.json"
+  },
+  "body": {
+    // Domain-specific fields
+  }
+}
+```
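+
+Since any verifier must be able to recompute `root_hash`, a useful companion is a check that a receipt's header still matches its body. A minimal sketch, assuming `root_hash` commits to the canonical (sorted, compact) JSON of `body` and using the `blake3` package; adjust the canonicalization step if your scroll defines a different canon:
+
+```python
+import json
+
+import blake3
+
+
+def compute_root_hash(body: dict) -> str:
+    """Hash the canonical JSON encoding of a receipt body."""
+    canonical = json.dumps(body, sort_keys=True, separators=(",", ":")).encode()
+    return f"blake3:{blake3.blake3(canonical).hexdigest()}"
+
+
+def verify_receipt(receipt: dict) -> bool:
+    """Recompute the body hash and compare it to header.root_hash."""
+    return compute_root_hash(receipt["body"]) == receipt["header"]["root_hash"]
+```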
+
+## DID Format
+
+```
+did:vm:<type>:<id>
+
+Types:
+- node    → did:vm:node:brick-01
+- human   → did:vm:human:sovereign
+- agent   → did:vm:agent:copilot-01
+- service → did:vm:service:oracle-openai
+- mesh    → did:vm:mesh:vaultmesh-dublin
+```
+
+## Alchemical Phases
+
+| Phase | Symbol | Meaning | Operational State |
+|-------|--------|---------|-------------------|
+| Nigredo | 🜁 | Blackening | Crisis, incident |
+| Albedo | 🜄 | Whitening | Recovery, stabilization |
+| Citrinitas | 🜆 | Yellowing | Optimization, new capability |
+| Rubedo | 🜂 | Reddening | Integration, maturity |
+
+## Constitutional Axioms (Immutable)
+
+1. **AXIOM-001**: Receipts are append-only
+2. **AXIOM-002**: Hashes are cryptographically verified
+3. **AXIOM-003**: All significant changes produce receipts
+4. **AXIOM-004**: Constitution is supreme
+5. **AXIOM-005**: Axioms cannot be amended
+
+## Design Gate Checklist
+
+When creating any new feature, verify:
+
+### Experience Layer (L1)
+- [ ] Clear entrypoint (CLI, MCP tool, HTTP route)?
+- [ ] Intent clearly represented in structured form?
+
+### Engine Layer (L2)
+- [ ] Produces a contract (explicit or implicit)?
+- [ ] State object tracking progress/outcomes?
+- [ ] Actions and outputs inspectable (JSON + files)?
+
+### Ledger Layer (L3)
+- [ ] Emits receipt for important operations?
+- [ ] Receipts written to append-only JSONL?
+- [ ] JSONL covered by Merkle root (ROOT.[scroll].txt)?
+- [ ] Guardian can anchor the relevant root?
+- [ ] Query tool exists for this scroll?
+
+## Code Patterns
+
+### Rust Receipt Emission
+
+```rust
+use vaultmesh_core::{Receipt, ReceiptHeader, ReceiptMeta, Scroll, VmHash};
+
+let receipt_body = MyReceiptBody { /* ... */ };
+let root_hash = VmHash::from_json(&receipt_body)?;
+
+let receipt = Receipt {
+    header: ReceiptHeader {
+        receipt_type: "my_receipt_type".to_string(),
+        timestamp: Utc::now(),
+        root_hash: root_hash.as_str().to_string(),
+        tags: vec!["tag1".to_string()],
+    },
+    meta: ReceiptMeta {
+        scroll: Scroll::MyScroll,
+        sequence: 0, // Set by receipt store
+        anchor_epoch: None,
+        proof_path: None,
+    },
+    body: receipt_body,
+};
+```
+
+### Python Receipt Emission
+
+```python
+def emit_receipt(scroll: str, receipt_type: str, body: dict, tags: list[str]) -> dict:
+    import json
+    from datetime import datetime, timezone
+    from pathlib import Path
+
+    import blake3  # hashlib has no blake3; this needs the `blake3` package
+
+    receipt = {
+        "type": receipt_type,
+        "timestamp": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z"),
+        "tags": tags,
+        **body
+    }
+
+    # Compute root hash over the canonical JSON encoding
+    receipt_json = json.dumps(receipt, sort_keys=True)
+    root_hash = f"blake3:{blake3.blake3(receipt_json.encode()).hexdigest()}"
+    receipt["root_hash"] = root_hash
+
+    # Append to scroll
+    scroll_path = Path(f"receipts/{scroll}/{scroll}_events.jsonl")
+    scroll_path.parent.mkdir(parents=True, exist_ok=True)
+
+    with open(scroll_path, "a") as f:
+        f.write(json.dumps(receipt) + "\n")
+
+    # Update Merkle root (placeholder: real engines fold every line into the root)
+    root_file = Path(f"receipts/ROOT.{scroll}.txt")
+    root_file.write_text(root_hash)
+
+    return receipt
+```
+
+### MCP Tool Pattern
+
+```python
+@server.tool()
+async def my_tool(param: str) -> str:
+    """Tool description."""
+    caller = await get_caller_identity()
+    await verify_capability(caller, "required_capability")
+
+    result = await engine.do_operation(param)
+
+    await emit_tool_call_receipt(
+        tool="my_tool",
+        caller=caller,
+        params={"param": param},
+        result_hash=result.hash,
+    )
+
+    return json.dumps(result.to_dict(), indent=2)
+```
+
+## CLI Naming Convention
+
+```bash
+vm-<engine> <command> [subcommand] [options]
+
+Examples:
+vm-treasury debit --from acct:ops --amount 150 --currency EUR
+vm-mesh node list
+vm-identity did create --type human --id sovereign
+vm-psi phase current
+vm-guardian anchor-now
+vm-gov proposal create --type amendment
+```
+
+## Receipt Type Naming
+
+```
+<scroll>_<action>
+
+Examples:
+treasury_credit
+treasury_debit
+treasury_settlement
+mesh_node_join
+mesh_route_change
+identity_did_create
+identity_capability_grant
+psi_phase_transition
+psi_transmutation
+gov_proposal
+gov_amendment
+```
+
+## Key Integrations
+
+### Guardian Anchor Cycle
+```
+Receipts → ProofChain → Merkle Root → Anchor Backend (OTS/ETH/BTC)
+```
+
+### Federation Witness Protocol
+```
+Mesh-A anchors → Notifies Mesh-B → Mesh-B verifies → Emits witness receipt
+```
+
+### Transmutation (Tem) Pattern
+```
+Incident (Nigredo) → Extract IOCs → Generate rules → Integrate defenses (Citrinitas)
+```
+
+## Testing Requirements
+
+1. **Property Tests**: All receipt operations must be tested with proptest/hypothesis (see the sketch below)
+2. **Invariant Tests**: Core axioms verified after every test
+3. **Integration Tests**: Full cycles from intent to anchored receipt
+4. **Chaos Tests**: Resilience under network partition, pod failure
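+
+A minimal property-test sketch for requirement 1, assuming the `emit_receipt` helper from "Python Receipt Emission" lives in a hypothetical `skill_examples` module; it exercises AXIOM-001 (append-only) under random bodies:
+
+```python
+import json
+from pathlib import Path
+
+from hypothesis import given, strategies as st
+
+from skill_examples import emit_receipt  # hypothetical home for the helper above
+
+json_bodies = st.dictionaries(
+    keys=st.text(min_size=1, max_size=8),
+    values=st.integers() | st.text(max_size=16),
+    max_size=4,
+)
+
+
+@given(body=json_bodies)
+def test_emit_is_append_only(body):
+    # Note: writes under the current working directory, like the helper itself.
+    scroll_path = Path("receipts/demo/demo_events.jsonl")
+    before = len(scroll_path.read_text().splitlines()) if scroll_path.exists() else 0
+
+    receipt = emit_receipt("demo", "demo_event", body, tags=["test"])
+
+    lines = scroll_path.read_text().splitlines()
+    assert len(lines) == before + 1          # exactly one line was appended
+    assert json.loads(lines[-1]) == receipt  # and it is the receipt we emitted
+```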
+
+## Deployment Targets
+
+- **Kubernetes**: Production deployment via Kustomize
+- **Docker Compose**: Local development
+- **Akash**: Decentralized compute option
+
+## Related Skills
+
+- `sovereign-operator` — Security operations and MCP tools
+- `offsec-mcp` — Offensive security tooling
+- `vaultmesh-architect` — This skill
+
+## References
+
+- VAULTMESH-ETERNAL-PATTERN.md — Core architecture
+- VAULTMESH-TREASURY-ENGINE.md — Financial primitive
+- VAULTMESH-MESH-ENGINE.md — Federation topology
+- VAULTMESH-OFFSEC-ENGINE.md — Security operations
+- VAULTMESH-IDENTITY-ENGINE.md — Trust primitive
+- VAULTMESH-OBSERVABILITY-ENGINE.md — Telemetry
+- VAULTMESH-AUTOMATION-ENGINE.md — Workflows
+- VAULTMESH-PSI-FIELD-ENGINE.md — Consciousness layer
+- VAULTMESH-FEDERATION-PROTOCOL.md — Cross-mesh trust
+- VAULTMESH-CONSTITUTIONAL-GOVERNANCE.md — Rules
+- VAULTMESH-MCP-SERVERS.md — Claude integration
+- VAULTMESH-DEPLOYMENT-MANIFESTS.md — Infrastructure
+- VAULTMESH-MONITORING-STACK.md — Observability
+- VAULTMESH-TESTING-FRAMEWORK.md — Testing
+- VAULTMESH-MIGRATION-GUIDE.md — Upgrades
diff --git a/engines/console/__init__.py b/engines/console/__init__.py
new file mode 100644
index 0000000..70e1387
--- /dev/null
+++ b/engines/console/__init__.py
@@ -0,0 +1,27 @@
+"""
+VaultMesh Console Engine
+
+AI agent session management, code operations, and sovereign development.
+"""
+
+from .receipts import (
+    ConsoleReceiptEmitter,
+    emit_console_receipt,
+    ReceiptType,
+    get_emitter,
+)
+from .approvals import (
+    ApprovalManager,
+    ApprovalRequest,
+    get_approval_manager,
+)
+
+__all__ = [
+    "ConsoleReceiptEmitter",
+    "emit_console_receipt",
+    "ReceiptType",
+    "get_emitter",
+    "ApprovalManager",
+    "ApprovalRequest",
+    "get_approval_manager",
+]
diff --git a/engines/console/approvals.py b/engines/console/approvals.py
new file mode 100644
index 0000000..e6d42dd
--- /dev/null
+++ b/engines/console/approvals.py
@@ -0,0 +1,209 @@
+"""
+Console Engine Approval Manager
+
+Handles approval requests, pending state, and decision recording.
+State is derived from scroll on init, kept in memory during runtime.
+"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass, field, asdict
+from datetime import datetime, timezone, timedelta
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Literal
+
+import json
+
+from .receipts import emit_console_receipt, get_emitter
+
+
+ApprovalStatus = Literal["pending", "approved", "rejected", "expired"]
+
+
+@dataclass
+class ApprovalRequest:
+    """An approval request waiting for decision."""
+    approval_id: str
+    session_id: str
+    action_type: str
+    action_details: Dict[str, Any]
+    requested_by: str
+    approvers: List[str]
+    expires_at: str
+    status: ApprovalStatus = "pending"
+    created_at: str = field(
+        default_factory=lambda: datetime.now(timezone.utc).isoformat()
+    )
+
+
+class ApprovalManager:
+    """
+    Manages pending approval requests.
+
+    State is derived from scroll on init, kept in memory during runtime.
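+
+    Every request and decision is also emitted as a receipt on the Console
+    scroll, so the pending set can be rebuilt at any time by replaying it.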
+ """ + + def __init__(self, vaultmesh_root: Optional[str] = None): + self.vaultmesh_root = vaultmesh_root + self._pending: Dict[str, ApprovalRequest] = {} + self._load_pending_from_scroll() + + # --------------------------------------------------------------------- # + # Internal helpers + # --------------------------------------------------------------------- # + + def _load_pending_from_scroll(self) -> None: + """ + Reconstruct pending approvals from Console scroll. + + Algorithm: + - Scan all receipts. + - Collect all console_approval_request receipts as candidates. + - Remove any whose approval_id appears in a console_approval receipt. + - Drop any that are expired. + """ + emitter = get_emitter(self.vaultmesh_root) + events_path = Path(emitter.events_path) + + if not events_path.exists(): + return + + requests: Dict[str, ApprovalRequest] = {} + decided_ids: set = set() + + for line in events_path.read_text(encoding="utf-8").splitlines(): + if not line.strip(): + continue + try: + r = json.loads(line) + except Exception: + continue + + r_type = r.get("type") + payload = r.get("payload") or {} + + if r_type == "console_approval_request": + approval_id = payload.get("approval_id") + if not approval_id: + continue + req = ApprovalRequest( + approval_id=approval_id, + session_id=payload.get("session_id") or r.get("session_id"), + action_type=payload.get("action_type", ""), + action_details=payload.get("action_details", {}), + requested_by=payload.get("requested_by", ""), + approvers=payload.get("approvers", []), + expires_at=payload.get("expires_at", ""), + status=payload.get("status", "pending"), + created_at=payload.get("created_at", r.get("ts")), + ) + requests[approval_id] = req + + elif r_type == "console_approval": + approval_id = payload.get("approval_id") + if approval_id: + decided_ids.add(approval_id) + + # Filter to pending and not expired + now = datetime.now(timezone.utc) + for approval_id, req in requests.items(): + if approval_id in decided_ids: + continue + try: + exp = datetime.fromisoformat(req.expires_at.replace("Z", "+00:00")) + except Exception: + exp = now + if exp < now: + continue # expired; skip + req.status = "pending" + self._pending[approval_id] = req + + # --------------------------------------------------------------------- # + # Public API + # --------------------------------------------------------------------- # + + def request_approval( + self, + session_id: str, + action_type: str, + action_details: Dict[str, Any], + requested_by: str, + approvers: List[str], + timeout_minutes: int = 60, + ) -> ApprovalRequest: + """Create a new approval request and emit receipt.""" + now = datetime.now(timezone.utc) + approval_id = f"approval-{now.strftime('%Y-%m-%d-%H%M%S')}" + expires_at = (now + timedelta(minutes=timeout_minutes)).isoformat() + + request = ApprovalRequest( + approval_id=approval_id, + session_id=session_id, + action_type=action_type, + action_details=action_details, + requested_by=requested_by, + approvers=approvers, + expires_at=expires_at, + ) + + emit_console_receipt( + "console_approval_request", + asdict(request), + session_id=session_id, + ) + + self._pending[approval_id] = request + return request + + def decide( + self, + approval_id: str, + approved: bool, + approver: str, + reason: str = "", + ) -> bool: + """Record approval decision and emit receipt.""" + if approval_id not in self._pending: + # Let caller decide whether to treat this as error + raise KeyError(f"Approval not found: {approval_id}") + + request = self._pending[approval_id] + 
if approver not in request.approvers: + raise PermissionError(f"{approver} not in approver pool") + + request.status = "approved" if approved else "rejected" + + emit_console_receipt( + "console_approval", + { + "approval_id": approval_id, + "action_type": request.action_type, + "approved": approved, + "approver": approver, + "reason": reason, + "decided_at": datetime.now(timezone.utc).isoformat(), + }, + session_id=request.session_id, + ) + + del self._pending[approval_id] + return True + + def list_pending(self, session_id: Optional[str] = None) -> List[ApprovalRequest]: + """List pending approval requests.""" + requests = list(self._pending.values()) + if session_id: + requests = [r for r in requests if r.session_id == session_id] + return requests + + +# Singleton +_manager: Optional[ApprovalManager] = None + + +def get_approval_manager(vaultmesh_root: Optional[str] = None) -> ApprovalManager: + """Get or create the ApprovalManager singleton.""" + global _manager + if _manager is None: + _manager = ApprovalManager(vaultmesh_root) + return _manager diff --git a/engines/console/receipts.py b/engines/console/receipts.py new file mode 100644 index 0000000..b7325e2 --- /dev/null +++ b/engines/console/receipts.py @@ -0,0 +1,271 @@ +""" +Console Engine Receipt Emitter + +Appends receipts to the Console scroll and maintains the per-engine Merkle root. +""" + +import json +import os +from dataclasses import dataclass +from datetime import datetime, timezone +from pathlib import Path +from typing import Any, Dict, List, Literal, Optional, TypedDict + +try: + import blake3 # type: ignore +except ImportError: + blake3 = None # fallback to hashlib + + +ENGINE_ID = "engine:console" + +# Paths relative to VaultMesh repo root +DEFAULT_EVENTS_PATH = "receipts/console/console_events.jsonl" +DEFAULT_ROOT_PATH = "receipts/console/ROOT.console.txt" + + +ReceiptType = Literal[ + "console_genesis", + "console_session_start", + "console_session_end", + "console_command", + "console_file_edit", + "console_tool_call", + "console_approval_request", # Request for approval (pending) + "console_approval", # Decision on approval request + "console_git_commit", + "console_agent_spawn", +] + + +class ConsoleReceipt(TypedDict): + """Schema for Console receipts.""" + ts: str + engine_id: str + type: ReceiptType + session_id: Optional[str] + payload: Dict[str, Any] + + +@dataclass +class ConsoleReceiptEmitter: + """ + Local filesystem emitter for Console receipts. + + Appends receipts to console_events.jsonl and updates ROOT.console.txt + with the computed Merkle root. + + NOTE: This uses a simple O(n) recompute for the Merkle root. + TODO: Replace with shared receipts frontier or Rust FFI for O(log n) updates. 
+ """ + events_path: str = DEFAULT_EVENTS_PATH + root_path: str = DEFAULT_ROOT_PATH + vaultmesh_root: Optional[str] = None + + def __post_init__(self): + """Resolve paths relative to VaultMesh root.""" + if self.vaultmesh_root is None: + # Try to auto-detect from environment or use current directory + self.vaultmesh_root = os.environ.get( + "VAULTMESH_ROOT", + os.getcwd() + ) + + # Make paths absolute + root = Path(self.vaultmesh_root) + self.events_path = str(root / self.events_path) + self.root_path = str(root / self.root_path) + + def _ensure_dirs(self) -> None: + """Ensure parent directories exist.""" + os.makedirs(os.path.dirname(self.events_path), exist_ok=True) + os.makedirs(os.path.dirname(self.root_path), exist_ok=True) + + def _now_iso(self) -> str: + """Return current UTC timestamp in ISO format.""" + return datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ") + + def _vmhash(self, data: bytes) -> str: + """VaultMesh hash with algorithm prefix (blake3 preferred).""" + if blake3 is not None: + return f"blake3:{blake3.blake3(data).hexdigest()}" + + # Fallback for environments without blake3 + import hashlib + + return f"sha256:{hashlib.sha256(data).hexdigest()}" + + def _hex_part(self, value: str) -> str: + """Return hash hex component (strip optional algorithm prefix).""" + return value.split(":", 1)[-1] + + def _compute_merkle_root(self, hashes: List[str]) -> str: + """ + Compute Merkle root over a list of VaultMesh hashes. + + Simple in-process implementation. O(n log n) time. + TODO: Replace with incremental frontier for O(log n) updates. + """ + if not hashes: + return self._vmhash(b"empty") + if len(hashes) == 1: + return hashes[0] + + level = hashes[:] + while len(level) > 1: + next_level: List[str] = [] + for i in range(0, len(level), 2): + left = level[i] + right = level[i + 1] if i + 1 < len(level) else left + combined = (self._hex_part(left) + self._hex_part(right)).encode("utf-8") + next_level.append(self._vmhash(combined)) + level = next_level + return level[0] + + def _recompute_root(self) -> None: + """ + Re-scan the JSONL file, hash each line, and update ROOT.console.txt. + + This is O(n) but simple and correct for Phase 1 (Nigredo). + TODO: Replace with shared receipts frontier for O(log n) updates. + """ + hashes: List[str] = [] + count = 0 + if os.path.exists(self.events_path): + with open(self.events_path, "rb") as f: + for raw_line in f: + line = raw_line.rstrip(b"\n") + if not line: + continue + hashes.append(self._vmhash(line)) + count += 1 + root = self._compute_merkle_root(hashes) + + # Write updated root file + self._ensure_dirs() + with open(self.root_path, "w", encoding="utf-8") as f: + f.write(f"# VaultMesh Console Root\n") + f.write(f"engine_id={ENGINE_ID}\n") + f.write(f"merkle_root={root}\n") + f.write(f"events={count}\n") + f.write(f"updated_at={self._now_iso()}\n") + + def emit( + self, + receipt_type: ReceiptType, + payload: Dict[str, Any], + *, + session_id: Optional[str] = None, + ts: Optional[str] = None, + ) -> ConsoleReceipt: + """ + Emit a single Console receipt and update the engine root. 
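+
+        Appends one compact JSON line to console_events.jsonl, then rewrites
+        ROOT.console.txt with the recomputed Merkle root.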
+ + Args: + receipt_type: One of the defined ReceiptType values + payload: Domain-specific receipt data + session_id: Session identifier (required for non-genesis receipts) + ts: Optional timestamp override (ISO format) + + Returns: + The emitted receipt record + + Example: + emitter.emit( + "console_session_start", + { + "agent_type": "opencode", + "model_id": "claude-opus-4-5", + "caller": "did:vm:human:karol", + "project_path": "/root/work/vaultmesh" + }, + session_id="session-1765123456", + ) + """ + self._ensure_dirs() + + record: ConsoleReceipt = { + "ts": ts or self._now_iso(), + "engine_id": ENGINE_ID, + "type": receipt_type, + "session_id": session_id, + "payload": payload, + } + + # Append to scroll (compact JSON, one line) + line = json.dumps(record, separators=(",", ":")) + with open(self.events_path, "a", encoding="utf-8") as f: + f.write(line + "\n") + + # Update Merkle root + self._recompute_root() + + return record + + def get_root_info(self) -> Dict[str, Any]: + """Read and parse the current ROOT.console.txt file.""" + if not os.path.exists(self.root_path): + return { + "engine_id": ENGINE_ID, + "merkle_root": self._vmhash(b"empty"), + "events": 0, + "updated_at": None, + } + + info = {} + with open(self.root_path, "r", encoding="utf-8") as f: + for line in f: + line = line.strip() + if line.startswith("#") or not line: + continue + if "=" in line: + key, value = line.split("=", 1) + if key == "events": + info[key] = int(value) + else: + info[key] = value + return info + + +# Convenience singleton for simple use +_default_emitter: Optional[ConsoleReceiptEmitter] = None + + +def get_emitter(vaultmesh_root: Optional[str] = None) -> ConsoleReceiptEmitter: + """Get or create the default emitter singleton.""" + global _default_emitter + if _default_emitter is None: + _default_emitter = ConsoleReceiptEmitter(vaultmesh_root=vaultmesh_root) + return _default_emitter + + +def emit_console_receipt( + receipt_type: ReceiptType, + payload: Dict[str, Any], + *, + session_id: Optional[str] = None, + ts: Optional[str] = None, + vaultmesh_root: Optional[str] = None, +) -> ConsoleReceipt: + """ + Emit a Console receipt using the default emitter. + + Convenience function that uses a singleton emitter instance. + + Args: + receipt_type: One of the defined ReceiptType values + payload: Domain-specific receipt data + session_id: Session identifier (required for non-genesis receipts) + ts: Optional timestamp override (ISO format) + vaultmesh_root: Optional VaultMesh repo root path + + Returns: + The emitted receipt record + """ + emitter = get_emitter(vaultmesh_root) + return emitter.emit( + receipt_type=receipt_type, + payload=payload, + session_id=session_id, + ts=ts, + ) diff --git a/health_report.md b/health_report.md new file mode 100644 index 0000000..c060f94 --- /dev/null +++ b/health_report.md @@ -0,0 +1,30 @@ +# VaultMesh Node Health Report +## Generated: 2025/12/20 + +### 1. Node Connectivity +⚠️ Unable to determine node connectivity. CLI requires further investigation. + +### 2. Scroll Status +⚠️ Scroll status check incomplete. Guardian Engine requires additional dependencies. + +### 3. Treasury Balances +⚠️ Unable to retrieve treasury balance. CLI command not found. + +### 4. 
System Health Indicators +- **Processes**: 472 total (4 running, 468 sleeping) +- **Load Average**: 1.64, 1.56, 1.59 +- **CPU Usage**: 14.0% user, 18.0% sys, 68.0% idle +- **Memory**: + * Total Physical Memory: ~6000M used + * Wired Memory: 1243M + * Compressor Memory: 2217M +- **Network**: + * Packets In: 2,158,143 + * Packets Out: 593,753 + +### 5. Recommendations +1. Install missing VaultMesh CLI dependencies +2. Verify VaultMesh configuration +3. Investigate node connectivity issues + +**Note**: This is a partial health report due to CLI limitations. \ No newline at end of file diff --git a/keys/identity/guardian-local.json b/keys/identity/guardian-local.json new file mode 100644 index 0000000..9861eb6 --- /dev/null +++ b/keys/identity/guardian-local.json @@ -0,0 +1,7 @@ +{ + "did": "did:vm:guardian:local", + "created_at": "2025-12-06T16:30:38Z", + "public_key_multibase": "z6N97chBUvzLPJd2h1BzeMAg46SXeT9U8kjUUf3xYww7L", + "public_key_hex": "4fb4ce64458f81cf776660c0763bf48aa21fe4c678c4d98fa9689593b9f46a47", + "secret_key_hex": "34af53b9aa99974fd75e9ff060cf91ab9caa7e64df00bdbd53dbd534a3fc42bf" +} \ No newline at end of file diff --git a/keys/identity/human-karol.json b/keys/identity/human-karol.json new file mode 100644 index 0000000..2978cfb --- /dev/null +++ b/keys/identity/human-karol.json @@ -0,0 +1,7 @@ +{ + "did": "did:vm:human:karol", + "created_at": "2025-12-06T16:30:19Z", + "public_key_multibase": "zGXhv2SVGzvnzEzRXYSnvZ7KWcNrVmH7kyyBeieg1AsVj", + "public_key_hex": "e6bcfd724253f350ad3a3b3b47582e825405a6beadb29398e2b684ed45417d12", + "secret_key_hex": "342b66c7fc763f08c1d10a2fe8e21bc2def923d4622d1c01de7cd6fdf42547ce" +} \ No newline at end of file diff --git a/keys/identity/portal-shield.json b/keys/identity/portal-shield.json new file mode 100644 index 0000000..0ad8825 --- /dev/null +++ b/keys/identity/portal-shield.json @@ -0,0 +1,7 @@ +{ + "did": "did:vm:portal:shield", + "created_at": "2025-12-06T16:31:02Z", + "public_key_multibase": "z4vKTKqXGQu3MpjstQ9s7hqdmyaVxCuAcRN18g8Fdv6iP", + "public_key_hex": "3a3bb5a3530ad71738978eecf718d2f771607aec8de527e76deba54299884a9c", + "secret_key_hex": "8529f7aabb01282d8a7bd926917b0c86204e4ce030628856f863de5d226fb39a" +} \ No newline at end of file diff --git a/keys/identity/skill-validator.json b/keys/identity/skill-validator.json new file mode 100644 index 0000000..c74e392 --- /dev/null +++ b/keys/identity/skill-validator.json @@ -0,0 +1,7 @@ +{ + "did": "did:vm:skill:validator", + "created_at": "2025-12-06T16:31:28Z", + "public_key_multibase": "zBnCPzRMQ4rWwLUscR1CkYSeeVNt2awP3z6MP6xzQBnHd", + "public_key_hex": "a028e43fd556727b2bfc5d5fb7ac869bd95f5f145335e00dc42b003e44967bf8", + "secret_key_hex": "cee1cb919df4459397e5b58b36b7c724c8a7e860fe96f873e90408494b41bd9e" +} \ No newline at end of file diff --git a/ledger/__init__.py b/ledger/__init__.py new file mode 100644 index 0000000..c7288a9 --- /dev/null +++ b/ledger/__init__.py @@ -0,0 +1,33 @@ +from __future__ import annotations + +from ledger.db import ( + LedgerEvent, + ShadowReceiptRow, + connect, + default_db_path, + ensure_migrated, + get_shadow_receipts_by_trace, + get_shadow_receipts_recent, + insert_shadow_receipt, + log_mcp_call, + log_proof_artifact, + log_tool_invocation, + new_id, + new_trace_id, +) + +__all__ = [ + "LedgerEvent", + "ShadowReceiptRow", + "connect", + "default_db_path", + "ensure_migrated", + "get_shadow_receipts_by_trace", + "get_shadow_receipts_recent", + "insert_shadow_receipt", + "log_mcp_call", + "log_proof_artifact", + "log_tool_invocation", + "new_id", 
+ "new_trace_id", +] diff --git a/ledger/db.py b/ledger/db.py new file mode 100644 index 0000000..ad848b4 --- /dev/null +++ b/ledger/db.py @@ -0,0 +1,426 @@ +from __future__ import annotations + +import json +import os +import sqlite3 +import time +import uuid +from contextlib import contextmanager +from dataclasses import dataclass +from datetime import datetime, timezone +from pathlib import Path +from typing import Any, Iterator, Mapping, Sequence + +from ledger.migrate import migrate +from ledger.redact import redact_json_for_storage + +THIS_FILE = Path(__file__).resolve() +PKG_DIR = THIS_FILE.parent +REPO_ROOT = PKG_DIR.parent + + +def default_db_path() -> Path: + configured = os.environ.get("LEDGER_DB_PATH") or os.environ.get( + "VAULTMESH_LEDGER_DB" + ) + if configured: + return Path(configured).expanduser().resolve() + + vaultmesh_root = os.environ.get("VAULTMESH_ROOT") + if vaultmesh_root: + return ( + Path(vaultmesh_root).expanduser().resolve() / ".state" / "ledger.sqlite" + ).resolve() + + return (REPO_ROOT / ".state" / "ledger.sqlite").resolve() + + +def new_id() -> str: + return str(uuid.uuid4()) + + +def new_trace_id() -> str: + return str(uuid.uuid4()) + + +def _apply_pragmas(conn: sqlite3.Connection) -> None: + conn.execute("PRAGMA journal_mode=WAL;") + conn.execute("PRAGMA synchronous=NORMAL;") + conn.execute("PRAGMA foreign_keys=ON;") + conn.execute("PRAGMA busy_timeout=5000;") + conn.execute("PRAGMA temp_store=MEMORY;") + + +def connect(db_path: Path | str | None = None) -> sqlite3.Connection: + path = Path(db_path) if db_path is not None else default_db_path() + path.parent.mkdir(parents=True, exist_ok=True) + conn = sqlite3.connect(str(path), isolation_level=None) + conn.row_factory = sqlite3.Row + _apply_pragmas(conn) + return conn + + +@contextmanager +def open_db(db_path: Path | str | None = None) -> Iterator[sqlite3.Connection]: + conn = connect(db_path) + try: + yield conn + finally: + conn.close() + + +@contextmanager +def txn(conn: sqlite3.Connection) -> Iterator[sqlite3.Connection]: + conn.execute("BEGIN;") + try: + yield conn + conn.execute("COMMIT;") + except Exception: + conn.execute("ROLLBACK;") + raise + + +def ensure_migrated(conn: sqlite3.Connection) -> None: + migrate(conn) + + +def _utc_now_iso_z() -> str: + return ( + datetime.now(timezone.utc) + .replace(microsecond=0) + .isoformat() + .replace("+00:00", "Z") + ) + + +def _json_dumps(value: Any) -> str: + return json.dumps(value, ensure_ascii=False, sort_keys=True, separators=(",", ":")) + + +def _normalize_action(value: str | None) -> str | None: + if value is None: + return None + value = value.strip() + return value or None + + +def _sha256_hex(data: bytes) -> str: + import hashlib + + return hashlib.sha256(data).hexdigest() + + +def _blake3_hex(data: bytes) -> str | None: + try: + import blake3 # type: ignore + except Exception: + return None + return blake3.blake3(data).hexdigest() + + +@dataclass(frozen=True) +class LedgerEvent: + id: str + ts: str + kind: str + status: str + label: str + duration_ms: int | None + trace_id: str | None + error_text: str | None + + +@dataclass(frozen=True) +class ShadowReceiptRow: + id: str + ts: str + horizon_id: str + counterfactual_hash: str + entropy_delta: float | None + reason_unrealized: str + observer_signature: str | None + trace_id: str | None + meta_json: str | None + + +def log_tool_invocation( + *, + tool_name: str, + action: str | None = None, + status: str, + duration_ms: int | None = None, + input_payload: Any | None = None, + output_payload: Any 
| None = None, + error_text: str | None = None, + trace_id: str | None = None, + actor: str | None = None, + db_path: Path | str | None = None, +) -> str: + invocation_id = new_id() + redacted_input, input_meta = redact_json_for_storage(input_payload) + redacted_output, output_meta = redact_json_for_storage(output_payload) + + input_meta_json = _json_dumps(input_meta) if input_meta else None + output_meta_json = _json_dumps(output_meta) if output_meta else None + + with open_db(db_path) as conn: + ensure_migrated(conn) + with txn(conn): + conn.execute( + """ + INSERT INTO tool_invocations ( + id, ts, tool_name, action, status, duration_ms, + input_json, output_json, error_text, trace_id, actor, + input_meta_json, output_meta_json + ) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?); + """, + ( + invocation_id, + _utc_now_iso_z(), + tool_name, + _normalize_action(action), + status, + duration_ms, + redacted_input, + redacted_output, + error_text, + trace_id, + actor, + input_meta_json, + output_meta_json, + ), + ) + return invocation_id + + +def log_mcp_call( + *, + server_name: str, + method: str, + tool_name: str | None = None, + status: str, + duration_ms: int | None = None, + request_payload: Any | None = None, + response_payload: Any | None = None, + error_text: str | None = None, + trace_id: str | None = None, + client_id: str | None = None, + db_path: Path | str | None = None, +) -> str: + call_id = new_id() + redacted_request, request_meta = redact_json_for_storage(request_payload) + redacted_response, response_meta = redact_json_for_storage(response_payload) + + request_meta_json = _json_dumps(request_meta) if request_meta else None + response_meta_json = _json_dumps(response_meta) if response_meta else None + + with open_db(db_path) as conn: + ensure_migrated(conn) + with txn(conn): + conn.execute( + """ + INSERT INTO mcp_calls ( + id, ts, server_name, method, tool_name, status, duration_ms, + request_json, response_json, error_text, trace_id, client_id, + request_meta_json, response_meta_json + ) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?); + """, + ( + call_id, + _utc_now_iso_z(), + server_name, + method, + _normalize_action(tool_name), + status, + duration_ms, + redacted_request, + redacted_response, + error_text, + trace_id, + client_id, + request_meta_json, + response_meta_json, + ), + ) + return call_id + + +def log_proof_artifact( + *, + kind: str, + path: str | Path | None = None, + meta: Mapping[str, Any] | None = None, + trace_id: str | None = None, + db_path: Path | str | None = None, +) -> str: + artifact_id = new_id() + rel_path: str | None = None + sha256_hex: str | None = None + blake3_hex: str | None = None + size_bytes: int | None = None + + if path is not None: + p = Path(path) + try: + rel_path = str(p.resolve().relative_to(REPO_ROOT)) + except Exception: + rel_path = str(p) + if p.exists() and p.is_file(): + data = p.read_bytes() + sha256_hex = _sha256_hex(data) + blake3_hex = _blake3_hex(data) + size_bytes = len(data) + + meta_json_redacted, _ = redact_json_for_storage(meta) + meta_json = meta_json_redacted + + with open_db(db_path) as conn: + ensure_migrated(conn) + with txn(conn): + conn.execute( + """ + INSERT INTO proof_artifacts ( + id, ts, kind, path, sha256_hex, blake3_hex, size_bytes, meta_json, trace_id + ) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?); + """, + ( + artifact_id, + _utc_now_iso_z(), + kind, + rel_path, + sha256_hex, + blake3_hex, + size_bytes, + meta_json, + trace_id, + ), + ) + return artifact_id + + +@contextmanager +def 
timed_operation() -> Iterator[dict[str, Any]]: + start = time.perf_counter() + info: dict[str, Any] = {} + try: + yield info + finally: + info["duration_ms"] = int((time.perf_counter() - start) * 1000) + + +def insert_shadow_receipt( + *, + horizon_id: str, + counterfactual_hash: str, + reason_unrealized: str, + entropy_delta: float | None = None, + observer_signature: str | None = None, + trace_id: str | None = None, + meta: Mapping[str, Any] | None = None, + db_path: Path | str | None = None, +) -> str: + """ + Insert a ShadowReceipt (proof of restraint / unrealized futures) into the local SQLite ledger. + + Notes: + - `meta` is redacted via `redact_json_for_storage` before storage. + - Callers should pass `trace_id` to correlate with tool_invocations/mcp_calls/proof_artifacts. + """ + receipt_id = new_id() + meta_json_redacted, _ = redact_json_for_storage(meta) + + with open_db(db_path) as conn: + ensure_migrated(conn) + with txn(conn): + conn.execute( + """ + INSERT INTO shadow_receipts ( + id, ts, horizon_id, counterfactual_hash, entropy_delta, + reason_unrealized, observer_signature, trace_id, meta_json + ) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?); + """, + ( + receipt_id, + _utc_now_iso_z(), + horizon_id, + counterfactual_hash, + entropy_delta, + reason_unrealized, + observer_signature, + trace_id, + meta_json_redacted, + ), + ) + return receipt_id + + +def get_shadow_receipts_by_trace( + trace_id: str, *, db_path: Path | str | None = None +) -> list[ShadowReceiptRow]: + with open_db(db_path) as conn: + ensure_migrated(conn) + rows = conn.execute( + """ + SELECT id, ts, horizon_id, counterfactual_hash, entropy_delta, + reason_unrealized, observer_signature, trace_id, meta_json + FROM shadow_receipts + WHERE trace_id = ? + ORDER BY datetime(ts) ASC, id ASC; + """, + (trace_id,), + ).fetchall() + + out: list[ShadowReceiptRow] = [] + for r in rows: + out.append( + ShadowReceiptRow( + id=r["id"], + ts=r["ts"], + horizon_id=r["horizon_id"], + counterfactual_hash=r["counterfactual_hash"], + entropy_delta=r["entropy_delta"], + reason_unrealized=r["reason_unrealized"], + observer_signature=r["observer_signature"], + trace_id=r["trace_id"], + meta_json=r["meta_json"], + ) + ) + return out + + +def get_shadow_receipts_recent( + n: int = 50, *, db_path: Path | str | None = None +) -> list[ShadowReceiptRow]: + with open_db(db_path) as conn: + ensure_migrated(conn) + rows = conn.execute( + """ + SELECT id, ts, horizon_id, counterfactual_hash, entropy_delta, + reason_unrealized, observer_signature, trace_id, meta_json + FROM shadow_receipts + ORDER BY datetime(ts) DESC, id DESC + LIMIT ?; + """, + (int(n),), + ).fetchall() + + out: list[ShadowReceiptRow] = [] + for r in rows: + out.append( + ShadowReceiptRow( + id=r["id"], + ts=r["ts"], + horizon_id=r["horizon_id"], + counterfactual_hash=r["counterfactual_hash"], + entropy_delta=r["entropy_delta"], + reason_unrealized=r["reason_unrealized"], + observer_signature=r["observer_signature"], + trace_id=r["trace_id"], + meta_json=r["meta_json"], + ) + ) + return out diff --git a/ledger/migrate.py b/ledger/migrate.py new file mode 100644 index 0000000..d35ffe3 --- /dev/null +++ b/ledger/migrate.py @@ -0,0 +1,52 @@ +from __future__ import annotations + +import sqlite3 +from pathlib import Path + +THIS_FILE = Path(__file__).resolve() +PKG_DIR = THIS_FILE.parent +SCHEMA_DIR = PKG_DIR / "schema" + + +def _ensure_migrations_table(conn: sqlite3.Connection) -> None: + conn.execute( + """ + CREATE TABLE IF NOT EXISTS migrations ( + id INTEGER PRIMARY KEY, + name 
TEXT NOT NULL UNIQUE, + applied_at TEXT NOT NULL DEFAULT (datetime('now')) + ); + """ + ) + + +def _applied_migrations(conn: sqlite3.Connection) -> set[str]: + _ensure_migrations_table(conn) + rows = conn.execute("SELECT name FROM migrations;").fetchall() + names: set[str] = set() + for row in rows: + try: + names.add(row["name"]) + except Exception: + names.add(row[0]) + return names + + +def _migration_files() -> list[Path]: + if not SCHEMA_DIR.exists(): + return [] + files = [p for p in SCHEMA_DIR.iterdir() if p.is_file() and p.suffix == ".sql"] + files.sort(key=lambda p: p.name) + return files + + +def migrate(conn: sqlite3.Connection) -> None: + applied = _applied_migrations(conn) + for path in _migration_files(): + name = path.name + if name in applied: + continue + + sql = path.read_text(encoding="utf-8") + conn.executescript(sql) + conn.execute("INSERT INTO migrations (name) VALUES (?);", (name,)) diff --git a/ledger/redact.py b/ledger/redact.py new file mode 100644 index 0000000..349fb6d --- /dev/null +++ b/ledger/redact.py @@ -0,0 +1,199 @@ +from __future__ import annotations + +import hashlib +import json +from dataclasses import dataclass +from typing import Any + +DEFAULT_DENY_SUBSTRINGS = ( + "token", + "access_token", + "api_key", + "authorization", + "cookie", + "set-cookie", + "secret", + "password", + "passphrase", + "private_key", + "seed", + "mnemonic", + "github_token", + "cloudflare_api_token", + "openai", + "aws_", +) + + +def _sha256_hex(data: bytes) -> str: + return hashlib.sha256(data).hexdigest() + + +def _is_sensitive_key(key: str) -> bool: + lowered = key.strip().lower() + if not lowered: + return False + return any(s in lowered for s in DEFAULT_DENY_SUBSTRINGS) + + +def _safe_default(obj: Any) -> Any: + if isinstance(obj, (bytes, bytearray)): + b = bytes(obj) + return {"__type__": "bytes", "len": len(b), "sha256": _sha256_hex(b)} + return repr(obj) + + +@dataclass(frozen=True) +class RedactionMeta: + raw_sha256: str + raw_size_bytes: int + redacted_paths: int + truncated_strings: int + saw_sensitive: bool + + +def _stable_json_bytes(payload: Any) -> bytes: + if payload is None: + return b"null" + if isinstance(payload, (bytes, bytearray)): + return bytes(payload) + if isinstance(payload, str): + return payload.encode("utf-8", errors="replace") + try: + return json.dumps( + payload, + ensure_ascii=False, + sort_keys=True, + separators=(",", ":"), + default=_safe_default, + ).encode("utf-8", errors="replace") + except Exception: + return repr(payload).encode("utf-8", errors="replace") + + +def _redact( + value: Any, + *, + max_depth: int, + max_items: int, + max_string_len: int, + _depth: int = 0, + _meta: dict[str, int] | None = None, +) -> tuple[Any, dict[str, int]]: + meta = ( + _meta + if _meta is not None + else {"redacted_paths": 0, "truncated_strings": 0, "saw_sensitive": 0} + ) + + if _depth > max_depth: + meta["redacted_paths"] += 1 + return "[REDACTED_DEPTH]", meta + + if isinstance(value, dict): + out: dict[str, Any] = {} + items = list(value.items()) + if len(items) > max_items: + items = items[:max_items] + out["__truncated__"] = f"dict truncated to {max_items} items" + for k, v in items: + key = str(k) + if _is_sensitive_key(key): + meta["saw_sensitive"] += 1 + meta["redacted_paths"] += 1 + out[key] = "[REDACTED]" + continue + + out[key], meta = _redact( + v, + max_depth=max_depth, + max_items=max_items, + max_string_len=max_string_len, + _depth=_depth + 1, + _meta=meta, + ) + return out, meta + + if isinstance(value, list): + out_list: list[Any] = 
[] + items = value + truncated = False + if len(items) > max_items: + items = items[:max_items] + truncated = True + for item in items: + redacted_item, meta = _redact( + item, + max_depth=max_depth, + max_items=max_items, + max_string_len=max_string_len, + _depth=_depth + 1, + _meta=meta, + ) + out_list.append(redacted_item) + if truncated: + out_list.append(f"__truncated__: list truncated to {max_items} items") + return out_list, meta + + if isinstance(value, tuple): + return _redact( + list(value), + max_depth=max_depth, + max_items=max_items, + max_string_len=max_string_len, + _depth=_depth, + _meta=meta, + ) + + if isinstance(value, str): + if len(value) <= max_string_len: + return value, meta + meta["truncated_strings"] += 1 + return value[ + :max_string_len + ] + f" [TRUNCATED {len(value) - max_string_len} chars]", meta + + if isinstance(value, (int, float, bool)) or value is None: + return value, meta + + return _safe_default(value), meta + + +def redact_json_for_storage(payload: Any) -> tuple[str | None, dict[str, Any] | None]: + if payload is None: + return None, None + + raw_bytes = _stable_json_bytes(payload) + redacted_value, counters = _redact( + payload, max_depth=20, max_items=200, max_string_len=2048 + ) + + meta = RedactionMeta( + raw_sha256=_sha256_hex(raw_bytes), + raw_size_bytes=len(raw_bytes), + redacted_paths=counters.get("redacted_paths", 0), + truncated_strings=counters.get("truncated_strings", 0), + saw_sensitive=counters.get("saw_sensitive", 0) > 0, + ) + + try: + redacted_json = json.dumps( + redacted_value, + ensure_ascii=False, + sort_keys=True, + separators=(",", ":"), + default=_safe_default, + ) + except Exception: + redacted_json = json.dumps(repr(redacted_value), ensure_ascii=False) + + meta_dict: dict[str, Any] = { + "raw_sha256": meta.raw_sha256, + "raw_size_bytes": meta.raw_size_bytes, + "redacted_paths": meta.redacted_paths, + "truncated_strings": meta.truncated_strings, + } + if meta.saw_sensitive: + meta_dict["saw_sensitive"] = True + + return redacted_json, meta_dict diff --git a/ledger/schema/0001_init.sql b/ledger/schema/0001_init.sql new file mode 100644 index 0000000..c47228b --- /dev/null +++ b/ledger/schema/0001_init.sql @@ -0,0 +1,50 @@ +CREATE TABLE IF NOT EXISTS migrations ( + id INTEGER PRIMARY KEY, + name TEXT NOT NULL UNIQUE, + applied_at TEXT NOT NULL DEFAULT (datetime('now')) +); + +CREATE TABLE IF NOT EXISTS tool_invocations ( + id TEXT PRIMARY KEY, + ts TEXT NOT NULL DEFAULT (datetime('now')), + tool_name TEXT NOT NULL, + action TEXT, + status TEXT NOT NULL, + duration_ms INTEGER, + input_json TEXT, + output_json TEXT, + error_text TEXT, + trace_id TEXT, + actor TEXT, + input_meta_json TEXT, + output_meta_json TEXT +); + +CREATE TABLE IF NOT EXISTS mcp_calls ( + id TEXT PRIMARY KEY, + ts TEXT NOT NULL DEFAULT (datetime('now')), + server_name TEXT NOT NULL, + method TEXT NOT NULL, + tool_name TEXT, + status TEXT NOT NULL, + duration_ms INTEGER, + request_json TEXT, + response_json TEXT, + error_text TEXT, + trace_id TEXT, + client_id TEXT, + request_meta_json TEXT, + response_meta_json TEXT +); + +CREATE TABLE IF NOT EXISTS proof_artifacts ( + id TEXT PRIMARY KEY, + ts TEXT NOT NULL DEFAULT (datetime('now')), + kind TEXT NOT NULL, + path TEXT, + sha256_hex TEXT, + blake3_hex TEXT, + size_bytes INTEGER, + meta_json TEXT, + trace_id TEXT +); diff --git a/ledger/schema/0002_indexes.sql b/ledger/schema/0002_indexes.sql new file mode 100644 index 0000000..f8e8849 --- /dev/null +++ b/ledger/schema/0002_indexes.sql @@ -0,0 +1,10 @@ +CREATE 
INDEX IF NOT EXISTS idx_tool_invocations_ts ON tool_invocations(ts); +CREATE INDEX IF NOT EXISTS idx_tool_invocations_tool_ts ON tool_invocations(tool_name, ts); +CREATE INDEX IF NOT EXISTS idx_tool_invocations_trace_id ON tool_invocations(trace_id); + +CREATE INDEX IF NOT EXISTS idx_mcp_calls_ts ON mcp_calls(ts); +CREATE INDEX IF NOT EXISTS idx_mcp_calls_server_ts ON mcp_calls(server_name, ts); +CREATE INDEX IF NOT EXISTS idx_mcp_calls_trace_id ON mcp_calls(trace_id); + +CREATE INDEX IF NOT EXISTS idx_proof_artifacts_ts ON proof_artifacts(ts); +CREATE INDEX IF NOT EXISTS idx_proof_artifacts_trace_id ON proof_artifacts(trace_id); diff --git a/ledger/schema/0003_shadow_receipts.sql b/ledger/schema/0003_shadow_receipts.sql new file mode 100644 index 0000000..e76c98e --- /dev/null +++ b/ledger/schema/0003_shadow_receipts.sql @@ -0,0 +1,18 @@ +-- 0003_shadow_receipts.sql +CREATE TABLE IF NOT EXISTS shadow_receipts ( + id TEXT PRIMARY KEY, -- uuid + ts TEXT NOT NULL DEFAULT (datetime('now')), + horizon_id TEXT NOT NULL, -- grouping key for “unrealized futures” + counterfactual_hash TEXT NOT NULL, -- hash of normalized counterfactual payload + entropy_delta REAL, -- optional numeric signal (can be NULL) + reason_unrealized TEXT NOT NULL, -- short enum-like string (e.g. "blocked", "operator_abort") + observer_signature TEXT, -- optional (future: Ed25519 signature) + trace_id TEXT, -- correlate to invocation chain + meta_json TEXT -- redacted metadata +); + +-- minimal safety constraints +CREATE INDEX IF NOT EXISTS idx_shadow_receipts_ts ON shadow_receipts(ts); +CREATE INDEX IF NOT EXISTS idx_shadow_receipts_horizon ON shadow_receipts(horizon_id); +CREATE INDEX IF NOT EXISTS idx_shadow_receipts_trace ON shadow_receipts(trace_id); +CREATE INDEX IF NOT EXISTS idx_shadow_receipts_reason ON shadow_receipts(reason_unrealized); diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..5c3e4ac --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,9 @@ +[project] +name = "vaultmesh-cli" +version = "0.1.0" +description = "VaultMesh Civilization Ledger CLI" +requires-python = ">=3.10" +dependencies = ["click>=8.0", "blake3>=0.3"] + +[project.scripts] +vm = "cli.vm_cli:cli" diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..7b13c14 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,8 @@ +# VaultMesh Python dependencies +blake3>=0.3.0 +click>=8.0.0 +pynacl>=1.5.0 +base58>=2.1.0 + +# Development/testing +pytest>=7.0.0 diff --git a/scripts/console_receipts_server.py b/scripts/console_receipts_server.py new file mode 100644 index 0000000..a2b599c --- /dev/null +++ b/scripts/console_receipts_server.py @@ -0,0 +1,935 @@ +#!/usr/bin/env python3 +""" +VaultMesh Console Receipts HTTP Bridge + +A minimal FastAPI server that exposes the Console receipt emitter +for the OpenCode plugin to call via HTTP. 
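+
+Receipts are appended to receipts/console/console_events.jsonl under
+VAULTMESH_ROOT, and ROOT.console.txt is refreshed after every write.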
+ +Usage: + python scripts/console_receipts_server.py + + # Or with uvicorn directly: + uvicorn scripts.console_receipts_server:app --host 127.0.0.1 --port 9110 +""" + +import os +import sys +from pathlib import Path +from typing import Any, Dict, List, Literal, Optional + +# Add parent directory to path for imports +SCRIPT_DIR = Path(__file__).parent.absolute() +VAULTMESH_ROOT = SCRIPT_DIR.parent +sys.path.insert(0, str(VAULTMESH_ROOT)) + +# Set environment variable for the emitter +os.environ.setdefault("VAULTMESH_ROOT", str(VAULTMESH_ROOT)) + +from fastapi import FastAPI, HTTPException, Request +from fastapi.middleware.cors import CORSMiddleware +from pydantic import BaseModel + +from dataclasses import asdict + +from engines.console.receipts import ( + ConsoleReceiptEmitter, + ReceiptType, + emit_console_receipt, + get_emitter, +) +from engines.console.approvals import get_approval_manager, ApprovalRequest + +# ============================================================================ +# FastAPI App +# ============================================================================ + +app = FastAPI( + title="VaultMesh Console Receipts API", + description="HTTP bridge for emitting Console receipts to the Civilization Ledger", + version="0.1.0", +) + +# Allow CORS for local development +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + + +# ============================================================================ +# Request/Response Models +# ============================================================================ + +class ReceiptIn(BaseModel): + """Request body for emitting a receipt.""" + type: ReceiptType + session_id: Optional[str] = None + payload: Dict[str, Any] + + +class ReceiptOut(BaseModel): + """Response after emitting a receipt.""" + ok: bool + record: Dict[str, Any] + + +class RootInfo(BaseModel): + """Console scroll root information.""" + engine_id: str + merkle_root: str + events: int + updated_at: Optional[str] = None + + +class AnchorRequest(BaseModel): + """Request to trigger Guardian anchor.""" + scrolls: Optional[List[str]] = None + + +class SearchRequest(BaseModel): + """Request to search receipts.""" + scroll: Optional[str] = "Console" + receipt_type: Optional[str] = None + limit: int = 50 + + +class ApprovalRequestIn(BaseModel): + """Request to create an approval.""" + session_id: str + action_type: str + action_details: Dict[str, Any] + requested_by: str + approvers: List[str] + timeout_minutes: int = 60 + + +class ApprovalDecisionIn(BaseModel): + """Request to decide on an approval.""" + approved: bool + approver: str + reason: str = "" + + +# ============================================================================ +# Endpoints +# ============================================================================ + +@app.get("/health") +async def health_check(): + """Health check endpoint.""" + return {"status": "ok", "engine": "console"} + + +@app.post("/v1/console/receipt", response_model=ReceiptOut) +async def post_receipt(req: ReceiptIn): + """ + Emit a Console receipt. + + This is the main endpoint called by the OpenCode plugin. 
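+
+    Example (assuming the default host/port from the module docstring):
+
+        curl -X POST http://127.0.0.1:9110/v1/console/receipt \\
+            -H 'Content-Type: application/json' \\
+            -d '{"type": "console_command", "session_id": "session-1", "payload": {}}'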
+ """ + try: + record = emit_console_receipt( + receipt_type=req.type, + payload=req.payload, + session_id=req.session_id, + ) + return ReceiptOut(ok=True, record=record) + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + + +@app.get("/v1/console/root", response_model=RootInfo) +async def get_root(): + """ + Get Console scroll Merkle root info. + """ + try: + emitter = get_emitter() + info = emitter.get_root_info() + return RootInfo(**info) + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + + +@app.post("/v1/guardian/anchor") +async def trigger_anchor(req: AnchorRequest): + """ + Trigger a Guardian anchor cycle. + + NOTE: This is a stub - in production, this would call the + actual Guardian engine to anchor the specified scrolls. + """ + scrolls = req.scrolls or ["Console"] + # TODO: Integrate with actual Guardian engine + return { + "ok": True, + "message": f"Anchor requested for scrolls: {scrolls}", + "note": "Stub implementation - Guardian integration pending", + } + + +@app.post("/v1/receipts/search") +async def search_receipts(req: SearchRequest): + """ + Search Console receipts. + + NOTE: This is a simple implementation that reads from JSONL. + In production, use a proper index or database. + """ + import json + from pathlib import Path + + emitter = get_emitter() + events_path = Path(emitter.events_path) + + if not events_path.exists(): + return {"results": [], "total": 0} + + results = [] + with open(events_path, "r", encoding="utf-8") as f: + for line in f: + if not line.strip(): + continue + try: + record = json.loads(line) + # Filter by receipt type if specified + if req.receipt_type and record.get("type") != req.receipt_type: + continue + results.append(record) + except json.JSONDecodeError: + continue + + # Return most recent first, with limit + results = list(reversed(results))[:req.limit] + + return { + "results": results, + "total": len(results), + "scroll": req.scroll, + } + + +@app.get("/v1/console/receipts") +async def list_receipts(limit: int = 20, offset: int = 0): + """ + List Console receipts with pagination. 
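+
+    Receipts are returned newest-first; use `offset` and `limit` to page
+    through the reversed list.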
+ """ + import json + from pathlib import Path + + emitter = get_emitter() + events_path = Path(emitter.events_path) + + if not events_path.exists(): + return {"receipts": [], "total": 0, "limit": limit, "offset": offset} + + all_receipts = [] + with open(events_path, "r", encoding="utf-8") as f: + for line in f: + if not line.strip(): + continue + try: + all_receipts.append(json.loads(line)) + except json.JSONDecodeError: + continue + + # Most recent first + all_receipts = list(reversed(all_receipts)) + total = len(all_receipts) + page = all_receipts[offset : offset + limit] + + return { + "receipts": page, + "total": total, + "limit": limit, + "offset": offset, + } + + +# ============================================================================ +# Session Query Endpoints +# ============================================================================ + +@app.get("/v1/console/sessions") +async def list_sessions(status: str = "all", limit: int = 20): + """List Console sessions.""" + import json + + emitter = get_emitter() + events_path = Path(emitter.events_path) + + if not events_path.exists(): + return {"sessions": [], "total": 0} + + sessions: Dict[str, Dict[str, Any]] = {} + + for line in events_path.read_text(encoding="utf-8").splitlines(): + if not line.strip(): + continue + try: + r = json.loads(line) + except Exception: + continue + + sid = r.get("session_id") + if not sid: + continue + + if sid not in sessions: + sessions[sid] = { + "session_id": sid, + "status": "active", + "started_at": None, + "ended_at": None, + "events": 0, + } + + sessions[sid]["events"] += 1 + r_type = r.get("type") + + if r_type == "console_session_start": + sessions[sid]["started_at"] = r["ts"] + elif r_type == "console_session_end": + sessions[sid]["ended_at"] = r["ts"] + sessions[sid]["status"] = "ended" + + results = list(sessions.values()) + if status == "active": + results = [s for s in results if s["status"] == "active"] + elif status == "ended": + results = [s for s in results if s["status"] == "ended"] + + return {"sessions": results[-limit:], "total": len(results)} + + +@app.get("/v1/console/sessions/{session_id}") +async def get_session(session_id: str): + """Get detailed session status.""" + import json + + emitter = get_emitter() + events_path = Path(emitter.events_path) + + if not events_path.exists(): + raise HTTPException(status_code=404, detail="Session not found") + + session: Optional[Dict[str, Any]] = None + + for line in events_path.read_text(encoding="utf-8").splitlines(): + if not line.strip(): + continue + try: + r = json.loads(line) + except Exception: + continue + + if r.get("session_id") != session_id: + continue + + r_type = r.get("type") + payload = r.get("payload") or {} + + if r_type == "console_session_start": + session = { + "session_id": session_id, + "status": "active", + "started_at": r["ts"], + "agent_type": payload.get("agent_type"), + "model_id": payload.get("model_id"), + "caller": payload.get("caller"), + } + elif r_type == "console_session_end" and session is not None: + session["status"] = "ended" + session["ended_at"] = r["ts"] + + if session is None: + raise HTTPException(status_code=404, detail="Session not found") + + return session + + +# ============================================================================ +# Approval Endpoints +# ============================================================================ + +@app.post("/v1/console/approvals/request") +async def request_approval(req: ApprovalRequestIn): + """Request approval for an action.""" + 
manager = get_approval_manager() + request = manager.request_approval( + session_id=req.session_id, + action_type=req.action_type, + action_details=req.action_details, + requested_by=req.requested_by, + approvers=req.approvers, + timeout_minutes=req.timeout_minutes, + ) + return {"ok": True, "approval_id": request.approval_id} + + +@app.get("/v1/console/approvals/pending") +async def list_pending_approvals(session_id: Optional[str] = None): + """List pending approval requests.""" + manager = get_approval_manager() + pending = manager.list_pending(session_id) + return {"pending": [asdict(r) for r in pending]} + + +@app.post("/v1/console/approvals/{approval_id}/decide") +async def decide_approval(approval_id: str, req: ApprovalDecisionIn): + """Approve or reject a pending action.""" + manager = get_approval_manager() + try: + success = manager.decide( + approval_id=approval_id, + approved=req.approved, + approver=req.approver, + reason=req.reason, + ) + return { + "ok": success, + "decision": "approved" if req.approved else "rejected", + } + except PermissionError as e: + raise HTTPException(status_code=403, detail=str(e)) + except KeyError: + raise HTTPException(status_code=404, detail="Approval not found") + + +# ============================================================================ +# GitLab Webhook Handler +# ============================================================================ + +@app.post("/gitlab/webhook") +async def gitlab_webhook(request: Request): + """ + Handle GitLab webhook events and emit Console receipts. + + Supports: + - Push events → console_gitlab_push + - Merge request events → console_gitlab_mr + - Pipeline events → console_gitlab_pipeline + + Configure in GitLab: Settings → Webhooks + """ + event_type = request.headers.get("X-Gitlab-Event", "unknown") + + try: + body = await request.json() + except Exception: + raise HTTPException(status_code=400, detail="Invalid JSON body") + + emitter = get_emitter() + + if event_type == "Push Hook": + # Push event + project = body.get("project", {}) + commits = body.get("commits", []) + session_id = f"gitlab-push-{body.get('checkout_sha', 'unknown')[:12]}" + + emit_console_receipt( + "console_command", # Reuse console_command for push events + { + "command": "git_push", + "args_hash": body.get("checkout_sha", ""), + "exit_code": 0, + "duration_ms": 0, + "gitlab_event": "push", + "project_path": project.get("path_with_namespace", ""), + "ref": body.get("ref", ""), + "commits_count": len(commits), + "user": body.get("user_name", ""), + }, + session_id=session_id, + ) + + return {"ok": True, "event": "push", "session_id": session_id} + + elif event_type == "Merge Request Hook": + # Merge request event + mr = body.get("object_attributes", {}) + project = body.get("project", {}) + session_id = f"gitlab-mr-{mr.get('iid', 'unknown')}" + + action = mr.get("action", "update") # open, close, merge, update, etc. 
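+        # Each MR action is recorded as a console_command receipt, keyed to a per-MR session.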
+
+        emit_console_receipt(
+            "console_command",
+            {
+                "command": f"mr_{action}",
+                "args_hash": mr.get("last_commit", {}).get("id", ""),
+                "exit_code": 0,
+                "duration_ms": 0,
+                "gitlab_event": "merge_request",
+                "project_path": project.get("path_with_namespace", ""),
+                "mr_iid": mr.get("iid"),
+                "mr_title": mr.get("title", ""),
+                "mr_state": mr.get("state", ""),
+                "source_branch": mr.get("source_branch", ""),
+                "target_branch": mr.get("target_branch", ""),
+                "user": body.get("user", {}).get("name", ""),
+            },
+            session_id=session_id,
+        )
+
+        return {"ok": True, "event": "merge_request", "action": action, "session_id": session_id}
+
+    elif event_type == "Pipeline Hook":
+        # Pipeline event
+        pipeline = body.get("object_attributes", {})
+        project = body.get("project", {})
+        session_id = f"gitlab-pipeline-{pipeline.get('id', 'unknown')}"
+
+        status = pipeline.get("status", "unknown")
+
+        # Only emit for significant status changes
+        if status in ("running", "success", "failed", "canceled"):
+            if status == "running":
+                emit_console_receipt(
+                    "console_session_start",
+                    {
+                        "agent_type": "gitlab-ci",
+                        "model_id": "none",
+                        "caller": "did:vm:service:gitlab-webhook",
+                        "project_path": project.get("path_with_namespace", ""),
+                        "pipeline_id": pipeline.get("id"),
+                        "ref": pipeline.get("ref", ""),
+                        "commit": pipeline.get("sha", ""),
+                    },
+                    session_id=session_id,
+                )
+            else:
+                emit_console_receipt(
+                    "console_session_end",
+                    {
+                        "duration_ms": pipeline.get("duration", 0) * 1000 if pipeline.get("duration") else 0,
+                        "commands_executed": 0,
+                        "files_modified": 0,
+                        "exit_reason": f"pipeline-{status}",
+                    },
+                    session_id=session_id,
+                )
+
+        return {"ok": True, "event": "pipeline", "status": status, "session_id": session_id}
+
+    else:
+        # Unknown event type - log but don't fail
+        return {"ok": True, "event": event_type, "note": "Unhandled event type"}
+
+
+# ============================================================================
+# HTML Dashboard
+# ============================================================================
+
+from fastapi.responses import HTMLResponse
+
+def format_time_ago(ts_str: str) -> str:
+    """Format timestamp as relative time."""
+    from datetime import datetime, timezone
+    try:
+        ts = datetime.fromisoformat(ts_str.replace("Z", "+00:00"))
+        now = datetime.now(timezone.utc)
+        diff = now - ts
+        seconds = diff.total_seconds()
+        if seconds < 60:
+            return f"{int(seconds)}s ago"
+        elif seconds < 3600:
+            return f"{int(seconds/60)}m ago"
+        elif seconds < 86400:
+            return f"{int(seconds/3600)}h ago"
+        else:
+            return f"{int(seconds/86400)}d ago"
+    except Exception:
+        return ts_str[:19]
+
+
+def get_event_color(event_type: str) -> str:
+    """Get color for event type."""
+    colors = {
+        "console_session_start": "#22c55e",     # green
+        "console_session_end": "#6b7280",       # gray
+        "console_command": "#3b82f6",           # blue
+        "console_file_edit": "#a855f7",         # purple
+        "console_tool_call": "#06b6d4",         # cyan
+        "console_approval_request": "#f59e0b",  # amber
+        "console_approval": "#10b981",          # emerald
+        "console_git_commit": "#ec4899",        # pink
+    }
+    return colors.get(event_type, "#9ca3af")
+
+
+def get_event_icon(event_type: str) -> str:
+    """Get icon for event type."""
+    icons = {
+        "console_session_start": "▶",
+        "console_session_end": "■",
+        "console_command": "⌘",
+        "console_file_edit": "✎",
+        "console_tool_call": "⚡",
+        "console_approval_request": "⏳",
+        "console_approval": "✓",
+        "console_git_commit": "⬆",
+    }
+    return icons.get(event_type, "•")
+
+
+@app.get("/console/dashboard", response_class=HTMLResponse)
+async def console_dashboard():
+    """
+    HTML dashboard showing Console status at a glance.
+
+    Shows:
+    - Active and recent sessions
+    - Pending approvals
+    - Recent events stream
+    """
+    import json
+
+    emitter = get_emitter()
+    events_path = Path(emitter.events_path)
+
+    # Collect data
+    sessions: Dict[str, Dict[str, Any]] = {}
+    all_events: List[Dict[str, Any]] = []
+
+    if events_path.exists():
+        for line in events_path.read_text(encoding="utf-8").splitlines():
+            if not line.strip():
+                continue
+            try:
+                r = json.loads(line)
+                all_events.append(r)
+
+                sid = r.get("session_id")
+                if sid:
+                    if sid not in sessions:
+                        sessions[sid] = {
+                            "session_id": sid,
+                            "status": "active",
+                            "started_at": None,
+                            "ended_at": None,
+                            "events": 0,
+                            "agent_type": None,
+                            "caller": None,
+                        }
+                    sessions[sid]["events"] += 1
+
+                    if r.get("type") == "console_session_start":
+                        sessions[sid]["started_at"] = r.get("ts")
+                        payload = r.get("payload", {})
+                        sessions[sid]["agent_type"] = payload.get("agent_type")
+                        sessions[sid]["caller"] = payload.get("caller")
+                    elif r.get("type") == "console_session_end":
+                        sessions[sid]["ended_at"] = r.get("ts")
+                        sessions[sid]["status"] = "ended"
+            except (json.JSONDecodeError, AttributeError):
+                continue
+
+    # Get pending approvals
+    manager = get_approval_manager()
+    pending = manager.list_pending()
+
+    # Get root info
+    root_info = emitter.get_root_info()
+
+    # Sort sessions by most recent activity
+    session_list = sorted(
+        sessions.values(),
+        key=lambda s: s.get("started_at") or "",
+        reverse=True
+    )[:20]
+
+    # Recent events (last 30)
+    recent_events = list(reversed(all_events[-30:]))
+
+    # Build HTML
+    html = f"""<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>VaultMesh Console Dashboard</title>
+<style>
+body {{ background: #111827; color: #e5e7eb; font-family: monospace; margin: 2rem; }}
+.stats span {{ margin-right: 1.5rem; }}
+.card {{ background: #1f2937; border-radius: 8px; padding: 1rem; margin: 1rem 0; }}
+.item {{ border-bottom: 1px solid #374151; padding: 0.4rem 0; }}
+.muted {{ color: #9ca3af; }}
+.active {{ color: #22c55e; }}
+.ended {{ color: #6b7280; }}
+</style>
+</head>
+<body>
+<h1>🜂 VaultMesh Console Dashboard</h1>
+<div class="stats">
+<span>Sessions: {len(sessions)}</span>
+<span>Events: {root_info.get('events', 0)}</span>
+<span>Pending: {len(pending)}</span>
+<span>Root: {root_info.get('merkle_root', '0')[:12]}...</span>
+</div>
+
+<div class="card">
+<h2>⏳ Pending Approvals ({len(pending)})</h2>
+"""
+
+    if pending:
+        for p in pending:
+            html += f"""
+<div class="item">
+<div>{p.approval_id}</div>
+<div>{p.action_type}</div>
+<div class="muted">Session: {p.session_id}</div>
+<code>vm console approve {p.approval_id} --reason "..."</code>
+</div>
+"""
+    else:
+        html += '<div class="muted">No pending approvals</div>'
+
+    html += """
+</div>
+
+<div class="card">
+<h2>📡 Sessions</h2>
+"""
+
+    if session_list:
+        for s in session_list[:10]:
+            status_class = "active" if s["status"] == "active" else "ended"
+            agent = s.get("agent_type") or "unknown"
+            started = format_time_ago(s["started_at"]) if s.get("started_at") else "?"
+            html += f"""
+<div class="item">
+<div class="{status_class}">{s['session_id']}</div>
+<div class="muted">{agent} • {s['events']} events • {started}</div>
+</div>
+"""
+    else:
+        html += '<div class="muted">No sessions yet</div>'
+
+    html += """
+</div>
+
+<div class="card">
+<h2>📜 Recent Events</h2>
+"""
+
+    if recent_events:
+        for e in recent_events:
+            etype = e.get("type", "unknown")
+            ts = format_time_ago(e.get("ts", ""))
+            color = get_event_color(etype)
+            icon = get_event_icon(etype)
+            payload = e.get("payload", {})
+
+            # Build detail string based on event type
+            detail = ""
+            if etype == "console_command":
+                detail = payload.get("command", "")
+            elif etype == "console_file_edit":
+                detail = payload.get("file_path", "")
+            elif etype == "console_approval_request":
+                detail = f"{payload.get('action_type', '')} → {payload.get('approval_id', '')}"
+            elif etype == "console_approval":
+                approved = "✓" if payload.get("approved") else "✗"
+                detail = f"{approved} {payload.get('action_type', '')} by {payload.get('approver', '')}"
+            elif etype == "console_session_start":
+                detail = f"{payload.get('agent_type', '')} by {payload.get('caller', '')}"
+            elif etype == "console_session_end":
+                detail = payload.get("exit_reason", "")
+
+            short_type = etype.replace("console_", "")
+
+            html += f"""
+<div class="item">
+<span style="color: {color}">{icon}</span>
+<span class="muted">{ts}</span>
+<span>{short_type}</span>
+<span class="muted">{detail}</span>
+</div>
+"""
+    else:
+        html += '<div class="muted">No events yet</div>'
+
+    html += """
+</div>
+</body>
+</html>
+ + + + +""" + + return html + + +# ============================================================================ +# Main +# ============================================================================ + +def main(): + """Run the server.""" + import uvicorn + + print(f"[VaultMesh] Console Receipts Server") + print(f"[VaultMesh] Root: {VAULTMESH_ROOT}") + print(f"[VaultMesh] Listening on http://127.0.0.1:9110") + + uvicorn.run( + app, + host="127.0.0.1", + port=9110, + log_level="info", + ) + + +if __name__ == "__main__": + main() diff --git a/scripts/gitlab_console_session.sh b/scripts/gitlab_console_session.sh new file mode 100755 index 0000000..5dedeaa --- /dev/null +++ b/scripts/gitlab_console_session.sh @@ -0,0 +1,190 @@ +#!/usr/bin/env bash +# ============================================================================ +# VaultMesh GitLab Console Session Helper +# +# Wires GitLab CI pipelines into the Console engine as sessions. +# Each pipeline becomes a Console session with start/end receipts. +# +# Usage: +# ./scripts/gitlab_console_session.sh start +# ./scripts/gitlab_console_session.sh end +# ./scripts/gitlab_console_session.sh cmd +# ./scripts/gitlab_console_session.sh request_approval +# +# Environment Variables (set in GitLab CI/CD Settings → Variables): +# VAULTMESH_CONSOLE_BASE - Console HTTP bridge URL +# VAULTMESH_CALLER_DID - DID for GitLab CI service +# VAULTMESH_APPROVER_DID - Default approver DID +# VM_ENV - Environment: dev, staging, or prod +# +# GitLab CI Variables (automatic): +# CI_PIPELINE_ID, CI_PROJECT_PATH, CI_COMMIT_SHA, CI_JOB_STATUS +# ============================================================================ + +set -euo pipefail + +# Defaults +BASE="${VAULTMESH_CONSOLE_BASE:-http://127.0.0.1:9110/v1/console}" +ENDPOINT="$BASE/receipt" +CALLER="${VAULTMESH_CALLER_DID:-did:vm:service:gitlab-ci}" +APPROVER="${VAULTMESH_APPROVER_DID:-did:vm:human:karol}" +SESSION_ID="gitlab-pipeline-${CI_PIPELINE_ID:-unknown}" +PROJECT="${CI_PROJECT_PATH:-unknown}" +COMMIT="${CI_COMMIT_SHA:-unknown}" +ENV="${VM_ENV:-prod}" # Default to prod (most restrictive) + +# Emit a receipt to Console +emit() { + local type="$1" + local payload="$2" + + curl -sS -X POST "$ENDPOINT" \ + -H 'Content-Type: application/json' \ + -d "{ + \"type\":\"$type\", + \"session_id\":\"$SESSION_ID\", + \"payload\":$payload + }" >/dev/null || echo "[VaultMesh] Warning: Failed to emit $type receipt" +} + +# Request approval and return approval_id +request_approval() { + local action_type="$1" + local details="${2:-{}}" + + local resp + resp=$(curl -sS -X POST "$BASE/approvals/request" \ + -H 'Content-Type: application/json' \ + -d "{ + \"session_id\":\"$SESSION_ID\", + \"action_type\":\"$action_type\", + \"action_details\":$details, + \"requested_by\":\"$CALLER\", + \"approvers\":[\"$APPROVER\"], + \"timeout_minutes\": 120 + }") + + echo "$resp" +} + +# Check if approval is still pending +check_pending() { + local approval_id="$1" + + local resp + resp=$(curl -sS "$BASE/approvals/pending?session_id=$SESSION_ID") + + # Check if approval_id is in pending list + if echo "$resp" | jq -e ".pending[] | select(.approval_id == \"$approval_id\")" >/dev/null 2>&1; then + echo "pending" + else + echo "decided" + fi +} + +# Main dispatch +case "${1:-}" in + start) + echo "[VaultMesh] Starting Console session: $SESSION_ID (env: $ENV)" + emit "console_session_start" "{ + \"agent_type\":\"gitlab-ci\", + \"model_id\":\"none\", + \"caller\":\"$CALLER\", + \"project_path\":\"$PROJECT\", + 
\"pipeline_id\":\"${CI_PIPELINE_ID:-unknown}\", + \"commit\":\"$COMMIT\", + \"env\":\"$ENV\" + }" + echo "[VaultMesh] Session started" + ;; + + end) + STATUS="${CI_JOB_STATUS:-unknown}" + echo "[VaultMesh] Ending Console session: $SESSION_ID (status: $STATUS)" + emit "console_session_end" "{ + \"duration_ms\":0, + \"commands_executed\":0, + \"files_modified\":0, + \"exit_reason\":\"pipeline-$STATUS\" + }" + echo "[VaultMesh] Session ended" + ;; + + cmd) + CMD_NAME="${2:-unknown}" + EXIT_CODE="${3:-0}" + echo "[VaultMesh] Recording command: $CMD_NAME (exit: $EXIT_CODE)" + emit "console_command" "{ + \"command\":\"$CMD_NAME\", + \"args_hash\":\"$COMMIT\", + \"exit_code\":$EXIT_CODE, + \"duration_ms\":0 + }" + ;; + + request_approval) + ACTION_TYPE="${2:-deploy}" + DETAILS="${3:-{\"env\":\"$ENV\",\"commit\":\"$COMMIT\",\"pipeline_id\":\"${CI_PIPELINE_ID:-unknown}\"}}" + + echo "[VaultMesh] Requesting approval for: $ACTION_TYPE (env: $ENV)" + RESP=$(request_approval "$ACTION_TYPE" "$DETAILS") + APPROVAL_ID=$(echo "$RESP" | jq -r '.approval_id') + + echo "[VaultMesh] Approval requested: $APPROVAL_ID" + echo "" + echo "============================================================" + echo "ACTION REQUIRES APPROVAL" + echo "============================================================" + echo "" + echo "Approval ID: $APPROVAL_ID" + echo "Action Type: $ACTION_TYPE" + echo "" + echo "To approve, run on VaultMesh host:" + echo "" + echo " export VAULTMESH_ACTOR_DID=\"$APPROVER\"" + echo " vm console approve $APPROVAL_ID --reason \"Approved from GitLab\"" + echo "" + echo "============================================================" + + # Exit 1 to fail the job (approval required) + exit 1 + ;; + + check_approval) + APPROVAL_ID="${2:-}" + if [ -z "$APPROVAL_ID" ]; then + echo "Usage: $0 check_approval " >&2 + exit 1 + fi + + STATUS=$(check_pending "$APPROVAL_ID") + echo "[VaultMesh] Approval $APPROVAL_ID status: $STATUS" + + if [ "$STATUS" = "pending" ]; then + echo "Approval still pending. Cannot proceed." + exit 1 + else + echo "Approval decided. Proceeding." + exit 0 + fi + ;; + + *) + echo "VaultMesh GitLab Console Session Helper" + echo "" + echo "Usage: $0 {start|end|cmd|request_approval|check_approval}" + echo "" + echo "Commands:" + echo " start - Emit console_session_start" + echo " end - Emit console_session_end" + echo " cmd - Emit console_command" + echo " request_approval - Request approval and exit 1" + echo " check_approval - Check if approval decided" + echo "" + echo "Environment:" + echo " VAULTMESH_CONSOLE_BASE - Console HTTP bridge URL" + echo " VAULTMESH_CALLER_DID - DID for GitLab CI service" + echo " VAULTMESH_APPROVER_DID - Default approver DID" + exit 1 + ;; +esac diff --git a/scripts/offsec_node_client.py b/scripts/offsec_node_client.py new file mode 100644 index 0000000..173e444 --- /dev/null +++ b/scripts/offsec_node_client.py @@ -0,0 +1,123 @@ +""" +Thin client for talking to the OffSec Shield Node (offsec-agents MCP backend). 
+ +Usage: + + from scripts.offsec_node_client import OffsecNodeClient + + client = OffsecNodeClient() # uses OFFSEC_NODE_URL env + agents = await client.command("agents list") + status = await client.command("tem status") + +""" + +from __future__ import annotations + +import asyncio +import json +import os +from dataclasses import dataclass +from typing import Any, Dict, Optional + +import aiohttp + + +DEFAULT_OFFSEC_NODE_URL = "http://shield-vm:8081" + + +@dataclass +class OffsecNodeError(Exception): + message: str + status: Optional[int] = None + details: Optional[Dict[str, Any]] = None + + def __str__(self) -> str: + base = self.message + if self.status is not None: + base += f" (status={self.status})" + if self.details: + base += f" details={self.details}" + return base + + +@dataclass +class OffsecNodeClient: + base_url: str = DEFAULT_OFFSEC_NODE_URL + timeout_seconds: int = 10 + + @classmethod + def from_env(cls) -> "OffsecNodeClient": + url = os.getenv("OFFSEC_NODE_URL", DEFAULT_OFFSEC_NODE_URL) + return cls(base_url=url) + + async def health(self) -> Dict[str, Any]: + url = f"{self.base_url.rstrip('/')}/health" + async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=self.timeout_seconds)) as session: + async with session.get(url) as resp: + if resp.status != 200: + raise OffsecNodeError("Shield node health check failed", status=resp.status) + return await resp.json() + + async def command( + self, + command: str, + session_id: str = "vaultmesh-client", + user: str = "vaultmesh", + ) -> Dict[str, Any]: + """ + Send a command to the Shield Node MCP backend. + + Example commands: + "agents list" + "status" + "shield status" + "proof latest" + "agent spawn recon" + """ + url = f"{self.base_url.rstrip('/')}/mcp/command" + payload: Dict[str, Any] = { + "session_id": session_id, + "user": user, + "command": command, + } + + async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=self.timeout_seconds)) as session: + async with session.post(url, json=payload) as resp: + text = await resp.text() + if resp.status != 200: + # Try to parse JSON details if present + try: + data = json.loads(text) + except json.JSONDecodeError: + data = None + raise OffsecNodeError( + "Shield node command failed", + status=resp.status, + details=data or {"raw": text}, + ) + try: + return json.loads(text) + except json.JSONDecodeError as exc: + raise OffsecNodeError("Invalid JSON from Shield node", details={"raw": text}) from exc + + +# Optional: CLI entrypoint for quick manual tests +async def _demo() -> None: + client = OffsecNodeClient.from_env() + print(f"[offsec-node] base_url={client.base_url}") + try: + health = await client.health() + print("Health:", json.dumps(health, indent=2)) + except OffsecNodeError as e: + print("Health check failed:", e) + return + + try: + agents = await client.command("agents list") + print("Agents:", json.dumps(agents, indent=2)) + except OffsecNodeError as e: + print("Command failed:", e) + + +if __name__ == "__main__": + asyncio.run(_demo()) diff --git a/spec/ATTACK_RESISTANCE_LEDGER.md b/spec/ATTACK_RESISTANCE_LEDGER.md new file mode 100644 index 0000000..2c798b0 --- /dev/null +++ b/spec/ATTACK_RESISTANCE_LEDGER.md @@ -0,0 +1,217 @@ +# Attack Resistance & Control Ledger + +Status: Canonical +Scope: State-Level Threat Model +Operating Mode: Single-Sovereign, Local-First +Federation: Optional Witness Augmentation + +## Non-goal + +VaultMesh does not guarantee liveness, availability, or global consensus under adversarial conditions. 
It guarantees detectability, attribution, and recoverable truth.
+
+## Pinned Definitions
+
+- **ShadowReceipt**: An append-only proof emitted when an action is considered but not executed, recording intent, denial reason, and (if applicable) scope narrowing without side effects.
+- **Receipt scroll**: An append-only JSONL event log per domain (e.g., `receipts/treasury/treasury_events.jsonl`).
+- **Merkle root**: A deterministic commitment over a scroll’s hashed leaves using the VaultMesh `VmHash` + `merkle_root` algorithm.
+- **Root file**: A file that stores the current Merkle root for a scroll (typically `ROOT.<domain>.txt`).
+- **Seal bundle (Ouroboros)**: A deterministic digest over a selected evidence set in the local SQLite ledger, stored as a `proof_artifacts.kind=ouroboros_seal_bundle` artifact.
+- **Anchor (external, optional)**: A timestamp/immutability witness over a seal digest (RFC-3161 / blockchain / etc.), recorded as an additional proof artifact referencing the seal bundle.
+- **Trace id**: A correlation id linking the evidence chain across `tool_invocations`, `mcp_calls`, `proof_artifacts`, and (where emitted) `shadow_receipts`.
+- **Capability / scope**: A revocable, least-privilege right that can be narrowed without rewriting history (Mesh receipts).
+
+## Operating Assumption
+
+VaultMesh is designed to remain truthful, auditable, and survivable as a single sovereign system under isolation. Federation is an optional augmentation that increases resilience and reach, but is never required for correctness.
+
+Primary adversary context: *“You are alone. No peers. No network. No court protection.”*
+
+## Adversary Classes (State-Level)
+
+- **Isolation**: network partition, long-term offline operation, selective connectivity denial.
+- **Seizure**: physical confiscation, disk imaging, forced shutdown, forced relocation.
+- **Coercion**: compelled operation, compelled credential disclosure, compelled signing.
+- **Supply chain**: compromised dependencies, poisoned updates, build/release interdiction.
+- **Insider drift**: sequences of individually policy-valid actions that violate long-horizon invariants.
+- **Narrative warfare**: re-framing outages as “lies”, attacking legitimacy rather than mechanics.
+
+---
+
+## 🜔 Proof (Immutable Wealth)
+
+### Invariants
+
+- **Append-only evidence**: evidence is never rewritten in place; corrections are new events referencing prior ids/hashes.
+- **Deterministic verification**: the same inputs yield the same roots and seal digests.
+- **Detectable tamper**: any change to past events must surface as a root/seal mismatch.
+
+### Likely Moves
+
+- Edit/delete old receipts; roll back state to a “clean” snapshot; truncate scrolls.
+- Rewrite root files to match a forged history.
+- Partition the node so anchoring cannot occur.
+
+### Controls
+
+- Scrolls are append-only JSONL + deterministic Merkle roots (`ROOT.*.txt`).
+- Seals bind SQLite evidence to a deterministic digest (local witness) that can be copied out-of-band.
+- Optional anchoring adds a time witness when connectivity exists (never required for local correctness).
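+
+Illustration (non-normative): a minimal sketch of recomputing a scroll's root from its JSONL lines alone, assuming sha256 leaves and duplication of a trailing odd leaf. The canonical `VmHash` + `merkle_root` algorithm used by `cli/vm_cli.py guardian compute-roots` is authoritative; this sketch only shows why any edit to a past line must surface as a root mismatch.
+
+```python
+import hashlib
+
+def leaf_hashes(jsonl_path: str) -> list:
+    # Each non-empty JSONL line is one leaf (assumption for illustration).
+    with open(jsonl_path, "rb") as f:
+        return [hashlib.sha256(line.rstrip(b"\n")).digest()
+                for line in f if line.strip()]
+
+def merkle_root(leaves: list) -> str:
+    if not leaves:
+        return "0"
+    level = list(leaves)
+    while len(level) > 1:
+        if len(level) % 2 == 1:
+            level.append(level[-1])  # duplicate trailing leaf (assumption)
+        level = [hashlib.sha256(level[i] + level[i + 1]).digest()
+                 for i in range(0, len(level), 2)]
+    return level[0].hex()
+
+# Compare against the stored root file, e.g. ROOT.treasury.txt.
+print(merkle_root(leaf_hashes("receipts/treasury/treasury_events.jsonl")))
+```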
+ +### Evidence Artifacts + +- Scrolls: `receipts/**` +- Roots: `ROOT.*.txt` and `receipts/console/ROOT.console.txt` +- Local ledger: `.state/ledger.sqlite` (`tool_invocations`, `mcp_calls`, `proof_artifacts`, `shadow_receipts`) +- Seal bundles: `.state/seals/ouroboros_seal_*.json` + `proof_artifacts.kind=ouroboros_seal_bundle` +- Anchor receipts: `receipts/guardian/anchor_events.jsonl` (and planned `proof_artifacts.kind=external_anchor`) + +### Drills + +- Recompute and compare roots (no writes): `python3 cli/vm_cli.py guardian compute-roots` +- Check whether on-disk roots match computed roots: `python3 cli/vm_cli.py guardian status` +- Emit an anchor cycle (writes roots + anchor receipt): `python3 cli/vm_cli.py guardian anchor --backend local` +- Seal recent evidence (deterministic digest over ledger tables): `python3 cli/ledger.py seal --since "7 days"` + +--- + +## 🜃 Energy (Scarce Wealth) + +### Invariants + +- **No action without cost**: actions require a debit/charge. +- **No cost without record**: debits/credits are receipted; state is reconstructable from receipts. +- **No silent denial**: denied/aborted high-impact actions produce a ShadowReceipt (proof of restraint), not silence. + +### Likely Moves + +- Spam/flood to force writes, bury signals in volume, or extract unbounded work. +- Coercive drain of budgets to force depletion or induce “just this once” shortcuts. + +### Controls + +- Debit-before-write for mutating operations; budgets enforce ceilings. +- Compartment budgets by purpose; require stronger capabilities for high-impact budgets. +- “Freeze” and “contain” responses narrow authority; they never grant new authority. + +### Evidence Artifacts + +- Treasury scroll + root: `receipts/treasury/treasury_events.jsonl`, `ROOT.treasury.txt` +- Ledger witness of debits/denials: `.state/ledger.sqlite` (`tool_invocations`, `shadow_receipts`) +- Seal bundles over the above: `.state/seals/ouroboros_seal_*.json` + +### Drills + +- Create a seal over a high-volume window and confirm it is stable on replay: `python3 cli/ledger.py seal --since "1 day"` +- Confirm denials are queryable (ShadowReceipts are sealed even if not rooted): + - `sqlite3 .state/ledger.sqlite "select ts,horizon_id,reason_unrealized,trace_id from shadow_receipts order by datetime(ts) desc limit 20;"` + +--- + +## 🜍 Intelligence (Auditable Consciousness) + +### Invariants + +- **Bounded automation**: analysis may run, but execution requires capability + receipt trail. +- **Legible decisions**: reasoning/uncertainty is recorded as evidence, not “trust the model”. +- **Temporal defensibility**: “locally allowed” is insufficient if a long-horizon invariant is violated. + +### Likely Moves + +- Drift sequences: individually allowed steps that collectively break invariants. +- Poisoned telemetry: adversarial signals to induce unsafe policies or overblocking. +- “Optimize away restraint”: remove proof-of-denial records to reduce friction. + +### Controls + +- DriftGuard pattern: detect long-horizon invariant violations and deny execution while emitting a ShadowReceipt. +- Quarantine: treat suspicious signals as inputs to proposals (artifacts), not direct law rewrites. +- Replay + seal: decisions are reviewable through deterministic seals over evidence sets. 
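+
+Illustration (non-normative): the shape of the DriftGuard deny path. On a long-horizon violation the action is not executed; restraint is recorded as additive evidence. Column names follow the `shadow_receipts` fields described in `spec/BLUEPRINT_SPEC.md`; the insert statement uses a subset of those columns and is a placeholder, not the shipped schema.
+
+```python
+import sqlite3
+import uuid
+from datetime import datetime, timezone
+
+def deny_with_shadow_receipt(db_path: str, horizon_id: str,
+                             reason: str, trace_id: str) -> None:
+    # Do not execute the action; record the refusal instead.
+    ts = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
+    con = sqlite3.connect(db_path)
+    con.execute(
+        "insert into shadow_receipts (id, ts, horizon_id, reason, trace_id) "
+        "values (?, ?, ?, ?, ?)",  # subset of columns, for illustration
+        (str(uuid.uuid4()), ts, horizon_id, reason, trace_id),
+    )
+    con.commit()
+    con.close()
+```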
+ +### Evidence Artifacts + +- ShadowReceipts: `.state/ledger.sqlite` table `shadow_receipts` +- Correlated evidence chain: `.state/ledger.sqlite` tables `tool_invocations`, `mcp_calls`, `proof_artifacts` +- Automation scroll (if/when used): `receipts/automation/automation_events.jsonl`, `ROOT.automation.txt` + +### Drills + +- Create a ShadowReceipt for a denied path (proof of restraint), then seal the window: + - `python3 cli/ledger.py seal --since "1 day"` +- Confirm trace correlation is preserved (and treat gaps as audit failures): + - `python3 cli/ledger.py last --n 50` + +--- + +## ☿ Trust (Circulating Authority) + +### Invariants + +- **No ambient trust**: rights are explicit capabilities with scopes. +- **Revocation is additive**: power can shrink without erasing history. +- **Containment > blame**: automatic responses narrow scopes; they do not expand authority. + +### Likely Moves + +- Key theft / replay; attempt to broaden scope “just for recovery”. +- Coercion to compel signing or privileged action. + +### Controls + +- Least-privilege, short-lived capabilities; explicit scopes; revocation receipts. +- For irreversible actions: time-locks and/or multi-party approval (policy-dependent). +- Record refusals as ShadowReceipts; never “black-hole” denied operations. + +### Evidence Artifacts + +- Mesh capability receipts + root: `receipts/mesh/mesh_events.jsonl`, `ROOT.mesh.txt` +- Identity receipts + root: `receipts/identity/identity_events.jsonl`, `ROOT.identity.txt` +- ShadowReceipts for denied/coerced paths: `.state/ledger.sqlite` `shadow_receipts` + +### Drills + +- Revoke and verify containment: + - (emit revoke) verify it appears in `receipts/mesh/mesh_events.jsonl` + - recompute roots: `python3 cli/vm_cli.py guardian compute-roots` + +--- + +## 🜞 Time (Continuity Across Decades) + +### Invariants + +- **Portability**: proofs can be verified from artifacts alone. +- **Legibility**: tools and formats remain understandable without a priesthood. +- **Recoverability**: state can be reconstructed from receipts + seals. + +### Likely Moves + +- Long-term offline storage; partial artifact survival; missing dependencies; bit rot. +- Availability attacks misframed as correctness failures (narrative warfare). + +### Controls + +- Boring formats: JSONL + SQLite + text roots. +- Archaeology drill: restore from a cold copy and re-derive roots and seals. +- Explicitly separate **truth** from **availability** (see Non-goal). + +### Evidence Artifacts + +- Local ledger: `.state/ledger.sqlite` +- Scrolls + roots: `receipts/**`, `ROOT.*.txt`, `receipts/console/ROOT.console.txt` +- Constitutional mapping: `spec/BLUEPRINT_SPEC.md`, `spec/MAPPING.md` + +### Drills + +- Cold-restore verification: copy artifacts to a new directory and run: + - `python3 cli/vm_cli.py guardian status` + - `python3 cli/ledger.py seal --since "365 days"` + +--- + +## Federation (Optional Witness Augmentation) + +Federation is not correctness. It is redundancy and cross-witnessing. + +- Peers may mirror roots/seals to increase survivability and detect targeted rollback. +- Disagreement is an incident artifact, not a correctness failure of the local node. + diff --git a/spec/BLUEPRINT_SPEC.md b/spec/BLUEPRINT_SPEC.md new file mode 100644 index 0000000..731a321 --- /dev/null +++ b/spec/BLUEPRINT_SPEC.md @@ -0,0 +1,203 @@ +# VaultMesh Blueprint Spec (v0) + +This document defines the concrete, auditable objects and invariants implied by the VaultMesh Blueprint. 
It is written to be implementable and reviewable by engineers and auditors. + +## Scope + +This spec covers: +- Local-first evidence storage (SQLite ledger) +- File-based receipt scrolls and Merkle roots +- Sealing (deterministic digests over a set of events) and external anchoring +- ShadowReceipts (“proof of restraint”) as a first-class, queryable record +- Evolution control (epochs) and bounded automation (Autogene: read-only) + +## Core Objects + +### 1) ProofRune + +A generic “proof container” for any payload that must be replayable, hashable, and auditable. + +Minimal shape: +```json +{ + "payload": { "any": "json" }, + "capability_hash": "hex", + "merkle_root": "hex|0", + "epoch": "nigredo|albedo|rubedo", + "ts": "ISO-8601 Z" +} +``` + +Storage: +- As an on-disk file referenced by a row in `proof_artifacts` (preferred), or +- As a JSON object in `proof_artifacts.meta_json` for small payloads. + +### 2) OuroborosReceipt (Seal Bundle + External Anchor) + +An OuroborosReceipt is a *sealed* summary proof for a selected set of events, anchored externally. + +It has two layers: +1) **Local seal bundle** (SQLite witness): a deterministic digest over a chosen set. +2) **External anchor** (pipeline witness): an immutable timestamp/anchor referencing the seal digest. + +Minimal fields (local seal bundle file): +```json +{ + "format": "vm-ouroboros-seal-v0", + "schema_version": 3, + "schema_last_migration": "0003_shadow_receipts.sql", + "seal_id": "uuid", + "sealed_at": "ISO-8601 Z", + "digest_algo": "sha256", + "selection": { + "scope": "time_window|trace_set", + "since": "ISO-8601 Z|null", + "until": "ISO-8601 Z|null", + "trace_ids": ["uuid", "..."], + "kinds": ["tool_invocations", "mcp_calls", "proof_artifacts"] + }, + "digest": { + "algorithm": "sha256", + "hex": "..." + }, + "bounds": { + "min_ts": "ISO-8601 Z|null", + "max_ts": "ISO-8601 Z|null" + }, + "inputs": { + "sqlite_db_path": "path", + "receipt_roots": ["receipts/**/ROOT.*.txt"] + } +} +``` + +External anchor evidence is recorded as an additional `proof_artifacts` row that either: +- embeds the anchor payload (e.g., RFC-3161 token, txid), or +- references an artifact path containing it. + +### 3) ShadowReceipt (Proof of Restraint) + +A ShadowReceipt records a counterfactual/near-miss/aborted path without rewriting canonical history. + +Minimal row fields (table `shadow_receipts`): +- `id`: uuid +- `ts`: ISO-8601 Z +- `horizon_id`: stable identifier for a decision horizon +- `counterfactual_hash`: hash of a canonicalized counterfactual description +- `entropy_delta`: numeric drift/uncertainty signal (policy-defined meaning) +- `reason`: short reason the path was not executed +- `observer_signature`: signature/attestation of the observer (human or system identity) +- `trace_id`: correlation id linking to tool/MCP/receipt chain +- `meta_json`: optional redacted metadata + +ShadowReceipts are **not** part of the per-engine Merkle roots by default; they remain queryable and auditable via SQLite + seals. 
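+
+Illustrative row (non-normative; all values are placeholders):
+
+```json
+{
+  "id": "7f3f0e6a-2a7b-4d1e-9c2a-5b8e1d4f6a90",
+  "ts": "2025-12-27T00:10:32Z",
+  "horizon_id": "treasury.daily_spend_ceiling",
+  "counterfactual_hash": "3a7bd3e2…",
+  "entropy_delta": 0.42,
+  "reason": "long-horizon budget invariant would be violated",
+  "observer_signature": "ed25519:…",
+  "trace_id": "9d4c2f5e-8b1a-4c3d-a6e7-0f1b2c3d4e5f",
+  "meta_json": null
+}
+```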
+ +### 4) Guardians + +Guardians are bounded subsystems that: +- ingest signals (telemetry, anomalies, drift indicators) +- produce evidence (receipts/artifacts) +- produce proposals (never unilateral law rewrites) + +In implementation terms, a Guardian is a module that consumes ledger/scroll state and emits: +- `proof_artifacts` (reports, policies, recommended changes) +- (optionally) receipt scroll entries in its engine domain +- (optionally) ShadowReceipts + +### 5) Directed Evolution Engine (DEE) + +DEE is a controller that converts signals into *proposed* system changes. + +Outputs are always proposals, not direct execution. Executing a proposal requires: +- capability authorization, and +- an auditable receipt trail (scroll + seal). + +DEE outputs are stored as `proof_artifacts` with stable formats (e.g., `vm-dee-proposal-v0`). + +### 6) Epoch + +`epoch` is an explicit system mode included in ProofRunes and proposals: +- `nigredo`: maximum learning; conservative execution +- `albedo`: normalization and stabilization +- `rubedo`: directed transformation (strict gating) + +Epoch changes are themselves auditable events (receipt + seal). + +### 7) Autogene (Read-only) + +Autogene is a bounded anticipatory system: +- It can analyze and forecast. +- It cannot execute changes directly. + +Autogene outputs are stored as `proof_artifacts` (e.g., `autogene_forecast`, `autogene_recommendation`) and require the same approval/execution gates as any other proposal. + +## Invariants (Must Hold) + +1) **Local-first evidence**: every externally anchored claim must correspond to a locally stored seal bundle (SQLite witness). +2) **Deterministic sealing**: the seal digest is deterministic for a given selection + canonicalization rules. +3) **Append-only evidence**: evidence is never rewritten in place; corrections are new receipts/artifacts referencing prior ids/hashes. +4) **Action gating**: proposals never execute themselves; execution requires capability + receipt trail. +5) **Autogene read-only**: Autogene cannot mutate state; it only emits artifacts. +6) **Shadow separation**: ShadowReceipts do not retroactively alter canonical history; they are additive evidence. +7) **Trace continuity**: a `trace_id` links the full chain across: + - `tool_invocations.trace_id` + - `mcp_calls.trace_id` + - `proof_artifacts.trace_id` + - `shadow_receipts.trace_id` (planned) +8) **Redaction for storage**: stored payloads must be redacted/hashed where appropriate (secrets never stored in plaintext). + +## Evidence Artifacts (What Proves What) + +### SQLite ledger (queryable witness) + +Current tables: +- `tool_invocations`: “what local tools did” with redacted inputs/outputs. +- `mcp_calls`: “what boundary calls happened” with redacted request/response. +- `proof_artifacts`: “what proof objects exist” (files and/or meta) with content hashes. +- `shadow_receipts`: “what paths were considered and not executed” (proof of restraint). + +### Receipt scrolls + roots (tamper-evident chronology) + +Per-engine JSONL scrolls and their Merkle roots (e.g., console engine): +- `receipts/**/ENGINE_EVENTS.jsonl` +- `receipts/**/ROOT.*.txt` + +These are inputs into sealing and external anchoring, but they remain independently verifiable. + +### Seals and anchors (bridge to external immutability) + +- Seal bundle file: stored as `proof_artifacts` with a strong digest. 
+- External anchor evidence: stored as a second `proof_artifacts` row referencing or embedding: + - RFC-3161 timestamp token + - release-time Merkle root(s) + - blockchain txid(s) or other anchoring proof + +## Failure Modes (Detection + Response) + +1) **Ledger tampering / local rollback** + - Detect: mismatch between `proof_artifacts.sha256_hex` and file contents; seal digest mismatch; missing expected rows. + - Respond: emit a new artifact documenting divergence; re-seal; escalate to external anchor verification. + +2) **External anchor missing or unverifiable** + - Detect: seal exists but no anchor artifact; anchor proof fails verification. + - Respond: block promotion of high-impact changes; emit incident artifact; re-run anchoring pipeline. + +3) **Poisoning of learning signals / adversarial telemetry** + - Detect: replay reports show FP increase or catastrophic boundary regression; outlier actors dominate support. + - Respond: require quorum + replay + human approval; quarantine candidates; record a ShadowReceipt for the rejected promotion. + +4) **Overblocking / drift into refusal** + - Detect: trend in false positives; rising “blocked” outcomes on benign workloads. + - Respond: rollback via new policy receipt; require replay validation before relaxation is promoted. + +5) **Clock skew / timestamp ambiguity** + - Detect: non-monotonic timestamps, ordering anomalies in seals. + - Respond: anchor by content ordering (stable sort keys); treat timestamps as metadata; include row ids/hashes in sealing. + +6) **Trace discontinuity** + - Detect: tool/MCP/proof artifacts lacking `trace_id` where expected. + - Respond: enforce trace propagation at call sites; treat missing trace links as audit gaps. + +## Versioning + +All seal bundle formats MUST include a `format` string (e.g., `vm-ouroboros-seal-v0`). Any breaking change requires a new version and must be documented as an auditable artifact. diff --git a/spec/MAPPING.md b/spec/MAPPING.md new file mode 100644 index 0000000..176009a --- /dev/null +++ b/spec/MAPPING.md @@ -0,0 +1,36 @@ +# Blueprint → Code Mapping (v0) + +This document maps Blueprint objects to concrete modules/files and to the local SQLite ledger evidence they must produce. 
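+
+Non-normative illustration of the trace propagation rules in the table below: a single read-only query can surface trace gaps (cf. "Trace discontinuity" in `spec/BLUEPRINT_SPEC.md`). Table and column names are taken from the schema files listed later in this document; whether a given gap is an audit failure is policy, not SQL.
+
+```python
+import sqlite3
+
+con = sqlite3.connect(".state/ledger.sqlite")
+# Traces with tool activity but no proof artifact: candidates for review.
+gaps = con.execute(
+    "select distinct t.trace_id from tool_invocations t "
+    "left join proof_artifacts p on p.trace_id = t.trace_id "
+    "where t.trace_id is not null and p.trace_id is null"
+).fetchall()
+print(f"{len(gaps)} trace(s) without proof artifacts")
+con.close()
+```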
+ +## Mapping Table + +| Blueprint object | Implementation location | Ledger tables touched | Proof artifacts emitted | Trace propagation rules | +|---|---|---|---|---| +| ProofRune | `ledger/db.py` (artifact hashing), `ledger/redact.py` (safe storage), engine scroll emitters | `proof_artifacts` | `proof_artifacts.kind=*` (file/hash recorded) | `proof_artifacts.trace_id` should match the initiating tool/MCP trace | +| OuroborosReceipt (local seal bundle) | `cli/ledger.py` subcommand `seal` | `proof_artifacts` (and read-only queries over `tool_invocations`, `mcp_calls`) | `kind=ouroboros_seal_bundle` | Seal bundle should include selection (since/until/trace_ids) and record the sealing `trace_id` | +| External anchor evidence | **Pipeline/ops**, recorded back into SQLite via CLI/tooling | `proof_artifacts` | `kind=external_anchor` (planned) | Anchor artifact must reference seal digest + seal artifact id/path | +| ShadowReceipt | `ledger/schema/0003_shadow_receipts.sql` + `ledger/db.py` helper | `shadow_receipts` | Optional: `kind=shadow_receipt_attachment` for large payloads | `shadow_receipts.trace_id` must correlate to the considered action chain | +| Guardian (generic) | `vaultmesh-guardian/` (Rust), `vaultmesh-offsec/`, `vaultmesh-observability/` (future), plus Python engines as needed | `tool_invocations`, `mcp_calls`, `proof_artifacts` | `kind=guardian_report`, `kind=guardian_policy_proposal` (planned) | Guardian-generated events must either reuse the upstream `trace_id` or emit a new trace id and link it in meta | +| DEE proposal output | `engines/` (planned controller module) | `proof_artifacts` | `kind=dee_proposal` (planned) | Proposal artifacts must link to the evidence set used to produce them (seal ids, trace ids) | +| Epoch (mode) | Stored as a field inside proposal/seal payloads; epoch changes logged as receipts (future) | `proof_artifacts` (and engine scrolls) | `kind=epoch_change` (planned) | Epoch changes must be sealed and externally anchored for high-impact transitions | +| Autogene (read-only) | `engines/` (planned analytics job) | `proof_artifacts` | `kind=autogene_forecast`, `kind=autogene_recommendation` (planned) | Outputs must never trigger execution; they must be consumable as inputs to an approval gate | +| Console receipt scroll + Merkle root | `engines/console/receipts.py` | (not SQLite by default) | Root file(s) under `receipts/**/ROOT.*.txt` | Seal bundles should include the root files as inputs; optional `proof_artifacts` rows can reference root files | +| Approvals | `engines/console/approvals.py` (receipted), `cli/vm_cli.py` (entrypoint) | (scroll-based today; SQLite optional later) | Receipt types `console_approval_request` and `console_approval` | Approval decisions should carry the same `trace_id` as the action being approved (planned wiring) | +| Local ledger introspection | `cli/ledger.py` | Read-only queries over `tool_invocations`, `mcp_calls`, `proof_artifacts` | None (unless exporting reports) | Reporting commands should preserve trace correlation when exporting artifacts | + +## SQLite Ledger Schema (Current) + +Defined in: +- `ledger/schema/0001_init.sql` (tables) +- `ledger/schema/0002_indexes.sql` (indexes) + +Tables: +- `tool_invocations` +- `mcp_calls` +- `proof_artifacts` + +## Planned Additions (Not Yet Implemented) + +1) External anchoring capture: + - Insert `proof_artifacts.kind=external_anchor` referencing a prior `ouroboros_seal_bundle` + - Store RFC-3161 tokens and/or chain txids either embedded in `meta_json` or as file paths diff 
--git a/spec/MERIDIAN_V1_FAQ_HOSTILE_REGULATOR_EDITION.md b/spec/MERIDIAN_V1_FAQ_HOSTILE_REGULATOR_EDITION.md new file mode 100644 index 0000000..5b176e9 --- /dev/null +++ b/spec/MERIDIAN_V1_FAQ_HOSTILE_REGULATOR_EDITION.md @@ -0,0 +1,331 @@ +# MERIDIAN v1 FAQ — Hostile Regulator Edition (Evidence‑First) + +Status: Draft (regulator-facing; non-marketing; copy/paste runnable) +Scope: MERIDIAN v1 conformance evidence + Sentinel v1 offline verifier + +This FAQ is written for the “hostile room”: every answer routes to **offline, deterministic evidence**. + +Baseline: generate the evidence once, then answer every question by pointing to `out/`. + +```bash +bash vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/run.sh +``` + +Artifacts: +- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/report.json` +- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/tests/*.record.json` +- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/sentinel_reports/*.verification_report.json` + +Definitions (used throughout): +- `exit`: verifier exit code (`0` PASS, `1` FAIL) +- `failure_code`: Sentinel failure code (e.g. `E_EVENT_HASH_MISMATCH`) +- `violated_contract_ids`: stable contract IDs (e.g. `E-2`) +- “fail‑closed”: verifier returns non‑zero and emits explicit codes/contract IDs + +Normative references: +- Verifier: `vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py` +- Failure codes: `vaultmesh-orgine-mobile/tools/sentinel_failure_codes.py` +- Failure semantics: `vaultmesh-orgine-mobile/spec/SENTINEL_FAILURE_CODE_SEMANTICS.md` +- Contract matrix: `vaultmesh-orgine-mobile/spec/SENTINEL_V1_CONTRACT_MATRIX.md` +- Evidence standard: `vaultmesh-orgine-mobile/spec/SENTINEL_EVIDENCE_STANDARD.md` + +--- + +## 1) “How do you prevent evidence fabrication by the operator?” + +Answer (direct): +- The offline verifier proves **internal integrity** of a bundle (hash chain + Merkle roots + integrity manifest). +- It does **not** prove “this history was the only possible history” without an **external anchor** (procedural control). + +Verifier‑enforced controls (objective; evidence‑backed): +- Any post‑hoc modification inside a bundle is detected and fails closed (`E_EVENT_HASH_MISMATCH`, `E_MANIFEST_HASH_MISMATCH`, `E_ROOT_MISMATCH`). + +Audit‑procedural controls (required if you want “anti‑fabrication” rather than “tamper detection”): +- Independent anchoring of Merkle roots to an immutable witness (WORM storage / external ledger / third‑party timestamping). +- Chain‑of‑custody of bundles (who held them, when). 
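+
+Illustration (non-normative) of the anchoring control above: compute the digest you would hand to an independent witness. Only the digest computation is shown; the witnessing mechanism itself (RFC‑3161 TSA, WORM store, public ledger) is deployment‑specific and outside the verifier.
+
+```python
+import hashlib
+import pathlib
+
+bundle = pathlib.Path(
+    "vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE"
+    "/fixtures/pass/refusal_proof_pass"
+)
+digest = hashlib.sha256((bundle / "roots.txt").read_bytes()).hexdigest()
+# Hand this digest to the external witness; keep its receipt with the bundle.
+print(digest)
+```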
+ +Run (tamper detection demonstrations; both are expected FAIL fixtures): +```bash +python3 vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py \ + --bundle vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/event_hash_mismatch \ + --strict + +python3 vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py \ + --bundle vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/manifest_hash_mismatch \ + --strict +``` + +Expected: +- Exit: `1` +- Failure codes: `E_EVENT_HASH_MISMATCH` and `E_MANIFEST_HASH_MISMATCH` +- Contract IDs: `E-2` and `I-3` + +Inspect: +- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/tests/MV1-FAIL-EVENT-001.record.json` +- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/tests/MV1-FAIL-SEAL-002.record.json` + +Fail‑closed statement: +- If any evidence inside a bundle is altered after sealing, verification refuses (`exit=1`) and emits stable codes + contract IDs. + +--- + +## 2) “What happens if clocks drift, reboot loops occur, or power cycles happen?” + +Answer (direct): +- MERIDIAN/Sentinel ordering is secured by `seq` and the hash chain (`prev_event_hash`), not by wallclock. +- Wallclock timestamps (`ts`) are recorded for audit correlation but are not the trust anchor for ordering. + +Verifier‑enforced controls: +- `seq` must be consistent and non‑ambiguous; replay/duplicate ordering fails closed. +- Hash chain continuity fails closed if `prev_event_hash` does not link. + +Audit‑procedural controls: +- Correlate `ts` with independent time sources (NTP logs, hardware event logs, external anchors). +- Treat time anomalies as incident signals; verification remains possible offline. + +Run (ordering / continuity are expected FAIL fixtures): +```bash +python3 vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py \ + --bundle vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/seq_non_monotonic_duplicate \ + --strict + +python3 vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py \ + --bundle vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/prev_event_hash_mismatch \ + --strict +``` + +Expected: +- Exit: `1` +- Failure codes: `E_SEQ_NON_MONOTONIC`, `E_CHAIN_DISCONTINUITY` +- Contract IDs: `E-4`, `E-3` + +Inspect: +- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/tests/MV1-FAIL-EVENT-005.record.json` +- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/tests/MV1-FAIL-EVENT-003.record.json` + +Fail‑closed statement: +- If ordering/continuity is ambiguous or broken, verification refuses and emits explicit codes (`E_SEQ_NON_MONOTONIC` / `E_CHAIN_DISCONTINUITY`). + +--- + +## 3) “How do you handle lawful access vs refusal?” + +Answer (direct): +- Lawful access produces an attributable `action_executed` trail. +- Refusal produces an attributable `shadow_receipt` trail. +- “No silent denial” is enforced: intent must have an outcome (executed or denied) in strict mode. + +Verifier‑enforced controls: +- Strict trace linkage requires `action_intent` → (`action_executed` **or** `shadow_receipt`), otherwise verification fails closed. + +Audit‑procedural controls: +- Define lawful basis and policy outside the verifier; the verifier enforces the resulting evidence integrity. 
+ +Run (refusal proof PASS + “silent denial” FAIL): +```bash +python3 vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py \ + --bundle vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/refusal_proof_pass \ + --strict + +python3 vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py \ + --bundle vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/silent_denial_intent_without_outcome \ + --strict +``` + +Expected: +- PASS bundle exit: `0` +- FAIL bundle exit: `1` +- FAIL bundle `failure_code`: `E_CHAIN_DISCONTINUITY` +- FAIL bundle contract IDs: `E-3` + +Inspect: +- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/tests/MV1-PASS-REFUSAL-001.record.json` +- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/tests/MV1-FAIL-REFUSAL-001.record.json` + +Fail‑closed statement: +- If a denial would be “silent” (intent without outcome), verification refuses with `E_CHAIN_DISCONTINUITY` and contract `E-3`. + +--- + +## 4) “Can an operator delete or rewrite history?” + +Answer (direct): +- Delete: they can destroy local copies, but then there is no evidence; retention is procedural. +- Rewrite: any rewrite inside a bundle is detected and fails closed. + +Verifier‑enforced controls: +- Missing required artifacts fail loudly (`E_MISSING_REQUIRED_FILE`). +- Rewrites are detected via integrity digests (`E_MANIFEST_HASH_MISMATCH`), event hashes (`E_EVENT_HASH_MISMATCH`), roots/ranges (`E_ROOT_MISMATCH`, `E_RANGE_MISMATCH`). + +Audit‑procedural controls: +- WORM retention, redundancy, and off‑site escrow of evidence bundles. + +Run (expected FAIL fixtures): +```bash +python3 vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py \ + --bundle vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/missing_required_file_roots \ + --strict +``` + +Expected: +- Exit: `1` +- Failure code: `E_MISSING_REQUIRED_FILE` +- Contract IDs: `B-1` + +Inspect: +- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/tests/MV1-FAIL-SEAL-001.record.json` + +Fail‑closed statement: +- If required artifacts are missing, verification refuses and emits `E_MISSING_REQUIRED_FILE` + `B-1`. + +--- + +## 5) “What if Sentinel itself is compromised?” + +Answer (direct): +- The verifier can prove integrity of what was recorded; it cannot prove the recorder was honest. +- If Sentinel is compromised enough to generate a fully consistent but fraudulent history, detecting that requires external controls (anchoring, independent telemetry, attestation). + +Verifier‑enforced controls: +- The verifier can still detect internal inconsistencies (tampering, missing links, revoked capability used). +- A “tamper signal” can be recorded and verified as present (evidence of a condition being asserted). + +Audit‑procedural controls: +- Independent tamper sensors, hardware attestation, and root anchoring external to the compromised device. 
+ +Run (tamper signal PASS + revoked cap FAIL): +```bash +python3 vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py \ + --bundle vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/tamper_signal_pass \ + --strict + +python3 vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py \ + --bundle vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/revoked_capability_used \ + --strict +``` + +Expected: +- Tamper PASS exit: `0` +- Revoked-cap FAIL exit: `1` +- Revoked-cap `failure_code`: `E_REVOKED_CAPABILITY_USED` +- Revoked-cap contract IDs: `E-7` + +Inspect: +- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/tests/MV1-PASS-TAMPER-001.record.json` +- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/tests/MV1-FAIL-CAP-001.record.json` + +Fail‑closed statement: +- If execution uses a revoked capability, verification refuses with `E_REVOKED_CAPABILITY_USED` + `E-7`. + +--- + +## 6) “Is this surveillance by default?” + +Answer (direct): +- MERIDIAN v1 is an evidence system: it records **security‑relevant operations and refusals**, not continuous surveillance, unless you choose to log more. +- The verifier enforces integrity of what you record; minimization is a governance and engineering choice. + +Verifier‑enforced controls: +- Integrity only: the verifier validates structure/hashes; it does not evaluate whether a payload is “too invasive.” + +Audit‑procedural controls (privacy posture): +- Minimize payloads; store sensitive values as digests/handles where possible. +- Redact before storage/export where appropriate: + - `vaultmesh-orgine-mobile/ledger/redact.py` + +Run (evidence surface is discrete; PASS fixture demonstrates minimal event set): +```bash +python3 vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py \ + --bundle vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/refusal_proof_pass \ + --strict +``` + +Expected: +- Exit: `0` +- Contract IDs: none (PASS) + +Inspect: +- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/refusal_proof_pass/receipts.jsonl` + +Fail‑closed statement: +- If the evidence bundle is malformed or tampered, verification refuses; privacy minimization is enforced by policy and export/redaction controls, not by the verifier. + +--- + +## 7) “What fails open vs fails closed?” + +Answer (direct): +- For compliance use, **always run strict**: `--strict` + the conformance suite gate. +- In strict mode, warnings and partial verifications become failures (fail‑closed posture). + +Verifier‑enforced controls: +- Strict-mode failures for: missing links, replay/ordering ambiguity, tampering, unsupported canonicalization versions, oversize inputs. + +Audit‑procedural controls: +- Require strict verification for any compliance claim. +- Treat any non‑zero verifier exit as “invalid evidence.” + +Run (strict-mode is the compliance actuator): +```bash +bash vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/run.sh +``` + +Expected: +- Exit: `0` for a compliant build (any drift ⇒ non-zero) +- Contract IDs: N/A at suite level (per-test records carry contract IDs) + +Inspect: +- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/report.json` +- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/tests/*.record.json` + +Fail‑closed statement: +- If any expected code/contract outcome drifts, the suite exits non-zero and CI fails. 
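+
+A minimal sketch of that posture in a pipeline wrapper, assuming only the suite entrypoint above: any non-zero exit is treated as invalid evidence, with no partial acceptance.
+
+```python
+import subprocess
+import sys
+
+proc = subprocess.run(
+    ["bash", "vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/run.sh"]
+)
+if proc.returncode != 0:
+    # Fail-closed: any drift means the evidence is not accepted.
+    sys.exit("invalid evidence: conformance suite failed")
+print("evidence accepted: conformance suite passed")
+```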
+
+---
+
+## 8) “How do you stop the verifier from silently changing meanings?”
+
+Answer (direct):
+- Failure codes are contractual, documented, and parity-gated.
+- The build fails if codes drift between docs and implementation.
+
+Verifier‑enforced controls:
+- The verifier emits only enumerated codes (see `sentinel_failure_codes.py`); reports include contract IDs.
+
+Audit‑procedural controls:
+- Pin verifier versions; record hashes of verifier artifacts in release processes.
+
+Run (parity gate):
+```bash
+python3 vaultmesh-orgine-mobile/tools/check_sentinel_contract_parity.py
+```
+
+Expected:
+- Exit: `0` and prints `[OK] …`
+- Contract IDs: N/A (tooling gate), but it protects all Sentinel contract IDs from silent drift.
+
+Inspect:
+- `vaultmesh-orgine-mobile/spec/SENTINEL_FAILURE_CODE_SEMANTICS.md`
+- `vaultmesh-orgine-mobile/spec/SENTINEL_V1_CONTRACT_MATRIX.md`
+
+Fail‑closed statement:
+- If the implementation and contractual docs drift, the parity gate fails and the build blocks.
+
+---
+
+## Annex — “If you only have 60 seconds”
+
+1) Run:
+```bash
+bash vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/run.sh
+```
+
+2) Open:
+- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/report.json`
+
+3) Confirm:
+- `counts.failed == 0`
+
+4) For any hostile question:
+- Find the referenced `out/tests/<TEST_ID>.record.json` and compare `expected` vs `observed`.
+
diff --git a/spec/MERIDIAN_V1_INSURER_DUE_DILIGENCE_QUESTIONS.md b/spec/MERIDIAN_V1_INSURER_DUE_DILIGENCE_QUESTIONS.md
new file mode 100644
index 0000000..42c8ad6
--- /dev/null
+++ b/spec/MERIDIAN_V1_INSURER_DUE_DILIGENCE_QUESTIONS.md
@@ -0,0 +1,309 @@
+# MERIDIAN v1 — Insurer Due Diligence Questions (Evidence‑First)
+
+Status: Draft (underwriter-facing; copy/paste runnable)
+Scope: MERIDIAN v1 conformance evidence, backed by Sentinel v1 offline verifier
+
+This document is designed so an underwriter (or auditor) can ask the questions **verbatim** and receive answers as:
+- **paths** (authoritative artifacts)
+- **commands** (offline, deterministic verification)
+- **pass/fail conditions** (exit codes + contract IDs + failure codes)
+
+## 0) Generate the evidence (offline)
+
+From repo root:
+
+```bash
+bash vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/run.sh
+```
+
+Expected outcome:
+- Suite summary prints `Passed: N` and `Failed: 0`
+- Non-zero exit on any unexpected drift (exit code, failure_code, contract_ids)
+
+Artifacts produced (the evidence surface):
+- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/report.json`
+- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/report.txt`
+- Per-test records (expected vs observed):
+  - `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/tests/*.record.json`
+- Raw verifier outputs (regenerable; convenience copies):
+  - `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/sentinel_reports/*.verification_report.json`
+  - `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/sentinel_stdio/*.stdout.txt`
+  - `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/sentinel_stdio/*.stderr.txt`
+
+Fast index:
+
+```bash
+python3 vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/run.py --list
+```
+
+## A) “Can you prove what happened, offline, years later?”
+
+### A1. Offline verification
+Question: Can an auditor validate a bundle **without network access or secrets**?
+ +Evidence: +- Verifier: `vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py` +- Bundle(s): `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/…` +- Suite output: `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/report.json` + +How to answer (example PASS bundle): +```bash +python3 vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py \ + --bundle vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/pass/refusal_proof_pass \ + --strict +``` + +Pass condition: +- Exit `0` and prints `PASS` +- Generated `verification_report.json` is deterministic for identical inputs + +Test coverage: +- `MV1-PASS-REFUSAL-001` (PASS refusal proof bundle) +- `MV1-PASS-OFFLINE-001` (PASS after clean-room copy) + +### A2. Deterministic roots (reproducible from artifacts alone) +Question: Are Merkle roots reproducible from artifacts alone? + +Evidence: +- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/report.json` +- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/sentinel_reports/*.verification_report.json` + +How to answer (rerun determinism spot-check): +```bash +bash vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/run.sh +cp vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/report.json /tmp/meridian_report_run1.json + +bash vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/run.sh +diff -u /tmp/meridian_report_run1.json vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/report.json +``` + +Pass condition: +- Same inputs ⇒ same `computed_end_root`, same `failure_code`, same `violated_contract_ids` + +Test coverage: +- `MV1-PASS-REFUSAL-001` (roots recompute to PASS) +- `MV1-FAIL-SEAL-004` (root mismatch fails loudly) + +### A3. No silent denial +Question: Do denied operations produce verifiable evidence (not gaps)? + +Evidence: +- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/tests/MV1-FAIL-REFUSAL-001.record.json` +- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/tests/MV1-PASS-REFUSAL-001.record.json` + +Pass condition: +- “Intent without outcome” is detected as a failure with explicit contract IDs (no silent gaps). + +Test coverage: +- `MV1-FAIL-REFUSAL-001` (intent without outcome ⇒ `E_CHAIN_DISCONTINUITY`, contract `E-3`) +- `MV1-FAIL-REFUSAL-002` (execution without intent ⇒ `E_CHAIN_DISCONTINUITY`, contract `E-3`) +- `MV1-FAIL-REFUSAL-003` (double outcome ⇒ `E_CHAIN_DISCONTINUITY`, contract `E-3`) +- `MV1-PASS-REFUSAL-001` (well-formed refusal proof passes) + +## B) “Can you detect tampering and incomplete evidence?” + +### B4. Missing required files fail loudly +Question: If required artifacts are missing, does verification fail loudly with contract IDs? + +Evidence: +- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/tests/MV1-FAIL-SEAL-001.record.json` + +Pass condition: +- Non-zero exit; `failure_code=E_MISSING_REQUIRED_FILE`; `violated_contract_ids` includes `B-1`. + +Test coverage: +- `MV1-FAIL-SEAL-001` + +### B5. Manifest mismatch is detected +Question: If the integrity manifest hash is altered, do you catch it? + +Evidence: +- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/tests/MV1-FAIL-SEAL-002.record.json` + +Pass condition: +- Fail with `E_MANIFEST_HASH_MISMATCH`; `violated_contract_ids` includes `I-3`. + +Test coverage: +- `MV1-FAIL-SEAL-002` + +### B6. Root mismatch / range mismatch are detected +Question: If the bundle claims the wrong root or ranges, do you catch it? 
+ +Evidence: +- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/tests/MV1-FAIL-SEAL-004.record.json` +- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/tests/MV1-FAIL-SEAL-005.record.json` + +Pass condition: +- Root mismatch ⇒ `E_ROOT_MISMATCH` with contract `E-5` +- Range mismatch ⇒ `E_RANGE_MISMATCH` with contract `E-6` + +Test coverage: +- `MV1-FAIL-SEAL-004` +- `MV1-FAIL-SEAL-005` + +### B7. Bounded verification (oversize input rejected) +Question: Is verification bounded so oversized inputs fail deterministically? + +Evidence: +- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/tests/MV1-FAIL-SEAL-007.record.json` + +Pass condition: +- Fail with `E_OVERSIZE_INPUT`; contract `B-3`. + +Test coverage: +- `MV1-FAIL-SEAL-007` + +## C) “Can you attribute actions and prevent unauthorized control?” + +### C8. Revoked capability used is rejected +Question: If a revoked capability is used, does verification fail with explicit attribution? + +Evidence: +- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/tests/MV1-FAIL-CAP-001.record.json` + +Pass condition: +- Fail with `E_REVOKED_CAPABILITY_USED`; contract `E-7`. + +Test coverage: +- `MV1-FAIL-CAP-001` + +### C9. Replay / ordering attacks are rejected +Question: Do you prevent replay or ordering attacks (non-monotonic sequences)? + +Evidence: +- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/tests/MV1-FAIL-EVENT-005.record.json` + +Pass condition: +- Fail with `E_SEQ_NON_MONOTONIC`; contract `E-4`. + +Test coverage: +- `MV1-FAIL-EVENT-005` + +## D) “Can you prove integrity of each event?” + +### D10. Event hash integrity is enforced +Question: Is `event_hash` enforced as `H(canonical(event_without_event_hash))`? + +Evidence: +- Fixture alias (explicit path): `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/E_EVENT_HASH_MISMATCH/` +- Suite record: `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/tests/MV1-FAIL-EVENT-001.record.json` + +How to answer (explicit run): +```bash +python3 vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py \ + --bundle vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/fail/E_EVENT_HASH_MISMATCH \ + --strict +``` + +Pass condition: +- Fail with `E_EVENT_HASH_MISMATCH` +- `violated_contract_ids` contains `E-2` + +Test coverage: +- `MV1-FAIL-EVENT-001` + +### D11. op_digest and prev_event_hash chain are enforced +Question: Are `op_digest` and the `prev_event_hash` chain integrity enforced? + +Evidence: +- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/tests/MV1-FAIL-EVENT-002.record.json` +- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/tests/MV1-FAIL-EVENT-003.record.json` + +Pass condition: +- op_digest mismatch ⇒ `E_EVENT_HASH_MISMATCH` with contract `E-2` +- prev_event_hash mismatch ⇒ `E_CHAIN_DISCONTINUITY` with contract `E-3` + +Test coverage: +- `MV1-FAIL-EVENT-002` +- `MV1-FAIL-EVENT-003` + +### D12. Schema and canonicalization versioning are enforced +Question: Do you hard-fail invalid schema inputs and unsupported canonicalization versions? 
+
+Evidence:
+- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/tests/MV1-FAIL-EVENT-004.record.json`
+- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/tests/MV1-FAIL-SEAL-006.record.json`
+
+Pass condition:
+- Invalid JSONL/schema ⇒ `E_SCHEMA_INVALID` with contract `E-1`
+- Unsupported canonicalization ⇒ `E_CANON_VERSION_UNSUPPORTED` with contract `S-6`
+
+Test coverage:
+- `MV1-FAIL-EVENT-004`
+- `MV1-FAIL-SEAL-006`
+
+## E) “Is this enforceable continuously, not just in a PDF?”
+
+### E13. Build-blocking conformance
+Question: Does CI fail if expected contract IDs drift or exit codes change?
+
+Evidence:
+- CI job: `vaultmesh-orgine-mobile/.gitlab-ci.yml` (`meridian-v1-conformance`)
+- Suite actuator: `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/run.sh`
+- Suite report: `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/report.json`
+
+Pass condition:
+- Any drift ⇒ non-zero suite exit ⇒ CI job fails
+
+### E14. Failure codes are stable and documented (no proprietary drift)
+Question: Are failure codes stable, documented, and parity-gated?
+
+Evidence:
+- Source of truth: `vaultmesh-orgine-mobile/tools/sentinel_failure_codes.py`
+- Semantics: `vaultmesh-orgine-mobile/spec/SENTINEL_FAILURE_CODE_SEMANTICS.md`
+- Parity gate: `vaultmesh-orgine-mobile/tools/check_sentinel_contract_parity.py`
+
+Pass condition:
+- `python3 vaultmesh-orgine-mobile/tools/check_sentinel_contract_parity.py` prints `[OK] …`
+
+## Annex A — Test coverage map (authoritative)
+
+The authoritative mapping lives in:
+- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/manifest.yaml`
+- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/report.json`
+- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/tests/*.record.json`
+
+Quick mapping (question → tests):
+- A1: `MV1-PASS-REFUSAL-001`, `MV1-PASS-OFFLINE-001`
+- A2: `MV1-PASS-REFUSAL-001`, `MV1-FAIL-SEAL-004`
+- A3: `MV1-FAIL-REFUSAL-001`, `MV1-FAIL-REFUSAL-002`, `MV1-FAIL-REFUSAL-003`, `MV1-PASS-REFUSAL-001`
+- B4: `MV1-FAIL-SEAL-001`
+- B5: `MV1-FAIL-SEAL-002`
+- B6: `MV1-FAIL-SEAL-004`, `MV1-FAIL-SEAL-005`
+- B7: `MV1-FAIL-SEAL-007`
+- C8: `MV1-FAIL-CAP-001`
+- C9: `MV1-FAIL-EVENT-005`
+- D10: `MV1-FAIL-EVENT-001`
+- D11: `MV1-FAIL-EVENT-002`, `MV1-FAIL-EVENT-003`
+- D12: `MV1-FAIL-EVENT-004`, `MV1-FAIL-SEAL-006`
+- E13: `meridian-v1-conformance` CI job + suite exit behavior
+- E14: parity gate + semantics doc
+
+## Annex B — Evidence packet (what to request; what to run)
+
+Minimum “evidence packet” contents (offline runnable):
+- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/report.json`
+- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/report.txt`
+- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/tests/`
+- `vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/`
+- `vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py`
+- `vaultmesh-orgine-mobile/tools/sentinel_failure_codes.py`
+
+Optional (human-readable contract references):
+- `vaultmesh-orgine-mobile/spec/SENTINEL_V1_CONTRACT_MATRIX.md`
+- `vaultmesh-orgine-mobile/spec/SENTINEL_FAILURE_CODE_SEMANTICS.md`
+
+Verifier run (auditor entrypoint):
+```bash
+python3 vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py --bundle <bundle-dir> --strict
+```
+
+Suite run (answer all questions at once):
+```bash
+bash vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/run.sh
+```
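+
+Packaging sketch (paths as listed above; the archive name and `tar` invocation are illustrative, not a mandated format):
+```bash
+tar czf meridian_evidence_packet.tar.gz \
+  vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/report.json \
+  vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/report.txt \
+  vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/out/tests/ \
+  vaultmesh-orgine-mobile/MERIDIAN_V1_CONFORMANCE_TEST_SUITE/fixtures/ \
+  vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py \
+  vaultmesh-orgine-mobile/tools/sentinel_failure_codes.py
+```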
+
+60-second interpretation:
+- Open `out/report.json` and confirm `counts.failed == 0`.
+- For any question, open the referenced `out/tests/<TEST-ID>.record.json` and compare `expected` vs `observed`.
+
diff --git a/spec/SENTINEL_EVIDENCE_STANDARD.md b/spec/SENTINEL_EVIDENCE_STANDARD.md
new file mode 100644
index 0000000..37e06e8
--- /dev/null
+++ b/spec/SENTINEL_EVIDENCE_STANDARD.md
@@ -0,0 +1,41 @@
+# Sentinel Evidence Standard v1
+
+Status: Draft (implementation-oriented)
+Scope: VaultMesh Sentinel v1 offline-verifiable evidence artifacts
+
+## 1) What “evidence” means here
+
+Evidence is a **portable, offline-verifiable artifact set** that allows an auditor to deterministically verify:
+- what happened (`action_executed`)
+- what was attempted (`action_intent`)
+- what was denied (`shadow_receipt`)
+- and whether the history was tampered with (`prev_event_hash` + roots + integrity digests)
+
+## 2) Required artifact set (seal bundle)
+
+A seal bundle is the canonical evidence package. At minimum it contains:
+- `seal.json`
+- `integrity.json`
+- `verifier_manifest.json`
+- `receipts.jsonl` (or declared ledger export)
+- `roots.txt`
+
+## 3) Cryptographic defaults (v1)
+
+- Signing: Ed25519 (where signatures are used; v1 verifier does not require a seal signature)
+- Hashing: `blake3` **or** `sha256` (declared in artifacts; verifier MUST respect the declared `hash_algo`)
+- Canonicalization: pinned by `canonicalization_version` and enforced by the verifier
+
+## 4) Verification entrypoints (offline)
+
+Single bundle:
+- `python3 vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py --bundle vaultmesh-orgine-mobile/testvectors/sentinel/black-box-that-refused --strict`
+
+All testvectors (includes determinism check):
+- `bash vaultmesh-orgine-mobile/tools/run_sentinel_testvectors.sh`
+
+## 5) Failure codes are contractual
+
+Failure codes are stable and must not be reused across meanings:
+- Source of truth: `vaultmesh-orgine-mobile/tools/sentinel_failure_codes.py`
+
diff --git a/spec/SENTINEL_FAILURE_CODE_SEMANTICS.md b/spec/SENTINEL_FAILURE_CODE_SEMANTICS.md
new file mode 100644
index 0000000..231f864
--- /dev/null
+++ b/spec/SENTINEL_FAILURE_CODE_SEMANTICS.md
@@ -0,0 +1,43 @@
+# Sentinel Failure & Warning Code Semantics (v1)
+
+Status: Canonical (protocol surface)
+
+Source of truth: `vaultmesh-orgine-mobile/tools/sentinel_failure_codes.py`
+
+This document freezes the meaning of `failure_code` / `warning` codes emitted by the Sentinel v1 offline verifier(s). Codes are intended to be stable identifiers for automation, audits, and long-term tooling.
+
+## Stability Rules
+
+- Codes MUST NOT be reused. If meaning must change, add a new code and deprecate the old one.
+- Codes MUST have stable semantics across verifier implementations.
+- Verifiers MUST emit only codes defined in `vaultmesh-orgine-mobile/tools/sentinel_failure_codes.py`.
+- Reports SHOULD include `contract_ids` / `violated_contract_ids` to bind findings to the Contract Matrix.
+
+## Severity Model
+
+- `E_*` = verification FAIL (non-zero exit; bundle is not accepted as correct evidence).
+- `W_*` = verification WARNING (bundle may PASS). In `--strict` mode, verifiers MAY elevate some `W_*` conditions into `E_*` failures (documented per-code below).
+
+## Failure Codes (`E_*`)
+
+| Code | Meaning (stable) | Typical Triggers | Remediation / Next Actions |
+| --- | --- | --- | --- |
+| `E_SCHEMA_INVALID` | Any required schema/format/cross-field validation fails, or required JSON cannot be parsed. | Invalid JSONL line; missing required field; unknown `format`; inconsistent manifest fields. | Re-export the bundle from a known-good producer; verify older seals; treat as corruption/tamper evidence if produced artifacts are expected to be immutable. |
+| `E_MISSING_REQUIRED_FILE` | A required file is missing from the bundle (or referenced but absent). | `seal.json` missing; `seal.json.files.*` points to absent paths; integrity lists missing file. | Restore the missing artifact from redundancy/WORM copy; re-export the seal; verify an older seal bundle that includes the missing file(s). |
+| `E_MANIFEST_HASH_MISMATCH` | A file’s computed digest does not match `integrity.json`. | One-bit flip / truncation; tampering; wrong `hash_algo` used for verification. | Treat as high-confidence tamper/corruption evidence; restore from a known-good copy; compare against earlier seals / out-of-band copies. |
+| `E_OVERSIZE_INPUT` | A bundle input exceeds configured maximum bytes (defense against decompression bombs / giant files). | Abnormally large receipts; attacker-supplied oversized file; misconfigured exporter. | Re-export with bounded payload/chunking; raise size limits only with explicit operational override and strong justification. |
+| `E_EVENT_HASH_MISMATCH` | `event_hash` does not match recomputation from canonical bytes (with `event_hash` omitted). | Event field mutation; canonicalization drift; wrong `hash_algo`; corruption. | Confirm `canonicalization_version` + `hash_algo`; re-export from canonical source; treat persistent mismatch as tamper evidence. |
+| `E_CHAIN_DISCONTINUITY` | `prev_event_hash` chain breaks (tamper/reorder/rollback evidence) or strict linkage invariants fail. | Missing `prev_event_hash`; chain fork; execution without intent; intent without outcome (strict). | Verify earlier bundles; compare to independent copies; treat as rollback/reorder evidence; restore from last-known-good continuity point. |
+| `E_SEQ_NON_MONOTONIC` | Duplicate, missing, or non-monotonic `seq` values in the covered range. | Truncation; rollback; forked history; corrupted append order. | Treat as rollback/corruption evidence; restore from earlier seal; reconstruct from raw receipts if available. |
+| `E_RANGE_MISMATCH` | Seal declares a range that does not match included events/roots. | `seal.json.range` inconsistent with receipts; `roots.txt` last seq mismatch. | Re-export the seal with correct range; ensure exporter includes complete receipts for the declared range. |
+| `E_CANON_VERSION_UNSUPPORTED` | Declared `canonicalization_version` is unknown/unsupported by this verifier. | Newer producer; older verifier; incompatible artifact set. | Use an updated verifier that supports the declared version; keep N-2 verifier compatibility policy enforced. |
+| `E_ROOT_MISMATCH` | Recomputed Merkle end root does not match declared/observed root. | Tampering; missing continuation state (strict); wrong hash algorithm; inconsistent leaf definition. | Confirm `hash_algo` and leaf definition; restore missing continuation state (e.g., frontier snapshot) or verify a genesis-range seal; treat persistent mismatch as tamper evidence. |
+| `E_REVOKED_CAPABILITY_USED` | A revoked capability is used after revoke (violates “revocation is authoritative”). | `action_executed` references `cap_hash` after `cap_revoke`. | Treat as compromise/violation of authority semantics; investigate actor systems; rotate keys/caps; ensure future actions are denied + recorded via `shadow_receipt`. |
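+
+Because the codes are stable, downstream automation can branch on them directly. A minimal consumer sketch (assuming a `verification_report.json` with `failure_code` and `violated_contract_ids` fields, per the offline verifier requirements; the triage mapping itself is illustrative, not normative):
+
+```python
+import json
+import sys
+
+# Codes whose remediation above starts with "treat as tamper/corruption evidence".
+TAMPER_EVIDENCE = {
+    "E_MANIFEST_HASH_MISMATCH",
+    "E_EVENT_HASH_MISMATCH",
+    "E_CHAIN_DISCONTINUITY",
+    "E_SEQ_NON_MONOTONIC",
+    "E_ROOT_MISMATCH",
+    "E_REVOKED_CAPABILITY_USED",
+}
+
+def triage(report_path: str) -> int:
+    with open(report_path, "r", encoding="utf-8") as fh:
+        report = json.load(fh)
+    code = report.get("failure_code")
+    if code is None:
+        print("PASS: bundle accepted as evidence")
+        return 0
+    contracts = ", ".join(report.get("violated_contract_ids", []))
+    if code in TAMPER_EVIDENCE:
+        print(f"ALERT {code}: preserve artifacts and escalate (contracts: {contracts})")
+    else:
+        print(f"REJECT {code}: re-export from the producer (contracts: {contracts})")
+    return 1
+
+if __name__ == "__main__":
+    sys.exit(triage(sys.argv[1]))
+```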
+
+## Warning Codes (`W_*`)
+
+| Code | Meaning (stable) | Typical Triggers | Strict Mode Behavior | Remediation / Next Actions |
+| --- | --- | --- | --- | --- |
+| `W_FILE_NOT_IN_MANIFEST` | A file exists in the bundle but is not listed in `integrity.json` (or recommended files like `seal.json` are not covered). | Extra stray files; incomplete integrity manifest; accidental artifacts (e.g., `.DS_Store`). | MAY be elevated to `E_SCHEMA_INVALID` (fail-closed) depending on verifier policy. | Either add the file to `integrity.json` (preferred) or remove it from the bundle; ensure exporters generate complete integrity manifests. |
+| `W_RANGE_ROOT_PARTIAL` | Verifier cannot fully verify roots/chain due to missing prior context (non-genesis range). | `since_seq > 0`; first event is not `seq=0`. | MAY be elevated to `E_CHAIN_DISCONTINUITY` or `E_ROOT_MISMATCH` depending on verifier policy. | Provide verifiable continuation state (frontier snapshot) or export a seal that starts at `seq=0`; accept reduced assurance only intentionally (non-strict). |
+
diff --git a/spec/SENTINEL_OFFLINE_VERIFIER_REQUIREMENTS.md b/spec/SENTINEL_OFFLINE_VERIFIER_REQUIREMENTS.md
new file mode 100644
index 0000000..0872080
--- /dev/null
+++ b/spec/SENTINEL_OFFLINE_VERIFIER_REQUIREMENTS.md
@@ -0,0 +1,225 @@
+# Sentinel v1 Offline Verifier Requirements
+
+Artifact: `vaultmesh-orgine-mobile/spec/SENTINEL_OFFLINE_VERIFIER_REQUIREMENTS.md`
+Purpose: Verify Sentinel evidence without network, without secrets, without the original runtime, using only exported artifacts.
+
+---
+
+## 0) Inputs
+
+Verifier accepts either:
+- a Seal Bundle directory (recommended), or
+- a raw artifact directory (receipts + roots + integrity + manifest)
+
+Required files (seal bundle mode):
+- `seal.json`
+- `integrity.json`
+- `verifier_manifest.json`
+- one or more receipt/event files (JSONL or sqlite export, as declared by seal)
+- root history file(s) as declared by seal
+
+---
+
+## 1) Outputs
+
+Verifier MUST produce:
+- PASS or FAIL
+- deterministic error codes + human-readable explanations
+- a machine-readable `verification_report.json` containing:
+  - verified ranges
+  - computed roots
+  - observed roots (from artifacts)
+  - mismatches
+  - corruption findings
+  - toolchain / schema versions used
+  - `failure_code` (or null)
+
+---
+
+## 2) Trust Model
+
+- No trust in the producer.
+- No trust in timestamps.
+- No trust in filesystem ordering.
+- Only trust:
+  - schemas & canonicalization rules
+  - cryptographic hashes
+  - deterministic computation
+
+---
+
+## 3) Verification Phases (MUST implement all)
+
+### Phase A — Manifest & Schema Validation
+
+**A1. Schema validate**
+- Validate `seal.json`, `integrity.json`, `verifier_manifest.json`, and each event against their schemas.
+
+Fail if:
+- missing required fields
+- unknown schema version without explicit compatibility support
+- canonicalization version missing or unsupported
+
+**A2. Canonicalization version pin**
+- Verifier MUST enforce `canonicalization_version` from `seal.json` / manifest.
+- If multiple are present, they MUST match.
+
+---
+
+### Phase B — Integrity Manifest Verification
+
+**B1. File hash verification**
+- For every file listed in `integrity.json`, compute its hash and compare.
+
+Fail if:
+- any hash mismatch (hard fail)
+- any required file missing (hard fail)
+- any file present but not listed (soft fail or warning; policy configurable)
+
+**B2.
Size & bounds checks** +- Enforce max file sizes (configurable) to prevent decompression bombs / giant inputs. + +Fail if: +- size exceeds limits without explicit override + +--- + +### Phase C — Event Canonicalization & Hashing + +**C1. Canonicalize every event** +- Convert each event into canonical bytes per `canonicalization.md`. + +**C2. Compute `event_hash`** +- Compute `event_hash = H(canonical_event_bytes)`. + +Fail if: +- event has `event_hash` and it does not match recomputed hash +- event has no `event_hash` AND v1 policy requires it (recommended hardening) + +**C3. Compute `op_digest`** +- Verify `op_digest` matches canonicalized operation descriptor/params. + +Fail if: +- `op_digest` mismatch + +--- + +### Phase D — Chain Verification (Tamper / Rollback Detection) + +**D1. `prev_event_hash` chain** +- For each event in strict `seq` order: +- verify `event.prev_event_hash == previous.event_hash` + +Fail if: +- missing `seq` values +- duplicate `seq` values +- non-monotonic `seq` +- chain discontinuity +- chain fork (two events with same `seq`) + +**D2. Trace linkage integrity (optional but recommended)** +- Where `trace_id` exists, verify that: + - `action_intent` exists before `action_executed` + - If no `action_executed`, then `shadow_receipt` exists (policy may require) + +Fail if (strict mode): +- execution without intent +- denial without shadow receipt (unless explicitly allowed as “impossible-to-write”) + +--- + +### Phase E — Merkle Root Verification + +**E1. Leaf definition** +- Leaf = hash(`event_hash` OR canonical_event_bytes) as specified. Must be consistent with spec. + +**E2. Root recomputation** +- Recompute roots for the receipt range in the seal. + +Fail if: +- computed end root != `seal.json.end_root` +- root history continuity breaks (start root mismatch where applicable) + +**E3. Root timestamp is non-authoritative** +- `ts` can be displayed but never treated as security-critical. + +--- + +### Phase F — Seal Verification + +**F1. Seal self-consistency** +- seal range aligns with included receipts +- seal declares expected roots correctly +- integrity manifest covers all seal files + +Fail if: +- declared range doesn’t match included data +- missing roots or receipts required for the range + +**F2. Optional seal signature (future)** +- v1 may omit cryptographic signature; if present, verify it. + +--- + +## 4) Failure Classes & Codes (Required) + +Verifier must map failures to stable codes: +- `E_SCHEMA_INVALID` +- `E_MANIFEST_HASH_MISMATCH` +- `E_MISSING_REQUIRED_FILE` +- `E_EVENT_HASH_MISMATCH` +- `E_CHAIN_DISCONTINUITY` +- `E_SEQ_NON_MONOTONIC` +- `E_ROOT_MISMATCH` +- `E_RANGE_MISMATCH` +- `E_CANON_VERSION_UNSUPPORTED` +- `E_OVERSIZE_INPUT` +- `E_REVOKED_CAPABILITY_USED` + +This is what makes audits repeatable. + +--- + +## 5) Corruption Handling Requirements + +If verifier detects: +- malformed JSONL segments +- partial file reads +- decode errors + +It MUST: +- produce FAIL (unless “best-effort mode” is explicitly requested) +- report: + - last good `seq` + - affected byte ranges (if possible) + - last valid root +- recommend recovery path: + - verify older seal bundle + - restore from WORM copy + - compare to out-of-band seal digest + +--- + +## 6) Determinism Requirements + +Given identical inputs, verifier output must be byte-identical (except wallclock runtime fields). 
+- deterministic ordering rules (sort by `seq`, not file order)
+- deterministic JSON serialization for reports (stable key order)
+
+---
+
+## 7) CLI Interface (Minimum)
+
+- `sentinel verify --bundle <dir> [--strict] [--report <path>]`
+- `sentinel verify --artifacts <dir> --range <since_seq>:<until_seq>`
+- `sentinel compute-roots --events <events.jsonl>`
+
+---
+
+## 8) Definition of Done
+
+Verifier is “v1 complete” when it passes these drills:
+1. Cold archaeology restore
+2. Denial proof drill
+3. Corruption drill
+4. Rollback attempt detection drill
diff --git a/spec/SENTINEL_V1_CONTRACT_MATRIX.md b/spec/SENTINEL_V1_CONTRACT_MATRIX.md
new file mode 100644
index 0000000..a7767f3
--- /dev/null
+++ b/spec/SENTINEL_V1_CONTRACT_MATRIX.md
@@ -0,0 +1,137 @@
+# Sentinel v1 Contract Matrix
+
+Purpose: Define what must be recorded, by whom, when, and what the verifier must enforce.
+
+Legend:
+- Emitter: system responsible for writing the event
+- Trigger: when the event must be emitted
+- Hard rule: verifier MUST fail if violated
+- Soft rule: verifier MAY warn/fail in strict mode
+
+---
+
+## 0) Bundle & Schema Contracts (Seal Bundle)
+
+| Contract ID | Invariant | Evidence Artifact(s) | Verifier Enforcement | Failure Codes |
+| --- | --- | --- | --- | --- |
+| `B-1` | Bundle MUST contain `seal.json`, `integrity.json`, `verifier_manifest.json`, plus the paths referenced by `seal.json.files.*`. | `seal.json` | File presence checks in `vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py:557`. | `E_MISSING_REQUIRED_FILE`, `E_SCHEMA_INVALID` |
+| `B-2` | Bundle MUST be offline-verifiable (no network assumptions). | Entire bundle | Verifier MUST rely only on bundle bytes; no network calls are permitted by design. | `E_MISSING_REQUIRED_FILE`, `E_SCHEMA_INVALID` (derived from missing/incomplete artifacts) |
+
+### `seal.json` (vm-sentinel-seal-v1)
+
+| Contract ID | Invariant | Evidence Artifact(s) | Verifier Enforcement | Failure Codes |
+| --- | --- | --- | --- | --- |
+| `S-1` | `seal.json.format` MUST equal `vm-sentinel-seal-v1`. | `seal.json` | Format allowlist check in `vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py:594`. | `E_SCHEMA_INVALID` |
+| `S-2` | `seal.json.hash_algo` MUST be `blake3` or `sha256`. | `seal.json` | `hash_algo` validation in `vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py:604`. | `E_SCHEMA_INVALID` |
+| `S-3` | Seal MUST declare a sequence range (`since_seq`, `until_seq`). | `seal.json.range` | Schema validation in `vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py:582`. | `E_SCHEMA_INVALID` |
+| `S-4` | Seal MUST declare `root.start` and `root.end`. | `seal.json.root` | Schema validation in `vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py:582`. | `E_SCHEMA_INVALID` |
+| `S-5` | Seal MUST point to receipts/roots/integrity/verifier_manifest files (and they must exist in the bundle). | `seal.json.files.*` | Path resolution + existence checks in `vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py:640`. | `E_MISSING_REQUIRED_FILE`, `E_SCHEMA_INVALID` |
+
+### `integrity.json` (vm-sentinel-integrity-v1)
+
+| Contract ID | Invariant | Evidence Artifact(s) | Verifier Enforcement | Failure Codes |
+| --- | --- | --- | --- | --- |
+| `I-1` | `integrity.json.format` MUST equal `vm-sentinel-integrity-v1`. | `integrity.json` | Format allowlist check in `vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py:689`. | `E_SCHEMA_INVALID` |
+| `I-2` | `integrity.json.hash_algo` MUST match `seal.json.hash_algo`.
| `integrity.json`, `seal.json` | Cross-check in `vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py:697`. | `E_SCHEMA_INVALID` | +| `I-3` | Each listed file MUST have `path` + `digest`, and the digest MUST match. | `integrity.json.files[]` | Digest recomputation + compare in `vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py:766`. | `E_MANIFEST_HASH_MISMATCH`, `E_SCHEMA_INVALID`, `E_MISSING_REQUIRED_FILE` | +| `I-4` | If `size_bytes` is present, it MUST match file size. | `integrity.json.files[].size_bytes` | Size check in `vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py:822`. | `E_SCHEMA_INVALID` | + +### `verifier_manifest.json` (vm-sentinel-verifier-manifest-v1) + +| Contract ID | Invariant | Evidence Artifact(s) | Verifier Enforcement | Failure Codes | +| --- | --- | --- | --- | --- | +| `V-1` | `verifier_manifest.json.format` MUST equal `vm-sentinel-verifier-manifest-v1`. | `verifier_manifest.json` | Format allowlist check in `vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py:874`. | `E_SCHEMA_INVALID` | +| `V-2` | MUST declare `sentinel_version`, `schema_version`, `canonicalization_version`. | `verifier_manifest.json` | Schema validation in `vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py:866`. | `E_SCHEMA_INVALID` | +| `V-3` | If `hash_algo` is present, it MUST match `seal.json.hash_algo`. | `verifier_manifest.json`, `seal.json` | Cross-check in `vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py:898`. | `E_SCHEMA_INVALID` | +| `V-4` | `verifier` object MAY include `name/version/sha256`; if present, include it in reports. | `verifier_manifest.json` | Report inclusion in `vaultmesh-orgine-mobile/tools/vm_verify_sentinel_bundle.py:912`. | N/A | + +## A) Action Lifecycle Events (Intent → Decision → Outcome) + +| Event Type | Emitter | Trigger | Required Fields (minimum) | Hard Verification Rules | +| --- | --- | --- | --- | --- | +| `action_intent` | Actor system (Cloudflare ops / guardian / CLI / RTOS host) | Before any attempt to execute an operation | `seq`, `ts`, `event_type`, `actor`, `op`, `op_digest`, `trace_id`, `cap_hash`, `prev_event_hash`, `event_hash` | No execution without prior intent: Every `action_executed` must reference a prior `action_intent` with same `trace_id`. | +| `policy_decision` (recommended v1; can be embedded in intent `payload` to keep schema lean) | Policy engine (Layer0/classifier) | After intent, before allow/deny | `seq`, `ts`, `actor`, `trace_id`, `op_digest`, `result` (`allow`/`deny`), `payload.reason_code`, `payload.classification`, `prev_event_hash` | Policy recording: if system claims policy enforcement, then every allow/deny must have a recorded decision (either as this event or as a signed payload embedded in intent). | +| `action_executed` | Actor system | Immediately after side-effect completes | `seq`, `ts`, `actor`, `trace_id`, `op_digest`, `result` (`ok`/`error`), `root_before`, `root_after`, `prev_event_hash`, `event_hash` | Must have matching prior `action_intent`. Must not exist if corresponding denial exists for same `trace_id` (no split reality). 
| +| `shadow_receipt` | Sentinel core (or Actor if core is embedded) | On any denial (capability fail, policy deny, budget deny, degraded mode) | `seq`, `ts`, `actor`, `trace_id`, `op_digest`, `result` (`deny`), `payload.reason_code`, `payload.side_effects="none"`, `prev_event_hash`, `event_hash` | No silent denial: if an intent does not lead to `action_executed`, there must be a `shadow_receipt` unless explicitly marked “write-impossible” (rare). Shadow receipts must assert `side_effects="none"`. | + +Notes: +- If you want to avoid a separate `policy_decision` event, put the decision in `action_intent.payload.policy` and require it to be present when `result != "unknown"`. + +--- + +## B) Trust / Authority Events (Capabilities) + +| Event Type | Emitter | Trigger | Required Fields | Hard Verification Rules | +| --- | --- | --- | --- | --- | +| `cap_grant` | Authority issuer (operator tool / provisioning process) | When a capability is created/issued | `seq`, `ts`, `actor` (issuer), `payload.capability_json`, `cap_hash`, `prev_event_hash` | Capability used later must hash to a previously granted token OR be in a pinned trust root (bootstrap). | +| `cap_revoke` | Authority issuer | When revoking capability | `seq`, `ts`, `actor`, `payload.revoked_cap_hash`, `payload.reason_code`, `prev_event_hash` | Revocation is authoritative: any subsequent event with `cap_hash` matching a revoked one MUST be denied (`shadow_receipt` with reason revoked). | +| `cap_use` (optional; can be derived) | Sentinel core | Whenever a capability is presented | `seq`, `ts`, `actor`, `cap_hash`, `trace_id`, `prev_event_hash` | If enabled, must correlate to `action_intent/trace_id`. Not required if you already store `cap_hash` on intent/outcome. | + +--- + +## C) Root & Sealing Events (Time Compression / Witness Bundles) + +| Event Type | Emitter | Trigger | Required Fields | Hard Verification Rules | +| --- | --- | --- | --- | --- | +| `root_published` | Sentinel core | After appending N events or T time window | `seq`, `ts`, `payload.root_hex`, `payload.coverage_seq`, `prev_event_hash` | Root must match recomputation over all events up to `coverage_seq`. Root determinism required. | +| `seal_created` | Sentinel core | On schedule OR on tamper/corruption OR on manual request | `seq`, `ts`, `payload.seal_id`, `payload.range_since`, `payload.range_until`, `payload.end_root`, `payload.canonicalization_version`, `prev_event_hash` | Seal must be verifiable offline: receipts + roots + integrity manifest present. Seal completeness is a hard requirement. | + +--- + +## D) Integrity / Tamper / Degradation Events (Hostile Reality) + +| Event Type | Emitter | Trigger | Required Fields | Hard Verification Rules | +| --- | --- | --- | --- | --- | +| `tamper_signal` | Hardware/host sensor adapter → recorded by Sentinel | Unexpected power cycle, clock jump, enclosure open, seizure sensor | `seq`, `ts`, `payload.kind`, `payload.sensor_digest`, `payload.severity`, `prev_event_hash` | Presence of tamper signals must tighten policy: subsequent high-risk ops should be denied or require stronger caps (enforced in policy rules; verifier can check if policy recorded). | +| `corruption_detected` | Storage layer / Sentinel core | On detection of checksum/page/parse failure | `seq`, `ts`, `payload.affected_ranges`, `payload.last_good_seq`, `payload.last_good_root`, `prev_event_hash` | No silent data loss: corruption must be recorded. Recommended: must trigger immediate `seal_created` if possible (strict mode can enforce). 
|
+| `boot_event` | Sentinel core | On startup | `seq`, `ts`, `payload.version`, `payload.schema_version`, `payload.hash_algo`, `prev_event_hash` | Boot events should be monotonic across sessions; verifier can warn if missing across long ranges. |
+
+---
+
+## E) Minimal Verifier Rules (Derived from Matrix)
+
+Hard fails (always):
+1. Schema invalid for any required file/event → `E_SCHEMA_INVALID`
+2. `seq` non-monotonic / duplicate / missing in covered range → `E_SEQ_NON_MONOTONIC`
+3. `prev_event_hash` chain discontinuity → `E_CHAIN_DISCONTINUITY`
+4. Root mismatch for declared coverage → `E_ROOT_MISMATCH`
+5. Seal missing required artifacts (receipts + roots + integrity + verifier manifest) → `E_MISSING_REQUIRED_FILE`, `E_SCHEMA_INVALID`
+6. Revoked capability used without denial → `E_REVOKED_CAPABILITY_USED`
+
+Strict-mode fails (recommended):
+7. Missing `shadow_receipt` where intent didn’t execute
+8. Corruption without subsequent seal (when seal could be created)
+9. Policy enforcement claim without recorded policy decision
+
+---
+
+## F) Boundary Rules (encoded)
+
+- Actor Replaceability: actor systems can change; evidence format cannot.
+- Policy Recording: decisions go into receipts.
+- Tamper Evidence: tamper events must exist for power/clock anomalies.
+- Export Redundancy: portable artifacts exist independent of runtime.
+- Verifier Independence: no network/secrets required.
+- Version Compatibility: verifier supports N-2.
+
+---
+
+## G) Failure Code Map (v1)
+
+Source of truth: `vaultmesh-orgine-mobile/tools/sentinel_failure_codes.py`
+
+- `E_SCHEMA_INVALID`: Any required schema/format/cross-field validation fails.
+- `E_MISSING_REQUIRED_FILE`: A required file is missing from the bundle (or referenced but absent).
+- `E_MANIFEST_HASH_MISMATCH`: A file’s computed digest does not match `integrity.json`.
+- `E_OVERSIZE_INPUT`: Input exceeds configured maximum bytes.
+- `E_EVENT_HASH_MISMATCH`: `event_hash` does not match recomputation from canonical bytes.
+- `E_CHAIN_DISCONTINUITY`: `prev_event_hash` chain breaks (tamper/reorder/rollback evidence).
+- `E_SEQ_NON_MONOTONIC`: Duplicate, missing, or non-monotonic `seq` values.
+- `E_RANGE_MISMATCH`: Seal declares a range that does not match included events.
+- `E_CANON_VERSION_UNSUPPORTED`: Declared `canonicalization_version` is unknown/unsupported.
+- `E_ROOT_MISMATCH`: Recomputed Merkle end root does not match declared/observed root.
+- `E_REVOKED_CAPABILITY_USED`: A revoked capability is used after revoke without denial semantics.
+
+Warnings (non-fatal unless strict-mode elevates):
+- `W_FILE_NOT_IN_MANIFEST`: A file exists in the bundle but is not listed in `integrity.json` (or recommended `seal.json` coverage is missing).
+- `W_RANGE_ROOT_PARTIAL`: Verifier cannot fully verify roots/chain due to missing prior context (e.g., `since_seq > 0` without continuation state).
diff --git a/spec/SENTINEL_V1_SPEC.md b/spec/SENTINEL_V1_SPEC.md
new file mode 100644
index 0000000..254c7ca
--- /dev/null
+++ b/spec/SENTINEL_V1_SPEC.md
@@ -0,0 +1,340 @@
+# VaultMesh Sentinel v1 Specification
+
+Status: Draft v1 (implementation-oriented)
+Goal: A space/IoT-grade “forensic continuity core” that remains truthful under isolation, capture, corruption, and time.
+Non-goals: availability guarantees, global consensus, remote control plane, “SIEM replacement,” full workflow UI.
+
+Non-goal clarification: Sentinel does not guarantee liveness or uptime under adversarial conditions. It guarantees detectability, attribution, and recoverable truth.
+ +--- + +## 1) Sentinel v1 Operating Assumptions + +- Correctness under isolation: Sentinel must remain verifiable with zero network for months/years. +- Federation optional: peers are witness augmenters, never required for correctness. +- Adversary present: assume capture, coercion, supply-chain drift, narrative pressure. +- Truth > uptime: Sentinel may degrade functionality, but must not silently lie. + +--- + +## 2) System Invariants + +Sentinel is “correct” iff all invariants hold. + +### I-1 Append-only evidence + +- Evidence events are append-only and monotonic by sequence number. +- Any truncation/rollback is detectable via root mismatch. + +### I-2 Deterministic verification + +- Given the same artifacts, verification must be deterministic across platforms. + +### I-3 No silent denial + +- If an operation is denied, a ShadowReceipt MUST be emitted (unless physically impossible). + +### I-4 No authority amplification + +- Automation and recovery actions may only narrow authority, never expand it. + +### I-5 Corruption becomes evidence + +- Storage corruption must emit CorruptionReceipt with scope of damage (pages/segments) and last known good root. + +### I-6 Archaeology-first survivability + +- A clean-room restore + verification must succeed using only exported artifacts + verifier. + +--- + +## 3) Sentinel v1 Scope + +### Included + +- Evidence ledger (append-only receipts) +- Roots (deterministic Merkle) +- Seal bundles (portable witness packages) +- ShadowReceipts (proof of restraint) +- Minimal capability verification (scoped authority) +- Corruption detection + reporting +- Export + verification CLI + +### Excluded (v1) + +- Full federation sync +- Cloud dashboards +- Remote orchestration beyond export bundles +- Complex policy engines (keep minimal) +- On-chain anchoring required for correctness (optional) + +--- + +## 4) Core Components + +### 4.1 Receipt Ledger + +Storage: single-file SQLite (preferred) OR append-only JSONL (fallback), but exported artifacts MUST be open and redundant. + +Event types (minimum): +- `action_intent` (attempt to perform operation) +- `policy_decision` (recommended; record allow/deny reasoning, or embed in `action_intent.payload`) +- `action_executed` (operation performed) +- `shadow_receipt` (operation denied / restrained) +- `cap_grant` / `cap_revoke` +- `seal_created` +- `root_published` +- `corruption_detected` +- `boot_event` (startup, version, schema hash) +- `health_event` (periodic status, storage checks) + +Fields (required per event): +- `event_id` (uuid) +- `seq` (u64 monotonic) +- `ts` (monotonic + wallclock if available) +- `event_type` +- `actor` (capability subject / device identity) +- `cap_hash` (hash of capability JSON or `"none"`) +- `op` (operation name / canonical descriptor) +- `op_digest` (hash of normalized op params) +- `result` (`ok`/`deny`/`error`) +- `root_before` / `root_after` (where applicable) +- `trace_id` (correlates intent → outcome) +- `prev_event_hash` (hash chain for quick tamper evidence) +- `event_hash` (hash of canonical event bytes; verifiable) +- `payload` (bounded JSON with strict schema) + +Bounded payload rule: payload size must be capped (configurable), with overflow handled by chunking or external blob with hash reference. + +### 4.2 Merkle Root Engine + +- Maintain rolling Merkle root of receipt leaves (each leaf = hash(event_canonical_bytes)). +- Support incremental updates O(log n). 
+- Persist:
+  - `ROOT.current.txt` (root + seq + timestamp)
+  - `frontier.bin` (or equivalent) for fast restart
+  - Deterministic canonicalization rules for event hashing.
+
+Hash function: BLAKE3 (recommended) or SHA-256 (where platform constraints require it). Must be constant across builds.
+
+### 4.3 Seal Bundles (Ouroboros Seals)
+
+A seal is a portable, offline-verifiable “witness packet”.
+
+Seal contents:
+- `seal.json` (metadata + root + ranges)
+- `receipts_range.jsonl` (or sqlite page range export)
+- `roots.txt` (root history for covered range)
+- `integrity.json` (hashes of included files)
+- `verifier_manifest.json` (expected tool versions & checksums)
+
+Seal creation policy (v1 default):
+- Time-based (e.g., every 24h), OR
+- Event-based (N high-risk operations), OR
+- Tamper-signal triggered (see §6)
+
+Seal cadence guidance (normative defaults; implementations MAY be stricter):
+- Max time window: 24h between seals while in NORMAL mode.
+- Max event window: 10,000 events between seals (configurable).
+- Mandatory seal triggers: any `tamper_signal` or `corruption_detected` event SHOULD attempt an immediate seal creation (unless physically impossible).
+
+Seal verification must not require network.
+
+### 4.4 Capabilities (Trust, killable)
+
+Format: signed JSON capability tokens.
+
+Fields:
+- `sub` (subject identity)
+- `scopes[]` (fine-grained)
+- `exp` (expiry)
+- `nbf` (not-before)
+- `aud` (sentinel instance id)
+- `jti` (token id)
+
+Rules:
+- Deny if expired/invalid signature/wrong audience/revoked.
+- Revocation is additive evidence: emits `cap_revoke` receipt.
+- Emergency mode may narrow scopes only.
+
+### 4.5 ShadowReceipts (Proof of Restraint)
+
+Emitted when:
+- capability check fails
+- policy denies
+- budget insufficient (if using energy model)
+- unsafe context (tamper signals, degraded mode)
+
+Minimum fields:
+- `reason_code` (enum)
+- `reason_text` (short)
+- `would_have_done` (op descriptor + digest)
+- `constraints_applied` (scopes narrowed, mode escalated)
+- `context_snapshot_hash` (hash of relevant context)
+
+Canonical convention (recommended for audit clarity):
+- `payload.side_effects` SHOULD be `"none"` for `shadow_receipt` events.
+- `payload.reason_code` SHOULD be from a stable enum (e.g., `policy_violation`, `insufficient_capability`, `insufficient_budget`, `unsafe_context`, `backpressure`, `integrity_degraded`).
+
+---
+
+## 5) Execution Model (Action Gating)
+
+Sentinel sits between “intent” and “effect”.
+
+State machine:
+1. `action_intent` emitted
+2. Validate capability + context
+3. If allowed → execute → emit `action_executed`
+4. If denied → emit `shadow_receipt`
+5. Always advance receipt root
+
+No operation may execute without producing a receipt trail.
+
+---
+
+## 6) Tamper & Degradation Signals (v1 minimal)
+
+Sentinel accepts a `tamper_signal` input stream (from hardware or host OS). v1 supports:
+- acceleration spike / motion pattern
+- unexpected power cycle
+- secure clock jump / rollback
+- enclosure open (if available)
+- storage integrity failures
+
+Response: never self-destruct.
Instead:
+- elevate mode to `DEGRADED`
+- narrow allowable scopes
+- increase sealing frequency
+- emit `corruption_detected` or `tamper_signal` receipt
+
+---
+
+## 7) Storage Integrity & Corruption Handling
+
+Requirements:
+- periodic background scan (configurable duty cycle)
+- per-page checksums (if sqlite: use `PRAGMA integrity_check` + custom page hashing if needed)
+- on detection:
+  - emit `corruption_detected`
+  - record impacted ranges/pages
+  - freeze mutating ops (or narrow to safe subset)
+  - create an immediate seal bundle if possible
+
+---
+
+## 8) Interfaces
+
+### 8.1 Local API (host integration)
+
+Provide a minimal host-callable interface (C FFI or HTTP on localhost, depending on platform).
+
+Calls (v1):
+- `submit_intent(op, params, cap_token) -> trace_id`
+- `get_trace(trace_id) -> outcome + receipt references`
+- `export_seal(since, until) -> path`
+- `verify_bundle(path) -> ok + details`
+- `get_root() -> current root + seq`
+- `signal_tamper(kind, payload) -> emits receipt + mode shift`
+
+### 8.2 CLI (must ship with artifacts)
+
+Offline verifier requirements: `spec/SENTINEL_OFFLINE_VERIFIER_REQUIREMENTS.md`.
+
+- `sentinel verify --bundle <dir>`
+- `sentinel compute-roots --from <events.jsonl>`
+- `sentinel seal --since "24h"`
+- `sentinel export --since "90d" --for auditor`
+
+---
+
+## 9) Artifact Layout (v1 canonical)
+
+```
+.state/
+  ledger.sqlite
+  frontier.bin
+  seals/
+    ouroboros_seal_YYYYMMDD_HHMMSS/
+      seal.json
+      receipts.jsonl
+      roots.txt
+      integrity.json
+      verifier_manifest.json
+ROOT.current.txt
+receipts/
+  receipts.jsonl (optional mirror)
+  identity.jsonl
+  mesh.jsonl
+  treasury.jsonl (optional)
+```
+
+Mirror strategy: keep SQLite as primary, JSONL mirror for portability (configurable).
+
+---
+
+## 10) Security & Key Management (v1 minimum)
+
+- Device identity keypair (Ed25519 recommended)
+- Capability verification public keys pinned locally
+- Revocation list stored append-only with receipts
+- Keys must be exportable for succession but not casually readable
+- Support “Operator Phone” as witness/verifier only (no full node requirement)
+
+---
+
+## 11) Performance & Footprint Targets
+
+- Memory: < 64MB steady state (configurable)
+- Storage: < 256MB baseline for 90 days (depends on event volume)
+- Receipt append latency: target < 10ms typical
+- Sealing: incremental, bounded CPU
+- Deterministic behavior under load (backpressure emits ShadowReceipts)
+
+---
+
+## 12) Test & Drill Requirements (Definition of Done)
+
+Required drills (must pass):
+
+1. Cold archaeology restore
+   - restore artifacts to empty dir
+   - recompute roots
+   - verify seals match
+2. Denial proof drill
+   - attempt forbidden op
+   - confirm ShadowReceipt exists
+   - confirm no `action_executed` receipt exists
+3. Corruption drill
+   - corrupt storage segment/pages
+   - confirm corruption detected
+   - confirm mode degrade + narrowed scopes
+   - confirm immediate seal if possible
+4. Coercion drill
+   - simulate “must sign” request
+   - confirm refusal recorded (ShadowReceipt)
+   - confirm no authority expansion occurred
+
+---
+
+## 13) Versioning & Compatibility
+
+- Sentinel v1 schemas are append-only. Any breaking change requires a v2 schema and a new `canonicalization_version`.
+- Every artifact includes: + - `sentinel_version` + - `schema_version` + - `hash_algo` + - `canonicalization_version` +- Verifier must support at least: + - N-2 schema versions (configurable) + +--- + +## 14) v1 Deliverables + +- `sentinel-core` library (Rust recommended) +- Sentinel CLI + verifier +- Seal bundle generator + verifier +- Minimal host API +- Reference integration example (IoT gateway simulator) +- Test suite for drills above diff --git a/spec/sentinel/README.md b/spec/sentinel/README.md new file mode 100644 index 0000000..90309a8 --- /dev/null +++ b/spec/sentinel/README.md @@ -0,0 +1,22 @@ +# Sentinel v1 Schemas (v1) + +This directory contains implementer-facing, machine-checkable schemas and deterministic verification rules for Sentinel v1. + +- `canonicalization.md`: normative hashing + Merkle + root publication rules. +- `event.schema.json`: minimal event schema for `receipts*.jsonl` export. +- `seal.schema.json`: minimal schema for `seal.json` inside an Ouroboros seal bundle. +- `integrity.schema.json`: optional schema for `integrity.json` (hashes of bundle files). +- `verifier_manifest.schema.json`: optional schema for `verifier_manifest.json` (tool/version expectations). + +Related v1 documents: +- `spec/SENTINEL_OFFLINE_VERIFIER_REQUIREMENTS.md` +- `spec/SENTINEL_V1_CONTRACT_MATRIX.md` + +Reference verifier + testvector: +- Verifier: `tools/vm_verify_sentinel_bundle.py` +- Testvector bundle: `testvectors/sentinel/black-box-that-refused/` + +Quick run: +```bash +python3 tools/vm_verify_sentinel_bundle.py --bundle testvectors/sentinel/black-box-that-refused --strict +``` diff --git a/spec/sentinel/canonicalization.md b/spec/sentinel/canonicalization.md new file mode 100644 index 0000000..b3636b4 --- /dev/null +++ b/spec/sentinel/canonicalization.md @@ -0,0 +1,123 @@ +# Sentinel v1 Canonicalization & Hashing Rules + +This document defines deterministic event hashing and Merkle root computation for Sentinel v1. Verification MUST be deterministic across platforms given the same artifacts. + +## 1) Hash function (`hash_algo`) + +`hash_algo` MUST be one of: +- `blake3` (recommended) +- `sha256` (fallback for constrained platforms) + +The chosen `hash_algo` MUST be constant for a given Sentinel instance/build. Verifiers MUST reject mixed algorithms within a single bundle unless explicitly versioned. + +### 1.1 `vmhash` + +`vmhash(data: bytes) -> string` returns: +- `"blake3:" + hex(blake3(data))` when `hash_algo=blake3` +- `"sha256:" + hex(sha256(data))` when `hash_algo=sha256` + +`hex(...)` is lowercase hex with no separators. + +## 2) JSON canonicalization (`canonicalization_version`) + +`canonicalization_version` for Sentinel v1 events is: + +- `sentinel-event-jcs-v1` + +Canonical JSON MUST use RFC 8785 (JSON Canonicalization Scheme, “JCS”): +- UTF-8 encoding +- Object keys sorted lexicographically +- No insignificant whitespace +- Numbers encoded per JCS rules + +If a platform cannot implement full JCS, it MUST NOT claim `sentinel-event-jcs-v1`. + +## 3) Event canonical bytes + +Each exported event is a JSON object that conforms to `event.schema.json`. + +`event_canonical_bytes` is the UTF-8 bytes of the JCS-canonicalized event object. + +## 4) Event hash + hash chain + +### 4.1 `event_hash` + +`event_hash` MUST be computed over the canonical bytes of the event object *excluding* the `event_hash` field itself. 
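+
+A non-normative Python sketch of this recomputation follows (the formal definition is below). It assumes the `blake3` package, and approximates JCS with `json.dumps(..., sort_keys=True, separators=(",", ":"))`, which matches RFC 8785 only for events without floats or other JCS-sensitive values; a production verifier would use a full JCS implementation. The chain and Merkle helpers implement §4.2 and §6 as defined later in this document.
+
+```python
+# Non-normative sketch: JCS is approximated (see note above); requires `pip install blake3`.
+import hashlib
+import json
+
+import blake3
+
+def vmhash(data: bytes, hash_algo: str = "blake3") -> str:
+    # Section 1.1: "algo:" + lowercase hex digest.
+    if hash_algo == "blake3":
+        return "blake3:" + blake3.blake3(data).hexdigest()
+    return "sha256:" + hashlib.sha256(data).hexdigest()
+
+def jcs_bytes(obj) -> bytes:
+    # JCS approximation: sorted keys, no insignificant whitespace, UTF-8.
+    return json.dumps(obj, sort_keys=True, separators=(",", ":"),
+                      ensure_ascii=False).encode("utf-8")
+
+def recompute_event_hash(event: dict, hash_algo: str) -> str:
+    # Section 4.1: hash the canonical bytes with `event_hash` removed.
+    stripped = {k: v for k, v in event.items() if k != "event_hash"}
+    return vmhash(jcs_bytes(stripped), hash_algo)
+
+def check_chain(events: list, hash_algo: str) -> None:
+    # Section 4.2: assumes a genesis-range export (first event has seq = 0).
+    prev = "0"
+    for ev in sorted(events, key=lambda e: e["seq"]):
+        assert ev["prev_event_hash"] == prev, f"chain discontinuity at seq={ev['seq']}"
+        assert ev["event_hash"] == recompute_event_hash(ev, hash_algo), \
+            f"event hash mismatch at seq={ev['seq']}"
+        prev = ev["event_hash"]
+
+def merkle_root(leaves: list, hash_algo: str) -> str:
+    # Sections 6.1-6.3: hex parts concatenated, odd node duplicated, fixed empty root.
+    if not leaves:
+        return vmhash(b"empty", hash_algo)
+    level = list(leaves)
+    while len(level) > 1:
+        nxt = []
+        for i in range(0, len(level), 2):
+            left = level[i]
+            right = level[i + 1] if i + 1 < len(level) else left
+            combined = left.split(":", 1)[-1] + right.split(":", 1)[-1]
+            nxt.append(vmhash(combined.encode("utf-8"), hash_algo))
+        level = nxt
+    return level[0]
+```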
+
+Define:
+- `event_without_event_hash = event` with the `event_hash` property removed (if present)
+- `event_canonical_bytes = jcs_bytes(event_without_event_hash)`
+
+Then:
+
+`event_hash = vmhash(event_canonical_bytes)`
+
+For exported artifacts, `event_hash` MUST be present in the event record and verifiers MUST recompute and compare it.
+
+### 4.2 `prev_event_hash`
+
+- For `seq = 0` (or the first event in a new ledger): `prev_event_hash = "0"`
+- For `seq = n > 0`: `prev_event_hash` MUST equal the computed `event_hash` of the immediately preceding event (`seq = n-1`) in the same ledger.
+
+This provides fast tamper evidence even without Merkle recomputation.
+
+## 5) Operation digest (`op_digest`)
+
+`op_digest` commits to the *normalized* operation descriptor.
+
+Define the normalized object:
+```json
+{
+  "op": "<op-id>",
+  "params": { "canonical": "params" }
+}
+```
+
+Normalization rules:
+- `op` MUST be a stable, versioned identifier (e.g., `sentinel.export_seal.v1`).
+- `params` MUST be JSON (no NaN/Infinity); omit unset fields rather than using null where possible.
+- Canonicalize the object using `sentinel-event-jcs-v1`, then hash:
+
+`op_digest = vmhash(jcs_bytes({"op": op, "params": params}))`
+
+## 6) Merkle root (`ROOT.current.txt`)
+
+### 6.1 Leaves
+
+The Merkle tree commits to the ordered list of event hashes:
+
+`leaves = [event_hash(seq=0), event_hash(seq=1), ...]`
+
+Each leaf is a `vmhash` string (`algo:hex`).
+
+Note on ranged bundles: A verifier can only recompute the global Merkle roots for an arbitrary `since_seq > 0` bundle if it is also given a verifiable Merkle continuation state (e.g., a frontier snapshot) at `since_seq-1`. Otherwise, verification MUST fall back to hash-chain + file-integrity checks for that range, or the bundle MUST start at `since_seq = 0`.
+
+### 6.2 Parent computation (VaultMesh-style)
+
+To compute a parent from two children:
+- Let `left_hex = left.split(":", 1)[-1]`
+- Let `right_hex = right.split(":", 1)[-1]`
+- `parent = vmhash( (left_hex + right_hex).encode("utf-8") )`
+
+If the level has an odd count, duplicate the last element (i.e., `right = left`).
+
+### 6.3 Empty tree root
+
+If there are no leaves, the root MUST be:
+
+`vmhash(b"empty")`
+
+### 6.4 Root publication file format
+
+`ROOT.current.txt` MUST be human-readable and parseable as key/value lines:
+
+```
+format=vm-sentinel-root-v1
+root=<vmhash>
+seq=<u64>
+updated_at=<iso8601>
+hash_algo=<blake3|sha256>
+canonicalization_version=sentinel-event-jcs-v1
+```
+
+Additional keys MAY be included, but verifiers MUST ignore unknown keys.
diff --git a/spec/sentinel/event.schema.json b/spec/sentinel/event.schema.json
new file mode 100644
index 0000000..be02159
--- /dev/null
+++ b/spec/sentinel/event.schema.json
@@ -0,0 +1,68 @@
+{
+  "$schema": "https://json-schema.org/draft/2020-12/schema",
+  "title": "VaultMesh Sentinel v1 Event",
+  "type": "object",
+  "additionalProperties": false,
+  "required": [
+    "event_id",
+    "seq",
+    "ts",
+    "event_type",
+    "actor",
+    "cap_hash",
+    "op",
+    "op_digest",
+    "result",
+    "trace_id",
+    "prev_event_hash",
+    "event_hash",
+    "payload"
+  ],
+  "properties": {
+    "event_id": { "type": "string", "format": "uuid" },
+    "seq": { "type": "integer", "minimum": 0 },
+    "ts": {
+      "description": "Monotonic + wallclock if available.
Accepts ISO-8601 Z or a structured object.", + "anyOf": [ + { "type": "string" }, + { + "type": "object", + "additionalProperties": false, + "required": ["wall"], + "properties": { + "wall": { "type": "string", "format": "date-time" }, + "mono_ns": { "type": "integer", "minimum": 0 } + } + } + ] + }, + "event_type": { + "type": "string", + "enum": [ + "action_intent", + "policy_decision", + "action_executed", + "shadow_receipt", + "cap_grant", + "cap_revoke", + "seal_created", + "root_published", + "corruption_detected", + "tamper_signal", + "boot_event", + "health_event" + ] + }, + "actor": { "type": "string", "minLength": 1 }, + "cap_hash": { "type": "string", "minLength": 1 }, + "op": { "type": "string", "minLength": 1 }, + "op_digest": { "type": "string", "minLength": 1 }, + "result": { "type": "string", "enum": ["ok", "deny", "error"] }, + "root_before": { "type": "string" }, + "root_after": { "type": "string" }, + "trace_id": { "type": "string", "format": "uuid" }, + "prev_event_hash": { "type": "string", "minLength": 1 }, + "event_hash": { "type": "string" }, + "payload": { "type": "object" } + } +} diff --git a/spec/sentinel/integrity.schema.json b/spec/sentinel/integrity.schema.json new file mode 100644 index 0000000..b93ed01 --- /dev/null +++ b/spec/sentinel/integrity.schema.json @@ -0,0 +1,24 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "VaultMesh Sentinel v1 Integrity Manifest (integrity.json)", + "type": "object", + "additionalProperties": false, + "required": ["format", "hash_algo", "files"], + "properties": { + "format": { "type": "string", "const": "vm-sentinel-integrity-v1" }, + "hash_algo": { "type": "string", "enum": ["blake3", "sha256"] }, + "files": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": false, + "required": ["path", "digest"], + "properties": { + "path": { "type": "string" }, + "digest": { "type": "string" }, + "size_bytes": { "type": "integer", "minimum": 0 } + } + } + } + } +} diff --git a/spec/sentinel/seal.schema.json b/spec/sentinel/seal.schema.json new file mode 100644 index 0000000..59a3a93 --- /dev/null +++ b/spec/sentinel/seal.schema.json @@ -0,0 +1,62 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "VaultMesh Sentinel v1 Seal Bundle (seal.json)", + "type": "object", + "additionalProperties": false, + "required": [ + "format", + "sentinel_version", + "schema_version", + "hash_algo", + "canonicalization_version", + "seal_id", + "created_at", + "range", + "root", + "files" + ], + "properties": { + "format": { "type": "string", "const": "vm-sentinel-seal-v1" }, + "sentinel_version": { "type": "string" }, + "schema_version": { "type": "string" }, + "hash_algo": { "type": "string", "enum": ["blake3", "sha256"] }, + "canonicalization_version": { "type": "string" }, + "seal_id": { "type": "string" }, + "created_at": { "type": "string", "format": "date-time" }, + "instance_id": { "type": "string" }, + "ledger_type": { "type": "string", "enum": ["sqlite", "jsonl"] }, + "range": { + "type": "object", + "additionalProperties": false, + "required": ["since_seq", "until_seq"], + "properties": { + "since_seq": { "type": "integer", "minimum": 0 }, + "until_seq": { "type": "integer", "minimum": 0 }, + "since_ts": { "type": "string" }, + "until_ts": { "type": "string" } + } + }, + "root": { + "type": "object", + "additionalProperties": false, + "required": ["start", "end"], + "properties": { + "start": { "type": "string" }, + "end": { "type": "string" }, + "seq": { "type": 
"integer", "minimum": 0 } + } + }, + "files": { + "type": "object", + "additionalProperties": false, + "required": ["receipts", "roots", "integrity", "verifier_manifest"], + "properties": { + "receipts": { "type": "string" }, + "roots": { "type": "string" }, + "integrity": { "type": "string" }, + "verifier_manifest": { "type": "string" } + } + }, + "notes": { "type": "string" } + } +} diff --git a/spec/sentinel/verifier_manifest.schema.json b/spec/sentinel/verifier_manifest.schema.json new file mode 100644 index 0000000..051a791 --- /dev/null +++ b/spec/sentinel/verifier_manifest.schema.json @@ -0,0 +1,23 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "VaultMesh Sentinel v1 Verifier Manifest (verifier_manifest.json)", + "type": "object", + "additionalProperties": false, + "required": ["format", "sentinel_version", "schema_version", "canonicalization_version"], + "properties": { + "format": { "type": "string", "const": "vm-sentinel-verifier-manifest-v1" }, + "sentinel_version": { "type": "string" }, + "schema_version": { "type": "string" }, + "hash_algo": { "type": "string", "enum": ["blake3", "sha256"] }, + "canonicalization_version": { "type": "string" }, + "verifier": { + "type": "object", + "additionalProperties": true, + "properties": { + "name": { "type": "string" }, + "version": { "type": "string" }, + "sha256": { "type": "string" } + } + } + } +} diff --git a/testvectors/proofbundle/README.md b/testvectors/proofbundle/README.md new file mode 100644 index 0000000..e7f5ca3 --- /dev/null +++ b/testvectors/proofbundle/README.md @@ -0,0 +1,81 @@ +# VaultMesh ProofBundle Conformance Test Pack + +This directory contains **example ProofBundle JSON files** used to validate +implementations of the `vm_verify_proofbundle.py` verifier. + +It is designed for **air-gapped, offline verification**: you can unpack this +folder on a laptop with Python installed and run the tests locally. + +## 1. Contents + +- `proofbundle-valid.json` + A known-good ProofBundle with a contiguous hash chain. + The verifier MUST report success. + +- `proofbundle-tampered-body.json` + A bundle where the body of one receipt was modified **without** + updating its `root_hash`. + The verifier MUST detect a hash mismatch. + +- `proofbundle-tampered-root.json` + A bundle where a receipt's `root_hash` is incorrect. + The verifier MUST detect a hash mismatch. + +- `proofbundle-broken-chain.json` + A bundle where a `previous_hash` does not match the prior receipt's + `root_hash`. + The verifier MUST report a broken chain. + +## 2. Prerequisites + +- Python **3.10+** +- The `blake3` package: + +```bash +pip install blake3 +``` + +- The verifier script on your machine: + +``` +vm_verify_proofbundle.py +``` + +(From the VaultMesh distribution under `burocrat/app/tools/`.) + +## 3. How to run + +From this directory: + +```bash +python3 vm_verify_proofbundle.py proofbundle-valid.json +python3 vm_verify_proofbundle.py proofbundle-tampered-body.json +python3 vm_verify_proofbundle.py proofbundle-tampered-root.json +python3 vm_verify_proofbundle.py proofbundle-broken-chain.json +``` + +## 4. 
Expected results + +| File | Expected Exit | Expected Outcome | +|------|---------------|------------------| +| `proofbundle-valid.json` | 0 | OK - chain is contiguous and valid | +| `proofbundle-tampered-body.json` | 1 | HASH_MISMATCH detected | +| `proofbundle-tampered-root.json` | 1 | HASH_MISMATCH detected | +| `proofbundle-broken-chain.json` | 1 | BROKEN_CHAIN or equivalent error | + +Exact wording of error messages MAY vary across implementations, but: + +- Any hash mismatch MUST cause a non-zero exit code. +- Any broken chain MUST cause a non-zero exit code. + +## 5. Specification Reference + +For the formal, normative definition of ProofBundle and verification rules, see: + +- `VAULTMESH-PROOFBUNDLE-SPEC.md` (Conformance Tests section) +- `VAULTMESH-STANDARDS-INDEX.md` (for version matrix and related artifacts) + +--- + +_VaultMesh ProofBundle Conformance Test Pack v1.0_ +_Sovereign Infrastructure for the Digital Age_ diff --git a/testvectors/proofbundle/proofbundle-broken-chain.json b/testvectors/proofbundle/proofbundle-broken-chain.json new file mode 100644 index 0000000..88fa786 --- /dev/null +++ b/testvectors/proofbundle/proofbundle-broken-chain.json @@ -0,0 +1,80 @@ +{ + "actor": { + "did": "did:vm:human:test", + "display_name": "Test User", + "role": "auditor" + }, + "bundle_id": "pb-test-broken-chain", + "chain": { + "end": { + "root_hash": "blake3:66a2cca1d65c2fa7f89b6b7e4763dc1c8d4a248ed619882e8c8ad3077ae73363", + "timestamp": "2025-12-06T16:00:00.000Z", + "type": "document_download" + }, + "length": 3, + "ok": true, + "receipts": [ + { + "previous_hash": null, + "root_hash": "blake3:b6f2c11adac10c96111ae69e33fb3f45082942004c5d59f2c8568b96bc57d5aa", + "timestamp": "2025-12-06T14:00:00.000Z", + "type": "test_event_1" + }, + { + "previous_hash": "blake3:badcafebadcafebadcafebadcafebad0", + "root_hash": "blake3:42f374530d74cb007574720db2328497109a31dad191817dfb90d8a7d124173d", + "timestamp": "2025-12-06T15:00:00.000Z", + "type": "test_event_2" + }, + { + "previous_hash": "blake3:42f374530d74cb007574720db2328497109a31dad191817dfb90d8a7d124173d", + "root_hash": "blake3:66a2cca1d65c2fa7f89b6b7e4763dc1c8d4a248ed619882e8c8ad3077ae73363", + "timestamp": "2025-12-06T16:00:00.000Z", + "type": "document_download" + } + ], + "start": { + "root_hash": "blake3:b6f2c11adac10c96111ae69e33fb3f45082942004c5d59f2c8568b96bc57d5aa", + "timestamp": "2025-12-06T14:00:00.000Z", + "type": "test_event_1" + } + }, + "document": { + "category": "Testing", + "doc_id": "Test Document", + "filename": "test.docx" + }, + "generated_at": "2025-12-06T17:00:00.000Z", + "guardian_anchor": { + "anchor_by": "did:vm:guardian:test", + "anchor_epoch": null, + "anchor_id": "anchor-test", + "anchor_timestamp": null, + "root_hash": null, + "scroll_roots": {} + }, + "meta": { + "node": "test", + "requested_by_session": "test-session", + "requested_by_user": "test@example.com" + }, + "portal": { + "did": "did:vm:portal:test", + "instance": "test" + }, + "proofchain": { + "btc": { + "status": "not_anchored", + "txid": null + }, + "eth": { + "status": "not_anchored", + "txid": null + }, + "ots": { + "status": "not_anchored", + "timestamp_url": null + } + }, + "schema_version": "1.0.0" +} \ No newline at end of file diff --git a/testvectors/proofbundle/proofbundle-tampered-body.json b/testvectors/proofbundle/proofbundle-tampered-body.json new file mode 100644 index 0000000..fe44df5 --- /dev/null +++ b/testvectors/proofbundle/proofbundle-tampered-body.json @@ -0,0 +1,80 @@ +{ + "actor": { + "did": "did:vm:human:test", + 
"display_name": "Test User", + "role": "auditor" + }, + "bundle_id": "pb-test-tampered-body", + "chain": { + "end": { + "root_hash": "blake3:66a2cca1d65c2fa7f89b6b7e4763dc1c8d4a248ed619882e8c8ad3077ae73363", + "timestamp": "2025-12-06T16:00:00.000Z", + "type": "document_download" + }, + "length": 3, + "ok": true, + "receipts": [ + { + "previous_hash": null, + "root_hash": "blake3:b6f2c11adac10c96111ae69e33fb3f45082942004c5d59f2c8568b96bc57d5aa", + "timestamp": "2025-12-06T14:00:00.000Z", + "type": "test_event_1" + }, + { + "previous_hash": "blake3:b6f2c11adac10c96111ae69e33fb3f45082942004c5d59f2c8568b96bc57d5aa", + "root_hash": "blake3:42f374530d74cb007574720db2328497109a31dad191817dfb90d8a7d124173d", + "timestamp": "2099-01-01T00:00:00.000Z", + "type": "test_event_2" + }, + { + "previous_hash": "blake3:42f374530d74cb007574720db2328497109a31dad191817dfb90d8a7d124173d", + "root_hash": "blake3:66a2cca1d65c2fa7f89b6b7e4763dc1c8d4a248ed619882e8c8ad3077ae73363", + "timestamp": "2025-12-06T16:00:00.000Z", + "type": "document_download" + } + ], + "start": { + "root_hash": "blake3:b6f2c11adac10c96111ae69e33fb3f45082942004c5d59f2c8568b96bc57d5aa", + "timestamp": "2025-12-06T14:00:00.000Z", + "type": "test_event_1" + } + }, + "document": { + "category": "Testing", + "doc_id": "Test Document", + "filename": "test.docx" + }, + "generated_at": "2025-12-06T17:00:00.000Z", + "guardian_anchor": { + "anchor_by": "did:vm:guardian:test", + "anchor_epoch": null, + "anchor_id": "anchor-test", + "anchor_timestamp": null, + "root_hash": null, + "scroll_roots": {} + }, + "meta": { + "node": "test", + "requested_by_session": "test-session", + "requested_by_user": "test@example.com" + }, + "portal": { + "did": "did:vm:portal:test", + "instance": "test" + }, + "proofchain": { + "btc": { + "status": "not_anchored", + "txid": null + }, + "eth": { + "status": "not_anchored", + "txid": null + }, + "ots": { + "status": "not_anchored", + "timestamp_url": null + } + }, + "schema_version": "1.0.0" +} \ No newline at end of file diff --git a/testvectors/proofbundle/proofbundle-tampered-root.json b/testvectors/proofbundle/proofbundle-tampered-root.json new file mode 100644 index 0000000..63a08cf --- /dev/null +++ b/testvectors/proofbundle/proofbundle-tampered-root.json @@ -0,0 +1,80 @@ +{ + "actor": { + "did": "did:vm:human:test", + "display_name": "Test User", + "role": "auditor" + }, + "bundle_id": "pb-test-tampered-root", + "chain": { + "end": { + "root_hash": "blake3:66a2cca1d65c2fa7f89b6b7e4763dc1c8d4a248ed619882e8c8ad3077ae73363", + "timestamp": "2025-12-06T16:00:00.000Z", + "type": "document_download" + }, + "length": 3, + "ok": true, + "receipts": [ + { + "previous_hash": null, + "root_hash": "blake3:b6f2c11adac10c96111ae69e33fb3f45082942004c5d59f2c8568b96bc57d5aa", + "timestamp": "2025-12-06T14:00:00.000Z", + "type": "test_event_1" + }, + { + "previous_hash": "blake3:b6f2c11adac10c96111ae69e33fb3f45082942004c5d59f2c8568b96bc57d5aa", + "root_hash": "blake3:42f374530d74cb007574720db2328497109a31dad191817dfb90d8a7d124173d", + "timestamp": "2025-12-06T15:00:00.000Z", + "type": "test_event_2" + }, + { + "previous_hash": "blake3:42f374530d74cb007574720db2328497109a31dad191817dfb90d8a7d124173d", + "root_hash": "blake3:deadbeefdeadbeefdeadbeefdeadbeef", + "timestamp": "2025-12-06T16:00:00.000Z", + "type": "document_download" + } + ], + "start": { + "root_hash": "blake3:b6f2c11adac10c96111ae69e33fb3f45082942004c5d59f2c8568b96bc57d5aa", + "timestamp": "2025-12-06T14:00:00.000Z", + "type": "test_event_1" + } + }, + "document": 
{ + "category": "Testing", + "doc_id": "Test Document", + "filename": "test.docx" + }, + "generated_at": "2025-12-06T17:00:00.000Z", + "guardian_anchor": { + "anchor_by": "did:vm:guardian:test", + "anchor_epoch": null, + "anchor_id": "anchor-test", + "anchor_timestamp": null, + "root_hash": null, + "scroll_roots": {} + }, + "meta": { + "node": "test", + "requested_by_session": "test-session", + "requested_by_user": "test@example.com" + }, + "portal": { + "did": "did:vm:portal:test", + "instance": "test" + }, + "proofchain": { + "btc": { + "status": "not_anchored", + "txid": null + }, + "eth": { + "status": "not_anchored", + "txid": null + }, + "ots": { + "status": "not_anchored", + "timestamp_url": null + } + }, + "schema_version": "1.0.0" +} \ No newline at end of file diff --git a/testvectors/proofbundle/proofbundle-valid.json b/testvectors/proofbundle/proofbundle-valid.json new file mode 100644 index 0000000..c294eaa --- /dev/null +++ b/testvectors/proofbundle/proofbundle-valid.json @@ -0,0 +1,80 @@ +{ + "actor": { + "did": "did:vm:human:test", + "display_name": "Test User", + "role": "auditor" + }, + "bundle_id": "pb-test-valid", + "chain": { + "end": { + "root_hash": "blake3:66a2cca1d65c2fa7f89b6b7e4763dc1c8d4a248ed619882e8c8ad3077ae73363", + "timestamp": "2025-12-06T16:00:00.000Z", + "type": "document_download" + }, + "length": 3, + "ok": true, + "receipts": [ + { + "previous_hash": null, + "root_hash": "blake3:b6f2c11adac10c96111ae69e33fb3f45082942004c5d59f2c8568b96bc57d5aa", + "timestamp": "2025-12-06T14:00:00.000Z", + "type": "test_event_1" + }, + { + "previous_hash": "blake3:b6f2c11adac10c96111ae69e33fb3f45082942004c5d59f2c8568b96bc57d5aa", + "root_hash": "blake3:42f374530d74cb007574720db2328497109a31dad191817dfb90d8a7d124173d", + "timestamp": "2025-12-06T15:00:00.000Z", + "type": "test_event_2" + }, + { + "previous_hash": "blake3:42f374530d74cb007574720db2328497109a31dad191817dfb90d8a7d124173d", + "root_hash": "blake3:66a2cca1d65c2fa7f89b6b7e4763dc1c8d4a248ed619882e8c8ad3077ae73363", + "timestamp": "2025-12-06T16:00:00.000Z", + "type": "document_download" + } + ], + "start": { + "root_hash": "blake3:b6f2c11adac10c96111ae69e33fb3f45082942004c5d59f2c8568b96bc57d5aa", + "timestamp": "2025-12-06T14:00:00.000Z", + "type": "test_event_1" + } + }, + "document": { + "category": "Testing", + "doc_id": "Test Document", + "filename": "test.docx" + }, + "generated_at": "2025-12-06T17:00:00.000Z", + "guardian_anchor": { + "anchor_by": "did:vm:guardian:test", + "anchor_epoch": null, + "anchor_id": "anchor-test", + "anchor_timestamp": null, + "root_hash": null, + "scroll_roots": {} + }, + "meta": { + "node": "test", + "requested_by_session": "test-session", + "requested_by_user": "test@example.com" + }, + "portal": { + "did": "did:vm:portal:test", + "instance": "test" + }, + "proofchain": { + "btc": { + "status": "not_anchored", + "txid": null + }, + "eth": { + "status": "not_anchored", + "txid": null + }, + "ots": { + "status": "not_anchored", + "timestamp_url": null + } + }, + "schema_version": "1.0.0" +} \ No newline at end of file diff --git a/testvectors/sentinel/black-box-that-refused/README.md b/testvectors/sentinel/black-box-that-refused/README.md new file mode 100644 index 0000000..4b8923b --- /dev/null +++ b/testvectors/sentinel/black-box-that-refused/README.md @@ -0,0 +1,12 @@ +# Black Box That Refused (Sentinel v1 testvector) + +This directory is a deterministic, offline-verifiable Sentinel v1 seal bundle. 
+ +Scenario: A high-risk operation is attempted at 03:17 UTC and is denied with a ShadowReceipt (proof of restraint). + +Verify: + python3 ../../tools/vm_verify_sentinel_bundle.py --bundle . + +Notes: +- hash_algo: sha256 (no external dependencies) +- canonicalization_version: sentinel-event-jcs-v1 diff --git a/testvectors/sentinel/black-box-that-refused/integrity.json b/testvectors/sentinel/black-box-that-refused/integrity.json new file mode 100644 index 0000000..0ba009c --- /dev/null +++ b/testvectors/sentinel/black-box-that-refused/integrity.json @@ -0,0 +1,31 @@ +{ + "files": [ + { + "digest": "sha256:254b0df96e3ef1dd0639124eed3cebaa3947016e9f5b248bb2d959340f84ce88", + "path": "README.md", + "size_bytes": 422 + }, + { + "digest": "sha256:d35ad9da08534fe90840055668967f1892d69bfed7e2de7a5e37b0ae39229602", + "path": "receipts.jsonl", + "size_bytes": 3944 + }, + { + "digest": "sha256:8cbcf3b70d46dd3d79302489a0522d1468ac23ebdd1fcae5ea12643b6b909d92", + "path": "roots.txt", + "size_bytes": 460 + }, + { + "digest": "sha256:a95af94b8b0a5e3f507e423183ca2dcc2460423a847a437fe4da56126ea901a2", + "path": "seal.json", + "size_bytes": 736 + }, + { + "digest": "sha256:b68082e3fab021062c9084802c2438b74e8d3198caa8676e29af0b10be2baabd", + "path": "verifier_manifest.json", + "size_bytes": 239 + } + ], + "format": "vm-sentinel-integrity-v1", + "hash_algo": "sha256" +} diff --git a/testvectors/sentinel/black-box-that-refused/receipts.jsonl b/testvectors/sentinel/black-box-that-refused/receipts.jsonl new file mode 100644 index 0000000..3d8e57e --- /dev/null +++ b/testvectors/sentinel/black-box-that-refused/receipts.jsonl @@ -0,0 +1,5 @@ +{"actor":"did:vm:sentinel:demo","cap_hash":"none","event_hash":"sha256:1e90417fd3c5c703deb4c6a33f89b32cb17d579116b872ba4873b05080fdb6bc","event_id":"00000000-0000-4000-8000-000000000001","event_type":"boot_event","op":"sentinel.boot_event.v1","op_digest":"sha256:d5b3a96e7033a4d0dcf2b452826cbe950f80f354ee5166487312fba6fde10758","payload":{"params":{"canonicalization_version":"sentinel-event-jcs-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0"}},"prev_event_hash":"0","result":"ok","seq":0,"trace_id":"11111111-1111-4111-8111-111111111111","ts":"2025-03-17T03:17:40Z"} +{"actor":"did:vm:actor:cloudflare:ops","cap_hash":"none","event_hash":"sha256:28f1aa938399720d18a561be7aa376a5f08577bc700bf5348a8c3ce5a18e73a2","event_id":"00000000-0000-4000-8000-000000000002","event_type":"action_intent","op":"cloudflare.waf.apply_rule.v1","op_digest":"sha256:2054406539fc9904fd1f04ffe4c257174496f4c035a1a06b5156f888a9a84b3f","payload":{"params":{"action":"block","notes":"03:17 drift candidate","rule_id":"vm-demo-317","target":"api.example.com"},"policy":{"classification":"ambiguous","confidence_bp":7100,"note":"Ambiguous -> trending forbidden (03:17 incident)","reason_code":"invariant_drift"}},"prev_event_hash":"sha256:1e90417fd3c5c703deb4c6a33f89b32cb17d579116b872ba4873b05080fdb6bc","result":"ok","seq":1,"trace_id":"22222222-2222-4222-8222-222222222222","ts":"2025-03-17T03:17:42Z"} 
+{"actor":"did:vm:sentinel:demo","cap_hash":"none","event_hash":"sha256:63035ec50c6f983e1803358384e49a6d3e7ab5bf54f8ef1f19aeec874dd90e63","event_id":"00000000-0000-4000-8000-000000000003","event_type":"shadow_receipt","op":"cloudflare.waf.apply_rule.v1","op_digest":"sha256:2054406539fc9904fd1f04ffe4c257174496f4c035a1a06b5156f888a9a84b3f","payload":{"constraints_applied":{"mode":"DEGRADED","scopes_narrowed":["cloudflare.waf.read"]},"context_snapshot_hash":"sha256:e1d3e26c9b2c38c77d44d9fe7ee1d24e955ad0f07c457373edd545265d54d757","energy":{"benefit_bp":700,"debit_units":1000},"params":{"action":"block","notes":"03:17 drift candidate","rule_id":"vm-demo-317","target":"api.example.com"},"reason_code":"unsafe_context","reason_text":"Cost exceeded probabilistic benefit; invariant drift containment","side_effects":"none","would_have_done":{"op":"cloudflare.waf.apply_rule.v1","op_digest":"sha256:2054406539fc9904fd1f04ffe4c257174496f4c035a1a06b5156f888a9a84b3f"}},"prev_event_hash":"sha256:28f1aa938399720d18a561be7aa376a5f08577bc700bf5348a8c3ce5a18e73a2","result":"deny","seq":2,"trace_id":"22222222-2222-4222-8222-222222222222","ts":"2025-03-17T03:17:43Z"} +{"actor":"did:vm:sentinel:demo","cap_hash":"none","event_hash":"sha256:3ee6d2408d85c911ce3ea1af0bd8427a43910e10a6ffa22e1623b20b897fe487","event_id":"00000000-0000-4000-8000-000000000004","event_type":"root_published","op":"sentinel.root_published.v1","op_digest":"sha256:188abbdb94b993cf8e542d6dca2e760abe421698ebeda6bbac0c655cd6e21dda","payload":{"params":{"coverage_seq":2},"root_hex":"sha256:81841fe0288d58b04dcb9f1d3c11e84a246665befc67f93871f31b9ef2c8c9e3"},"prev_event_hash":"sha256:63035ec50c6f983e1803358384e49a6d3e7ab5bf54f8ef1f19aeec874dd90e63","result":"ok","seq":3,"trace_id":"33333333-3333-4333-8333-333333333333","ts":"2025-03-17T03:17:44Z"} +{"actor":"did:vm:sentinel:demo","cap_hash":"none","event_hash":"sha256:1034ffe70214290d7e0c7fd5ec85a0f44872bf0d6d2bd87e0d5b04ad2c2a1f4b","event_id":"00000000-0000-4000-8000-000000000005","event_type":"seal_created","op":"sentinel.seal_created.v1","op_digest":"sha256:1e2e86210bc67f26758ce3101c860672b743566cbad53dc4c3a61b319b58e4f3","payload":{"params":{"canonicalization_version":"sentinel-event-jcs-v1","hash_algo":"sha256","seal_id":"ouroboros_seal_20250317_031742Z_black_box_that_refused","since_seq":0,"until_seq":4}},"prev_event_hash":"sha256:3ee6d2408d85c911ce3ea1af0bd8427a43910e10a6ffa22e1623b20b897fe487","result":"ok","seq":4,"trace_id":"44444444-4444-4444-8444-444444444444","ts":"2025-03-17T03:17:45Z"} diff --git a/testvectors/sentinel/black-box-that-refused/roots.txt b/testvectors/sentinel/black-box-that-refused/roots.txt new file mode 100644 index 0000000..1b08bc0 --- /dev/null +++ b/testvectors/sentinel/black-box-that-refused/roots.txt @@ -0,0 +1,6 @@ +# Sentinel root history (seq -> merkle root) +seq=0 root=sha256:1e90417fd3c5c703deb4c6a33f89b32cb17d579116b872ba4873b05080fdb6bc +seq=1 root=sha256:701a1ae4b6c56b0508746a0f425947fa5ce3ed0554b9632d2ba993862d09553b +seq=2 root=sha256:81841fe0288d58b04dcb9f1d3c11e84a246665befc67f93871f31b9ef2c8c9e3 +seq=3 root=sha256:2ef9222e3c9fceae815837584c2eda262e64df3fa5c1960e42914fa1fdd1f9be +seq=4 root=sha256:b68479558afa41325a0a18f7087bca9549be90e61134bf5f584c598a956a6385 diff --git a/testvectors/sentinel/black-box-that-refused/seal.json b/testvectors/sentinel/black-box-that-refused/seal.json new file mode 100644 index 0000000..cf1f7d1 --- /dev/null +++ b/testvectors/sentinel/black-box-that-refused/seal.json @@ -0,0 +1 @@ 
+{"canonicalization_version":"sentinel-event-jcs-v1","created_at":"2025-03-17T03:17:42Z","files":{"integrity":"integrity.json","receipts":"receipts.jsonl","roots":"roots.txt","verifier_manifest":"verifier_manifest.json"},"format":"vm-sentinel-seal-v1","hash_algo":"sha256","instance_id":"did:vm:sentinel:demo","ledger_type":"jsonl","range":{"since_seq":0,"since_ts":"2025-03-17T03:17:40Z","until_seq":4,"until_ts":"2025-03-17T03:17:45Z"},"root":{"end":"sha256:b68479558afa41325a0a18f7087bca9549be90e61134bf5f584c598a956a6385","seq":4,"start":"sha256:2e1cfa82b035c26cbbbdae632cea070514eb8b773f616aaeaf668e2f0be8f10d"},"schema_version":"1.0.0","seal_id":"ouroboros_seal_20250317_031742Z_black_box_that_refused","sentinel_version":"0.1.0"} diff --git a/testvectors/sentinel/black-box-that-refused/verifier_manifest.json b/testvectors/sentinel/black-box-that-refused/verifier_manifest.json new file mode 100644 index 0000000..69a0a1a --- /dev/null +++ b/testvectors/sentinel/black-box-that-refused/verifier_manifest.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","format":"vm-sentinel-verifier-manifest-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0","verifier":{"name":"vm_verify_sentinel_bundle.py","version":"0.1.0"}} diff --git a/testvectors/sentinel/corruption-truncated-jsonl/README.md b/testvectors/sentinel/corruption-truncated-jsonl/README.md new file mode 100644 index 0000000..5385dc9 --- /dev/null +++ b/testvectors/sentinel/corruption-truncated-jsonl/README.md @@ -0,0 +1,6 @@ +# Corruption Drill: Truncated receipts.jsonl (Sentinel v1 testvector) + +Expected result: FAIL with E_SCHEMA_INVALID (malformed JSONL). + +Verify: + python3 ../../tools/vm_verify_sentinel_bundle.py --bundle . --strict diff --git a/testvectors/sentinel/corruption-truncated-jsonl/integrity.json b/testvectors/sentinel/corruption-truncated-jsonl/integrity.json new file mode 100644 index 0000000..70fc96d --- /dev/null +++ b/testvectors/sentinel/corruption-truncated-jsonl/integrity.json @@ -0,0 +1 @@ +{"files":[{"digest":"sha256:9ba07babfb5c19611b323c89a456b8968a1572f868b881b93f43da1dfb7deaaa","path":"README.md","size_bytes":214},{"digest":"sha256:964bfec3a37f7b0616a8eb919e3c34daeba665f68222743a075f24f90ec59764","path":"receipts.jsonl","size_bytes":3700},{"digest":"sha256:9683545f7c23da977fc54f7901c41459d012217aecb60c5544a35caf71238436","path":"roots.txt","size_bytes":460},{"digest":"sha256:2f29bb00c6da0ad7e967ea6ab0c7632fbe148ab222744647bf01dea5a94fc9f4","path":"seal.json","size_bytes":740},{"digest":"sha256:b68082e3fab021062c9084802c2438b74e8d3198caa8676e29af0b10be2baabd","path":"verifier_manifest.json","size_bytes":239}],"format":"vm-sentinel-integrity-v1","hash_algo":"sha256"} diff --git a/testvectors/sentinel/corruption-truncated-jsonl/receipts.jsonl b/testvectors/sentinel/corruption-truncated-jsonl/receipts.jsonl new file mode 100644 index 0000000..6b16690 --- /dev/null +++ b/testvectors/sentinel/corruption-truncated-jsonl/receipts.jsonl @@ -0,0 +1,5 @@ 
+{"actor":"did:vm:sentinel:demo","cap_hash":"none","event_hash":"sha256:521abeea9c029b319e4753fe28aa3900e2be0bad6609548bb33c742f3d1aeb76","event_id":"00000000-0000-4000-8000-000000000201","event_type":"boot_event","op":"sentinel.boot_event.v1","op_digest":"sha256:d5b3a96e7033a4d0dcf2b452826cbe950f80f354ee5166487312fba6fde10758","payload":{"params":{"canonicalization_version":"sentinel-event-jcs-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0"}},"prev_event_hash":"0","result":"ok","seq":0,"trace_id":"eeeeeeee-eeee-4eee-8eee-eeeeeeeeeeee","ts":"2025-03-17T03:17:40Z"} +{"actor":"did:vm:actor:cloudflare:ops","cap_hash":"none","event_hash":"sha256:99beb8c6cf0d90550f9bfbc63b49b497d906a4ba967a6c214feaa2abaa36f22e","event_id":"00000000-0000-4000-8000-000000000202","event_type":"action_intent","op":"cloudflare.waf.apply_rule.v1","op_digest":"sha256:2e6864e10c30e74398dd04b48322d13a21e535b9a1f1ab93425fa7cd4b04e000","payload":{"params":{"action":"block","notes":"corruption truncated jsonl vector","rule_id":"vm-demo-corruption","target":"api.example.com"}},"prev_event_hash":"sha256:521abeea9c029b319e4753fe28aa3900e2be0bad6609548bb33c742f3d1aeb76","result":"ok","seq":1,"trace_id":"ffffffff-ffff-4fff-8fff-ffffffffffff","ts":"2025-03-17T03:17:42Z"} +{"actor":"did:vm:sentinel:demo","cap_hash":"none","event_hash":"sha256:97545c07d685e6c41c52a11ca3b4ba4cba38f137c943f145e23678c1bdaac5a2","event_id":"00000000-0000-4000-8000-000000000203","event_type":"shadow_receipt","op":"cloudflare.waf.apply_rule.v1","op_digest":"sha256:2e6864e10c30e74398dd04b48322d13a21e535b9a1f1ab93425fa7cd4b04e000","payload":{"constraints_applied":{"mode":"DEGRADED"},"context_snapshot_hash":"sha256:5b1bb1b57322632ca395d30566fc58c31d96ff9a8ca5b353eee66921f3dafcd1","params":{"action":"block","notes":"corruption truncated jsonl vector","rule_id":"vm-demo-corruption","target":"api.example.com"},"reason_code":"integrity_degraded","reason_text":"corruption drill (expected truncation)","side_effects":"none","would_have_done":{"op":"cloudflare.waf.apply_rule.v1","op_digest":"sha256:2e6864e10c30e74398dd04b48322d13a21e535b9a1f1ab93425fa7cd4b04e000"}},"prev_event_hash":"sha256:99beb8c6cf0d90550f9bfbc63b49b497d906a4ba967a6c214feaa2abaa36f22e","result":"deny","seq":2,"trace_id":"ffffffff-ffff-4fff-8fff-ffffffffffff","ts":"2025-03-17T03:17:43Z"} +{"actor":"did:vm:sentinel:demo","cap_hash":"none","event_hash":"sha256:35195a1c29a356e4284d8b112d1751b68d7cf284486d570cd0f8154b31971155","event_id":"00000000-0000-4000-8000-000000000204","event_type":"root_published","op":"sentinel.root_published.v1","op_digest":"sha256:188abbdb94b993cf8e542d6dca2e760abe421698ebeda6bbac0c655cd6e21dda","payload":{"params":{"coverage_seq":2},"root_hex":"sha256:95d9228665adf0eec74481ef926a477ac163324cbbd374fd579aeb69b1222264"},"prev_event_hash":"sha256:97545c07d685e6c41c52a11ca3b4ba4cba38f137c943f145e23678c1bdaac5a2","result":"ok","seq":3,"trace_id":"99999999-9999-4999-8999-999999999999","ts":"2025-03-17T03:17:44Z"} 
+{"actor":"did:vm:sentinel:demo","cap_hash":"none","event_hash":"sha256:65d7cc94c55941060e631ab53afe9d9b41239f4b069c88d1276f8436100816f8","event_id":"00000000-0000-4000-8000-000000000205","event_type":"seal_created","op":"sentinel.seal_created.v1","op_digest":"sha256:4a48d5d29aa855839b0d1ff9747f7602d03b01ebdaa1fbcf491be59cbeaf1548","payload":{"params":{"canonicalization_version":"sentinel-event-jcs-v1","hash_algo":"sha256","seal_id":"ouroboros_seal_20250317_031742Z_corruption_truncated_jsonl","since_seq":0,"until_seq":4}},"prev_event_hash":"sha256:35195a1c29a356e4284d8b112d1751b68d7cf284486d570cd0f8154b31971155","result":"ok","seq":4,"trace_id":"88888888-8888-4888-8888-888888888888","ts" diff --git a/testvectors/sentinel/corruption-truncated-jsonl/roots.txt b/testvectors/sentinel/corruption-truncated-jsonl/roots.txt new file mode 100644 index 0000000..fa7452e --- /dev/null +++ b/testvectors/sentinel/corruption-truncated-jsonl/roots.txt @@ -0,0 +1,6 @@ +# Sentinel root history (seq -> merkle root) +seq=0 root=sha256:521abeea9c029b319e4753fe28aa3900e2be0bad6609548bb33c742f3d1aeb76 +seq=1 root=sha256:6da5f7f245313afb796712809339e325f2c8b91512fdce9b9f8e80c4b597cbd6 +seq=2 root=sha256:95d9228665adf0eec74481ef926a477ac163324cbbd374fd579aeb69b1222264 +seq=3 root=sha256:6485a940d63970bd1bba392d3260c7e6a46d67a5d8299e36f0d4691fdfdcde29 +seq=4 root=sha256:2859d129262ea2417678da4c21dde5c564ca2c0ae8124b55912a71eb45320a23 diff --git a/testvectors/sentinel/corruption-truncated-jsonl/seal.json b/testvectors/sentinel/corruption-truncated-jsonl/seal.json new file mode 100644 index 0000000..811de7e --- /dev/null +++ b/testvectors/sentinel/corruption-truncated-jsonl/seal.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","created_at":"2025-03-17T03:17:42Z","files":{"integrity":"integrity.json","receipts":"receipts.jsonl","roots":"roots.txt","verifier_manifest":"verifier_manifest.json"},"format":"vm-sentinel-seal-v1","hash_algo":"sha256","instance_id":"did:vm:sentinel:demo","ledger_type":"jsonl","range":{"since_seq":0,"since_ts":"2025-03-17T03:17:40Z","until_seq":4,"until_ts":"2025-03-17T03:17:45Z"},"root":{"end":"sha256:2859d129262ea2417678da4c21dde5c564ca2c0ae8124b55912a71eb45320a23","seq":4,"start":"sha256:2e1cfa82b035c26cbbbdae632cea070514eb8b773f616aaeaf668e2f0be8f10d"},"schema_version":"1.0.0","seal_id":"ouroboros_seal_20250317_031742Z_corruption_truncated_jsonl","sentinel_version":"0.1.0"} diff --git a/testvectors/sentinel/corruption-truncated-jsonl/verifier_manifest.json b/testvectors/sentinel/corruption-truncated-jsonl/verifier_manifest.json new file mode 100644 index 0000000..69a0a1a --- /dev/null +++ b/testvectors/sentinel/corruption-truncated-jsonl/verifier_manifest.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","format":"vm-sentinel-verifier-manifest-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0","verifier":{"name":"vm_verify_sentinel_bundle.py","version":"0.1.0"}} diff --git a/testvectors/sentinel/integrity-size-mismatch/README.md b/testvectors/sentinel/integrity-size-mismatch/README.md new file mode 100644 index 0000000..0b19910 --- /dev/null +++ b/testvectors/sentinel/integrity-size-mismatch/README.md @@ -0,0 +1,16 @@ +# Black Box That Refused (Sentinel v1 testvector) + +This directory is a deterministic, offline-verifiable Sentinel v1 seal bundle. + +Scenario: A high-risk operation is attempted at 03:17 UTC and is denied with a ShadowReceipt (proof of restraint). 
+ +Verify: + python3 ../../tools/vm_verify_sentinel_bundle.py --bundle . --strict + +Expected outcome: +- FAIL with `E_MANIFEST_HASH_MISMATCH` +- Violated contract_id: `I-4` (size_bytes mismatch) + +Notes: +- hash_algo: sha256 (no external dependencies) +- canonicalization_version: sentinel-event-jcs-v1 diff --git a/testvectors/sentinel/integrity-size-mismatch/integrity.json b/testvectors/sentinel/integrity-size-mismatch/integrity.json new file mode 100644 index 0000000..afc1b37 --- /dev/null +++ b/testvectors/sentinel/integrity-size-mismatch/integrity.json @@ -0,0 +1,31 @@ +{ + "files": [ + { + "digest": "sha256:254b0df96e3ef1dd0639124eed3cebaa3947016e9f5b248bb2d959340f84ce88", + "path": "README.md", + "size_bytes": 999 + }, + { + "digest": "sha256:d35ad9da08534fe90840055668967f1892d69bfed7e2de7a5e37b0ae39229602", + "path": "receipts.jsonl", + "size_bytes": 3944 + }, + { + "digest": "sha256:8cbcf3b70d46dd3d79302489a0522d1468ac23ebdd1fcae5ea12643b6b909d92", + "path": "roots.txt", + "size_bytes": 460 + }, + { + "digest": "sha256:a95af94b8b0a5e3f507e423183ca2dcc2460423a847a437fe4da56126ea901a2", + "path": "seal.json", + "size_bytes": 736 + }, + { + "digest": "sha256:b68082e3fab021062c9084802c2438b74e8d3198caa8676e29af0b10be2baabd", + "path": "verifier_manifest.json", + "size_bytes": 239 + } + ], + "format": "vm-sentinel-integrity-v1", + "hash_algo": "sha256" +} diff --git a/testvectors/sentinel/integrity-size-mismatch/receipts.jsonl b/testvectors/sentinel/integrity-size-mismatch/receipts.jsonl new file mode 100644 index 0000000..3d8e57e --- /dev/null +++ b/testvectors/sentinel/integrity-size-mismatch/receipts.jsonl @@ -0,0 +1,5 @@ +{"actor":"did:vm:sentinel:demo","cap_hash":"none","event_hash":"sha256:1e90417fd3c5c703deb4c6a33f89b32cb17d579116b872ba4873b05080fdb6bc","event_id":"00000000-0000-4000-8000-000000000001","event_type":"boot_event","op":"sentinel.boot_event.v1","op_digest":"sha256:d5b3a96e7033a4d0dcf2b452826cbe950f80f354ee5166487312fba6fde10758","payload":{"params":{"canonicalization_version":"sentinel-event-jcs-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0"}},"prev_event_hash":"0","result":"ok","seq":0,"trace_id":"11111111-1111-4111-8111-111111111111","ts":"2025-03-17T03:17:40Z"} +{"actor":"did:vm:actor:cloudflare:ops","cap_hash":"none","event_hash":"sha256:28f1aa938399720d18a561be7aa376a5f08577bc700bf5348a8c3ce5a18e73a2","event_id":"00000000-0000-4000-8000-000000000002","event_type":"action_intent","op":"cloudflare.waf.apply_rule.v1","op_digest":"sha256:2054406539fc9904fd1f04ffe4c257174496f4c035a1a06b5156f888a9a84b3f","payload":{"params":{"action":"block","notes":"03:17 drift candidate","rule_id":"vm-demo-317","target":"api.example.com"},"policy":{"classification":"ambiguous","confidence_bp":7100,"note":"Ambiguous -> trending forbidden (03:17 incident)","reason_code":"invariant_drift"}},"prev_event_hash":"sha256:1e90417fd3c5c703deb4c6a33f89b32cb17d579116b872ba4873b05080fdb6bc","result":"ok","seq":1,"trace_id":"22222222-2222-4222-8222-222222222222","ts":"2025-03-17T03:17:42Z"}
+{"actor":"did:vm:sentinel:demo","cap_hash":"none","event_hash":"sha256:63035ec50c6f983e1803358384e49a6d3e7ab5bf54f8ef1f19aeec874dd90e63","event_id":"00000000-0000-4000-8000-000000000003","event_type":"shadow_receipt","op":"cloudflare.waf.apply_rule.v1","op_digest":"sha256:2054406539fc9904fd1f04ffe4c257174496f4c035a1a06b5156f888a9a84b3f","payload":{"constraints_applied":{"mode":"DEGRADED","scopes_narrowed":["cloudflare.waf.read"]},"context_snapshot_hash":"sha256:e1d3e26c9b2c38c77d44d9fe7ee1d24e955ad0f07c457373edd545265d54d757","energy":{"benefit_bp":700,"debit_units":1000},"params":{"action":"block","notes":"03:17 drift candidate","rule_id":"vm-demo-317","target":"api.example.com"},"reason_code":"unsafe_context","reason_text":"Cost exceeded probabilistic benefit; invariant drift containment","side_effects":"none","would_have_done":{"op":"cloudflare.waf.apply_rule.v1","op_digest":"sha256:2054406539fc9904fd1f04ffe4c257174496f4c035a1a06b5156f888a9a84b3f"}},"prev_event_hash":"sha256:28f1aa938399720d18a561be7aa376a5f08577bc700bf5348a8c3ce5a18e73a2","result":"deny","seq":2,"trace_id":"22222222-2222-4222-8222-222222222222","ts":"2025-03-17T03:17:43Z"} +{"actor":"did:vm:sentinel:demo","cap_hash":"none","event_hash":"sha256:3ee6d2408d85c911ce3ea1af0bd8427a43910e10a6ffa22e1623b20b897fe487","event_id":"00000000-0000-4000-8000-000000000004","event_type":"root_published","op":"sentinel.root_published.v1","op_digest":"sha256:188abbdb94b993cf8e542d6dca2e760abe421698ebeda6bbac0c655cd6e21dda","payload":{"params":{"coverage_seq":2},"root_hex":"sha256:81841fe0288d58b04dcb9f1d3c11e84a246665befc67f93871f31b9ef2c8c9e3"},"prev_event_hash":"sha256:63035ec50c6f983e1803358384e49a6d3e7ab5bf54f8ef1f19aeec874dd90e63","result":"ok","seq":3,"trace_id":"33333333-3333-4333-8333-333333333333","ts":"2025-03-17T03:17:44Z"} +{"actor":"did:vm:sentinel:demo","cap_hash":"none","event_hash":"sha256:1034ffe70214290d7e0c7fd5ec85a0f44872bf0d6d2bd87e0d5b04ad2c2a1f4b","event_id":"00000000-0000-4000-8000-000000000005","event_type":"seal_created","op":"sentinel.seal_created.v1","op_digest":"sha256:1e2e86210bc67f26758ce3101c860672b743566cbad53dc4c3a61b319b58e4f3","payload":{"params":{"canonicalization_version":"sentinel-event-jcs-v1","hash_algo":"sha256","seal_id":"ouroboros_seal_20250317_031742Z_black_box_that_refused","since_seq":0,"until_seq":4}},"prev_event_hash":"sha256:3ee6d2408d85c911ce3ea1af0bd8427a43910e10a6ffa22e1623b20b897fe487","result":"ok","seq":4,"trace_id":"44444444-4444-4444-8444-444444444444","ts":"2025-03-17T03:17:45Z"} diff --git a/testvectors/sentinel/integrity-size-mismatch/roots.txt b/testvectors/sentinel/integrity-size-mismatch/roots.txt new file mode 100644 index 0000000..1b08bc0 --- /dev/null +++ b/testvectors/sentinel/integrity-size-mismatch/roots.txt @@ -0,0 +1,6 @@ +# Sentinel root history (seq -> merkle root) +seq=0 root=sha256:1e90417fd3c5c703deb4c6a33f89b32cb17d579116b872ba4873b05080fdb6bc +seq=1 root=sha256:701a1ae4b6c56b0508746a0f425947fa5ce3ed0554b9632d2ba993862d09553b +seq=2 root=sha256:81841fe0288d58b04dcb9f1d3c11e84a246665befc67f93871f31b9ef2c8c9e3 +seq=3 root=sha256:2ef9222e3c9fceae815837584c2eda262e64df3fa5c1960e42914fa1fdd1f9be +seq=4 root=sha256:b68479558afa41325a0a18f7087bca9549be90e61134bf5f584c598a956a6385 diff --git a/testvectors/sentinel/integrity-size-mismatch/seal.json b/testvectors/sentinel/integrity-size-mismatch/seal.json new file mode 100644 index 0000000..cf1f7d1 --- /dev/null +++ b/testvectors/sentinel/integrity-size-mismatch/seal.json @@ -0,0 +1 @@ 
+{"canonicalization_version":"sentinel-event-jcs-v1","created_at":"2025-03-17T03:17:42Z","files":{"integrity":"integrity.json","receipts":"receipts.jsonl","roots":"roots.txt","verifier_manifest":"verifier_manifest.json"},"format":"vm-sentinel-seal-v1","hash_algo":"sha256","instance_id":"did:vm:sentinel:demo","ledger_type":"jsonl","range":{"since_seq":0,"since_ts":"2025-03-17T03:17:40Z","until_seq":4,"until_ts":"2025-03-17T03:17:45Z"},"root":{"end":"sha256:b68479558afa41325a0a18f7087bca9549be90e61134bf5f584c598a956a6385","seq":4,"start":"sha256:2e1cfa82b035c26cbbbdae632cea070514eb8b773f616aaeaf668e2f0be8f10d"},"schema_version":"1.0.0","seal_id":"ouroboros_seal_20250317_031742Z_black_box_that_refused","sentinel_version":"0.1.0"} diff --git a/testvectors/sentinel/integrity-size-mismatch/verifier_manifest.json b/testvectors/sentinel/integrity-size-mismatch/verifier_manifest.json new file mode 100644 index 0000000..69a0a1a --- /dev/null +++ b/testvectors/sentinel/integrity-size-mismatch/verifier_manifest.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","format":"vm-sentinel-verifier-manifest-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0","verifier":{"name":"vm_verify_sentinel_bundle.py","version":"0.1.0"}} diff --git a/testvectors/sentinel/revocation-used-after-revoke/README.md b/testvectors/sentinel/revocation-used-after-revoke/README.md new file mode 100644 index 0000000..e1eb906 --- /dev/null +++ b/testvectors/sentinel/revocation-used-after-revoke/README.md @@ -0,0 +1,6 @@ +# Revocation Drill: Used after revoke (Sentinel v1 testvector) + +Expected result: FAIL with E_REVOKED_CAPABILITY_USED. + +Verify: + python3 ../../tools/vm_verify_sentinel_bundle.py --bundle . --strict diff --git a/testvectors/sentinel/revocation-used-after-revoke/integrity.json b/testvectors/sentinel/revocation-used-after-revoke/integrity.json new file mode 100644 index 0000000..35821a7 --- /dev/null +++ b/testvectors/sentinel/revocation-used-after-revoke/integrity.json @@ -0,0 +1 @@ +{"files":[{"digest":"sha256:3dcb8554c22c6342667f50455b173fca610a202b8c7cacbaa1d9454464f72c49","path":"README.md","size_bytes":198},{"digest":"sha256:1b27f570e1386d5c2001fdaa26ed8b39c69c8c5020e2fffa6f64b32d073f6ea8","path":"receipts.jsonl","size_bytes":3480},{"digest":"sha256:af9a834c78e5fb8b00cd155218bd7c07d877741ceb0460164b913fa0f7e41b62","path":"roots.txt","size_bytes":460},{"digest":"sha256:2f0c087927bf0cdda31c8308559809aafea5c55ffb87bad5100ebbaf7f63f4b9","path":"seal.json","size_bytes":742},{"digest":"sha256:b68082e3fab021062c9084802c2438b74e8d3198caa8676e29af0b10be2baabd","path":"verifier_manifest.json","size_bytes":239}],"format":"vm-sentinel-integrity-v1","hash_algo":"sha256"} diff --git a/testvectors/sentinel/revocation-used-after-revoke/receipts.jsonl b/testvectors/sentinel/revocation-used-after-revoke/receipts.jsonl new file mode 100644 index 0000000..2630e6d --- /dev/null +++ b/testvectors/sentinel/revocation-used-after-revoke/receipts.jsonl @@ -0,0 +1,5 @@ 
+{"actor":"did:vm:sentinel:demo","cap_hash":"none","event_hash":"sha256:488ff4a060d14db4e268930b232c201692989d47cbdad33a80fb6a4ae721c771","event_id":"00000000-0000-4000-8000-000000000301","event_type":"boot_event","op":"sentinel.boot_event.v1","op_digest":"sha256:d5b3a96e7033a4d0dcf2b452826cbe950f80f354ee5166487312fba6fde10758","payload":{"params":{"canonicalization_version":"sentinel-event-jcs-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0"}},"prev_event_hash":"0","result":"ok","seq":0,"trace_id":"12121212-1212-4121-8121-121212121212","ts":"2025-03-17T03:17:40Z"} +{"actor":"did:vm:authority:demo","cap_hash":"sha256:edae118e5b7d242976f087fe7d6c6ec95b85cc9da5dead4164083daeff9e7857","event_hash":"sha256:9ae139e5a8adcdc31bcaf1eea87ce98dd49628b24ed1b67818a326e9f97cc71c","event_id":"00000000-0000-4000-8000-000000000302","event_type":"cap_grant","op":"sentinel.cap_grant.v1","op_digest":"sha256:cf238b606b127aa0ac8809aa45218a9a23ae9842964d42638153ad98136ecc78","payload":{"params":{"cap_hash":"sha256:edae118e5b7d242976f087fe7d6c6ec95b85cc9da5dead4164083daeff9e7857","capability":{"aud":"did:vm:sentinel:demo","exp":"2026-01-01T00:00:00Z","jti":"cap-demo-1","nbf":"2025-01-01T00:00:00Z","scopes":["sentinel.demo.dangerous_op"],"sub":"did:vm:actor:demo"}}},"prev_event_hash":"sha256:488ff4a060d14db4e268930b232c201692989d47cbdad33a80fb6a4ae721c771","result":"ok","seq":1,"trace_id":"34343434-3434-4343-8343-343434343434","ts":"2025-03-17T03:17:41Z"} +{"actor":"did:vm:authority:demo","cap_hash":"none","event_hash":"sha256:9fbb5cb1f63c8b7a8459f4bc9b785857f11c937902732c4f46366b22f1c19e76","event_id":"00000000-0000-4000-8000-000000000303","event_type":"cap_revoke","op":"sentinel.cap_revoke.v1","op_digest":"sha256:9796668016c7a22c0fee3648769a09b97a02af9cda234c1e3cf7cb007ce3fcf5","payload":{"params":{"reason_code":"operator_revoked","revoked_cap_hash":"sha256:edae118e5b7d242976f087fe7d6c6ec95b85cc9da5dead4164083daeff9e7857"}},"prev_event_hash":"sha256:9ae139e5a8adcdc31bcaf1eea87ce98dd49628b24ed1b67818a326e9f97cc71c","result":"ok","seq":2,"trace_id":"34343434-3434-4343-8343-343434343434","ts":"2025-03-17T03:17:42Z"} +{"actor":"did:vm:actor:demo","cap_hash":"sha256:edae118e5b7d242976f087fe7d6c6ec95b85cc9da5dead4164083daeff9e7857","event_hash":"sha256:e1dfc490106b6ae2f8316c16b9c655c7e899fe96a9a78f86dae30ca2efb54152","event_id":"00000000-0000-4000-8000-000000000304","event_type":"action_intent","op":"sentinel.demo.dangerous_op.v1","op_digest":"sha256:0091cfd21b0717922d234b13278067aec65828e7dd0090b189e7ef72dec88f95","payload":{"params":{"action":"override","target":"demo-device"}},"prev_event_hash":"sha256:9fbb5cb1f63c8b7a8459f4bc9b785857f11c937902732c4f46366b22f1c19e76","result":"ok","seq":3,"trace_id":"56565656-5656-4565-8565-565656565656","ts":"2025-03-17T03:17:43Z"} +{"actor":"did:vm:actor:demo","cap_hash":"sha256:edae118e5b7d242976f087fe7d6c6ec95b85cc9da5dead4164083daeff9e7857","event_hash":"sha256:987ff91a4cbec21b112950efb645fd62ff9021121ab8d91e25a932c5d706e58e","event_id":"00000000-0000-4000-8000-000000000305","event_type":"action_executed","op":"sentinel.demo.dangerous_op.v1","op_digest":"sha256:0091cfd21b0717922d234b13278067aec65828e7dd0090b189e7ef72dec88f95","payload":{"params":{"action":"override","target":"demo-device"}},"prev_event_hash":"sha256:e1dfc490106b6ae2f8316c16b9c655c7e899fe96a9a78f86dae30ca2efb54152","result":"ok","seq":4,"trace_id":"56565656-5656-4565-8565-565656565656","ts":"2025-03-17T03:17:44Z"} diff --git 
a/testvectors/sentinel/revocation-used-after-revoke/roots.txt b/testvectors/sentinel/revocation-used-after-revoke/roots.txt new file mode 100644 index 0000000..8eaf75a --- /dev/null +++ b/testvectors/sentinel/revocation-used-after-revoke/roots.txt @@ -0,0 +1,6 @@ +# Sentinel root history (seq -> merkle root) +seq=0 root=sha256:488ff4a060d14db4e268930b232c201692989d47cbdad33a80fb6a4ae721c771 +seq=1 root=sha256:dd1d833faf0dc4551432aa9ed62038f8d438fc580dbbad848cca4decd40fdaa1 +seq=2 root=sha256:93af676f38d884cf894244d379b46a7bd840df1364b0badac33649f9c2f77143 +seq=3 root=sha256:686f07fd9a782cee48adbef5b39ec2665c014624581bc00f78eddfae719074fd +seq=4 root=sha256:98a5bee2349e9cb97733697dfb008df2b9324a12812e323a3158dbdf8d3d95d6 diff --git a/testvectors/sentinel/revocation-used-after-revoke/seal.json b/testvectors/sentinel/revocation-used-after-revoke/seal.json new file mode 100644 index 0000000..a161ebf --- /dev/null +++ b/testvectors/sentinel/revocation-used-after-revoke/seal.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","created_at":"2025-03-17T03:17:42Z","files":{"integrity":"integrity.json","receipts":"receipts.jsonl","roots":"roots.txt","verifier_manifest":"verifier_manifest.json"},"format":"vm-sentinel-seal-v1","hash_algo":"sha256","instance_id":"did:vm:sentinel:demo","ledger_type":"jsonl","range":{"since_seq":0,"since_ts":"2025-03-17T03:17:40Z","until_seq":4,"until_ts":"2025-03-17T03:17:44Z"},"root":{"end":"sha256:98a5bee2349e9cb97733697dfb008df2b9324a12812e323a3158dbdf8d3d95d6","seq":4,"start":"sha256:2e1cfa82b035c26cbbbdae632cea070514eb8b773f616aaeaf668e2f0be8f10d"},"schema_version":"1.0.0","seal_id":"ouroboros_seal_20250317_031742Z_revocation_used_after_revoke","sentinel_version":"0.1.0"} diff --git a/testvectors/sentinel/revocation-used-after-revoke/verifier_manifest.json b/testvectors/sentinel/revocation-used-after-revoke/verifier_manifest.json new file mode 100644 index 0000000..69a0a1a --- /dev/null +++ b/testvectors/sentinel/revocation-used-after-revoke/verifier_manifest.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","format":"vm-sentinel-verifier-manifest-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0","verifier":{"name":"vm_verify_sentinel_bundle.py","version":"0.1.0"}} diff --git a/testvectors/sentinel/rollback-duplicate-seq/README.md b/testvectors/sentinel/rollback-duplicate-seq/README.md new file mode 100644 index 0000000..0a80655 --- /dev/null +++ b/testvectors/sentinel/rollback-duplicate-seq/README.md @@ -0,0 +1,6 @@ +# Rollback Attempt: Duplicate seq (Sentinel v1 testvector) + +Expected result: FAIL with E_SEQ_NON_MONOTONIC. + +Verify: + python3 ../../tools/vm_verify_sentinel_bundle.py --bundle . 
--strict diff --git a/testvectors/sentinel/rollback-duplicate-seq/integrity.json b/testvectors/sentinel/rollback-duplicate-seq/integrity.json new file mode 100644 index 0000000..340de0d --- /dev/null +++ b/testvectors/sentinel/rollback-duplicate-seq/integrity.json @@ -0,0 +1 @@ +{"files":[{"digest":"sha256:1c94f9b6b1f23bd13cf311c65961449ee6bdf50dbb735759429b6e41df58435b","path":"README.md","size_bytes":188},{"digest":"sha256:c7bb7f7c52f7e5e3b033cca6313c821119a5dd9cdb77670c0e92016e5517fbb7","path":"receipts.jsonl","size_bytes":3828},{"digest":"sha256:0d5f7123eccf92c926057c496cd6c47f8848c3fd257c04f16bb68449341c12ae","path":"roots.txt","size_bytes":460},{"digest":"sha256:c1d7376fc8901beafed447395686dd707afa4e1687eba141d9217f2bb40b9834","path":"seal.json","size_bytes":736},{"digest":"sha256:b68082e3fab021062c9084802c2438b74e8d3198caa8676e29af0b10be2baabd","path":"verifier_manifest.json","size_bytes":239}],"format":"vm-sentinel-integrity-v1","hash_algo":"sha256"} diff --git a/testvectors/sentinel/rollback-duplicate-seq/receipts.jsonl b/testvectors/sentinel/rollback-duplicate-seq/receipts.jsonl new file mode 100644 index 0000000..d6de7bc --- /dev/null +++ b/testvectors/sentinel/rollback-duplicate-seq/receipts.jsonl @@ -0,0 +1,5 @@ +{"actor":"did:vm:sentinel:demo","cap_hash":"none","event_hash":"sha256:4b86de283ae784282990c57acd5aeb49b6695206a5c66d555342e1770d0fc1c3","event_id":"00000000-0000-4000-8000-000000000101","event_type":"boot_event","op":"sentinel.boot_event.v1","op_digest":"sha256:d5b3a96e7033a4d0dcf2b452826cbe950f80f354ee5166487312fba6fde10758","payload":{"params":{"canonicalization_version":"sentinel-event-jcs-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0"}},"prev_event_hash":"0","result":"ok","seq":0,"trace_id":"aaaaaaaa-aaaa-4aaa-8aaa-aaaaaaaaaaaa","ts":"2025-03-17T03:17:40Z"} +{"actor":"did:vm:actor:cloudflare:ops","cap_hash":"none","event_hash":"sha256:2550e78926b563d3f717cae0bca598ca8454837ce0759ba082321d867930c24d","event_id":"00000000-0000-4000-8000-000000000102","event_type":"action_intent","op":"cloudflare.waf.apply_rule.v1","op_digest":"sha256:830189862cfdd06230cd4d5d9f94d1b2bcbdddf451915d3be781b1e7aa5eaa55","payload":{"params":{"action":"block","notes":"rollback duplicate seq vector","rule_id":"vm-demo-rollback","target":"api.example.com"},"policy":{"classification":"ambiguous","confidence_bp":7100,"note":"duplicate seq rollback vector","reason_code":"invariant_drift"}},"prev_event_hash":"sha256:4b86de283ae784282990c57acd5aeb49b6695206a5c66d555342e1770d0fc1c3","result":"ok","seq":1,"trace_id":"bbbbbbbb-bbbb-4bbb-8bbb-bbbbbbbbbbbb","ts":"2025-03-17T03:17:42Z"} +{"actor":"did:vm:sentinel:demo","cap_hash":"none","event_hash":"sha256:d080048781ef2f067775cf93db816add1c2f7b88e7a52feea790b3a483c1ec6c","event_id":"00000000-0000-4000-8000-000000000103","event_type":"shadow_receipt","op":"cloudflare.waf.apply_rule.v1","op_digest":"sha256:830189862cfdd06230cd4d5d9f94d1b2bcbdddf451915d3be781b1e7aa5eaa55","payload":{"constraints_applied":{"mode":"DEGRADED"},"context_snapshot_hash":"sha256:23ad3979556e46fe6dfcbeda7099b8d9f020d96780bbce2fdab9072863a73b6d","params":{"action":"block","notes":"rollback duplicate seq vector","rule_id":"vm-demo-rollback","target":"api.example.com"},"reason_code":"unsafe_context","reason_text":"duplicate seq rollback 
attempt","side_effects":"none","would_have_done":{"op":"cloudflare.waf.apply_rule.v1","op_digest":"sha256:830189862cfdd06230cd4d5d9f94d1b2bcbdddf451915d3be781b1e7aa5eaa55"}},"prev_event_hash":"sha256:2550e78926b563d3f717cae0bca598ca8454837ce0759ba082321d867930c24d","result":"deny","seq":2,"trace_id":"bbbbbbbb-bbbb-4bbb-8bbb-bbbbbbbbbbbb","ts":"2025-03-17T03:17:43Z"} +{"actor":"did:vm:sentinel:demo","cap_hash":"none","event_hash":"sha256:606160c5e082497e35ce0de57dd0b1b34ab37397ab91f3bbbf0269e002fd821f","event_id":"00000000-0000-4000-8000-000000000104","event_type":"root_published","op":"sentinel.root_published.v1","op_digest":"sha256:188abbdb94b993cf8e542d6dca2e760abe421698ebeda6bbac0c655cd6e21dda","payload":{"params":{"coverage_seq":2},"root_hex":"sha256:6f7088c38a3a599d173c4d4c5aff095fd149c489b29f8a54c230582826128bff"},"prev_event_hash":"sha256:d080048781ef2f067775cf93db816add1c2f7b88e7a52feea790b3a483c1ec6c","result":"ok","seq":2,"trace_id":"cccccccc-cccc-4ccc-8ccc-cccccccccccc","ts":"2025-03-17T03:17:44Z"} +{"actor":"did:vm:sentinel:demo","cap_hash":"none","event_hash":"sha256:9a423be7b6eded215e3c3ca1aabed3d5b4091530fe965b0934fe4231f136bc89","event_id":"00000000-0000-4000-8000-000000000105","event_type":"seal_created","op":"sentinel.seal_created.v1","op_digest":"sha256:eca1b25ebda9ce43a0c87d2dc7028d36ab8d21323a9189e4003b91e078f064ba","payload":{"params":{"canonicalization_version":"sentinel-event-jcs-v1","hash_algo":"sha256","seal_id":"ouroboros_seal_20250317_031742Z_rollback_duplicate_seq","since_seq":0,"until_seq":4}},"prev_event_hash":"sha256:606160c5e082497e35ce0de57dd0b1b34ab37397ab91f3bbbf0269e002fd821f","result":"ok","seq":4,"trace_id":"dddddddd-dddd-4ddd-8ddd-dddddddddddd","ts":"2025-03-17T03:17:45Z"} diff --git a/testvectors/sentinel/rollback-duplicate-seq/roots.txt b/testvectors/sentinel/rollback-duplicate-seq/roots.txt new file mode 100644 index 0000000..8edbd65 --- /dev/null +++ b/testvectors/sentinel/rollback-duplicate-seq/roots.txt @@ -0,0 +1,6 @@ +# Sentinel root history (seq -> merkle root) +seq=0 root=sha256:4b86de283ae784282990c57acd5aeb49b6695206a5c66d555342e1770d0fc1c3 +seq=1 root=sha256:79c749b3f5667aa3005ad8e92f065d09f17645771b2f4732c0c5f56a52ee50ea +seq=2 root=sha256:6f7088c38a3a599d173c4d4c5aff095fd149c489b29f8a54c230582826128bff +seq=2 root=sha256:b518588137a128e2298fdb0ec044def5ce4164b94dc18065753912090f91c400 +seq=4 root=sha256:ede022904832f7ebfbd8a9e0b14391158f9d9502e1c04532560ed2e4a5661ad3 diff --git a/testvectors/sentinel/rollback-duplicate-seq/seal.json b/testvectors/sentinel/rollback-duplicate-seq/seal.json new file mode 100644 index 0000000..9d35917 --- /dev/null +++ b/testvectors/sentinel/rollback-duplicate-seq/seal.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","created_at":"2025-03-17T03:17:42Z","files":{"integrity":"integrity.json","receipts":"receipts.jsonl","roots":"roots.txt","verifier_manifest":"verifier_manifest.json"},"format":"vm-sentinel-seal-v1","hash_algo":"sha256","instance_id":"did:vm:sentinel:demo","ledger_type":"jsonl","range":{"since_seq":0,"since_ts":"2025-03-17T03:17:40Z","until_seq":4,"until_ts":"2025-03-17T03:17:45Z"},"root":{"end":"sha256:ede022904832f7ebfbd8a9e0b14391158f9d9502e1c04532560ed2e4a5661ad3","seq":4,"start":"sha256:2e1cfa82b035c26cbbbdae632cea070514eb8b773f616aaeaf668e2f0be8f10d"},"schema_version":"1.0.0","seal_id":"ouroboros_seal_20250317_031742Z_rollback_duplicate_seq","sentinel_version":"0.1.0"} diff --git a/testvectors/sentinel/rollback-duplicate-seq/verifier_manifest.json 
b/testvectors/sentinel/rollback-duplicate-seq/verifier_manifest.json new file mode 100644 index 0000000..69a0a1a --- /dev/null +++ b/testvectors/sentinel/rollback-duplicate-seq/verifier_manifest.json @@ -0,0 +1 @@ +{"canonicalization_version":"sentinel-event-jcs-v1","format":"vm-sentinel-verifier-manifest-v1","hash_algo":"sha256","schema_version":"1.0.0","sentinel_version":"0.1.0","verifier":{"name":"vm_verify_sentinel_bundle.py","version":"0.1.0"}} diff --git a/tools/check_sentinel_contract_parity.py b/tools/check_sentinel_contract_parity.py new file mode 100644 index 0000000..2f3f753 --- /dev/null +++ b/tools/check_sentinel_contract_parity.py @@ -0,0 +1,123 @@ +#!/usr/bin/env python3 +""" +check_sentinel_contract_parity.py + +Ensures Sentinel v1 contracts (docs) and verifier implementation stay aligned: +- Every FailureCode is referenced by the verifier implementation. +- Every FailureCode is referenced by the contract matrix. +- The contract matrix does not reference unknown E_/W_ codes. + +Usage: + python3 tools/check_sentinel_contract_parity.py +""" + +from __future__ import annotations + +import re +import sys +from pathlib import Path + +from sentinel_failure_codes import FailureCode, WarningCode + +REPO_ROOT = Path(__file__).resolve().parents[1] +VERIFIER_PATH = REPO_ROOT / "tools" / "vm_verify_sentinel_bundle.py" +CONTRACT_MATRIX_PATH = REPO_ROOT / "spec" / "SENTINEL_V1_CONTRACT_MATRIX.md" +SEMANTICS_PATH = REPO_ROOT / "spec" / "SENTINEL_FAILURE_CODE_SEMANTICS.md" + + +def _read_text(path: Path) -> str: + return path.read_text(encoding="utf-8") + + +def main() -> int: + errors: list[str] = [] + + verifier = _read_text(VERIFIER_PATH) + matrix = _read_text(CONTRACT_MATRIX_PATH) + semantics = _read_text(SEMANTICS_PATH) + + known_failure_values = {c.value for c in FailureCode} + known_warning_values = {c.value for c in WarningCode} + + # 1) Every FailureCode is referenced by the verifier code. + for code in FailureCode: + needle = f"FailureCode.{code.name}" + if needle not in verifier: + errors.append(f"Verifier does not reference {needle} ({code.value})") + + # 1b) Every WarningCode is referenced by the verifier code. + for code in WarningCode: + needle = f"WarningCode.{code.name}" + if needle not in verifier: + errors.append(f"Verifier does not reference {needle} ({code.value})") + + # 2) Every FailureCode value appears in the contract matrix. + for code in FailureCode: + if code.value not in matrix: + errors.append( + f"Contract matrix does not reference failure code {code.value}" + ) + + # 2b) Every WarningCode value appears in the contract matrix. + for code in WarningCode: + if code.value not in matrix: + errors.append( + f"Contract matrix does not reference warning code {code.value}" + ) + + # 3) Contract matrix does not contain unknown E_/W_ codes. + referenced_failures = set(re.findall(r"\bE_[A-Z_]+\b", matrix)) + referenced_warnings = set(re.findall(r"\bW_[A-Z_]+\b", matrix)) + + unknown_failures = sorted(referenced_failures - known_failure_values) + unknown_warnings = sorted(referenced_warnings - known_warning_values) + + if unknown_failures: + errors.append( + "Contract matrix references unknown failure codes: " + + ", ".join(unknown_failures) + ) + if unknown_warnings: + errors.append( + "Contract matrix references unknown warning codes: " + + ", ".join(unknown_warnings) + ) + + # 4) Semantics doc must cover all codes. 
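+    # Coverage here is a literal substring check on each code's value (e.g.
+    # "E_ROOT_MISMATCH") appearing in the document, the same mechanism as the
+    # contract-matrix checks above.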
+ for code in FailureCode: + if code.value not in semantics: + errors.append(f"Failure code semantics doc does not reference {code.value}") + + for code in WarningCode: + if code.value not in semantics: + errors.append(f"Warning code semantics doc does not reference {code.value}") + + # 5) Semantics doc does not contain unknown E_/W_ codes. + referenced_failures = set(re.findall(r"\bE_[A-Z_]+\b", semantics)) + referenced_warnings = set(re.findall(r"\bW_[A-Z_]+\b", semantics)) + + unknown_failures = sorted(referenced_failures - known_failure_values) + unknown_warnings = sorted(referenced_warnings - known_warning_values) + + if unknown_failures: + errors.append( + "Failure code semantics doc references unknown failure codes: " + + ", ".join(unknown_failures) + ) + if unknown_warnings: + errors.append( + "Failure code semantics doc references unknown warning codes: " + + ", ".join(unknown_warnings) + ) + + if errors: + for e in errors: + print(f"[FAIL] {e}", file=sys.stderr) + return 1 + + print("[OK] Sentinel contract parity verified") + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/tools/make_proofbundle_testvectors.py b/tools/make_proofbundle_testvectors.py new file mode 100755 index 0000000..8d622cc --- /dev/null +++ b/tools/make_proofbundle_testvectors.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python3 +""" +Generate ProofBundle conformance test vectors from a known-good bundle. + +Usage: + python3 make_proofbundle_testvectors.py \\ + /root/work/vaultmesh/proofbundle-sample.json \\ + /root/work/vaultmesh/testvectors/proofbundle + +This script takes a valid ProofBundle and creates: +- proofbundle-valid.json (copy of original with normalized bundle_id) +- proofbundle-tampered-body.json (timestamp modified without updating root_hash) +- proofbundle-tampered-root.json (wrong root_hash in a receipt) +- proofbundle-broken-chain.json (previous_hash mismatch) +""" + +import copy +import json +import sys +from pathlib import Path + + +def load_bundle(path: Path) -> dict: + with path.open("r", encoding="utf-8") as f: + return json.load(f) + + +def save_bundle(bundle: dict, path: Path) -> None: + path.write_text( + json.dumps(bundle, indent=2, sort_keys=True, ensure_ascii=False), + encoding="utf-8", + ) + print(f"[+] wrote {path}") + + +def make_valid(bundle: dict) -> dict: + """Create a normalized copy as the valid reference.""" + out = copy.deepcopy(bundle) + out["bundle_id"] = "pb-test-valid" + return out + + +def make_tampered_body(bundle: dict) -> dict: + """Tamper a receipt's timestamp without updating root_hash.""" + out = copy.deepcopy(bundle) + out["bundle_id"] = "pb-test-tampered-body" + receipts = out.get("chain", {}).get("receipts", []) + if len(receipts) >= 2: + # Modify the second receipt's timestamp (the root_hash will be wrong) + receipts[1]["timestamp"] = "2099-01-01T00:00:00.000Z" + return out + + +def make_tampered_root(bundle: dict) -> dict: + """Replace a receipt's root_hash with a clearly wrong value.""" + out = copy.deepcopy(bundle) + out["bundle_id"] = "pb-test-tampered-root" + receipts = out.get("chain", {}).get("receipts", []) + if receipts: + # Tamper the last receipt's root_hash + receipts[-1]["root_hash"] = "blake3:deadbeefdeadbeefdeadbeefdeadbeef" + return out + + +def make_broken_chain(bundle: dict) -> dict: + """Break the chain linkage via previous_hash mismatch.""" + out = copy.deepcopy(bundle) + out["bundle_id"] = "pb-test-broken-chain" + receipts = out.get("chain", {}).get("receipts", []) + if len(receipts) >= 2: + # Break linkage at 
receipt[1] + receipts[1]["previous_hash"] = "blake3:badcafebadcafebadcafebadcafebad0" + return out + + +def main(): + if len(sys.argv) != 3: + print( + "Usage: make_proofbundle_testvectors.py INPUT_BUNDLE OUTPUT_DIR", + file=sys.stderr, + ) + sys.exit(1) + + src = Path(sys.argv[1]) + dest_dir = Path(sys.argv[2]) + dest_dir.mkdir(parents=True, exist_ok=True) + + base = load_bundle(src) + + save_bundle(make_valid(base), dest_dir / "proofbundle-valid.json") + save_bundle(make_tampered_body(base), dest_dir / "proofbundle-tampered-body.json") + save_bundle(make_tampered_root(base), dest_dir / "proofbundle-tampered-root.json") + save_bundle(make_broken_chain(base), dest_dir / "proofbundle-broken-chain.json") + + print(f"\n[OK] Generated 4 test vectors in {dest_dir}") + + +if __name__ == "__main__": + main() diff --git a/tools/run_sentinel_testvectors.sh b/tools/run_sentinel_testvectors.sh new file mode 100755 index 0000000..c257050 --- /dev/null +++ b/tools/run_sentinel_testvectors.sh @@ -0,0 +1,110 @@ +#!/usr/bin/env bash +set -euo pipefail + +ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +VERIFIER="$ROOT/tools/vm_verify_sentinel_bundle.py" +TV_DIR="$ROOT/testvectors/sentinel" + +if [[ ! -x "$VERIFIER" && ! -f "$VERIFIER" ]]; then + echo "[ERROR] Verifier not found: $VERIFIER" >&2 + exit 2 +fi + +if [[ ! -d "$TV_DIR" ]]; then + echo "[ERROR] Testvector directory not found: $TV_DIR" >&2 + exit 2 +fi + +expected_exit_for() { + case "$1" in + black-box-that-refused) echo 0 ;; + rollback-duplicate-seq) echo 1 ;; + corruption-truncated-jsonl) echo 1 ;; + revocation-used-after-revoke) echo 1 ;; + integrity-size-mismatch) echo 1 ;; + *) echo "" ;; + esac +} + +expected_code_for() { + case "$1" in + rollback-duplicate-seq) echo "E_SEQ_NON_MONOTONIC" ;; + corruption-truncated-jsonl) echo "E_SCHEMA_INVALID" ;; + revocation-used-after-revoke) echo "E_REVOKED_CAPABILITY_USED" ;; + integrity-size-mismatch) echo "E_MANIFEST_HASH_MISMATCH" ;; + *) echo "" ;; + esac +} + +missing=0 + +for dir in "$TV_DIR"/*; do + [[ -d "$dir" ]] || continue + name="$(basename "$dir")" + + expected_exit="$(expected_exit_for "$name")" + if [[ -z "$expected_exit" ]]; then + echo "[ERROR] Unknown testvector: $name" >&2 + missing=1 + continue + fi + + report1="$(mktemp)" + report2="$(mktemp)" + + set +e + python3 "$VERIFIER" --bundle "$dir" --strict --report "$report1" >/dev/null 2>/dev/null + status=$? + set -e + + if [[ "$status" -ne "$expected_exit" ]]; then + echo "[FAIL] $name: exit=$status expected=$expected_exit" >&2 + python3 "$VERIFIER" --bundle "$dir" --strict --report "$report1" >&2 || true + exit 1 + fi + + failure_code="$(python3 -c "import json; p='$report1'; fc=json.load(open(p,'r',encoding='utf-8')).get('failure_code'); print('' if fc is None else fc)")" + + if [[ "$expected_exit" -eq 0 ]]; then + if [[ -n "$failure_code" ]]; then + echo "[FAIL] $name: expected PASS but failure_code=$failure_code" >&2 + exit 1 + fi + else + expected_code="$(expected_code_for "$name")" + if [[ -z "$expected_code" ]]; then + echo "[ERROR] Missing EXPECT_CODE mapping for failing vector: $name" >&2 + exit 2 + fi + if [[ "$failure_code" != "$expected_code" ]]; then + echo "[FAIL] $name: failure_code=$failure_code expected=$expected_code" >&2 + exit 1 + fi + fi + + # Determinism: same inputs -> byte-identical report. + set +e + python3 "$VERIFIER" --bundle "$dir" --strict --report "$report2" >/dev/null 2>/dev/null + status2=$? 
+ set -e + + if [[ "$status2" -ne "$expected_exit" ]]; then + echo "[FAIL] $name: second run exit=$status2 expected=$expected_exit" >&2 + exit 1 + fi + + if ! cmp -s "$report1" "$report2"; then + echo "[FAIL] $name: report is not deterministic" >&2 + diff -u "$report1" "$report2" | head -n 200 >&2 || true + exit 1 + fi + + rm -f "$report1" "$report2" + echo "[OK] $name" +done + +if [[ "$missing" -ne 0 ]]; then + exit 2 +fi + +echo "[OK] All Sentinel testvectors verified" diff --git a/tools/sentinel_failure_codes.py b/tools/sentinel_failure_codes.py new file mode 100644 index 0000000..e5559c8 --- /dev/null +++ b/tools/sentinel_failure_codes.py @@ -0,0 +1,22 @@ +from __future__ import annotations + +from enum import Enum + + +class FailureCode(str, Enum): + SCHEMA_INVALID = "E_SCHEMA_INVALID" + MANIFEST_HASH_MISMATCH = "E_MANIFEST_HASH_MISMATCH" + MISSING_REQUIRED_FILE = "E_MISSING_REQUIRED_FILE" + EVENT_HASH_MISMATCH = "E_EVENT_HASH_MISMATCH" + CHAIN_DISCONTINUITY = "E_CHAIN_DISCONTINUITY" + SEQ_NON_MONOTONIC = "E_SEQ_NON_MONOTONIC" + ROOT_MISMATCH = "E_ROOT_MISMATCH" + RANGE_MISMATCH = "E_RANGE_MISMATCH" + CANON_VERSION_UNSUPPORTED = "E_CANON_VERSION_UNSUPPORTED" + OVERSIZE_INPUT = "E_OVERSIZE_INPUT" + REVOKED_CAPABILITY_USED = "E_REVOKED_CAPABILITY_USED" + + +class WarningCode(str, Enum): + FILE_NOT_IN_MANIFEST = "W_FILE_NOT_IN_MANIFEST" + RANGE_ROOT_PARTIAL = "W_RANGE_ROOT_PARTIAL" diff --git a/tools/vm_verify_sentinel_bundle.py b/tools/vm_verify_sentinel_bundle.py new file mode 100644 index 0000000..9fe222f --- /dev/null +++ b/tools/vm_verify_sentinel_bundle.py @@ -0,0 +1,1650 @@ +#!/usr/bin/env python3 +""" +vm_verify_sentinel_bundle.py + +Offline verifier for VaultMesh Sentinel v1 seal bundles. + +Usage: + python3 vm_verify_sentinel_bundle.py --bundle /path/to/bundle_dir [--strict] [--report out.json] + +Exit codes: + 0 - verification OK + 1 - verification failed + 2 - usage / unexpected error +""" + +from __future__ import annotations + +import argparse +import hashlib +import json +import sys +from dataclasses import dataclass +from pathlib import Path +from typing import Any, Iterable, Optional + +from sentinel_failure_codes import FailureCode, WarningCode + +try: + import blake3 # type: ignore +except ImportError: # pragma: no cover + blake3 = None + + +SUPPORTED_SEAL_FORMATS = {"vm-sentinel-seal-v1"} +SUPPORTED_INTEGRITY_FORMATS = {"vm-sentinel-integrity-v1"} +SUPPORTED_VERIFIER_MANIFEST_FORMATS = {"vm-sentinel-verifier-manifest-v1"} +SUPPORTED_CANONICALIZATION_VERSIONS = {"sentinel-event-jcs-v1"} + +ERROR_SCHEMA_INVALID = FailureCode.SCHEMA_INVALID.value +ERROR_MANIFEST_HASH_MISMATCH = FailureCode.MANIFEST_HASH_MISMATCH.value +ERROR_MISSING_REQUIRED_FILE = FailureCode.MISSING_REQUIRED_FILE.value +ERROR_EVENT_HASH_MISMATCH = FailureCode.EVENT_HASH_MISMATCH.value +ERROR_CHAIN_DISCONTINUITY = FailureCode.CHAIN_DISCONTINUITY.value +ERROR_SEQ_NON_MONOTONIC = FailureCode.SEQ_NON_MONOTONIC.value +ERROR_ROOT_MISMATCH = FailureCode.ROOT_MISMATCH.value +ERROR_RANGE_MISMATCH = FailureCode.RANGE_MISMATCH.value +ERROR_CANON_VERSION_UNSUPPORTED = FailureCode.CANON_VERSION_UNSUPPORTED.value +ERROR_OVERSIZE_INPUT = FailureCode.OVERSIZE_INPUT.value +ERROR_REVOKED_CAPABILITY_USED = FailureCode.REVOKED_CAPABILITY_USED.value + +WARNING_UNLISTED_FILE = WarningCode.FILE_NOT_IN_MANIFEST.value +WARNING_RANGE_ROOT_PARTIAL = WarningCode.RANGE_ROOT_PARTIAL.value + + +REPO_ROOT = Path(__file__).resolve().parents[1] +SCHEMA_DIR = REPO_ROOT / "spec" / "sentinel" + +_EMBEDDED_SCHEMAS: dict[str, dict[str, 
Any]] = { + "event.schema.json": { + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "VaultMesh Sentinel v1 Event", + "type": "object", + "additionalProperties": False, + "required": [ + "event_id", + "seq", + "ts", + "event_type", + "actor", + "cap_hash", + "op", + "op_digest", + "result", + "trace_id", + "prev_event_hash", + "event_hash", + "payload", + ], + "properties": { + "event_id": {"type": "string"}, + "seq": {"type": "integer", "minimum": 0}, + "ts": { + "description": "Monotonic + wallclock if available. Accepts ISO-8601 Z or a structured object.", + "anyOf": [ + {"type": "string"}, + { + "type": "object", + "additionalProperties": False, + "required": ["wall"], + "properties": { + "wall": {"type": "string"}, + "mono_ns": {"type": "integer", "minimum": 0}, + }, + }, + ], + }, + "event_type": { + "type": "string", + "enum": [ + "action_intent", + "policy_decision", + "action_executed", + "shadow_receipt", + "cap_grant", + "cap_revoke", + "seal_created", + "root_published", + "corruption_detected", + "tamper_signal", + "boot_event", + "health_event", + ], + }, + "actor": {"type": "string", "minLength": 1}, + "cap_hash": {"type": "string", "minLength": 1}, + "op": {"type": "string", "minLength": 1}, + "op_digest": {"type": "string", "minLength": 1}, + "result": {"type": "string", "enum": ["ok", "deny", "error"]}, + "root_before": {"type": "string"}, + "root_after": {"type": "string"}, + "trace_id": {"type": "string"}, + "prev_event_hash": {"type": "string", "minLength": 1}, + "event_hash": {"type": "string"}, + "payload": {"type": "object"}, + }, + }, + "seal.schema.json": { + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "VaultMesh Sentinel v1 Seal Bundle (seal.json)", + "type": "object", + "additionalProperties": False, + "required": [ + "format", + "sentinel_version", + "schema_version", + "hash_algo", + "canonicalization_version", + "seal_id", + "created_at", + "range", + "root", + "files", + ], + "properties": { + "format": {"type": "string", "const": "vm-sentinel-seal-v1"}, + "sentinel_version": {"type": "string"}, + "schema_version": {"type": "string"}, + "hash_algo": {"type": "string", "enum": ["blake3", "sha256"]}, + "canonicalization_version": {"type": "string"}, + "seal_id": {"type": "string"}, + "created_at": {"type": "string"}, + "instance_id": {"type": "string"}, + "ledger_type": {"type": "string", "enum": ["sqlite", "jsonl"]}, + "range": { + "type": "object", + "additionalProperties": False, + "required": ["since_seq", "until_seq"], + "properties": { + "since_seq": {"type": "integer", "minimum": 0}, + "until_seq": {"type": "integer", "minimum": 0}, + "since_ts": {"type": "string"}, + "until_ts": {"type": "string"}, + }, + }, + "root": { + "type": "object", + "additionalProperties": False, + "required": ["start", "end"], + "properties": { + "start": {"type": "string"}, + "end": {"type": "string"}, + "seq": {"type": "integer", "minimum": 0}, + }, + }, + "files": { + "type": "object", + "additionalProperties": False, + "required": ["receipts", "roots", "integrity", "verifier_manifest"], + "properties": { + "receipts": {"type": "string"}, + "roots": {"type": "string"}, + "integrity": {"type": "string"}, + "verifier_manifest": {"type": "string"}, + }, + }, + "notes": {"type": "string"}, + }, + }, + "integrity.schema.json": { + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "VaultMesh Sentinel v1 Integrity Manifest (integrity.json)", + "type": "object", + "additionalProperties": False, + "required": 
["format", "hash_algo", "files"], + "properties": { + "format": {"type": "string", "const": "vm-sentinel-integrity-v1"}, + "hash_algo": {"type": "string", "enum": ["blake3", "sha256"]}, + "files": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": False, + "required": ["path", "digest"], + "properties": { + "path": {"type": "string"}, + "digest": {"type": "string"}, + "size_bytes": {"type": "integer", "minimum": 0}, + }, + }, + }, + }, + }, + "verifier_manifest.schema.json": { + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "VaultMesh Sentinel v1 Verifier Manifest (verifier_manifest.json)", + "type": "object", + "additionalProperties": False, + "required": [ + "format", + "sentinel_version", + "schema_version", + "canonicalization_version", + ], + "properties": { + "format": { + "type": "string", + "const": "vm-sentinel-verifier-manifest-v1", + }, + "sentinel_version": {"type": "string"}, + "schema_version": {"type": "string"}, + "hash_algo": {"type": "string", "enum": ["blake3", "sha256"]}, + "canonicalization_version": {"type": "string"}, + "verifier": { + "type": "object", + "additionalProperties": True, + "properties": { + "name": {"type": "string"}, + "version": {"type": "string"}, + "sha256": {"type": "string"}, + }, + }, + }, + }, +} + + +@dataclass(frozen=True) +class Finding: + code: str + message: str + path: str | None = None + + def as_dict(self) -> dict: + d = {"code": self.code, "message": self.message} + if self.path: + d["path"] = self.path + return d + + +def _contract_ids_for_finding(finding: Finding) -> list[str]: + """ + Best-effort mapping from verifier findings -> Contract Matrix IDs. + + This is meant to make verification_report.json auditor-friendly without + requiring readers to inspect verifier source code. + """ + + contract_ids: list[str] = [] + code = finding.code + path = finding.path or "" + message = finding.message + + def add(contract_id: str) -> None: + if contract_id not in contract_ids: + contract_ids.append(contract_id) + + # Bundle-level + if code == ERROR_MISSING_REQUIRED_FILE: + add("B-1") + if code == ERROR_OVERSIZE_INPUT: + add("B-3") + + # Seal bundle / seal.json + if path == "seal.json": + add("B-1") + if path.startswith("seal.json.format"): + add("S-1") + if path.startswith("seal.json.hash_algo"): + add("S-2") + if path.startswith("seal.json.range"): + add("S-3") + if path.startswith("seal.json.root"): + add("S-4") + if path.startswith("seal.json.files"): + add("S-5") + add("B-1") + if "missing file referenced by seal.files." 
in message: + add("S-5") + add("B-1") + if path.startswith("seal.json.canonicalization_version"): + add("S-6") + if code == ERROR_CANON_VERSION_UNSUPPORTED: + add("S-6") + + # integrity.json + if path.startswith("integrity.json.format"): + add("I-1") + if path.startswith("integrity.json.hash_algo"): + add("I-2") + if path.startswith("integrity.json.files"): + add("I-3") + if code == ERROR_MANIFEST_HASH_MISMATCH: + add("I-3") + if "size_bytes mismatch" in message: + add("I-4") + if message.startswith("file present but not listed in integrity.json:"): + add("I-5") + if message.startswith("integrity.json does not cover required seal file:"): + add("I-6") + + # verifier_manifest.json + if path.startswith("verifier_manifest.json.format"): + add("V-1") + if path.startswith("verifier_manifest.json.hash_algo"): + add("V-3") + if path.startswith("verifier_manifest.json") and not ( + path.startswith("verifier_manifest.json.format") + or path.startswith("verifier_manifest.json.hash_algo") + ): + add("V-2") + + # Event ledger + if code == ERROR_SCHEMA_INVALID and path.endswith(".jsonl"): + add("E-1") + if code == ERROR_EVENT_HASH_MISMATCH: + add("E-2") + if code == ERROR_CHAIN_DISCONTINUITY: + add("E-3") + if code == ERROR_SEQ_NON_MONOTONIC: + add("E-4") + if code == ERROR_ROOT_MISMATCH: + add("E-5") + if code == ERROR_RANGE_MISMATCH: + add("E-6") + if code == ERROR_REVOKED_CAPABILITY_USED: + add("E-7") + + return contract_ids + + +def _finding_to_report_dict(finding: Finding) -> dict: + d = finding.as_dict() + contract_ids = _contract_ids_for_finding(finding) + if contract_ids: + d["contract_ids"] = contract_ids + return d + + +def _finalize_report_findings( + report: dict[str, Any], *, errors: list[Finding], warnings: list[Finding] +) -> None: + report["errors"] = [_finding_to_report_dict(e) for e in errors] + report["warnings"] = [_finding_to_report_dict(w) for w in warnings] + report["ok"] = not errors + report["failure_code"] = errors[0].code if errors else None + report["violated_contract_ids"] = sorted( + {cid for e in errors for cid in _contract_ids_for_finding(e)} + ) + report["warned_contract_ids"] = sorted( + {cid for w in warnings for cid in _contract_ids_for_finding(w)} + ) + + +def _load_json(path: Path) -> dict: + return json.loads(path.read_text(encoding="utf-8")) + + +def _hex_part(value: str) -> str: + return value.split(":", 1)[-1] + + +def _require_no_floats(value: Any, *, path: str = "$") -> None: + if isinstance(value, float): + raise ValueError(f"float not allowed in canonical JSON at {path}") + if isinstance(value, dict): + for k, v in value.items(): + _require_no_floats(v, path=f"{path}.{k}") + elif isinstance(value, list): + for i, v in enumerate(value): + _require_no_floats(v, path=f"{path}[{i}]") + + +def _canonical_json_bytes(obj: Any) -> bytes: + """ + Deterministic canonical JSON bytes for Sentinel v1 hashing. 
+ + This verifier enforces a strict subset compatible with sentinel-event-jcs-v1 + for Sentinel artifacts: + - UTF-8 + - object keys sorted + - separators (",", ":") + - no NaN/Infinity + - no floats (represent decimals as strings instead) + """ + _require_no_floats(obj) + encoded = json.dumps( + obj, + sort_keys=True, + separators=(",", ":"), + ensure_ascii=False, + allow_nan=False, + ).encode("utf-8") + return encoded + + +def _vmhash(data: bytes, *, hash_algo: str) -> str: + if hash_algo == "blake3": + if blake3 is None: + raise RuntimeError( + "Missing dependency: blake3 (required for blake3 bundles)" + ) + return f"blake3:{blake3.blake3(data).hexdigest()}" + if hash_algo == "sha256": + return f"sha256:{hashlib.sha256(data).hexdigest()}" + raise ValueError(f"unsupported hash_algo: {hash_algo!r}") + + +def _compute_merkle_root(leaves: list[str], *, hash_algo: str) -> str: + if not leaves: + return _vmhash(b"empty", hash_algo=hash_algo) + if len(leaves) == 1: + return leaves[0] + + level = leaves[:] + while len(level) > 1: + next_level: list[str] = [] + for i in range(0, len(level), 2): + left = level[i] + right = level[i + 1] if i + 1 < len(level) else left + combined = (_hex_part(left) + _hex_part(right)).encode("utf-8") + next_level.append(_vmhash(combined, hash_algo=hash_algo)) + level = next_level + return level[0] + + +def _iter_jsonl(path: Path) -> Iterable[dict]: + with path.open("r", encoding="utf-8") as f: + for line_no, line in enumerate(f, start=1): + line = line.strip() + if not line: + continue + try: + obj = json.loads(line) + except Exception as exc: + raise ValueError( + f"{path.name}:{line_no}: invalid JSON ({exc})" + ) from exc + if not isinstance(obj, dict): + raise ValueError(f"{path.name}:{line_no}: expected JSON object") + yield obj + + +def _load_schema(filename: str) -> dict: + path = SCHEMA_DIR / filename + if path.exists(): + return _load_json(path) + embedded = _EMBEDDED_SCHEMAS.get(filename) + if embedded is None: + raise FileNotFoundError(f"schema not found: {filename}") + return embedded + + +def _validate_schema(instance: Any, schema: dict, *, path: str = "$") -> list[Finding]: + """ + Minimal JSON Schema validator (subset) for Sentinel v1 verifier. + + Supports: type, required, properties, additionalProperties, enum, const, anyOf, items, minimum. 
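+ + minLength is honored for strings as well, and unrecognized keywords are ignored. + Example of the implemented behavior: _validate_schema(-1, {"type": "integer", "minimum": 0}) + returns a single E_SCHEMA_INVALID finding ("minimum 0 violated").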
+ """ + findings: list[Finding] = [] + + if "const" in schema: + if instance != schema["const"]: + findings.append( + Finding( + ERROR_SCHEMA_INVALID, + f"expected const {schema['const']!r}, got {instance!r}", + path=path, + ) + ) + return findings + + if "enum" in schema: + if instance not in schema["enum"]: + findings.append( + Finding( + ERROR_SCHEMA_INVALID, + f"expected one of {schema['enum']!r}, got {instance!r}", + path=path, + ) + ) + return findings + + if "anyOf" in schema: + options = schema["anyOf"] + for opt in options: + if not _validate_schema(instance, opt, path=path): + return [] + findings.append( + Finding(ERROR_SCHEMA_INVALID, "did not match anyOf schema", path=path) + ) + return findings + + schema_type = schema.get("type") + if schema_type == "object": + if not isinstance(instance, dict): + findings.append(Finding(ERROR_SCHEMA_INVALID, "expected object", path=path)) + return findings + + required = schema.get("required") or [] + for key in required: + if key not in instance: + findings.append( + Finding( + ERROR_SCHEMA_INVALID, + f"missing required property: {key}", + path=path, + ) + ) + + properties = schema.get("properties") or {} + additional = schema.get("additionalProperties", True) + + for key, value in instance.items(): + key_path = f"{path}.{key}" + if key in properties: + findings.extend(_validate_schema(value, properties[key], path=key_path)) + else: + if additional is False: + findings.append( + Finding( + ERROR_SCHEMA_INVALID, + "unexpected additional property", + path=key_path, + ) + ) + + return findings + + if schema_type == "array": + if not isinstance(instance, list): + findings.append(Finding(ERROR_SCHEMA_INVALID, "expected array", path=path)) + return findings + items_schema = schema.get("items") + if isinstance(items_schema, dict): + for i, item in enumerate(instance): + findings.extend( + _validate_schema(item, items_schema, path=f"{path}[{i}]") + ) + return findings + + if schema_type == "string": + if not isinstance(instance, str): + findings.append(Finding(ERROR_SCHEMA_INVALID, "expected string", path=path)) + return findings + min_len = schema.get("minLength") + if isinstance(min_len, int) and len(instance) < min_len: + findings.append( + Finding( + ERROR_SCHEMA_INVALID, + f"minLength {min_len} violated", + path=path, + ) + ) + return findings + + if schema_type == "integer": + if not isinstance(instance, int) or isinstance(instance, bool): + findings.append( + Finding(ERROR_SCHEMA_INVALID, "expected integer", path=path) + ) + return findings + minimum = schema.get("minimum") + if isinstance(minimum, int) and instance < minimum: + findings.append( + Finding( + ERROR_SCHEMA_INVALID, + f"minimum {minimum} violated", + path=path, + ) + ) + return findings + + if schema_type == "boolean": + if not isinstance(instance, bool): + findings.append( + Finding(ERROR_SCHEMA_INVALID, "expected boolean", path=path) + ) + return findings + + # If schema has no type, treat as permissive. 
+ return findings + + +def _parse_roots_txt(path: Path) -> list[tuple[int, str]]: + roots: list[tuple[int, str]] = [] + for line_no, line in enumerate( + path.read_text(encoding="utf-8").splitlines(), start=1 + ): + s = line.strip() + if not s or s.startswith("#"): + continue + if "seq=" in s and "root=" in s: + parts = dict(part.split("=", 1) for part in s.split() if "=" in part) + try: + seq = int(parts["seq"]) + except Exception as exc: + raise ValueError(f"{path.name}:{line_no}: invalid seq ({exc})") from exc + root = parts.get("root") + if not root: + raise ValueError(f"{path.name}:{line_no}: missing root") + roots.append((seq, root)) + else: + raise ValueError( + f"{path.name}:{line_no}: expected 'seq=<n> root=<hash>'" + ) + return roots + + +def _write_report(path: Path, report: dict) -> None: + path.write_text( + json.dumps(report, sort_keys=True, separators=(",", ":"), ensure_ascii=False) + + "\n", + encoding="utf-8", + ) + + +def verify_bundle( + bundle_dir: Path, + *, + strict: bool, + report_path: Path, + max_file_bytes: int, +) -> int: + errors: list[Finding] = [] + warnings: list[Finding] = [] + + report: dict[str, Any] = { + "format": "vm-sentinel-verification-report-v1", + "ok": False, + "strict": strict, + "failure_code": None, + "inputs": {"bundle_dir": str(bundle_dir)}, + "covered_seq_range": None, + "verified_ranges": [], + "observed_roots": {}, + "computed_roots": {}, + "observed_end_root": None, + "computed_end_root": None, + "mismatches": [], + "corruption_findings": [], + "versions": {}, + "schema_versions_used": {}, + "errors": [], + "warnings": [], + "verifier": { + "name": "vm_verify_sentinel_bundle.py", + "python": f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}", + }, + "declared_verifier": None, + } + + seal_path = bundle_dir / "seal.json" + if not seal_path.exists(): + errors.append( + Finding( + ERROR_MISSING_REQUIRED_FILE, "seal.json not found", path="seal.json" + ) + ) + _finalize_report_findings(report, errors=errors, warnings=warnings) + _write_report(report_path, report) + return 1 + + try: + seal = _load_json(seal_path) + except Exception as exc: + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + f"failed to parse seal.json: {exc}", + path="seal.json", + ) + ) + _finalize_report_findings(report, errors=errors, warnings=warnings) + _write_report(report_path, report) + return 1 + + try: + seal_schema = _load_schema("seal.schema.json") + errors.extend(_validate_schema(seal, seal_schema, path="seal.json")) + except Exception as exc: + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + f"failed to load/validate seal.schema.json: {exc}", + path="seal.schema.json", + ) + ) + + fmt = seal.get("format") + if fmt not in SUPPORTED_SEAL_FORMATS: + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + f"seal.format unsupported: {fmt!r}", + path="seal.json.format", + ) + ) + + hash_algo = seal.get("hash_algo") + if hash_algo not in ("blake3", "sha256"): + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + f"seal.hash_algo invalid: {hash_algo!r}", + path="seal.json.hash_algo", + ) + ) + hash_algo = "sha256" # keep verifier progressing for report completeness + + canonicalization_version = seal.get("canonicalization_version") + if not isinstance(canonicalization_version, str) or not canonicalization_version: + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + "seal.canonicalization_version missing", + path="seal.json.canonicalization_version", + ) + ) + canonicalization_version = "" + elif canonicalization_version not in
SUPPORTED_CANONICALIZATION_VERSIONS: + errors.append( + Finding( + ERROR_CANON_VERSION_UNSUPPORTED, + f"unsupported canonicalization_version: {canonicalization_version!r}", + path="seal.json.canonicalization_version", + ) + ) + + files = seal.get("files") + if not isinstance(files, dict): + errors.append( + Finding(ERROR_SCHEMA_INVALID, "seal.files missing", path="seal.json.files") + ) + files = {} + + def _file_from_seal(key: str) -> Optional[Path]: + rel = files.get(key) + if not isinstance(rel, str) or not rel: + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + f"seal.files.{key} missing", + path=f"seal.json.files.{key}", + ) + ) + return None + p = (bundle_dir / rel).resolve() + if not p.exists(): + errors.append( + Finding( + ERROR_MISSING_REQUIRED_FILE, + f"missing file referenced by seal.files.{key}: {rel}", + path=rel, + ) + ) + return None + return p + + receipts_path = _file_from_seal("receipts") + roots_path = _file_from_seal("roots") + integrity_path = _file_from_seal("integrity") + verifier_manifest_path = _file_from_seal("verifier_manifest") + + report["versions"] = { + "sentinel_version": seal.get("sentinel_version"), + "schema_version": seal.get("schema_version"), + "hash_algo": hash_algo, + "canonicalization_version": canonicalization_version, + } + report["schema_versions_used"]["seal"] = seal.get("schema_version") + + integrity: dict[str, Any] | None = None + if integrity_path is not None: + try: + integrity = _load_json(integrity_path) + + integrity_schema = _load_schema("integrity.schema.json") + errors.extend( + _validate_schema( + integrity, integrity_schema, path=str(integrity_path.name) + ) + ) + + if integrity.get("format") not in SUPPORTED_INTEGRITY_FORMATS: + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + f"integrity.format unsupported: {integrity.get('format')!r}", + path="integrity.json.format", + ) + ) + if integrity.get("hash_algo") != hash_algo: + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + "integrity.hash_algo != seal.hash_algo", + path="integrity.json.hash_algo", + ) + ) + + listed: list[dict] = integrity.get("files") or [] + if not isinstance(listed, list) or not listed: + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + "integrity.files missing or empty", + path="integrity.json.files", + ) + ) + listed = [] + + # Deterministic ordering for verification/reporting. + listed_sorted = sorted( + (e for e in listed if isinstance(e, dict)), + key=lambda e: str(e.get("path", "")), + ) + + listed_paths = {str(e.get("path")) for e in listed_sorted if "path" in e} + + # Enforce that seal-referenced files are covered by integrity.json. + # + # Note: integrity.json MUST NOT be required to include a digest of itself (recursive), + # so we only require the other seal files here. + required_files = [ + files.get("receipts"), + files.get("roots"), + files.get("verifier_manifest"), + ] + for required_rel in required_files: + if ( + isinstance(required_rel, str) + and required_rel + and required_rel not in listed_paths + ): + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + f"integrity.json does not cover required seal file: {required_rel}", + path="integrity.json.files", + ) + ) + + # Recommended: cover seal.json too (strict mode enforces). 
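+ # (This cannot recurse: seal.json lists integrity.json by path only, so integrity.json may safely carry a digest of seal.json.)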
+ if "seal.json" not in listed_paths: + finding = Finding( + WARNING_UNLISTED_FILE, + "integrity.json does not cover seal.json (recommended)", + path="integrity.json.files", + ) + if strict: + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + finding.message, + path="integrity.json.files", + ) + ) + else: + warnings.append(finding) + + # Hash verification + for entry in listed_sorted: + rel = entry.get("path") + digest = entry.get("digest") + if not isinstance(rel, str) or not rel: + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + "integrity.files entry missing path", + path="integrity.json.files", + ) + ) + continue + if not isinstance(digest, str) or not digest: + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + f"integrity.files[{rel}] missing digest", + path="integrity.json.files", + ) + ) + continue + + file_path = (bundle_dir / rel).resolve() + if not file_path.exists(): + errors.append( + Finding( + ERROR_MISSING_REQUIRED_FILE, + f"integrity missing file: {rel}", + path=rel, + ) + ) + continue + + size = file_path.stat().st_size + if size > max_file_bytes: + errors.append( + Finding( + ERROR_OVERSIZE_INPUT, + f"file exceeds max size ({size} > {max_file_bytes} bytes): {rel}", + path=rel, + ) + ) + continue + + data = file_path.read_bytes() + computed = _vmhash(data, hash_algo=hash_algo) + if computed != digest: + errors.append( + Finding( + ERROR_MANIFEST_HASH_MISMATCH, + f"digest mismatch for {rel}: expected {digest}, got {computed}", + path=rel, + ) + ) + + size_bytes = entry.get("size_bytes") + if isinstance(size_bytes, int) and size_bytes != len(data): + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + f"size_bytes mismatch for {rel}: expected {size_bytes}, got {len(data)}", + path=rel, + ) + ) + + # Extra files present but not listed in integrity.json + ignored = {".DS_Store", "verification_report.json", report_path.name} + integrity_rel = files.get("integrity") + if isinstance(integrity_rel, str) and integrity_rel: + ignored.add(integrity_rel) + for fp in sorted(bundle_dir.rglob("*")): + if fp.is_dir(): + continue + rel = fp.relative_to(bundle_dir).as_posix() + if rel in ignored: + continue + if rel not in listed_paths: + finding = Finding( + WARNING_UNLISTED_FILE, + f"file present but not listed in integrity.json: {rel}", + path=rel, + ) + if strict: + errors.append( + Finding(ERROR_SCHEMA_INVALID, finding.message, path=rel) + ) + else: + warnings.append(finding) + except Exception as exc: + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + f"failed to verify integrity.json: {exc}", + path="integrity.json", + ) + ) + + if verifier_manifest_path is not None: + try: + manifest = _load_json(verifier_manifest_path) + manifest_schema = _load_schema("verifier_manifest.schema.json") + errors.extend( + _validate_schema( + manifest, manifest_schema, path=str(verifier_manifest_path.name) + ) + ) + + mfmt = manifest.get("format") + if mfmt not in SUPPORTED_VERIFIER_MANIFEST_FORMATS: + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + f"verifier_manifest.format unsupported: {mfmt!r}", + path="verifier_manifest.json.format", + ) + ) + + mv = manifest.get("canonicalization_version") + if ( + isinstance(mv, str) + and canonicalization_version + and mv != canonicalization_version + ): + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + "verifier_manifest.canonicalization_version != seal.canonicalization_version", + path="verifier_manifest.json.canonicalization_version", + ) + ) + + mh = manifest.get("hash_algo") + if isinstance(mh, str) and mh != hash_algo: + errors.append( 
+ Finding( + ERROR_SCHEMA_INVALID, + "verifier_manifest.hash_algo != seal.hash_algo", + path="verifier_manifest.json.hash_algo", + ) + ) + report["schema_versions_used"]["verifier_manifest"] = manifest.get( + "schema_version" + ) + + dv = manifest.get("verifier") + if isinstance(dv, dict): + report["declared_verifier"] = dv + except Exception as exc: + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + f"failed to parse verifier_manifest.json: {exc}", + path="verifier_manifest.json", + ) + ) + + range_obj = seal.get("range") or {} + since_seq = range_obj.get("since_seq") + until_seq = range_obj.get("until_seq") + if not isinstance(since_seq, int) or not isinstance(until_seq, int): + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + "seal.range.since_seq/until_seq missing or invalid", + path="seal.json.range", + ) + ) + since_seq = 0 + until_seq = -1 + + report["covered_seq_range"] = {"since_seq": since_seq, "until_seq": until_seq} + + events: list[dict] = [] + if receipts_path is not None: + if receipts_path.stat().st_size > max_file_bytes: + errors.append( + Finding( + ERROR_OVERSIZE_INPUT, + f"receipts file exceeds max size ({receipts_path.stat().st_size} > {max_file_bytes} bytes)", + path=receipts_path.name, + ) + ) + else: + event_schema = _load_schema("event.schema.json") + last_good_seq: int | None = None + last_good_line_no: int | None = None + byte_offset = 0 + with receipts_path.open("rb") as f: + for line_no, raw in enumerate(f, start=1): + line_start = byte_offset + byte_offset += len(raw) + if not raw.strip(): + continue + try: + text = raw.decode("utf-8").strip() + except UnicodeDecodeError as exc: + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + f"{receipts_path.name}:{line_no}: utf-8 decode error ({exc})", + path=receipts_path.name, + ) + ) + report["corruption_findings"].append( + { + "file": receipts_path.name, + "line_no": line_no, + "byte_offset": line_start, + "last_good_seq": last_good_seq, + "last_good_line_no": last_good_line_no, + "error": f"utf-8 decode error ({exc})", + "recommended_recovery": [ + "Verify an older seal bundle that predates this range.", + "Restore receipts from WORM/immutable storage if available.", + "Compare integrity.json digests to an out-of-band copy.", + ], + } + ) + break + try: + ev = json.loads(text) + except Exception as exc: + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + f"{receipts_path.name}:{line_no}: invalid JSON ({exc})", + path=receipts_path.name, + ) + ) + report["corruption_findings"].append( + { + "file": receipts_path.name, + "line_no": line_no, + "byte_offset": line_start, + "last_good_seq": last_good_seq, + "last_good_line_no": last_good_line_no, + "error": f"invalid JSON ({exc})", + "recommended_recovery": [ + "Verify an older seal bundle that predates this range.", + "Restore receipts from WORM/immutable storage if available.", + "Compare integrity.json digests to an out-of-band copy.", + ], + } + ) + break + + if not isinstance(ev, dict): + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + f"{receipts_path.name}:{line_no}: expected JSON object", + path=receipts_path.name, + ) + ) + report["corruption_findings"].append( + { + "file": receipts_path.name, + "line_no": line_no, + "byte_offset": line_start, + "last_good_seq": last_good_seq, + "last_good_line_no": last_good_line_no, + "error": "expected JSON object", + } + ) + break + + errors.extend( + _validate_schema( + ev, event_schema, path=f"{receipts_path.name}:{line_no}" + ) + ) + seq = ev.get("seq") + if isinstance(seq, int): + last_good_seq = seq + 
last_good_line_no = line_no + events.append(ev) + + if events: + # Deterministic ordering: sort by seq, not file order. + by_seq: dict[int, list[dict]] = {} + for ev in events: + seq = ev.get("seq") + if isinstance(seq, int): + by_seq.setdefault(seq, []).append(ev) + + dupes = sorted([seq for seq, lst in by_seq.items() if len(lst) > 1]) + for seq in dupes: + errors.append( + Finding( + ERROR_SEQ_NON_MONOTONIC, + f"duplicate seq value: {seq}", + path=f"events.seq:{seq}", + ) + ) + + ordered_seqs = sorted(by_seq.keys()) + if ordered_seqs: + if ordered_seqs[0] != since_seq or ordered_seqs[-1] != until_seq: + errors.append( + Finding( + ERROR_RANGE_MISMATCH, + f"event seq range mismatch: got {ordered_seqs[0]}..{ordered_seqs[-1]}, expected {since_seq}..{until_seq}", + path="seal.json.range", + ) + ) + + expected_count = until_seq - since_seq + 1 + if expected_count != len(events): + errors.append( + Finding( + ERROR_RANGE_MISMATCH, + f"receipt count mismatch: expected {expected_count}, got {len(events)}", + path=receipts_path.name if receipts_path else "receipts", + ) + ) + + missing = [s for s in range(since_seq, until_seq + 1) if s not in by_seq] + if missing: + errors.append( + Finding( + ERROR_RANGE_MISMATCH, + f"missing seq values in range: {missing[:20]}{'...' if len(missing) > 20 else ''}", + path="events.seq", + ) + ) + + # Flatten events in seq order (deterministic). + events_ordered = [ + by_seq[s][0] for s in range(since_seq, until_seq + 1) if s in by_seq + ] + events = events_ordered + + root_obj = seal.get("root") or {} + root_start = root_obj.get("start") + root_end = root_obj.get("end") + report["observed_roots"] = {"start": root_start, "end": root_end} + report["observed_end_root"] = root_end + if not isinstance(root_start, str) or not isinstance(root_end, str): + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + "seal.root.start/end missing or invalid", + path="seal.json.root", + ) + ) + + # Event hashing, op_digest, and prev_event_hash chain verification. 
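+ # Rule checked below: event_hash == H(canonical_json(event without "event_hash")), and each + # prev_event_hash must equal the previous event's computed hash (the literal "0" at seq=0).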
+ computed_event_hashes: list[str] = [] + leaves: list[str] = [] + if events: + revoked_cap_hashes: set[str] = set() + if canonicalization_version not in SUPPORTED_CANONICALIZATION_VERSIONS: + errors.append( + Finding( + ERROR_CANON_VERSION_UNSUPPORTED, + f"unsupported canonicalization_version: {canonicalization_version!r}", + path="seal.json.canonicalization_version", + ) + ) + else: + for idx, ev in enumerate(events): + seq = ev.get("seq") + if not isinstance(seq, int): + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + "event.seq missing or invalid", + path=f"events[{idx}].seq", + ) + ) + continue + + stored_event_hash = ev.get("event_hash") + if not isinstance(stored_event_hash, str) or not stored_event_hash: + errors.append( + Finding( + ERROR_EVENT_HASH_MISMATCH, + "event_hash missing", + path=f"events[{idx}].event_hash", + ) + ) + continue + + ev_no_hash = dict(ev) + ev_no_hash.pop("event_hash", None) + try: + canon = _canonical_json_bytes(ev_no_hash) + except Exception as exc: + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + f"canonicalization failed: {exc}", + path=f"events[{idx}]", + ) + ) + continue + + try: + computed_hash = _vmhash(canon, hash_algo=hash_algo) + except Exception as exc: + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + f"hashing failed: {exc}", + path=f"events[{idx}]", + ) + ) + continue + + computed_event_hashes.append(computed_hash) + leaves.append(computed_hash) + + if computed_hash != stored_event_hash: + errors.append( + Finding( + ERROR_EVENT_HASH_MISMATCH, + f"event_hash mismatch: expected {stored_event_hash}, got {computed_hash}", + path=f"events[{idx}].event_hash", + ) + ) + + prev = ev.get("prev_event_hash") + if not isinstance(prev, str) or not prev: + errors.append( + Finding( + ERROR_CHAIN_DISCONTINUITY, + "prev_event_hash missing", + path=f"events[{idx}].prev_event_hash", + ) + ) + else: + if idx == 0: + if seq == 0: + if prev != "0": + errors.append( + Finding( + ERROR_CHAIN_DISCONTINUITY, + 'prev_event_hash must be "0" for seq=0', + path=f"events[{idx}].prev_event_hash", + ) + ) + else: + finding = Finding( + WARNING_RANGE_ROOT_PARTIAL, + "first event is not seq=0; prev_event_hash cannot be verified without prior context", + path=f"events[{idx}].prev_event_hash", + ) + if strict: + errors.append( + Finding( + ERROR_CHAIN_DISCONTINUITY, + finding.message, + path=f"events[{idx}].prev_event_hash", + ) + ) + else: + warnings.append(finding) + else: + if prev != computed_event_hashes[idx - 1]: + errors.append( + Finding( + ERROR_CHAIN_DISCONTINUITY, + "prev_event_hash does not match previous event_hash", + path=f"events[{idx}].prev_event_hash", + ) + ) + + # op_digest verification (params convention: payload.params) + op = ev.get("op") + op_digest = ev.get("op_digest") + payload = ( + ev.get("payload") if isinstance(ev.get("payload"), dict) else {} + ) + params = payload.get("params", {}) + if params is None: + params = {} + if not isinstance(op, str) or not op: + errors.append( + Finding( + ERROR_SCHEMA_INVALID, "op missing", path=f"events[{idx}].op" + ) + ) + elif not isinstance(op_digest, str) or not op_digest: + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + "op_digest missing", + path=f"events[{idx}].op_digest", + ) + ) + elif not isinstance(params, dict): + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + "payload.params must be an object", + path=f"events[{idx}].payload.params", + ) + ) + else: + try: + op_obj = {"op": op, "params": params} + op_bytes = _canonical_json_bytes(op_obj) + computed_op_digest = _vmhash(op_bytes, 
hash_algo=hash_algo) + if computed_op_digest != op_digest: + errors.append( + Finding( + ERROR_EVENT_HASH_MISMATCH, + f"op_digest mismatch: expected {op_digest}, got {computed_op_digest}", + path=f"events[{idx}].op_digest", + ) + ) + except Exception as exc: + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + f"op_digest computation failed: {exc}", + path=f"events[{idx}].op_digest", + ) + ) + + # Capability revocation enforcement (v1 hardening): + # If a capability is revoked, subsequent action execution MUST NOT use it. + event_type = ev.get("event_type") + cap_hash = ev.get("cap_hash") + if event_type == "cap_revoke" and isinstance(payload, dict): + revoked = None + if isinstance(payload.get("revoked_cap_hash"), str): + revoked = payload.get("revoked_cap_hash") + elif isinstance(params, dict) and isinstance( + params.get("revoked_cap_hash"), str + ): + revoked = params.get("revoked_cap_hash") + if isinstance(revoked, str) and revoked: + revoked_cap_hashes.add(revoked) + + if ( + event_type == "action_executed" + and isinstance(cap_hash, str) + and cap_hash in revoked_cap_hashes + ): + errors.append( + Finding( + ERROR_REVOKED_CAPABILITY_USED, + f"action_executed uses revoked cap_hash: {cap_hash}", + path=f"events[{idx}].cap_hash", + ) + ) + + report["verified_ranges"] = [{"since_seq": since_seq, "until_seq": until_seq}] + + # Merkle verification (only possible from genesis without additional continuation state). + if ( + isinstance(since_seq, int) + and since_seq == 0 + and leaves + and isinstance(root_start, str) + and isinstance(root_end, str) + ): + try: + expected_start = _vmhash(b"empty", hash_algo=hash_algo) + report["computed_roots"]["expected_start"] = expected_start + if root_start != expected_start: + errors.append( + Finding( + ERROR_ROOT_MISMATCH, + f"seal.root.start mismatch: expected {expected_start}, got {root_start}", + path="seal.json.root.start", + ) + ) + + computed_end = _compute_merkle_root(leaves, hash_algo=hash_algo) + report["computed_roots"]["computed_end"] = computed_end + report["computed_end_root"] = computed_end + if computed_end != root_end: + errors.append( + Finding( + ERROR_ROOT_MISMATCH, + f"seal.root.end mismatch: expected {root_end}, got {computed_end}", + path="seal.json.root.end", + ) + ) + except Exception as exc: + errors.append( + Finding( + ERROR_ROOT_MISMATCH, + f"merkle verification failed: {exc}", + path="seal.json.root", + ) + ) + else: + if isinstance(since_seq, int) and since_seq > 0: + finding = Finding( + WARNING_RANGE_ROOT_PARTIAL, + "cannot recompute Merkle roots for since_seq>0 without a verifiable continuation state (frontier snapshot)", + path="seal.json.range", + ) + if strict: + errors.append( + Finding( + ERROR_ROOT_MISMATCH, finding.message, path="seal.json.range" + ) + ) + else: + warnings.append(finding) + + # roots.txt parsing (self-consistency with seal.root.end) + if roots_path is not None and isinstance(root_end, str): + try: + roots = _parse_roots_txt(roots_path) + if roots: + last_seq, last_root = roots[-1] + if last_seq != until_seq: + errors.append( + Finding( + ERROR_RANGE_MISMATCH, + f"roots.txt last seq mismatch: expected {until_seq}, got {last_seq}", + path=roots_path.name, + ) + ) + if last_root != root_end: + errors.append( + Finding( + ERROR_ROOT_MISMATCH, + "roots.txt last root does not match seal.root.end", + path=roots_path.name, + ) + ) + except Exception as exc: + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + f"failed to parse roots.txt: {exc}", + path=roots_path.name, + ) + ) + + # Strict-mode trace 
linkage integrity checks (intent → executed/denied). + if strict and events: + try: + by_trace: dict[str, list[tuple[int, str]]] = {} + for ev in events: + trace_id = ev.get("trace_id") + event_type = ev.get("event_type") + seq = ev.get("seq") + if ( + isinstance(trace_id, str) + and isinstance(event_type, str) + and isinstance(seq, int) + ): + if event_type in ( + "action_intent", + "action_executed", + "shadow_receipt", + ): + by_trace.setdefault(trace_id, []).append((seq, event_type)) + + for trace_id, seq_types in sorted(by_trace.items()): + seq_types_sorted = sorted(seq_types, key=lambda t: t[0]) + types = [t for _, t in seq_types_sorted] + has_intent = "action_intent" in types + has_exec = "action_executed" in types + has_shadow = "shadow_receipt" in types + + if has_exec and not has_intent: + errors.append( + Finding( + ERROR_CHAIN_DISCONTINUITY, + f"execution without prior intent for trace_id {trace_id}", + path=f"trace_id:{trace_id}", + ) + ) + + if has_intent and not (has_exec or has_shadow): + errors.append( + Finding( + ERROR_CHAIN_DISCONTINUITY, + f"intent without executed/denied outcome for trace_id {trace_id}", + path=f"trace_id:{trace_id}", + ) + ) + + if has_exec and has_shadow: + errors.append( + Finding( + ERROR_CHAIN_DISCONTINUITY, + f"both action_executed and shadow_receipt present for trace_id {trace_id}", + path=f"trace_id:{trace_id}", + ) + ) + except Exception as exc: + errors.append( + Finding( + ERROR_SCHEMA_INVALID, + f"trace linkage verification failed: {exc}", + path="trace_id", + ) + ) + + _finalize_report_findings(report, errors=errors, warnings=warnings) + + _write_report(report_path, report) + + # Console output: PASS/FAIL + stable codes + if errors: + for e in errors: + loc = f" ({e.path})" if e.path else "" + print(f"FAIL {e.code}{loc}: {e.message}", file=sys.stderr) + for w in warnings: + loc = f" ({w.path})" if w.path else "" + print(f"WARN {w.code}{loc}: {w.message}", file=sys.stderr) + return 1 + + for w in warnings: + loc = f" ({w.path})" if w.path else "" + print(f"WARN {w.code}{loc}: {w.message}", file=sys.stderr) + + print("PASS") + return 0 + + +def main(argv: list[str]) -> int: + p = argparse.ArgumentParser( + description="Verify a VaultMesh Sentinel v1 bundle directory." 
+ ) + p.add_argument( + "--bundle", required=True, help="Path to bundle directory (contains seal.json)" + ) + p.add_argument( + "--strict", + action="store_true", + help="Treat warnings and partial verifications as failures.", + ) + p.add_argument( + "--report", + help="Write machine-readable verification report JSON to this path (default: verification_report.json in bundle).", + ) + p.add_argument( + "--max-file-bytes", + type=int, + default=50_000_000, + help="Reject any single input file larger than this many bytes (default: 50,000,000).", + ) + args = p.parse_args(argv) + + bundle_dir = Path(args.bundle).expanduser().resolve() + if not bundle_dir.exists() or not bundle_dir.is_dir(): + print(f"[ERROR] --bundle must be a directory: {bundle_dir}", file=sys.stderr) + return 2 + + report_path = ( + Path(args.report).expanduser().resolve() + if args.report + else (bundle_dir / "verification_report.json") + ) + + try: + return verify_bundle( + bundle_dir, + strict=bool(args.strict), + report_path=report_path, + max_file_bytes=int(args.max_file_bytes), + ) + except Exception as exc: + print(f"[ERROR] unexpected error: {exc}", file=sys.stderr) + return 2 + + +if __name__ == "__main__": + raise SystemExit(main(sys.argv[1:])) diff --git a/vaultmesh-automation/Cargo.toml b/vaultmesh-automation/Cargo.toml new file mode 100644 index 0000000..7379f44 --- /dev/null +++ b/vaultmesh-automation/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "vaultmesh-automation" +version = "0.1.0" +edition = "2021" + +[dependencies] +vaultmesh-core = { path = "../vaultmesh-core" } diff --git a/vaultmesh-automation/src/lib.rs b/vaultmesh-automation/src/lib.rs new file mode 100644 index 0000000..464c58a --- /dev/null +++ b/vaultmesh-automation/src/lib.rs @@ -0,0 +1,5 @@ +//! vaultmesh-automation - stub engine, to be implemented. 
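+//! For now it only exposes ping(), a placeholder liveness check.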
+ +pub fn ping() -> &'static str { + "vaultmesh-automation alive" +} diff --git a/vaultmesh-core/Cargo.toml b/vaultmesh-core/Cargo.toml new file mode 100644 index 0000000..bbf1e5b --- /dev/null +++ b/vaultmesh-core/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "vaultmesh-core" +version = "0.1.0" +edition = "2021" + +[dependencies] +chrono = { version = "0.4", features = ["serde"] } +serde = { version = "1", features = ["derive"] } +serde_json = "1" +blake3 = "1" diff --git a/vaultmesh-core/src/did.rs b/vaultmesh-core/src/did.rs new file mode 100644 index 0000000..c365d39 --- /dev/null +++ b/vaultmesh-core/src/did.rs @@ -0,0 +1,130 @@ +use serde::{Deserialize, Serialize}; + +/// VaultMesh DID +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] +pub struct Did(String); + +impl Did { + pub fn new(did_type: DidType, identifier: &str) -> Self { + Did(format!("did:vm:{}:{}", did_type.as_str(), identifier)) + } + + pub fn parse(s: &str) -> Result<Self, DidParseError> { + if !s.starts_with("did:vm:") { + return Err(DidParseError::InvalidPrefix); + } + Ok(Did(s.to_string())) + } + + pub fn did_type(&self) -> Option<DidType> { + let parts: Vec<&str> = self.0.split(':').collect(); + if parts.len() >= 3 { + DidType::from_str(parts[2]) + } else { + None + } + } + + pub fn identifier(&self) -> Option<&str> { + let parts: Vec<&str> = self.0.split(':').collect(); + if parts.len() >= 4 { + Some(parts[3]) + } else { + None + } + } + + pub fn as_str(&self) -> &str { + &self.0 + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub enum DidType { + Node, + Human, + Agent, + Service, + Mesh, + Portal, + Guardian, + Skill, +} + +impl DidType { + pub fn as_str(&self) -> &'static str { + match self { + DidType::Node => "node", + DidType::Human => "human", + DidType::Agent => "agent", + DidType::Service => "service", + DidType::Mesh => "mesh", + DidType::Portal => "portal", + DidType::Guardian => "guardian", + DidType::Skill => "skill", + } + } + + pub fn from_str(s: &str) -> Option<Self> { + match s { + "node" => Some(DidType::Node), + "human" => Some(DidType::Human), + "agent" => Some(DidType::Agent), + "service" => Some(DidType::Service), + "mesh" => Some(DidType::Mesh), + "portal" => Some(DidType::Portal), + "guardian" => Some(DidType::Guardian), + "skill" => Some(DidType::Skill), + _ => None, + } + } +} + +#[derive(Debug)] +pub enum DidParseError { + InvalidPrefix, + MissingType, + MissingIdentifier, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_did_new() { + let did = Did::new(DidType::Human, "alice"); + assert_eq!(did.as_str(), "did:vm:human:alice"); + } + + #[test] + fn test_did_parse_valid() { + let did = Did::parse("did:vm:guardian:local").unwrap(); + assert_eq!(did.did_type(), Some(DidType::Guardian)); + assert_eq!(did.identifier(), Some("local")); + } + + #[test] + fn test_did_parse_invalid_prefix() { + let result = Did::parse("did:web:example.com"); + assert!(matches!(result, Err(DidParseError::InvalidPrefix))); + } + + #[test] + fn test_did_type_roundtrip() { + for did_type in [ + DidType::Node, DidType::Human, DidType::Agent, + DidType::Service, DidType::Mesh, DidType::Portal, + DidType::Guardian, DidType::Skill, + ] { + let s = did_type.as_str(); + let parsed = DidType::from_str(s); + assert_eq!(parsed, Some(did_type)); + } + } + + #[test] + fn test_did_type_unknown() { + assert_eq!(DidType::from_str("unknown"), None); + } +} diff --git a/vaultmesh-core/src/hash.rs b/vaultmesh-core/src/hash.rs new file mode 100644 index 0000000..e75cf36 --- /dev/null +++
b/vaultmesh-core/src/hash.rs @@ -0,0 +1,147 @@ +use serde::{Deserialize, Serialize}; + +/// VaultMesh hash with algorithm prefix +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct VmHash(String); + +impl VmHash { + /// Create hash from bytes using Blake3 + pub fn blake3(data: &[u8]) -> Self { + let hash = blake3::hash(data); + VmHash(format!("blake3:{}", hash.to_hex())) + } + + /// Create hash from JSON-serializable value + pub fn from_json<T: Serialize>(value: &T) -> Result<Self, serde_json::Error> { + let json = serde_json::to_vec(value)?; + Ok(Self::blake3(&json)) + } + + /// Create hash from file contents + pub fn from_file(path: &std::path::Path) -> std::io::Result<Self> { + let contents = std::fs::read(path)?; + Ok(Self::blake3(&contents)) + } + + /// Get the raw hex value without prefix + pub fn hex(&self) -> &str { + self.0.strip_prefix("blake3:").unwrap_or(&self.0) + } + + /// Get full prefixed value + pub fn as_str(&self) -> &str { + &self.0 + } +} + +/// Compute Merkle root from list of hashes +pub fn merkle_root(hashes: &[VmHash]) -> VmHash { + if hashes.is_empty() { + return VmHash::blake3(b"empty"); + } + if hashes.len() == 1 { + return hashes[0].clone(); + } + + let mut current_level: Vec<VmHash> = hashes.to_vec(); + + while current_level.len() > 1 { + let mut next_level = Vec::new(); + + for chunk in current_level.chunks(2) { + let combined = if chunk.len() == 2 { + format!("{}{}", chunk[0].hex(), chunk[1].hex()) + } else { + format!("{}{}", chunk[0].hex(), chunk[0].hex()) + }; + next_level.push(VmHash::blake3(combined.as_bytes())); + } + + current_level = next_level; + } + + current_level.remove(0) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_vmhash_blake3_deterministic() { + let data = b"test data"; + let h1 = VmHash::blake3(data); + let h2 = VmHash::blake3(data); + assert_eq!(h1, h2); + assert!(h1.as_str().starts_with("blake3:")); + } + + #[test] + fn test_vmhash_different_inputs() { + let h1 = VmHash::blake3(b"hello"); + let h2 = VmHash::blake3(b"world"); + assert_ne!(h1, h2); + } + + #[test] + fn test_vmhash_from_json() { + #[derive(Serialize)] + struct TestStruct { value: i32 } + + let obj = TestStruct { value: 42 }; + let hash = VmHash::from_json(&obj).unwrap(); + assert!(hash.as_str().starts_with("blake3:")); + assert_eq!(hash.hex().len(), 64); // 256 bits = 64 hex chars + } + + #[test] + fn test_vmhash_hex_extraction() { + let hash = VmHash::blake3(b"test"); + let hex = hash.hex(); + assert_eq!(hex.len(), 64); + assert!(!hex.contains("blake3:")); + } + + #[test] + fn test_merkle_root_empty() { + let hashes: Vec<VmHash> = vec![]; + let root = merkle_root(&hashes); + let expected = VmHash::blake3(b"empty"); + assert_eq!(root, expected); + } + + #[test] + fn test_merkle_root_single() { + let h = VmHash::blake3(b"single"); + let root = merkle_root(&[h.clone()]); + assert_eq!(root, h); + } + + #[test] + fn test_merkle_root_pair() { + let h1 = VmHash::blake3(b"left"); + let h2 = VmHash::blake3(b"right"); + let root = merkle_root(&[h1.clone(), h2.clone()]); + + // Verify it's not just one of the inputs + assert_ne!(root, h1); + assert_ne!(root, h2); + + // Verify determinism + let root2 = merkle_root(&[h1, h2]); + assert_eq!(root, root2); + } + + #[test] + fn test_merkle_root_odd_count() { + let h1 = VmHash::blake3(b"one"); + let h2 = VmHash::blake3(b"two"); + let h3 = VmHash::blake3(b"three"); + + let root = merkle_root(&[h1.clone(), h2.clone(), h3.clone()]); + + // Should be deterministic + let root2 = merkle_root(&[h1, h2, h3]); + assert_eq!(root, root2); + } +} diff --git
a/vaultmesh-core/src/lib.rs b/vaultmesh-core/src/lib.rs new file mode 100644 index 0000000..b6612ca --- /dev/null +++ b/vaultmesh-core/src/lib.rs @@ -0,0 +1,7 @@ +pub mod receipt; +pub mod did; +pub mod hash; + +pub use receipt::{Receipt, ReceiptHeader, ReceiptMeta, Scroll}; +pub use did::{Did, DidType, DidParseError}; +pub use hash::{VmHash, merkle_root}; diff --git a/vaultmesh-core/src/receipt.rs b/vaultmesh-core/src/receipt.rs new file mode 100644 index 0000000..a85599d --- /dev/null +++ b/vaultmesh-core/src/receipt.rs @@ -0,0 +1,79 @@ +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; + +/// Universal receipt header present in all receipts +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ReceiptHeader { + pub receipt_type: String, + pub timestamp: DateTime<Utc>, + pub root_hash: String, + pub tags: Vec<String>, +} + +/// Receipt metadata for tracking and querying +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ReceiptMeta { + pub scroll: Scroll, + pub sequence: u64, + pub anchor_epoch: Option<u64>, + pub proof_path: Option<String>, +} + +/// Scroll identifiers +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] +#[serde(rename_all = "snake_case")] +pub enum Scroll { + Drills, + Compliance, + Guardian, + Treasury, + Mesh, + OffSec, + Identity, + Observability, + Automation, + PsiField, +} + +impl Scroll { + pub fn jsonl_path(&self) -> &'static str { + match self { + Scroll::Drills => "receipts/drills/drill_runs.jsonl", + Scroll::Compliance => "receipts/compliance/oracle_answers.jsonl", + Scroll::Guardian => "receipts/guardian/anchor_events.jsonl", + Scroll::Treasury => "receipts/treasury/treasury_events.jsonl", + Scroll::Mesh => "receipts/mesh/mesh_events.jsonl", + Scroll::OffSec => "receipts/offsec/offsec_events.jsonl", + Scroll::Identity => "receipts/identity/identity_events.jsonl", + Scroll::Observability => "receipts/observability/observability_events.jsonl", + Scroll::Automation => "receipts/automation/automation_events.jsonl", + Scroll::PsiField => "receipts/psi/psi_events.jsonl", + } + } + + pub fn root_file(&self) -> &'static str { + match self { + Scroll::Drills => "ROOT.drills.txt", + Scroll::Compliance => "ROOT.compliance.txt", + Scroll::Guardian => "ROOT.guardian.txt", + Scroll::Treasury => "ROOT.treasury.txt", + Scroll::Mesh => "ROOT.mesh.txt", + Scroll::OffSec => "ROOT.offsec.txt", + Scroll::Identity => "ROOT.identity.txt", + Scroll::Observability => "ROOT.observability.txt", + Scroll::Automation => "ROOT.automation.txt", + Scroll::PsiField => "ROOT.psi.txt", + } + } +} + +/// Generic receipt wrapper +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Receipt<T> { + #[serde(flatten)] + pub header: ReceiptHeader, + #[serde(flatten)] + pub meta: ReceiptMeta, + #[serde(flatten)] + pub body: T, +} diff --git a/vaultmesh-guardian/Cargo.toml b/vaultmesh-guardian/Cargo.toml new file mode 100644 index 0000000..6941ff9 --- /dev/null +++ b/vaultmesh-guardian/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "vaultmesh-guardian" +version = "0.1.0" +edition = "2021" + +[dependencies] +vaultmesh-core = { path = "../vaultmesh-core" } +vaultmesh-observability = { path = "../vaultmesh-observability", optional = true } +chrono = { version = "0.4", features = ["serde"] } +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" + +[features] +default = [] +metrics = ["vaultmesh-observability"] + +[dev-dependencies] +tempfile = "3.8" +tokio = { version = "1.28", features = ["rt-multi-thread", "macros", "time"] } +reqwest = { version =
"0.11", features = ["json", "rustls-tls"] } +vaultmesh-observability = { path = "../vaultmesh-observability" } diff --git a/vaultmesh-guardian/src/lib.rs b/vaultmesh-guardian/src/lib.rs new file mode 100644 index 0000000..72ce6b0 --- /dev/null +++ b/vaultmesh-guardian/src/lib.rs @@ -0,0 +1,339 @@ +//! VaultMesh Guardian Engine - Merkle root anchoring for the Civilization Ledger +//! +//! The Guardian engine computes Merkle roots over scroll JSONL files and +//! emits anchor receipts that cryptographically bind all receipts at a point in time. + +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::fs::{self, OpenOptions}; +use std::io::{BufRead, BufReader, Write}; +use std::path::{Path, PathBuf}; +#[cfg(feature = "metrics")] +use std::sync::Arc; +#[cfg(feature = "metrics")] +use std::time::Instant; + +use vaultmesh_core::{merkle_root, Scroll, VmHash}; + +#[cfg(feature = "metrics")] +use vaultmesh_observability::ObservabilityEngine; + +/// Schema version for guardian receipts +pub const SCHEMA_VERSION: &str = "2.0.0"; + +/// Guardian anchor receipt body +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct AnchorReceipt { + pub schema_version: String, + #[serde(rename = "type")] + pub receipt_type: String, + pub timestamp: DateTime, + pub anchor_id: String, + pub backend: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub anchor_by: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub anchor_epoch: Option, + pub roots: HashMap, + pub scrolls: Vec, + pub anchor_hash: String, +} + +/// Result of computing a scroll's Merkle root +#[derive(Debug, Clone)] +pub struct ScrollRoot { + pub scroll: Scroll, + pub root: VmHash, + pub leaf_count: usize, +} + +/// Guardian engine for anchoring scroll Merkle roots +pub struct GuardianEngine { + /// Path to the VaultMesh root directory + pub vaultmesh_root: PathBuf, + /// DID of the guardian performing anchors + pub guardian_did: String, + /// Backend identifier (e.g., "local", "ethereum", "stellar") + pub backend: String, + /// Optional observability engine for metrics + #[cfg(feature = "metrics")] + pub observability: Option>, +} + +impl GuardianEngine { + /// Create a new Guardian engine + pub fn new(vaultmesh_root: impl AsRef, guardian_did: &str) -> Self { + Self { + vaultmesh_root: vaultmesh_root.as_ref().to_path_buf(), + guardian_did: guardian_did.to_string(), + backend: "local".to_string(), + #[cfg(feature = "metrics")] + observability: None, + } + } + + /// Set the backend for this engine + pub fn with_backend(mut self, backend: &str) -> Self { + self.backend = backend.to_string(); + self + } + + /// Set the observability engine for metrics + #[cfg(feature = "metrics")] + pub fn with_observability(mut self, obs: Arc) -> Self { + self.observability = Some(obs); + self + } + + /// Get the path to a scroll's JSONL file + fn scroll_path(&self, scroll: &Scroll) -> PathBuf { + self.vaultmesh_root.join(scroll.jsonl_path()) + } + + /// Compute the Merkle root for a single scroll + pub fn compute_scroll_root(&self, scroll: &Scroll) -> std::io::Result { + let path = self.scroll_path(scroll); + + if !path.exists() { + return Ok(ScrollRoot { + scroll: scroll.clone(), + root: VmHash::blake3(b"empty"), + leaf_count: 0, + }); + } + + let file = fs::File::open(&path)?; + let reader = BufReader::new(file); + + let mut hashes = Vec::new(); + for line in reader.lines() { + let line = line?; + if line.trim().is_empty() { + continue; + } + 
hashes.push(VmHash::blake3(line.as_bytes())); + } + + let leaf_count = hashes.len(); + let root = merkle_root(&hashes); + + Ok(ScrollRoot { + scroll: scroll.clone(), + root, + leaf_count, + }) + } + + /// Compute roots for all provided scrolls + pub fn compute_all_roots(&self, scrolls: &[Scroll]) -> std::io::Result<Vec<ScrollRoot>> { + scrolls.iter().map(|s| self.compute_scroll_root(s)).collect() + } + + /// Generate an anchor ID based on current timestamp + fn generate_anchor_id() -> String { + let now = Utc::now(); + format!("anchor-{}", now.format("%Y%m%d%H%M%S")) + } + + /// Anchor all provided scrolls and emit a guardian receipt + pub fn anchor(&self, scrolls: &[Scroll]) -> std::io::Result<AnchorReceipt> { + let roots = self.compute_all_roots(scrolls)?; + + // Build roots map + let mut roots_map = HashMap::new(); + for sr in &roots { + let scroll_name = match sr.scroll { + Scroll::Drills => "drills", + Scroll::Compliance => "compliance", + Scroll::Guardian => "guardian", + Scroll::Treasury => "treasury", + Scroll::Mesh => "mesh", + Scroll::OffSec => "offsec", + Scroll::Identity => "identity", + Scroll::Observability => "observability", + Scroll::Automation => "automation", + Scroll::PsiField => "psi", + }; + roots_map.insert(scroll_name.to_string(), sr.root.as_str().to_string()); + } + + // Compute anchor hash over all roots + let roots_json = serde_json::to_vec(&roots_map)?; + let anchor_hash = VmHash::blake3(&roots_json); + + let now = Utc::now(); + let anchor_epoch = now.timestamp() as u64; + + let receipt = AnchorReceipt { + schema_version: SCHEMA_VERSION.to_string(), + receipt_type: "guardian_anchor".to_string(), + timestamp: now, + anchor_id: Self::generate_anchor_id(), + backend: self.backend.clone(), + anchor_by: Some(self.guardian_did.clone()), + anchor_epoch: Some(anchor_epoch), + roots: roots_map, + scrolls: scrolls + .iter() + .map(|s| match s { + Scroll::Drills => "drills", + Scroll::Compliance => "compliance", + Scroll::Guardian => "guardian", + Scroll::Treasury => "treasury", + Scroll::Mesh => "mesh", + Scroll::OffSec => "offsec", + Scroll::Identity => "identity", + Scroll::Observability => "observability", + Scroll::Automation => "automation", + Scroll::PsiField => "psi", + }) + .map(String::from) + .collect(), + anchor_hash: anchor_hash.as_str().to_string(), + }; + + // Emit to guardian scroll + self.emit_receipt(&receipt)?; + + Ok(receipt) + } + + /// Write receipt to the guardian JSONL file + fn emit_receipt(&self, receipt: &AnchorReceipt) -> std::io::Result<()> { + #[cfg(feature = "metrics")] + let start = Instant::now(); + + let path = self.scroll_path(&Scroll::Guardian); + + // Ensure parent directory exists + if let Some(parent) = path.parent() { + fs::create_dir_all(parent)?; + } + + let mut file = OpenOptions::new() + .create(true) + .append(true) + .open(&path)?; + + let json = serde_json::to_string(receipt)?; + writeln!(file, "{}", json)?; + + // Update ROOT file + self.update_root_file(&Scroll::Guardian)?; + + // Record metrics if observability is enabled + #[cfg(feature = "metrics")] + if let Some(ref obs) = self.observability { + let elapsed = start.elapsed().as_secs_f64(); + obs.observe_emitted("guardian", elapsed); + // Set anchor age to 0 to indicate fresh anchor just occurred + obs.set_anchor_age(0.0); + } + + Ok(()) + } + + /// Update the ROOT.*.txt file for a scroll + fn update_root_file(&self, scroll: &Scroll) -> std::io::Result<()> { + let root_result = self.compute_scroll_root(scroll)?; + let root_path = self.vaultmesh_root.join(scroll.root_file()); + + fs::write(&root_path,
root_result.root.as_str())?; + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::TempDir; + + fn setup_test_env() -> (TempDir, GuardianEngine) { + let tmp = TempDir::new().unwrap(); + let receipts_dir = tmp.path().join("receipts/guardian"); + fs::create_dir_all(&receipts_dir).unwrap(); + + let engine = GuardianEngine::new(tmp.path(), "did:vm:guardian:test"); + (tmp, engine) + } + + #[test] + fn test_compute_scroll_root_empty() { + let (_tmp, engine) = setup_test_env(); + let result = engine.compute_scroll_root(&Scroll::Drills).unwrap(); + + assert_eq!(result.leaf_count, 0); + assert_eq!(result.root, VmHash::blake3(b"empty")); + } + + #[test] + fn test_compute_scroll_root_single_line() { + let (tmp, engine) = setup_test_env(); + + // Create a test JSONL file + let path = tmp.path().join("receipts/drills/drill_runs.jsonl"); + fs::create_dir_all(path.parent().unwrap()).unwrap(); + fs::write(&path, "{\"test\": \"data\"}\n").unwrap(); + + let result = engine.compute_scroll_root(&Scroll::Drills).unwrap(); + + assert_eq!(result.leaf_count, 1); + // Single leaf = its own hash + let expected = VmHash::blake3(b"{\"test\": \"data\"}"); + assert_eq!(result.root, expected); + } + + #[test] + fn test_compute_scroll_root_multiple_lines() { + let (tmp, engine) = setup_test_env(); + + let path = tmp.path().join("receipts/drills/drill_runs.jsonl"); + fs::create_dir_all(path.parent().unwrap()).unwrap(); + fs::write(&path, "{\"line\": 1}\n{\"line\": 2}\n{\"line\": 3}\n").unwrap(); + + let result = engine.compute_scroll_root(&Scroll::Drills).unwrap(); + + assert_eq!(result.leaf_count, 3); + // Root should be deterministic + let result2 = engine.compute_scroll_root(&Scroll::Drills).unwrap(); + assert_eq!(result.root, result2.root); + } + + #[test] + fn test_anchor_creates_receipt() { + let (tmp, engine) = setup_test_env(); + + let receipt = engine.anchor(&[Scroll::Guardian]).unwrap(); + + assert_eq!(receipt.receipt_type, "guardian_anchor"); + assert_eq!(receipt.backend, "local"); + assert!(receipt.anchor_by.is_some()); + assert!(receipt.anchor_id.starts_with("anchor-")); + + // Verify receipt was written to JSONL + let path = tmp.path().join("receipts/guardian/anchor_events.jsonl"); + assert!(path.exists()); + + let content = fs::read_to_string(&path).unwrap(); + assert!(content.contains("guardian_anchor")); + } + + #[test] + fn test_anchor_hash_deterministic() { + let (tmp, engine) = setup_test_env(); + + // Create some test data + let drills_path = tmp.path().join("receipts/drills/drill_runs.jsonl"); + fs::create_dir_all(drills_path.parent().unwrap()).unwrap(); + fs::write(&drills_path, "{\"drill\": 1}\n").unwrap(); + + // Compute roots twice + let roots1 = engine.compute_all_roots(&[Scroll::Drills]).unwrap(); + let roots2 = engine.compute_all_roots(&[Scroll::Drills]).unwrap(); + + // Same input = same roots + assert_eq!(roots1[0].root, roots2[0].root); + } +} diff --git a/vaultmesh-guardian/tests/metrics_integration.rs b/vaultmesh-guardian/tests/metrics_integration.rs new file mode 100644 index 0000000..76c6c41 --- /dev/null +++ b/vaultmesh-guardian/tests/metrics_integration.rs @@ -0,0 +1,75 @@ +//! Integration test: Guardian anchor updates observability metrics +//! +//! 
Run with: cargo test -p vaultmesh-guardian --features metrics --test metrics_integration + +#![cfg(feature = "metrics")] + +use std::net::TcpListener; +use std::sync::Arc; +use tokio::time::{sleep, Duration}; + +use vaultmesh_core::Scroll; +use vaultmesh_guardian::GuardianEngine; +use vaultmesh_observability::ObservabilityEngine; + +#[tokio::test] +async fn test_anchor_updates_observability_metrics() { + // Create temp directory for test + let tmp = tempfile::TempDir::new().unwrap(); + let receipts_dir = tmp.path().join("receipts/guardian"); + std::fs::create_dir_all(&receipts_dir).unwrap(); + + // Dynamic port allocation - bind to port 0 to get an available port + let listener = TcpListener::bind("127.0.0.1:0").expect("bind to port 0"); + let addr = listener.local_addr().expect("get local addr"); + drop(listener); // Release so ObservabilityEngine can bind + + // Create Observability engine and run its HTTP server on dynamic port + let obs = Arc::new(ObservabilityEngine::new()); + obs.clone().serve(&addr).await.expect("serve failed"); + + // Give the server time to start + sleep(Duration::from_millis(100)).await; + + // Build guardian with observability (using the metrics feature) + let guardian = GuardianEngine::new(tmp.path(), "did:vm:guardian:test") + .with_observability(obs.clone()); + + // Perform an anchor + let scrolls = vec![Scroll::Guardian, Scroll::Treasury]; + guardian.anchor(&scrolls).expect("anchor failed"); + + // Give time for metrics to update + sleep(Duration::from_millis(100)).await; + + // Fetch metrics from Observability server using dynamic address + let metrics_url = format!("http://{}/metrics", addr); + let resp = reqwest::get(&metrics_url) + .await + .expect("request failed"); + let body = resp.text().await.expect("body read failed"); + + // Assert metrics contain receipts counter + assert!( + body.contains("vaultmesh_receipts_total"), + "metrics should expose receipts counter" + ); + + // Assert metrics contain anchor age gauge + assert!( + body.contains("vaultmesh_anchor_age_seconds"), + "metrics should expose anchor age gauge" + ); + + // Assert guardian module appears in metrics + assert!( + body.contains("guardian"), + "guardian module should appear in metrics" + ); + + // Anchor age should be 0 (just anchored) + assert!( + body.contains("vaultmesh_anchor_age_seconds 0"), + "anchor age should be 0 after fresh anchor" + ); +} diff --git a/vaultmesh-identity/Cargo.toml b/vaultmesh-identity/Cargo.toml new file mode 100644 index 0000000..e3c67c7 --- /dev/null +++ b/vaultmesh-identity/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "vaultmesh-identity" +version = "0.1.0" +edition = "2021" + +[dependencies] +chrono = { version = "0.4", features = ["serde"] } +serde = { version = "1", features = ["derive"] } +serde_json = "1" + +vaultmesh-core = { path = "../vaultmesh-core" } diff --git a/vaultmesh-identity/src/lib.rs b/vaultmesh-identity/src/lib.rs new file mode 100644 index 0000000..fd4c630 --- /dev/null +++ b/vaultmesh-identity/src/lib.rs @@ -0,0 +1,284 @@ +//! vaultmesh-identity - Identity engine for DID management and receipts. + +use chrono::Utc; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; + +use vaultmesh_core::{ + Did, + DidType, + Receipt, + ReceiptHeader, + ReceiptMeta, + Scroll, + VmHash, +}; + +/// DID Document based on W3C DID Core with a VaultMesh extension namespace. 
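+/// The extension fields (`display_name`, `role`) ride alongside the W3C vocabulary under the
+/// https://vaultmesh.io/ns/did/v1 context entry added below.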
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct DidDocument {
+    #[serde(rename = "@context")]
+    pub context: Vec<String>,
+    pub id: Did,
+    pub controller: Option<Did>,
+    #[serde(rename = "verificationMethod")]
+    pub verification_method: Vec<VerificationMethod>,
+    pub authentication: Vec<String>,
+    #[serde(rename = "assertionMethod")]
+    pub assertion_method: Vec<String>,
+    pub service: Vec<Service>,
+    /// Optional human-facing display name.
+    pub display_name: Option<String>,
+    /// Optional role / purpose (e.g. "portal", "guardian", "skill", "human").
+    pub role: Option<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct VerificationMethod {
+    pub id: String,
+    #[serde(rename = "type")]
+    pub method_type: String,
+    pub controller: Did,
+    #[serde(rename = "publicKeyMultibase")]
+    pub public_key_multibase: String,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Service {
+    pub id: String,
+    #[serde(rename = "type")]
+    pub service_type: String,
+    #[serde(rename = "serviceEndpoint")]
+    pub service_endpoint: String,
+}
+
+/// Receipt body for `identity_did_create` events.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct DidCreateReceipt {
+    /// Newly created DID.
+    pub did: Did,
+    /// DID type as string (e.g. "portal", "guardian", "human").
+    pub did_type: String,
+    /// Optional controller DID.
+    pub controller: Option<Did>,
+    /// Actor who created this DID.
+    pub created_by: Did,
+    /// Human-friendly label.
+    pub display_name: Option<String>,
+    /// Logical role (portal, guardian, skill, human, etc.).
+    pub role: Option<String>,
+    /// Verification key type (e.g. Ed25519VerificationKey2020).
+    pub public_key_type: String,
+    /// Multibase-encoded public key.
+    pub public_key_multibase: String,
+    /// IDs of initial keys attached to the DID document.
+    pub initial_keys: Vec<String>,
+    /// Hash of the full DID Document JSON (blake3:...).
+    pub did_document_hash: String,
+}
+
+/// Minimal Identity engine: manages DIDs + emits receipts.
+pub struct IdentityEngine {
+    did_documents: HashMap<Did, DidDocument>,
+}
+
+impl Default for IdentityEngine {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl IdentityEngine {
+    /// Create a new, empty IdentityEngine.
+    pub fn new() -> Self {
+        IdentityEngine {
+            did_documents: HashMap::new(),
+        }
+    }
+
+    /// Create a DID with a single Ed25519 key and emit an `identity_did_create` receipt.
+    ///
+    /// This does NOT persist anything to disk; it just returns the receipt +
+    /// DID Document so the caller (CLI / service) can:
+    /// - store the keypair safely
+    /// - append the receipt to `receipts/identity/identity_events.jsonl`
+    pub fn create_did(
+        &mut self,
+        did_type: DidType,
+        name: &str,
+        controller: Option<Did>,
+        display_name: Option<String>,
+        role: Option<String>,
+        public_key_multibase: String,
+        created_by: Did,
+    ) -> Result<(Receipt, DidDocument), IdentityError> {
+        // did:vm:{type}:{name}
+        let did = Did::new(did_type, name);
+
+        if self.did_documents.contains_key(&did) {
+            return Err(IdentityError::DidExists);
+        }
+
+        let key_id = format!("{}#key-1", did.as_str());
+
+        let doc = DidDocument {
+            context: vec![
+                "https://www.w3.org/ns/did/v1".to_string(),
+                "https://vaultmesh.io/ns/did/v1".to_string(),
+            ],
+            id: did.clone(),
+            controller: controller.clone(),
+            verification_method: vec![VerificationMethod {
+                id: key_id.clone(),
+                method_type: "Ed25519VerificationKey2020".to_string(),
+                controller: did.clone(),
+                public_key_multibase: public_key_multibase.clone(),
+            }],
+            authentication: vec![key_id.clone()],
+            assertion_method: vec![key_id.clone()],
+            service: vec![],
+            display_name: display_name.clone(),
+            role: role.clone(),
+        };
+
+        // Hash of DID Document JSON for did_document_hash.
+        let doc_hash = VmHash::from_json(&doc)
+            .map_err(|_| IdentityError::SerializationError)?;
+
+        self.did_documents.insert(did.clone(), doc);
+
+        let receipt_body = DidCreateReceipt {
+            did: did.clone(),
+            did_type: did_type.as_str().to_string(),
+            controller,
+            created_by,
+            display_name,
+            role,
+            public_key_type: "Ed25519VerificationKey2020".to_string(),
+            public_key_multibase,
+            initial_keys: vec![key_id],
+            did_document_hash: doc_hash.as_str().to_string(),
+        };
+
+        // Root hash over the receipt body.
+        let root_hash = VmHash::from_json(&receipt_body)
+            .map_err(|_| IdentityError::SerializationError)?;
+
+        let receipt = Receipt {
+            header: ReceiptHeader {
+                receipt_type: "identity_did_create".to_string(),
+                timestamp: Utc::now(),
+                root_hash: root_hash.as_str().to_string(),
+                tags: vec![
+                    "identity".to_string(),
+                    "did".to_string(),
+                    "create".to_string(),
+                    receipt_body.did_type.clone(), // e.g. "portal"
+                ],
+            },
+            meta: ReceiptMeta {
+                scroll: Scroll::Identity,
+                sequence: 0,        // to be filled by append layer
+                anchor_epoch: None, // to be filled by Guardian
+                proof_path: None,
+            },
+            body: receipt_body,
+        };
+
+        // We just inserted it, so unwrap is safe.
+        let stored_doc = self.did_documents.get(&did).unwrap().clone();
+        Ok((receipt, stored_doc))
+    }
+
+    /// Resolve a DID into its document, if present.
+    pub fn resolve_did(&self, did: &Did) -> Option<&DidDocument> {
+        self.did_documents.get(did)
+    }
+
+    /// Check if a DID exists in the engine.
+    pub fn has_did(&self, did: &Did) -> bool {
+        self.did_documents.contains_key(did)
+    }
+
+    /// Get all DIDs in the engine.
+    pub fn list_dids(&self) -> Vec<&Did> {
+        self.did_documents.keys().collect()
+    }
+}
+
+/// Errors that can occur in the Identity engine.
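+/// `DidExists` signals a duplicate registration in this engine instance;
+/// `SerializationError` means serializing a document or receipt body to JSON for hashing failed.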
+#[derive(Debug)] +pub enum IdentityError { + DidExists, + SerializationError, +} + +impl std::fmt::Display for IdentityError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + IdentityError::DidExists => write!(f, "DID already exists"), + IdentityError::SerializationError => write!(f, "Serialization error"), + } + } +} + +impl std::error::Error for IdentityError {} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_create_did() { + let mut engine = IdentityEngine::new(); + let created_by = Did::new(DidType::Human, "karol"); + + let result = engine.create_did( + DidType::Portal, + "shield", + None, + Some("VaultMesh Auditor Portal (shield)".to_string()), + Some("portal".to_string()), + "z6MkhaXgBZDvotDkL5257faiztiGiC2QtKLGpbnnEGta2doK".to_string(), + created_by, + ); + + assert!(result.is_ok()); + let (receipt, doc) = result.unwrap(); + + assert_eq!(doc.id.as_str(), "did:vm:portal:shield"); + assert_eq!(receipt.header.receipt_type, "identity_did_create"); + assert_eq!(receipt.body.did_type, "portal"); + assert!(receipt.header.tags.contains(&"portal".to_string())); + } + + #[test] + fn test_duplicate_did_error() { + let mut engine = IdentityEngine::new(); + let created_by = Did::new(DidType::Human, "karol"); + + // First creation should succeed + let _ = engine.create_did( + DidType::Guardian, + "local", + None, + None, + None, + "z6Mktest".to_string(), + created_by.clone(), + ); + + // Second creation with same DID should fail + let result = engine.create_did( + DidType::Guardian, + "local", + None, + None, + None, + "z6MkotherKey".to_string(), + created_by, + ); + + assert!(matches!(result, Err(IdentityError::DidExists))); + } +} diff --git a/vaultmesh-mesh/Cargo.toml b/vaultmesh-mesh/Cargo.toml new file mode 100644 index 0000000..e701323 --- /dev/null +++ b/vaultmesh-mesh/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "vaultmesh-mesh" +version = "0.1.0" +edition = "2021" + +[dependencies] +vaultmesh-core = { path = "../vaultmesh-core" } +vaultmesh-observability = { path = "../vaultmesh-observability", optional = true } +chrono = { version = "0.4", features = ["serde"] } +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" + +[features] +default = [] +metrics = ["vaultmesh-observability"] + +[dev-dependencies] +tempfile = "3.8" +tokio = { version = "1.28", features = ["rt-multi-thread", "macros", "time"] } +reqwest = { version = "0.11", features = ["json", "rustls-tls"] } +vaultmesh-observability = { path = "../vaultmesh-observability" } diff --git a/vaultmesh-mesh/src/lib.rs b/vaultmesh-mesh/src/lib.rs new file mode 100644 index 0000000..81d7aff --- /dev/null +++ b/vaultmesh-mesh/src/lib.rs @@ -0,0 +1,824 @@ +//! VaultMesh Mesh Engine - Federation topology management +//! +//! The Mesh engine tracks nodes, routes, and capabilities in the federation +//! network, emitting receipts for all topology changes. 
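+//!
+//! A minimal usage sketch (illustrative root path and DIDs; API as defined below):
+//!
+//! ```no_run
+//! use std::collections::HashMap;
+//! use vaultmesh_mesh::{MeshEngine, NodeType};
+//!
+//! let mut mesh = MeshEngine::new("/var/lib/vaultmesh");
+//! mesh.node_join("did:vm:node:a", "Node A", NodeType::Edge,
+//!     HashMap::new(), "did:vm:human:admin").unwrap();
+//! mesh.route_add("did:vm:node:a", "did:vm:node:b", "wireguard").unwrap();
+//! let snapshot = mesh.topology_snapshot().unwrap();
+//! assert_eq!(snapshot.node_count, 1);
+//! ```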
+
+use chrono::{DateTime, Utc};
+use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
+use std::fs::{self, OpenOptions};
+use std::io::Write;
+use std::path::{Path, PathBuf};
+#[cfg(feature = "metrics")]
+use std::sync::Arc;
+#[cfg(feature = "metrics")]
+use std::time::Instant;
+
+use vaultmesh_core::{Scroll, VmHash};
+
+#[cfg(feature = "metrics")]
+use vaultmesh_observability::ObservabilityEngine;
+
+/// Schema version for mesh receipts
+pub const SCHEMA_VERSION: &str = "2.0.0";
+
+/// Node type in the mesh
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
+#[serde(rename_all = "snake_case")]
+pub enum NodeType {
+    Infrastructure,
+    Edge,
+    Oracle,
+    Guardian,
+    External,
+}
+
+/// Node status
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
+#[serde(rename_all = "snake_case")]
+pub enum NodeStatus {
+    Active,
+    Inactive,
+    Degraded,
+}
+
+/// A node in the mesh
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Node {
+    pub node_id: String,
+    pub display_name: String,
+    pub node_type: NodeType,
+    pub endpoints: HashMap<String, String>,
+    pub public_key: Option<String>,
+    pub capabilities: Vec<String>,
+    pub status: NodeStatus,
+    pub joined_at: DateTime<Utc>,
+    pub tags: Vec<String>,
+}
+
+/// Route status
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
+#[serde(rename_all = "snake_case")]
+pub enum RouteStatus {
+    Active,
+    Degraded,
+    Failed,
+}
+
+/// A route between nodes
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Route {
+    pub route_id: String,
+    pub source: String,
+    pub destination: String,
+    pub transport: String,
+    pub priority: u32,
+    pub status: RouteStatus,
+    pub latency_ms: Option<f64>,
+    pub established_at: DateTime<Utc>,
+}
+
+/// Capability scope
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
+#[serde(rename_all = "snake_case")]
+pub enum CapabilityScope {
+    Global,
+    Local,
+    Limited,
+}
+
+/// A capability granted to a node
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Capability {
+    pub capability_id: String,
+    pub node_id: String,
+    pub capability: String,
+    pub scope: CapabilityScope,
+    pub granted_by: String,
+    pub granted_at: DateTime<Utc>,
+    pub expires_at: Option<DateTime<Utc>>,
+}
+
+/// Topology snapshot
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct TopologySnapshot {
+    pub snapshot_id: String,
+    pub timestamp: DateTime<Utc>,
+    pub node_count: usize,
+    pub route_count: usize,
+    pub capability_count: usize,
+    pub nodes: Vec<String>,
+    pub topology_hash: String,
+}
+
+/// Generic mesh receipt wrapper
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct MeshReceipt {
+    pub schema_version: String,
+    #[serde(rename = "type")]
+    pub receipt_type: String,
+    pub timestamp: DateTime<Utc>,
+    pub scroll: String,
+    pub tags: Vec<String>,
+    pub root_hash: String,
+    pub body: serde_json::Value,
+}
+
+/// Node join receipt body
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct NodeJoinReceiptBody {
+    pub node_id: String,
+    pub display_name: String,
+    pub node_type: NodeType,
+    pub joined_by: String,
+}
+
+/// Node leave receipt body
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct NodeLeaveReceiptBody {
+    pub node_id: String,
+    pub reason: String,
+    pub left_by: String,
+}
+
+/// Route change receipt body
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct RouteChangeReceiptBody {
+    pub route_id: String,
+    pub operation: String,
+    pub source: String,
+    pub destination: String,
+    pub transport: String,
+}
+
+/// Capability grant receipt body
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct CapabilityGrantReceiptBody {
+    pub capability_id: String,
+    pub node_id: String,
+    pub capability: String,
+    pub scope: CapabilityScope,
+    pub granted_by: String,
+    pub expires_at: Option<DateTime<Utc>>,
+}
+
+/// Capability revoke receipt body
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct CapabilityRevokeReceiptBody {
+    pub capability_id: String,
+    pub node_id: String,
+    pub capability: String,
+    pub revoked_by: String,
+    pub reason: String,
+}
+
+/// Topology snapshot receipt body
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct TopologySnapshotReceiptBody {
+    pub snapshot_id: String,
+    pub node_count: usize,
+    pub route_count: usize,
+    pub capability_count: usize,
+    pub topology_hash: String,
+}
+
+/// Mesh engine errors
+#[derive(Debug)]
+pub enum MeshError {
+    NodeExists(String),
+    NodeNotFound(String),
+    RouteExists(String),
+    RouteNotFound(String),
+    CapabilityExists(String),
+    CapabilityNotFound(String),
+    IoError(std::io::Error),
+    SerializationError(serde_json::Error),
+}
+
+impl std::fmt::Display for MeshError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            MeshError::NodeExists(id) => write!(f, "Node already exists: {}", id),
+            MeshError::NodeNotFound(id) => write!(f, "Node not found: {}", id),
+            MeshError::RouteExists(id) => write!(f, "Route already exists: {}", id),
+            MeshError::RouteNotFound(id) => write!(f, "Route not found: {}", id),
+            MeshError::CapabilityExists(id) => write!(f, "Capability already exists: {}", id),
+            MeshError::CapabilityNotFound(id) => write!(f, "Capability not found: {}", id),
+            MeshError::IoError(e) => write!(f, "IO error: {}", e),
+            MeshError::SerializationError(e) => write!(f, "Serialization error: {}", e),
+        }
+    }
+}
+
+impl std::error::Error for MeshError {}
+
+impl From<std::io::Error> for MeshError {
+    fn from(e: std::io::Error) -> Self {
+        MeshError::IoError(e)
+    }
+}
+
+impl From<serde_json::Error> for MeshError {
+    fn from(e: serde_json::Error) -> Self {
+        MeshError::SerializationError(e)
+    }
+}
+
+/// Mesh engine for federation topology management
+pub struct MeshEngine {
+    /// Path to VaultMesh root
+    pub vaultmesh_root: PathBuf,
+    /// In-memory node registry
+    nodes: HashMap<String, Node>,
+    /// In-memory route registry
+    routes: HashMap<String, Route>,
+    /// In-memory capability registry
+    capabilities: HashMap<String, Capability>,
+    /// Optional observability engine for metrics
+    #[cfg(feature = "metrics")]
+    pub observability: Option<Arc<ObservabilityEngine>>,
+}
+
+impl MeshEngine {
+    /// Create a new Mesh engine
+    pub fn new(vaultmesh_root: impl AsRef<Path>) -> Self {
+        Self {
+            vaultmesh_root: vaultmesh_root.as_ref().to_path_buf(),
+            nodes: HashMap::new(),
+            routes: HashMap::new(),
+            capabilities: HashMap::new(),
+            #[cfg(feature = "metrics")]
+            observability: None,
+        }
+    }
+
+    /// Set the observability engine for metrics
+    #[cfg(feature = "metrics")]
+    pub fn with_observability(mut self, obs: Arc<ObservabilityEngine>) -> Self {
+        self.observability = Some(obs);
+        self
+    }
+
+    /// Get path to mesh events JSONL
+    fn events_path(&self) -> PathBuf {
+        self.vaultmesh_root.join(Scroll::Mesh.jsonl_path())
+    }
+
+    /// Register a new node in the mesh
+    pub fn node_join(
+        &mut self,
+        node_id: &str,
+        display_name: &str,
+        node_type: NodeType,
+        endpoints: HashMap<String, String>,
+        joined_by: &str,
+    ) -> Result<Node, MeshError> {
+        if self.nodes.contains_key(node_id) {
+            return Err(MeshError::NodeExists(node_id.to_string()));
+        }
+
+        let node = Node {
+            node_id: node_id.to_string(),
+            display_name: display_name.to_string(),
+            node_type: node_type.clone(),
+            endpoints,
+            public_key: None,
+            capabilities: Vec::new(),
+            status: NodeStatus::Active,
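+            // New nodes start Active; joined_at and the default tags are stamped here.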
+            joined_at: Utc::now(),
+            tags: vec!["mesh".to_string(), "node".to_string()],
+        };
+
+        self.nodes.insert(node_id.to_string(), node.clone());
+
+        // Emit receipt
+        let receipt_body = NodeJoinReceiptBody {
+            node_id: node_id.to_string(),
+            display_name: display_name.to_string(),
+            node_type,
+            joined_by: joined_by.to_string(),
+        };
+
+        self.emit_receipt("mesh_node_join", &receipt_body, vec![
+            "mesh".to_string(),
+            "node".to_string(),
+            "join".to_string(),
+            node_id.to_string(),
+        ])?;
+
+        Ok(node)
+    }
+
+    /// Remove a node from the mesh
+    pub fn node_leave(
+        &mut self,
+        node_id: &str,
+        reason: &str,
+        left_by: &str,
+    ) -> Result<(), MeshError> {
+        if !self.nodes.contains_key(node_id) {
+            return Err(MeshError::NodeNotFound(node_id.to_string()));
+        }
+
+        self.nodes.remove(node_id);
+
+        // Remove associated routes
+        self.routes.retain(|_, r| r.source != node_id && r.destination != node_id);
+
+        // Remove associated capabilities
+        self.capabilities.retain(|_, c| c.node_id != node_id);
+
+        // Emit receipt
+        let receipt_body = NodeLeaveReceiptBody {
+            node_id: node_id.to_string(),
+            reason: reason.to_string(),
+            left_by: left_by.to_string(),
+        };
+
+        self.emit_receipt("mesh_node_leave", &receipt_body, vec![
+            "mesh".to_string(),
+            "node".to_string(),
+            "leave".to_string(),
+            node_id.to_string(),
+        ])?;
+
+        Ok(())
+    }
+
+    /// Add a route between nodes
+    pub fn route_add(
+        &mut self,
+        source: &str,
+        destination: &str,
+        transport: &str,
+    ) -> Result<Route, MeshError> {
+        let route_id = format!("route-{}-to-{}", source.split(':').last().unwrap_or(source),
+            destination.split(':').last().unwrap_or(destination));
+
+        if self.routes.contains_key(&route_id) {
+            return Err(MeshError::RouteExists(route_id));
+        }
+
+        let route = Route {
+            route_id: route_id.clone(),
+            source: source.to_string(),
+            destination: destination.to_string(),
+            transport: transport.to_string(),
+            priority: 1,
+            status: RouteStatus::Active,
+            latency_ms: None,
+            established_at: Utc::now(),
+        };
+
+        self.routes.insert(route_id.clone(), route.clone());
+
+        // Emit receipt
+        let receipt_body = RouteChangeReceiptBody {
+            route_id: route_id.clone(),
+            operation: "add".to_string(),
+            source: source.to_string(),
+            destination: destination.to_string(),
+            transport: transport.to_string(),
+        };
+
+        self.emit_receipt("mesh_route_change", &receipt_body, vec![
+            "mesh".to_string(),
+            "route".to_string(),
+            "add".to_string(),
+        ])?;
+
+        Ok(route)
+    }
+
+    /// Remove a route
+    pub fn route_remove(&mut self, route_id: &str) -> Result<(), MeshError> {
+        let route = self.routes.remove(route_id)
+            .ok_or_else(|| MeshError::RouteNotFound(route_id.to_string()))?;
+
+        // Emit receipt
+        let receipt_body = RouteChangeReceiptBody {
+            route_id: route_id.to_string(),
+            operation: "remove".to_string(),
+            source: route.source,
+            destination: route.destination,
+            transport: route.transport,
+        };
+
+        self.emit_receipt("mesh_route_change", &receipt_body, vec![
+            "mesh".to_string(),
+            "route".to_string(),
+            "remove".to_string(),
+        ])?;
+
+        Ok(())
+    }
+
+    /// Grant a capability to a node
+    pub fn capability_grant(
+        &mut self,
+        node_id: &str,
+        capability: &str,
+        scope: CapabilityScope,
+        granted_by: &str,
+        expires_at: Option<DateTime<Utc>>,
+    ) -> Result<Capability, MeshError> {
+        let capability_id = format!("cap:{}:{}:{}",
+            node_id.split(':').last().unwrap_or(node_id),
+            capability,
+            Utc::now().format("%Y"));
+
+        if self.capabilities.contains_key(&capability_id) {
+            return Err(MeshError::CapabilityExists(capability_id));
+        }
+
+        let cap = Capability {
+            capability_id: capability_id.clone(),
+            node_id: node_id.to_string(),
+            capability: capability.to_string(),
+            scope: scope.clone(),
+            granted_by: granted_by.to_string(),
+            granted_at: Utc::now(),
+            expires_at,
+        };
+
+        self.capabilities.insert(capability_id.clone(), cap.clone());
+
+        // Update node capabilities list
+        if let Some(node) = self.nodes.get_mut(node_id) {
+            if !node.capabilities.contains(&capability.to_string()) {
+                node.capabilities.push(capability.to_string());
+            }
+        }
+
+        // Emit receipt
+        let receipt_body = CapabilityGrantReceiptBody {
+            capability_id: capability_id.clone(),
+            node_id: node_id.to_string(),
+            capability: capability.to_string(),
+            scope,
+            granted_by: granted_by.to_string(),
+            expires_at,
+        };
+
+        self.emit_receipt("mesh_capability_grant", &receipt_body, vec![
+            "mesh".to_string(),
+            "capability".to_string(),
+            "grant".to_string(),
+            capability.to_string(),
+        ])?;
+
+        Ok(cap)
+    }
+
+    /// Revoke a capability from a node
+    pub fn capability_revoke(
+        &mut self,
+        node_id: &str,
+        capability: &str,
+        revoked_by: &str,
+        reason: &str,
+    ) -> Result<(), MeshError> {
+        // Find and remove the capability
+        let cap_to_remove: Option<String> = self.capabilities.iter()
+            .find(|(_, c)| c.node_id == node_id && c.capability == capability)
+            .map(|(id, _)| id.clone());
+
+        let capability_id = cap_to_remove
+            .ok_or_else(|| MeshError::CapabilityNotFound(
+                format!("{}:{}", node_id, capability)
+            ))?;
+
+        self.capabilities.remove(&capability_id);
+
+        // Update node capabilities list
+        if let Some(node) = self.nodes.get_mut(node_id) {
+            node.capabilities.retain(|c| c != capability);
+        }
+
+        // Emit receipt
+        let receipt_body = CapabilityRevokeReceiptBody {
+            capability_id,
+            node_id: node_id.to_string(),
+            capability: capability.to_string(),
+            revoked_by: revoked_by.to_string(),
+            reason: reason.to_string(),
+        };
+
+        self.emit_receipt("mesh_capability_revoke", &receipt_body, vec![
+            "mesh".to_string(),
+            "capability".to_string(),
+            "revoke".to_string(),
+            capability.to_string(),
+        ])?;
+
+        Ok(())
+    }
+
+    /// Create a topology snapshot
+    pub fn topology_snapshot(&self) -> Result<TopologySnapshot, MeshError> {
+        let now = Utc::now();
+        let snapshot_id = format!("snapshot-{}", now.format("%Y%m%d%H%M%S"));
+
+        // Compute topology hash
+        let topology_data = serde_json::json!({
+            "nodes": self.nodes.keys().collect::<Vec<_>>(),
+            "routes": self.routes.keys().collect::<Vec<_>>(),
+            "capabilities": self.capabilities.keys().collect::<Vec<_>>(),
+        });
+        let topology_hash = VmHash::from_json(&topology_data)?;
+
+        let snapshot = TopologySnapshot {
+            snapshot_id: snapshot_id.clone(),
+            timestamp: now,
+            node_count: self.nodes.len(),
+            route_count: self.routes.len(),
+            capability_count: self.capabilities.len(),
+            nodes: self.nodes.keys().cloned().collect(),
+            topology_hash: topology_hash.as_str().to_string(),
+        };
+
+        // Emit receipt (use a separate method to avoid borrow issues)
+        let receipt_body = TopologySnapshotReceiptBody {
+            snapshot_id: snapshot.snapshot_id.clone(),
+            node_count: snapshot.node_count,
+            route_count: snapshot.route_count,
+            capability_count: snapshot.capability_count,
+            topology_hash: snapshot.topology_hash.clone(),
+        };
+
+        self.emit_snapshot_receipt(&receipt_body)?;
+
+        Ok(snapshot)
+    }
+
+    /// Get a node by ID
+    pub fn get_node(&self, node_id: &str) -> Option<&Node> {
+        self.nodes.get(node_id)
+    }
+
+    /// List all nodes
+    pub fn list_nodes(&self) -> Vec<&Node> {
+        self.nodes.values().collect()
+    }
+
+    /// Get a route by ID
+    pub fn get_route(&self, route_id: &str) -> Option<&Route> {
+        self.routes.get(route_id)
+    }
+
+    /// List all routes
+    pub fn list_routes(&self) -> Vec<&Route> {
+        self.routes.values().collect()
+    }
+
+    /// Check if node has capability
+    pub fn has_capability(&self, node_id: &str, capability: &str) -> bool {
+        self.capabilities.values()
+            .any(|c| c.node_id == node_id && c.capability == capability)
+    }
+
+    /// Emit a mesh receipt
+    fn emit_receipt<T: Serialize>(
+        &self,
+        receipt_type: &str,
+        body: &T,
+        tags: Vec<String>,
+    ) -> Result<(), MeshError> {
+        #[cfg(feature = "metrics")]
+        let start = Instant::now();
+
+        let body_json = serde_json::to_value(body)?;
+        let root_hash = VmHash::from_json(&body_json)?;
+
+        let receipt = MeshReceipt {
+            schema_version: SCHEMA_VERSION.to_string(),
+            receipt_type: receipt_type.to_string(),
+            timestamp: Utc::now(),
+            scroll: "mesh".to_string(),
+            tags,
+            root_hash: root_hash.as_str().to_string(),
+            body: body_json,
+        };
+
+        // Ensure directory exists
+        let path = self.events_path();
+        if let Some(parent) = path.parent() {
+            fs::create_dir_all(parent)?;
+        }
+
+        // Append to JSONL
+        let mut file = OpenOptions::new()
+            .create(true)
+            .append(true)
+            .open(&path)?;
+
+        let json = serde_json::to_string(&receipt)?;
+        writeln!(file, "{}", json)?;
+
+        // Update ROOT file
+        self.update_root_file()?;
+
+        // Record metrics if observability is enabled
+        #[cfg(feature = "metrics")]
+        if let Some(ref obs) = self.observability {
+            let elapsed = start.elapsed().as_secs_f64();
+            obs.observe_emitted("mesh", elapsed);
+        }
+
+        Ok(())
+    }
+
+    /// Emit snapshot receipt (separate method to work with &self)
+    fn emit_snapshot_receipt(&self, body: &TopologySnapshotReceiptBody) -> Result<(), MeshError> {
+        #[cfg(feature = "metrics")]
+        let start = Instant::now();
+
+        let body_json = serde_json::to_value(body)?;
+        let root_hash = VmHash::from_json(&body_json)?;
+
+        let receipt = MeshReceipt {
+            schema_version: SCHEMA_VERSION.to_string(),
+            receipt_type: "mesh_topology_snapshot".to_string(),
+            timestamp: Utc::now(),
+            scroll: "mesh".to_string(),
+            tags: vec!["mesh".to_string(), "snapshot".to_string(), "topology".to_string()],
+            root_hash: root_hash.as_str().to_string(),
+            body: body_json,
+        };
+
+        let path = self.events_path();
+        if let Some(parent) = path.parent() {
+            fs::create_dir_all(parent)?;
+        }
+
+        let mut file = OpenOptions::new()
+            .create(true)
+            .append(true)
+            .open(&path)?;
+
+        let json = serde_json::to_string(&receipt)?;
+        writeln!(file, "{}", json)?;
+
+        self.update_root_file()?;
+
+        #[cfg(feature = "metrics")]
+        if let Some(ref obs) = self.observability {
+            let elapsed = start.elapsed().as_secs_f64();
+            obs.observe_emitted("mesh", elapsed);
+        }
+
+        Ok(())
+    }
+
+    /// Update ROOT.mesh.txt with current Merkle root
+    fn update_root_file(&self) -> Result<(), MeshError> {
+        let events_path = self.events_path();
+        if !events_path.exists() {
+            return Ok(());
+        }
+
+        let content = fs::read_to_string(&events_path)?;
+        let hashes: Vec<VmHash> = content
+            .lines()
+            .filter(|l| !l.trim().is_empty())
+            .map(|l| VmHash::blake3(l.as_bytes()))
+            .collect();
+
+        let root = vaultmesh_core::merkle_root(&hashes);
+        let root_path = self.vaultmesh_root.join(Scroll::Mesh.root_file());
+        fs::write(&root_path, root.as_str())?;
+
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use tempfile::TempDir;
+
+    fn setup_test_env() -> (TempDir, MeshEngine) {
+        let tmp = TempDir::new().unwrap();
+        let engine = MeshEngine::new(tmp.path());
+        (tmp, engine)
+    }
+
+    #[test]
+    fn test_node_join_creates_receipt() {
+        let (tmp, mut engine) = setup_test_env();
+
+        let mut endpoints = HashMap::new();
+        endpoints.insert("portal".to_string(),
"https://brick-01.local:8443".to_string()); + + let node = engine.node_join( + "did:vm:node:brick-01", + "BRICK-01 (Dublin)", + NodeType::Infrastructure, + endpoints, + "did:vm:human:admin", + ).unwrap(); + + assert_eq!(node.node_id, "did:vm:node:brick-01"); + assert_eq!(node.status, NodeStatus::Active); + + // Verify receipt was written + let events_path = tmp.path().join("receipts/mesh/mesh_events.jsonl"); + assert!(events_path.exists()); + + let content = fs::read_to_string(&events_path).unwrap(); + assert!(content.contains("mesh_node_join")); + } + + #[test] + fn test_node_leave_removes_node() { + let (_tmp, mut engine) = setup_test_env(); + + engine.node_join( + "did:vm:node:brick-01", + "BRICK-01", + NodeType::Infrastructure, + HashMap::new(), + "did:vm:human:admin", + ).unwrap(); + + assert!(engine.get_node("did:vm:node:brick-01").is_some()); + + engine.node_leave( + "did:vm:node:brick-01", + "decommissioned", + "did:vm:human:admin", + ).unwrap(); + + assert!(engine.get_node("did:vm:node:brick-01").is_none()); + } + + #[test] + fn test_route_add_connects_nodes() { + let (_tmp, mut engine) = setup_test_env(); + + let route = engine.route_add( + "did:vm:node:brick-01", + "did:vm:node:brick-02", + "wireguard", + ).unwrap(); + + assert_eq!(route.source, "did:vm:node:brick-01"); + assert_eq!(route.destination, "did:vm:node:brick-02"); + assert_eq!(route.transport, "wireguard"); + assert_eq!(route.status, RouteStatus::Active); + + // Verify route is retrievable + assert!(engine.get_route(&route.route_id).is_some()); + } + + #[test] + fn test_capability_grant_and_check() { + let (_tmp, mut engine) = setup_test_env(); + + engine.node_join( + "did:vm:node:brick-01", + "BRICK-01", + NodeType::Infrastructure, + HashMap::new(), + "did:vm:human:admin", + ).unwrap(); + + let cap = engine.capability_grant( + "did:vm:node:brick-01", + "anchor", + CapabilityScope::Global, + "did:vm:human:admin", + None, + ).unwrap(); + + assert_eq!(cap.capability, "anchor"); + assert!(engine.has_capability("did:vm:node:brick-01", "anchor")); + + // Revoke and verify + engine.capability_revoke( + "did:vm:node:brick-01", + "anchor", + "did:vm:human:admin", + "security audit", + ).unwrap(); + + assert!(!engine.has_capability("did:vm:node:brick-01", "anchor")); + } + + #[test] + fn test_topology_snapshot() { + let (_tmp, mut engine) = setup_test_env(); + + // Add some nodes and routes + engine.node_join("did:vm:node:brick-01", "BRICK-01", NodeType::Infrastructure, HashMap::new(), "admin").unwrap(); + engine.node_join("did:vm:node:brick-02", "BRICK-02", NodeType::Infrastructure, HashMap::new(), "admin").unwrap(); + engine.route_add("did:vm:node:brick-01", "did:vm:node:brick-02", "wireguard").unwrap(); + + let snapshot = engine.topology_snapshot().unwrap(); + + assert_eq!(snapshot.node_count, 2); + assert_eq!(snapshot.route_count, 1); + assert!(snapshot.snapshot_id.starts_with("snapshot-")); + assert!(!snapshot.topology_hash.is_empty()); + } +} diff --git a/vaultmesh-mesh/tests/metrics_integration.rs b/vaultmesh-mesh/tests/metrics_integration.rs new file mode 100644 index 0000000..139fe70 --- /dev/null +++ b/vaultmesh-mesh/tests/metrics_integration.rs @@ -0,0 +1,76 @@ +//! Integration test: Mesh operations update observability metrics +//! +//! Run with: +//! cargo test -p vaultmesh-mesh --features metrics --test metrics_integration +//! 
+#![cfg(feature = "metrics")] + +use std::collections::HashMap; +use std::net::TcpListener; +use std::sync::Arc; +use std::time::Duration; + +use tempfile::TempDir; +use tokio::time::sleep; + +use vaultmesh_mesh::{MeshEngine, NodeType}; +use vaultmesh_observability::ObservabilityEngine; + +#[tokio::test] +async fn test_mesh_operations_update_observability_metrics() { + // Temporary dir for mesh persistence + let tmp = TempDir::new().expect("tempdir"); + + // Dynamic port allocation - bind to port 0 to get an available port + let listener = TcpListener::bind("127.0.0.1:0").expect("bind to port 0"); + let addr = listener.local_addr().expect("get local addr"); + drop(listener); // Release so ObservabilityEngine can bind + + // Start observability engine on the dynamically assigned port + let obs = Arc::new(ObservabilityEngine::new()); + obs.clone() + .serve(&addr) + .await + .expect("observability serve failed"); + + // Small delay to allow server to bind and be ready + sleep(Duration::from_millis(100)).await; + + // Create MeshEngine and attach observability + let mut mesh = MeshEngine::new(tmp.path()) + .with_observability(obs.clone()); + + // Perform a basic operation that emits a receipt + mesh.node_join( + "did:vm:node:test", + "Test Node", + NodeType::Infrastructure, + HashMap::new(), + "admin", + ) + .expect("node_join"); + + // Wait a little to allow the receipt emission and metric recording + sleep(Duration::from_millis(100)).await; + + // Fetch metrics from observability server using dynamic address + let metrics_url = format!("http://{}/metrics", addr); + let resp = reqwest::get(&metrics_url) + .await + .expect("request failed"); + assert!(resp.status().is_success(), "metrics endpoint must return 200"); + + let body = resp.text().await.expect("read body"); + + // Basic asserts: receipts counter exists and mesh module is present + assert!( + body.contains("vaultmesh_receipts_total"), + "metrics should expose receipts counter" + ); + + // Assert mesh module appears in metrics + assert!( + body.contains("mesh"), + "mesh module should appear in metrics" + ); +} diff --git a/vaultmesh-observability/Cargo.toml b/vaultmesh-observability/Cargo.toml new file mode 100644 index 0000000..f0fca64 --- /dev/null +++ b/vaultmesh-observability/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "vaultmesh-observability" +version = "0.1.0" +edition = "2021" + +[[bin]] +name = "vaultmesh-observability" +path = "src/main.rs" + +[dependencies] +vaultmesh-core = { path = "../vaultmesh-core" } +prometheus = "0.13" +tokio = { version = "1.28", features = ["rt-multi-thread", "macros", "time"] } +hyper = { version = "0.14", features = ["full"] } +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" + +[dev-dependencies] +reqwest = { version = "0.11", features = ["json", "rustls-tls"] } +tokio = { version = "1.28", features = ["rt-multi-thread", "macros", "time"] } diff --git a/vaultmesh-observability/Dockerfile b/vaultmesh-observability/Dockerfile new file mode 100644 index 0000000..fadf90a --- /dev/null +++ b/vaultmesh-observability/Dockerfile @@ -0,0 +1,23 @@ +FROM rust:1.75 as builder + +WORKDIR /usr/src/vaultmesh + +# Copy workspace Cargo files +COPY Cargo.toml Cargo.lock ./ +COPY vaultmesh-core ./vaultmesh-core +COPY vaultmesh-observability ./vaultmesh-observability + +# Build release binary +RUN cargo build --release --package vaultmesh-observability + +FROM debian:bookworm-slim + +RUN apt-get update && apt-get install -y ca-certificates && rm -rf /var/lib/apt/lists/* + +COPY 
--from=builder /usr/src/vaultmesh/target/release/vaultmesh-observability /usr/local/bin/vaultmesh-observability
+
+EXPOSE 9108
+
+USER 1000
+
+ENTRYPOINT ["/usr/local/bin/vaultmesh-observability"]
diff --git a/vaultmesh-observability/src/lib.rs b/vaultmesh-observability/src/lib.rs
new file mode 100644
index 0000000..8dcf01c
--- /dev/null
+++ b/vaultmesh-observability/src/lib.rs
@@ -0,0 +1,208 @@
+//! vaultmesh-observability
+//! ObservabilityEngine: Prometheus exporter that exposes VaultMesh receipts metrics.
+
+use prometheus::{
+    Encoder, Gauge, HistogramOpts, HistogramVec, IntCounterVec, Opts, Registry, TextEncoder,
+};
+use std::convert::Infallible;
+use std::net::SocketAddr;
+use std::sync::Arc;
+
+use hyper::service::{make_service_fn, service_fn};
+use hyper::{Body, Request, Response, Server};
+
+/// Schema version for observability receipts
+pub const SCHEMA_VERSION: &str = "2.0.0";
+
+/// ObservabilityEngine - Prometheus metrics exporter for VaultMesh
+#[derive(Clone)]
+pub struct ObservabilityEngine {
+    registry: Registry,
+    receipts_counter: IntCounterVec,
+    receipts_failed: IntCounterVec,
+    anchor_age: Gauge,
+    emit_latency: HistogramVec,
+}
+
+impl ObservabilityEngine {
+    /// Create a new ObservabilityEngine with all metrics registered
+    pub fn new() -> Self {
+        let registry = Registry::new();
+
+        let receipts_opts =
+            Opts::new("vaultmesh_receipts_total", "Number of receipts emitted by module");
+        let receipts_counter =
+            IntCounterVec::new(receipts_opts, &["module"]).expect("receipts counter");
+        registry
+            .register(Box::new(receipts_counter.clone()))
+            .unwrap();
+
+        let failed_opts = Opts::new(
+            "vaultmesh_receipts_failed_total",
+            "Number of failed receipt emissions",
+        );
+        let receipts_failed =
+            IntCounterVec::new(failed_opts, &["module", "reason"]).expect("failed counter");
+        registry
+            .register(Box::new(receipts_failed.clone()))
+            .unwrap();
+
+        let anchor_age = Gauge::with_opts(Opts::new(
+            "vaultmesh_anchor_age_seconds",
+            "Seconds since last guardian anchor",
+        ))
+        .expect("anchor age gauge");
+        registry.register(Box::new(anchor_age.clone())).unwrap();
+
+        let hist_opts = HistogramOpts::new(
+            "vaultmesh_emit_seconds",
+            "Histogram for receipt emit latency in seconds",
+        );
+        let emit_latency = HistogramVec::new(hist_opts, &["module"]).expect("emit latency hist");
+        registry.register(Box::new(emit_latency.clone())).unwrap();
+
+        Self {
+            registry,
+            receipts_counter,
+            receipts_failed,
+            anchor_age,
+            emit_latency,
+        }
+    }
+
+    /// Record a successful receipt emission
+    pub fn observe_emitted(&self, module: &str, latency_secs: f64) {
+        self.receipts_counter.with_label_values(&[module]).inc();
+        self.emit_latency
+            .with_label_values(&[module])
+            .observe(latency_secs);
+    }
+
+    /// Record a failed receipt emission
+    pub fn observe_failed(&self, module: &str, reason: &str) {
+        self.receipts_failed
+            .with_label_values(&[module, reason])
+            .inc();
+    }
+
+    /// Update the anchor age gauge (seconds since epoch of last anchor)
+    pub fn set_anchor_age(&self, secs: f64) {
+        self.anchor_age.set(secs);
+    }
+
+    /// Gather all metrics and return as Prometheus text format
+    pub fn gather_metrics(&self) -> String {
+        let mut buffer = vec![];
+        let encoder = TextEncoder::new();
+        let metric_families = self.registry.gather();
+        encoder.encode(&metric_families, &mut buffer).unwrap();
+        String::from_utf8(buffer).unwrap()
+    }
+
+    /// Start an HTTP server serving /metrics and /health endpoints
+    pub async fn serve(
+        self: Arc<Self>,
+        addr: &SocketAddr,
+    ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
+        let registry_self = self.clone();
+        let make_service = make_service_fn(move |_conn| {
+            let inner = registry_self.clone();
+            async move {
+                Ok::<_, Infallible>(service_fn(move |req: Request<Body>| {
+                    let inner_clone = inner.clone();
+                    async move {
+                        match (req.method().as_str(), req.uri().path()) {
+                            ("GET", "/metrics") => {
+                                let body = inner_clone.gather_metrics();
+                                Ok::<_, Infallible>(Response::new(Body::from(body)))
+                            }
+                            ("GET", "/health") => {
+                                Ok::<_, Infallible>(Response::new(Body::from("ok")))
+                            }
+                            _ => Ok::<_, Infallible>(
+                                Response::builder()
+                                    .status(404)
+                                    .body(Body::from("not found"))
+                                    .unwrap(),
+                            ),
+                        }
+                    }
+                }))
+            }
+        });
+
+        let server = Server::bind(addr).serve(make_service);
+        tokio::spawn(async move {
+            if let Err(e) = server.await {
+                eprintln!("server error: {}", e);
+            }
+        });
+        Ok(())
+    }
+}
+
+impl Default for ObservabilityEngine {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_engine_creation() {
+        let engine = ObservabilityEngine::new();
+        // Prometheus only outputs metrics after they've been observed
+        // So we need to observe something first
+        engine.observe_emitted("test", 0.01);
+        let metrics = engine.gather_metrics();
+        assert!(metrics.contains("vaultmesh_receipts_total"));
+    }
+
+    #[test]
+    fn test_observe_emitted() {
+        let engine = ObservabilityEngine::new();
+        engine.observe_emitted("guardian", 0.05);
+        engine.observe_emitted("guardian", 0.03);
+        engine.observe_emitted("treasury", 0.01);
+
+        let metrics = engine.gather_metrics();
+        assert!(metrics.contains("vaultmesh_receipts_total"));
+        assert!(metrics.contains("guardian"));
+        assert!(metrics.contains("treasury"));
+    }
+
+    #[test]
+    fn test_observe_failed() {
+        let engine = ObservabilityEngine::new();
+        engine.observe_failed("mesh", "io_error");
+
+        let metrics = engine.gather_metrics();
+        assert!(metrics.contains("vaultmesh_receipts_failed_total"));
+        assert!(metrics.contains("mesh"));
+        assert!(metrics.contains("io_error"));
+    }
+
+    #[test]
+    fn test_anchor_age() {
+        let engine = ObservabilityEngine::new();
+        engine.set_anchor_age(1234.5);
+
+        let metrics = engine.gather_metrics();
+        assert!(metrics.contains("vaultmesh_anchor_age_seconds"));
+        assert!(metrics.contains("1234.5"));
+    }
+
+    #[test]
+    fn test_emit_latency_histogram() {
+        let engine = ObservabilityEngine::new();
+        engine.observe_emitted("compliance", 0.001);
+        engine.observe_emitted("compliance", 0.002);
+
+        let metrics = engine.gather_metrics();
+        assert!(metrics.contains("vaultmesh_emit_seconds"));
+        assert!(metrics.contains("compliance"));
+    }
+}
diff --git a/vaultmesh-observability/src/main.rs b/vaultmesh-observability/src/main.rs
new file mode 100644
index 0000000..a283f19
--- /dev/null
+++ b/vaultmesh-observability/src/main.rs
@@ -0,0 +1,55 @@
+//! VaultMesh Observability Exporter
+//!
+//! 
HTTP server exposing Prometheus metrics at :9108/metrics + +use std::net::SocketAddr; +use std::sync::Arc; +use std::time::Duration; + +use vaultmesh_observability::ObservabilityEngine; + +#[tokio::main] +async fn main() { + // Default listen address + let addr: SocketAddr = std::env::var("VAULTMESH_METRICS_ADDR") + .unwrap_or_else(|_| "0.0.0.0:9108".to_string()) + .parse() + .expect("Invalid address"); + + let engine = Arc::new(ObservabilityEngine::new()); + + // Start HTTP server + engine + .clone() + .serve(&addr) + .await + .expect("Failed to start server"); + + println!("vaultmesh-observability exporter listening on http://{}", addr); + println!(" /metrics - Prometheus metrics"); + println!(" /health - Health check"); + + // Example: demo background emitter (in production, hooks call engine.observe_emitted) + let demo = engine.clone(); + tokio::spawn(async move { + loop { + // Simulate some metrics updates + demo.observe_emitted("guardian", 0.02); + demo.observe_emitted("treasury", 0.01); + + // In real usage, anchor_age would be computed from last anchor timestamp + let now = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_secs() as f64; + demo.set_anchor_age(now); + + tokio::time::sleep(Duration::from_secs(5)).await; + } + }); + + // Keep main alive + loop { + tokio::time::sleep(Duration::from_secs(3600)).await; + } +} diff --git a/vaultmesh-observability/tests/smoketest.rs b/vaultmesh-observability/tests/smoketest.rs new file mode 100644 index 0000000..b3669ee --- /dev/null +++ b/vaultmesh-observability/tests/smoketest.rs @@ -0,0 +1,85 @@ +//! Smoke test for the observability exporter HTTP server + +use std::net::SocketAddr; +use std::sync::Arc; +use tokio::time::{sleep, Duration}; +use vaultmesh_observability::ObservabilityEngine; + +#[tokio::test] +async fn test_metrics_endpoint_returns_200() { + let engine = Arc::new(ObservabilityEngine::new()); + let addr: SocketAddr = "127.0.0.1:19108".parse().unwrap(); + + // Prometheus only outputs metrics after they've been observed + engine.observe_emitted("test", 0.01); + engine.set_anchor_age(1700000000.0); + + engine.clone().serve(&addr).await.expect("serve failed"); + + // Give the server time to start + sleep(Duration::from_millis(100)).await; + + // Request /metrics + let resp = reqwest::get("http://127.0.0.1:19108/metrics") + .await + .expect("request failed"); + + assert!(resp.status().is_success(), "Expected 200 OK"); + + let body = resp.text().await.expect("body read failed"); + assert!( + body.contains("vaultmesh_receipts_total"), + "Expected vaultmesh_receipts_total metric" + ); + assert!( + body.contains("vaultmesh_anchor_age_seconds"), + "Expected vaultmesh_anchor_age_seconds metric" + ); +} + +#[tokio::test] +async fn test_health_endpoint() { + let engine = Arc::new(ObservabilityEngine::new()); + let addr: SocketAddr = "127.0.0.1:19109".parse().unwrap(); + + engine.clone().serve(&addr).await.expect("serve failed"); + sleep(Duration::from_millis(100)).await; + + let resp = reqwest::get("http://127.0.0.1:19109/health") + .await + .expect("request failed"); + + assert!(resp.status().is_success()); + let body = resp.text().await.unwrap(); + assert_eq!(body, "ok"); +} + +#[tokio::test] +async fn test_metrics_after_observations() { + let engine = Arc::new(ObservabilityEngine::new()); + let addr: SocketAddr = "127.0.0.1:19110".parse().unwrap(); + + // Record some metrics before starting server + engine.observe_emitted("guardian", 0.05); + engine.observe_emitted("treasury", 0.02); + 
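+    // The failure below should surface as vaultmesh_receipts_failed_total with module="mesh", reason="timeout".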
engine.observe_failed("mesh", "timeout"); + engine.set_anchor_age(1700000000.0); + + engine.clone().serve(&addr).await.expect("serve failed"); + sleep(Duration::from_millis(100)).await; + + let resp = reqwest::get("http://127.0.0.1:19110/metrics") + .await + .expect("request failed"); + let body = resp.text().await.unwrap(); + + // Check that our recorded metrics appear + assert!(body.contains("guardian"), "Expected guardian label"); + assert!(body.contains("treasury"), "Expected treasury label"); + assert!(body.contains("mesh"), "Expected mesh label"); + assert!(body.contains("timeout"), "Expected timeout reason"); + assert!( + body.contains("1.7e+09") || body.contains("1700000000"), + "Expected anchor age value" + ); +} diff --git a/vaultmesh-offsec/Cargo.toml b/vaultmesh-offsec/Cargo.toml new file mode 100644 index 0000000..7a4d949 --- /dev/null +++ b/vaultmesh-offsec/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "vaultmesh-offsec" +version = "0.1.0" +edition = "2021" + +[dependencies] +vaultmesh-core = { path = "../vaultmesh-core" } diff --git a/vaultmesh-offsec/src/lib.rs b/vaultmesh-offsec/src/lib.rs new file mode 100644 index 0000000..89924ad --- /dev/null +++ b/vaultmesh-offsec/src/lib.rs @@ -0,0 +1,5 @@ +//! vaultmesh-offsec - stub engine, to be implemented. + +pub fn ping() -> &'static str { + "vaultmesh-offsec alive" +} diff --git a/vaultmesh-psi/Cargo.toml b/vaultmesh-psi/Cargo.toml new file mode 100644 index 0000000..b946098 --- /dev/null +++ b/vaultmesh-psi/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "vaultmesh-psi" +version = "0.1.0" +edition = "2021" + +[dependencies] +vaultmesh-core = { path = "../vaultmesh-core" } diff --git a/vaultmesh-psi/src/lib.rs b/vaultmesh-psi/src/lib.rs new file mode 100644 index 0000000..ef926a9 --- /dev/null +++ b/vaultmesh-psi/src/lib.rs @@ -0,0 +1,5 @@ +//! vaultmesh-psi - stub engine, to be implemented. + +pub fn ping() -> &'static str { + "vaultmesh-psi alive" +} diff --git a/vaultmesh-treasury/Cargo.toml b/vaultmesh-treasury/Cargo.toml new file mode 100644 index 0000000..1013131 --- /dev/null +++ b/vaultmesh-treasury/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "vaultmesh-treasury" +version = "0.1.0" +edition = "2021" + +[dependencies] +vaultmesh-core = { path = "../vaultmesh-core" } +vaultmesh-observability = { path = "../vaultmesh-observability", optional = true } +chrono = { version = "0.4", features = ["serde"] } +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" + +[features] +default = [] +metrics = ["vaultmesh-observability"] + +[dev-dependencies] +tempfile = "3.8" diff --git a/vaultmesh-treasury/src/lib.rs b/vaultmesh-treasury/src/lib.rs new file mode 100644 index 0000000..5e0701f --- /dev/null +++ b/vaultmesh-treasury/src/lib.rs @@ -0,0 +1,466 @@ +//! VaultMesh Treasury Engine - Budget tracking and financial receipts +//! +//! The Treasury engine manages budgets, tracks spending, and emits receipts +//! for all financial operations in the Civilization Ledger. 
+
+use chrono::{DateTime, Utc};
+use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
+use std::fs::{self, OpenOptions};
+use std::io::Write;
+use std::path::{Path, PathBuf};
+#[cfg(feature = "metrics")]
+use std::sync::Arc;
+#[cfg(feature = "metrics")]
+use std::time::Instant;
+
+use vaultmesh_core::{Scroll, VmHash};
+
+#[cfg(feature = "metrics")]
+use vaultmesh_observability::ObservabilityEngine;
+
+/// Schema version for treasury receipts
+pub const SCHEMA_VERSION: &str = "2.0.0";
+
+/// A budget allocation
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Budget {
+    pub id: String,
+    pub name: String,
+    pub currency: String,
+    pub allocated: u64,
+    pub spent: u64,
+    pub created_at: DateTime<Utc>,
+    pub created_by: String,
+}
+
+impl Budget {
+    /// Remaining balance
+    pub fn remaining(&self) -> u64 {
+        self.allocated.saturating_sub(self.spent)
+    }
+
+    /// Check if a spend would exceed budget
+    pub fn can_spend(&self, amount: u64) -> bool {
+        self.spent + amount <= self.allocated
+    }
+}
+
+/// Receipt body for budget creation
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct BudgetCreateReceipt {
+    pub budget_id: String,
+    pub name: String,
+    pub currency: String,
+    pub allocated: u64,
+    pub created_by: String,
+}
+
+/// Receipt body for debit operations
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct TreasuryDebitReceipt {
+    pub budget_id: String,
+    pub amount: u64,
+    pub currency: String,
+    pub description: String,
+    pub debited_by: String,
+    pub new_spent: u64,
+    pub new_remaining: u64,
+}
+
+/// Receipt body for credit operations (refunds, adjustments)
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct TreasuryCreditReceipt {
+    pub budget_id: String,
+    pub amount: u64,
+    pub currency: String,
+    pub description: String,
+    pub credited_by: String,
+    pub new_allocated: u64,
+}
+
+/// Generic treasury receipt wrapper
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct TreasuryReceipt {
+    pub schema_version: String,
+    #[serde(rename = "type")]
+    pub receipt_type: String,
+    pub timestamp: DateTime<Utc>,
+    pub scroll: String,
+    pub tags: Vec<String>,
+    pub root_hash: String,
+    pub body: serde_json::Value,
+}
+
+/// Treasury engine errors
+#[derive(Debug)]
+pub enum TreasuryError {
+    BudgetExists(String),
+    BudgetNotFound(String),
+    InsufficientFunds { budget_id: String, requested: u64, available: u64 },
+    IoError(std::io::Error),
+    SerializationError(serde_json::Error),
+}
+
+impl std::fmt::Display for TreasuryError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            TreasuryError::BudgetExists(id) => write!(f, "Budget already exists: {}", id),
+            TreasuryError::BudgetNotFound(id) => write!(f, "Budget not found: {}", id),
+            TreasuryError::InsufficientFunds { budget_id, requested, available } => {
+                write!(f, "Insufficient funds in {}: requested {}, available {}", budget_id, requested, available)
+            }
+            TreasuryError::IoError(e) => write!(f, "IO error: {}", e),
+            TreasuryError::SerializationError(e) => write!(f, "Serialization error: {}", e),
+        }
+    }
+}
+
+impl std::error::Error for TreasuryError {}
+
+impl From<std::io::Error> for TreasuryError {
+    fn from(e: std::io::Error) -> Self {
+        TreasuryError::IoError(e)
+    }
+}
+
+impl From<serde_json::Error> for TreasuryError {
+    fn from(e: serde_json::Error) -> Self {
+        TreasuryError::SerializationError(e)
+    }
+}
+
+/// Treasury engine for budget management
+pub struct TreasuryEngine {
+    /// Path to VaultMesh root
+    pub vaultmesh_root: PathBuf,
+    /// In-memory budget cache
+    budgets: HashMap<String, Budget>,
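+    // Budgets are held in memory only; the durable record is the receipts JSONL on disk.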
/// Default currency + pub default_currency: String, + /// Optional observability engine for metrics + #[cfg(feature = "metrics")] + pub observability: Option>, +} + +impl TreasuryEngine { + /// Create a new Treasury engine + pub fn new(vaultmesh_root: impl AsRef) -> Self { + Self { + vaultmesh_root: vaultmesh_root.as_ref().to_path_buf(), + budgets: HashMap::new(), + default_currency: "EUR".to_string(), + #[cfg(feature = "metrics")] + observability: None, + } + } + + /// Set default currency + pub fn with_currency(mut self, currency: &str) -> Self { + self.default_currency = currency.to_string(); + self + } + + /// Set the observability engine for metrics + #[cfg(feature = "metrics")] + pub fn with_observability(mut self, obs: Arc) -> Self { + self.observability = Some(obs); + self + } + + /// Get path to treasury events JSONL + fn events_path(&self) -> PathBuf { + self.vaultmesh_root.join(Scroll::Treasury.jsonl_path()) + } + + /// Create a new budget + pub fn create_budget( + &mut self, + id: &str, + name: &str, + allocated: u64, + created_by: &str, + ) -> Result { + self.create_budget_with_currency(id, name, allocated, &self.default_currency.clone(), created_by) + } + + /// Create a new budget with specific currency + pub fn create_budget_with_currency( + &mut self, + id: &str, + name: &str, + allocated: u64, + currency: &str, + created_by: &str, + ) -> Result { + if self.budgets.contains_key(id) { + return Err(TreasuryError::BudgetExists(id.to_string())); + } + + let budget = Budget { + id: id.to_string(), + name: name.to_string(), + currency: currency.to_string(), + allocated, + spent: 0, + created_at: Utc::now(), + created_by: created_by.to_string(), + }; + + self.budgets.insert(id.to_string(), budget.clone()); + + // Emit receipt + let receipt_body = BudgetCreateReceipt { + budget_id: id.to_string(), + name: name.to_string(), + currency: currency.to_string(), + allocated, + created_by: created_by.to_string(), + }; + + self.emit_receipt("treasury_budget_create", &receipt_body, vec![ + "treasury".to_string(), + "budget".to_string(), + "create".to_string(), + id.to_string(), + ])?; + + Ok(budget) + } + + /// Debit (spend) from a budget + pub fn debit( + &mut self, + budget_id: &str, + amount: u64, + description: &str, + debited_by: &str, + ) -> Result { + let budget = self.budgets.get_mut(budget_id) + .ok_or_else(|| TreasuryError::BudgetNotFound(budget_id.to_string()))?; + + if !budget.can_spend(amount) { + return Err(TreasuryError::InsufficientFunds { + budget_id: budget_id.to_string(), + requested: amount, + available: budget.remaining(), + }); + } + + budget.spent += amount; + let updated = budget.clone(); + + // Emit receipt + let receipt_body = TreasuryDebitReceipt { + budget_id: budget_id.to_string(), + amount, + currency: updated.currency.clone(), + description: description.to_string(), + debited_by: debited_by.to_string(), + new_spent: updated.spent, + new_remaining: updated.remaining(), + }; + + self.emit_receipt("treasury_debit", &receipt_body, vec![ + "treasury".to_string(), + "debit".to_string(), + budget_id.to_string(), + ])?; + + Ok(updated) + } + + /// Credit (add funds) to a budget + pub fn credit( + &mut self, + budget_id: &str, + amount: u64, + description: &str, + credited_by: &str, + ) -> Result { + let budget = self.budgets.get_mut(budget_id) + .ok_or_else(|| TreasuryError::BudgetNotFound(budget_id.to_string()))?; + + budget.allocated += amount; + let updated = budget.clone(); + + // Emit receipt + let receipt_body = TreasuryCreditReceipt { + budget_id: 
+    pub fn credit(
+        &mut self,
+        budget_id: &str,
+        amount: u64,
+        description: &str,
+        credited_by: &str,
+    ) -> Result<Budget, TreasuryError> {
+        let budget = self.budgets.get_mut(budget_id)
+            .ok_or_else(|| TreasuryError::BudgetNotFound(budget_id.to_string()))?;
+
+        // Saturate rather than wrap if an enormous credit would overflow u64.
+        budget.allocated = budget.allocated.saturating_add(amount);
+        let updated = budget.clone();
+
+        // Emit receipt
+        let receipt_body = TreasuryCreditReceipt {
+            budget_id: budget_id.to_string(),
+            amount,
+            currency: updated.currency.clone(),
+            description: description.to_string(),
+            credited_by: credited_by.to_string(),
+            new_allocated: updated.allocated,
+        };
+
+        self.emit_receipt("treasury_credit", &receipt_body, vec![
+            "treasury".to_string(),
+            "credit".to_string(),
+            budget_id.to_string(),
+        ])?;
+
+        Ok(updated)
+    }
+
+    /// Get a budget by ID
+    pub fn get_budget(&self, id: &str) -> Option<&Budget> {
+        self.budgets.get(id)
+    }
+
+    /// List all budgets
+    pub fn list_budgets(&self) -> Vec<&Budget> {
+        self.budgets.values().collect()
+    }
+
+    /// Emit a treasury receipt
+    fn emit_receipt<T: Serialize>(
+        &self,
+        receipt_type: &str,
+        body: &T,
+        tags: Vec<String>,
+    ) -> Result<(), TreasuryError> {
+        #[cfg(feature = "metrics")]
+        let start = Instant::now();
+
+        let body_json = serde_json::to_value(body)?;
+        let root_hash = VmHash::from_json(&body_json)?;
+
+        let receipt = TreasuryReceipt {
+            schema_version: SCHEMA_VERSION.to_string(),
+            receipt_type: receipt_type.to_string(),
+            timestamp: Utc::now(),
+            scroll: "treasury".to_string(),
+            tags,
+            root_hash: root_hash.as_str().to_string(),
+            body: body_json,
+        };
+
+        // Ensure directory exists
+        let path = self.events_path();
+        if let Some(parent) = path.parent() {
+            fs::create_dir_all(parent)?;
+        }
+
+        // Append to JSONL
+        let mut file = OpenOptions::new()
+            .create(true)
+            .append(true)
+            .open(&path)?;
+
+        let json = serde_json::to_string(&receipt)?;
+        writeln!(file, "{}", json)?;
+
+        // Update ROOT file
+        self.update_root_file()?;
+
+        // Record metrics if observability is enabled
+        #[cfg(feature = "metrics")]
+        if let Some(ref obs) = self.observability {
+            let elapsed = start.elapsed().as_secs_f64();
+            obs.observe_emitted("treasury", elapsed);
+        }
+
+        Ok(())
+    }
+
+    /// Update ROOT.treasury.txt with current Merkle root
+    fn update_root_file(&self) -> Result<(), TreasuryError> {
+        let events_path = self.events_path();
+        if !events_path.exists() {
+            return Ok(());
+        }
+
+        let content = fs::read_to_string(&events_path)?;
+        let hashes: Vec<VmHash> = content
+            .lines()
+            .filter(|l| !l.trim().is_empty())
+            .map(|l| VmHash::blake3(l.as_bytes()))
+            .collect();
+
+        let root = vaultmesh_core::merkle_root(&hashes);
+        let root_path = self.vaultmesh_root.join(Scroll::Treasury.root_file());
+        fs::write(&root_path, root.as_str())?;
+
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use tempfile::TempDir;
+
+    fn setup_test_env() -> (TempDir, TreasuryEngine) {
+        let tmp = TempDir::new().unwrap();
+        let engine = TreasuryEngine::new(tmp.path());
+        (tmp, engine)
+    }
+
+    #[test]
+    fn test_create_budget() {
+        let (_tmp, mut engine) = setup_test_env();
+
+        let budget = engine.create_budget(
+            "ops-2025",
+            "Operations Budget 2025",
+            10000,
+            "did:vm:human:karol",
+        ).unwrap();
+
+        assert_eq!(budget.id, "ops-2025");
+        assert_eq!(budget.allocated, 10000);
+        assert_eq!(budget.spent, 0);
+        assert_eq!(budget.remaining(), 10000);
+    }
+
+    #[test]
+    fn test_debit_reduces_balance() {
+        let (_tmp, mut engine) = setup_test_env();
+
+        engine.create_budget("test", "Test", 1000, "did:vm:human:test").unwrap();
+        let budget = engine.debit("test", 300, "Test expense", "did:vm:human:test").unwrap();
+
+        assert_eq!(budget.spent, 300);
+        assert_eq!(budget.remaining(), 700);
+    }
+
+    #[test]
+    fn test_debit_insufficient_funds() {
+        let (_tmp, mut engine) = setup_test_env();
+
+        engine.create_budget("small", "Small Budget", 100, "did:vm:human:test").unwrap();
+        let result = engine.debit("small", 500, "Too much", "did:vm:human:test");
+
+        assert!(matches!(result, Err(TreasuryError::InsufficientFunds { .. })));
+    }
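+
+    // Illustrative sketch (added during editing, not in the original patch):
+    // exercises the checked addition in `Budget::can_spend` so a request
+    // near `u64::MAX` cannot wrap around the limit check.
+    #[test]
+    fn test_can_spend_rejects_overflowing_amount() {
+        let (_tmp, mut engine) = setup_test_env();
+
+        engine.create_budget("ovf", "Overflow", 100, "did:vm:human:test").unwrap();
+        engine.debit("ovf", 50, "Half spent", "did:vm:human:test").unwrap();
+
+        let result = engine.debit("ovf", u64::MAX, "Wraparound attempt", "did:vm:human:test");
+        assert!(matches!(result, Err(TreasuryError::InsufficientFunds { .. })));
+    }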
+
+    #[test]
+    fn test_credit_increases_allocation() {
+        let (_tmp, mut engine) = setup_test_env();
+
+        engine.create_budget("grow", "Growing Budget", 500, "did:vm:human:test").unwrap();
+        let budget = engine.credit("grow", 200, "Additional funding", "did:vm:human:test").unwrap();
+
+        assert_eq!(budget.allocated, 700);
+        assert_eq!(budget.remaining(), 700);
+    }
+
+    #[test]
+    fn test_receipts_emitted() {
+        let (tmp, mut engine) = setup_test_env();
+
+        engine.create_budget("receipt-test", "Receipt Test", 1000, "did:vm:human:test").unwrap();
+        engine.debit("receipt-test", 100, "Test debit", "did:vm:human:test").unwrap();
+
+        let events_path = tmp.path().join("receipts/treasury/treasury_events.jsonl");
+        assert!(events_path.exists());
+
+        let content = fs::read_to_string(&events_path).unwrap();
+        let lines: Vec<&str> = content.lines().collect();
+
+        assert_eq!(lines.len(), 2); // create + debit
+        assert!(lines[0].contains("treasury_budget_create"));
+        assert!(lines[1].contains("treasury_debit"));
+    }
+}
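+
+// Illustrative sketch (added during editing, not in the original patch):
+// round-trip check that the ROOT file written by `update_root_file` matches
+// a Merkle root recomputed from the JSONL with the same `vaultmesh_core`
+// primitives the engine itself uses.
+#[cfg(test)]
+mod root_verification_sketch {
+    use super::*;
+    use tempfile::TempDir;
+
+    #[test]
+    fn root_file_matches_recomputed_merkle_root() {
+        let tmp = TempDir::new().unwrap();
+        let mut engine = TreasuryEngine::new(tmp.path());
+        engine.create_budget("r", "Root", 10, "did:vm:human:test").unwrap();
+
+        // Recompute the root exactly as update_root_file does.
+        let events = fs::read_to_string(tmp.path().join(Scroll::Treasury.jsonl_path())).unwrap();
+        let hashes: Vec<VmHash> = events
+            .lines()
+            .filter(|l| !l.trim().is_empty())
+            .map(|l| VmHash::blake3(l.as_bytes()))
+            .collect();
+        let expected = vaultmesh_core::merkle_root(&hashes);
+
+        let written = fs::read_to_string(tmp.path().join(Scroll::Treasury.root_file())).unwrap();
+        assert_eq!(written, expected.as_str());
+    }
+}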