chore: pre-migration snapshot
Some checks failed
WAF Intelligence Guardrail / waf-intel (push) Waiting to run
Cloudflare Registry Validation / validate-registry (push) Has been cancelled

Layer0, MCP servers, Terraform consolidation
Vault Sovereign
2025-12-27 01:52:27 +00:00
parent 7f2e60e1c5
commit f0b8d962de
67 changed files with 14887 additions and 650 deletions

View File: cloudflare_safe/__init__.py

@@ -0,0 +1,11 @@
"""
cloudflare_safe MCP server.
Summary-first Cloudflare tooling with hard output caps and default redaction.
"""
from __future__ import annotations
__all__ = ["__version__"]
__version__ = "0.1.0"

View File: cloudflare_safe/__main__.py

@@ -0,0 +1,6 @@
from __future__ import annotations
from .server import main
if __name__ == "__main__":
main()

View File: cloudflare_safe/cloudflare_api.py

@@ -0,0 +1,496 @@
from __future__ import annotations
import hashlib
import json
import os
import urllib.error
import urllib.parse
import urllib.request
from dataclasses import dataclass
from datetime import datetime, timezone
from pathlib import Path
from typing import (
Any,
Dict,
Iterable,
List,
Mapping,
MutableMapping,
Optional,
Sequence,
Tuple,
)
CF_API_BASE = "https://api.cloudflare.com/client/v4"
def utc_now_iso() -> str:
return datetime.now(timezone.utc).isoformat()
def stable_hash(data: Any) -> str:
blob = json.dumps(
data, sort_keys=True, separators=(",", ":"), ensure_ascii=False
).encode("utf-8")
return hashlib.sha256(blob).hexdigest()
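# Example (hypothetical values): stable_hash({"b": 1, "a": 2}) equals
# stable_hash({"a": 2, "b": 1}) because keys are sorted before hashing,
# which is what makes the snapshot diffing below order-insensitive.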
class CloudflareError(RuntimeError):
pass
@dataclass(frozen=True)
class CloudflareContext:
api_token: str
account_id: str
@staticmethod
def from_env() -> "CloudflareContext":
api_token = (
os.getenv("CLOUDFLARE_API_TOKEN")
or os.getenv("CF_API_TOKEN")
or os.getenv("CLOUDFLARE_TOKEN")
or ""
).strip()
account_id = (
os.getenv("CLOUDFLARE_ACCOUNT_ID") or os.getenv("CF_ACCOUNT_ID") or ""
).strip()
if not api_token:
raise CloudflareError(
"Missing Cloudflare API token. Set CLOUDFLARE_API_TOKEN (or CF_API_TOKEN)."
)
if not account_id:
raise CloudflareError(
"Missing Cloudflare account id. Set CLOUDFLARE_ACCOUNT_ID (or CF_ACCOUNT_ID)."
)
return CloudflareContext(api_token=api_token, account_id=account_id)
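# Example setup (values hypothetical):
#   CLOUDFLARE_API_TOKEN=cf_xxx CLOUDFLARE_ACCOUNT_ID=0123abcd
# from_env() strips whitespace, falls back to the CF_-prefixed variants,
# and raises CloudflareError naming whichever variable is missing.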
class CloudflareClient:
def __init__(self, *, api_token: str) -> None:
self.api_token = api_token
def _request(
self,
method: str,
path: str,
*,
params: Optional[Mapping[str, str]] = None,
) -> Dict[str, Any]:
url = f"{CF_API_BASE}{path}"
if params:
url = f"{url}?{urllib.parse.urlencode(params)}"
req = urllib.request.Request(
url=url,
method=method,
headers={
"Authorization": f"Bearer {self.api_token}",
"Accept": "application/json",
"Content-Type": "application/json",
},
)
try:
with urllib.request.urlopen(req, timeout=30) as resp:
raw = resp.read()
except urllib.error.HTTPError as e:
raw = e.read() if hasattr(e, "read") else b""
detail = raw.decode("utf-8", "replace")
raise CloudflareError(
f"Cloudflare API HTTP {e.code} for {path}: {detail}"
) from e
except urllib.error.URLError as e:
raise CloudflareError(
f"Cloudflare API request failed for {path}: {e}"
) from e
try:
data = json.loads(raw.decode("utf-8", "replace"))
except json.JSONDecodeError:
raise CloudflareError(
f"Cloudflare API returned non-JSON for {path}: {raw[:200]!r}"
)
if not data.get("success", True):
raise CloudflareError(
f"Cloudflare API error for {path}: {data.get('errors')}"
)
return data
def paginate(
self,
path: str,
*,
params: Optional[Mapping[str, str]] = None,
per_page: int = 100,
max_pages: int = 5,
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
"""
Fetch a paginated Cloudflare endpoint.
Returns (results, result_info).
"""
results: List[Dict[str, Any]] = []
page = 1
last_info: Dict[str, Any] = {}
while True:
merged_params: Dict[str, str] = {
"page": str(page),
"per_page": str(per_page),
}
if params:
merged_params.update({k: str(v) for k, v in params.items()})
data = self._request("GET", path, params=merged_params)
batch = data.get("result") or []
if not isinstance(batch, list):
batch = [batch]
results.extend(batch)
last_info = data.get("result_info") or {}
total_pages = int(last_info.get("total_pages") or 1)
if page >= total_pages or page >= max_pages:
break
page += 1
return results, last_info
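# Example (hypothetical): client.paginate("/zones", params={"status": "active"},
# per_page=50, max_pages=2) issues GET /zones?page=1&per_page=50&status=active,
# follows result_info.total_pages, and stops after at most two pages.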
def list_zones(self) -> List[Dict[str, Any]]:
zones, _info = self.paginate("/zones", max_pages=2)
return zones
def list_dns_records_summary(
self, zone_id: str, *, max_pages: int = 1
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
return self.paginate(f"/zones/{zone_id}/dns_records", max_pages=max_pages)
def list_tunnels(self, account_id: str) -> List[Dict[str, Any]]:
tunnels, _info = self.paginate(
f"/accounts/{account_id}/cfd_tunnel", max_pages=2
)
return tunnels
def list_tunnel_connections(
self, account_id: str, tunnel_id: str
) -> List[Dict[str, Any]]:
data = self._request(
"GET", f"/accounts/{account_id}/cfd_tunnel/{tunnel_id}/connections"
)
result = data.get("result") or []
return result if isinstance(result, list) else [result]
def list_access_apps(self, account_id: str) -> List[Dict[str, Any]]:
apps, _info = self.paginate(f"/accounts/{account_id}/access/apps", max_pages=3)
return apps
def list_access_policies(
self, account_id: str, app_id: str
) -> List[Dict[str, Any]]:
policies, _info = self.paginate(
f"/accounts/{account_id}/access/apps/{app_id}/policies",
max_pages=3,
)
return policies
@dataclass(frozen=True)
class SnapshotMeta:
snapshot_id: str
created_at: str
scopes: List[str]
snapshot_path: str
class SnapshotStore:
def __init__(self, root_dir: Path) -> None:
self.root_dir = root_dir
self.snapshots_dir = root_dir / "snapshots"
self.diffs_dir = root_dir / "diffs"
self.snapshots_dir.mkdir(parents=True, exist_ok=True)
self.diffs_dir.mkdir(parents=True, exist_ok=True)
self._index: Dict[str, SnapshotMeta] = {}
def get(self, snapshot_id: str) -> SnapshotMeta:
# Note: the index is in-memory only; snapshot files persist on disk, but a
# snapshot_id is only resolvable within the process that created it.
if snapshot_id not in self._index:
raise CloudflareError(f"Unknown snapshot_id: {snapshot_id}")
return self._index[snapshot_id]
def load_snapshot(self, snapshot_id: str) -> Dict[str, Any]:
meta = self.get(snapshot_id)
return json.loads(Path(meta.snapshot_path).read_text(encoding="utf-8"))
def create_snapshot(
self,
*,
client: CloudflareClient,
ctx: CloudflareContext,
scopes: Sequence[str],
zone_id: Optional[str] = None,
zone_name: Optional[str] = None,
dns_max_pages: int = 1,
) -> Tuple[SnapshotMeta, Dict[str, Any]]:
scopes_norm = sorted(set(scopes))
created_at = utc_now_iso()
zones = client.list_zones()
zones_min = [
{
"id": z.get("id"),
"name": z.get("name"),
"status": z.get("status"),
"paused": z.get("paused"),
}
for z in zones
]
selected_zone_id = zone_id
if not selected_zone_id and zone_name:
for z in zones_min:
if z.get("name") == zone_name:
selected_zone_id = str(z.get("id"))
break
snapshot: Dict[str, Any] = {
"meta": {
"snapshot_id": "",
"created_at": created_at,
"account_id": ctx.account_id,
"scopes": scopes_norm,
},
"zones": zones_min,
}
if "tunnels" in scopes_norm:
tunnels = client.list_tunnels(ctx.account_id)
tunnels_min: List[Dict[str, Any]] = []
for t in tunnels:
tid = t.get("id")
name = t.get("name")
status = t.get("status")
connector_count: Optional[int] = None
last_seen: Optional[str] = None
if tid and status != "deleted":
conns = client.list_tunnel_connections(ctx.account_id, str(tid))
connector_count = len(conns)
# Pick the most recent 'opened_at' if present.
opened = [c.get("opened_at") for c in conns if isinstance(c, dict)]
opened = [o for o in opened if isinstance(o, str)]
last_seen = max(opened) if opened else None
tunnels_min.append(
{
"id": tid,
"name": name,
"status": status,
"created_at": t.get("created_at"),
"deleted_at": t.get("deleted_at"),
"connector_count": connector_count,
"last_seen": last_seen,
}
)
snapshot["tunnels"] = tunnels_min
if "access_apps" in scopes_norm:
apps = client.list_access_apps(ctx.account_id)
apps_min = [
{
"id": a.get("id"),
"name": a.get("name"),
"domain": a.get("domain"),
"type": a.get("type"),
"created_at": a.get("created_at"),
"updated_at": a.get("updated_at"),
}
for a in apps
]
snapshot["access_apps"] = apps_min
if "dns" in scopes_norm:
if selected_zone_id:
records, info = client.list_dns_records_summary(
selected_zone_id, max_pages=dns_max_pages
)
records_min = [
{
"id": r.get("id"),
"type": r.get("type"),
"name": r.get("name"),
"content": r.get("content"),
"proxied": r.get("proxied"),
"ttl": r.get("ttl"),
}
for r in records
]
snapshot["dns"] = {
"zone_id": selected_zone_id,
"zone_name": zone_name,
"result_info": info,
"records_sample": records_min,
}
else:
snapshot["dns"] = {
"note": "dns scope requested but no zone_id/zone_name provided; only zones list included",
}
snapshot_id = f"cf_{created_at.replace(':', '').replace('-', '').replace('.', '')}_{stable_hash(snapshot)[:10]}"
snapshot["meta"]["snapshot_id"] = snapshot_id
path = self.snapshots_dir / f"{snapshot_id}.json"
path.write_text(
json.dumps(snapshot, indent=2, ensure_ascii=False), encoding="utf-8"
)
meta = SnapshotMeta(
snapshot_id=snapshot_id,
created_at=created_at,
scopes=scopes_norm,
snapshot_path=str(path),
)
self._index[snapshot_id] = meta
return meta, snapshot
def diff(
self,
*,
from_snapshot_id: str,
to_snapshot_id: str,
scopes: Optional[Sequence[str]] = None,
) -> Dict[str, Any]:
before = self.load_snapshot(from_snapshot_id)
after = self.load_snapshot(to_snapshot_id)
scopes_before = set(before.get("meta", {}).get("scopes") or [])
scopes_after = set(after.get("meta", {}).get("scopes") or [])
scopes_all = sorted(scopes_before | scopes_after)
scopes_use = sorted(set(scopes or scopes_all))
def index_by_id(
items: Iterable[Mapping[str, Any]],
) -> Dict[str, Dict[str, Any]]:
out: Dict[str, Dict[str, Any]] = {}
for it in items:
_id = it.get("id")
if _id is None:
continue
out[str(_id)] = dict(it)
return out
diff_out: Dict[str, Any] = {
"from": from_snapshot_id,
"to": to_snapshot_id,
"scopes": scopes_use,
"changes": {},
}
for scope in scopes_use:
if scope not in {"tunnels", "access_apps", "zones"}:
continue
b_items = before.get(scope) or []
a_items = after.get(scope) or []
if not isinstance(b_items, list) or not isinstance(a_items, list):
continue
b_map = index_by_id(b_items)
a_map = index_by_id(a_items)
added = [a_map[k] for k in sorted(set(a_map) - set(b_map))]
removed = [b_map[k] for k in sorted(set(b_map) - set(a_map))]
changed: List[Dict[str, Any]] = []
for k in sorted(set(a_map) & set(b_map)):
if stable_hash(a_map[k]) != stable_hash(b_map[k]):
changed.append({"id": k, "before": b_map[k], "after": a_map[k]})
diff_out["changes"][scope] = {
"added": [{"id": x.get("id"), "name": x.get("name")} for x in added],
"removed": [
{"id": x.get("id"), "name": x.get("name")} for x in removed
],
"changed": [
{"id": x.get("id"), "name": x.get("after", {}).get("name")}
for x in changed
],
"counts": {
"added": len(added),
"removed": len(removed),
"changed": len(changed),
},
}
diff_path = self.diffs_dir / f"{from_snapshot_id}_to_{to_snapshot_id}.json"
diff_path.write_text(
json.dumps(diff_out, indent=2, ensure_ascii=False),
encoding="utf-8",
)
diff_out["diff_path"] = str(diff_path)
return diff_out
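# Example flow (paths and ids hypothetical):
#   store = SnapshotStore(Path("/tmp/cf_state"))
#   meta_a, _ = store.create_snapshot(client=client, ctx=ctx, scopes=["tunnels"])
#   meta_b, _ = store.create_snapshot(client=client, ctx=ctx, scopes=["tunnels"])
#   store.diff(from_snapshot_id=meta_a.snapshot_id, to_snapshot_id=meta_b.snapshot_id)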
def parse_cloudflared_config_ingress(config_text: str) -> List[Dict[str, str]]:
"""
Best-effort parser for cloudflared YAML config ingress rules.
We intentionally avoid a YAML dependency; this extracts common patterns:
- hostname: example.com
service: http://127.0.0.1:8080
"""
rules: List[Dict[str, str]] = []
lines = config_text.splitlines()
i = 0
while i < len(lines):
line = lines[i]
stripped = line.lstrip()
if not stripped.startswith("-"):
i += 1
continue
after_dash = stripped[1:].lstrip()
if not after_dash.startswith("hostname:"):
i += 1
continue
hostname = after_dash[len("hostname:") :].strip().strip('"').strip("'")
base_indent = len(line) - len(line.lstrip())
service = ""
j = i + 1
while j < len(lines):
next_line = lines[j]
if next_line.strip() == "":
j += 1
continue
next_indent = len(next_line) - len(next_line.lstrip())
if next_indent <= base_indent:
break
next_stripped = next_line.lstrip()
if next_stripped.startswith("service:"):
service = next_stripped[len("service:") :].strip().strip('"').strip("'")
break
j += 1
rules.append({"hostname": hostname, "service": service})
i = j
return rules
def ingress_summary_from_file(
*,
config_path: str,
max_rules: int = 50,
) -> Dict[str, Any]:
path = Path(config_path)
if not path.exists():
raise CloudflareError(f"cloudflared config not found: {config_path}")
text = path.read_text(encoding="utf-8", errors="replace")
rules = parse_cloudflared_config_ingress(text)
hostnames = sorted({r["hostname"] for r in rules if r.get("hostname")})
return {
"config_path": config_path,
"ingress_rule_count": len(rules),
"hostnames": hostnames[:max_rules],
"rules_sample": rules[:max_rules],
"truncated": len(rules) > max_rules,
}
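
The ingress parser above is deliberately line-based rather than a YAML load; a quick offline check of its behavior, using a hypothetical cloudflared config:

sample = """\
ingress:
  - hostname: app.example.com
    service: http://127.0.0.1:8080
  - hostname: ssh.example.com
    service: ssh://localhost:22
  - service: http_status:404
"""
rules = parse_cloudflared_config_ingress(sample)
# The catch-all rule without a hostname is skipped; two rules survive.
assert rules == [
    {"hostname": "app.example.com", "service": "http://127.0.0.1:8080"},
    {"hostname": "ssh.example.com", "service": "ssh://localhost:22"},
]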

View File: cloudflare_safe/server.py

@@ -0,0 +1,725 @@
from __future__ import annotations
import json
import os
import sys
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple
from .cloudflare_api import (
CloudflareClient,
CloudflareContext,
CloudflareError,
SnapshotStore,
ingress_summary_from_file,
)
MAX_BYTES_DEFAULT = 32_000
def _repo_root() -> Path:
# parents[3]: server.py -> cloudflare_safe -> mcp -> <parent dir> -> <repo root>
return Path(__file__).resolve().parents[3]
def _max_bytes() -> int:
raw = (os.getenv("VM_MCP_MAX_BYTES") or "").strip()
if not raw:
return MAX_BYTES_DEFAULT
try:
return max(4_096, int(raw))
except ValueError:
return MAX_BYTES_DEFAULT
def _redact(obj: Any) -> Any:
sensitive_keys = ("token", "secret", "password", "private", "key", "certificate")
if isinstance(obj, dict):
out: Dict[str, Any] = {}
for k, v in obj.items():
if any(s in str(k).lower() for s in sensitive_keys):
out[k] = "<REDACTED>"
else:
out[k] = _redact(v)
return out
if isinstance(obj, list):
return [_redact(v) for v in obj]
if isinstance(obj, str):
if obj.startswith("ghp_") or obj.startswith("github_pat_"):
return "<REDACTED>"
return obj
return obj
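# Example: _redact({"api_token": "abc", "name": "edge"}) returns
# {"api_token": "<REDACTED>", "name": "edge"}, since "api_token"
# contains the sensitive substring "token".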
def _safe_json(payload: Dict[str, Any]) -> str:
payload = _redact(payload)
raw = json.dumps(payload, ensure_ascii=False, separators=(",", ":"))
if len(raw.encode("utf-8")) <= _max_bytes():
return json.dumps(payload, ensure_ascii=False, indent=2)
# Truncate: keep only summary + next_steps.
truncated = {
"ok": payload.get("ok", True),
"truncated": True,
"summary": payload.get("summary", "Response exceeded max size; truncated."),
"next_steps": payload.get(
"next_steps",
[
"request a narrower scope (e.g., scopes=['tunnels'])",
"request an export path instead of inline content",
],
),
}
return json.dumps(truncated, ensure_ascii=False, indent=2)
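# When a payload exceeds the cap, only the envelope survives, e.g.
# {"ok": ..., "truncated": true, "summary": ..., "next_steps": [...]};
# callers are pointed at on-disk paths instead of inline data.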
def _mcp_text_result(
payload: Dict[str, Any], *, is_error: bool = False
) -> Dict[str, Any]:
result: Dict[str, Any] = {
"content": [{"type": "text", "text": _safe_json(payload)}]
}
if is_error:
result["isError"] = True
return result
def _default_state_dir() -> Path:
return _repo_root() / "archive_runtime" / "cloudflare_mcp"
class CloudflareSafeTools:
def __init__(self) -> None:
self.store = SnapshotStore(
Path(os.getenv("VM_CF_MCP_STATE_DIR") or _default_state_dir())
)
def cf_snapshot(
self,
*,
scopes: Optional[Sequence[str]] = None,
zone_id: Optional[str] = None,
zone_name: Optional[str] = None,
dns_max_pages: int = 1,
) -> Dict[str, Any]:
scopes_use = list(scopes or ["tunnels", "access_apps"])
ctx = CloudflareContext.from_env()
client = CloudflareClient(api_token=ctx.api_token)
meta, snapshot = self.store.create_snapshot(
client=client,
ctx=ctx,
scopes=scopes_use,
zone_id=zone_id,
zone_name=zone_name,
dns_max_pages=dns_max_pages,
)
summary = (
f"Snapshot {meta.snapshot_id} captured "
f"(scopes={','.join(meta.scopes)}) and written to {meta.snapshot_path}."
)
return {
"ok": True,
"summary": summary,
"data": {
"snapshot_id": meta.snapshot_id,
"created_at": meta.created_at,
"scopes": meta.scopes,
"snapshot_path": meta.snapshot_path,
"counts": {
"zones": len(snapshot.get("zones") or []),
"tunnels": len(snapshot.get("tunnels") or []),
"access_apps": len(snapshot.get("access_apps") or []),
},
},
"truncated": False,
"next_steps": [
"cf_config_diff(from_snapshot_id=..., to_snapshot_id=...)",
"cf_export_config(full=false, snapshot_id=...)",
],
}
def cf_refresh(
self,
*,
snapshot_id: str,
scopes: Optional[Sequence[str]] = None,
dns_max_pages: int = 1,
) -> Dict[str, Any]:
before_meta = self.store.get(snapshot_id)
before = self.store.load_snapshot(snapshot_id)
scopes_use = list(scopes or (before.get("meta", {}).get("scopes") or []))
ctx = CloudflareContext.from_env()
client = CloudflareClient(api_token=ctx.api_token)
meta, _snapshot = self.store.create_snapshot(
client=client,
ctx=ctx,
scopes=scopes_use,
zone_id=(before.get("dns") or {}).get("zone_id"),
zone_name=(before.get("dns") or {}).get("zone_name"),
dns_max_pages=dns_max_pages,
)
return {
"ok": True,
"summary": f"Refreshed {before_meta.snapshot_id} -> {meta.snapshot_id} (scopes={','.join(meta.scopes)}).",
"data": {
"from_snapshot_id": before_meta.snapshot_id,
"to_snapshot_id": meta.snapshot_id,
"snapshot_path": meta.snapshot_path,
},
"truncated": False,
"next_steps": [
"cf_config_diff(from_snapshot_id=..., to_snapshot_id=...)",
],
}
def cf_config_diff(
self,
*,
from_snapshot_id: str,
to_snapshot_id: str,
scopes: Optional[Sequence[str]] = None,
) -> Dict[str, Any]:
diff = self.store.diff(
from_snapshot_id=from_snapshot_id,
to_snapshot_id=to_snapshot_id,
scopes=scopes,
)
# Keep the response small; point to diff_path for full detail.
changes = diff.get("changes") or {}
counts = {
scope: (changes.get(scope) or {}).get("counts")
for scope in sorted(changes.keys())
}
return {
"ok": True,
"summary": f"Diff computed and written to {diff.get('diff_path')}.",
"data": {
"from_snapshot_id": from_snapshot_id,
"to_snapshot_id": to_snapshot_id,
"scopes": diff.get("scopes"),
"counts": counts,
"diff_path": diff.get("diff_path"),
},
"truncated": False,
"next_steps": [
"Use filesystem MCP to open diff_path for full details",
"Run cf_export_config(full=false, snapshot_id=...) for a safe export path",
],
}
def cf_export_config(
self,
*,
snapshot_id: Optional[str] = None,
full: bool = False,
scopes: Optional[Sequence[str]] = None,
) -> Dict[str, Any]:
if snapshot_id is None:
snap = self.cf_snapshot(scopes=scopes)
snapshot_id = str((snap.get("data") or {}).get("snapshot_id"))
meta = self.store.get(snapshot_id)
if not full:
return {
"ok": True,
"summary": "Export is summary-first; full config requires full=true.",
"data": {
"snapshot_id": meta.snapshot_id,
"snapshot_path": meta.snapshot_path,
},
"truncated": False,
"next_steps": [
"Use filesystem MCP to open snapshot_path",
"If you truly need inline data, call cf_export_config(full=true, snapshot_id=...)",
],
}
snapshot = self.store.load_snapshot(snapshot_id)
return {
"ok": True,
"summary": "Full snapshot export (redacted + size-capped). Prefer snapshot_path for large data.",
"data": snapshot,
"truncated": False,
"next_steps": [
f"Snapshot file: {meta.snapshot_path}",
],
}
def cf_tunnel_status(
self,
*,
snapshot_id: Optional[str] = None,
tunnel_name: Optional[str] = None,
tunnel_id: Optional[str] = None,
) -> Dict[str, Any]:
if snapshot_id:
snap = self.store.load_snapshot(snapshot_id)
tunnels = snap.get("tunnels") or []
else:
snap = self.cf_snapshot(scopes=["tunnels"])
sid = str((snap.get("data") or {}).get("snapshot_id"))
tunnels = self.store.load_snapshot(sid).get("tunnels") or []
def matches(t: Dict[str, Any]) -> bool:
if tunnel_id and str(t.get("id")) != str(tunnel_id):
return False
if tunnel_name and str(t.get("name")) != str(tunnel_name):
return False
return True
filtered = [t for t in tunnels if isinstance(t, dict) and matches(t)]
if not filtered and (tunnel_id or tunnel_name):
return {
"ok": False,
"summary": "Tunnel not found in snapshot.",
"data": {"tunnel_id": tunnel_id, "tunnel_name": tunnel_name},
"truncated": False,
"next_steps": ["Call cf_snapshot(scopes=['tunnels']) and retry."],
}
connectors = [t.get("connector_count") for t in filtered if isinstance(t, dict)]
connectors = [c for c in connectors if isinstance(c, int)]
return {
"ok": True,
"summary": f"Returned {len(filtered)} tunnel(s).",
"data": {
"tunnels": [
{
"id": t.get("id"),
"name": t.get("name"),
"status": t.get("status"),
"connector_count": t.get("connector_count"),
"last_seen": t.get("last_seen"),
}
for t in filtered
],
"connectors_total": sum(connectors) if connectors else 0,
},
"truncated": False,
"next_steps": [
"For local ingress hostnames, use cf_tunnel_ingress_summary(config_path='/etc/cloudflared/config.yml')",
],
}
def cf_tunnel_ingress_summary(
self,
*,
config_path: str = "/etc/cloudflared/config.yml",
full: bool = False,
max_rules: int = 50,
) -> Dict[str, Any]:
summary = ingress_summary_from_file(
config_path=config_path, max_rules=max_rules
)
if not full:
return {
"ok": True,
"summary": f"Parsed ingress hostnames from {config_path}.",
"data": {
"config_path": summary["config_path"],
"ingress_rule_count": summary["ingress_rule_count"],
"hostnames": summary["hostnames"],
"truncated": summary["truncated"],
},
"truncated": False,
"next_steps": [
"Call cf_tunnel_ingress_summary(full=true, ...) to include service mappings (still capped).",
],
}
return {
"ok": True,
"summary": f"Ingress summary (full=true) for {config_path}.",
"data": summary,
"truncated": False,
"next_steps": [],
}
def cf_access_policy_list(
self,
*,
app_id: Optional[str] = None,
) -> Dict[str, Any]:
ctx = CloudflareContext.from_env()
client = CloudflareClient(api_token=ctx.api_token)
if not app_id:
apps = client.list_access_apps(ctx.account_id)
apps_min = [
{
"id": a.get("id"),
"name": a.get("name"),
"domain": a.get("domain"),
"type": a.get("type"),
}
for a in apps
]
return {
"ok": True,
"summary": f"Returned {len(apps_min)} Access app(s). Provide app_id to list policies.",
"data": {"apps": apps_min},
"truncated": False,
"next_steps": [
"Call cf_access_policy_list(app_id=...)",
],
}
policies = client.list_access_policies(ctx.account_id, app_id)
policies_min = [
{
"id": p.get("id"),
"name": p.get("name"),
"decision": p.get("decision"),
"precedence": p.get("precedence"),
}
for p in policies
]
return {
"ok": True,
"summary": f"Returned {len(policies_min)} policy/policies for app_id={app_id}.",
"data": {"app_id": app_id, "policies": policies_min},
"truncated": False,
"next_steps": [],
}
TOOLS: List[Dict[str, Any]] = [
{
"name": "cf_snapshot",
"description": "Create a summary-first Cloudflare state snapshot (writes JSON to disk; returns snapshot_id + paths).",
"inputSchema": {
"type": "object",
"properties": {
"scopes": {
"type": "array",
"items": {"type": "string"},
"description": "Scopes to fetch (default: ['tunnels','access_apps']). Supported: zones,tunnels,access_apps,dns",
},
"zone_id": {"type": "string"},
"zone_name": {"type": "string"},
"dns_max_pages": {"type": "integer", "default": 1},
},
},
},
{
"name": "cf_refresh",
"description": "Refresh a prior snapshot (creates a new snapshot_id).",
"inputSchema": {
"type": "object",
"properties": {
"snapshot_id": {"type": "string"},
"scopes": {"type": "array", "items": {"type": "string"}},
"dns_max_pages": {"type": "integer", "default": 1},
},
"required": ["snapshot_id"],
},
},
{
"name": "cf_config_diff",
"description": "Diff two snapshots (summary counts inline; full diff written to disk).",
"inputSchema": {
"type": "object",
"properties": {
"from_snapshot_id": {"type": "string"},
"to_snapshot_id": {"type": "string"},
"scopes": {"type": "array", "items": {"type": "string"}},
},
"required": ["from_snapshot_id", "to_snapshot_id"],
},
},
{
"name": "cf_export_config",
"description": "Export snapshot config. Defaults to summary-only; full=true returns redacted + size-capped data.",
"inputSchema": {
"type": "object",
"properties": {
"snapshot_id": {"type": "string"},
"full": {"type": "boolean", "default": False},
"scopes": {"type": "array", "items": {"type": "string"}},
},
},
},
{
"name": "cf_tunnel_status",
"description": "Return tunnel status summary (connector count, last seen).",
"inputSchema": {
"type": "object",
"properties": {
"snapshot_id": {"type": "string"},
"tunnel_name": {"type": "string"},
"tunnel_id": {"type": "string"},
},
},
},
{
"name": "cf_tunnel_ingress_summary",
"description": "Parse cloudflared ingress hostnames from a local config file (never dumps full YAML unless full=true, still capped).",
"inputSchema": {
"type": "object",
"properties": {
"config_path": {
"type": "string",
"default": "/etc/cloudflared/config.yml",
},
"full": {"type": "boolean", "default": False},
"max_rules": {"type": "integer", "default": 50},
},
},
},
{
"name": "cf_access_policy_list",
"description": "List Access apps, or policies for a specific app_id (summary-only).",
"inputSchema": {
"type": "object",
"properties": {
"app_id": {"type": "string"},
},
},
},
]
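# Example tools/call request this server accepts (newline-delimited or
# Content-Length framed; values hypothetical):
# {"jsonrpc": "2.0", "id": 7, "method": "tools/call",
#  "params": {"name": "cf_tunnel_status", "arguments": {"tunnel_name": "prod"}}}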
class StdioJsonRpc:
def __init__(self) -> None:
self._in = sys.stdin.buffer
self._out = sys.stdout.buffer
self._mode: str | None = None # "headers" | "line"
def read_message(self) -> Optional[Dict[str, Any]]:
while True:
if self._mode == "line":
line = self._in.readline()
if not line:
return None
raw = line.decode("utf-8", "replace").strip()
if not raw:
continue
try:
msg = json.loads(raw)
except Exception:
continue
if isinstance(msg, dict):
return msg
continue
first = self._in.readline()
if not first:
return None
if first in (b"\r\n", b"\n"):
continue
# Auto-detect newline-delimited JSON framing.
if self._mode is None and first.lstrip().startswith(b"{"):
try:
msg = json.loads(first.decode("utf-8", "replace"))
except Exception:
msg = None
if isinstance(msg, dict):
self._mode = "line"
return msg
headers: Dict[str, str] = {}
try:
text = first.decode("utf-8", "replace").strip()
except Exception:
continue
if ":" not in text:
continue
k, v = text.split(":", 1)
headers[k.lower().strip()] = v.strip()
while True:
line = self._in.readline()
if not line:
return None
if line in (b"\r\n", b"\n"):
break
try:
text = line.decode("utf-8", "replace").strip()
except Exception:
continue
if ":" not in text:
continue
k, v = text.split(":", 1)
headers[k.lower().strip()] = v.strip()
if "content-length" not in headers:
return None
try:
length = int(headers["content-length"])
except ValueError:
return None
body = self._in.read(length)
if not body or len(body) < length:
# EOF or truncated body; nothing valid to parse.
return None
self._mode = "headers"
msg = json.loads(body.decode("utf-8", "replace"))
if isinstance(msg, dict):
return msg
return None
def write_message(self, message: Dict[str, Any]) -> None:
if self._mode == "line":
payload = json.dumps(
message, ensure_ascii=False, separators=(",", ":"), default=str
).encode("utf-8")
self._out.write(payload + b"\n")
self._out.flush()
return
body = json.dumps(message, ensure_ascii=False, separators=(",", ":")).encode(
"utf-8"
)
header = f"Content-Length: {len(body)}\r\n\r\n".encode("utf-8")
self._out.write(header)
self._out.write(body)
self._out.flush()
def main() -> None:
tools = CloudflareSafeTools()
rpc = StdioJsonRpc()
handlers: Dict[str, Callable[[Dict[str, Any]], Dict[str, Any]]] = {
"cf_snapshot": lambda a: tools.cf_snapshot(**a),
"cf_refresh": lambda a: tools.cf_refresh(**a),
"cf_config_diff": lambda a: tools.cf_config_diff(**a),
"cf_export_config": lambda a: tools.cf_export_config(**a),
"cf_tunnel_status": lambda a: tools.cf_tunnel_status(**a),
"cf_tunnel_ingress_summary": lambda a: tools.cf_tunnel_ingress_summary(**a),
"cf_access_policy_list": lambda a: tools.cf_access_policy_list(**a),
}
while True:
msg = rpc.read_message()
if msg is None:
return
method = msg.get("method")
msg_id = msg.get("id")
params = msg.get("params") or {}
try:
if method == "initialize":
result = {
"protocolVersion": "2024-11-05",
"serverInfo": {"name": "cloudflare_safe", "version": "0.1.0"},
"capabilities": {"tools": {}},
}
rpc.write_message({"jsonrpc": "2.0", "id": msg_id, "result": result})
continue
if method == "tools/list":
rpc.write_message(
{"jsonrpc": "2.0", "id": msg_id, "result": {"tools": TOOLS}}
)
continue
if method == "tools/call":
tool_name = str(params.get("name") or "")
args = params.get("arguments") or {}
if tool_name not in handlers:
rpc.write_message(
{
"jsonrpc": "2.0",
"id": msg_id,
"result": _mcp_text_result(
{
"ok": False,
"summary": f"Unknown tool: {tool_name}",
"data": {"known_tools": sorted(handlers.keys())},
"truncated": False,
"next_steps": ["Call tools/list"],
},
is_error=True,
),
}
)
continue
try:
payload = handlers[tool_name](args)
rpc.write_message(
{
"jsonrpc": "2.0",
"id": msg_id,
"result": _mcp_text_result(payload),
}
)
except CloudflareError as e:
rpc.write_message(
{
"jsonrpc": "2.0",
"id": msg_id,
"result": _mcp_text_result(
{
"ok": False,
"summary": str(e),
"truncated": False,
"next_steps": [
"Verify CLOUDFLARE_API_TOKEN and CLOUDFLARE_ACCOUNT_ID are set",
"Retry with a narrower scope",
],
},
is_error=True,
),
}
)
except Exception as e: # noqa: BLE001
rpc.write_message(
{
"jsonrpc": "2.0",
"id": msg_id,
"result": _mcp_text_result(
{
"ok": False,
"summary": f"Unhandled error: {e}",
"truncated": False,
"next_steps": ["Retry with a narrower scope"],
},
is_error=True,
),
}
)
continue
# Ignore notifications.
if msg_id is None:
continue
rpc.write_message(
{
"jsonrpc": "2.0",
"id": msg_id,
"result": _mcp_text_result(
{
"ok": False,
"summary": f"Unsupported method: {method}",
"truncated": False,
},
is_error=True,
),
}
)
except Exception as e: # noqa: BLE001
# Last-resort: avoid crashing the server.
if msg_id is not None:
rpc.write_message(
{
"jsonrpc": "2.0",
"id": msg_id,
"result": _mcp_text_result(
{
"ok": False,
"summary": f"fatal error: {e}",
"truncated": False,
},
),
}
)
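
A minimal smoke test for the stdio loop above, assuming the package is importable as mcp.cloudflare_safe (a hypothetical module path; adjust to the actual layout). It uses the newline-delimited framing that read_message auto-detects:

import json
import subprocess

proc = subprocess.Popen(
    ["python", "-m", "mcp.cloudflare_safe"],  # module path is an assumption
    stdin=subprocess.PIPE,
    stdout=subprocess.PIPE,
)

def call(msg):
    # One JSON object per line; the server replies in the same framing.
    proc.stdin.write((json.dumps(msg) + "\n").encode("utf-8"))
    proc.stdin.flush()
    return json.loads(proc.stdout.readline())

print(call({"jsonrpc": "2.0", "id": 1, "method": "initialize", "params": {}}))
print(call({"jsonrpc": "2.0", "id": 2, "method": "tools/list", "params": {}}))
proc.stdin.close()  # Server exits cleanly when stdin reaches EOF.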