# uploader-bot/app/api/routes/network.py
from __future__ import annotations

import json
from datetime import datetime
from typing import Dict, Any
from urllib.parse import urlparse

from base58 import b58decode
from sanic import response

from app.core.logger import make_log
from app.core.network.constants import CURRENT_PROTOCOL_VERSION, NODE_TYPE_PRIVATE
from app.core.network.config import HANDSHAKE_TS_TOLERANCE_SEC, NODE_PRIVACY
from app.core.network.handshake import build_handshake_payload, compute_node_info, sign_response
from app.core.network.nodes import upsert_known_node, list_known_public_nodes
from app.core.network.semver import compatibility
from app.core.network.guard import check_rate_limit, check_timestamp_fresh, check_and_remember_nonce
from app.core.ipfs_client import swarm_connect
from app.core._config import PROJECT_HOST
from app.core.events.service import record_event


def _port_from_public_host(public_host: str) -> int:
    """Return an integer port extracted from a public_host URL or host:port string."""
    if not public_host:
        return 80
    parsed = urlparse(public_host)
    # Only treat the value as a URL when a netloc is present: bare "host:port"
    # strings are typically parsed by urlparse with the host as the scheme and an
    # empty netloc, so they must fall through to the host:port branch below.
    if parsed.scheme and parsed.netloc:
        if parsed.port:
            return parsed.port
        return 443 if parsed.scheme == "https" else 80
    host_port = public_host.strip()
    if ":" in host_port:
        candidate = host_port.rsplit(":", 1)[-1]
        try:
            return int(candidate)
        except (TypeError, ValueError):
            pass
    return 80
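
# Illustrative examples for _port_from_public_host (hostnames are assumptions):
#   "https://node.example.org"      -> 443
#   "http://node.example.org:8080"  -> 8080
#   "node.example.org:8080"         -> 8080
#   ""                              -> 80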


def _extract_ipfs_meta(payload: Dict[str, Any]) -> Dict[str, Any]:
    ipfs = payload or {}
    multiaddrs = ipfs.get("multiaddrs") or []
    if not isinstance(multiaddrs, list):
        multiaddrs = [multiaddrs]
    normalized_multiaddrs = [str(m) for m in multiaddrs if m]
    meta: Dict[str, Any] = {}
    if normalized_multiaddrs:
        meta["multiaddrs"] = normalized_multiaddrs
    peer_id = ipfs.get("peer_id")
    if peer_id:
        meta["peer_id"] = str(peer_id)
    agent = ipfs.get("agent_version") or ipfs.get("agentVersion")
    if agent:
        meta["agent_version"] = str(agent)
    return meta
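
# Illustrative example for _extract_ipfs_meta (values are placeholders): a single
# non-list multiaddr is wrapped into a list and all values are stringified, e.g.
#   {"peer_id": "12D3KooW...", "multiaddrs": "/ip4/203.0.113.7/tcp/4001"}
#   -> {"multiaddrs": ["/ip4/203.0.113.7/tcp/4001"], "peer_id": "12D3KooW..."}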


async def _connect_ipfs_multiaddrs(addrs):
    # Best-effort: try to connect the local IPFS daemon to each advertised
    # multiaddr; connection failures are ignored.
    for addr in addrs or []:
        try:
            await swarm_connect(addr)
        except Exception:
            pass


async def s_api_v1_network_info(request):
    async with request.app.ctx.memory.transaction("network.info"):
        node = await compute_node_info(request.ctx.db_session)
        make_log("Network", "info served")
        return response.json({"node": node})


async def s_api_v1_network_nodes(request):
    rows = await list_known_public_nodes(request.ctx.db_session)
    make_log("Network", f"nodes list count={len(rows)}")
    return response.json({
        "count": len(rows),
        "nodes": rows,
    })


async def s_api_v1_network_handshake(request):
    # Handshake accepted regardless of our privacy; private nodes typically have no external endpoint.
    # Rate limit per remote IP.
    remote_ip = (request.headers.get('X-Forwarded-For') or request.remote_addr or request.ip or '').split(',')[0].strip()
    if not check_rate_limit(request.app.ctx.memory, remote_ip):
        return response.json({"error": "RATE_LIMIT"}, status=429)

    data = request.json or {}
    required = ["version", "public_key", "node_type", "metrics", "timestamp", "signature"]
    for f in required:
        if f not in data:
            return response.json({"error": f"Missing field {f}"}, status=400)
    # public_host is required for public nodes only
    if data.get("node_type") != "private" and not data.get("public_host"):
        return response.json({"error": "Missing field public_host"}, status=400)

    # Timestamp freshness
    if not check_timestamp_fresh(data.get("timestamp")):
        return response.json({"error": "STALE_TIMESTAMP", "tolerance_sec": HANDSHAKE_TS_TOLERANCE_SEC}, status=400)

    # Nonce replay protection (best-effort)
    if not data.get("nonce") or not check_and_remember_nonce(request.app.ctx.memory, data.get("public_key"), data.get("nonce")):
        return response.json({"error": "NONCE_REPLAY"}, status=400)

    peer_version = str(data.get("version"))
    ipfs_meta = _extract_ipfs_meta(data.get("ipfs") or {})
    comp = compatibility(peer_version, CURRENT_PROTOCOL_VERSION)
    if comp == "blocked":
        # We still store the node but respond with 409
        try:
            await upsert_known_node(
                request.ctx.db_session,
                host=data.get("public_host"),
                port=_port_from_public_host(data.get("public_host")),
                public_key=str(data.get("public_key")),
                meta={
                    "version": peer_version,
                    "compatibility": comp,
                    "is_public": data.get("node_type", "public") != "private",
                    "public_host": data.get("public_host"),
                    "unsupported_last_checked_at": datetime.utcnow().isoformat(),
                    "ipfs": ipfs_meta,
                },
            )
        except Exception:
            pass
        make_log("Handshake", f"Reject incompatible peer {data.get('public_host')} peer={peer_version} current={CURRENT_PROTOCOL_VERSION}")
        return response.json({
            "error": "INCOMPATIBLE_VERSION",
            "compatibility": comp,
            "current": CURRENT_PROTOCOL_VERSION,
            "peer": peer_version,
        }, status=409)

    # Verify the signature over the entire payload except the signature itself
    try:
        signed_fields = {k: v for (k, v) in data.items() if k != "signature"}
        blob = json.dumps(signed_fields, sort_keys=True, separators=(",", ":")).encode()
        import nacl.signing
        vk = nacl.signing.VerifyKey(b58decode(data["public_key"]))
        sig = b58decode(data["signature"])
        vk.verify(blob, sig)
        ok = True
    except Exception:
        ok = False
    if not ok:
        make_log("Handshake", f"Signature verification failed from {data.get('public_host')}", level='warning')
        return response.json({"error": "BAD_SIGNATURE"}, status=400)

    # Upsert node and respond with our info + known public nodes
    # Do not persist private peers (ephemeral)
    if data.get("node_type") != "private" and data.get("public_host"):
        try:
            await upsert_known_node(
                request.ctx.db_session,
                host=data.get("public_host"),
                port=_port_from_public_host(data.get("public_host")),
                public_key=str(data.get("public_key")),
                meta={
                    "version": peer_version,
                    "compatibility": comp,
                    "is_public": True,
                    "public_host": data.get("public_host"),
                    "last_metrics": data.get("metrics", {}),
                    "capabilities": data.get("capabilities", {}),
                    "ipfs": ipfs_meta,
                },
            )
            await _connect_ipfs_multiaddrs(ipfs_meta.get("multiaddrs"))
            try:
                await record_event(
                    request.ctx.db_session,
                    'node_registered',
                    {
                        'public_key': str(data.get("public_key")),
                        'public_host': data.get("public_host"),
                        'node_type': data.get("node_type"),
                        'version': peer_version,
                        'capabilities': data.get("capabilities", {}),
                    },
                    origin_host=PROJECT_HOST,
                )
            except Exception as ev_exc:
                make_log("Events", f"Failed to record node_registered event: {ev_exc}", level="warning")
        except Exception as e:
            make_log("Handshake", f"Upsert peer failed: {e}", level='warning')

    # Merge advertised peers from the caller (optional field)
    for n in data.get("known_public_nodes", []) or []:
        known_ipfs_meta = _extract_ipfs_meta(n.get("ipfs") or {})
        try:
            await upsert_known_node(
                request.ctx.db_session,
                host=n.get("public_host") or n.get("host"),
                port=int(n.get("port") or 80),
                public_key=n.get("public_key") or "",
                meta={
                    "version": n.get("version") or "0.0.0",
                    "compatibility": compatibility(n.get("version") or "0.0.0", CURRENT_PROTOCOL_VERSION),
                    "is_public": True,
                    "public_host": n.get("public_host") or n.get("host"),
                    "capabilities": n.get("capabilities") or {},
                    "ipfs": known_ipfs_meta,
                },
            )
            await _connect_ipfs_multiaddrs(known_ipfs_meta.get("multiaddrs"))
        except Exception:
            pass

    node = await compute_node_info(request.ctx.db_session)
    known = await list_known_public_nodes(request.ctx.db_session)
    resp = sign_response({
        "compatibility": comp,
        "node": node,
        "known_public_nodes": known,
    })
    make_log("Handshake", f"OK with {data.get('public_host')} compat={comp}")
    status = 200
    if comp == "warning":
        resp["warning"] = "MINOR version differs; proceed with caution"
    return response.json(resp, status=status)
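

# Illustrative sketch, not wired into any route: how a peer could build the signed
# payload that s_api_v1_network_handshake() accepts. Only the signing scheme
# (canonical JSON of every field except "signature", Ed25519 over it, base58
# encodings) is taken from the verification block above; the helper name, the
# field values, the epoch-seconds timestamp and the hex nonce are assumptions.
# A peer would POST the returned dict as JSON to the handshake endpoint served
# by s_api_v1_network_handshake().
def _example_signed_handshake_payload(signing_seed_b58: str, public_host: str) -> Dict[str, Any]:
    import os
    import time

    import nacl.signing
    from base58 import b58encode

    # Ed25519 key pair from a base58-encoded 32-byte seed; the base58 public key
    # is what the receiving node uses as the verify key.
    signing_key = nacl.signing.SigningKey(b58decode(signing_seed_b58))
    payload: Dict[str, Any] = {
        "version": CURRENT_PROTOCOL_VERSION,
        "public_key": b58encode(bytes(signing_key.verify_key)).decode(),
        "node_type": "public",
        "public_host": public_host,
        "metrics": {},
        "capabilities": {},
        "timestamp": int(time.time()),
        "nonce": os.urandom(16).hex(),
    }
    # Sign the canonical JSON of everything except "signature", mirroring the
    # verification above, then attach the base58-encoded signature.
    blob = json.dumps(payload, sort_keys=True, separators=(",", ":")).encode()
    payload["signature"] = b58encode(signing_key.sign(blob).signature).decode()
    return payload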