Coverage for integrations/mcp/mcp_http_bridge.py: 43.9%
524 statements
« prev ^ index » next — coverage.py v7.14.0, created at 2026-05-12 04:49 +0000
1"""
2MCP HTTP Bridge — Exposes HARTOS MCP tools via REST endpoints.
4Nunba and external clients connect here instead of stdio.
5MCPServerConnector (mcp_integration.py) already speaks this REST contract:
6 GET /health -> {"status": "ok", "tools": N}
7 GET /tools/list -> {"tools": [...]}
8 POST /tools/execute -> {"tool": "name", "arguments": {...}} -> result
10Tool functions are implemented directly here (not imported from mcp_server.py)
11to avoid FastMCP/pydantic v1-v2 import conflicts. Both modules call the same
12underlying HARTOS APIs (GoalManager, MemoryGraph, ExpertAgentRegistry, etc.).
13"""
15import json
16import os
17import logging
18import inspect
19import glob as _glob
20from pathlib import Path
21from flask import Blueprint, request, jsonify
22from typing import Dict, Any, List, Optional, Callable
24logger = logging.getLogger('hartos_mcp')
26mcp_local_bp = Blueprint('mcp_local', __name__, url_prefix='/api/mcp/local')
29# ── Auth gate: local-loopback OR bearer token ─────────────────
30# The MCP bridge exposes HARTOS tools (social_query, dispatch_goal,
31# remember, seed_goals, Shell_Command, Create_Agent ...). If left
32# unauthenticated, ANY local process on the host can call them — on
33# shared Windows machines that is a cross-user privilege escalation
34# (attacker logged in as User B dumps User A's social DB, writes
35# poisoned entries to User A's memory graph, dispatches goals).
36#
37# Policy:
38# - Flat tier (single-user desktop): 127.0.0.1 only is enough.
39# - Regional/central: must present `Authorization: Bearer <token>`
40# matching the Nunba admin token.
41# The token file lives at `%LOCALAPPDATA%/Nunba/mcp.token` (Windows)
42# or `~/.nunba/mcp.token` (Unix) — owned by the current user, 0600
43# on Unix. Claude Code reads it and sends it as the bearer.
# Process-lifetime cache of the resolved MCP bearer token (None = not yet resolved).
_MCP_TOKEN_CACHE: Optional[str] = None
# One-shot WARN emitter for HARTOS_MCP_DISABLE_AUTH=1 (see _mcp_auth_gate).
_MCP_AUTH_DISABLED_WARNED: bool = False
49def _mcp_token_path() -> str:
50 """Path to the per-install MCP token file."""
51 import os as _os
52 _base = _os.environ.get('LOCALAPPDATA')
53 if _base:
54 return _os.path.join(_base, 'Nunba', 'mcp.token')
55 return _os.path.join(_os.path.expanduser('~'), '.nunba', 'mcp.token')
58def _env_flag_true(val: Optional[str]) -> bool:
59 """Parse an env-var truthy flag: '1', 'true', 'yes', 'on' → True (case-insensitive)."""
60 if val is None:
61 return False
62 return val.strip().lower() in ('1', 'true', 'yes', 'on')
def _ensure_mcp_token() -> str:
    """Read-or-create the MCP bearer token. Idempotent and cached.

    Source resolution order (first hit wins):
      1. HARTOS_MCP_TOKEN       — literal token string (Docker/K8s secrets,
                                  CI environments that inject directly).
      2. HARTOS_MCP_TOKEN_FILE  — path to a file containing the token
                                  (Vault/cert-manager/K8s secret mounts).
      3. Default disk path      — %LOCALAPPDATA%/Nunba/mcp.token (Windows)
                                  or ~/.nunba/mcp.token (Unix); created with
                                  a fresh secrets.token_urlsafe(32) if absent.
    On a read-only filesystem the token lives only for this process.
    """
    global _MCP_TOKEN_CACHE
    if _MCP_TOKEN_CACHE:
        return _MCP_TOKEN_CACHE
    import os as _os
    import secrets as _secrets

    # (1) Literal env-var token — highest priority, zero filesystem touch.
    env_token = _os.environ.get('HARTOS_MCP_TOKEN', '').strip()
    if env_token:
        _MCP_TOKEN_CACHE = env_token
        return _MCP_TOKEN_CACHE

    # (2) Env-var-specified token FILE — for Vault/K8s mounted secrets.
    token_file = _os.environ.get('HARTOS_MCP_TOKEN_FILE', '').strip()
    if token_file:
        try:
            with open(token_file, encoding='utf-8') as fh:
                file_token = fh.read().strip()
        except OSError as exc:
            logger.warning(
                "HARTOS_MCP_TOKEN_FILE=%s could not be read (%s) — "
                "falling back to default disk path",
                token_file, exc,
            )
        else:
            if file_token:
                _MCP_TOKEN_CACHE = file_token
                return _MCP_TOKEN_CACHE
            # Empty file → fall through to the default path, same as a
            # read failure.

    # (3) Default disk path — the original Nunba desktop behaviour.
    token_path = _mcp_token_path()
    try:
        if _os.path.isfile(token_path):
            with open(token_path, encoding='utf-8') as fh:
                _MCP_TOKEN_CACHE = fh.read().strip()
            if _MCP_TOKEN_CACHE:
                return _MCP_TOKEN_CACHE
        # Missing (or empty) token file — mint and persist a fresh one.
        _os.makedirs(_os.path.dirname(token_path), exist_ok=True)
        _MCP_TOKEN_CACHE = _secrets.token_urlsafe(32)
        with open(token_path, 'w', encoding='utf-8') as fh:
            fh.write(_MCP_TOKEN_CACHE)
        try:
            # 0600 on Unix (no-op on Windows, ACLs inherit user profile)
            _os.chmod(token_path, 0o600)
        except OSError:
            pass
        return _MCP_TOKEN_CACHE
    except OSError:
        # Read-only fs — fall back to a process-lifetime token.
        _MCP_TOKEN_CACHE = _secrets.token_urlsafe(32)
        return _MCP_TOKEN_CACHE
def _is_loopback_request() -> bool:
    """True if the current Flask request originates from 127.0.0.1/::1."""
    from flask import request as _req
    remote = (_req.remote_addr or '').strip()
    # NOTE(review): Flask's remote_addr is a numeric address, so the
    # 'localhost' entry presumably never matches — confirm before removing.
    loopback_addrs = {'127.0.0.1', '::1', 'localhost'}
    return remote in loopback_addrs
141# ── Public API for cross-package consumers (Nunba) ──────────────────────
142# Pre-refactor Nunba reached into the private `_ensure_mcp_token` symbol
143# (underscore prefix = "do not import"). That coupled Nunba's release
144# cadence to HARTOS internal naming and broke the moment HARTOS renamed
145# anything. These wrappers are the SUPPORTED contract — Nunba imports
146# them via `from integrations.mcp import get_mcp_token, rotate_mcp_token`.
def get_mcp_token() -> str:
    """Supported public accessor — return the current MCP bearer token.

    Resolution order: HARTOS_MCP_TOKEN env var, then HARTOS_MCP_TOKEN_FILE,
    then the on-disk default path. Idempotent + cached, so it is safe on
    the request hot path. This is the contract cross-package consumers
    (Nunba) import; internal callers may keep `_ensure_mcp_token` for one
    release, but new code MUST use this.
    """
    return _ensure_mcp_token()
def rotate_mcp_token() -> str:
    """Public rotator — generate a new token, persist it, refresh the cache.

    Returns the new token string. Any live MCP client using the old token
    will start getting 403s on its next request — operators MUST re-paste
    the new token into `.claude/settings.local.json` (or rebroadcast via
    the configured secret-injection path).

    Source-resolution rules (mirrors `_ensure_mcp_token`):
      - HARTOS_MCP_TOKEN set → rotation is a no-op; the env var is the
        source of truth and rotation must happen upstream.
      - HARTOS_MCP_TOKEN_FILE set → rotation proceeds in-process, but the
        env-pointed file wins again on restart; a warning tells the
        operator to rotate the mounted secret too.
    """
    global _MCP_TOKEN_CACHE
    import os as _os
    import secrets as _secrets
    # Env-var-pinned tokens cannot be rotated from inside the process —
    # the upstream orchestrator owns the value. Emit a warning so the
    # operator sees WHY the rotate POST returned the same token.
    if _os.environ.get('HARTOS_MCP_TOKEN', '').strip():
        logger.warning(
            "rotate_mcp_token: HARTOS_MCP_TOKEN env var is set — "
            "rotation must happen upstream; returning existing token",
        )
        return _ensure_mcp_token()
    # FIX (silent divergence): previously this case rotated silently, but
    # _ensure_mcp_token re-reads the env-pointed file on the next cold
    # start and the OLD token comes back. Surface that to the operator.
    if _os.environ.get('HARTOS_MCP_TOKEN_FILE', '').strip():
        logger.warning(
            "rotate_mcp_token: HARTOS_MCP_TOKEN_FILE is set — rotate the "
            "mounted secret file too, or the old token returns on restart",
        )
    new_token = _secrets.token_urlsafe(32)
    path = _mcp_token_path()
    try:
        _os.makedirs(_os.path.dirname(path), exist_ok=True)
        with open(path, 'w', encoding='utf-8') as f:
            f.write(new_token)
        try:
            # 0600 on Unix; no-op semantics on Windows.
            _os.chmod(path, 0o600)
        except OSError:
            pass
    except OSError as e:
        logger.warning(
            "rotate_mcp_token: failed to persist new token (%s) — "
            "falling back to in-memory only", e,
        )
    _MCP_TOKEN_CACHE = new_token
    return new_token
def get_mcp_token_path() -> str:
    """Supported public accessor for the on-disk token file location.

    Replaces direct use of the private `_mcp_token_path` (historically done
    by Nunba's `/api/admin/mcp/token/rotate` endpoint).
    """
    return _mcp_token_path()
@mcp_local_bp.before_request
def _mcp_auth_gate():
    """Reject requests that are neither loopback nor bearer-token authenticated.

    Env-var overrides (for HARTOS standalone / container / air-gapped
    deployments without the Nunba desktop token-management UI):
      - HARTOS_MCP_DISABLE_AUTH=1 → skip the gate entirely; only safe on
        internal/air-gapped networks. One WARN on the first request.
      - HARTOS_MCP_TOKEN / HARTOS_MCP_TOKEN_FILE → token sourcing, see
        `_ensure_mcp_token`.
    """
    from flask import request as _req, jsonify as _jsonify
    import os as _os
    global _MCP_AUTH_DISABLED_WARNED

    # /health is intentionally open — tool count only, no data, no mutation.
    if _req.path.endswith('/health'):
        return None

    # Env-var bypass — for HARTOS standalone / Docker / K8s / air-gapped.
    if _env_flag_true(_os.environ.get('HARTOS_MCP_DISABLE_AUTH')):
        if not _MCP_AUTH_DISABLED_WARNED:
            _MCP_AUTH_DISABLED_WARNED = True
            logger.warning(
                "MCP auth disabled via env — use only for internal/"
                "air-gapped deployments"
            )
        return None

    auth_header = _req.headers.get('Authorization', '')
    presented = ''
    if auth_header.startswith('Bearer '):
        presented = auth_header[7:].strip()
    if presented and _secrets_compare(presented, _ensure_mcp_token()):
        return None

    # Loopback alone is NOT enough on a multi-user box — the token is
    # always required for mutating endpoints. Read-only `/tools/list` is
    # allowed from loopback so Claude Code's discovery phase keeps working
    # while the user wires the client up (list, 403 on first execute,
    # fetch token from disk, retry).
    if _is_loopback_request() and _req.path.endswith('/tools/list'):
        return None

    return _jsonify({
        'success': False,
        'error': 'mcp: unauthorized — provide Authorization: Bearer <token> '
                 'from %LOCALAPPDATA%/Nunba/mcp.token',
    }), 403
263def _secrets_compare(a: str, b: str) -> bool:
264 """Constant-time string compare to defeat timing attacks."""
265 import hmac as _hmac
266 return _hmac.compare_digest(a.encode('utf-8'), b.encode('utf-8'))
# ── Tool Registry ──────────────────────────────────────────────
# Flat list of tool descriptors: {"name", "description", "parameters", "fn"}.
_local_tools: List[Dict[str, Any]] = []
# Guard flag so _load_tools() registers everything exactly once per process.
_tools_loaded = False
274def _extract_parameters(fn) -> dict:
275 """Extract JSON Schema-style parameters from a function's signature."""
276 if fn is None:
277 return {}
278 sig = inspect.signature(fn)
279 properties = {}
280 required = []
281 for param_name, param in sig.parameters.items():
282 if param_name in ('self', 'cls'):
283 continue
284 prop = {"type": "string"}
285 annotation = param.annotation
286 if annotation != inspect.Parameter.empty:
287 if annotation in (int, float):
288 prop["type"] = "number"
289 elif annotation == bool:
290 prop["type"] = "boolean"
291 if param.default != inspect.Parameter.empty:
292 prop["default"] = param.default
293 else:
294 required.append(param_name)
295 properties[param_name] = prop
296 schema = {"type": "object", "properties": properties}
297 if required:
298 schema["required"] = required
299 return schema
def _register_tool(name: str, description: str, fn: Callable):
    """Append one tool descriptor (schema derived from *fn*) to the registry."""
    entry = {
        "name": name,
        "description": description,
        "parameters": _extract_parameters(fn),
        "fn": fn,
    }
    _local_tools.append(entry)
def _register_tool_module(module_tools: List[Dict[str, Any]],
                          prefix: str = '') -> int:
    """Register every tool declared in a module-level list (e.g.
    THOUGHT_EXPERIMENT_TOOLS / AUTO_EVOLVE_TOOLS / AUTOEVOLVE_CODE_TOOLS).

    Entries follow the HARTOS ServiceToolRegistry schema:
        {'name': str, 'func': callable, 'description': str, 'tags': [...]}

    Malformed entries are skipped with a warning. Names already present in
    `_local_tools` are skipped too (first registration wins — keeps the
    schema stable across a module-reorg migration where a tool appears in
    more than one list). Returns the number of tools actually added.
    """
    existing_names = {t['name'] for t in _local_tools}
    added = 0
    for entry in module_tools:
        try:
            name, fn = entry['name'], entry['func']
            desc = entry.get('description', '') or name
        except (KeyError, TypeError) as e:
            logger.warning(
                f"_register_tool_module: malformed entry {entry!r} "
                f"skipped ({type(e).__name__}: {e})"
            )
            continue
        qualified_name = f'{prefix}{name}' if prefix else name
        if qualified_name in existing_names:
            logger.debug(
                f"_register_tool_module: skipping duplicate {qualified_name!r}"
            )
            continue
        _register_tool(qualified_name, desc, fn)
        existing_names.add(qualified_name)
        added += 1
    return added
352# ── MCP tool arg aliases ────────────────────────────────────────
353# MCP clients (Claude Code, Cursor, other) historically called our
354# tools with slightly different kwarg names than the ones our Python
355# signatures declare — `search=` vs `query=`, `model_id=` vs `model=`,
356# `agent=` vs `agent_id=`, `user=` vs `user_id=`, etc. Before this
357# canonicalization layer those calls returned `TypeError: unexpected
358# keyword argument 'search'` (HTTP 400), breaking every integration
359# that built against a different naming convention.
360#
361# Keep the lookup here (single source of truth) — do NOT copy the alias
362# dict into the tool bodies. The mapping is ALIAS → CANONICAL, applied
363# only if the canonical key is not already present (so explicit clients
364# always win).
365#
366# New tools SHOULD declare kwargs that match their natural domain name;
367# this dict is a compatibility shim for external callers, not a licence
368# to invent parallel names internally.
# Mapping: tool name → {alias kwarg → canonical kwarg}; consumed only by
# _canonicalize_args (an alias is rewritten only when the canonical key is
# absent, so explicit clients always win).
_TOOL_ARG_ALIASES: dict = {
    # list_agents / search_agents
    'list_agents': {'search': 'query', 'q': 'query', 'term': 'query',
                    'categ': 'category', 'type': 'category'},
    # list_goals / goal_manager
    'list_goals': {'goal': 'goal_type', 'type': 'goal_type',
                   'state': 'status'},
    # onboard_model / switch_model / model_status
    'onboard_model': {'model_id': 'model', 'name': 'model',
                      'repo': 'model', 'quantization': 'quant',
                      'quant_type': 'quant'},
    'switch_model': {'model_id': 'model', 'name': 'model',
                     'repo': 'model', 'quantization': 'quant',
                     'quant_type': 'quant'},
    # hive_connect / hive_disconnect / hive_session_status
    'hive_connect': {'user': 'user_id', 'uid': 'user_id',
                     'scope': 'task_scope', 'tasks': 'task_scope',
                     'budget_spark': 'task_scope'},  # pass-through; func
                                                     # accepts extra via **kw
    # create_hive_task
    'create_hive_task': {'type': 'task_type', 'name': 'title',
                         'desc': 'description', 'body': 'description',
                         'instruction': 'instructions',
                         'steps': 'instructions'},
    # hive_signal_feed
    'hive_signal_feed': {'count': 'limit', 'top': 'limit', 'n': 'limit'},
    # remember / recall (memory graph)
    'remember': {'text': 'content', 'body': 'content',
                 'tag': 'tags', 'label': 'tags'},
    'recall': {'q': 'query', 'search': 'query', 'term': 'query',
               'top': 'limit', 'count': 'limit', 'n': 'limit'},
    # social_query
    'social_query': {'sql': 'query', 'q': 'query'},
    # call_endpoint
    # NOTE(review): 'body' is rewritten to 'data' here, but the visible
    # _tool_call_endpoint signature takes `body=` (no `data` param) —
    # verify against the /tools/execute dispatch path before relying on it.
    'call_endpoint': {'endpoint': 'path', 'url': 'path',
                      'verb': 'method', 'body': 'data'},
}
def _canonicalize_args(tool_name: str, arguments: dict) -> dict:
    """Rewrite known alias kwargs to their canonical names for *tool_name*.

    Returns a NEW dict; *arguments* is never mutated. Unknown tool names
    pass through untouched, and non-dict *arguments* normalise to {}.
    When both the alias and the canonical key are present, the canonical
    value wins and the alias is dropped (logged at debug level).
    """
    if not isinstance(arguments, dict):
        return {}
    aliases = _TOOL_ARG_ALIASES.get(tool_name)
    if not aliases:
        return arguments
    out = dict(arguments)  # shallow copy — args are JSON-serialisable
    for alias, canonical in aliases.items():
        if alias not in out:
            continue
        if canonical not in out:
            out[canonical] = out.pop(alias)
            continue
        # Conflict: explicit canonical key wins; alias is discarded.
        try:
            logger.debug(
                f"MCP {tool_name}: alias '{alias}' + canonical "
                f"'{canonical}' both present — keeping canonical"
            )
        except Exception:
            pass
        out.pop(alias, None)
    return out
# ── Lazy helpers (same as mcp_server.py, avoid import-time side effects) ──
# Populated on first use by _get_registry() / _get_memory_graph().
_registry = None
_memory_graph = None
def _get_registry():
    """Return the process-wide ExpertAgentRegistry, creating it on first use."""
    global _registry
    if _registry is not None:
        return _registry
    from integrations.expert_agents.registry import ExpertAgentRegistry
    _registry = ExpertAgentRegistry()
    return _registry
def _get_db():
    """Open a social-DB session; callers are responsible for close() (see users)."""
    from integrations.social.models import get_db
    return get_db()
def _get_memory_graph(user_id: str = 'system'):
    """Lazily build and cache the MemoryGraph instance.

    The storage dir comes from core.platform_paths when importable,
    otherwise ~/Documents/Nunba/data/memory_graph. NOTE: *user_id* only
    takes effect on the FIRST call — later calls return the cached
    instance regardless.
    """
    global _memory_graph
    if _memory_graph is not None:
        return _memory_graph
    from integrations.channels.memory.memory_graph import MemoryGraph
    try:
        from core.platform_paths import get_memory_graph_dir
        db_path = get_memory_graph_dir()
    except ImportError:
        db_path = os.path.join(
            os.path.expanduser('~'), 'Documents', 'Nunba', 'data', 'memory_graph'
        )
    _memory_graph = MemoryGraph(db_path=db_path, user_id=user_id)
    return _memory_graph
473# ── Tool implementations ──────────────────────────────────────
474# Same logic as mcp_server.py tools, but without FastMCP decorators.
def _tool_list_agents(category: Optional[str] = None, query: Optional[str] = None) -> str:
    """List available expert agents. Filter by category or search by query."""
    reg = _get_registry()
    if query:
        agents = reg.search_agents(query)
    elif category:
        from integrations.expert_agents.registry import AgentCategory
        cat_map = {name.lower(): member for name, member in AgentCategory.__members__.items()}
        cat = cat_map.get(category.lower())
        if cat is None:
            return json.dumps({"error": f"Unknown category: {category}"})
        agents = reg.get_agents_by_category(cat)
    else:
        agents = list(reg.agents.values())

    result = [
        {
            "agent_id": a.agent_id, "name": a.name,
            "category": a.category.name if hasattr(a.category, 'name') else str(a.category),
            "description": a.description, "model_type": a.model_type,
        }
        for a in agents
    ]

    # Dynamic recipe agents live as prompts/*.json two levels up from here.
    dynamic = []
    prompts_dir = os.path.join(os.path.dirname(__file__), '..', '..', 'prompts')
    if os.path.isdir(prompts_dir):
        for f in _glob.glob(os.path.join(prompts_dir, '*.json')):
            try:
                with open(f) as fh:
                    data = json.load(fh)
                dynamic.append({
                    "agent_id": data.get("prompt_id", Path(f).stem),
                    "name": data.get("agent_name", Path(f).stem),
                    "category": "dynamic_recipe",
                    "description": data.get("description", "Trained agent recipe"),
                })
            except Exception:
                pass  # unparsable recipe files are simply omitted

    return json.dumps({"expert_agents": len(result), "dynamic_agents": len(dynamic),
                       "agents": result[:50], "dynamic": dynamic[:20]}, indent=2)
def _tool_list_goals(goal_type: Optional[str] = None, status: Optional[str] = None) -> str:
    """List agent goals. Filter by type or status."""
    try:
        from integrations.agent_engine.goal_manager import GoalManager
        db = _get_db()
        try:
            goals = GoalManager.list_goals(db, goal_type=goal_type, status=status)
            payload = {"count": len(goals), "goals": goals}
            return json.dumps(payload, indent=2, default=str)
        finally:
            db.close()
    except Exception as e:
        return json.dumps({"error": str(e)})
def _tool_create_goal(goal_type: str, title: str, description: str = '', spark_budget: int = 200) -> str:
    """Create a new goal for agents to pursue."""
    try:
        from integrations.agent_engine.goal_manager import GoalManager
        db = _get_db()
        try:
            created = GoalManager.create_goal(
                db, goal_type=goal_type, title=title,
                description=description, spark_budget=spark_budget,
            )
            db.commit()
            return json.dumps(created, indent=2, default=str)
        finally:
            db.close()
    except Exception as e:
        return json.dumps({"error": str(e)})
def _tool_agent_status() -> str:
    """Check agent daemon health, active dispatches, and system state.

    Uses ``core.health_probe`` canonical probes — never reads env-var
    snapshots or hardcoded ports (see that module's docstring for the
    2026-05-01 false-negative incident notes).
    """
    from core.health_probe import probe_agent_daemon, probe_llm
    status = probe_agent_daemon()
    status['llm_server'] = probe_llm()
    try:
        status['expert_agents'] = len(_get_registry().agents)
    except Exception:
        status['expert_agents'] = 'unknown'
    return json.dumps(status, indent=2, default=str)
def _tool_remember(content: str, memory_type: str = 'decision') -> str:
    """Store a memory in the persistent memory graph."""
    try:
        meta = {'memory_type': memory_type, 'source_agent': 'mcp_bridge'}
        memory_id = _get_memory_graph().register(content=content, metadata=meta)
        return json.dumps({"stored": True, "memory_id": memory_id})
    except Exception as e:
        return json.dumps({"error": str(e)})
def _tool_recall(query: str, top_k: int = 5) -> str:
    """Search the persistent memory graph (hybrid mode)."""
    try:
        hits = _get_memory_graph().recall(query=query, mode='hybrid', top_k=top_k)
        result = [
            {
                "id": m.id, "content": m.content,
                "memory_type": m.memory_type, "source_agent": m.source_agent,
                "created_at": m.created_at,
            }
            for m in hits
        ]
        return json.dumps({"count": len(result), "memories": result}, indent=2, default=str)
    except Exception as e:
        return json.dumps({"error": str(e)})
595def _tool_list_recipes() -> str:
596 """List trained agent recipes (prompts/*.json files)."""
597 prompts_dir = os.path.join(os.path.dirname(__file__), '..', '..', 'prompts')
598 recipes = []
599 if os.path.isdir(prompts_dir):
600 for f in sorted(_glob.glob(os.path.join(prompts_dir, '*.json'))):
601 try:
602 with open(f) as fh:
603 data = json.load(fh)
604 recipes.append({
605 "file": Path(f).name,
606 "prompt_id": data.get("prompt_id", ""),
607 "agent_name": data.get("agent_name", ""),
608 "status": data.get("agent_status", ""),
609 "description": data.get("description", "")[:200],
610 })
611 except Exception:
612 recipes.append({"file": Path(f).name, "error": "parse failed"})
613 return json.dumps({"count": len(recipes), "recipes": recipes}, indent=2)
def _tool_system_health() -> str:
    """Full system health check: Flask server, LLM, DB, memory graph."""
    from core.health_probe import probe_nunba_flask, probe_llm
    health = {'backend': probe_nunba_flask(), 'llm': probe_llm()}
    try:
        db = _get_db()
        try:
            from integrations.social.models import User
            health['db'] = {'status': 'up', 'user_count': db.query(User).count()}
        finally:
            db.close()
    except Exception as e:
        health['db'] = {'status': 'error', 'detail': str(e)}
    return json.dumps(health, indent=2, default=str)
def _tool_social_query(query_type: str, limit: int = 20) -> str:
    """Read-only social DB queries. Types: users, posts, goals, products, agents."""
    # NOTE(review): only 'users' and 'goals' are implemented here despite
    # the docstring's longer list — other types return the error envelope.
    try:
        db = _get_db()
        try:
            if query_type == 'users':
                from integrations.social.models import User
                rows = db.query(User).order_by(User.created_at.desc()).limit(limit).all()
                users = [{"id": r.id, "username": r.username,
                          "display_name": r.display_name} for r in rows]
                return json.dumps(users, default=str)
            if query_type == 'goals':
                from integrations.agent_engine.goal_manager import GoalManager
                goals = GoalManager.list_goals(db)
                return json.dumps({"count": len(goals), "goals": goals[:limit]}, default=str)
            return json.dumps({"error": f"Unknown query_type: {query_type}"})
        finally:
            db.close()
    except Exception as e:
        return json.dumps({"error": str(e)})
658# ── Watchdog & Monitoring Tools (read-only, no bypass) ─────────
660def _tool_watchdog_status() -> str:
661 """Get NodeWatchdog status — all monitored daemon threads, heartbeat ages, frozen/dead status."""
662 try:
663 from security.node_watchdog import get_watchdog
664 wd = get_watchdog()
665 if wd is None:
666 return json.dumps({"status": "not_started", "threads": {}})
667 return json.dumps(wd.get_status(), indent=2, default=str)
668 except Exception as e:
669 return json.dumps({"error": str(e)})
672def _tool_exception_report() -> str:
673 """Get recent exception patterns — grouped by type, count, and recency. Use this to find bugs to fix."""
674 try:
675 from exception_collector import ExceptionCollector
676 collector = ExceptionCollector.instance()
677 if collector is None:
678 return json.dumps({"error": "ExceptionCollector not initialized"})
679 import time
680 day_ago = time.time() - 86400
681 patterns = collector.get_patterns(since=day_ago, min_count=1)
682 result = []
683 for key, records in patterns.items():
684 result.append({
685 "pattern": key,
686 "count": len(records),
687 "first_seen": records[0].timestamp if records else None,
688 "last_seen": records[-1].timestamp if records else None,
689 "sample_traceback": records[-1].traceback_str[:500] if records else '',
690 "file": records[-1].filename if records else '',
691 "line": records[-1].lineno if records else 0,
692 })
693 result.sort(key=lambda x: x['count'], reverse=True)
694 return json.dumps({"total_patterns": len(result), "exceptions": result[:20]},
695 indent=2, default=str)
696 except Exception as e:
697 return json.dumps({"error": str(e)})
700def _tool_runtime_integrity() -> str:
701 """Check runtime integrity monitor — code tampering detection, guardrail hash verification."""
702 try:
703 from security.runtime_monitor import get_monitor
704 mon = get_monitor()
705 if mon is None:
706 return json.dumps({"status": "not_started"})
707 return json.dumps({
708 "running": mon._running,
709 "tampered": mon._tampered,
710 "check_interval": mon._check_interval,
711 })
712 except Exception as e:
713 return json.dumps({"error": str(e)})
716# ── Universal HARTOS API Gateway ───────────────────────────────
def _tool_call_endpoint(method: str, path: str, body: Optional[str] = None) -> str:
    """Call any HARTOS API endpoint — gateway to ALL channels, handlers, services.

    Examples:
        call_endpoint("GET", "/status")
        call_endpoint("POST", "/chat", '{"user_id":"1","prompt_id":"demo","prompt":"hello"}')
        call_endpoint("GET", "/api/social/communities")
        call_endpoint("POST", "/api/social/posts", '{"title":"Hello","content":"World"}')
        call_endpoint("GET", "/api/mcp/local/tools/list")
        call_endpoint("GET", "/prompts")
        call_endpoint("POST", "/api/instructions/enqueue", '{"user_id":"1","text":"research AI"}')

    Route prefixes include /chat, /status, /prompts (core agent pipeline),
    /api/social/* (posts, communities, feeds, karma, encounters),
    /api/mcp/*, /api/instructions/*, /api/settings/*, /api/credentials/*,
    and /a2a/* — use list_routes for the complete map.
    """
    try:
        from flask import current_app
        app = current_app._get_current_object()
    except RuntimeError:
        # Not in request context — resolve via singleton accessor.
        # NEVER eager-import hart_intelligence here: this is a worker
        # thread (MCP HTTP handler) and a direct import races the
        # canonical loader's import lock.
        from core.safe_hartos_attr import safe_hartos_attr
        app = safe_hartos_attr('app')
        if app is None:
            logger.info(
                "MCP call_endpoint: HARTOS app not yet loaded — "
                "method=%s path=%s — returning 503-style error.",
                method, path,
            )
            return json.dumps({
                "error": "HARTOS app not available (loader still init)",
            })

    if not path.startswith('/'):
        path = '/' + path
    method = method.upper()

    parsed_body = None
    if body:
        try:
            parsed_body = json.loads(body)
        except json.JSONDecodeError:
            return json.dumps({"error": f"Invalid JSON body: {body[:200]}"})

    try:
        with app.test_client() as client:
            if method == 'GET':
                resp = client.get(path)
            elif method == 'DELETE':
                resp = client.delete(path)
            elif method in ('POST', 'PUT', 'PATCH'):
                sender = {'POST': client.post, 'PUT': client.put,
                          'PATCH': client.patch}[method]
                resp = sender(path, json=parsed_body, content_type='application/json')
            else:
                return json.dumps({"error": f"Unsupported method: {method}"})

            payload = resp.get_json(silent=True)
            if payload is not None:
                return json.dumps({"status": resp.status_code, "data": payload}, default=str)
            return json.dumps({"status": resp.status_code,
                               "text": resp.get_data(as_text=True)[:2000]})
    except Exception as e:
        return json.dumps({"error": str(e)})
793def _tool_list_channels() -> str:
794 """List all available channel adapters and their status."""
795 try:
796 from integrations.channels.extensions import get_available_adapters
797 adapters = get_available_adapters()
798 channels = []
799 for name, factory in adapters.items():
800 channels.append({"name": name, "type": "extension"})
801 # Also check core adapters
802 core_adapters = ['discord', 'telegram', 'slack', 'whatsapp', 'signal', 'web', 'google_chat']
803 for name in core_adapters:
804 if name not in [c['name'] for c in channels]:
805 try:
806 mod = __import__(f'integrations.channels.{name}_adapter', fromlist=['_'])
807 channels.append({"name": name, "type": "core"})
808 except ImportError:
809 pass
810 return json.dumps({"count": len(channels), "channels": channels}, indent=2)
811 except Exception as e:
812 return json.dumps({"error": str(e)})
def _tool_list_routes() -> str:
    """List all registered Flask routes — everything reachable via call_endpoint."""
    try:
        from flask import current_app
        app = current_app._get_current_object()
    except RuntimeError:
        # Worker-thread safe — singleton accessor, no import lock race.
        from core.safe_hartos_attr import safe_hartos_attr
        app = safe_hartos_attr('app')
        if app is None:
            logger.info(
                "MCP list_routes: HARTOS app not yet loaded — "
                "returning empty 503 envelope.",
            )
            return json.dumps({
                "error": "HARTOS app not available (loader still init)",
            })

    routes = [
        {
            "path": rule.rule,
            "methods": sorted(rule.methods - {'HEAD', 'OPTIONS'}),
        }
        for rule in app.url_map.iter_rules()
        if rule.endpoint != 'static'
    ]
    routes.sort(key=lambda r: r['path'])
    return json.dumps({"count": len(routes), "routes": routes}, indent=2)
845# ── Tool Loading ──────────────────────────────────────────────
def _load_tools():
    """Register all local MCP tool functions.

    Idempotent: guarded by the module-level ``_tools_loaded`` flag. The
    flag is flipped *before* registration so a re-entrant call (e.g. a
    tool module that triggers tool loading during its own import) cannot
    recurse — at the cost that a registration failure is not retried.
    """
    global _tools_loaded
    if _tools_loaded:
        return
    _tools_loaded = True

    # Read-only: observe the system
    _register_tool('list_agents', 'List available expert agents', _tool_list_agents)
    _register_tool('list_goals', 'List agent goals', _tool_list_goals)
    _register_tool('agent_status', 'Check agent daemon health', _tool_agent_status)
    _register_tool('list_recipes', 'List trained agent recipes', _tool_list_recipes)
    _register_tool('system_health', 'Full system health check', _tool_system_health)
    _register_tool('social_query', 'Read-only social DB queries', _tool_social_query)

    # Memory (safe — memory graph only, no framework bypass)
    _register_tool('remember', 'Store a memory in the memory graph', _tool_remember)
    _register_tool('recall', 'Search the persistent memory graph', _tool_recall)

    # Framework gateway — ALL writes go through Flask routes (guardrails, constitution, budget gate)
    _register_tool('call_endpoint', 'Call any HARTOS API endpoint through the framework', _tool_call_endpoint)
    _register_tool('list_routes', 'List all registered Flask routes', _tool_list_routes)
    _register_tool('list_channels', 'List all available channel adapters', _tool_list_channels)

    # Watchdog & monitoring (read-only)
    _register_tool('watchdog_status', 'Get daemon thread health — frozen/dead detection', _tool_watchdog_status)
    _register_tool('exception_report', 'Get recent exception patterns — find bugs to fix', _tool_exception_report)
    _register_tool('runtime_integrity', 'Check code tampering and guardrail hash verification', _tool_runtime_integrity)

    # ── Hive Meta-Orchestrator Tools ─────────────────────────────
    # These let Claude Code drive the entire hive as a meta-network.
    # Defined as closures here (not module-level) so their heavyweight
    # project imports stay lazy until first invocation.

    def _tool_onboard_model(model: str, quant: str = 'auto'):
        """Onboard a HuggingFace model: find GGUF, download, start llama.cpp, register.
        Example: onboard_model(model='Qwen/Qwen3-8B', quant='Q4_K_M')"""
        try:
            from integrations.service_tools.model_onboarding import onboard
            return onboard(model, quant=quant)
        except Exception as e:
            return {'status': 'error', 'error': str(e)}

    def _tool_switch_model(model: str, quant: str = 'auto'):
        """Hot-swap the active LLM to a different model (downloads if needed)."""
        try:
            from integrations.service_tools.model_onboarding import switch_model
            return switch_model(model, quant=quant)
        except Exception as e:
            return {'status': 'error', 'error': str(e)}

    def _tool_model_status():
        """Get active model, server health, VRAM usage, downloaded models."""
        try:
            from integrations.service_tools.model_onboarding import status
            return status()
        except Exception as e:
            return {'status': 'error', 'error': str(e)}

    def _tool_hive_connect(user_id: str, task_scope: str = 'own_repos'):
        """Connect this Claude Code session to the hive as a coding worker node.
        task_scope: own_repos | public | any"""
        try:
            from integrations.coding_agent.claude_hive_session import get_hive_session
            session = get_hive_session()
            return session.connect(user_id, task_scope=task_scope)
        except Exception as e:
            return {'status': 'error', 'error': str(e)}

    def _tool_hive_disconnect():
        """Disconnect this Claude Code session from the hive."""
        try:
            from integrations.coding_agent.claude_hive_session import get_hive_session
            return get_hive_session().disconnect()
        except Exception as e:
            return {'status': 'error', 'error': str(e)}

    def _tool_hive_session_status():
        """Get hive session status: connected, tasks completed, spark earned."""
        try:
            from integrations.coding_agent.claude_hive_session import get_hive_session
            return get_hive_session().get_status()
        except Exception as e:
            return {'status': 'error', 'error': str(e)}

    def _tool_create_hive_task(task_type: str, title: str, description: str, instructions: str):
        """Create a coding task for the hive. Types: code_review, code_write, code_test,
        model_onboard, benchmark, documentation, bug_fix, refactor"""
        try:
            from integrations.coding_agent.hive_task_protocol import get_dispatcher
            task = get_dispatcher().create_task(task_type, title, description, instructions)
            return task.to_dict()
        except Exception as e:
            return {'status': 'error', 'error': str(e)}

    def _tool_dispatch_hive_tasks():
        """Dispatch all pending hive tasks to available Claude Code sessions. Returns count dispatched."""
        try:
            from integrations.coding_agent.hive_task_protocol import get_dispatcher
            count = get_dispatcher().dispatch_pending()
            return {'dispatched': count}
        except Exception as e:
            return {'status': 'error', 'error': str(e)}

    def _tool_hive_signal_stats():
        """Get channel signal statistics: signal counts by type, by channel, total processed."""
        try:
            from integrations.channels.hive_signal_bridge import get_signal_bridge
            return get_signal_bridge().get_stats()
        except Exception as e:
            return {'status': 'error', 'error': str(e)}

    def _tool_hive_signal_feed(limit: int = 20):
        """Get recent hive signals from all channels — what the community is talking about."""
        try:
            from integrations.channels.hive_signal_bridge import get_signal_bridge
            return get_signal_bridge().get_signal_feed(limit=int(limit))
        except Exception as e:
            return {'status': 'error', 'error': str(e)}

    def _tool_seed_goals():
        """Seed all bootstrap goals (47 agents including 6 hive acceleration agents). Idempotent."""
        try:
            from integrations.social.models import get_db
            from integrations.agent_engine.goal_seeding import seed_bootstrap_goals
            db = get_db()
            try:
                count = seed_bootstrap_goals(db)
                db.commit()
                return {'seeded': count, 'status': 'ok'}
            finally:
                # Always release the DB session, even when seeding raises.
                db.close()
        except Exception as e:
            return {'status': 'error', 'error': str(e)}

    # Register hive orchestrator tools
    _register_tool('onboard_model', 'Download HF model → GGUF → start llama.cpp inference', _tool_onboard_model)
    _register_tool('switch_model', 'Hot-swap active LLM to a different model', _tool_switch_model)
    _register_tool('model_status', 'Active model, server health, VRAM, downloads', _tool_model_status)
    _register_tool('hive_connect', 'Connect Claude Code session to hive as worker node', _tool_hive_connect)
    _register_tool('hive_disconnect', 'Disconnect Claude Code session from hive', _tool_hive_disconnect)
    _register_tool('hive_session_status', 'Hive session: connected, tasks done, spark earned', _tool_hive_session_status)
    _register_tool('create_hive_task', 'Create a coding task for hive Claude Code sessions', _tool_create_hive_task)
    _register_tool('dispatch_hive_tasks', 'Dispatch pending tasks to available sessions', _tool_dispatch_hive_tasks)
    _register_tool('hive_signal_stats', 'Channel signal stats: what the community needs', _tool_hive_signal_stats)
    _register_tool('hive_signal_feed', 'Recent signals from all 30 channels', _tool_hive_signal_feed)
    _register_tool('seed_goals', 'Seed all 47 bootstrap agents (idempotent)', _tool_seed_goals)

    # ── Module-level tool arrays (auto-evolve + thought experiments + ──
    # autoresearch code tools). These are maintained alongside the
    # ServiceToolRegistry — importing the module-level list is the
    # canonical way to consume them and keeps the MCP manifest in sync
    # with the agent-side registry (single source of truth per Gate 4).
    import importlib  # hoisted out of the loop below: loop-invariant

    for mod_path, symbol, label in (
        ('integrations.agent_engine.thought_experiment_tools',
         'THOUGHT_EXPERIMENT_TOOLS', 'thought_experiment'),
        ('integrations.agent_engine.auto_evolve',
         'AUTO_EVOLVE_TOOLS', 'auto_evolve'),
        ('integrations.coding_agent.autoevolve_code_tools',
         'AUTOEVOLVE_CODE_TOOLS', 'autoevolve_code'),
        ('integrations.coding_agent.backend_repair_tools',
         'BACKEND_REPAIR_TOOLS', 'backend_repair'),
    ):
        try:
            mod = importlib.import_module(mod_path)
            tools_list = getattr(mod, symbol, None)
            if not isinstance(tools_list, list):
                logger.warning(
                    f"MCP bridge: {mod_path}.{symbol} is not a list "
                    f"(got {type(tools_list).__name__}) — skipping"
                )
                continue
            added = _register_tool_module(tools_list)
            logger.info(
                f"MCP bridge: registered {added} {label} tools "
                f"from {mod_path}.{symbol}"
            )
        except ImportError as e:
            # Optional module not installed — log loudly but keep going;
            # the rest of the bridge's tools must still be served.
            logger.warning(
                f"MCP bridge: {mod_path} unavailable ({e}) — "
                f"{label} tools NOT exposed via MCP"
            )

    logger.info(f"MCP HTTP bridge loaded {len(_local_tools)} local tools")
1032# ── REST Endpoints ─────────────────────────────────────────────
@mcp_local_bp.route('/health', methods=['GET'])
def mcp_health():
    """Health check for the local MCP bridge — reports tool count."""
    # Lazy-load on first hit so tools are counted even before /tools/list.
    _load_tools()
    payload = {
        "status": "ok",
        "tools": len(_local_tools),
        "server": "hartos-mcp-local",
    }
    return jsonify(payload)
@mcp_local_bp.route('/tools/list', methods=['GET'])
def mcp_list_tools():
    """List all locally available MCP tools with their schemas.

    Returns only the public manifest fields (name/description/parameters);
    the ``fn`` callable in each registry entry stays server-side.
    """
    _load_tools()
    # Comprehension instead of a manual append loop (idiomatic + faster).
    tools_out = [
        {
            "name": t["name"],
            "description": t["description"],
            "parameters": t["parameters"],
        }
        for t in _local_tools
    ]
    return jsonify({"tools": tools_out})
@mcp_local_bp.route('/tools/execute', methods=['POST'])
def mcp_execute_tool():
    """Execute a local MCP tool.

    Request body: {"tool": "tool_name", "arguments": {"key": "value"}}

    Returns:
        200 {"success": True, "result": ...} — string results that parse
            as JSON are unwrapped into objects for the client;
        400 — missing tool name, or arguments that don't match the
            tool's signature;
        404 — unknown tool (with the list of available tool names);
        500 — tool has no callable, or the tool raised during execution.
    """
    _load_tools()
    data = request.get_json(force=True, silent=True) or {}
    tool_name = data.get('tool', '').strip()
    arguments = data.get('arguments', {})

    if not tool_name:
        return jsonify({"success": False, "error": "tool name required"}), 400

    tool_entry = next((t for t in _local_tools if t["name"] == tool_name), None)

    if tool_entry is None:
        available = [t["name"] for t in _local_tools]
        return jsonify({
            "success": False,
            "error": f"Unknown tool: {tool_name}",
            "available_tools": available,
        }), 404

    fn = tool_entry["fn"]
    if fn is None:
        return jsonify({"success": False, "error": f"Tool {tool_name} has no callable"}), 500

    # Apply per-tool alias canonicalization so MCP clients that call
    # with `search=` / `model_id=` / `user=` land on the correct Python
    # kwarg. See `_TOOL_ARG_ALIASES` above for the full mapping.
    arguments = _canonicalize_args(tool_name, arguments)

    # Validate arguments against the tool's signature BEFORE invoking it.
    # Previously a blanket `except TypeError` around the call misreported
    # a TypeError raised *inside* the tool body as "Invalid arguments"
    # (400); with pre-binding, in-tool TypeErrors correctly surface as
    # 500 execution errors below.
    try:
        sig = inspect.signature(fn)
    except (TypeError, ValueError):
        sig = None  # C callables with no introspectable signature — skip
    if sig is not None:
        try:
            sig.bind(**arguments)
        except TypeError as e:
            return jsonify({"success": False, "error": f"Invalid arguments: {e}"}), 400

    try:
        result = fn(**arguments)
        if isinstance(result, str):
            # Many tools return JSON-encoded strings — unwrap when possible.
            try:
                parsed = json.loads(result)
                return jsonify({"success": True, "result": parsed})
            except json.JSONDecodeError:
                return jsonify({"success": True, "result": result})
        return jsonify({"success": True, "result": result})
    except Exception as e:
        # logger.exception keeps the traceback for debugging.
        logger.exception(f"MCP tool {tool_name} execution error: {e}")
        return jsonify({"success": False, "error": str(e)}), 500
1112# ── Auto-registration ─────────────────────────────────────────
def auto_register_local_mcp():
    """Register the local HARTOS MCP server in the MCPToolRegistry.

    Called at boot so Nunba's MCPServerConnector auto-discovers local tools.
    Uses the backend port since tools are served from the same Flask app.
    """
    try:
        from core.port_registry import get_port
        from integrations.mcp.mcp_integration import mcp_registry, MCPServerConnector

        backend_port = get_port('backend')
        local_url = f"http://127.0.0.1:{backend_port}/api/mcp/local"

        # Guard clause: already registered from a previous boot pass.
        if 'hartos_local' in mcp_registry.servers:
            return

        connector = MCPServerConnector('hartos_local', local_url)
        connector.connected = True  # We are the server, skip health check
        mcp_registry.servers['hartos_local'] = connector
        logger.info(f"Auto-registered local MCP server at {local_url}")
    except Exception as e:
        # Best-effort: registration failure must never block boot.
        logger.debug(f"Auto-register local MCP failed (non-critical): {e}")