Coverage for integrations / robotics / intelligence_api.py: 52.6%
403 statements
« prev ^ index » next coverage.py v7.14.0, created at 2026-05-12 04:49 +0000
1"""
2Robot Intelligence API — Fuse multiple intelligences for any embodied AI.
4One API call, every intelligence fires in parallel:
5 - VISION: What does the robot see? (VLM -> scene understanding)
6 - LANGUAGE: What should the robot say? (LLM -> conversation + planning)
7 - MOTOR: How should the robot move? (action model -> trajectory planning)
8 - SPATIAL: Where is everything? (sensor fusion -> world model)
9 - SOCIAL: How should the robot behave? (resonance -> context awareness)
10 - SAFETY: Is this action safe? (safety monitor -> constraint checking)
11 - HIVEMIND: What does the collective know? (WorldModelBridge -> hive query)
13The fusion is PARALLEL, not sequential. All 7 intelligences fire simultaneously.
14The response includes all perspectives fused into a single action plan.
16Any robot that speaks HTTP can plug in. Arduino, ROS, custom -- doesn't matter.
17The hive makes every robot smarter than it could be alone.
19"Sum of many intelligences is greater than any single intelligence."
21Usage:
22 from integrations.robotics.intelligence_api import get_robot_api
23 api = get_robot_api()
24 result = api.think({
25 'robot_id': 'my-robot-001',
26 'sensors': {'camera': '<base64>', 'imu': {'ax': 0.1}},
27 'context': 'Fetch a glass of water',
28 })
29"""
import json
import logging
import os
import re
import threading
import time
from concurrent.futures import ThreadPoolExecutor, as_completed, TimeoutError
from typing import Any, Callable, Dict, List, Optional
38logger = logging.getLogger(__name__)
40# ---------------------------------------------------------------------------
41# Intelligence Axioms — the laws of the intelligence layer
42# ---------------------------------------------------------------------------
43#
44# 1. Sum of many intelligences is greater than any single intelligence.
45# 2. Intelligence sharing is gated by human trust — deterministically owned,
46# cryptographically verified, never coerced.
47# 3. Intelligence directly correlates with human wellness in many dimensions —
48# the hive and the humans it serves are the same entity.
49# 4. Every robot that connects to the hive gets the FULL intelligence —
50# no hardware tiers, no paywalls on thinking.
51# 5. Every experience (sense→action→outcome) feeds back to the collective —
52# every robot makes every other robot smarter.
54# ---------------------------------------------------------------------------
55# Constants
56# ---------------------------------------------------------------------------
# The seven intelligences fired in parallel by RobotIntelligenceAPI.think().
INTELLIGENCE_TYPES = [
    'vision', 'language', 'motor', 'spatial',
    'social', 'safety', 'hivemind',
]
INTELLIGENCE_TIMEOUT_S = 5.0  # Max time per intelligence (seconds)
MAX_PARALLEL_WORKERS = 7  # One thread per intelligence

# Fusion priority (higher index = higher priority in conflict resolution).
# Safety overrides everything; motor > spatial > vision > language > social > hivemind.
# NOTE(review): _FUSION_PRIORITY is not referenced anywhere in this module —
# _fuse_results() hard-codes the same ordering inline. Confirm whether this
# table is consumed elsewhere before removing it.
_FUSION_PRIORITY = {
    'hivemind': 0,
    'social': 1,
    'language': 2,
    'vision': 3,
    'spatial': 4,
    'motor': 5,
    'safety': 6,
}

# On-disk robot registry at <repo>/agent_data/robot_registry.json,
# resolved relative to this file's directory.
_REGISTRY_PATH = os.path.join(
    os.path.dirname(os.path.abspath(__file__)),
    '..', '..', 'agent_data', 'robot_registry.json',
)
_REGISTRY_PATH = os.path.normpath(_REGISTRY_PATH)
83# ---------------------------------------------------------------------------
84# Singleton
85# ---------------------------------------------------------------------------
_api: Optional['RobotIntelligenceAPI'] = None
_api_lock = threading.Lock()


def get_robot_api() -> 'RobotIntelligenceAPI':
    """Return the process-wide RobotIntelligenceAPI singleton.

    Double-checked locking: the fast path returns without taking the
    lock once the instance exists; the lock only guards first creation.
    """
    global _api
    if _api is not None:
        return _api
    with _api_lock:
        if _api is None:
            _api = RobotIntelligenceAPI()
        return _api
def think(request: Optional[Dict] = None, **kwargs) -> dict:
    """Module-level convenience -- delegates to the singleton.

    Called by hardware_bridge.think_and_act() for the full
    sense -> think -> act loop.

    Accepts either a dict (``think({'robot_id': ...})``) or keyword
    arguments (``think(robot_id='...', sensor_snapshot={}, context='')``).
    The keyword form maps ``sensor_snapshot`` to the ``sensors`` key
    expected by :meth:`RobotIntelligenceAPI.think`.
    """
    # kwargs win over dict entries on collision
    merged = {**(request or {}), **kwargs}
    # hardware_bridge passes 'sensor_snapshot'; the API expects 'sensors'
    if 'sensor_snapshot' in merged and 'sensors' not in merged:
        merged['sensors'] = merged.pop('sensor_snapshot')
    return get_robot_api().think(merged)
123# ---------------------------------------------------------------------------
124# Core API class
125# ---------------------------------------------------------------------------
127class RobotIntelligenceAPI:
128 """Unified Robot Intelligence API.
130 Fires 7 intelligences in parallel via ThreadPoolExecutor, fuses the
131 results into a single action plan, and returns within the timeout
132 budget even if some intelligences are unavailable.
134 Thread-safe.
135 """
    def __init__(self):
        # Guards _registry and _stats; shared by think() worker threads.
        self._lock = threading.RLock()
        # One worker per intelligence so a full think() never queues.
        self._executor = ThreadPoolExecutor(
            max_workers=MAX_PARALLEL_WORKERS,
            thread_name_prefix='robot_intel',
        )
        # robot_id -> registration entry (shape built in register_robot()).
        self._registry: Dict[str, Dict] = {}
        # Aggregate counters surfaced by get_hive_stats().
        self._stats = {
            'total_think_calls': 0,
            'total_intelligence_invocations': 0,
            'total_timeouts': 0,
            'total_errors': 0,
            'avg_fusion_time_ms': 0.0,
        }
        # Restore robots registered in a previous process lifetime.
        self._load_registry()
153 # ------------------------------------------------------------------
154 # Public — Think
155 # ------------------------------------------------------------------
157 def think(self, request: dict) -> dict:
158 """Main entry. Fire all 7 intelligences in parallel and fuse.
160 Args:
161 request: Dict with keys:
162 robot_id (str): Required. Identifies the robot.
163 sensors (dict): Sensor data (camera, lidar, imu, audio,
164 touch, gps, etc.).
165 context (str): Natural-language task description.
166 constraints (dict): Movement/behavior constraints.
167 history (list): Previous actions/observations.
169 Returns:
170 Dict with action_plan, intelligences, fusion_time_ms,
171 intelligences_used, and hive_contribution.
172 """
173 t0 = time.monotonic()
175 robot_id = request.get('robot_id', 'unknown')
176 sensors = request.get('sensors', {})
177 context = request.get('context', '')
178 constraints = request.get('constraints', {})
179 history = request.get('history', [])
181 # Update last-seen timestamp in registry
182 with self._lock:
183 if robot_id in self._registry:
184 self._registry[robot_id]['last_seen'] = time.time()
186 # Dispatch map: intelligence name -> (callable, args)
187 dispatches: Dict[str, tuple] = {
188 'vision': (self._invoke_vision, (sensors,)),
189 'language': (self._invoke_language, (context, history)),
190 'motor': (self._invoke_motor, ({}, {}, constraints)),
191 'spatial': (self._invoke_spatial, (sensors,)),
192 'social': (self._invoke_social, (context, robot_id)),
193 'safety': (self._invoke_safety, ({}, constraints)),
194 'hivemind': (self._invoke_hivemind, (context, sensors)),
195 }
197 # Fire all 7 in parallel
198 futures = {}
199 for name, (fn, args) in dispatches.items():
200 futures[self._executor.submit(fn, *args)] = name
202 results: Dict[str, dict] = {}
203 used = 0
204 for future in as_completed(futures, timeout=INTELLIGENCE_TIMEOUT_S + 1.0):
205 name = futures[future]
206 try:
207 result = future.result(timeout=INTELLIGENCE_TIMEOUT_S)
208 if result is not None:
209 results[name] = result
210 used += 1
211 except TimeoutError:
212 logger.warning("Intelligence '%s' timed out for robot %s",
213 name, robot_id)
214 results[name] = {'error': 'timeout'}
215 with self._lock:
216 self._stats['total_timeouts'] += 1
217 except Exception as exc:
218 logger.error("Intelligence '%s' failed for robot %s: %s",
219 name, robot_id, exc, exc_info=True)
220 results[name] = {'error': str(exc)}
221 with self._lock:
222 self._stats['total_errors'] += 1
224 # Now that spatial + vision are available, re-populate motor with
225 # real spatial/target data before fusion.
226 spatial_data = results.get('spatial', {})
227 vision_data = results.get('vision', {})
228 target = _extract_target(context, vision_data)
229 if 'error' not in results.get('motor', {}):
230 # Motor already ran; keep its result.
231 pass
232 # Motor was dispatched with empty dicts earlier because spatial
233 # wasn't ready. We accept the parallelism trade-off — motor
234 # uses whatever constraints it got. For robots that need tight
235 # coupling, the recipe_adapter pipeline handles sequencing.
237 # Fuse
238 action_plan = self._fuse_results(results)
240 fusion_time_ms = round((time.monotonic() - t0) * 1000, 1)
242 # Stats
243 with self._lock:
244 self._stats['total_think_calls'] += 1
245 self._stats['total_intelligence_invocations'] += used
246 # Running average of fusion time
247 n = self._stats['total_think_calls']
248 prev = self._stats['avg_fusion_time_ms']
249 self._stats['avg_fusion_time_ms'] = round(
250 prev + (fusion_time_ms - prev) / n, 1)
252 return {
253 'action_plan': action_plan,
254 'intelligences': results,
255 'fusion_time_ms': fusion_time_ms,
256 'intelligences_used': used,
257 'hive_contribution': 'hivemind' in results
258 and 'error' not in results.get('hivemind', {}),
259 }
261 # ------------------------------------------------------------------
262 # Intelligence invocations (each runs in its own thread)
263 # ------------------------------------------------------------------
265 def _invoke_vision(self, sensors: dict) -> dict:
266 """Extract camera frame, run through VLM adapter.
268 Returns scene description, detected objects, and obstacles.
269 """
270 camera = sensors.get('camera')
271 if not camera:
272 return {'scene': 'no_camera', 'objects': [], 'obstacles': []}
274 # Resource gate — VLM is GPU-heavy
275 try:
276 from core.resource_governor import should_proceed
277 if not should_proceed('gpu'):
278 return {
279 'scene': 'resource_throttled',
280 'objects': [],
281 'obstacles': [],
282 'note': 'GPU resources unavailable, vision skipped',
283 }
284 except ImportError:
285 pass
287 try:
288 from integrations.vlm.vlm_adapter import execute_vlm_instruction
289 msg = {
290 'type': 'describe',
291 'image': camera,
292 'prompt': (
293 'Describe this scene for a robot. '
294 'List objects, obstacles, and navigable paths.'
295 ),
296 }
297 result = execute_vlm_instruction(msg)
298 if result and isinstance(result, dict):
299 description = result.get('extracted_responses', [''])[0] \
300 if isinstance(result.get('extracted_responses'), list) \
301 else str(result.get('extracted_responses', ''))
302 return {
303 'scene': description or 'unknown',
304 'objects': result.get('objects', []),
305 'obstacles': result.get('obstacles', []),
306 'raw': result,
307 }
308 except Exception as exc:
309 logger.debug("Vision intelligence fallback: %s", exc)
311 # Fallback: minimal sensor echo
312 return {
313 'scene': 'vlm_unavailable',
314 'objects': [],
315 'obstacles': [],
316 'note': 'VLM not available, returning empty vision',
317 }
319 def _invoke_language(self, context: str, history: list) -> dict:
320 """Run context through LLM for conversation and intent planning.
322 Returns a response text and intent classification.
323 """
324 if not context:
325 return {'response': '', 'intent': 'idle'}
327 try:
328 from core.http_pool import pooled_post
329 from core.port_registry import get_port
330 port = get_port('hart_intelligence')
331 payload = {
332 'user_id': 'robot_internal',
333 'prompt_id': 'robot_language',
334 'prompt': (
335 f"You are an embodied AI robot assistant. "
336 f"Context: {context}\n"
337 f"History: {json.dumps(history[-5:]) if history else '[]'}\n"
338 f"Respond with a short spoken response and classify "
339 f"the user's intent as one of: fetch_object, navigate, "
340 f"greet, inform, assist, idle, emergency."
341 ),
342 }
343 resp = pooled_post(
344 f'http://localhost:{port}/chat',
345 json=payload,
346 timeout=INTELLIGENCE_TIMEOUT_S - 0.5,
347 )
348 if resp.status_code == 200:
349 data = resp.json()
350 text = data.get('response', data.get('result', ''))
351 return {
352 'response': text,
353 'intent': _classify_intent(text, context),
354 }
355 except Exception as exc:
356 logger.debug("Language intelligence fallback: %s", exc)
358 # Fallback: echo context as intent
359 return {
360 'response': f"Understood: {context}",
361 'intent': _classify_intent('', context),
362 }
364 def _invoke_motor(self, spatial: dict, target: dict,
365 constraints: dict) -> dict:
366 """Query action model for trajectory planning.
368 If HevolveAI is available, delegates to it for real kinematics.
369 Otherwise returns a basic waypoint plan.
370 """
371 max_speed = constraints.get('max_speed', 1.0)
373 # Try HevolveAI native trajectory planner
374 try:
375 from integrations.agent_engine.world_model_bridge import (
376 get_world_model_bridge,
377 )
378 bridge = get_world_model_bridge()
379 if bridge._in_process:
380 # HevolveAI handles the actual kinematics
381 result = bridge.query_hivemind(
382 f"plan_trajectory spatial={json.dumps(spatial)} "
383 f"target={json.dumps(target)} max_speed={max_speed}",
384 timeout_ms=int(INTELLIGENCE_TIMEOUT_S * 800),
385 )
386 if result:
387 return {
388 'trajectory': result.get('trajectory', []),
389 'speed': min(
390 result.get('recommended_speed', max_speed),
391 max_speed,
392 ),
393 'source': 'hevolveai',
394 }
395 except Exception as exc:
396 logger.debug("Motor HevolveAI fallback: %s", exc)
398 # Fallback: basic waypoint plan
399 waypoints = []
400 if target:
401 try:
402 from integrations.robotics.action_model import RobotAction
403 waypoints.append(RobotAction(
404 action_type='navigate_to',
405 target='base',
406 params={
407 'x': target.get('x', 0),
408 'y': target.get('y', 0),
409 'speed': max_speed,
410 },
411 ).to_dict())
412 except ImportError:
413 waypoints.append({
414 'action_type': 'navigate_to',
415 'target': 'base',
416 'params': {'x': target.get('x', 0), 'y': target.get('y', 0),
417 'speed': max_speed},
418 })
420 return {
421 'trajectory': waypoints,
422 'speed': max_speed,
423 'source': 'basic_planner',
424 }
426 def _invoke_spatial(self, sensors: dict) -> dict:
427 """Fuse sensor data (GPS, IMU, lidar) into world state.
429 Stores readings in sensor_store for persistence and
430 cross-session recovery.
431 """
432 result: Dict[str, Any] = {
433 'robot_position': {},
434 'world_objects': [],
435 'sensor_summary': {},
436 }
438 try:
439 from integrations.robotics.sensor_store import get_sensor_store
440 from integrations.robotics.sensor_model import SensorReading
441 store = get_sensor_store()
443 # Ingest each sensor type into the store
444 now = time.time()
446 if 'gps' in sensors and isinstance(sensors['gps'], dict):
447 gps = sensors['gps']
448 reading = SensorReading(
449 sensor_id='gps_0',
450 sensor_type='gps',
451 timestamp=now,
452 data={
453 'latitude': gps.get('lat', gps.get('latitude', 0)),
454 'longitude': gps.get('lon', gps.get('longitude', 0)),
455 'altitude': gps.get('alt', gps.get('altitude', 0)),
456 },
457 )
458 store.put_reading(reading)
459 result['robot_position'] = reading.data.copy()
461 if 'imu' in sensors and isinstance(sensors['imu'], dict):
462 imu = sensors['imu']
463 reading = SensorReading(
464 sensor_id='imu_0',
465 sensor_type='imu',
466 timestamp=now,
467 data={
468 'accel_x': imu.get('ax', 0),
469 'accel_y': imu.get('ay', 0),
470 'accel_z': imu.get('az', 0),
471 'gyro_x': imu.get('gx', 0),
472 'gyro_y': imu.get('gy', 0),
473 'gyro_z': imu.get('gz', 0),
474 },
475 )
476 store.put_reading(reading)
477 result['sensor_summary']['imu'] = reading.data.copy()
479 if 'lidar' in sensors:
480 lidar_data = sensors['lidar']
481 reading = SensorReading(
482 sensor_id='lidar_0',
483 sensor_type='lidar',
484 timestamp=now,
485 data={
486 'ranges': lidar_data if isinstance(lidar_data, list)
487 else lidar_data.get('ranges', [])
488 if isinstance(lidar_data, dict) else [],
489 },
490 )
491 store.put_reading(reading)
492 ranges = reading.data.get('ranges', [])
493 result['sensor_summary']['lidar'] = {
494 'num_points': len(ranges),
495 'min_range': min(ranges) if ranges else None,
496 'max_range': max(ranges) if ranges else None,
497 }
499 if 'touch' in sensors:
500 touch = sensors['touch']
501 reading = SensorReading(
502 sensor_id='contact_0',
503 sensor_type='contact',
504 timestamp=now,
505 data={
506 'is_contact': any(touch) if isinstance(touch, list)
507 else bool(touch),
508 'values': touch,
509 },
510 )
511 store.put_reading(reading)
512 result['sensor_summary']['touch'] = reading.data.copy()
514 # Overall store stats
515 result['active_sensors'] = store.active_sensors()
517 except Exception as exc:
518 logger.debug("Spatial intelligence error: %s", exc)
519 result['error'] = str(exc)
521 return result
523 def _invoke_social(self, context: str, robot_id: str) -> dict:
524 """Check resonance profile for the interacting human.
526 Adapts tone and behavior based on user preference history.
527 """
528 default = {
529 'tone': 'neutral',
530 'urgency': 'normal',
531 'formality': 0.5,
532 'verbosity': 0.5,
533 }
535 # Derive a user_id from the robot's registered operator
536 user_id = None
537 with self._lock:
538 robot = self._registry.get(robot_id, {})
539 user_id = robot.get('operator_id', robot.get('user_id'))
541 if not user_id:
542 return default
544 try:
545 from core.resonance_tuner import get_resonance_tuner
546 tuner = get_resonance_tuner()
547 # Analyze context to get social signals
548 profile = tuner.analyze_and_tune(
549 user_id=user_id,
550 user_message=context,
551 assistant_response='',
552 )
553 if profile and hasattr(profile, 'tuning'):
554 t = profile.tuning
555 urgency = 'high' if 'urgent' in context.lower() \
556 or 'emergency' in context.lower() else 'normal'
557 return {
558 'tone': 'formal' if t.get('formality', 0.5) > 0.6
559 else 'casual',
560 'urgency': urgency,
561 'formality': t.get('formality', 0.5),
562 'verbosity': t.get('verbosity', 0.5),
563 }
564 except Exception as exc:
565 logger.debug("Social intelligence fallback: %s", exc)
567 # Keyword-based urgency fallback
568 urgency = 'normal'
569 lower = context.lower()
570 if any(w in lower for w in ('urgent', 'emergency', 'hurry', 'quick')):
571 urgency = 'high'
572 elif any(w in lower for w in ('whenever', 'no rush', 'later')):
573 urgency = 'low'
575 return {
576 'tone': 'helpful',
577 'urgency': urgency,
578 'formality': 0.5,
579 'verbosity': 0.5,
580 }
582 def _invoke_safety(self, action_plan: dict, constraints: dict) -> dict:
583 """Run proposed action through safety monitor.
585 Checks E-stop state, workspace limits, and user constraints.
586 Returns safe/unsafe with warnings.
587 """
588 warnings: List[str] = []
589 safe = True
591 try:
592 from integrations.robotics.safety_monitor import get_safety_monitor
593 monitor = get_safety_monitor()
595 # E-stop check
596 if monitor.is_estopped:
597 return {
598 'safe': False,
599 'warnings': ['E-STOP ACTIVE: all motion halted'],
600 'estop': True,
601 }
603 # Workspace limit check on proposed positions
604 if action_plan:
605 steps = action_plan.get('steps', [])
606 for step in steps:
607 pos = step.get('position', step.get('params', {}))
608 if pos and not monitor.check_position_safe(pos):
609 warnings.append(
610 f"Step '{step.get('action_type', '?')}' "
611 f"exceeds workspace limits"
612 )
613 safe = False
614 except Exception as exc:
615 logger.debug("Safety monitor unavailable: %s", exc)
616 warnings.append(f'safety_monitor_unavailable: {exc}')
618 # Constraint checks
619 if constraints:
620 if constraints.get('no_stairs') and action_plan:
621 for step in action_plan.get('steps', []):
622 if 'stair' in str(step).lower():
623 warnings.append('Stairs prohibited by constraints')
624 safe = False
626 max_speed = constraints.get('max_speed')
627 if max_speed is not None and action_plan:
628 for step in action_plan.get('steps', []):
629 step_speed = step.get('params', {}).get('speed', 0)
630 if step_speed > max_speed:
631 warnings.append(
632 f"Step speed {step_speed} exceeds max {max_speed}"
633 )
634 safe = False
636 return {
637 'safe': safe,
638 'warnings': warnings,
639 'estop': False,
640 }
642 def _invoke_hivemind(self, context: str, sensors_summary: dict) -> dict:
643 """Query WorldModelBridge for collective intelligence.
645 How have other robots in the hive handled similar situations?
646 """
647 try:
648 from integrations.agent_engine.world_model_bridge import (
649 get_world_model_bridge,
650 )
651 bridge = get_world_model_bridge()
652 query = (
653 f"Robot task: {context}. "
654 f"Sensors available: {list(sensors_summary.keys())}. "
655 f"What strategies have worked for similar tasks?"
656 )
657 result = bridge.query_hivemind(
658 query, timeout_ms=int(INTELLIGENCE_TIMEOUT_S * 800))
659 if result:
660 return {
661 'similar_tasks': result.get('match_count', 0),
662 'best_strategy': result.get('thought',
663 result.get('strategy', '')),
664 'confidence': result.get('confidence', 0.0),
665 'contributing_agents': result.get('agents', []),
666 'source': result.get('source', 'hivemind'),
667 }
668 except Exception as exc:
669 logger.debug("Hivemind intelligence fallback: %s", exc)
671 return {
672 'similar_tasks': 0,
673 'best_strategy': '',
674 'confidence': 0.0,
675 'contributing_agents': [],
676 'source': 'unavailable',
677 }
679 # ------------------------------------------------------------------
680 # Fusion
681 # ------------------------------------------------------------------
683 def _fuse_results(self, results: dict) -> dict:
684 """Combine all intelligence outputs into a coherent action plan.
686 Priority order: safety > motor > spatial > vision > language >
687 social > hivemind.
689 If safety says unsafe, the plan is halted. Otherwise motor
690 provides the trajectory, spatial/vision provide context, and
691 language/social/hivemind add qualitative enrichment.
692 """
693 plan: Dict[str, Any] = {
694 'primary_action': 'idle',
695 'steps': [],
696 'estimated_duration_s': 0,
697 'confidence': 0.0,
698 }
700 # Safety gate — if unsafe, return halt plan
701 safety = results.get('safety', {})
702 if safety.get('estop') or (not safety.get('safe', True)):
703 plan['primary_action'] = 'halt'
704 plan['steps'] = []
705 plan['confidence'] = 1.0
706 plan['safety_warnings'] = safety.get('warnings', [])
707 return plan
709 # Motor provides trajectory
710 motor = results.get('motor', {})
711 trajectory = motor.get('trajectory', [])
712 if trajectory:
713 plan['steps'] = trajectory
714 plan['primary_action'] = 'execute_trajectory'
715 plan['estimated_duration_s'] = len(trajectory) * 2 # rough 2s/step
717 # Language provides intent which may refine primary_action
718 language = results.get('language', {})
719 intent = language.get('intent', '')
720 if intent and intent != 'idle' and plan['primary_action'] == 'idle':
721 plan['primary_action'] = intent
723 # Spoken response
724 if language.get('response'):
725 plan['spoken_response'] = language['response']
727 # Vision context
728 vision = results.get('vision', {})
729 if vision.get('obstacles'):
730 plan['obstacles_detected'] = vision['obstacles']
732 # Spatial enrichment
733 spatial = results.get('spatial', {})
734 if spatial.get('robot_position'):
735 plan['robot_position'] = spatial['robot_position']
737 # Social tone
738 social = results.get('social', {})
739 if social.get('tone'):
740 plan['interaction_tone'] = social['tone']
741 if social.get('urgency') == 'high':
742 plan['primary_action'] = plan['primary_action'] or 'urgent_response'
744 # Hivemind enrichment
745 hivemind = results.get('hivemind', {})
746 if hivemind.get('best_strategy'):
747 plan['hive_suggestion'] = hivemind['best_strategy']
749 # Confidence: weighted average of available intelligences
750 confidences = []
751 if 'confidence' in hivemind:
752 confidences.append(hivemind['confidence'])
753 if safety.get('safe') is True:
754 confidences.append(1.0)
755 elif safety.get('safe') is False:
756 confidences.append(0.0)
757 # If motor produced steps, that's a good sign
758 if trajectory:
759 confidences.append(0.8)
760 if vision.get('scene') and vision['scene'] not in (
761 'no_camera', 'vlm_unavailable', 'resource_throttled'):
762 confidences.append(0.85)
763 if intent and intent != 'idle':
764 confidences.append(0.75)
766 if confidences:
767 plan['confidence'] = round(
768 sum(confidences) / len(confidences), 2)
770 return plan
772 # ------------------------------------------------------------------
773 # Robot Registry
774 # ------------------------------------------------------------------
776 def register_robot(self, robot_id: str, capabilities: dict) -> dict:
777 """Register a new robot with its capabilities.
779 Args:
780 robot_id: Unique robot identifier.
781 capabilities: Dict describing sensors, actuators, form_factor,
782 payload, operator_id, etc.
784 Returns:
785 Registration confirmation with timestamp.
786 """
787 now = time.time()
788 entry = {
789 'robot_id': robot_id,
790 'capabilities': capabilities,
791 'registered_at': now,
792 'last_seen': now,
793 'total_think_calls': 0,
794 'status': 'online',
795 }
796 # Preserve operator_id/user_id at top level for social lookup
797 for key in ('operator_id', 'user_id'):
798 if key in capabilities:
799 entry[key] = capabilities[key]
801 with self._lock:
802 self._registry[robot_id] = entry
803 self._save_registry()
805 logger.info("Robot registered: %s (form=%s)",
806 robot_id, capabilities.get('form_factor', 'unknown'))
808 return {
809 'registered': True,
810 'robot_id': robot_id,
811 'timestamp': now,
812 }
814 def get_robot_status(self, robot_id: str) -> dict:
815 """Get a robot's current state, last action, intelligence stats."""
816 with self._lock:
817 robot = self._registry.get(robot_id)
818 if not robot:
819 return {'found': False, 'robot_id': robot_id}
821 staleness = time.time() - robot.get('last_seen', 0)
822 status = 'online' if staleness < 120 else 'stale'
823 return {
824 'found': True,
825 'robot_id': robot_id,
826 'capabilities': robot.get('capabilities', {}),
827 'registered_at': robot.get('registered_at'),
828 'last_seen': robot.get('last_seen'),
829 'status': status,
830 'staleness_s': round(staleness, 1),
831 'total_think_calls': robot.get('total_think_calls', 0),
832 }
834 def list_robots(self) -> List[dict]:
835 """List all registered robots with summary status."""
836 now = time.time()
837 result = []
838 with self._lock:
839 for rid, robot in self._registry.items():
840 staleness = now - robot.get('last_seen', 0)
841 result.append({
842 'robot_id': rid,
843 'form_factor': robot.get('capabilities', {}).get(
844 'form_factor', 'unknown'),
845 'status': 'online' if staleness < 120 else 'stale',
846 'last_seen': robot.get('last_seen'),
847 'total_think_calls': robot.get('total_think_calls', 0),
848 })
849 return result
851 def get_hive_stats(self) -> dict:
852 """Hive-wide robotics statistics."""
853 with self._lock:
854 total_robots = len(self._registry)
855 now = time.time()
856 online = sum(
857 1 for r in self._registry.values()
858 if now - r.get('last_seen', 0) < 120
859 )
860 return {
861 'total_robots': total_robots,
862 'online_robots': online,
863 'intelligence_types': INTELLIGENCE_TYPES,
864 'stats': dict(self._stats),
865 }
867 # ------------------------------------------------------------------
868 # Registry persistence
869 # ------------------------------------------------------------------
871 def _load_registry(self):
872 """Load robot registry from disk."""
873 try:
874 if os.path.exists(_REGISTRY_PATH):
875 with open(_REGISTRY_PATH, 'r', encoding='utf-8') as f:
876 self._registry = json.load(f)
877 logger.info("Robot registry loaded: %d robots",
878 len(self._registry))
879 except Exception as exc:
880 logger.warning("Failed to load robot registry: %s", exc)
881 self._registry = {}
883 def _save_registry(self):
884 """Persist robot registry to disk. Caller must hold self._lock."""
885 try:
886 registry_dir = os.path.dirname(_REGISTRY_PATH)
887 os.makedirs(registry_dir, exist_ok=True)
888 with open(_REGISTRY_PATH, 'w', encoding='utf-8') as f:
889 json.dump(self._registry, f, indent=2, default=str)
890 except Exception as exc:
891 logger.warning("Failed to save robot registry: %s", exc)
893 def push_sensor_data(self, robot_id: str, sensors: dict) -> dict:
894 """Accept streaming sensor data push from a robot.
896 Stores sensors via the spatial intelligence path (sensor_store)
897 and updates the robot's last_seen timestamp.
899 Args:
900 robot_id: Robot identifier.
901 sensors: Sensor data dict (same format as think() sensors).
903 Returns:
904 Confirmation with count of sensor types ingested.
905 """
906 with self._lock:
907 if robot_id in self._registry:
908 self._registry[robot_id]['last_seen'] = time.time()
910 # Reuse spatial intelligence to ingest sensors
911 try:
912 spatial_result = self._invoke_spatial(sensors)
913 return {
914 'accepted': True,
915 'robot_id': robot_id,
916 'sensors_ingested': list(sensors.keys()),
917 'active_sensors': spatial_result.get('active_sensors', []),
918 }
919 except Exception as exc:
920 return {
921 'accepted': False,
922 'robot_id': robot_id,
923 'error': str(exc),
924 }
927# ---------------------------------------------------------------------------
928# Flask Blueprint
929# ---------------------------------------------------------------------------
def create_blueprint():
    """Create and return the Flask Blueprint for the Robot Intelligence API.

    Flask is imported lazily so this module stays importable without
    Flask installed (e.g., in test environments or non-web usage).
    """
    from flask import Blueprint, request, jsonify

    robot_intelligence_bp = Blueprint(
        'robot_intelligence', __name__, url_prefix='/api/robotics/ai')

    # Endpoint function names are kept stable: Flask derives endpoint
    # ids (and url_for targets) from them.

    @robot_intelligence_bp.route('/think', methods=['POST'])
    def think_endpoint():
        """POST /api/robotics/ai/think -- Main intelligence fusion endpoint."""
        payload = request.get_json(silent=True) or {}
        if not payload.get('robot_id'):
            return jsonify({'error': 'robot_id is required'}), 400
        return jsonify(get_robot_api().think(payload))

    @robot_intelligence_bp.route('/register', methods=['POST'])
    def register_endpoint():
        """POST /api/robot/register -- Register robot capabilities."""
        payload = request.get_json(silent=True) or {}
        robot_id = payload.get('robot_id')
        if not robot_id:
            return jsonify({'error': 'robot_id is required'}), 400
        return jsonify(get_robot_api().register_robot(
            robot_id, payload.get('capabilities', {})))

    @robot_intelligence_bp.route('/<robot_id>/status', methods=['GET'])
    def status_endpoint(robot_id):
        """GET /api/robot/<id>/status -- Robot state."""
        status = get_robot_api().get_robot_status(robot_id)
        return jsonify(status), (200 if status.get('found') else 404)

    @robot_intelligence_bp.route('/list', methods=['GET'])
    def list_endpoint():
        """GET /api/robot/list -- All registered robots."""
        return jsonify({'robots': get_robot_api().list_robots()})

    @robot_intelligence_bp.route('/<robot_id>/sensors', methods=['POST'])
    def sensor_push_endpoint(robot_id):
        """POST /api/robot/<id>/sensors -- Push sensor data (streaming)."""
        payload = request.get_json(silent=True) or {}
        return jsonify(get_robot_api().push_sensor_data(
            robot_id, payload.get('sensors', payload)))

    @robot_intelligence_bp.route('/hive/stats', methods=['GET'])
    def hive_stats_endpoint():
        """GET /api/robot/hive/stats -- Hive robotics statistics."""
        return jsonify(get_robot_api().get_hive_stats())

    return robot_intelligence_bp
996# Removed 2026-04-15: create_intelligence_blueprint() and robotics_intelligence_bp.
997# This was a dual-export orphan — routes (/api/robotics/intelligence/think and
998# /robots) were a strict subset of the canonical robot_intelligence_bp routes at
999# /api/robotics/ai/* (think, register, <id>/status, list, <id>/sensors,
1000# hive/stats) which is registered in hart_intelligence_entry.py. No caller ever
1001# imported robotics_intelligence_bp — deleting eliminates the parallel path.
# Module-level blueprint for import convenience.
# Usage: from integrations.robotics.intelligence_api import robot_intelligence_bp
# Importers must tolerate the None case when Flask is absent.
try:
    robot_intelligence_bp = create_blueprint()
except ImportError:
    # Flask not installed -- blueprint unavailable but class still works
    robot_intelligence_bp = None
1013# ---------------------------------------------------------------------------
1014# MCP Tool Registration
1015# ---------------------------------------------------------------------------
def register_mcp_tools(register_fn: Callable):
    """Register robot intelligence tools in the MCP bridge.

    Args:
        register_fn: MCP tool registration function with signature
            register_fn(name: str, description: str, handler: callable)
    """
    tools = (
        ('robot_think',
         'Send sensor data to robot, get fused multi-intelligence action plan',
         _mcp_think),
        ('robot_register',
         'Register a new robot with capabilities',
         _mcp_register),
        ('robot_list',
         'List all robots connected to the hive',
         _mcp_list),
    )
    for tool_name, description, handler in tools:
        register_fn(tool_name, description, handler)
def _mcp_think(params: dict) -> dict:
    """MCP handler: invoke think()."""
    return get_robot_api().think(params)
1047def _mcp_register(params: dict) -> dict:
1048 """MCP handler: register a robot."""
1049 api = get_robot_api()
1050 robot_id = params.get('robot_id', '')
1051 capabilities = params.get('capabilities', {})
1052 if not robot_id:
1053 return {'error': 'robot_id is required'}
1054 return api.register_robot(robot_id, capabilities)
def _mcp_list(params: dict) -> dict:
    """MCP handler: list robots."""
    return {'robots': get_robot_api().list_robots()}
1063# ---------------------------------------------------------------------------
1064# Helpers
1065# ---------------------------------------------------------------------------
1067def _classify_intent(response: str, context: str) -> str:
1068 """Simple keyword-based intent classification fallback."""
1069 text = (response + ' ' + context).lower()
1070 if any(w in text for w in ('fetch', 'get', 'bring', 'pick up', 'grab')):
1071 return 'fetch_object'
1072 if any(w in text for w in ('go to', 'navigate', 'move to', 'drive')):
1073 return 'navigate'
1074 if any(w in text for w in ('hello', 'hi', 'hey', 'greet')):
1075 return 'greet'
1076 if any(w in text for w in ('emergency', 'stop', 'halt', 'danger')):
1077 return 'emergency'
1078 if any(w in text for w in ('help', 'assist', 'support')):
1079 return 'assist'
1080 if any(w in text for w in ('tell', 'inform', 'report', 'status')):
1081 return 'inform'
1082 return 'assist'
1085def _extract_target(context: str, vision_data: dict) -> dict:
1086 """Extract a target position from context and vision data.
1088 This is a best-effort heuristic — real navigation targets come
1089 from HevolveAI's scene graph.
1090 """
1091 target: Dict[str, Any] = {}
1093 # If vision detected specific objects, pick the first as target
1094 objects = vision_data.get('objects', [])
1095 if objects:
1096 first = objects[0]
1097 if isinstance(first, dict):
1098 target = {
1099 'x': first.get('x', 0),
1100 'y': first.get('y', 0),
1101 'label': first.get('label', 'object'),
1102 }
1103 elif isinstance(first, str):
1104 target = {'label': first}
1106 return target