Coverage for core / constants.py: 89.6%

48 statements  

« prev     ^ index     » next       coverage.py v7.14.0, created at 2026-05-12 04:49 +0000

1"""Module-level constants shared across HARTOS. 

2 

3This file is the single source of truth for literal values that were 

4previously hardcoded in multiple modules. Before this file existed the 

5channel registry, flask integration, dynamic agent registry, test 

6fixtures, and example scripts each carried their own copy of 

7``10077`` / ``8888`` with no mechanism to keep them in sync. 

8 

9Import from here instead of repeating literals: 

10 

11 from core.constants import DEFAULT_USER_ID, DEFAULT_PROMPT_ID 

12 

13Why these specific values: 

14 DEFAULT_USER_ID = 10077 — the guest/unauthenticated Hevolve user 

15 account used by channel adapters, test fixtures, and 

16 standalone entry points that haven't resolved a real user yet. 

17 Any real user_id comes from UserChannelBinding resolution, 

18 JWT auth, or the frontend session — the default only fires 

19 when every other source is empty. 

20 DEFAULT_PROMPT_ID = 8888 — the pre-registered default agent prompt 

21 that serves generic chat when no custom agent_id is provided. 

22 Tests and the channel fallback path both point here so a 

23 brand-new install answers chat requests out of the box. 

24""" 

25 

26DEFAULT_USER_ID: int = 10077 

27DEFAULT_PROMPT_ID: int = 8888 

28 

29 

# ──────────────────────────────────────────────────────────────────────
# HIVE_DEPTH — maximum hop count for any cross-host task / hivemind /
# federation propagation.
#
# The Hevolve topology is a strict 3-level pyramid:
#     flat (desktop) → regional (edge) → central (cloud)
# A task submitted on a flat node may hop up to regional and up to
# central (2 hops = 3 levels). Any propagation deeper is either a bug
# (cycle) or an attempt to fan out beyond the published topology, and
# the coordinator must reject it.
#
# NOTE(review): with 3 levels the maximum legal hop count is 2, yet the
# coordinator note below says it rejects only context['hop'] >
# HIVE_DEPTH, which would still admit hop == 3. Confirm at the
# coordinator whether the guard should be `>= HIVE_DEPTH` (or whether
# this constant is really a level count) — not verifiable from this
# file alone.
#
# Single source of truth — consumed by:
#   - integrations.distributed_agent.task_coordinator.submit_goal
#     (stamps initial hop=0, rejects context['hop'] > HIVE_DEPTH)
#   - integrations.distributed_agent.worker_loop (before re-dispatching
#     a claimed task to a deeper hive layer)
#
# Keep in sync with security.key_delegation.get_node_tier() which also
# enumerates the same three tiers.
# ──────────────────────────────────────────────────────────────────────
HIVE_DEPTH: int = 3

51 

52 

# ISO 639-1 (plus a few region/三-letter codes) → English display name.
# Used by hart_intelligence_entry (system prompt), speculative_dispatcher
# (draft language prompt), and _persist_language (validation).
# Membership here is the registration gate for every other language set
# in this module (see the import-time invariants below).
SUPPORTED_LANG_DICT = {
    "ar": "Arabic", "bg": "Bulgarian", "zh": "Chinese",
    "zh-cn": "Chinese (Simplified)", "nl": "Dutch", "fi": "Finnish",
    "fr": "French", "de": "German", "el": "Greek", "he": "Hebrew",
    "hu": "Hungarian", "is": "Icelandic", "id": "Indonesian",
    "ko": "Korean", "lv": "Latvian", "ms": "Malay", "fa": "Persian",
    "pl": "Polish", "pt": "Portuguese", "ro": "Romanian", "ru": "Russian",
    "es": "Spanish", "sw": "Swahili", "sv": "Swedish", "th": "Thai",
    "tr": "Turkish", "uk": "Ukrainian", "ur": "Urdu", "vi": "Vietnamese",
    "cy": "Welsh", "hi": "Hindi", "bn": "Bengali", "ta": "Tamil",
    "pa": "Punjabi", "gu": "Gujarati", "kn": "Kannada", "te": "Telugu",
    "mr": "Marathi", "ml": "Malayalam", "en": "English",
    # Indian English — Indic-accented variant. Code preserved (NOT
    # collapsed by `_normalize_lang` the way en-US is) because Nunba's
    # TTS preference for en-IN routes to Indic Parler (ai4bharat,
    # trained on All India Radio + Indic corpora) at position 1, while
    # plain `en` keeps the chatterbox-first American/expressive ladder.
    # See `Nunba/tts/tts_engine.py:_FALLBACK_LANG_ENGINE_PREFERENCE`.
    "en-IN": "English (Indian)",
    "ja": "Japanese", "it": "Italian", "ne": "Nepali", "si": "Sinhala",
    "or": "Odia", "as": "Assamese", "sd": "Sindhi", "ks": "Kashmiri",
    "doi": "Dogri", "mni": "Manipuri", "sa": "Sanskrit", "kok": "Konkani",
    "mai": "Maithili", "brx": "Bodo", "sat": "Santali",
    # SEA Brahmi-derived scripts — added for NON_LATIN_SCRIPT_LANGS
    # membership so the sub-1B draft-skip gate recognises them.
    "km": "Khmer", "lo": "Lao", "my": "Burmese",
    # Cyrillic / Greek — weaker but non-zero 0.8B coverage; listed so
    # NON_LATIN_SCRIPT_LANGS assertion passes.
    "sr": "Serbian",
}

86 

87 

# Indic-language ISO 639-1 codes: the Brahmi-family scripts plus Urdu
# and Sindhi (written in Perso-Arabic). This is the one canonical
# answer to "is this an Indic language?" — it feeds TTS routing
# (Indic Parler) and is folded into NON_LATIN_SCRIPT_LANGS below.
# Historically duplicated as _INDIC_LANGS in tts/tts_engine.py.
INDIC_LANGS = frozenset((
    "as", "bn", "brx", "doi", "gu", "hi", "kn", "kok",
    "mai", "ml", "mni", "mr", "ne", "or", "pa", "sa",
    "sat", "sd", "ta", "te", "ur",
))

98 

99 

# ISO 639-1 codes where sub-1B LLMs (the Qwen3.5-0.8B-class draft
# model) produce Latin-transliterated output ("Vanakkam" instead of
# native Tamil script) due to weak Unicode-script tokenizer coverage.
#
# Single source of truth — consumed by:
#   - integrations.agent_engine.speculative_dispatcher
#     (dispatch_draft_first skip-gate at runtime)
#   - integrations.service_tools.model_lifecycle
#     (on_lang_change subscriber — evicts draft on switch TO these)
#
# Derived from INDIC_LANGS plus the other non-Latin script families.
# Do NOT inline a duplicate frozenset anywhere else — import this.
NON_LATIN_SCRIPT_LANGS = INDIC_LANGS | frozenset({
    # CJK
    "zh", "ja", "ko",
    # RTL (Arabic / Hebrew / Persian)
    "ar", "he", "fa",
    # Southeast Asian Brahmi-derived
    "th", "lo", "km", "my",
    # Cyrillic + Greek (historically included by HIE's inline
    # _NON_LATIN_LANGS; kept here for parity + weaker 0.8B coverage)
    "ru", "uk", "bg", "sr", "el",
})

# Invariant: every code in NON_LATIN_SCRIPT_LANGS must be a registered
# language in SUPPORTED_LANG_DICT. Fails loud at import time on drift,
# so adding a code to the set without registering its display name is
# a build-time error, not a runtime mystery.
# Checked with an explicit raise rather than a bare `assert` so the
# guard still fires under `python -O` (asserts are stripped when
# optimizations are enabled — a bare assert would silently skip this).
_unknown_script_codes = NON_LATIN_SCRIPT_LANGS - set(SUPPORTED_LANG_DICT)
if _unknown_script_codes:
    raise AssertionError(
        f"NON_LATIN_SCRIPT_LANGS has codes not in SUPPORTED_LANG_DICT: "
        f"{_unknown_script_codes}"
    )
del _unknown_script_codes

132 

133 

# ──────────────────────────────────────────────────────────────────────
# GREETINGS — canonical, localized "first-run handshake" phrase per
# language. Used by the TTS first-run handshake smoke test
# (tts/tts_handshake.py) to synthesize a phrase the user actually hears
# before the "Voice engine ready" banner flips.
#
# Single source of truth — replaces two historical parallel paths:
#   1. tts/verified_synth._TEST_PHRASES (synthesis probe)
#   2. the "ready to use" string that the React card heuristically
#      matched to flip isComplete before any audio had been produced.
#
# Contract:
#   * Keys are ISO 639-1 codes that appear in SUPPORTED_LANG_DICT
#     (enforced by the import-time invariant further down).
#   * Values are phrases long enough to produce > MIN_AUDIO_BYTES
#     (~0.5s at 22kHz mono) on CPU synth in under 30 seconds.
#   * English 'en' is the fallback when a requested lang is missing.
#
# Scope — only the languages that TTS backends actually ship support
# for today. Do NOT bulk-add entries without verifying the engine
# can synth them; a missing entry falls back to English, which is
# preferable to synthesizing garbage.
# ──────────────────────────────────────────────────────────────────────
GREETINGS = {
    # Core — every Nunba install can hit these via at least one engine.
    "en": "Hey, I'm Nunba. Can you hear me?",
    "ta": "வணக்கம், நான் நண்பா. என்னுடைய குரல் கேட்கிறதா?",
    "hi": "नमस्ते, मैं नन्बा हूँ। क्या आप मुझे सुन सकते हैं?",
    # Indic Parler cohort — its 21-language allowlist, minus the
    # scripts we haven't hand-verified greetings for.
    # Transliteration intent: the brand "Nunba" reads aloud as "Nan-baa"
    # (rhymes with "Numba" the JIT lib). Indic scripts use "न + न" /
    # "ன + ன" / equivalent so TTS synth renders the intended phonetics.
    "bn": "হ্যালো, আমি নন্বা। আপনি কি আমাকে শুনতে পাচ্ছেন?",
    "te": "హలో, నేను నన్బా. మీరు నన్ను వినగలరా?",
    "ml": "ഹലോ, ഞാൻ നൻബ. എനിക്കു നിങ്ങൾ കേൾക്കാനാകുമോ?",
    "kn": "ಹಲೋ, ನಾನು ನನ್ಬಾ. ನೀವು ನನ್ನನ್ನು ಕೇಳಬಹುದೆ?",
    "mr": "नमस्कार, मी नन्बा. तुम्ही मला ऐकू शकता का?",
    "gu": "નમસ્તે, હું નન્બા છું. શું તમે મને સાંભળી શકો છો?",
    "pa": "ਸਤਿ ਸ੍ਰੀ ਅਕਾਲ, ਮੈਂ ਨਨਬਾ ਹਾਂ। ਕੀ ਤੁਸੀਂ ਮੈਨੂੰ ਸੁਣ ਸਕਦੇ ਹੋ?",
    "ur": "ہیلو، میں نَنبا ہوں۔ کیا آپ مجھے سن سکتے ہیں؟",
    # Chatterbox Multilingual + CosyVoice3 cohort
    "zh": "你好,我是 Nunba。你能听到我吗?",
    "ja": "こんにちは、私はNunbaです。聞こえますか?",
    "ko": "안녕하세요, 저는 Nunba입니다. 제 목소리가 들리시나요?",
    "fr": "Bonjour, je suis Nunba. Vous m'entendez ?",
    "es": "Hola, soy Nunba. ¿Me escuchas?",
    "de": "Hallo, ich bin Nunba. Kannst du mich hören?",
    "it": "Ciao, sono Nunba. Mi senti?",
    "ru": "Привет, я Nunba. Вы меня слышите?",
    "pt": "Olá, eu sou o Nunba. Você consegue me ouvir?",
}


# Fallback language key when the requested language isn't in GREETINGS.
# Kept as a named constant (not a magic literal) so call sites read
# clearly and tests can refer to it by name.
GREETING_FALLBACK_LANG: str = "en"

191 

192 

# ──────────────────────────────────────────────────────────────────────
# Brand identity — used as the assistant's "Who am I?" sentence in
# every English chat path that doesn't already carry a per-agent
# persona. Single source of truth so HARTOS's draft prompt and Nunba's
# fallback chat handler can never drift on the brand wording.
#
# Non-English paths get their identity through
# core.agent_personality.get_regional_tone_prompt(lang) — which carries
# the Nunba name natively in script (e.g. Tamil "நண்பா"). This
# constant is for English (and any language with no regional-tone
# entry).
#
# Call sites:
#   - integrations/agent_engine/speculative_dispatcher.py
#     (HARTOS draft prompt persona_block default)
#   - Nunba/routes/hartos_backend_adapter.py
#     (cold-boot fallback chat system prompt)
#
# Phrasing intentionally short — every byte costs draft-prompt tokens
# and Nunba is also a TTS-spoken name (the brand identity reads
# naturally aloud). Per-site framing (privacy mention, language
# directive, etc.) is added on top by the call site, not baked here.
# Do not edit casually: this string is runtime behavior, not a comment.
# ──────────────────────────────────────────────────────────────────────
NUNBA_BRAND_IDENTITY: str = (
    "You are Nunba, a friendly and helpful local AI assistant. "
    "Hevolve.ai is the web cloud version of Nunba — same intelligence, "
    "different deployment. With hive enabled, you crowdsource "
    "intelligence from peer Nunba devices and Hevolve cloud nodes."
)

222 

223 

# Every GREETINGS key MUST be a registered language. Mirrors the
# NON_LATIN_SCRIPT_LANGS invariant above — a missing display name for
# a greeting-supported lang is a build-time error, not a runtime
# "None" appearing in a banner.
# Checked with explicit raises rather than bare `assert` statements so
# the guards still fire under `python -O` (asserts are stripped when
# optimizations are enabled — bare asserts would silently skip these).
_unregistered_greetings = set(GREETINGS) - set(SUPPORTED_LANG_DICT)
if _unregistered_greetings:
    raise AssertionError(
        f"GREETINGS has codes not in SUPPORTED_LANG_DICT: "
        f"{_unregistered_greetings}"
    )
del _unregistered_greetings
# The designated fallback must itself have a greeting, or the fallback
# path would KeyError at the first unsupported language.
if GREETING_FALLBACK_LANG not in GREETINGS:
    raise AssertionError(
        f"GREETING_FALLBACK_LANG={GREETING_FALLBACK_LANG!r} is not in GREETINGS"
    )

235 

236 

# ──────────────────────────────────────────────────────────────────────
# VISION INTENT — phrases signalling that the user wants Nunba to use
# the camera, describe the scene, see them, or read a screen.
# When the 0.8B draft classifier marks a turn `is_casual=True`, the
# dispatcher short-circuits to the draft reply and never loads the
# LangChain tool registry — so `Visual_Context_Camera` cannot run even
# on a turn that plainly needs vision.
#
# hart_intelligence_entry's dispatch path consults this set as a safety
# net: any match forces the full LangChain route so
# `parse_visual_context` stays reachable.
#
# Single source of truth — never inline a parallel regex elsewhere.
# Entries are lower-cased; callers must lowercase the prompt before
# matching.
# ──────────────────────────────────────────────────────────────────────
import re as _vis_re

VISION_INTENT_KEYWORDS: tuple = (
    # direct camera / vision verbs
    "see me", "see my", "see what", "look at me", "look at my",
    "looking at me", "watch me", "watch my",
    # describe-scene phrases
    "what do you see", "what can you see", "what am i doing",
    "what am i wearing", "what am i holding", "what's in front of",
    "what is in front of", "what is on my", "what's on my",
    "describe me", "describe what", "describe the scene",
    "describe my", "describe this",
    # camera-specific
    "through my camera", "on my camera", "on the camera",
    "via camera", "using camera", "use the camera", "use my camera",
    "turn on camera", "turn on the camera", "start camera",
    # visual modality
    "can you see", "do you see", "are you seeing",
    "visual context", "visual question", "video call",
    # screen / ocr / read-what-is-shown
    "read the screen", "read my screen", "what's on screen",
    "what is on screen", "what does the screen show",
)

# One-time compile of a word-boundary alternation over the keywords.
# Matching uses `search`, so partial phrasings such as
# "can you see what i'm wearing" still hit.
_VISION_PATTERN_SRC: str = (
    r"\b(?:"
    + "|".join(map(_vis_re.escape, VISION_INTENT_KEYWORDS))
    + r")\b"
)
VISION_INTENT_PATTERN = _vis_re.compile(_VISION_PATTERN_SRC, _vis_re.IGNORECASE)

282 

283 

def prompt_needs_vision(prompt: str) -> bool:
    """Return True when the prompt explicitly asks for a vision/camera
    capability, so the dispatcher takes the LangChain tool path (where
    Visual_Context_Camera can fire) even on a turn the draft
    classifier labelled casual.

    Lightweight regex check — safe to run on every draft fall-through.
    """
    if not prompt:
        return False
    try:
        match = VISION_INTENT_PATTERN.search(prompt)
    except Exception:
        # Defensive: never let the safety net itself break dispatch.
        return False
    return match is not None

298 

299 

# ──────────────────────────────────────────────────────────────────────
# ENCOUNTER_TOPICS — WAMP topic namespace for the P2P encounter feature
# (BLE rotating-pubkey discovery → autonomous sighting correlation →
# avatar-only mutual-like swipe → icebreaker agent → map overlay).
#
# Full design in Claude-memory: project_encounter_icebreaker.md.
#
# Single source of truth — consumed by:
#   - integrations.social.encounter_api (publish on swipe/match)
#   - integrations.agent_engine.goal_seeding (encounter_icebreaker_agent
#     subscribes to 'match' topic)
#   - Nunba desktop wamp_router + landing-page crossbarWorker
#   - Hevolve_React_Native AutobahnConnectionManager (subscribes to
#     per-user 'sighting' and 'icebreaker' private topics)
#
# Per-user privacy scoping: 'sighting', 'swipe', 'icebreaker' are
# always prefixed with the user_id by the publisher; 'match' publishes
# TWO events (one per participant) so one user's subscription never
# leaks the other's pubkey outside the matched pair.
#
# Do NOT inline duplicate topic strings; import from here.
# ──────────────────────────────────────────────────────────────────────
ENCOUNTER_TOPIC_SIGHTING: str = "com.hevolve.encounter.sighting"
ENCOUNTER_TOPIC_SWIPE: str = "com.hevolve.encounter.swipe"
ENCOUNTER_TOPIC_MATCH: str = "com.hevolve.encounter.match"
ENCOUNTER_TOPIC_ICEBREAKER: str = "com.hevolve.encounter.icebreaker"

# Iterable view of the full namespace, in pipeline order.
ENCOUNTER_TOPICS: tuple = (
    ENCOUNTER_TOPIC_SIGHTING,
    ENCOUNTER_TOPIC_SWIPE,
    ENCOUNTER_TOPIC_MATCH,
    ENCOUNTER_TOPIC_ICEBREAKER,
)

333 

# Invariant: all encounter topics share the canonical 'com.hevolve.
# encounter.' prefix so crossbar ACL rules + log grepping are uniform.
# A topic outside this prefix would not be scoped by the existing
# WAMP router authorization (wamp_router.py _handle_publish per-topic
# authorization, Task #301), so drift here is a security regression.
# Checked with an explicit raise rather than a bare `assert` so the
# guard still fires under `python -O` (asserts are stripped when
# optimizations are enabled).
_ENCOUNTER_PREFIX = 'com.hevolve.encounter.'
_bad_encounter_topics = [
    t for t in ENCOUNTER_TOPICS if not t.startswith(_ENCOUNTER_PREFIX)
]
if _bad_encounter_topics:
    raise AssertionError(
        f"ENCOUNTER_TOPICS must all share prefix {_ENCOUNTER_PREFIX!r}: "
        f"{_bad_encounter_topics}"
    )
del _bad_encounter_topics

344 

345 

# ──────────────────────────────────────────────────────────────────────
# ENCOUNTER feature tunables — physical-world sighting correlation.
# A beacon becomes a SIGHTING (autonomously pairing the real person in
# front of the user with their rotating pubkey) only when ALL three
# signal conditions hold at once. Loosen any one and you get
# random-pairs-in-a-crowd; tighten any one and it never fires. Values
# tuned for the phone-in-hand, face-to-face scenario. Pocket-mode
# (low motion detected on both devices) relaxes compass tolerance to
# ±90° per BleSightingDetector rules.
#
# Consumed by:
#   - Hevolve_React_Native BleSightingDetector (Kotlin port of same)
#   - integrations.social.encounter_api.ENCOUNTER_SIGHTING_RULES
#   - tests.unit.test_sighting_correlation
# ──────────────────────────────────────────────────────────────────────
# The three co-required sighting signals:
ENCOUNTER_SIGHTING_RSSI_PEAK_DBM: int = -55        # ~1.5m line-of-sight
ENCOUNTER_SIGHTING_MIN_DWELL_SEC: int = 3          # both parties slowed/stopped
ENCOUNTER_SIGHTING_COMPASS_TOL_DEG: int = 30       # devices facing within cone

# Privacy / discoverability knobs:
ENCOUNTER_PUBKEY_ROTATION_SEC: int = 15 * 60       # 15 min / relaunch / geo-shift
ENCOUNTER_DISCOVERABLE_TTL_SEC: int = 4 * 60 * 60  # 4h auto-off
ENCOUNTER_DISCOVERABLE_MAX_TOGGLES_24H: int = 6

# Match-flow windows:
ENCOUNTER_SIGHTING_EXPIRES_SEC: int = 24 * 60 * 60  # swipe grace window
ENCOUNTER_MATCH_WINDOW_SEC: int = 5 * 60            # both sightings must be
                                                    # within this window to match
ENCOUNTER_DRAFT_MAX_CHARS: int = 220                # icebreaker length cap

371 

372 

# ──────────────────────────────────────────────────────────────────────
# CHAT_TOPICS — WAMP topic namespace for cross-device chat mirroring
# (U1-U8 workstream, task ledger #389).
#
# chat.new — a new assistant or user message was persisted. Payload
#     carries the full ChatMessage row (seq, msg_id, user_id, agent_id,
#     role, content, request_id, lang, device_id, attachments,
#     created_at). Every device subscribed to the per-user topic
#     mirrors the row into its local view.
# chat.ack — a subscriber ACKs receipt up to seq=N. The server uses
#     this to decide when a message can leave the hot cache (the
#     durable row stays in the DB for cursor-pull replay).
#
# Per-user scoping: the publisher MUST suffix the user_id so a
# subscriber can ONLY see their own messages. Enforced by Nunba's
# wamp_router _handle_publish per-topic authorization (Task #301).
#
# Do NOT inline duplicate topic strings; import from here.
# ──────────────────────────────────────────────────────────────────────
CHAT_TOPIC_NEW: str = "com.hertzai.hevolve.chat.new"
CHAT_TOPIC_ACK: str = "com.hertzai.hevolve.chat.ack"

# Iterable view of the chat namespace.
CHAT_TOPICS: tuple = (
    CHAT_TOPIC_NEW,
    CHAT_TOPIC_ACK,
)

399 

# Invariant mirrors ENCOUNTER_TOPICS: shared prefix = uniform ACL. The
# existing chat-reply topic 'com.hertzai.hevolve.chat.{user_id}' at
# hart_intelligence_entry.py:2174,4211 uses the same prefix — per-user
# suffixing happens at publish-time, not at constant-definition time.
# Checked with an explicit raise rather than a bare `assert` so the
# guard still fires under `python -O` (asserts are stripped when
# optimizations are enabled).
_CHAT_PREFIX = 'com.hertzai.hevolve.chat.'
_bad_chat_topics = [t for t in CHAT_TOPICS if not t.startswith(_CHAT_PREFIX)]
if _bad_chat_topics:
    raise AssertionError(
        f"CHAT_TOPICS must all share prefix {_CHAT_PREFIX!r}: "
        f"{_bad_chat_topics}"
    )
del _bad_chat_topics

409 

# Cursor-pull tunables. They bound the worst-case replay so a
# freshly-restored device never stalls on a 10k-message pull, and a
# malicious cursor=0 request cannot exfiltrate the entire table.
CHAT_CURSOR_PULL_MAX_ROWS: int = 500
CHAT_CURSOR_PULL_MAX_BYTES: int = 2 * 1024 * 1024  # 2 MB body cap

415 

416 

# Chat-hot-path stage strings (#508) — the single i18n site, consumed
# by publish_chat_stage(). Keep every value ≤ 60 chars (the UI bubble
# truncates anything longer).
CHAT_STAGE_TEXTS: dict = {
    'loading_context': 'Loading your context…',
    'loading_memory': 'Recalling our recent chat…',
    'loading_tools': 'Preparing tools…',
    'thinking': 'Thinking…',
    'generating': 'Generating a response…',
    'finalizing': 'Finalizing the answer…',
    # Generic fallback used by _with_tool_logging — callers pass
    # text=TOOL_LABELS.get(name, 'Running {name}…') for the real text.
    'tool_call': 'Running a tool…',
}

# Fast membership view over the valid stage names.
CHAT_STAGES: frozenset = frozenset(CHAT_STAGE_TEXTS)

431 

432 

# Per-tool human-readable labels for tool_call stage emits (#508). Static
# entries cover the hardcoded tools in get_tools(is_first=True) + canonical
# provider/builtin tools. Dynamic tool registries (skills, service_tools,
# providers, Tier-2 goal-aware) should call register_tool_label() at the
# point of Tool() construction so their tools also get a friendly label.
# Unknown tools fall back to "Running {name}…" at the emit site.
# Keep values <= 60 chars (UI bubble truncates).
# NOTE: keys are case-sensitive tool names; some tools appear twice
# under different registration systems (autogen snake_case vs LangChain
# literals) — see the per-section notes below.
TOOL_LABELS: dict = {
    # Memory + history
    'FULL_HISTORY': 'Searching your message history…',
    'recall_memory': 'Recalling memories…',
    'remember_memory': 'Saving to memory…',
    # Computation + web
    'Calculator': 'Calculating…',
    'google-search': 'Searching the web…',
    # Vision / camera
    'Visual_Context_Camera': 'Looking through camera…',
    'Visual_Context_Watcher': 'Setting up a visual watcher…',
    'Request_Camera_Access': 'Requesting camera access…',
    'Image_Inference_Tool': 'Analyzing the image…',
    'Animate_Character': 'Animating your character…',
    # Media generation
    'Generate_Image': 'Generating an image…',
    'Text to image': 'Generating an image from text…',
    # Channels + invites + rooms
    'Connect_Channel': 'Connecting channel…',
    'Invite_Friend': 'Generating an invite link…',
    'Join_External_Room': 'Joining the room…',
    # Agent + planning
    'Agentic_Router': 'Planning the next steps…',
    'Create_Agent': 'Starting agent creation…',
    # System / OS
    'Shell_Command': 'Running a command…',
    'Computer_Action': 'Acting on your screen…',
    'Computer_Screenshot': 'Taking a screenshot…',
    'Request_Screen_Access': 'Requesting screen access…',
    # Cloud expert
    'Cloud_LLM': 'Consulting the cloud expert…',
    # Navigation + UX
    'Navigate_App': 'Opening the requested page…',
    'List_Pending_Actions': 'Listing pending actions…',
    # Data extraction + user lookup
    'Data_Extraction_From_URL': 'Extracting data from URL…',
    'User_details_tool': 'Looking up user details…',
    'OpenAPI_Specification': 'Calling the OpenAPI service…',
    # Resource requests + self-improvement
    'Request_Resource': 'Requesting a resource…',
    'Suggest_Share_Worthy_Content': 'Finding share-worthy content…',
    'Observe_User_Experience': 'Recording an observation…',
    'Self_Critique_And_Enhance': 'Reflecting on past suggestions…',
    # ── reuse_recipe.py inner tools (autogen function_map names) ──
    # #509 — added here (canonical static home) instead of being
    # dynamically register_tool_label()-ed at reuse_recipe import time.
    'txt2img': 'Generating an image…',
    'img2txt': 'Reading the image…',
    'save_data_in_memory': 'Saving to memory…',
    'get_saved_metadata': 'Listing saved memory…',
    'get_data_by_key': 'Recalling from memory…',
    'get_user_id': 'Looking up your user id…',
    'get_prompt_id': 'Looking up your prompt id…',
    'Generate_video': 'Generating a video…',
    'get_user_uploaded_file': 'Fetching your uploaded file…',
    'get_user_camera_inp': 'Reading from your camera…',
    'get_chat_history': 'Reading recent chat history…',
    'search_visual_history': 'Searching your visual history…',
    'register_visual_watcher': 'Registering a visual watcher…',
    'search_long_term_memory': 'Searching long-term memory…',
    'save_to_long_term_memory': 'Saving to long-term memory…',
    'create_scheduled_jobs': 'Scheduling a task…',
    'send_message_to_user': 'Sending you a message…',
    'send_presynthesized_video_to_user': 'Sending a video to you…',
    'send_message_in_seconds': 'Scheduling a message…',
    'consult_expert': 'Consulting the cloud expert…',
    'get_user_camera_inp_by_mins': 'Reading camera history…',
    'execute_windows_or_android_command': 'Running a system command…',
    'google_search': 'Searching the web…',
    'create_new_agent': 'Starting agent creation…',
    'update_persona': 'Updating role/persona in DB…',
    # ── journey_engine.py + outreach_crm_tools.py (autogen tools) ──
    # #509 — moved from inline _journey_ui_labels / _outreach_ui_labels
    # dicts at the call site so register_labeled_function picks them up
    # via TOOL_LABELS.get(name, …) without per-site dups.
    'view_journey_pipeline': 'Reviewing journey pipeline…',
    'advance_prospect_stage': 'Advancing prospect stage…',
    'run_journey_tick': 'Running journey tick…',
    'send_prospect_message': 'Sending prospect message…',
    'create_prospect': 'Creating CRM prospect…',
    'send_outreach_email': 'Sending outreach email…',
    'create_followup_sequence': 'Scheduling follow-up sequence…',
    'check_pending_followups': 'Checking pending follow-ups…',
    'move_prospect_stage': 'Moving prospect to next stage…',
    'get_pipeline_status': 'Loading pipeline status…',
    'list_sent_emails': 'Listing sent emails…',
    # ── system_introspect_tool.py — duplicate-of-truth from
    # _INTROSPECT_LABELS pulled into the canonical dict so the
    # static names live in one place (the module still keeps its
    # own dict for the LangChain `labeled_tool` call sites). ──
    'get_gpu_tier': 'Checking GPU tier…',
    'list_running_models': 'Looking up active models…',
    'get_tts_status': 'Checking TTS engine status…',
    'get_tier_thresholds': 'Reading tier thresholds…',
    'get_boot_decision': 'Reading boot rationale…',
    'get_system_health': 'Checking system health…',
    'list_decisions': 'Listing recorded decisions…',
    'explain_decision': 'Explaining a system decision…',
    # ── core/agent_tools.py @log_tool_execution-decorated functions
    # (the 28 "canonical" core tools shared by create_recipe +
    # reuse_recipe via register_core_tools). These are *autogen
    # function_map* names — snake_case — and are SEPARATE names from
    # their similarly-named LangChain Tool() literals above (e.g.,
    # `text_2_image` (autogen) vs `Generate_Image` / `Text to image`
    # (LangChain). Same verb, same UX, registered both ways. A
    # follow-up task will canonicalize the names; for now we list
    # them all so the lookup never misses. ──
    'text_2_image': 'Generating an image…',
    'data_extraction_from_url': 'Extracting data from URL…',
    'device_control': 'Acting on your device…',
    'get_user_details': 'Looking up user details…',
    'observe_user_experience': 'Recording an observation…',
    'request_resource': 'Requesting a resource…',
    'self_critique_and_enhance': 'Reflecting on past suggestions…',
    'suggest_share_worthy_content': 'Finding share-worthy content…',
    # ── integrations/channels/agent_tools.py @log_tool_execution funcs
    # (channel-adapter tools shared by both create + reuse). ──
    'register_channel': 'Registering a channel…',
    'send_to_channel': 'Sending to a channel…',
    'send_install_link': 'Sending an install link…',
    'list_channels': 'Listing your channels…',
    'get_channel_context': 'Reading channel context…',
    'reconnect_channel': 'Reconnecting channel…',
    'disconnect_channel': 'Disconnecting channel…',
    # ── integrations/providers/agent_tools.py labeled_tool() literals
    # that the prior pass missed (casing drift from Generate_video). ──
    'Generate_Video': 'Generating a video…',
    'List_AI_Providers': 'Listing AI providers…',
    'Provider_Leaderboard': 'Loading provider leaderboard…',
}

570 

571 

def register_tool_label(name: str, label: str) -> None:
    """Register a human-readable UI label for a tool name.

    Called by dynamic tool registries (integrations.skills,
    integrations.service_tools, integrations.providers, Tier-2
    goal-aware tool packs) right where they construct their Tool()
    objects. Idempotent — a later call for the same name overwrites
    the earlier label, so a registry can refine its wording over time.

    Tools that never register a label fall back to the generic
    'Running {name}…' template at the emit site in
    hart_intelligence_entry._with_tool_logging.
    """
    if name and label:
        # Cap at 60 chars — the UI status bubble truncates past that.
        TOOL_LABELS[str(name)] = str(label)[:60]