Coverage for core / resonance_tuner.py: 92.0%

288 statements  

« prev     ^ index     » next       coverage.py v7.14.0, created at 2026-05-12 04:49 +0000

1""" 

2ResonanceTuner — Continuous personality frequency tuning. 

3 

4HARTOS is the agentic orchestration layer. All actual learning (Hebbian, 

5Bayesian, probabilistic, gradient descent) lives in the HevolveAI sibling 

6repo. This module: 

7 1. Extracts interaction signals (pure heuristics, no LLM) 

8 2. Streams them to HevolveAI via WorldModelBridge for learning 

9 3. Applies corrections received from HevolveAI 

10 4. Uses EMA for immediate blending (fast local response while 

11 HevolveAI does the deep learning in the background) 

12 5. Exports anonymized resonance deltas for federation 

13 

14Integration: 

15 - Called after every /chat response (post-response hook) 

16 - DialogueStreamProcessor: continuous in-conversation tuning 

17 - WorldModelBridge: signals flow downstream to HevolveAI 

18 - FederatedAggregator: anonymized deltas across nodes 

19""" 

20 

21import logging 

22import math 

23import os 

24import time 

25import threading 

26from concurrent.futures import ThreadPoolExecutor 

27from dataclasses import dataclass, field 

28from typing import Dict, List, Optional, Tuple 

29 

30from .resonance_profile import ( 

31 UserResonanceProfile, save_resonance_profile, 

32 get_or_create_profile, load_resonance_profile, 

33 DEFAULT_TUNING, TUNING_DIM_KEYS, TUNING_DIM_COUNT, 

34 RESONANCE_STORAGE_DIR, 

35) 

36 

logger = logging.getLogger(__name__)

# EMA decay factor: higher = more weight on new observations.
# Overridable via the RESONANCE_EMA_ALPHA environment variable.
EMA_ALPHA = float(os.environ.get('RESONANCE_EMA_ALPHA', '0.15'))

# Minimum interactions before tuning starts affecting personality.
# build_resonance_prompt() emits nothing below this count, except for an
# explicit formality override (score pushed to 0.0/1.0).
MIN_INTERACTIONS_FOR_TUNING = 3

# Confidence growth rate (asymptotic toward 1.0):
# confidence = 1 - exp(-rate * total_interactions).
CONFIDENCE_GROWTH_RATE = 0.05

# Tuning history length (snapshots per profile) for oscillation detection.
TUNING_HISTORY_MAXLEN = 20

# Oscillation threshold: if any dim variance exceeds this, flag for HevolveAI.
# Overridable via the RESONANCE_OSCILLATION_THRESHOLD environment variable.
OSCILLATION_VARIANCE_THRESHOLD = float(os.environ.get(
    'RESONANCE_OSCILLATION_THRESHOLD', '0.02'))

54 

55 

56# ===================================================================== 

57# Interaction Signals 

58# ===================================================================== 

59 

@dataclass
class InteractionSignals:
    """Extracted signals from a single user<->agent exchange.

    Built by SignalExtractor.extract() from pure text heuristics
    (no LLM calls). All fields default to neutral values.
    """
    user_message_length: int = 0       # characters in the user message
    agent_response_length: int = 0     # characters in the agent response
    formality_markers: float = 0.0     # 0.0=casual, 1.0=formal
    question_count: int = 0            # '?' occurrences in the user message
    exclamation_count: int = 0         # '!' occurrences in the user message
    technical_term_count: int = 0      # hits against the tech-vocabulary list
    positive_sentiment: float = 0.5    # 0.0=negative, 1.0=positive
    response_time_ms: float = 0.0      # caller-supplied latency; 0.0 = unknown
    vocabulary_richness: float = 0.5   # type-token ratio proxy

72 

73 

class SignalExtractor:
    """Derive InteractionSignals from raw text with pure heuristics.

    No LLM calls are made: everything is word-list matching, substring
    checks, and simple ratios. The word lists mix English with
    transliterated Tamil / Hindi / Telugu / Kannada / Malayalam terms.
    """

    # NOTE(review): a few casual tokens ('be', 'pa', 'k') are also common
    # English/abbreviation tokens and may misfire on ordinary text — the
    # explicit-instruction check below takes precedence, which limits the
    # damage; confirm the trade-off is intentional.
    _FORMAL_WORDS = frozenset([
        # English
        'please', 'kindly', 'regarding', 'therefore', 'furthermore',
        'accordingly', 'shall', 'hereby', 'pursuant', 'respectfully',
        'dear', 'sincerely', 'appreciate',
        # Tamil (transliterated)
        'mariyadhai', 'mariyadai', 'mariyathai', 'mariyatai',
        'mariathay', 'mariathaya', 'thayavuseydhu', 'thayavu',
        'vanakkam', 'nandri', 'aiya', 'iyya', 'amma',
        'ungalukku', 'ungal', 'thangal',
        # Hindi (transliterated)
        'kripaya', 'dhanyavaad', 'dhanyavad', 'shriman', 'shrimati',
        'namaste', 'namaskar', 'aadarniya', 'ji',
        'aap', 'aapka',
        # Telugu (transliterated)
        'dayachesi', 'dhanyavaadalu', 'garu', 'namaskaram',
        # Kannada (transliterated)
        'dayavittu', 'dhanyavadagalu', 'namaskara',
        # Malayalam (transliterated)
        'dayavayi', 'nanni', 'namaskaram',
    ])

    _CASUAL_WORDS = frozenset([
        # English
        'hey', 'yo', 'sup', 'gonna', 'wanna', 'lol', 'haha', 'bruh',
        'cool', 'awesome', 'yeah', 'nah', 'ok', 'k', 'thx', 'ty',
        'omg', 'btw', 'imo', 'tbh', 'ngl',
        # Tamil (transliterated)
        'da', 'di', 'machan', 'machi', 'poda', 'podi',
        'vaada', 'vaadi', 'sarida', 'saridaa', 'dei', 'pa',
        # Hindi (transliterated)
        'yaar', 'bhai', 'abe', 'oye', 'be', 'bro',
        'arey', 'arre', 'achha',
    ])

    # Explicit tone-change instructions — user directly asking for formal/casual
    _FORMAL_INSTRUCTIONS = frozenset([
        'speak respectfully', 'be respectful', 'be formal', 'speak formally',
        'use formal language', 'be polite', 'speak politely',
        'talk respectfully', 'talk formally', 'use respectful language',
        # Tamil
        'mariathaya pesu', 'mariyathaiya pesu', 'mariyathaya pesu',
        'mariyadhaiya pesu', 'mariadhaiya pesu',
        'mariyadhaya pesu', 'mariadhaya pesu',
        'respect ah pesu', 'respecta pesu',
        # Hindi
        'respect se baat karo', 'izzat se bolo',
        'sabhyata se bolo', 'tameez se baat karo',
    ])

    _CASUAL_INSTRUCTIONS = frozenset([
        'speak casually', 'be casual', 'talk like a friend',
        'be informal', 'speak informally', 'chill out',
        # Tamil
        'normal ah pesu', 'casual ah pesu', 'friendly ah pesu',
        # Hindi
        'casual mein baat karo', 'normal baat karo',
    ])

    _TECH_WORDS = frozenset([
        'api', 'endpoint', 'function', 'class', 'variable', 'database',
        'algorithm', 'deployment', 'configuration', 'infrastructure',
        'repository', 'dependency', 'microservice', 'container', 'pipeline',
        'latency', 'throughput', 'schema', 'query', 'regex',
    ])

    _POSITIVE_WORDS = frozenset([
        'thanks', 'great', 'love', 'perfect', 'excellent', 'amazing',
        'good', 'nice', 'helpful', 'wonderful', 'appreciate',
    ])

    _NEGATIVE_WORDS = frozenset([
        'bad', 'wrong', 'terrible', 'hate', 'awful', 'worse', 'useless',
        'broken', 'frustrated', 'confused', 'disappointed',
    ])

    @classmethod
    def extract(cls, user_message: str, agent_response: str,
                response_time_ms: float = 0.0) -> InteractionSignals:
        """Build an InteractionSignals snapshot for one exchange.

        Only the user message drives the heuristics; the agent response
        contributes nothing but its length.
        """
        lowered = user_message.lower()
        tokens = lowered.split()
        token_count = max(len(tokens), 1)  # guard div-by-zero on empty input

        # Explicit tone requests take priority over word-level markers.
        wants_formal = any(phrase in lowered for phrase in cls._FORMAL_INSTRUCTIONS)
        wants_casual = any(phrase in lowered for phrase in cls._CASUAL_INSTRUCTIONS)

        if wants_formal:
            formality = 1.0  # user explicitly asked for formal tone
        elif wants_casual:
            formality = 0.0  # user explicitly asked for casual tone
        else:
            formal_hits = sum(1 for t in tokens if t in cls._FORMAL_WORDS)
            casual_hits = sum(1 for t in tokens if t in cls._CASUAL_WORDS)
            marker_total = formal_hits + casual_hits
            if marker_total > 0:
                formality = formal_hits / marker_total
            else:
                # No markers at all: longer messages read as more formal,
                # squeezed into the neutral 0.25-0.75 band.
                formality = min(1.0, token_count / 50.0) * 0.5 + 0.25

        tech_hits = sum(1 for t in tokens if t in cls._TECH_WORDS)

        praise = sum(1 for t in tokens if t in cls._POSITIVE_WORDS)
        complaints = sum(1 for t in tokens if t in cls._NEGATIVE_WORDS)
        if praise + complaints > 0:
            sentiment = praise / (praise + complaints)
        else:
            sentiment = 0.5  # no sentiment words -> neutral

        # Type-token ratio proxy; too few words gives a meaningless ratio,
        # so short messages default to neutral richness.
        richness = len(set(tokens)) / token_count if token_count > 5 else 0.5

        return InteractionSignals(
            user_message_length=len(user_message),
            agent_response_length=len(agent_response),
            formality_markers=formality,
            question_count=user_message.count('?'),
            exclamation_count=user_message.count('!'),
            technical_term_count=tech_hits,
            positive_sentiment=sentiment,
            response_time_ms=response_time_ms,
            vocabulary_richness=richness,
        )

    @classmethod
    def signals_to_scores(cls, signals: InteractionSignals) -> List[float]:
        """Project signals onto an 8-dim vector in TUNING_DIM_KEYS order."""
        verbosity = min(1.0, signals.user_message_length / 300.0)
        depth = min(1.0, signals.technical_term_count / 5.0)
        warmth = (signals.positive_sentiment * 0.6
                  + min(1.0, signals.exclamation_count / 3.0) * 0.4)

        # Many questions suggest the user wants thoroughness (slower pace);
        # very short messages nudge the pace back up.
        pace = 1.0 - min(1.0, signals.question_count / 3.0) * 0.5
        if signals.user_message_length < 30:
            pace = min(pace + 0.2, 1.0)

        if signals.positive_sentiment > 0.6:
            encouragement = 0.6
        else:
            encouragement = max(0.4, signals.positive_sentiment)

        humor = min(1.0, signals.exclamation_count / 5.0) * 0.3 + 0.2

        return [
            signals.formality_markers,  # formality_score
            verbosity,                  # verbosity_score
            warmth,                     # warmth_score
            pace,                       # pace_score
            depth,                      # technical_depth
            encouragement,              # encouragement_level
            humor,                      # humor_receptivity
            0.5,                        # autonomy_preference (no signal yet)
        ]

226 

227 

228# ===================================================================== 

229# Dialogue Stream Processor — Continuous In-Conversation Tuning 

230# ===================================================================== 

231 

@dataclass
class _StreamState:
    """Per-user conversation stream state.

    Accumulated by DialogueStreamProcessor between stream start and
    on_stream_end().
    """
    # Each entry is (speaker, text, is_user_message).
    messages: List[Tuple[str, str, bool]] = field(default_factory=list)
    started_at: float = 0.0       # epoch seconds when the stream was opened
    last_message_at: float = 0.0  # epoch seconds of the most recent message

238 

239 

class DialogueStreamProcessor:
    """Treats a conversation as a rolling stream of tuning signals.

    Within a CREATE/REUSE execution the AutoGen GroupChat exchanges many
    messages; every user-authored message is a tuning signal. Signals are
    accumulated here and handed to the tuner, which streams them on to
    HevolveAI for continuous learning.
    """

    def __init__(self, tuner: 'ResonanceTuner'):
        self._tuner = tuner
        self._streams: Dict[str, _StreamState] = {}
        self._lock = threading.Lock()

    def on_message(self, user_id: str, speaker: str, text: str,
                   is_user_message: bool = False,
                   base_dir: str = None):
        """Record one GroupChat message.

        Only user messages with substantive content (> 5 chars after
        stripping) trigger tuning; agent messages are merely recorded.
        """
        with self._lock:
            stream = self._streams.get(user_id)
            if stream is None:
                stream = _StreamState(started_at=time.time())
                self._streams[user_id] = stream
            stream.messages.append((speaker, text, is_user_message))
            stream.last_message_at = time.time()

        if not is_user_message or len(text.strip()) <= 5:
            return

        # Pair this user message with the most recent agent reply before it.
        latest_agent_reply = ""
        with self._lock:
            history = list(stream.messages[:-1])  # snapshot under lock
        for _speaker, prior_text, from_user in reversed(history):
            if not from_user:
                latest_agent_reply = prior_text
                break

        if latest_agent_reply:
            self._tuner.analyze_and_tune_async(
                user_id, text, latest_agent_reply, base_dir=base_dir)

    def on_stream_end(self, user_id: str):
        """Drop the user's stream state once the conversation is over."""
        with self._lock:
            self._streams.pop(user_id, None)

    def get_stream_length(self, user_id: str) -> int:
        """Return the number of messages in the user's active stream (0 if none)."""
        with self._lock:
            state = self._streams.get(user_id)
            return len(state.messages) if state else 0

289 

290 

291# ===================================================================== 

292# Core Tuning Engine 

293# ===================================================================== 

294 

class ResonanceTuner:
    """Orchestration-layer tuner: EMA blending + signal dispatch to HevolveAI.

    All actual learning (Hebbian, Bayesian, probabilistic, gradient descent)
    happens in HevolveAI. HARTOS extracts signals, applies fast EMA locally,
    and streams everything to HevolveAI for deep learning.
    """

    def __init__(self, alpha: float = EMA_ALPHA,
                 auto_save: bool = True):
        # alpha: EMA blend weight for new observations (per-profile
        # ema_alpha, when set, overrides this in _tune_profile).
        # auto_save: persist profiles to disk after tuning/corrections.
        self._alpha = alpha
        self._auto_save = auto_save
        # Single worker serializes background tunings, so profile
        # read-modify-write cycles submitted here do not race each other.
        self._executor = ThreadPoolExecutor(
            max_workers=1, thread_name_prefix='resonance_tune')
        self._lock = threading.Lock()  # guards self._stats
        self._stream_processor = DialogueStreamProcessor(self)
        # NOTE(review): 'total_identifications' and 'total_stream_messages'
        # are never incremented anywhere in this module — confirm they are
        # updated elsewhere or remove the dead keys.
        self._stats = {
            'total_tunings': 0,
            'total_identifications': 0,
            'total_oscillations_detected': 0,
            'total_hevolveai_dispatches': 0,
            'total_hevolveai_corrections': 0,
            'total_stream_messages': 0,
        }

    @property
    def stream(self) -> DialogueStreamProcessor:
        """Access the dialogue stream processor."""
        return self._stream_processor

    def analyze_and_tune(self, user_id: str, user_message: str,
                         agent_response: str, response_time_ms: float = 0.0,
                         base_dir: Optional[str] = None) -> UserResonanceProfile:
        """Full pipeline: extract -> EMA blend -> dispatch to HevolveAI -> save.

        Thread-safe. Called after every agent response.

        Returns the updated (and, when auto_save is on, persisted) profile.
        """
        profile = get_or_create_profile(user_id, base_dir)
        signals = SignalExtractor.extract(
            user_message, agent_response, response_time_ms)
        signal_scores = SignalExtractor.signals_to_scores(signals)
        profile = self._tune_profile(profile, signals, signal_scores)

        # Dispatch signals to HevolveAI for deep learning (truly fire-and-forget).
        # MUST be async — bridge.submit_correction has a 30s HTTP timeout
        # that was blocking the chat response path when localhost:8000 is down.
        self._executor.submit(
            self._dispatch_to_hevolveai, profile, signal_scores,
            user_message, agent_response)

        if self._auto_save:
            save_resonance_profile(profile, base_dir)
        with self._lock:
            self._stats['total_tunings'] += 1

        # Broadcast resonance tuning to EventBus; best-effort — a missing
        # or failing event bus must never break the tuning path.
        try:
            from core.platform.events import emit_event
            emit_event('resonance.tuned', {
                'user_id': user_id,
                'confidence': profile.resonance_confidence,
            })
        except Exception:
            pass

        return profile

    def analyze_and_tune_async(self, user_id: str, user_message: str,
                               agent_response: str,
                               response_time_ms: float = 0.0,
                               base_dir: Optional[str] = None) -> None:
        """Fire-and-forget background tuning (zero latency on response path)."""
        self._executor.submit(
            self.analyze_and_tune, user_id, user_message,
            agent_response, response_time_ms, base_dir)

    def _tune_profile(self, profile: UserResonanceProfile,
                      signals: InteractionSignals,
                      signal_scores: List[float]) -> UserResonanceProfile:
        """EMA blending for fast local response.

        This is the immediate, lightweight tuning that happens in HARTOS.
        The deep learning (Hebbian, Bayesian, etc.) happens asynchronously
        in HevolveAI and corrections flow back via apply_hevolveai_corrections().
        """
        # Per-profile alpha overrides the tuner-wide default when set.
        a = profile.ema_alpha if profile.ema_alpha is not None else self._alpha
        current_vector = [profile.tuning[k] for k in TUNING_DIM_KEYS]

        # EMA blend each dimension
        for i, key in enumerate(TUNING_DIM_KEYS):
            profile.tuning[key] = self._ema(current_vector[i], signal_scores[i], a)

        # Track tuning history (bounded to TUNING_HISTORY_MAXLEN snapshots)
        # for oscillation detection
        snapshot = [profile.tuning[k] for k in TUNING_DIM_KEYS]
        profile.tuning_history.append(snapshot)
        if len(profile.tuning_history) > TUNING_HISTORY_MAXLEN:
            profile.tuning_history = profile.tuning_history[-TUNING_HISTORY_MAXLEN:]

        # Detect oscillation -> flag for HevolveAI correction; count only
        # the transition into the oscillating state, not every tick of it.
        was_oscillating = profile.gradient_active
        profile.gradient_active = self._detect_oscillation(profile.tuning_history)
        if profile.gradient_active and not was_oscillating:
            with self._lock:
                self._stats['total_oscillations_detected'] += 1

        # Metadata: running EMAs plus interaction bookkeeping
        profile.vocabulary_complexity = self._ema(
            profile.vocabulary_complexity, signals.vocabulary_richness, a)
        profile.total_interactions += 1
        profile.avg_message_length = self._ema(
            profile.avg_message_length, signals.user_message_length, a)
        if signals.response_time_ms > 0:  # 0.0 means "not measured"
            profile.avg_response_time_ms = self._ema(
                profile.avg_response_time_ms, signals.response_time_ms, a)
        profile.last_interaction_at = time.time()
        profile.updated_at = time.time()
        # Confidence grows asymptotically toward 1.0 with interaction count.
        profile.resonance_confidence = 1.0 - math.exp(
            -CONFIDENCE_GROWTH_RATE * profile.total_interactions)

        return profile

    def _dispatch_to_hevolveai(self, profile: UserResonanceProfile,
                               signal_scores: List[float],
                               user_message: str, agent_response: str):
        """Stream resonance signals to HevolveAI for deep learning.

        HevolveAI activates its full learning stack (Hebbian, Bayesian,
        probabilistic, gradient descent) on these signals. Corrections
        flow back via apply_hevolveai_corrections().

        Runs on the executor thread; all failures are swallowed (debug log
        only) so a missing bridge never affects the response path.
        """
        try:
            from integrations.agent_engine.world_model_bridge import get_world_model_bridge
            bridge = get_world_model_bridge()

            # Embed resonance metadata in the experience payload
            # HevolveAI's learning pipeline picks this up automatically.
            # Message texts are truncated to 500 chars to bound payload size.
            bridge.record_interaction(
                user_id=profile.user_id,
                prompt_id='resonance_tuning',
                prompt=user_message[:500],
                response=agent_response[:500],
                model_id='resonance_signal_stream',
                latency_ms=0,
                node_id=None,
                goal_id=None,
            )

            # If oscillation detected, request explicit correction
            if profile.gradient_active:
                bridge.submit_correction(
                    original_response=str({k: profile.tuning[k] for k in TUNING_DIM_KEYS}),
                    corrected_response='',
                    expert_id='resonance_oscillation_detector',
                    confidence=0.5,
                    explanation='Resonance tuning oscillation detected',
                    context={
                        'type': 'resonance_oscillation_correction',
                        'user_id': profile.user_id,
                        'signal_scores': signal_scores,
                        'current_tuning': [profile.tuning[k] for k in TUNING_DIM_KEYS],
                        'tuning_history': profile.tuning_history,
                        'confidence': profile.resonance_confidence,
                    },
                )

            with self._lock:
                self._stats['total_hevolveai_dispatches'] += 1
        except ImportError:
            # Bridge not installed in this deployment — silently skip.
            pass
        except Exception as e:
            logger.debug(f"HevolveAI dispatch skipped: {e}")

    def apply_hevolveai_corrections(self, user_id: str, corrections: dict,
                                    base_dir: Optional[str] = None):
        """Apply learning corrections from HevolveAI back to profile.

        Closes the loop: signals -> HevolveAI learning -> corrections -> profile.
        Called when WorldModelBridge receives feedback from HevolveAI.

        Silently ignores payloads whose 'tuning_corrections' is missing,
        not a list, or of the wrong length.
        """
        tuning_corrections = corrections.get('tuning_corrections')
        if not tuning_corrections or not isinstance(tuning_corrections, list):
            return
        if len(tuning_corrections) != TUNING_DIM_COUNT:
            return

        profile = get_or_create_profile(user_id, base_dir)

        for i, key in enumerate(TUNING_DIM_KEYS):
            # Clamp each correction into [0, 1] before blending.
            corrected = max(0.0, min(1.0, tuning_corrections[i]))
            # Blend: 70% current (local), 30% HevolveAI correction
            profile.tuning[key] = profile.tuning[key] * 0.7 + corrected * 0.3

        # Correction received — clear the oscillation flag.
        profile.gradient_active = False
        profile.updated_at = time.time()

        if self._auto_save:
            save_resonance_profile(profile, base_dir)

        with self._lock:
            self._stats['total_hevolveai_corrections'] += 1
        logger.debug(f"Applied HevolveAI corrections for user {user_id}")

    @staticmethod
    def _detect_oscillation(tuning_history: List[List[float]]) -> bool:
        """Check if tuning is oscillating (not converging).

        Flags for HevolveAI gradient correction when the population variance
        of ANY single dimension over the recent history exceeds
        OSCILLATION_VARIANCE_THRESHOLD. Requires at least 5 snapshots.
        """
        if len(tuning_history) < 5:
            return False

        recent = tuning_history[-TUNING_HISTORY_MAXLEN:]
        n_dims = len(recent[0]) if recent else 0

        for d in range(n_dims):
            values = [snap[d] for snap in recent]
            mean = sum(values) / len(values)
            variance = sum((v - mean) ** 2 for v in values) / len(values)
            if variance > OSCILLATION_VARIANCE_THRESHOLD:
                return True
        return False

    # ─── Federation: Export / Import ──────────────────────────────── 

    def export_resonance_delta(self, base_dir: Optional[str] = None) -> dict:
        """Export anonymized local resonance stats for federation.

        No individual user IDs or biometric data cross node boundaries.
        Only aggregated tuning distributions and interaction counts.

        Returns {} when the storage dir is missing, on any read error, or
        when no profile has reached MIN_INTERACTIONS_FOR_TUNING.
        """
        base_dir = base_dir or RESONANCE_STORAGE_DIR
        if not os.path.isdir(base_dir):
            return {}

        n = TUNING_DIM_COUNT
        tuning_sums = [0.0] * n
        tuning_sq_sums = [0.0] * n
        user_count = 0
        total_interactions = 0
        oscillation_count = 0

        try:
            for fname in os.listdir(base_dir):
                if not fname.endswith('_resonance.json'):
                    continue
                uid = fname.replace('_resonance.json', '')
                profile = load_resonance_profile(uid, base_dir)
                # Skip unreadable profiles and those with too little data.
                if profile is None or profile.total_interactions < MIN_INTERACTIONS_FOR_TUNING:
                    continue

                user_count += 1
                total_interactions += profile.total_interactions
                if profile.gradient_active:
                    oscillation_count += 1

                for i, key in enumerate(TUNING_DIM_KEYS):
                    val = profile.tuning.get(key, 0.5)
                    tuning_sums[i] += val
                    tuning_sq_sums[i] += val * val
        except Exception as e:
            logger.debug(f"Resonance delta export error: {e}")
            return {}

        if user_count == 0:
            return {}

        avg_tuning = [s / user_count for s in tuning_sums]
        # Population variance via E[x^2] - E[x]^2; may dip marginally below
        # zero from float rounding.
        tuning_variance = [
            tuning_sq_sums[i] / user_count - avg_tuning[i] ** 2
            for i in range(n)
        ]

        return {
            'type': 'resonance_delta',
            'user_count': user_count,
            'total_interactions': total_interactions,
            'oscillation_count': oscillation_count,
            'avg_tuning': avg_tuning,
            'tuning_variance': tuning_variance,
            'dim_keys': list(TUNING_DIM_KEYS),
            'timestamp': time.time(),
        }

    def import_hive_resonance(self, aggregated: dict,
                              base_dir: Optional[str] = None):
        """Apply hive-aggregated tuning insights to local profiles.

        Nudges local profiles toward hive consensus for dimensions that
        have high local variance (uncertain). Well-tuned local dims
        are preserved (70% local, 30% hive).

        Ignores payloads without a correctly-sized 'avg_tuning' vector;
        skips profiles below MIN_INTERACTIONS_FOR_TUNING. Saves each
        updated profile regardless of the auto_save setting.
        """
        hive_avg = aggregated.get('avg_tuning')
        if not hive_avg or len(hive_avg) != TUNING_DIM_COUNT:
            return

        base_dir = base_dir or RESONANCE_STORAGE_DIR
        if not os.path.isdir(base_dir):
            return

        try:
            for fname in os.listdir(base_dir):
                if not fname.endswith('_resonance.json'):
                    continue
                uid = fname.replace('_resonance.json', '')
                profile = load_resonance_profile(uid, base_dir)
                if profile is None or profile.total_interactions < MIN_INTERACTIONS_FOR_TUNING:
                    continue

                for i, key in enumerate(TUNING_DIM_KEYS):
                    local_val = profile.tuning.get(key, 0.5)
                    hive_val = hive_avg[i]
                    profile.tuning[key] = local_val * 0.7 + hive_val * 0.3

                profile.updated_at = time.time()
                save_resonance_profile(profile, base_dir)
        except Exception as e:
            logger.debug(f"Hive resonance import error: {e}")

    @staticmethod
    def _ema(current: float, new_value: float, alpha: float) -> float:
        """Exponential moving average: alpha is the weight of new_value."""
        return current * (1 - alpha) + new_value * alpha

    def get_stats(self) -> dict:
        # Copy under lock so callers never see a dict being mutated.
        with self._lock:
            return dict(self._stats)

621 

622 

623# ===================================================================== 

624# Pre-Tune — Apply current user message signals BEFORE building prompt 

625# ===================================================================== 

626 

def pre_tune_from_input(profile: UserResonanceProfile,
                        user_message: str) -> UserResonanceProfile:
    """Pre-tune the profile from the current user message, before the LLM runs.

    Without this, an instruction like "speak respectfully" would only take
    effect on the NEXT response (the one-turn-behind problem). Only the
    formality dimension is touched here; every other dimension is left to
    the full post-response tuning pass.

    Mutates and returns *profile*. Never saves to disk and never dispatches
    to HevolveAI.
    """
    signals = SignalExtractor.extract(user_message, '', 0.0)

    # Act only on strong evidence: an explicit tone instruction, or a
    # clearly one-sided word-level formality reading.
    instruction_phrases = (SignalExtractor._FORMAL_INSTRUCTIONS
                           | SignalExtractor._CASUAL_INSTRUCTIONS)
    lowered = user_message.lower()
    asked_explicitly = any(phrase in lowered for phrase in instruction_phrases)

    if asked_explicitly:
        # Direct request: set formality outright, skipping EMA dampening.
        profile.tuning['formality_score'] = signals.formality_markers
    elif signals.formality_markers > 0.8 or signals.formality_markers < 0.2:
        # Strong word-level signal: blend faster than the default EMA (0.15).
        blend = 0.5
        prior = profile.tuning.get('formality_score', 0.5)
        profile.tuning['formality_score'] = (
            prior * (1 - blend) + signals.formality_markers * blend)

    return profile

657 

658 

659# ===================================================================== 

660# Prompt Builder 

661# ===================================================================== 

662 

def build_resonance_prompt(profile: UserResonanceProfile) -> str:
    """Render the system-message addon that reflects the current tuning state."""
    tuning = profile.tuning

    # An explicit formality override (score pushed to an extreme) is honored
    # even before MIN_INTERACTIONS_FOR_TUNING is reached, so a direct tone
    # request takes effect immediately.
    formality_val = tuning.get('formality_score', 0.5)
    explicit_override = formality_val >= 0.9 or formality_val <= 0.1

    if profile.total_interactions < MIN_INTERACTIONS_FOR_TUNING:
        if not explicit_override:
            return ""
        if formality_val >= 0.9:
            label = 'very formal and respectful'
        else:
            label = 'very casual and friendly'
        return f"\nTONE PREFERENCE: This user prefers {label} language. Adapt immediately.\n"

    confidence_pct = int(profile.resonance_confidence * 100)

    # (tuning key, labels ordered from low score to high score)
    label_scales = [
        ('formality_score', ['very casual', 'casual', 'neutral',
                             'somewhat formal', 'very formal']),
        ('verbosity_score', ['extremely brief', 'concise', 'balanced',
                             'detailed', 'very thorough']),
        ('warmth_score', ['professionally distant', 'polite', 'friendly',
                          'warm', 'very warm and personal']),
        ('pace_score', ['very thorough/slow', 'thorough', 'balanced pace',
                        'brisk', 'fast and action-oriented']),
        ('technical_depth', ['very simple language', 'simple', 'moderate',
                             'technical', 'highly technical']),
        ('encouragement_level', ['matter-of-fact', 'light encouragement',
                                 'encouraging', 'warmly encouraging',
                                 'highly celebratory']),
    ]
    labels = {key: _score_to_label(tuning.get(key, 0.5), scale)
              for key, scale in label_scales}

    return f"""
RESONANCE TUNING (learned from {profile.total_interactions} interactions, {confidence_pct}% confidence):
This user prefers:
- Formality: {labels['formality_score']}
- Detail level: {labels['verbosity_score']}
- Warmth: {labels['warmth_score']}
- Pace: {labels['pace_score']}
- Technical depth: {labels['technical_depth']}
- Encouragement style: {labels['encouragement_level']}
Adapt your responses to match these preferences naturally. Do not mention this tuning to the user.
"""

712 

713 

714def _score_to_label(score: float, labels: list) -> str: 

715 """Map a 0.0-1.0 score to one of N labels.""" 

716 idx = min(int(score * len(labels)), len(labels) - 1) 

717 return labels[idx] 

718 

719 

720# ===================================================================== 

721# Singleton 

722# ===================================================================== 

723 

# Process-wide singleton tuner, lazily created by get_resonance_tuner().
_tuner = None
# Guards singleton construction (double-checked in get_resonance_tuner).
_tuner_lock = threading.Lock()

726 

727 

def get_resonance_tuner() -> ResonanceTuner:
    """Return the process-wide ResonanceTuner, creating it on first use.

    Uses double-checked locking: the unlocked fast path avoids lock
    contention once the singleton exists; the re-check under the lock
    guarantees only one instance is ever constructed.
    """
    global _tuner
    if _tuner is not None:
        return _tuner
    with _tuner_lock:
        if _tuner is None:
            _tuner = ResonanceTuner()
    return _tuner