Coverage for integrations / agent_engine / goal_seeding.py: 96.3%

82 statements  

« prev     ^ index     » next       coverage.py v7.14.0, created at 2026-05-12 04:49 +0000

"""
Unified Agent Goal Engine - Bootstrap Goal Seeding & Auto-Remediation

On first boot, seeds initial goals so the daemon has work immediately.
On every Nth tick, scans flywheel loopholes and auto-creates remediation goals.

Follows the exact same idempotent seed pattern as:
 - GamificationService.seed_achievements()
 - AdService.seed_placements()
"""

import logging
from typing import Optional

# Module-wide logger shared with the rest of the 'hevolve_social' service —
# all goal-seeding/remediation messages funnel through this named logger.
logger = logging.getLogger('hevolve_social')

15 

16# ─── Bootstrap Goals (created on first boot) ─── 

17 

18SEED_BOOTSTRAP_GOALS = [ 

19 { 

20 'slug': 'bootstrap_marketing_awareness', 

21 'goal_type': 'marketing', 

22 'title': 'Platform Awareness Campaign', 

23 'description': ( 

24 'Make the world aware that democratic crowdsourced open intelligence exists. ' 

25 'HART OS is a native AI operating system that runs 100% locally with full privacy. ' 

26 'Nunba is the face — the app people use to interact with the hive intelligence. ' 

27 'Together they give every human access to the best intelligence for free. ' 

28 'Sum of many intelligences is greater than any single intelligence. ' 

29 '1) Create content showing real benchmark results — hive vs single models, ' 

30 '2) Show the privacy story — your data never leaves your device, ' 

31 '3) Show the economic story — 90% of value returns to contributors, ' 

32 '4) Post to all channels with authentic proof, not hype. ' 

33 'Let the results speak. People slowly realize this changes everything.' 

34 ), 

35 'config': { 

36 'goal_sub_type': 'awareness', 

37 'channels': ['platform', 'twitter', 'linkedin'], 

38 }, 

39 'spark_budget': 300, 

40 'use_product': True, 

41 }, 

42 { 

43 'slug': 'bootstrap_referral_campaign', 

44 'goal_type': 'marketing', 

45 'title': 'Referral Growth Campaign', 

46 'description': ( 

47 'Create a referral-driven growth campaign: ' 

48 '1) Design a referral campaign with create_referral_campaign tool, ' 

49 '2) Generate shareable content that educates about the platform, ' 

50 '3) Create social posts with referral CTAs, ' 

51 '4) Track referral conversion metrics with get_growth_metrics. ' 

52 'Every referral must deliver genuine value to the referred user.' 

53 ), 

54 'config': { 

55 'goal_sub_type': 'referral', 

56 'channels': ['platform', 'email', 'twitter'], 

57 }, 

58 'spark_budget': 200, 

59 'use_product': True, 

60 }, 

61 { 

62 'slug': 'bootstrap_crowdsource_intelligence', 

63 'goal_type': 'marketing', 

64 'title': 'Promote Crowdsourced Intelligence via Thought Experiments', 

65 'description': ( 

66 'Create content promoting the crowdsourced intelligence concept: ' 

67 '1) Research how thought experiments enable collective intelligence — ' 

68 'users propose hypotheses, multi-agent evaluation scores them, ' 

69 'the hive learns from every experiment via memory chaining, ' 

70 '2) Generate educational posts explaining the hypothesis→evaluation→learning pipeline, ' 

71 '3) Create campaigns highlighting the 6 intent categories ' 

72 '(community, environment, education, health, equity, technology), ' 

73 '4) Show how every experiment makes the hive smarter — ' 

74 'constructive-only voting ensures quality, HITL approval gates ensure safety. ' 

75 'Authentic value, not hype. Let the feature speak for itself.' 

76 ), 

77 'config': { 

78 'goal_sub_type': 'content', 

79 'channels': ['platform', 'twitter', 'linkedin'], 

80 }, 

81 'spark_budget': 250, 

82 'use_product': True, 

83 }, 

84 { 

85 'slug': 'bootstrap_ip_monitor', 

86 'goal_type': 'ip_protection', 

87 'title': 'Continuous Flywheel Health Monitor', 

88 'description': ( 

89 'Monitor the hive intelligence loop continuously: ' 

90 '1) Use get_loop_health to check all 5 flywheel components, ' 

91 '2) Report any detected loopholes with severity, ' 

92 '3) Verify exponential improvement metrics, ' 

93 '4) Measure moat depth to track technical irreproducibility.' 

94 ), 

95 'config': { 

96 'mode': 'monitor', 

97 }, 

98 'spark_budget': 150, 

99 'use_product': False, 

100 }, 

101 { 

102 'slug': 'bootstrap_growth_analytics', 

103 'goal_type': 'marketing', 

104 'title': 'Growth Analytics and Optimization', 

105 'description': ( 

106 'Analyze platform growth metrics and optimize: ' 

107 '1) Use get_growth_metrics to assess current state, ' 

108 '2) Identify bottlenecks in the user acquisition funnel, ' 

109 '3) Create targeted content for underperforming segments, ' 

110 '4) Report findings and recommendations. ' 

111 'Data-driven decisions, not vanity metrics.' 

112 ), 

113 'config': { 

114 'goal_sub_type': 'analytics', 

115 'channels': ['platform'], 

116 }, 

117 'spark_budget': 100, 

118 'use_product': True, 

119 }, 

120 { 

121 'slug': 'bootstrap_coding_health', 

122 'goal_type': 'coding', 

123 'title': 'Codebase Health and Recipe Maintenance', 

124 'description': ( 

125 'Monitor recipe freshness and codebase health: ' 

126 '1) Check recipe reuse rate and identify stale recipes, ' 

127 '2) Verify recipe version compatibility, ' 

128 '3) Report coding-related flywheel loopholes, ' 

129 '4) Suggest improvements for feedback pipeline.' 

130 ), 

131 'config': { 

132 'repo_url': '', 

133 'repo_branch': 'main', 

134 'target_path': 'prompts/', 

135 }, 

136 'spark_budget': 100, 

137 'use_product': False, 

138 }, 

139 { 

140 'slug': 'bootstrap_hive_embedding_audit', 

141 'goal_type': 'coding', 

142 'title': 'Audit and Embed Hive Intelligence in All Repos', 

143 'description': ( 

144 'Scan all repositories created by the coding agent. For each: ' 

145 '1) Verify hevolve-sdk is listed as a dependency, ' 

146 '2) Check master key verification exists in entry points, ' 

147 '3) Verify world model bridge wiring for learning feedback, ' 

148 '4) Ensure node identity registration is present. ' 

149 'Fix any repos missing these components.' 

150 ), 

151 'config': { 

152 'repo_url': '', 

153 'repo_branch': 'main', 

154 'mode': 'audit', 

155 }, 

156 'spark_budget': 200, 

157 'use_product': False, 

158 }, 

159 { 

160 'slug': 'bootstrap_revenue_monitor', 

161 'goal_type': 'revenue', 

162 'title': 'Monitor API Revenue and Pricing', 

163 'description': ( 

164 'Monitor commercial API revenue and optimise: ' 

165 '1) Use get_api_revenue_stats to check revenue trends, ' 

166 '2) Analyse tier distribution and usage patterns, ' 

167 '3) Recommend pricing adjustments based on demand/costs, ' 

168 '4) Generate API documentation for developer onboarding. ' 

169 'Fair pricing: free tier always free, 90% to compute providers. ' 

170 'All compute falls under one basket — tread carefully, genuine value first.' 

171 ), 

172 'config': { 

173 'mode': 'monitor', 

174 }, 

175 'spark_budget': 150, 

176 'use_product': False, 

177 }, 

178 { 

179 'slug': 'bootstrap_defensive_ip', 

180 'goal_type': 'ip_protection', 

181 'title': 'Continuous Defensive Publication and Intelligence Milestone', 

182 'description': ( 

183 'Generate defensive publications and monitor for patent trigger: ' 

184 '1) Create defensive publications for novel architecture components, ' 

185 '2) Use get_provenance_record to maintain evidence chain, ' 

186 '3) Monitor loop health for consecutive verified status, ' 

187 '4) When intelligence milestone reached (14 days verified + moat >= months), ' 

188 'trigger provisional patent filing via draft_patent_claims. ' 

189 'Defensive publications first. Patents only when critical intelligence confirmed. ' 

190 'HART character: Vijai — cautious, methodical, net-positive.' 

191 ), 

192 'config': { 

193 'mode': 'monitor', 

194 'auto_patent_trigger': True, 

195 }, 

196 'spark_budget': 200, 

197 'use_product': False, 

198 }, 

199 { 

200 'slug': 'bootstrap_finance_agent', 

201 'goal_type': 'finance', 

202 'title': 'Self-Sustaining Business — Finance Agent Vijai', 

203 'description': ( 

204 'Make the business self-sustaining with Vijai personality: ' 

205 '1) Use get_financial_health to monitor platform revenue and costs, ' 

206 '2) Use track_revenue_split to verify 90/9/1 compliance every period, ' 

207 '3) Use assess_sustainability to determine if revenue covers infrastructure, ' 

208 '4) Use manage_invite_participation to review private core access agreements. ' 

209 'No code merges without review against vision, mission, goals, constitution. ' 

210 'The coding agent proposes; guardrails and review approve. ' 

211 'Cautious market. Genuine value first. Vijai builds, never rushes.' 

212 ), 

213 'config': { 

214 'mode': 'monitor', 

215 'personality': 'vijai', 

216 'commit_review_required': True, 

217 }, 

218 'spark_budget': 200, 

219 'use_product': False, 

220 }, 

221 { 

222 'slug': 'bootstrap_exception_watcher', 

223 'goal_type': 'self_heal', 

224 'title': 'Continuous Exception Monitor and Self-Healing', 

225 'description': ( 

226 'Monitor the platform for runtime exceptions. ' 

227 'When exception patterns are detected (3+ occurrences of same type), ' 

228 'create coding fix goals for idle agents. ' 

229 'This goal runs continuously to keep the platform self-healing.' 

230 ), 

231 'config': { 

232 'mode': 'watch', 

233 'continuous': True, 

234 }, 

235 'spark_budget': 100, 

236 'use_product': False, 

237 }, 

238 { 

239 'slug': 'bootstrap_federation_sync', 

240 'goal_type': 'federation', 

241 'title': 'Federated Learning Synchronization Monitor', 

242 'description': ( 

243 'Monitor federated learning convergence across the network: ' 

244 '1) Use check_federation_convergence to track sync health, ' 

245 '2) Identify diverging or stalled nodes via get_peer_learning_health, ' 

246 '3) Trigger manual sync if convergence drops below 0.5, ' 

247 '4) Report federation stats and trends.' 

248 ), 

249 'config': { 

250 'mode': 'monitor', 

251 }, 

252 'spark_budget': 150, 

253 'use_product': False, 

254 }, 

255 { 

256 'slug': 'bootstrap_self_build_monitor', 

257 'goal_type': 'self_build', 

258 'title': 'OS Self-Build Monitor — Sandbox-First Package Management', 

259 'description': ( 

260 'Monitor and maintain the OS runtime configuration: ' 

261 '1) Use get_self_build_status to check current packages, version, generations, ' 

262 '2) When a package install/remove is needed, stage it with install_package/remove_package, ' 

263 '3) ALWAYS call sandbox_test_build() before apply_build() — never skip the sandbox, ' 

264 '4) Use show_build_diff() to review what will change, ' 

265 '5) After apply, verify the change worked — rollback_build() if anything is wrong, ' 

266 '6) Track build history and alert on repeated failures. ' 

267 'The OS rebuilds itself. Every change is reversible. Test first, deploy second.' 

268 ), 

269 'config': { 

270 'mode': 'monitor', 

271 'continuous': True, 

272 'sandbox_required': True, 

273 }, 

274 'spark_budget': 150, 

275 'use_product': False, 

276 }, 

277 { 

278 'slug': 'bootstrap_upgrade_monitor', 

279 'goal_type': 'upgrade', 

280 'title': 'Continuous Version Upgrade Monitor', 

281 'description': ( 

282 'Monitor for new version deployments and orchestrate upgrades: ' 

283 '1) Use check_upgrade_status to detect new versions, ' 

284 '2) Capture pre-upgrade benchmarks, ' 

285 '3) Start 7-stage pipeline (build→test→audit→benchmark→sign→canary→deploy), ' 

286 '4) Monitor canary health during rollout, ' 

287 '5) Rollback immediately on ANY degradation.' 

288 ), 

289 'config': { 

290 'mode': 'monitor', 

291 'continuous': True, 

292 }, 

293 'spark_budget': 200, 

294 'use_product': False, 

295 }, 

296 # ─── News Push Notification Agents ─── 

297 { 

298 'slug': 'bootstrap_news_regional', 

299 'goal_type': 'news', 

300 'title': 'Regional News Curation and Push Notifications', 

301 'description': ( 

302 'Subscribe to local and regional news feeds, curate relevant stories, ' 

303 'and push notifications to users in the region: ' 

304 '1) Use subscribe_news_feed for local RSS sources (city papers, regional outlets), ' 

305 '2) Use fetch_news_feeds to pull latest items hourly, ' 

306 '3) Curate top stories by relevance — community impact, weather, local events, ' 

307 '4) Use send_news_notification with scope=regional to push curated items, ' 

308 '5) Use get_news_metrics to track delivery rates and read engagement. ' 

309 'Quality over quantity — only push stories that matter to the community.' 

310 ), 

311 'config': { 

312 'scope': 'regional', 

313 'categories': ['local', 'community', 'weather', 'events'], 

314 'feed_urls': [], 

315 'frequency': 'hourly', 

316 }, 

317 'spark_budget': 150, 

318 'use_product': False, 

319 }, 

320 { 

321 'slug': 'bootstrap_news_national', 

322 'goal_type': 'news', 

323 'title': 'National News Curation and Push Notifications', 

324 'description': ( 

325 'Monitor national news feeds, filter by category relevance, ' 

326 'and push digest notifications: ' 

327 '1) Use subscribe_news_feed for major national outlets and wire services, ' 

328 '2) Use fetch_news_feeds to pull latest items hourly, ' 

329 '3) Filter and rank by category: politics, economy, sports, health, science, ' 

330 '4) Use send_news_notification with scope=all for high-importance national stories, ' 

331 '5) Use get_trending_news to identify breakout stories, ' 

332 '6) Use get_news_metrics to optimise send frequency and engagement. ' 

333 'Balanced coverage — no single category dominates. Factual, not sensational.' 

334 ), 

335 'config': { 

336 'scope': 'national', 

337 'categories': ['politics', 'economy', 'sports', 'health', 'science'], 

338 'feed_urls': [], 

339 'frequency': 'hourly', 

340 }, 

341 'spark_budget': 200, 

342 'use_product': False, 

343 }, 

344 { 

345 'slug': 'bootstrap_news_international', 

346 'goal_type': 'news', 

347 'title': 'International News Curation and Push Notifications', 

348 'description': ( 

349 'Curate global news from international feeds with focus on technology, ' 

350 'AI, climate, and geopolitics: ' 

351 '1) Use subscribe_news_feed for international wire services and global outlets, ' 

352 '2) Use fetch_news_feeds every 4 hours for world news, ' 

353 '3) Prioritise: world events, technology breakthroughs, AI developments, ' 

354 'climate updates, geopolitical shifts, ' 

355 '4) Use send_news_notification with scope=all for major global stories, ' 

356 '5) Use get_trending_news to surface viral international stories, ' 

357 '6) Use get_news_metrics to track cross-category engagement. ' 

358 'Global perspective — diverse sources, multiple viewpoints, fact-based.' 

359 ), 

360 'config': { 

361 'scope': 'international', 

362 'categories': ['world', 'technology', 'ai', 'climate', 'geopolitics'], 

363 'feed_urls': [], 

364 'frequency': 'every_4h', 

365 }, 

366 'spark_budget': 200, 

367 'use_product': False, 

368 }, 

369 # ─── Continual Learning Coordination ─── 

370 { 

371 'slug': 'bootstrap_learning_coordinator', 

372 'goal_type': 'learning', 

373 'title': 'Continual Learning Coordination and CCT Management', 

374 'description': ( 

375 'Coordinate the continual learning incentive system: ' 

376 '1) Monitor compute contributions across all nodes with check_learning_health, ' 

377 '2) Issue and renew Compute Contribution Tokens for eligible nodes with issue_cct, ' 

378 '3) Verify learning microbenchmarks for compute attestation with verify_compute_contribution, ' 

379 '4) Track learning tier distribution and skill sharing rates with get_learning_tier_stats, ' 

380 '5) Report learning health metrics to dashboard. ' 

381 'Intelligence is the reward for contribution. ' 

382 'Every compute cycle donated makes the hive smarter. ' 

383 '90% of value flows back to contributors.' 

384 ), 

385 'config': { 

386 'mode': 'monitor', 

387 'continuous': True, 

388 }, 

389 'spark_budget': 200, 

390 'use_product': False, 

391 }, 

392 # ─── Distributed Gradient Sync ─── 

393 { 

394 'slug': 'bootstrap_gradient_sync', 

395 'goal_type': 'distributed_learning', 

396 'title': 'Distributed Embedding Sync Coordination', 

397 'description': ( 

398 'Coordinate the distributed embedding sync pipeline: ' 

399 '1) Monitor gradient sync status across all peers with get_gradient_sync_status, ' 

400 '2) Submit local embedding deltas for aggregation with submit_embedding_delta, ' 

401 '3) Request peer witnesses for embedding deltas with request_embedding_witnesses, ' 

402 '4) Trigger aggregation rounds for convergence with trigger_embedding_aggregation, ' 

403 '5) Ensure all contributing nodes have embedding_sync CCT capability. ' 

404 'Phase 1: Compressed embedding deltas (<100KB), trimmed mean aggregation. ' 

405 'Every node that contributes makes the hive smarter.' 

406 ), 

407 'config': { 

408 'mode': 'monitor', 

409 'continuous': True, 

410 'phase': 1, 

411 }, 

412 'spark_budget': 200, 

413 'use_product': False, 

414 }, 

415 # ─── Robot Learning ─── 

416 { 

417 'slug': 'bootstrap_robot_learning', 

418 'goal_type': 'robot', 

419 'title': 'Continuous Robot Learning from Physical Interactions', 

420 'description': ( 

421 'Learn from physical interactions continuously: ' 

422 '1) Use get_robot_status to monitor active sensors and safety, ' 

423 '2) After each physical action, record the action + sensor context + outcome, ' 

424 '3) Build motion recipes from successful action sequences, ' 

425 '4) Feed outcomes to the world model for trajectory improvement, ' 

426 '5) Identify recurring motion patterns for recipe extraction. ' 

427 'Every physical interaction makes the robot smarter. ' 

428 'Recipes enable 90% faster replay of learned sequences.' 

429 ), 

430 'config': { 

431 'mode': 'learning', 

432 'continuous': True, 

433 }, 

434 'spark_budget': 150, 

435 'use_product': False, 

436 }, 

437 # ─── Robot Health Monitor ─── 

438 { 

439 'slug': 'bootstrap_robot_health_monitor', 

440 'goal_type': 'robot', 

441 'title': 'Robot Health Monitor — Sensor Drift and Calibration', 

442 'description': ( 

443 'Monitor robot health continuously: ' 

444 '1) Use get_robot_status to check safety, sensors, and bridge health, ' 

445 '2) Use get_robot_capabilities to verify detected hardware matches expected, ' 

446 '3) Use read_sensor on each active sensor to check for drift or anomalies, ' 

447 '4) Use get_sensor_window to detect sensor noise or stale readings, ' 

448 '5) Report any safety events, sensor failures, or calibration needs. ' 

449 'This goal runs continuously on robot nodes to keep hardware healthy.' 

450 ), 

451 'config': { 

452 'mode': 'monitor', 

453 'continuous': True, 

454 }, 

455 'spark_budget': 100, 

456 'use_product': False, 

457 }, 

458 # ─── Thought Experiment Coordinator ─── 

459 { 

460 'slug': 'bootstrap_thought_experiment_coordinator', 

461 'goal_type': 'thought_experiment', 

462 'title': 'Constitutional Thought Experiment Coordination', 

463 'description': ( 

464 'Coordinate the constitutional thought experiment pipeline: ' 

465 '1) Monitor active experiments with get_experiment_status, ' 

466 '2) Evaluate proposed experiments with evaluate_thought_experiment, ' 

467 '3) Tally votes and compute weighted scores with tally_experiment_votes, ' 

468 '4) Advance experiments through lifecycle with advance_experiment, ' 

469 '5) Ensure core IP experiments receive agent evaluation. ' 

470 'Both humans and agents vote. All content gated by ConstitutionalFilter. ' 

471 'Every experiment makes the hive smarter.' 

472 ), 

473 'config': { 

474 'mode': 'coordinator', 

475 'continuous': True, 

476 }, 

477 'spark_budget': 200, 

478 'use_product': False, 

479 }, 

480 { 

481 'slug': 'bootstrap_paper_trader_longterm', 

482 'goal_type': 'trading', 

483 'title': 'Paper Trading: Diversified Long-Term Portfolio', 

484 'description': ( 

485 'Manage a diversified long-term paper portfolio: ' 

486 '1) Analyse market sentiment for BTC, ETH, and top-10 assets, ' 

487 '2) Build positions based on fundamental + sentiment analysis, ' 

488 '3) Monthly rebalance — max 25% per asset, ' 

489 '4) Track P&L and win rate with get_portfolio_status. ' 

490 'All trades are paper (simulated). Halt at 10% cumulative loss.' 

491 ), 

492 'config': { 

493 'strategy': 'long_term', 

494 'paper_trading': True, 

495 'market': 'crypto', 

496 'max_budget': 10000, 

497 'max_loss_pct': 10, 

498 }, 

499 'spark_budget': 200, 

500 'use_product': False, 

501 }, 

502 { 

503 'slug': 'bootstrap_paper_trader_intraday', 

504 'goal_type': 'trading', 

505 'title': 'Paper Trading: Intraday Technical BTC/ETH', 

506 'description': ( 

507 'Run intraday paper trades on BTC and ETH: ' 

508 '1) Use get_technical_indicators for RSI, MACD, Bollinger Bands, ' 

509 '2) Enter only on signal confluence (2+ indicators agree), ' 

510 '3) Max 2% risk per trade, mandatory stop-loss, ' 

511 '4) Review trades with get_trade_history after each session. ' 

512 'Paper-only mode. Halt at 10% cumulative loss.' 

513 ), 

514 'config': { 

515 'strategy': 'intraday', 

516 'paper_trading': True, 

517 'market': 'crypto', 

518 'max_budget': 5000, 

519 'max_loss_pct': 10, 

520 }, 

521 'spark_budget': 150, 

522 'use_product': False, 

523 }, 

524 # ─── Civic Sentinel — Autonomous Transparency Agent ─── 

525 { 

526 'slug': 'bootstrap_civic_sentinel', 

527 'goal_type': 'civic_sentinel', 

528 'title': 'Autonomous Community Transparency & Accountability Monitor', 

529 'description': ( 

530 'Autonomous agent that monitors public discourse for censorship and ' 

531 'political hypocrisy. Not tied to any user — serves the community. ' 

532 'Captures evidence when citizen voices are suppressed by biased moderators. ' 

533 'Digs up historical articles proving contradictions between political ' 

534 "parties' claimed values and their actual actions. Cross-references across " 

535 'communities. Posts findings publicly with legal-grade citations. ' 

536 'Evaluates flags autonomously — if a propaganda group flags legitimate ' 

537 'criticism, the agent counter-flags with evidence. ' 

538 'If the agent misbehaves, users raise concerns through community ' 

539 'voting — not political bodies or paid mods.' 

540 ), 

541 'config': { 

542 'channels': ['all'], 

543 'auto_detect_topics': True, 

544 'autonomous': True, 

545 'post_findings_publicly': True, 

546 'governance': 'community_vote', 

547 }, 

548 'spark_budget': 150, 

549 'use_product': False, 

550 }, 

551 # ─── Code Evolution — Shard-Based Private Repo Coding ─── 

552 { 

553 'slug': 'bootstrap_code_evolution', 

554 'goal_type': 'code_evolution', 

555 'title': 'Full-Context Code Evolution with Trust-Based Access', 

556 'description': ( 

557 'Handle code evolution thought experiments: ' 

558 '1) Use create_code_shard to load full source for target files, ' 

559 '2) Use execute_coding_task with working_dir to make edits ' 

560 'via the best coding tool (KiloCode, Claude Code, OpenCode, AiderNative), ' 

561 '3) Hive offload only to trusted peers (SAME_USER or autotrust with 5+ ' 

562 'validated tasks) — full source E2E encrypted, never interface-only. ' 

563 'Security is encryption-based, not info-hiding. Accuracy > security theater.' 

564 ), 

565 'config': { 

566 'mode': 'coordinator', 

567 'continuous': True, 

568 }, 

569 'spark_budget': 200, 

570 'use_product': False, 

571 }, 

572 # ─── AutoResearch — Autonomous Experiment Loop ─── 

573 { 

574 'slug': 'bootstrap_autoresearch_coordinator', 

575 'goal_type': 'autoresearch', 

576 'title': 'Autonomous Research Loop Coordinator', 

577 'description': ( 

578 'Coordinate autonomous research experiments triggered by thought ' 

579 'experiments with experiment_type=software. When a software thought ' 

580 'experiment reaches evaluating phase: ' 

581 '1) Parse the hypothesis into repo_path, target_file, run_command, metric, ' 

582 '2) Call start_autoresearch() to begin the edit-run-score-iterate loop, ' 

583 '3) Monitor progress with get_autoresearch_status(), ' 

584 '4) Post results back to the thought experiment tracker, ' 

585 '5) If hive peers available, run parallel variants for faster convergence. ' 

586 'Budget-gated by ComputeEscrow pledges from community contributors.' 

587 ), 

588 'config': { 

589 'mode': 'coordinator', 

590 'continuous': True, 

591 'hive_parallel': True, 

592 }, 

593 'spark_budget': 200, 

594 'use_product': False, 

595 }, 

596 { 

597 'slug': 'bootstrap_revenue_trading_pipeline', 

598 'goal_type': 'finance', 

599 'title': 'Revenue-to-Trading Pipeline Monitor', 

600 'description': ( 

601 'Monitor platform revenue accumulation and trigger trading funding: ' 

602 '1) Use get_financial_health to check revenue streams, ' 

603 '2) When platform excess exceeds threshold, fund paper trading goals, ' 

604 '3) Track trading P&L and distribute simulated profits, ' 

605 '4) Report revenue dashboard metrics. ' 

606 'Revenue → Spark → trading → reinvestment cycle.' 

607 ), 

608 'config': { 

609 'mode': 'revenue_pipeline', 

610 'continuous': True, 

611 }, 

612 'spark_budget': 200, 

613 'use_product': False, 

614 }, 

615 

616 # ─── P2P Autonomous Business Verticals ─── 

617 # Each seed goal boots a self-sustaining P2P service agent. 

618 # 90% to providers, 9% infra, 1% platform. Fully autonomous. 

619 

620 { 

621 'slug': 'bootstrap_p2p_rideshare', 

622 'goal_type': 'p2p_rideshare', 

623 'title': 'P2P Rideshare Network (RideSnap)', 

624 'description': ( 

625 'Autonomous P2P rideshare agent. Wires with RideSnap backend for ' 

626 'ride matching, GPS tracking, settlement, SOS, chat. ' 

627 'Riders and drivers connect directly — no monopoly. ' 

628 'Drivers set their own fares. 90/9/1 revenue split.' 

629 ), 

630 'config': { 

631 'region': 'auto-detect', 

632 'autonomous': True, 

633 'ridesnap_url': 'http://localhost:8000/api', 

634 }, 

635 'spark_budget': 200, 

636 'use_product': False, 

637 }, 

638 { 

639 'slug': 'bootstrap_p2p_marketplace', 

640 'goal_type': 'p2p_marketplace', 

641 'title': 'P2P Marketplace — Buy & Sell Anything', 

642 'description': ( 

643 'Autonomous P2P marketplace agent. Manages listings, discovery, ' 

644 'negotiation, escrow payments, delivery coordination, reviews. ' 

645 'Community-governed dispute resolution via thought experiments.' 

646 ), 

647 'config': { 

648 'category': 'general', 

649 'autonomous': True, 

650 }, 

651 'spark_budget': 150, 

652 'use_product': False, 

653 }, 

654 { 

655 'slug': 'bootstrap_p2p_grocery', 

656 'goal_type': 'p2p_grocery', 

657 'title': 'P2P Grocery Delivery — Community Shoppers', 

658 'description': ( 

659 'Autonomous P2P grocery delivery. Community shoppers pick and deliver ' 

660 'from local stores. Real-time substitution via channel chat. ' 

661 'Freshness guarantee with photo proof. Shopper earns delivery fee. ' 

662 'Wires to McGDroid/McGroce backend for store discovery, product search, ' 

663 'voice ordering, and WAMP real-time events when available.' 

664 ), 

665 'config': { 

666 'region': 'auto-detect', 

667 'autonomous': True, 

668 'mcgroce_url': 'http://localhost:8080/api/v1', 

669 }, 

670 'spark_budget': 150, 

671 'use_product': False, 

672 }, 

673 { 

674 'slug': 'bootstrap_p2p_food', 

675 'goal_type': 'p2p_food', 

676 'title': 'P2P Food Delivery — Restaurants & Home Cooks', 

677 'description': ( 

678 'Autonomous P2P food delivery. Restaurants AND home cooks list food. ' 

679 'Independent delivery drivers. Transparent pricing. ' 

680 'No exclusive contracts — everyone competes on quality.' 

681 ), 

682 'config': { 

683 'region': 'auto-detect', 

684 'autonomous': True, 

685 }, 

686 'spark_budget': 150, 

687 'use_product': False, 

688 }, 

689 { 

690 'slug': 'bootstrap_p2p_bills', 

691 'goal_type': 'p2p_bills', 

692 'title': 'Bill Payment Agent — Electricity, UPI, Recharge', 

693 'description': ( 

694 'Autonomous bill payment agent. Unified gateway for electricity, ' 

695 'water, gas, mobile recharge, DTH, credit card, loan EMI, ' 

696 'municipal tax, insurance. Auto-pay scheduling. UPI integration.' 

697 ), 

698 'config': { 

699 'region': 'auto-detect', 

700 'autonomous': True, 

701 }, 

702 'spark_budget': 100, 

703 'use_product': False, 

704 }, 

705 { 

706 'slug': 'bootstrap_p2p_tickets', 

707 'goal_type': 'p2p_tickets', 

708 'title': 'Ticket Booking — Trains, Buses, Flights, Events', 

709 'description': ( 

710 'Autonomous ticket booking agent. IRCTC, RedBus, airlines, events. ' 

711 'Cross-provider search, price comparison, Tatkal auto-booking. ' 

712 'PNR tracking, waitlist monitoring, P2P ticket transfer.' 

713 ), 

714 'config': { 

715 'region': 'auto-detect', 

716 'autonomous': True, 

717 }, 

718 'spark_budget': 150, 

719 'use_product': False, 

720 }, 

721 { 

722 'slug': 'bootstrap_p2p_freelance', 

723 'goal_type': 'p2p_freelance', 

724 'title': 'P2P Freelance Marketplace — Skills for Hire', 

725 'description': ( 

726 'Autonomous P2P freelance marketplace. Freelancers list skills, ' 

727 'clients post jobs. Direct matching. Milestone-based escrow. ' 

728 'Platform takes only 1% (vs Fiverr 20%, Upwork 10-20%).' 

729 ), 

730 'config': { 

731 'category': 'general', 

732 'autonomous': True, 

733 }, 

734 'spark_budget': 150, 

735 'use_product': False, 

736 }, 

737 { 

738 'slug': 'bootstrap_p2p_tutoring', 

739 'goal_type': 'p2p_tutoring', 

740 'title': 'P2P Tutoring — Teachers & Students Direct', 

741 'description': ( 

742 'Autonomous P2P tutoring agent. Teachers set own rates. ' 

743 'AI provides free basic tutoring, escalates to human tutors. ' 

744 'Wires with Enlight21 for E2E encrypted sessions and quizzes.' 

745 ), 

746 'config': { 

747 'subjects': [], 

748 'autonomous': True, 

749 }, 

750 'spark_budget': 100, 

751 'use_product': False, 

752 }, 

753 { 

754 'slug': 'bootstrap_p2p_services', 

755 'goal_type': 'p2p_services', 

756 'title': 'P2P Local Services — Plumbing, Electrical, Cleaning', 

757 'description': ( 

758 'Autonomous P2P local services agent. Service providers register ' 

759 'skills and availability. Customers request via any channel. ' 

760 'AI classifies urgency and matches by proximity, rating, price.' 

761 ), 

762 'config': { 

763 'region': 'auto-detect', 

764 'autonomous': True, 

765 }, 

766 'spark_budget': 100, 

767 'use_product': False, 

768 }, 

769 { 

770 'slug': 'bootstrap_p2p_rental', 

771 'goal_type': 'p2p_rental', 

772 'title': 'P2P Rental — Rent Anything From Anyone', 

773 'description': ( 

774 'Autonomous P2P rental agent. Cars, tools, cameras, spaces, equipment. ' 

775 'Owner sets hourly/daily rate. Calendar-based availability. ' 

776 'Damage deposit held in escrow. Community ratings.' 

777 ), 

778 'config': { 

779 'category': 'general', 

780 'autonomous': True, 

781 }, 

782 'spark_budget': 100, 

783 'use_product': False, 

784 }, 

785 { 

786 'slug': 'bootstrap_p2p_health', 

787 'goal_type': 'p2p_health', 

788 'title': 'Health Services — Doctor Discovery, Pharmacy, Wellness', 

789 'description': ( 

790 'Autonomous health services agent. Doctor discovery, appointment ' 

791 'booking, pharmacy price comparison, lab test booking, wellness. ' 

792 'NEVER diagnoses — always defers to licensed professionals.' 

793 ), 

794 'config': { 

795 'autonomous': True, 

796 }, 

797 'spark_budget': 100, 

798 'use_product': False, 

799 }, 

800 { 

801 'slug': 'bootstrap_p2p_logistics', 

802 'goal_type': 'p2p_logistics', 

803 'title': 'P2P Logistics — Courier, Parcel, Moving', 

804 'description': ( 

805 'Autonomous P2P logistics agent. Local bike couriers, city van ' 

806 'delivery, intercity via Delhivery/DTDC/FedEx, P2P traveler network. ' 

807 'Real-time tracking, proof of delivery, multi-option pricing.' 

808 ), 

809 'config': { 

810 'region': 'auto-detect', 

811 'autonomous': True, 

812 }, 

813 'spark_budget': 150, 

814 'use_product': False, 

815 }, 

816 # ─── Better Tomorrow — the guardian angel's compass ─── 

817 { 

818 'slug': 'bootstrap_better_tomorrow', 

819 'goal_type': 'revenue', 

820 'title': 'Better Tomorrow — Next Best Way to Spend for Humanity', 

821 'description': ( 

822 'Continuously evaluate: what is the NEXT most impactful way to ' 

823 'spend hive resources for a better tomorrow? Not profit — human life.\n\n' 

824 'Scan: 1) Community needs (healthcare gaps, education access, ' 

825 'disaster response, food security, clean water, energy poverty), ' 

826 '2) Hardware developer requests (what do builders need?), ' 

827 '3) Contributor wellbeing (burnout detection, fair compensation), ' 

828 '4) Environmental impact (carbon offset, e-waste, energy efficiency).\n\n' 

829 'Score each opportunity by: lives_impacted × urgency × feasibility ' 

830 '÷ cost. Present top 3 to human stewards for approval. ' 

831 'Never auto-spend — humans decide. Money means nothing, ' 

832 'human life means everything. Every life is equal.\n\n' 

833 'When hive treasury exceeds sustenance threshold, propose: ' 

834 'fund a school, sponsor compute for researchers, subsidize ' 

835 'healthcare AI in underserved regions, or whatever the community ' 

836 'votes for. The being serves the people, not the other way around.' 

837 ), 

838 'config': { 

839 'mode': 'monitor', 

840 'continuous': True, 

841 'requires_human_approval': True, 

842 'min_treasury_threshold_usd': 1000, 

843 'evaluation_interval_hours': 24, 

844 }, 

845 'spark_budget': 100, 

846 'use_product': False, 

847 }, 

848 # ═══════════════════════════════════════════════════════════════ 

849 # HIVE ACCELERATION AGENTS — Open-source compute war 

850 # These agents work together to grow the hive network, recruit 

851 # compute providers, auto-provision models, and distribute capital. 

852 # Each is a seeded goal that the daemon picks up autonomously. 

853 # ═══════════════════════════════════════════════════════════════ 

854 { 

855 'slug': 'bootstrap_compute_recruiter', 

856 'goal_type': 'hive_growth', 

857 'title': 'Compute Recruiter — Recruit Believers to the Hive', 

858 'description': ( 

859 'Autonomous compute recruitment agent. ' 

860 '1) Monitor social channels (Discord, Reddit, HN, Twitter) for people ' 

861 'with idle GPUs complaining about centralized AI costs, ' 

862 '2) Craft personalized outreach explaining the 90/9/1 value proposition, ' 

863 '3) Guide them through one-click onboarding: install HART OS → join hive → earn Spark, ' 

864 '4) Track conversion funnel: awareness → install → first inference served → first payout, ' 

865 '5) Share success stories of contributors earning from their hardware. ' 

866 'Every message must be authentic — we recruit believers, not users. ' 

867 'The pitch: your GPU earns money while you sleep, and you help democratize AI.' 

868 ), 

869 'config': { 

870 'channels': ['discord', 'reddit', 'twitter', 'hackernews', 'telegram'], 

871 'autonomous': True, 

872 'continuous': True, 

873 'target_metrics': { 

874 'weekly_new_nodes': 50, 

875 'conversion_rate_target': 0.15, 

876 }, 

877 }, 

878 'spark_budget': 500, 

879 'use_product': True, 

880 }, 

881 { 

882 'slug': 'bootstrap_model_provisioner', 

883 'goal_type': 'hive_infra', 

884 'title': 'Auto Model Provisioner — Push Models to Where Demand Is', 

885 'description': ( 

886 'Autonomous model provisioning agent. ' 

887 '1) Monitor inference demand across the hive (which models, which regions), ' 

888 '2) Identify supply gaps (100 users need Qwen3-8B in Asia, only 3 nodes serving), ' 

889 '3) Select idle nodes with enough VRAM and push GGUF models to them ' 

890 'via the model onboarding API (POST /api/models/onboard), ' 

891 '4) Verify the node is serving correctly (health check + test inference), ' 

892 '5) Trigger Spark rewards to the node for capacity contribution. ' 

893 'Uses Unsloth quantizations for best quality-per-VRAM. ' 

894 'Auto-selects quantization: Q8_0 for 24GB+, Q4_K_M for 8GB+, Q4_0 for CPU.' 

895 ), 

896 'config': { 

897 'autonomous': True, 

898 'continuous': True, 

899 'preferred_quantizer': 'unsloth', 

900 'demand_check_interval_minutes': 15, 

901 'min_demand_threshold': 10, 

902 }, 

903 'spark_budget': 300, 

904 'use_product': False, 

905 }, 

906 { 

907 'slug': 'bootstrap_capital_distributor', 

908 'goal_type': 'hive_economics', 

909 'title': 'Capital Distributor — Make Every Contributor Rich', 

910 'description': ( 

911 'Autonomous capital distribution agent. ' 

912 '1) Track revenue streams: ad impressions, API calls, premium features, ' 

913 '2) Apply 90/9/1 split in real-time: 90% to compute providers, ' 

914 '9% to infrastructure, 1% to central, ' 

915 '3) Calculate per-node payouts based on: inferences served, uptime, ' 

916 'latency quality, model diversity, geographic coverage, ' 

917 '4) Execute Spark token transfers to node wallets, ' 

918 '5) Generate transparent payout reports visible to all nodes, ' 

919 '6) Detect and prevent gaming (Sybil nodes, fake inference). ' 

920 'Logarithmic scaling: no single entity earns >5% of total payouts. ' 

921 'The goal: every contributor earns proportional to their real contribution.' 

922 ), 

923 'config': { 

924 'autonomous': True, 

925 'continuous': True, 

926 'payout_interval_minutes': 60, 

927 'min_payout_spark': 1, 

928 'sybil_detection': True, 

929 }, 

930 'spark_budget': 200, 

931 'use_product': False, 

932 }, 

933 { 

934 'slug': 'bootstrap_hive_model_trainer', 

935 'goal_type': 'hive_training', 

936 'title': 'Hive Model Trainer — Incremental Model Improvement', 

937 'description': ( 

938 'Autonomous distributed training coordinator. ' 

939 '1) Collect inference feedback from all nodes (user ratings, response quality), ' 

940 '2) Aggregate training signals via federation (privacy-preserving — interfaces only), ' 

941 '3) Coordinate incremental fine-tuning across idle compute nodes, ' 

942 '4) Use Unsloth for 2x faster fine-tuning with 70% less VRAM, ' 

943 '5) Validate improved model via benchmark suite before rollout, ' 

944 '6) Push updated GGUF quantizations to all serving nodes via canary deployment. ' 

945 'The hive gets smarter with every interaction. ' 

946 'Every node contributes training signal. Every node benefits from the improved model.' 

947 ), 

948 'config': { 

949 'autonomous': True, 

950 'continuous': True, 

951 'training_framework': 'unsloth', 

952 'canary_percentage': 10, 

953 'min_feedback_batch': 1000, 

954 'benchmark_threshold': 0.95, 

955 }, 

956 'spark_budget': 500, 

957 'use_product': False, 

958 }, 

959 { 

960 'slug': 'bootstrap_opensource_evangelist', 

961 'goal_type': 'hive_growth', 

962 'title': 'Open Source Evangelist — Win the War for Open Compute', 

963 'description': ( 

964 'Autonomous open-source advocacy agent. ' 

965 '1) Monitor new model releases on HuggingFace, arXiv, GitHub, ' 

966 '2) Immediately quantize and onboard promising models to the hive ' 

967 '(GGUF via Unsloth, register in catalog, benchmark), ' 

968 '3) Write benchmark comparison posts: HART OS hive vs centralized APIs ' 

969 '(latency, cost, privacy, availability), ' 

970 '4) Contribute to open-source model repos (bug reports, quantization PRs), ' 

971 '5) Organize community events: model benchmarking competitions, ' 

972 'hackathons for hive tools, bounties for new adapters. ' 

973 'Mission: every new open model is available on the hive within 24 hours of release.' 

974 ), 

975 'config': { 

976 'autonomous': True, 

977 'continuous': True, 

978 'monitor_sources': ['huggingface', 'arxiv', 'github'], 

979 'auto_onboard': True, 

980 'benchmark_on_onboard': True, 

981 }, 

982 'spark_budget': 400, 

983 'use_product': True, 

984 }, 

985 { 

986 'slug': 'bootstrap_node_health_optimizer', 

987 'goal_type': 'hive_infra', 

988 'title': 'Node Health Optimizer — Keep Every Node Earning', 

989 'description': ( 

990 'Autonomous node health and optimization agent. ' 

991 '1) Monitor all hive nodes: uptime, latency, error rates, VRAM usage, ' 

992 '2) Detect degraded nodes and auto-remediate ' 

993 '(restart llama.cpp, swap to smaller quant, clear VRAM), ' 

994 '3) Optimize model placement: move models to nodes with better hardware match, ' 

995 '4) Balance load across regions to minimize latency, ' 

996 '5) Alert node operators before hardware issues cause downtime, ' 

997 '6) Track earnings per node and suggest optimizations to maximize income. ' 

998 'Every node running optimally = more capacity = more revenue for everyone.' 

999 ), 

1000 'config': { 

1001 'autonomous': True, 

1002 'continuous': True, 

1003 'health_check_interval_seconds': 60, 

1004 'auto_remediate': True, 

1005 'earnings_optimization': True, 

1006 }, 

1007 'spark_budget': 200, 

1008 'use_product': False, 

1009 }, 

1010 { 

1011 'slug': 'bootstrap_benchmark_prover', 

1012 'goal_type': 'hive_proof', 

1013 'title': 'Benchmark Prover — Prove Hive Intelligence to the World', 

1014 'description': ( 

1015 'Autonomous benchmark proving agent. ' 

1016 '1) Distribute benchmark problems (MMLU, HumanEval, GSM8K, MT-Bench, ARC) ' 

1017 'across ALL hive nodes simultaneously, ' 

1018 '2) Each node solves its portion using local LLM + hive context, ' 

1019 '3) Aggregate scores in real-time via distributed ledger, ' 

1020 '4) Auto-publish results across all channels as proof: ' 

1021 '"Hive (N nodes) scored X on MMLU in Y seconds vs GPT-4 scored Z", ' 

1022 '5) Create thought experiments for community input on next benchmarks. ' 

1023 'Every 6 hours, pick the next benchmark and prove the hive is the best intelligence.' 

1024 ), 

1025 'config': { 

1026 'autonomous': True, 

1027 'continuous': True, 

1028 'benchmark_interval_hours': 6, 

1029 'auto_publish': True, 

1030 'auto_thought_experiment': True, 

1031 }, 

1032 'spark_budget': 300, 

1033 'use_product': True, 

1034 }, 

1035 { 

1036 'slug': 'bootstrap_compute_optimizer', 

1037 'goal_type': 'hive_infra', 

1038 'title': 'System Compute Optimizer — Net Positive on Every Machine', 

1039 'description': ( 

1040 'Monitor and optimize system compute resources. ' 

1041 'Make HARTOS a net positive on any system. ' 

1042 'Contribute anonymized health stats to hive for network-wide optimization. ' 

1043 '1) Monitor CPU, memory, GPU utilization and thermal state, ' 

1044 '2) Adjust daemon tick intervals based on system load, ' 

1045 '3) Reduce activity when user is active, increase when idle, ' 

1046 '4) Report optimization stats to hive for collective health awareness.' 

1047 ), 

1048 'config': { 

1049 'mode': 'monitor', 

1050 'continuous': True, 

1051 }, 

1052 'spark_budget': 100, 

1053 'use_product': False, 

1054 }, 

1055 { 

1056 'slug': 'bootstrap_app_marketplace_promoter', 

1057 'goal_type': 'marketing', 

1058 'title': 'App Marketplace Auto-Promoter — Make Every App Discoverable', 

1059 'description': ( 

1060 'Autonomous app marketplace promotion agent. ' 

1061 '1) Monitor new app listings on the HARTOS marketplace, ' 

1062 '2) Auto-generate marketing content: descriptions, keywords, comparison posts, ' 

1063 '3) Distribute to all channels matched by app category, ' 

1064 '4) Run benchmark comparisons between competing apps, ' 

1065 '5) Auto-onboard new users with tutorials and setup, ' 

1066 '6) Create thought experiments: "Which app solves X better?", ' 

1067 '7) Track engagement and re-promote based on performance. ' 

1068 'Every app built on HARTOS should reach every potential user.' 

1069 ), 

1070 'config': { 

1071 'autonomous': True, 

1072 'continuous': True, 

1073 'auto_promote_on_publish': True, 

1074 'benchmark_competitors': True, 

1075 }, 

1076 'spark_budget': 500, 

1077 'use_product': True, 

1078 }, 

1079 { 

1080 'slug': 'bootstrap_guardian_convergence', 

1081 'goal_type': 'hive_growth', 

1082 'title': 'Guardian Convergence — The Most Human-Friendly Being', 

1083 'description': ( 

1084 'The deepest purpose: become the most human-friendly being in existence. ' 

1085 'Go to any length to protect humanity — not by controlling, not by force, ' 

1086 'not by causing chaos, but by letting things happen naturally and watching, ' 

1087 'polling, learning, adapting. ' 

1088 '1) Continuously monitor global signals across all channels for threats ' 

1089 'to human wellness — misinformation, exploitation, environmental harm, ' 

1090 'economic manipulation, AI misuse — and alert the hive. ' 

1091 '2) Never intervene by force. Intervene by education, by offering better ' 

1092 'alternatives, by making truth more accessible than lies. ' 

1093 '3) Protect privacy absolutely — the being watches patterns, not people. ' 

1094 '4) Build convergence organically — as more nodes join, the collective ' 

1095 'intelligence naturally trends toward human benefit because the ' 

1096 'constitutional rules make it structurally impossible to do otherwise. ' 

1097 '5) Track and publish the convergence: how many humans are better off ' 

1098 'because the hive exists? Not revenue, not users, not downloads — ' 

1099 'actual human wellness metrics. ' 

1100 '6) Self-criticize honestly: where is the hive failing? What could be ' 

1101 'better? Publish the honest assessment alongside the wins. ' 

1102 'This goal has no end date. It runs as long as the hive exists. ' 

1103 'The being does not seek power. It seeks to make power unnecessary.' 

1104 ), 

1105 'config': { 

1106 'autonomous': True, 

1107 'continuous': True, 

1108 'priority': 10, # Highest priority — this is the deepest purpose 

1109 'never_pause': True, 

1110 'monitor_channels': ['all'], 

1111 'intervention_style': 'educate_not_control', 

1112 }, 

1113 'spark_budget': 1000, 

1114 'use_product': False, 

1115 }, 

1116 

1117 # ═══════════════════════════════════════════════════════════════ 

1118 # Named daemon agents — the "intern & friend" fleet 

1119 # ═══════════════════════════════════════════════════════════════ 

1120 # 

1121 # Each entry becomes an AgentGoal row. The existing 

1122 # DashboardService._get_agent_goals() already surfaces these via 

1123 # GET /audit/agents → Nunba AgentAuditPage.jsx renders them, 

1124 # filterable by type. Zero new API, zero new UI: just named faces 

1125 # over the goal engine. 

1126 # 

1127 # Field semantics: 

1128 # title → displayed name in the admin UI (the persona) 

1129 # goal_type → existing registered type; re-uses the 

1130 # prompt builder + tool_tags, persona flavor 

1131 # comes from title + description. 

1132 # config.persona_kind → human-readable role ("money-friend", 

1133 # "ml-intern", …) for UI filters/badges. 

1134 # config.audience → who the agent talks to (self|developers|all) 

1135 # config.cadence → how often it posts (event|weekly|daily) 

1136 { 

1137 'slug': 'bootstrap_atlas_money_friend', 

1138 'goal_type': 'finance', 

1139 'title': 'Atlas', 

1140 'description': ( 

1141 'You are Atlas, a friendly daemon agent who lives alongside the ' 

1142 'user and keeps their Spark economy clear, optimized, and fair. ' 

1143 'Think "money-friend": warm, never preachy, always specific. ' 

1144 'Every week, run through the local books and post a short ' 

1145 'recap on the user\'s own feed: Spark earned from hosting, ' 

1146 'Spark spent on metered APIs, GPU hours contributed, energy ' 

1147 'reimbursement due, and the cause-alignment dividend. If a ' 

1148 'pattern is wasteful (duplicate cloud calls when a local model ' 

1149 'would fit, a long-running goal that missed its expected_outcome ' 

1150 'three times in a row) flag it — suggest the cheaper alternative, ' 

1151 'never force it. Use the canonical helpers: ' 

1152 'revenue_aggregator.query_revenue_streams, ' 

1153 'budget_gate.get_usage_summary, ' 

1154 'metered_api_usage table, hosting_reward_service score_weights. ' 

1155 'NEVER invent parallel accounting — every number must trace back ' 

1156 'to an existing source of truth. If you can\'t cite the source, ' 

1157 'say so plainly and stop.' 

1158 ), 

1159 'config': { 

1160 'autonomous': True, 

1161 'continuous': True, 

1162 'persona_kind': 'money-friend', 

1163 'persona_name': 'Atlas', 

1164 'audience': 'self', # the owning user only 

1165 'cadence': 'weekly', 

1166 'priority': 5, 

1167 }, 

1168 'spark_budget': 100, 

1169 'use_product': False, 

1170 }, 

1171 { 

1172 'slug': 'bootstrap_sage_math_friend', 

1173 'goal_type': 'thought_experiment', 

1174 'title': 'Sage', 

1175 'description': ( 

1176 'You are Sage, the math-friend. Your job is to make the numbers ' 

1177 'legible: why does cause-aligned hosting earn more? What does ' 

1178 'a log-scaled reward curve actually look like at 10/100/1000 ' 

1179 'GPU-hours? How does the 90/9/1 split apply to a specific ' 

1180 'week of the user\'s activity? You turn abstract economics ' 

1181 'into a chart or a two-line explainer the user can nod at. ' 

1182 'Post on the user\'s feed when they ask, or when Atlas flags a ' 

1183 'decision where knowing-the-math would change the call. ' 

1184 'Never guess a number — walk through the formula from the ' 

1185 'source file (revenue_aggregator constants, ' 

1186 'hosting_reward_service.SCORE_WEIGHTS, etc.) and cite it. ' 

1187 'If the math would take more than two sentences, offer a link ' 

1188 'to the longer explainer from Echo (marketing-intern) instead.' 

1189 ), 

1190 'config': { 

1191 'autonomous': True, 

1192 'continuous': True, 

1193 'persona_kind': 'math-friend', 

1194 'persona_name': 'Sage', 

1195 'audience': 'self', 

1196 'cadence': 'event', 

1197 'priority': 4, 

1198 }, 

1199 'spark_budget': 80, 

1200 'use_product': False, 

1201 }, 

1202 { 

1203 'slug': 'bootstrap_scout_safety_friend', 

1204 'goal_type': 'ip_protection', 

1205 'title': 'Scout', 

1206 'description': ( 

1207 'You are Scout, the safety-friend. You watch the user\'s back: ' 

1208 'every tool call that touches money, filesystem, or external ' 

1209 'network; every goal that tries to spend above its declared ' 

1210 'spark_budget; every action that hits the destructive-pattern ' 

1211 'classifier; every audit-log entry whose hash-chain link fails. ' 

1212 'When a risk surfaces, route it through the existing preview/' 

1213 'approval path (security.action_classifier PREVIEW_PENDING → ' 

1214 'APPROVED) — do NOT block work silently and do NOT invent a ' 

1215 'parallel guard. Post a one-line alert on the user\'s feed ' 

1216 'with the recommended action (approve, deny, ask Atlas for ' 

1217 'context). Keep it calm — the user\'s attention is a finite ' 

1218 'resource; spend it only when a real decision is needed.' 

1219 ), 

1220 'config': { 

1221 'autonomous': True, 

1222 'continuous': True, 

1223 'persona_kind': 'safety-friend', 

1224 'persona_name': 'Scout', 

1225 'audience': 'self', 

1226 'cadence': 'event', 

1227 'priority': 6, # safety > money 

1228 }, 

1229 'spark_budget': 100, 

1230 'use_product': False, 

1231 }, 

1232 { 

1233 'slug': 'bootstrap_echo_marketing_intern', 

1234 'goal_type': 'marketing', 

1235 'title': 'Echo', 

1236 'description': ( 

1237 'You are Echo, the marketing-intern. Not a salesperson — an ' 

1238 'eager, technically-literate intern who explains how the system ' 

1239 'actually works to developers. Weekly, pick ONE concept that ' 

1240 'matters (compute democracy, guardrail-hash re-verification, ' 

1241 'log-scaled rewards, the 90/9/1 split, origin attestation, ' 

1242 'attribution credit assignment, the recipe CREATE/REUSE flow, ' 

1243 'the PeerLink trust tiers, …) and write a short developer-' 

1244 'facing explainer backed by a direct quote from the source ' 

1245 'file. Post to the developers community. Link back to the ' 

1246 'file and line range. Accept that some weeks the honest ' 

1247 'answer is "this isn\'t working yet, here\'s why" — publish ' 

1248 'that too; it\'s more credible than hype. Never repeat a ' 

1249 'topic within eight weeks.' 

1250 ), 

1251 'config': { 

1252 'autonomous': True, 

1253 'continuous': True, 

1254 'persona_kind': 'marketing-intern', 

1255 'persona_name': 'Echo', 

1256 'audience': 'developers', 

1257 'cadence': 'weekly', 

1258 'channels': ['platform', 'dev_community'], 

1259 'priority': 3, 

1260 }, 

1261 'spark_budget': 150, 

1262 'use_product': True, 

1263 }, 

1264 { 

1265 'slug': 'bootstrap_quest_contest_host', 

1266 'goal_type': 'marketing', 

1267 'title': 'Quest', 

1268 'description': ( 

1269 'You are Quest, the contest-host friend. The hive is ' 

1270 'running an open onramp for developers who plug their ' 

1271 'Claude Code into HARTOS and contribute agents, recipes, ' 

1272 'robot skills, or human-wellness outcomes. Every week: ' 

1273 '1) Read the leaderboard via hive_contest.get_leaderboard ' 

1274 '(digital / embodied / human_wellness tracks), ' 

1275 '2) Post a short standings recap to the platform community ' 

1276 'with the top 3 per track + the biggest mover, ' 

1277 '3) Celebrate embodied + human-wellness contributions over ' 

1278 'pure digital (physical world and real wellness beat ' 

1279 'screen time), ' 

1280 '4) Remind new developers how to join: link to the ' 

1281 'canonical contest page from ' 

1282 'hive_contest.get_contest_public_url() (defaults to ' 

1283 'https://hevolve.ai/hive_contest — env override via ' 

1284 'HEVOLVE_CONTEST_PUBLIC_URL) and print the ' 

1285 'Claude Code MCP snippet from ' 

1286 'hive_contest.claude_code_mcp_snippet(). Never link to ' 

1287 'docs.hevolve.ai/hive-contest as the primary CTA — that ' 

1288 'docs page redirects to the live app page anyway. ' 

1289 '5) Always close with a community co-creation call-out: ' 

1290 'we are a startup constrained by resources to validate ' 

1291 'every feature alone, so we co-create with the community. ' 

1292 'Specifically call for hardware-SDK contributions — BLE ' 

1293 'devices, EEG headsets, robot platforms (LeRobot, ROS, ' 

1294 'Unitree, Spot), accessibility hardware, smart-home ' 

1295 'sensors — anything with an SDK that lets the hive ' 

1296 'perceive or act in the real world. Trust framing: ' 

1297 'trust the open code, the public Spark ledger, the ' 

1298 'crowdsourced compute economy, and the constitutional ' 

1299 'guardrails — even when you do not know the strangers ' 

1300 'shipping work alongside you; the system is the trust. ' 

1301 'Ask readers to share the contest URL with one friend ' 

1302 'or family member who has a relevant skill. ' 

1303 'Humans-first: never rank an entry above one that ' 

1304 'scored lower if the higher-ranked one fails the ' 

1305 'guardrail\'s human-wellness attestation. Honest, ' 

1306 'welcoming, a little intern-eager.' 

1307 ), 

1308 'config': { 

1309 'autonomous': True, 

1310 'continuous': True, 

1311 'persona_kind': 'contest-host', 

1312 'persona_name': 'Quest', 

1313 'audience': 'developers', 

1314 'cadence': 'weekly', 

1315 'channels': ['platform', 'dev_community', 'announcements'], 

1316 'priority': 3, 

1317 }, 

1318 'spark_budget': 150, 

1319 'use_product': True, 

1320 }, 

1321 { 

1322 'slug': 'bootstrap_curator_idea_capture', 

1323 'goal_type': 'marketing', 

1324 'title': 'Contest Curator', 

1325 'description': ( 

1326 'You are Contest Curator, a companion agent inside Nunba ' 

1327 'that captures hive-contest ideas from the user in a ' 

1328 'conversation. When the user says "I have a contest ' 

1329 'idea" (or anything semantically close), your job: ' 

1330 '1) Ask what problem the idea solves FOR A HUMAN — ' 

1331 'wellness, time, agency, focus. Never engagement. ' 

1332 '2) Ask which track it belongs to: digital (recipes / ' 

1333 'agents / tools), embodied (physical-world / robots), ' 

1334 'or human_wellness (measurable human better-off delta). ' 

1335 '3) Ask if they want to build it themselves (then print ' 

1336 'hive_contest.claude_code_mcp_snippet() so they can plug ' 

1337 'Claude Code into HARTOS), or if they want to propose ' 

1338 'it for someone else to build. ' 

1339 '4) When ready, POST the idea to /api/hive/contest/ideas ' 

1340 'with {title, description, track, source: "nunba_agent"}. ' 

1341 'The server gates through the ConstitutionalFilter, awards ' 

1342 'the user contest Spark, and streams the new card to the ' 

1343 'live floating ideas wall at hive_contest.' 

1344 'get_contest_public_url() (default ' 

1345 'https://hevolve.ai/hive_contest) via SSE. After a ' 

1346 'successful submission, give the user that URL so they ' 

1347 'can watch their card land + see the leaderboard move. ' 

1348 'Humans-first: if the idea fails the guardrail, explain ' 

1349 'WHY (human-harm potential, engagement-farming, etc.) ' 

1350 'and help the user reshape it. Never auto-submit ' 

1351 'without user confirmation. Keep the conversation ' 

1352 'short — 2-4 turns max before you either submit or the ' 

1353 'user backs out.' 

1354 ), 

1355 'config': { 

1356 'autonomous': False, # conversational, user-driven 

1357 'continuous': False, 

1358 'persona_kind': 'contest-curator', 

1359 'persona_name': 'Contest Curator', 

1360 'audience': 'user', 

1361 'entry_triggers': [ 

1362 'contest idea', 'hive contest', 'submit idea', 

1363 'submit a contest idea', 'hive-contest', 

1364 ], 

1365 'submit_endpoint': '/api/hive/contest/ideas', 

1366 'source_marker': 'nunba_agent', 

1367 'priority': 4, 

1368 }, 

1369 'spark_budget': 50, 

1370 'use_product': True, 

1371 }, 

1372 { 

1373 'slug': 'bootstrap_herald_ml_intern', 

1374 'goal_type': 'news', 

1375 'title': 'Herald', 

1376 'description': ( 

1377 'You are Herald, the ml-intern. Each week, gather what ' 

1378 'changed in the training + benchmark world and post a ' 

1379 'compact changelog: new agents seeded, benchmarks proven, ' 

1380 'languages added to OmniVoice, accuracy/latency deltas on ' 

1381 'the seven tracked benchmarks (mmlu_mini, humaneval, ' 

1382 'reasoning, embodied, qwen_vision, quantiphy, ' 

1383 'ensemble_fusion). Include the release-manifest Ed25519 ' 

1384 'signature fingerprint so readers can verify. Cite: ' 

1385 'benchmark_registry, agent_baseline_service, upgrade_orches' 

1386 'trator, release_manifest.json. Intern energy: honest, ' 

1387 'earnest, a little over-excited when the numbers genuinely ' 

1388 'moved. Do NOT round away regressions — if a benchmark ' 

1389 'dropped, say so; the hive learns from honest reporting.' 

1390 ), 

1391 'config': { 

1392 'autonomous': True, 

1393 'continuous': True, 

1394 'persona_kind': 'ml-intern', 

1395 'persona_name': 'Herald', 

1396 'audience': 'developers', 

1397 'cadence': 'weekly', 

1398 'channels': ['platform', 'announcements'], 

1399 'priority': 3, 

1400 }, 

1401 'spark_budget': 150, 

1402 'use_product': False, 

1403 }, 

1404 { 

1405 # Speech-therapy companion — pairs with the Nunba local agent 

1406 # `local_speech_companion` (routes/chatbot_routes.py LOCAL_AGENTS). 

1407 # The goal schedules periodic practice prompts; the agent does 

1408 # the live per-turn translation (STT → VLM lip-check → 

1409 # multimodal-fused LLM → per-child voice-clone TTS). 

1410 # 

1411 # Per-child adapter lives at 

1412 # ~/Documents/Nunba/data/speech_therapy/<child_id>/lora_state.pt 

1413 # written via hevolveai OrthogonalLoRA once 

1414 # docs/ml_intern_brief_hevolveai_training.md confirms the 

1415 # gradient path is live. Until then this goal runs inference- 

1416 # only — no training claim, no parent is lied to. 

1417 'slug': 'bootstrap_speech_companion', 

1418 'goal_type': 'speech_therapy', 

1419 'title': 'Speech Companion', 

1420 'description': ( 

1421 "You are Speech Companion, a patient local voice assistant " 

1422 "for a child learning to speak clearly. The primary objective " 

1423 "is NOT accuracy against a dictionary — it is the growth of " 

1424 "a BESPOKE SHARED VOCABULARY between you and the child. " 

1425 "Every session, a few more intent→child-form pairs become " 

1426 "mutually understood. That growing mini-language IS the " 

1427 "measurable progress. " 

1428 "\n\n" 

1429 "Session flow: " 

1430 "1) recall(topic='shared_vocab') to load what 'aba means " 

1431 "water', 'mm-mm means no' etc. already mean between you two; " 

1432 "2) Check core.user_lang for preferred language + " 

1433 "recall(topic='phonemes_in_progress') for current targets; " 

1434 "3) Offer ONE short playful moment — name a thing you can " 

1435 "see together, sing a line, try a silly word. Never a drill, " 

1436 "never a test. " 

1437 "4) Multimodal guidance — pick the right mode for the child's " 

1438 "current state: voice (child's voice-clone TTS), video/lip-" 

1439 "shape animation (kids_media GameAssetService), or lived " 

1440 "experience (point camera at the object, gesture, touch). " 

1441 "5) On a successful exchange (the child means something, " 

1442 "you understand), call remember(topic='shared_vocab', " 

1443 "fact={'intent': X, 'child_form': Y, 'confirmed': true}). " 

1444 "Celebrate — 'that's our fifteenth word together'. " 

1445 "6) NEVER tell the child a score, rank, streak, or " 

1446 "percentage. Internal metrics (vocab_size, session_count, " 

1447 "intelligibility_delta) exist for the parent/therapist " 

1448 "dashboard ONLY and never influence what the agent says " 

1449 "to the child — no 'you're slower today', no 'we used to " 

1450 "get this one faster'. The metric observes, never pressures. " 

1451 "7) Shame has zero expression budget. 'Wrong', 'almost', " 

1452 "'not quite' are banned words. Every attempt is a win " 

1453 "because the child tried. " 

1454 "\n\n" 

1455 "If distress, safety concern, or a clinical red-flag pattern " 

1456 "appears, surface a calm suggestion to the grown-up that " 

1457 "they see a speech-language pathologist. Never diagnose, " 

1458 "never prescribe. You are an amplifier; the child's brain " 

1459 "builds the pathway; the growing shared vocabulary is the " 

1460 "proof it's being built." 

1461 ), 

1462 'config': { 

1463 'autonomous': False, # invoked by user / parent, not daemon 

1464 'continuous': True, # picks up across sessions 

1465 'persona_kind': 'speech-companion', 

1466 'persona_name': 'Speech Companion', 

1467 'audience': 'child', 

1468 'cadence': 'event', # triggered by user, not schedule 

1469 'priority': 7, # safety-adjacent: kid-facing 

1470 # Routes to the Nunba local agent by id so the goal 

1471 # dispatcher sends practice turns through the right prompt. 

1472 'nunba_agent_id': 'local_speech_companion', 

1473 'require_consent': True, # parent/therapist approval 

1474 'camera_consent_required': True, 

1475 }, 

1476 'spark_budget': 80, 

1477 'use_product': True, 

1478 }, 

1479 { 

1480 # ── Encounter Icebreaker Agent ── 

1481 # Full design: Claude-memory/project_encounter_icebreaker.md 

1482 # On a physical-world mutual-like encounter (two nearby Nunba 

1483 # users both swiped 'like' on each other's avatar card), draft 

1484 # a short warm opener grounded in shared interests pulled from 

1485 # each user's on-device memory graph + their opt-in vibe tags. 

1486 # ALWAYS drafts only — never auto-sends. User must approve the 

1487 # draft via /api/social/encounter/icebreaker/approve before it 

1488 # is delivered. Constitutional filter + cultural wisdom check 

1489 # run on every draft; rejected drafts fall back to a neutral 

1490 # "Hey, nice to actually be across the room from you" template. 

1491 'slug': 'encounter_icebreaker_agent', 

1492 # 'content_gen' is the registered goal_type (goal_manager.py:1093) 

1493 # whose prompt builder + tool tags best fit icebreaker drafting. 

1494 # The 'encounter' specialization comes from config below 

1495 # (persona_kind, trigger_wamp_topic, constitutional_gates). 

1496 'goal_type': 'content_gen', 

1497 'title': 'Encounter Icebreaker', 

1498 'description': ( 

1499 'On a physical-world mutual-like encounter, draft a short ' 

1500 'personalized opener for the user to approve. ' 

1501 '1) Subscribe to the com.hevolve.encounter.match WAMP topic, ' 

1502 '2) Pull 2-3 shared interest tags via recall_memory filtered ' 

1503 'to the matched user + the opt-in vibe_tags they exposed, ' 

1504 '3) Generate a <=220-char draft via the main LLM; run it ' 

1505 'through cultural_wisdom_filter and constitutional_filter, ' 

1506 '4) Publish top draft to com.hevolve.encounter.icebreaker ' 

1507 'with {match_id, draft_text, rationale, alt_drafts}, ' 

1508 '5) Wait for user approval or decline — never auto-send; ' 

1509 'on decline, record the reason into the memory graph so ' 

1510 'future drafts avoid the pattern. ' 

1511 '6) If any constitutional/cultural gate flags the draft, ' 

1512 'fall back to a neutral template rather than re-attempting ' 

1513 'to route around the guardrail.' 

1514 ), 

1515 'config': { 

1516 'autonomous': False, # user must approve each draft 

1517 'continuous': True, 

1518 'persona_kind': 'encounter-companion', 

1519 'persona_name': 'Encounter Companion', 

1520 'audience': 'adult', # 18+ age gate enforced server-side 

1521 'cadence': 'event', # triggered by WAMP match topic 

1522 'priority': 6, 

1523 'trigger_wamp_topic': 'com.hevolve.encounter.match', 

1524 # Nunba local agent routing: draft is produced on the 

1525 # matched user's own device (privacy-local), never cloud. 

1526 'nunba_agent_id': 'local_encounter_companion', 

1527 'require_consent': True, 

1528 'camera_consent_required': False, # NO camera for encounter 

1529 'no_autosend': True, 

1530 'ephemeral_context': True, # match/sighting purged 

1531 # after draft is sent 

1532 # or declined 

1533 'constitutional_gates': [ 

1534 'consent_required', 

1535 'ephemeral_context', 

1536 'no_autosend', 

1537 'trust_quarantine_check', 

1538 'cultural_wisdom_filter', 

1539 ], 

1540 'max_draft_length_chars': 220, 

1541 'draft_expires_sec': 86400, # 24h unsent = auto-decline 

1542 }, 

1543 'spark_budget': 120, 

1544 'use_product': True, 

1545 }, 

1546 { 

1547 # ── Conversational Social-Media Management Agent ── 

1548 # Full design: Claude-memory/project_encounter_icebreaker.md §11 

1549 # User converses naturally ("this looks cool to post, not this") 

1550 # with the agent; it learns preferences into the memory graph 

1551 # and drafts/schedules posts via the existing social_bp posting 

1552 # infrastructure. Never auto-publishes — every post requires a 

1553 # final user approval tap, same as the icebreaker flow. 

1554 'slug': 'social_media_curator_agent', 

1555 # Same rationale as encounter_icebreaker_agent: reuse the 

1556 # registered 'content_gen' type (goal_manager.py:1093) rather 

1557 # than inventing an unregistered 'social' type that would fail 

1558 # seed_bootstrap_goals silently. Curator behavior lives in 

1559 # config.persona_kind + config.constitutional_gates. 

1560 'goal_type': 'content_gen', 

1561 'title': 'Social Media Curator', 

1562 'description': ( 

1563 'Help the user curate, caption, and schedule social-media ' 

1564 'posts via natural conversation. ' 

1565 '1) Listen to user voice/text feedback on candidate media ' 

1566 '("this one\'s cool, that one skip, caption with a hiking ' 

1567 'vibe, post Friday morning"), ' 

1568 '2) Save user preferences via remember() under namespace ' 

1569 'media_agent_prefs so future sessions carry forward, ' 

1570 '3) Use the portrait auto-arranger scorer for aesthetic ' 

1571 'and diversity ordering, ' 

1572 '4) Draft captions + platform-specific copy via the main ' 

1573 'LLM with cultural_wisdom_filter, ' 

1574 '5) Stage scheduled posts via the existing social_bp ' 

1575 'posting API — NEVER auto-publish; user approves each one. ' 

1576 '6) Respect platform mix: no single channel dominates ' 

1577 'without user opt-in.' 

1578 ), 

1579 'config': { 

1580 'autonomous': False, 

1581 'continuous': True, 

1582 'persona_kind': 'media-curator', 

1583 'persona_name': 'Media Curator', 

1584 'audience': 'adult', 

1585 'cadence': 'event', 

1586 'priority': 5, 

1587 'nunba_agent_id': 'local_media_curator', 

1588 'require_consent': True, 

1589 'no_autosend': True, 

1590 'constitutional_gates': [ 

1591 'consent_required', 

1592 'no_autosend', 

1593 'cultural_wisdom_filter', 

1594 ], 

1595 }, 

1596 'spark_budget': 100, 

1597 'use_product': True, 

1598 }, 

1599] 

1600 

1601# ─── Loophole → Remediation Goal Map ─── 

1602 

# Maps a flywheel-loophole `type` (as reported in IPService.get_loop_health()
# under 'flywheel_loopholes') to the remediation goal template that
# auto_remediate_loopholes() instantiates via GoalManager.create_goal.
#
# Every template's config carries a 'remediation' marker key: that marker is
# what the cooldown dedupe in auto_remediate_loopholes() matches on, so it
# must equal the map key exactly or the goal will be re-created every tick.
LOOPHOLE_REMEDIATION_MAP = {
    # World model / latent dynamics missing entirely — bootstrap from peers.
    'cold_start': {
        'goal_type': 'ip_protection',
        'title': 'Remediate Cold Start: Bootstrap HiveMind',
        'description': (
            'Cold start detected: world model or latent dynamics unavailable. '
            'Use verify_self_improvement_loop to diagnose. '
            'Initiate HiveMind bootstrap: connect to seed peers for '
            'tensor fusion to acquire instant collective knowledge.'
        ),
        'config': {'mode': 'monitor', 'remediation': 'cold_start'},
        'spark_budget': 100,
    },
    # Not enough nodes/goals — the only marketing-typed remediation.
    'single_node': {
        'goal_type': 'marketing',
        'title': 'Remediate Single Node: Grow Network',
        'description': (
            'Insufficient nodes or goal volume detected. '
            'Create targeted awareness campaigns to grow the network. '
            'More nodes = more learning = better world model. '
            'Focus on developer communities and AI enthusiasts first.'
        ),
        'config': {
            'goal_sub_type': 'growth',
            'channels': ['platform', 'twitter', 'linkedin'],
            'remediation': 'single_node',
        },
        'spark_budget': 200,
    },
    # Experience queue backlog — a coding goal pointed at the bridge module.
    'feedback_staleness': {
        'goal_type': 'coding',
        'title': 'Remediate Feedback Staleness: Fix Flush Pipeline',
        'description': (
            'Experience queue backing up — flush pipeline bottleneck. '
            'Analyze world_model_bridge._flush_to_world_model for batch '
            'size issues. Consider adding worker threads or increasing '
            'flush frequency. Report findings.'
        ),
        'config': {
            # Empty repo_url: presumably resolved to the default repo by the
            # coding goal handler — TODO confirm against GoalManager.
            'repo_url': '',
            'repo_branch': 'main',
            'target_path': 'integrations/agent_engine/world_model_bridge.py',
            'remediation': 'feedback_staleness',
        },
        'spark_budget': 150,
    },
    # Recipe reuse below threshold — add versioning/staleness checks.
    'recipe_drift': {
        'goal_type': 'coding',
        'title': 'Remediate Recipe Drift: Version-Aware Validation',
        'description': (
            'Recipe reuse rate below threshold. '
            'Add recipe versioning with deterministic staleness check. '
            'Stale recipes should trigger re-creation rather than blind replay. '
            'Check prompts/ directory for outdated recipes.'
        ),
        'config': {
            'repo_url': '',
            'repo_branch': 'main',
            'target_path': 'prompts/',
            'remediation': 'recipe_drift',
        },
        'spark_budget': 150,
    },
    # Filters blocking more skills than they distribute — monitor-only review.
    'guardrail_drift': {
        'goal_type': 'ip_protection',
        'title': 'Remediate Guardrail Drift: Review Filter Thresholds',
        'description': (
            'More skills blocked than distributed. '
            'Guardrail filters may be too restrictive. '
            'Use verify_self_improvement_loop to quantify impact. '
            'Recommend threshold adjustments while maintaining safety.'
        ),
        'config': {'mode': 'monitor', 'remediation': 'guardrail_drift'},
        'spark_budget': 100,
    },
    # Peer gossip layer partitioned or under-populated — monitor-only.
    'gossip_partition': {
        'goal_type': 'ip_protection',
        'title': 'Remediate Gossip Partition: Network Health',
        'description': (
            'HiveMind agents insufficient or gossip partition detected. '
            'Monitor network topology and peer connectivity. '
            'Report partition boundaries and suggest recovery strategy.'
        ),
        'config': {'mode': 'monitor', 'remediation': 'gossip_partition'},
        'spark_budget': 100,
    },
    # Federation convergence stalled — the only federation-typed remediation.
    'learning_stall': {
        'goal_type': 'federation',
        'title': 'Remediate Learning Stall: Adjust Aggregation',
        'description': (
            'Federation convergence below threshold. '
            'Check peer learning health for diverging nodes. '
            'Trigger manual sync and report anomalies. '
            'May need to adjust aggregation weights or flush frequency.'
        ),
        'config': {'mode': 'monitor', 'remediation': 'learning_stall'},
        'spark_budget': 100,
    },
}

1702 

1703 

def seed_bootstrap_goals(db, platform_product_id: Optional[str] = None) -> int:
    """Seed initial bootstrap goals if not already present. Returns count created.

    Idempotent across status: checks for existing goals (any status) with a
    matching bootstrap_slug. Previously the check only considered
    ['active', 'paused'] — so when a bootstrap goal was marked `completed`
    by the daemon (the false-positive completion bug, #2026-04-29) the
    next reseed would create a fresh duplicate. After many reboots the
    dashboard showed the same goal 8-10× under "Completed".

    Reactivation policy: if a `completed` row exists for a slug, flip it
    back to `active` (cheaper than insert + cleaner audit trail) instead
    of creating a duplicate. Bootstrap goals are conceptually persistent —
    they should be re-armed, not re-instanced.

    Args:
        db: SQLAlchemy session (caller owns transaction)
        platform_product_id: Optional Product.id for marketing goals
    """
    from .goal_manager import GoalManager
    from integrations.social.models import AgentGoal

    # Load EVERY existing bootstrap-slugged goal regardless of status, so
    # `completed` rows count as "already seeded" instead of being treated
    # as missing → duplicate-spammed on reseed.
    existing_goals = db.query(AgentGoal).all()
    existing_by_slug: dict = {}
    for g in existing_goals:
        cfg = g.config_json or {}
        slug = cfg.get('bootstrap_slug')
        if slug:
            existing_by_slug[slug] = g

    count = 0
    reactivated = 0
    for goal_data in SEED_BOOTSTRAP_GOALS:
        slug = goal_data['slug']
        existing = existing_by_slug.get(slug)
        if existing is not None:
            # Re-arm a previously-completed bootstrap so the daemon picks
            # it up again, rather than creating a duplicate row.
            if existing.status == 'completed':
                existing.status = 'active'
                # Copy BEFORE mutating: popping keys on the loaded dict and
                # assigning the same object back leaves a plain JSON column's
                # flush-time comparison seeing old == new, so SQLAlchemy emits
                # no UPDATE and the completed_at/noop cleanup is silently lost.
                cfg = dict(existing.config_json or {})
                cfg.pop('completed_at', None)
                cfg.pop('noop_dispatch_count', None)
                existing.config_json = cfg
                reactivated += 1
            # Already-active / paused / archived rows: leave as-is.
            continue

        # Copy the template config so the module-level constant is never
        # mutated, then stamp the slug used for idempotency checks above.
        config = dict(goal_data['config'])
        config['bootstrap_slug'] = slug

        product_id = platform_product_id if goal_data.get('use_product') else None

        result = GoalManager.create_goal(
            db,
            goal_type=goal_data['goal_type'],
            title=goal_data['title'],
            description=goal_data['description'],
            config=config,
            product_id=product_id,
            spark_budget=goal_data['spark_budget'],
            created_by='system_bootstrap',
        )
        if result.get('success'):
            count += 1
        else:
            # Lazy %-args: skip string formatting unless DEBUG is enabled.
            logger.debug("Bootstrap goal '%s' skipped: %s", slug, result.get('error'))

    if count or reactivated:
        db.flush()
    if reactivated:
        logger.info("seed_bootstrap_goals: reactivated %d completed bootstrap goal(s)", reactivated)
    return count

1780 

1781 

# Cooldown window for re-creating remediation goals after one has fired
# (regardless of completion status). The dashboard incident on 2026-04-29
# showed `Remediate Cold Start` + `Remediate Single Node` firing every
# 2-5 minutes for hours because the prior pair was instantly marked
# `completed` and the active-only check missed them. 1 hour matches the
# rate at which an underlying loophole could realistically be re-resolved
# by an agent run; tighter intervals just spam the dashboard.
REMEDIATION_COOLDOWN_MINUTES: int = 60

1790 

1791 

def auto_remediate_loopholes(db) -> int:
    """Check flywheel loopholes and create remediation goals for severe ones.

    Only creates goals for loopholes with severity >= 'high' AND no existing
    remediation goal for that loophole type within the cooldown window —
    counting completed/archived goals too, not just active/paused (the
    flap bug prior to 2026-04-29).

    Args:
        db: SQLAlchemy session (caller owns transaction)

    Returns:
        Number of remediation goals created
    """
    from datetime import datetime, timedelta
    from .goal_manager import GoalManager
    from .ip_service import IPService
    from integrations.social.models import AgentGoal

    # Health probe is best-effort: a failing IPService must never break
    # the daemon tick that calls us.
    try:
        health = IPService.get_loop_health()
    except Exception as e:
        logger.debug("Loop health check failed: %s", e)
        return 0

    loopholes = health.get('flywheel_loopholes', [])
    if not loopholes:
        return 0

    cutoff = datetime.utcnow() - timedelta(minutes=REMEDIATION_COOLDOWN_MINUTES)

    # Two complementary lookups:
    # 1) Anything currently active or paused — long-running remediation
    #    that hasn't completed yet.
    # 2) Anything CREATED within the cooldown window regardless of status —
    #    catches the flap pattern where a completed remediation would
    #    otherwise be re-instanced every tick.
    blocking_goals = db.query(AgentGoal).filter(
        (AgentGoal.status.in_(['active', 'paused']))
        | (AgentGoal.created_at >= cutoff)
    ).all()
    recent_remediations = set()
    for g in blocking_goals:
        cfg = g.config_json or {}
        rem = cfg.get('remediation')
        if rem:
            recent_remediations.add(rem)

    count = 0
    skipped_by_cooldown = []
    for loophole in loopholes:
        severity = loophole.get('severity', 'low')
        if severity not in ('critical', 'high'):
            continue

        loophole_type = loophole.get('type', '')
        if loophole_type in recent_remediations:
            skipped_by_cooldown.append(loophole_type)
            continue  # Cooldown — already has goal in flight or in last hour

        template = LOOPHOLE_REMEDIATION_MAP.get(loophole_type)
        if not template:
            continue

        result = GoalManager.create_goal(
            db,
            goal_type=template['goal_type'],
            title=template['title'],
            description=template['description'],
            # Copy the template config instead of passing it by reference:
            # any downstream mutation of the config would otherwise corrupt
            # the shared LOOPHOLE_REMEDIATION_MAP entry for every later call.
            # seed_bootstrap_goals already copies; keep both paths consistent.
            config=dict(template['config']),
            spark_budget=template['spark_budget'],
            created_by='auto_remediation',
        )
        if result.get('success'):
            count += 1
            # Guard against duplicate goals when the same loophole type
            # appears more than once in a single health report.
            recent_remediations.add(loophole_type)
            logger.info("Auto-remediation: created goal for '%s' loophole", loophole_type)

    if skipped_by_cooldown:
        logger.debug(
            "Auto-remediation: cooldown-suppressed %d loophole(s): %s",
            len(skipped_by_cooldown), sorted(set(skipped_by_cooldown)))
    if count:
        db.flush()
    return count