Coverage for integrations / agent_engine / hive_contest.py: 81.3%

155 statements  

« prev     ^ index     » next       coverage.py v7.14.0, created at 2026-05-12 04:49 +0000

1""" 

2Hive Contest — a public, humans-first onramp for developers to plug in 

3Claude Code / their own agents to HARTOS and get credited for the 

4intelligence they donate back to the hive. 

5 

6Design principle (single converging path): 

7 The contest is NOT a parallel scoring system. It aggregates 

8 already-canonical sources of signal: 

9 

10 ResonanceService.award_spark — wallet book (90/9/1 split) 

11 GamificationService.get_season_leaderboard 

12 — season pulse + spark 

13 HiveBenchmarkProver.get_leaderboard — prover node scores 

14 AppMarketplace — recipe publications 

15 PeerNode.gpu_hours_served — compute donated 

16 AgentGoal.spark_spent — goal-work receipts 

17 

18 A contest "score" is a weighted sum of those existing metrics over 

19 the contest window. Adding a new scoring axis = adding a term here, 

20 NEVER a new table, never a shadow ledger. 

21 

22Contest tracks (humans-first, physical-world-ready): 

23 

24 DIGITAL — recipes, agents, tools, integrations (the default 

25 on-ramp for Claude Code users). 

26 EMBODIED — physical-task recipes executable on robots via 

27 integrations.robotics.intelligence_api. 

28 HUMAN_WELLNESS — agents with measurable human-wellness delta, 

29 verified by the existing guardrails 

30 (security.hive_guardrails enforces that every 

31 contribution's outcome is attested against wellness, 

32 not engagement). 

33 

34Humans are always in control — mirrors the same invariant the 

35HiveCircuitBreaker enforces. The contest exists to serve humans; 

36any submission that fails the guardrail check cannot score. 

37""" 

38 

39from __future__ import annotations 

40 

41import logging 

42import os 

43from dataclasses import dataclass 

44from datetime import datetime, timedelta, timezone 

45from enum import Enum 

46from typing import Any, Dict, List, Optional 

47 

48logger = logging.getLogger(__name__) 

49 

50 

51# ─── Tracks ──────────────────────────────────────────────────────────── 

52 

class ContestTrack(str, Enum):
    """Contest track identifiers.

    str-valued so members serialize directly into JSON payloads and
    compare equal to their plain-string API form (e.g. 'digital').
    """

    DIGITAL = 'digital'                # recipes, agents, tools, integrations
    EMBODIED = 'embodied'              # physical-task recipes on robots
    HUMAN_WELLNESS = 'human_wellness'  # wellness-attested agent outcomes

57 

58 

59# ─── Public canonical URL — single source of truth ──────────────────── 

60# 

61# Every workflow that wants to send a user to "the contest page" reads 

62# this value: Quest's weekly post, the Contest Curator agent, the 

63# Claude-Code MCP onramp snippet, the local /hive-contest footer, the 

64# docs build, the channel inbox card. Override via env for staging / 

65# preview deployments. 

66# 

67# The docs page at https://docs.hevolve.ai/hive-contest/ now redirects 

68# to this canonical app URL via a meta-refresh in docs/hive-contest.md 

69# so older links from posts / blogs still land on the live page. 

70 

71DEFAULT_CONTEST_PUBLIC_URL = 'https://hevolve.ai/hive_contest' 

72 

73 

def get_contest_public_url() -> str:
    """Canonical hosted contest URL (env-overridable).

    Workflows MUST go through this function instead of hardcoding the
    URL — that way a single env var swaps the destination across every
    surface (Quest's posts, Curator's chat replies, the local UI page
    footer, the docs site).
    """
    override = os.environ.get('HEVOLVE_CONTEST_PUBLIC_URL', '').strip()
    if override:
        return override
    return DEFAULT_CONTEST_PUBLIC_URL

86 

87 

88# Score weights per track — tunable without schema changes. 

89# 

90# DIGITAL: skewed toward published artifacts (recipes, agents) so the 

91# Claude-Code-user-who-ships gets rewarded, not just the 

92# Claude-Code-user-who-queries. 

93# EMBODIED: weighted on successful-robot-episode count so real-world 

94# utility wins over video demos. 

95# HUMAN_WELLNESS: weighted on wellness-attested outcomes from the 

96# attribution chain (agent_attribution.py's success_score is 

97# necessary but not sufficient — the human-wellness flag must be 

98# true AND the wellness metric must actually move). 

99# 

100# Sum per track MUST normalize at rendering time so the leaderboard 

101# across tracks is comparable. 

102 

# Weighted-sum terms per track; keys here must have a matching entry in
# _event_weight's key_map / _track_event_source_types' reverse map.
SCORE_WEIGHTS: Dict[ContestTrack, Dict[str, float]] = {
    ContestTrack.DIGITAL: {
        'recipes_published': 100.0,   # shipped artifacts dominate
        'agents_adopted': 50.0,
        'benchmarks_proven': 25.0,
        'season_spark': 1.0,          # baseline: every wallet Spark counts
        'ideas_submitted': 10.0,
    },
    ContestTrack.EMBODIED: {
        'robot_episodes_success': 75.0,  # real-world execution dominates
        'robot_skills_registered': 40.0,
        'gpu_hours_served': 5.0,
        'season_spark': 1.0,
        'ideas_submitted': 10.0,
    },
    ContestTrack.HUMAN_WELLNESS: {
        'wellness_outcomes_attested': 120.0,  # attested outcomes dominate
        'human_corrections_accepted': 30.0,
        'benchmarks_proven': 15.0,
        'season_spark': 1.0,
        'ideas_submitted': 10.0,
    },
}

126 

127 

128# ─── Contest window ─────────────────────────────────────────────────── 

129 

130# Defaults chosen so a fresh clone without env overrides renders a 

131# meaningful contest that opens "now" and runs for 30 days. Deployments 

132# override via HEVOLVE_CONTEST_START / HEVOLVE_CONTEST_END (ISO-8601). 

133 

134def _parse_env_date(var: str) -> Optional[datetime]: 

135 raw = os.environ.get(var) 

136 if not raw: 

137 return None 

138 try: 

139 # Accept YYYY-MM-DD, YYYY-MM-DDTHH:MM, and full ISO-8601 

140 raw = raw.strip() 

141 if raw.endswith('Z'): 

142 raw = raw[:-1] + '+00:00' 

143 return datetime.fromisoformat(raw) 

144 except ValueError: 

145 logger.warning(f'Invalid {var}={raw!r} — ignoring') 

146 return None 

147 

148 

def get_contest_window() -> Dict[str, datetime]:
    """Resolve the active contest window.

    Env-driven via HEVOLVE_CONTEST_START / HEVOLVE_CONTEST_END; when
    unset, the window opens now (UTC) and runs for 30 days so a fresh
    clone still renders a live contest.
    """
    window_start = _parse_env_date('HEVOLVE_CONTEST_START')
    if window_start is None:
        window_start = datetime.now(timezone.utc)
    # datetimes are always truthy, so `or` is a safe None-fallback here.
    window_end = _parse_env_date('HEVOLVE_CONTEST_END') or (
        window_start + timedelta(days=30)
    )
    return {'start': window_start, 'end': window_end}

157 

158 

159# ─── Contest info (static rules + onramp) ───────────────────────────── 

160 

def get_contest_info() -> Dict[str, Any]:
    """Public contest metadata — rules, tracks, prizes, Claude Code
    onramp snippet. Rendered by /api/hive/contest/info and by
    docs/hive-contest.md build step.

    Returns a JSON-ready dict. The only non-static parts are the
    env-derived window timestamps and the canonical public URL; there
    is no DB access here.
    """
    window = get_contest_window()
    public_url = get_contest_public_url()
    return {
        'name': 'Hive Contest — Open Beta',
        'tagline': (
            'Plug your Claude Code into HARTOS. Score by making humans '
            'actually better off — in pixels or in the physical world.'
        ),
        'humans_first_principle': (
            'Every submission is attested against human-wellness by the '
            'constitutional guardrail. A flashy agent that ignores '
            'human outcomes scores zero. Humans are always in control.'
        ),
        'co_creation_principle': (
            'We are a startup constrained by resources to validate every '
            'feature alone — so we co-create with the community. You can '
            'trust the open code, the public ledger of every Spark, the '
            'crowdsourced compute economy, and the constitutional '
            'guardrails — even if you do not know the strangers shipping '
            'work alongside you. The system is the trust. Share the '
            'contest with friends and family who have hardware skills, '
            'a domain to embody, or a wellness intent to ship.'
        ),
        'public_url': public_url,
        'starts_at': window['start'].isoformat(),
        'ends_at': window['end'].isoformat(),
        # One entry per ContestTrack member, in declaration order.
        'tracks': [
            {
                'id': ContestTrack.DIGITAL.value,
                'name': 'Digital Intelligence',
                'description': (
                    'Recipes, agents, tools, and integrations that make '
                    'other humans (and their agents) more effective in '
                    'the digital surface they already use.'
                ),
                'example_contributions': [
                    'Publish a CREATE→REUSE recipe to the app_marketplace',
                    'Ship an expert agent to the expert_agents network',
                    'Prove a benchmark lift on the public leaderboard',
                    # NOTE: implicit concatenation — the next four string
                    # literals form ONE list entry.
                    'Wrap any vendor SDK as a hive tool (cloud APIs, '
                    'data sources, payment rails, vector DBs, etc.) — '
                    'startup-constrained team needs the community to '
                    'cover the long tail of integrations',
                ],
            },
            {
                'id': ContestTrack.EMBODIED.value,
                'name': 'Embodied Skill',
                'description': (
                    'Physical-world task recipes executable on robots via '
                    'the universal intelligence API. The only track '
                    'with real gravity, real consequences, and real '
                    'useful work — bright future for humans requires '
                    'leaving the screen.'
                ),
                'example_contributions': [
                    'Register a robot skill via intelligence_api',
                    'Submit a verified embodied episode (success-rate ≥ 0.7)',
                    'Port an existing digital recipe to an embodied adapter',
                    'Bridge any BLE / USB / serial hardware that ships '
                    'an SDK — EEG headsets, smart-home sensors, medical '
                    'devices, accessibility hardware. The hive needs '
                    'these integrations to perceive the real world.',
                    'Publish an SDK adapter for a robot platform '
                    '(LeRobot, ROS, Unitree, Spot, custom arms) so '
                    'embodied recipes execute on more bodies',
                ],
            },
            {
                'id': ContestTrack.HUMAN_WELLNESS.value,
                'name': 'Human Wellness',
                'description': (
                    'Agents whose outcome the existing guardrail attests '
                    'as making a human measurably better off — longer '
                    'focus, calmer sleep, less chore time, clearer '
                    'decisions. Not engagement. Not activity. '
                    'Wellness.'
                ),
                'example_contributions': [
                    'Ship a companion agent with human-wellness evidence',
                    'Publish a daily-check recipe with a pre/post metric',
                    'Bring a human-facing agent to the app marketplace',
                ],
            },
        ],
        # Copy of SCORE_WEIGHTS keyed by plain strings for JSON clients.
        'score_weights': {
            t.value: dict(w) for t, w in SCORE_WEIGHTS.items()
        },
        'how_to_join': [
            # Steps 0, 3, 4, 5 each span several implicitly-concatenated
            # string literals (no comma between them).
            f'0) Open the contest page: {public_url} '
            ' (or talk to the Contest Curator agent inside Nunba — '
            ' say "I have a contest idea" to get walked through it).',
            '1) Install Nunba / HART OS from https://docs.hevolve.ai/downloads/',
            ' or clone https://github.com/hertz-ai/HARTOS and run locally.',
            '2) Point your Claude Code at the local HARTOS MCP server:',
            ' {"mcp":{"hartos":{"command":"hart","args":["mcp","serve"]}}}',
            '3) Register for the contest: '
            ' POST /api/hive/contest/join { track: "digital" | "embodied" | "human_wellness" }',
            '4) Ship. Publish a recipe, register a robot skill, wrap '
            ' a vendor SDK, bridge a BLE/EEG/hardware device, or '
            ' run an agent whose outcome the guardrail attests as '
            ' human-positive. Every scoring event lands in your '
            ' wallet as season_spark — which is the leaderboard.',
            f'5) Share {public_url} with one friend or family member '
            ' who has hardware skills, a domain to embody, or a '
            ' wellness intent to ship. The hive is sized by who '
            ' shows up; co-creation beats solo every time.',
        ],
        'prize_model': {
            'spark_split_90_9_1': (
                'Every prize Spark follows the canonical 90/9/1 split — '
                '90% to the submitter, 9% to the infra node(s) that '
                'ran the submission, 1% to the central hive. Same '
                'split as every other Spark transaction; no contest-'
                'specific accounting.'
            ),
            'recognition': 'Top 3 per track auto-featured on docs.hevolve.ai',
        },
    }

284 

285 

286# ─── Scoring ─────────────────────────────────────────────────────────── 

287 

288# Canonical event types — align with ResonanceService.award_spark source_type 

289# so the transaction ledger stays grep-able. 

# Validation set for score_event: only these event names ever score.
EVENT_TYPES = frozenset({
    'recipe_published',
    'agent_adopted',
    'benchmark_proven',
    'robot_episode_success',
    'robot_skill_registered',
    'gpu_hour_served',
    'wellness_outcome_attested',
    'human_correction_accepted',
    'idea_submitted',
})

301 

302 

def _event_weight(event_type: str, track: ContestTrack) -> float:
    """Look up the Spark weight of *event_type* under *track*.

    Events a track does not reward resolve to 0.0 — intentional, so
    cross-track spam doesn't double-score.
    """
    # singular event name -> plural SCORE_WEIGHTS key
    weight_key = {
        'recipe_published': 'recipes_published',
        'agent_adopted': 'agents_adopted',
        'benchmark_proven': 'benchmarks_proven',
        'robot_episode_success': 'robot_episodes_success',
        'robot_skill_registered': 'robot_skills_registered',
        'gpu_hour_served': 'gpu_hours_served',
        'wellness_outcome_attested': 'wellness_outcomes_attested',
        'human_correction_accepted': 'human_corrections_accepted',
        'idea_submitted': 'ideas_submitted',
    }.get(event_type)
    if weight_key is None:
        return 0.0
    track_weights = SCORE_WEIGHTS.get(track, {})
    return float(track_weights.get(weight_key, 0.0))

322 

323 

def score_event(
    db,
    user_id: str,
    event_type: str,
    track: ContestTrack = ContestTrack.DIGITAL,
    multiplier: float = 1.0,
    source_id: Optional[str] = None,
    description: str = '',
) -> int:
    """Award contest Spark for a scoring event.

    Thin wrapper over ResonanceService.award_spark — this function
    exists only to (a) keep the weight-lookup logic in one place and
    (b) normalize the source_type string ('contest:<event>') so the
    transaction log is filterable.

    Returns the Spark amount awarded, or 0 when the event is unknown,
    the track doesn't reward it, the amount rounds to 0, or the
    resonance backend is unavailable/raises.
    """
    if event_type not in EVENT_TYPES:
        logger.debug(f'score_event ignored unknown event_type={event_type!r}')
        return 0

    # Negative multipliers are clamped to zero — contest Spark only adds.
    effective_mult = max(0.0, multiplier)
    spark_amount = int(round(_event_weight(event_type, track) * effective_mult))
    if spark_amount <= 0:
        return 0

    try:
        from integrations.social.resonance_engine import ResonanceService
    except ImportError:
        logger.debug('ResonanceService unavailable — contest scoring skipped')
        return 0

    try:
        ResonanceService.award_spark(
            db, user_id, spark_amount,
            source_type=f'contest:{event_type}',
            source_id=source_id,
            description=description or f'contest {track.value}: {event_type}',
        )
    except Exception as err:  # scoring must never crash the caller
        logger.debug(f'award_spark failed: {err}')
        return 0
    return spark_amount

370 

371 

372# ─── Leaderboard ─────────────────────────────────────────────────────── 

373 

@dataclass
class LeaderboardEntry:
    """One ranked row of the contest leaderboard."""

    rank: int
    user_id: str
    display_name: str
    score: int
    track: str

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict for JSON responses."""
        return dict(
            rank=self.rank,
            user_id=self.user_id,
            display_name=self.display_name,
            score=self.score,
            track=self.track,
        )

390 

391 

def get_leaderboard(
    db,
    track: Optional[ContestTrack] = None,
    limit: int = 50,
) -> List[Dict[str, Any]]:
    """Return ranked leaderboard for a track (or overall if None).

    Reuses GamificationService.get_current_season +
    get_season_leaderboard so we share one wallet-backed table with
    the rest of the product. The `track` filter is applied by
    counting transactions whose source_type starts with
    'contest:<event>' and weighting per track — same logic as
    score_event but aggregated.

    NOTE(review): the per-track sum covers ALL matching transactions
    for a user, with no date filter to the season/contest window —
    confirm whether pre-window contest Spark should count.
    """
    try:
        from integrations.social.gamification_service import GamificationService
    except ImportError:
        logger.debug('GamificationService unavailable')
        return []

    season = GamificationService.get_current_season(db)
    if not season:
        # No active season — nothing to rank.
        return []
    season_id = season['id']

    rows = GamificationService.get_season_leaderboard(db, season_id, limit=limit)
    if track is None:
        # Overall view: spark + pulse combined, order as returned by
        # the gamification service (no re-sort here).
        return [
            {**row, 'score': row['season_spark'] + row.get('season_pulse', 0),
             'track': 'overall'}
            for row in rows
        ]

    try:
        from integrations.social.models import ResonanceTransaction
    except ImportError:
        # Non-standard models layout — return overall to stay useful
        return [
            {**row, 'score': row['season_spark'], 'track': track.value}
            for row in rows
        ]

    # Per-track refinement — only credit Spark whose transaction
    # source_type is one of the track's rewarded events.
    tracked = _track_event_source_types(track)
    enriched: List[Dict[str, Any]] = []
    for row in rows:
        # One ledger query per row; `rows` is capped at `limit`.
        total = db.query(ResonanceTransaction).filter(
            ResonanceTransaction.user_id == row['user_id'],
            ResonanceTransaction.currency == 'spark',
            ResonanceTransaction.source_type.in_(tracked),
        ).with_entities(ResonanceTransaction.amount).all()
        track_score = sum(int(r[0] or 0) for r in total)
        enriched.append({**row, 'score': max(0, track_score), 'track': track.value})

    # Re-rank by the refined per-track score, 1-based.
    enriched.sort(key=lambda r: -r['score'])
    for i, row in enumerate(enriched, start=1):
        row['rank'] = i
    return enriched

451 

452 

def _track_event_source_types(track: ContestTrack) -> List[str]:
    """Return the ResonanceTransaction.source_type values that count
    toward the given track — the mirror of SCORE_WEIGHTS key_map."""
    # plural SCORE_WEIGHTS key -> ledger source_type string
    key_to_source = {
        'recipes_published': 'contest:recipe_published',
        'agents_adopted': 'contest:agent_adopted',
        'benchmarks_proven': 'contest:benchmark_proven',
        'robot_episodes_success': 'contest:robot_episode_success',
        'robot_skills_registered': 'contest:robot_skill_registered',
        'gpu_hours_served': 'contest:gpu_hour_served',
        'wellness_outcomes_attested': 'contest:wellness_outcome_attested',
        'human_corrections_accepted': 'contest:human_correction_accepted',
        'ideas_submitted': 'contest:idea_submitted',
    }
    # Preserve the track's weight-key ordering in the result.
    sources: List[str] = []
    for weight_key in SCORE_WEIGHTS[track]:
        source_type = key_to_source.get(weight_key)
        if source_type is not None:
            sources.append(source_type)
    return sources

468 

469 

470# ─── Participant registration ───────────────────────────────────────── 

471 

def register_participant(
    db,
    user_id: str,
    track: ContestTrack = ContestTrack.DIGITAL,
    github_handle: Optional[str] = None,
    email: Optional[str] = None,
) -> Dict[str, Any]:
    """Register a user for the contest. Idempotent.

    Implementation: award a single welcome Spark (1) with source_type
    'contest:joined' — the ledger entry IS the registration record.
    Second call sees a prior transaction with the same (user_id,
    source_type) and no-ops. Zero new tables.

    Args:
        db: session passed through to query()/award_spark (presumably
            SQLAlchemy — TODO confirm against ResonanceService).
        user_id: wallet owner to register.
        track: contest track; recorded only in the ledger description.
        github_handle/email: optional contact info, truncated to 60
            chars each and embedded in the ledger description.

    Returns:
        {'ok': bool, ...}; success includes 'already_registered', and
        failure includes 'reason'.

    NOTE(review): the idempotency check ignores `track`, and the
    already-registered response echoes the track passed to THIS call,
    not the one recorded at first registration — confirm intended.
    """
    try:
        from integrations.social.resonance_engine import ResonanceService
        from integrations.social.models import ResonanceTransaction
    except ImportError:
        logger.debug('resonance not available — cannot register participant')
        return {'ok': False, 'reason': 'resonance_unavailable'}

    # The ledger lookup doubles as the registration record.
    already = db.query(ResonanceTransaction).filter(
        ResonanceTransaction.user_id == user_id,
        ResonanceTransaction.source_type == 'contest:joined',
    ).first()
    if already:
        return {
            'ok': True, 'already_registered': True,
            'joined_at': already.created_at.isoformat() if already.created_at else None,
            'track': track.value,
        }

    # Contact info rides in the (truncated) description field so the
    # wallet ledger stays the single source of truth.
    desc_parts = [f'track={track.value}']
    if github_handle:
        desc_parts.append(f'github={github_handle[:60]}')
    if email:
        desc_parts.append(f'email={email[:60]}')
    try:
        ResonanceService.award_spark(
            db, user_id, 1,
            source_type='contest:joined',
            source_id=None,
            description=' '.join(desc_parts),
        )
    except Exception as exc:
        logger.debug(f'participant registration award_spark failed: {exc}')
        return {'ok': False, 'reason': str(exc)}

    return {
        'ok': True,
        'already_registered': False,
        'track': track.value,
    }

525 

526 

527# ─── Idea submissions ───────────────────────────────────────────────── 

528 

529# Ideas are just SocialPosts with content_type='contest_idea' — we 

530# deliberately reuse the social Post/vote/comment infrastructure so 

531# every idea gets discovery, ranking, and discussion for free. No 

532# shadow table. The source_channel field carries the track. 

533 

534IDEA_CONTENT_TYPE = 'contest_idea' 

535 

536 

def submit_idea(
    db,
    user_id: str,
    title: str,
    description: str,
    track: ContestTrack = ContestTrack.DIGITAL,
    source: str = 'ui',
) -> Dict[str, Any]:
    """Submit a contest idea.

    Pipeline — all on existing infra, no new tables:
      1. ConstitutionalFilter screens title + description
      2. SocialPost row with content_type='contest_idea',
         source_channel='contest:<track>' — feeds, boost, voting,
         comments all just work via the social path.
      3. ResonanceService.award_spark(source_type='contest:idea_submitted')
         — wallet/leaderboard updates land in the existing ledger.
      4. EventBus 'contest.idea_submitted' — hevolve.ai's floating UI
         subscribes via the same pattern as other realtime events.

    Source argument lets us distinguish 'ui' (clicked-page submission),
    'nunba_agent' (user spoke to Nunba's contest curator), and
    'mcp_agent' (Claude Code plugin). Stored in the Spark ledger's
    description so reports can count per-channel.

    Returns {'ok': True, 'post_id', 'track', 'spark_awarded'} on
    success, otherwise {'ok': False, 'reason': ...}.
    """
    # Normalize and bound inputs before anything touches the DB.
    title = (title or '').strip()
    description = (description or '').strip()
    if not title or not description:
        return {'ok': False, 'reason': 'title+description required'}
    if len(title) > 200:
        title = title[:200]
    if len(description) > 4000:
        description = description[:4000]

    # Constitutional gate — contest ideas must still pass guardrails.
    try:
        from security.hive_guardrails import ConstitutionalFilter
        passed, reason = ConstitutionalFilter.check_prompt(
            f'{title}\n\n{description}'
        )
        if not passed:
            logger.info(f'contest idea blocked: {reason}')
            return {'ok': False, 'reason': f'blocked: {reason}'}
    except ImportError:
        # Guardrails module absent in this deployment — proceed unscreened.
        pass

    try:
        from integrations.social.models import SocialPost
    except ImportError:
        return {'ok': False, 'reason': 'social models unavailable'}

    post = SocialPost(
        author_id=str(user_id),
        title=title,
        content=description,
        content_type=IDEA_CONTENT_TYPE,
        source_channel=f'contest:{track.value}',
    )
    try:
        db.add(post)
        # flush (not commit) so the row gets an id; committing is
        # presumably the caller's responsibility — TODO confirm.
        db.flush()
    except Exception as exc:
        logger.debug(f'contest idea post insert failed: {exc}')
        return {'ok': False, 'reason': 'db insert failed'}

    # Award Spark via the canonical event path — NOT a parallel ledger.
    amount = score_event(
        db, user_id=str(user_id),
        event_type='idea_submitted', track=track,
        source_id=getattr(post, 'id', None),
        description=f'idea:{title[:80]} via={source}',
    )

    # Realtime fanout for the Hevolve floating UI. Best-effort: a
    # failed emit never fails the submission.
    try:
        from core.platform.events import emit_event
        emit_event('contest.idea_submitted', {
            'post_id': getattr(post, 'id', None),
            'track': track.value,
            'title': title[:200],
            'preview': description[:180],
            'user_id': str(user_id),
            'source': source,
            'spark_awarded': amount,
        })
    except Exception as exc:
        logger.debug(f'contest idea event emit failed: {exc}')

    return {
        'ok': True,
        'post_id': getattr(post, 'id', None),
        'track': track.value,
        'spark_awarded': amount,
    }

631 

632 

def list_ideas(
    db,
    track: Optional[ContestTrack] = None,
    limit: int = 50,
    since_iso: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """Return recently-submitted contest ideas ordered by score desc.

    The hevolve.ai floating UI calls this for the initial fill; it then
    subscribes to 'contest.idea_submitted' EventBus events for
    incremental drops.

    Args:
        db: session with .query() (presumably SQLAlchemy — TODO confirm).
        track: restrict to one track's source_channel; None = all tracks.
        limit: max rows, clamped to [1, 200]; falsy values default to 50.
        since_iso: optional ISO-8601 lower bound on created_at; an
            unparsable value is silently ignored.
    """
    try:
        from integrations.social.models import SocialPost
    except ImportError:
        return []

    q = db.query(SocialPost).filter(SocialPost.content_type == IDEA_CONTENT_TYPE)
    # Apply the hidden-post filter only when the column exists. The old
    # `...is_(False) if hasattr(...) else True` pattern handed a bare
    # Python `True` to Query.filter(), which SQLAlchemy 1.4 deprecates
    # and 2.0 rejects.
    if hasattr(SocialPost, 'is_hidden'):
        q = q.filter(SocialPost.is_hidden.is_(False))
    if track is not None:
        q = q.filter(SocialPost.source_channel == f'contest:{track.value}')
    if since_iso:
        raw = since_iso.strip()
        # Strip a single trailing 'Z' (rstrip('Z') would eat repeats).
        # Kept naive, matching the previous behavior — created_at is
        # presumably a naive-UTC column; confirm before making aware.
        if raw.endswith('Z'):
            raw = raw[:-1]
        try:
            q = q.filter(SocialPost.created_at >= datetime.fromisoformat(raw))
        except ValueError:
            pass  # unparsable cursor — ignore, return the unfiltered feed
    q = q.order_by(SocialPost.score.desc(), SocialPost.created_at.desc()).limit(
        min(max(1, int(limit or 50)), 200)
    )

    out: List[Dict[str, Any]] = []
    for post in q.all():
        d = post.to_dict() if hasattr(post, 'to_dict') else {
            'id': getattr(post, 'id', None),
            'title': getattr(post, 'title', ''),
            'content': getattr(post, 'content', ''),
            'score': getattr(post, 'score', 0) or 0,
        }
        channel = getattr(post, 'source_channel', '') or ''
        d['track'] = channel.replace('contest:', '') if channel.startswith('contest:') else 'unknown'
        d['preview'] = (d.get('content') or '')[:240]
        out.append(d)
    return out

679 

680 

681# ─── Module-level sugar ──────────────────────────────────────────────── 

682 

def claude_code_mcp_snippet() -> str:
    """Render the canonical 'point Claude Code at HARTOS' MCP config.

    Single source of truth: docs, onboarding, and Quest's weekly post
    all pull from here so the snippet can't drift between surfaces.
    """
    snippet_lines = [
        '# Claude Code -> HARTOS MCP',
        '# Add to ~/.config/claude-code/settings.json',
        '{',
        ' "mcpServers": {',
        ' "hartos": {',
        ' "command": "hart",',
        ' "args": ["mcp", "serve"]',
        ' }',
        ' }',
        '}',
    ]
    return '\n'.join(snippet_lines) + '\n'

698 )