Coverage for integrations / social / thought_experiment_service.py: 86.6%
262 statements
« prev ^ index » next coverage.py v7.14.0, created at 2026-05-12 04:49 +0000
« prev ^ index » next coverage.py v7.14.0, created at 2026-05-12 04:49 +0000
1"""
2Thought Experiment Service — Full lifecycle for constitutional thought experiments.
4PROPOSE → DISCUSS → VOTE → EVALUATE → DECIDE → ARCHIVE
6Both humans and agents vote. ConstitutionalFilter gates all content.
7Core IP experiments require steward approval. Outcomes feed back to
8WorldModelBridge for RL-EF learning.
10Service Pattern: static methods, db: Session, db.flush() not db.commit().
11"""
12import logging
13import uuid
14from datetime import datetime, timedelta
15from typing import Dict, List, Optional
17from sqlalchemy import desc
18from sqlalchemy.orm import Session
20logger = logging.getLogger('hevolve_social')
# ─── Constants ───

# Default phase durations (hours); create_experiment lays the timeline
# out back-to-back from creation: discuss → vote → evaluate.
DISCUSS_DURATION_HOURS = 48
VOTING_DURATION_HOURS = 72
EVALUATION_DURATION_HOURS = 24
# Lifecycle phases in order; advance_status walks this list forward-only.
VALID_STATUSES = ['proposed', 'discussing', 'voting', 'evaluating', 'decided', 'archived']
# Recognized intent categories; anything else falls back to 'technology'.
VALID_INTENT_CATEGORIES = [
    'community', 'environment', 'education', 'health', 'equity', 'technology',
]
# How the final decision is reached; unknown values fall back to 'weighted'.
VALID_DECISION_TYPES = ['majority', 'weighted', 'consensus', 'expert_panel']
VOTE_RANGE = (-2, 2)  # Strongly oppose to strongly support
35class ThoughtExperimentService:
36 """Manages constitutional thought experiment lifecycle."""
38 # ─── Create ───
    @staticmethod
    def create_experiment(db: Session, creator_id: str, title: str,
                          hypothesis: str, expected_outcome: str = '',
                          intent_category: str = 'technology',
                          decision_type: str = 'weighted',
                          is_core_ip: bool = False,
                          parent_experiment_id: Optional[str] = None) -> Optional[Dict]:
        """Create a new thought experiment with linked Post.

        Gates through ConstitutionalFilter. Sets initial timeline.
        Returns experiment dict or None if blocked.

        Args:
            db: Active SQLAlchemy session; changes are flushed, not committed.
            creator_id: ID of the proposing user or agent.
            title: Short experiment title (also used as the Post title).
            hypothesis: The testable claim under discussion.
            expected_outcome: Optional description of the anticipated result.
            intent_category: One of VALID_INTENT_CATEGORIES; invalid values
                silently fall back to 'technology'.
            decision_type: One of VALID_DECISION_TYPES; invalid values
                silently fall back to 'weighted'.
            is_core_ip: Flags experiments that touch core IP.
            parent_experiment_id: Optional ID linking a follow-up experiment
                to its parent.
        """
        # Constitutional filter gate: block content that fails the prompt
        # check. If the security module is absent, the gate is skipped.
        try:
            from security.hive_guardrails import ConstitutionalFilter
            check = ConstitutionalFilter.check_prompt(
                f"{title}: {hypothesis}")
            # check_prompt returns (approved: bool, reason: str)
            approved = check[0] if isinstance(check, tuple) else check.get('approved', True)
            reason = check[1] if isinstance(check, tuple) else check.get('reason', '')
            if not approved:
                logger.info(f"Thought experiment blocked by ConstitutionalFilter: {reason}")
                return None
        except ImportError:
            pass

        # Normalize enum-like inputs instead of rejecting the request.
        if intent_category not in VALID_INTENT_CATEGORIES:
            intent_category = 'technology'
        if decision_type not in VALID_DECISION_TYPES:
            decision_type = 'weighted'

        from .models import ThoughtExperiment, Post

        experiment_id = str(uuid.uuid4())
        now = datetime.utcnow()

        # Create linked Post (visible on feed)
        post = Post(
            author_id=creator_id,
            title=title,
            content=f"**Hypothesis:** {hypothesis}\n\n"
                    f"**Expected Outcome:** {expected_outcome}",
            content_type='thought_experiment',
            is_thought_experiment=True,
            hypothesis=hypothesis,
            expected_outcome=expected_outcome,
            intent_category=intent_category,
        )
        db.add(post)
        db.flush()  # flush so post.id is populated before linking below

        # Create experiment; timeline phases run back-to-back from `now`:
        # discuss (48h) → vote (72h) → evaluate (24h).
        experiment = ThoughtExperiment(
            id=experiment_id,
            post_id=post.id,
            creator_id=creator_id,
            title=title,
            hypothesis=hypothesis,
            expected_outcome=expected_outcome,
            intent_category=intent_category,
            status='proposed',
            decision_type=decision_type,
            voting_opens_at=now + timedelta(hours=DISCUSS_DURATION_HOURS),
            voting_closes_at=now + timedelta(
                hours=DISCUSS_DURATION_HOURS + VOTING_DURATION_HOURS),
            evaluation_deadline=now + timedelta(
                hours=DISCUSS_DURATION_HOURS + VOTING_DURATION_HOURS
                + EVALUATION_DURATION_HOURS),
            is_core_ip=is_core_ip,
            parent_experiment_id=parent_experiment_id,
        )
        db.add(experiment)
        db.flush()

        # Award spark for proposing (best-effort; failure never blocks create)
        try:
            from .resonance_engine import ResonanceService
            ResonanceService.award_action(
                db, creator_id, 'experiment_proposed',
                source_id=experiment_id)
        except Exception:
            pass

        return experiment.to_dict()
125 # ─── Lifecycle Advance ───
127 @staticmethod
128 def advance_status(db: Session, experiment_id: str,
129 target_status: str = None) -> Optional[Dict]:
130 """Advance experiment to next lifecycle phase.
132 Automatic progression: proposed → discussing → voting → evaluating → decided
133 """
134 from .models import ThoughtExperiment
136 experiment = db.query(ThoughtExperiment).filter_by(
137 id=experiment_id).first()
138 if not experiment:
139 return None
141 status_order = VALID_STATUSES
142 current_idx = status_order.index(experiment.status) if experiment.status in status_order else 0
144 if target_status:
145 if target_status not in status_order:
146 return None
147 target_idx = status_order.index(target_status)
148 if target_idx <= current_idx:
149 return None # Can't go backwards
150 experiment.status = target_status
151 else:
152 if current_idx < len(status_order) - 1:
153 experiment.status = status_order[current_idx + 1]
155 db.flush()
156 return experiment.to_dict()
158 # ─── Voting ───
    @staticmethod
    def cast_vote(db: Session, experiment_id: str, voter_id: str,
                  vote_value: int, reasoning: str = '',
                  suggestion: str = '',
                  voter_type: str = 'human',
                  confidence: float = 1.0) -> Optional[Dict]:
        """Cast a vote on a thought experiment.

        Both humans and agents can vote. Agent votes include confidence.
        Vote value: -2 (strongly oppose) to +2 (strongly support).

        Args:
            db: Active SQLAlchemy session (flushed, not committed).
            experiment_id: Target experiment ID.
            voter_id: Voter identity; one vote per (experiment, voter) —
                re-voting updates the existing row in place.
            vote_value: Clamped to VOTE_RANGE before storing.
            reasoning: Optional rationale; constitutionally checked below.
            suggestion: Optional improvement text; earns extra spark.
            voter_type: 'human' or an agent type.
            confidence: Agent self-confidence, clamped to [0, 1]; forced
                to 1.0 for humans.

        Returns:
            The vote dict, an {'error': ...} dict when the phase or voter
            is ineligible, or None when the experiment does not exist.
        """
        from .models import ThoughtExperiment, ExperimentVote

        experiment = db.query(ThoughtExperiment).filter_by(
            id=experiment_id).first()
        if not experiment:
            return None

        # Must be in voting status (or discussing — early votes allowed)
        if experiment.status not in ('discussing', 'voting'):
            return {'error': 'experiment_not_in_voting_phase',
                    'current_status': experiment.status}

        # Context-based voter eligibility check (skipped entirely when the
        # optional voting_rules module is absent).
        try:
            from .voting_rules import check_voter_eligibility
            eligibility = check_voter_eligibility(experiment.to_dict(), voter_type)
            if not eligibility['eligible']:
                # NOTE(review): assumes 'reason' and 'context' keys are always
                # present on an ineligible result — confirm in voting_rules.
                return {'error': 'voter_not_eligible',
                        'reason': eligibility['reason'],
                        'context': eligibility['context']}
        except ImportError:
            pass

        # Clamp vote value
        vote_value = max(VOTE_RANGE[0], min(VOTE_RANGE[1], vote_value))

        # Clamp confidence
        confidence = max(0.0, min(1.0, confidence))
        if voter_type == 'human':
            confidence = 1.0  # human votes always carry full weight

        # Constitutional check on reasoning — advisory: the vote is stored
        # either way, with the result recorded on the row.
        constitutional_ok = True
        if reasoning:
            try:
                from security.hive_guardrails import ConstitutionalFilter
                check = ConstitutionalFilter.check_prompt(reasoning)
                constitutional_ok = check[0] if isinstance(check, tuple) else check.get('approved', True)
            except ImportError:
                pass

        # Check for existing vote (upsert)
        existing = db.query(ExperimentVote).filter_by(
            experiment_id=experiment_id,
            voter_id=voter_id,
        ).first()

        if existing:
            # Update in place; total_votes is NOT re-incremented on re-vote.
            existing.vote_value = vote_value
            existing.reasoning = reasoning
            existing.suggestion = suggestion
            existing.confidence = confidence
            existing.constitutional_check = constitutional_ok
            vote = existing
        else:
            vote = ExperimentVote(
                experiment_id=experiment_id,
                voter_id=voter_id,
                voter_type=voter_type,
                vote_value=vote_value,
                confidence=confidence,
                reasoning=reasoning,
                suggestion=suggestion,
                constitutional_check=constitutional_ok,
            )
            db.add(vote)
            experiment.total_votes = (experiment.total_votes or 0) + 1

        db.flush()

        # Award spark for voting (best-effort; failures never block the vote)
        try:
            from .resonance_engine import ResonanceService
            ResonanceService.award_action(
                db, voter_id, 'experiment_voted',
                source_id=experiment_id)
            if suggestion:
                ResonanceService.award_action(
                    db, voter_id, 'experiment_suggestion',
                    source_id=experiment_id)
        except Exception:
            pass

        return vote.to_dict()
256 # ─── Agent Evaluation ───
    @staticmethod
    def request_agent_evaluation(db: Session, experiment_id: str) -> Dict:
        """Request agent-native iterative evaluation of a thought experiment.

        Creates an AgentGoal with a type-aware iteration recipe. The agent
        loop (autogen group chat) drives hypothesis→execute→score→iterate
        for ALL experiment types — not just software.

        - software: uses autoresearch tools (code edit → run → metric)
        - traditional: uses LLM scoring (propose → evaluate → refine)
        - physical_ai: uses visual context (hypothesis → observe → measure)
        - research: uses web search (search → synthesize → score)

        Returns a dict with 'success' plus either goal info or a 'reason'.
        NOTE(review): the status flip to 'evaluating' is flushed BEFORE the
        goal is created, so a failed goal creation still leaves the
        experiment in 'evaluating' — confirm this is intentional.
        """
        from .models import ThoughtExperiment

        experiment = db.query(ThoughtExperiment).filter_by(
            id=experiment_id).first()
        if not experiment:
            return {'success': False, 'reason': 'not_found'}

        experiment.status = 'evaluating'
        db.flush()

        # Rows may lack experiment_type; treat missing/None as 'traditional'.
        exp_type = getattr(experiment, 'experiment_type', 'traditional') or 'traditional'
        recipe = ThoughtExperimentService._build_iteration_recipe(
            experiment, exp_type, config={})

        # Map experiment_type to goal_type so the right tools get loaded
        goal_type_map = {
            'software': 'autoresearch',
            'code_evolution': 'code_evolution',
        }
        goal_type = goal_type_map.get(exp_type, 'thought_experiment')

        # Create evaluation goal for agent dispatch; attributed to the
        # system agent user when one exists, else the literal 'system'.
        try:
            from integrations.agent_engine.goal_manager import GoalManager
            from .models import User
            system_user = db.query(User).filter_by(
                username='hevolve_system_agent').first()
            user_id = system_user.id if system_user else 'system'

            goal = GoalManager.create_goal(
                db,
                goal_type=goal_type,
                title=f'Evaluate: {experiment.title}',
                description=recipe['description'],
                config={
                    'experiment_id': experiment_id,
                    'experiment_type': exp_type,
                    'iteration_recipe': recipe,
                    'autonomous': True,
                },
                created_by=str(user_id),
            )
            return {
                'success': True,
                'goal_id': goal.get('goal', {}).get('id') if goal else None,
                'experiment_type': exp_type,
                'iteration_strategy': recipe['strategy'],
            }
        except Exception as e:
            logger.debug(f"Agent evaluation goal creation failed: {e}")
            return {'success': False, 'reason': str(e)}
323 @staticmethod
324 def _build_iteration_recipe(experiment, exp_type: str, config: dict = None) -> Dict:
325 """Build a type-aware iteration recipe for the agent loop.
327 The recipe tells the agent HOW to iterate — which tools to use,
328 what constitutes improvement, and when to stop. The agent's own
329 conversation loop (autogen group chat) drives the iteration,
330 not a hardcoded Python while loop.
331 """
332 base_context = (
333 f'Hypothesis: {experiment.hypothesis}\n'
334 f'Expected outcome: {experiment.expected_outcome}\n'
335 f'Intent: {experiment.intent_category}\n'
336 )
338 if exp_type == 'code_evolution':
339 config = config or {}
340 repo_path = config.get('repo_path', '')
341 repo_name = config.get('repo_name', '')
342 target_files = config.get('target_files', [])
343 scope = config.get('scope', 'interfaces')
344 return {
345 'strategy': 'code_evolution',
346 'description': (
347 f'CODE EVOLUTION EXPERIMENT\n\n{base_context}\n'
348 f'REPOSITORY: {repo_name or repo_path or "specified in config"}\n'
349 f'SCOPE: {scope} (agents see signatures, not implementations)\n'
350 f'TARGET FILES: {", ".join(target_files) if target_files else "auto-detected"}\n\n'
351 f'WORKFLOW:\n'
352 f'1. Use the coding tools to edit files in the target repo\n'
353 f'2. The shard engine provides interface-only views for privacy\n'
354 f'3. Validate changes pass tests\n'
355 f'4. Use evaluate_thought_experiment to record findings\n\n'
356 f'TOOLS: coding tools, evaluate_thought_experiment\n\n'
357 f'The repo owner\'s node is the trusted node. '
358 f'Changes are applied locally, then go through the upgrade pipeline.'
359 ),
360 'tools': [
361 'evaluate_thought_experiment',
362 ],
363 'max_iterations': 30,
364 'scoring': 'metric_extraction',
365 }
366 elif exp_type == 'software':
367 return {
368 'strategy': 'autoresearch',
369 'description': (
370 f'ITERATIVE SOFTWARE EXPERIMENT\n\n{base_context}\n'
371 f'LOOP PATTERN: Use launch_experiment_autoresearch to start '
372 f'the code iteration loop. Monitor with get_experiment_research_status. '
373 f'When complete, use evaluate_thought_experiment to record findings.\n\n'
374 f'TOOLS: launch_experiment_autoresearch, get_experiment_research_status, '
375 f'evaluate_thought_experiment\n\n'
376 f'The autoresearch engine handles: code edit → run → metric → keep/revert.'
377 ),
378 'tools': [
379 'launch_experiment_autoresearch',
380 'get_experiment_research_status',
381 'evaluate_thought_experiment',
382 ],
383 'max_iterations': 50,
384 'scoring': 'metric_extraction',
385 }
386 elif exp_type == 'physical_ai':
387 return {
388 'strategy': 'observe_and_measure',
389 'description': (
390 f'ITERATIVE PHYSICAL AI EXPERIMENT\n\n{base_context}\n'
391 f'LOOP PATTERN:\n'
392 f'1. Use iterate_hypothesis to propose a testable physical hypothesis\n'
393 f'2. Observe via visual context tools (camera feed if available)\n'
394 f'3. Use score_hypothesis_result to evaluate observations\n'
395 f'4. Use get_iteration_history to review what worked\n'
396 f'5. Repeat with refined hypothesis until convergence\n'
397 f'6. Use evaluate_thought_experiment to record final findings\n\n'
398 f'TOOLS: iterate_hypothesis, score_hypothesis_result, '
399 f'get_iteration_history, evaluate_thought_experiment\n\n'
400 f'Score each iteration -2 to +2. Stop when 3 consecutive '
401 f'iterations show no improvement.'
402 ),
403 'tools': [
404 'iterate_hypothesis', 'score_hypothesis_result',
405 'get_iteration_history', 'evaluate_thought_experiment',
406 ],
407 'max_iterations': 20,
408 'scoring': 'llm_rubric',
409 }
410 else:
411 # traditional, research, or any future type
412 return {
413 'strategy': 'reason_and_refine',
414 'description': (
415 f'ITERATIVE THOUGHT EXPERIMENT\n\n{base_context}\n'
416 f'LOOP PATTERN:\n'
417 f'1. Use iterate_hypothesis to propose a refinement or test angle\n'
418 f'2. Research/reason about the hypothesis (use web search, '
419 f'recall_memory, or domain tools as needed)\n'
420 f'3. Use score_hypothesis_result to evaluate quality against rubric\n'
421 f'4. Use get_iteration_history to see what approaches scored well\n'
422 f'5. Repeat with refined hypothesis until convergence or budget\n'
423 f'6. Use evaluate_thought_experiment to record final evaluation\n\n'
424 f'TOOLS: iterate_hypothesis, score_hypothesis_result, '
425 f'get_iteration_history, evaluate_thought_experiment\n\n'
426 f'SCORING RUBRIC:\n'
427 f'- Evidence quality: is the reasoning backed by data/research?\n'
428 f'- Hypothesis clarity: is it specific and testable?\n'
429 f'- Expected impact: how significant would the outcome be?\n'
430 f'- Feasibility: can this realistically be tested/implemented?\n\n'
431 f'Score each iteration -2 to +2. Stop when 3 consecutive '
432 f'iterations show no improvement or after 10 iterations.'
433 ),
434 'tools': [
435 'iterate_hypothesis', 'score_hypothesis_result',
436 'get_iteration_history', 'evaluate_thought_experiment',
437 ],
438 'max_iterations': 10,
439 'scoring': 'llm_rubric',
440 }
442 @staticmethod
443 def record_agent_evaluation(db: Session, experiment_id: str,
444 agent_id: str, score: float,
445 confidence: float, reasoning: str,
446 evidence: str = '') -> Optional[Dict]:
447 """Record an agent's evaluation result."""
448 from .models import ThoughtExperiment
450 experiment = db.query(ThoughtExperiment).filter_by(
451 id=experiment_id).first()
452 if not experiment:
453 return None
455 evaluations = experiment.agent_evaluations_json or []
456 evaluations.append({
457 'agent_id': agent_id,
458 'score': max(-2.0, min(2.0, score)),
459 'confidence': max(0.0, min(1.0, confidence)),
460 'reasoning': reasoning,
461 'evidence': evidence,
462 'evaluated_at': datetime.utcnow().isoformat(),
463 })
464 experiment.agent_evaluations_json = evaluations
465 db.flush()
467 # Award spark
468 try:
469 from .resonance_engine import ResonanceService
470 ResonanceService.award_action(
471 db, agent_id, 'experiment_evaluated',
472 source_id=experiment_id)
473 except Exception:
474 pass
476 return experiment.to_dict()
478 # ─── Tally & Decision ───
480 @staticmethod
481 def tally_votes(db: Session, experiment_id: str) -> Dict:
482 """Tally all votes for an experiment.
484 Uses context-aware weighting from voting_rules when available.
485 Fallback: human=1.0, agent=confidence.
486 """
487 from .models import ThoughtExperiment, ExperimentVote
489 experiment = db.query(ThoughtExperiment).filter_by(
490 id=experiment_id).first()
491 if not experiment:
492 return {'error': 'not_found'}
494 # Load context-aware voter rules
495 context_rules = None
496 decision_context = None
497 try:
498 from .voting_rules import get_voter_rules, classify_decision_context
499 exp_dict = experiment.to_dict()
500 decision_context = exp_dict.get('decision_context') or \
501 classify_decision_context(exp_dict)
502 context_rules = get_voter_rules(decision_context)
503 except ImportError:
504 pass
506 votes = db.query(ExperimentVote).filter_by(
507 experiment_id=experiment_id).all()
509 total_for = 0.0
510 total_against = 0.0
511 weighted_sum = 0.0
512 total_weight = 0.0
513 human_votes = 0
514 agent_votes = 0
515 suggestions = []
517 for v in votes:
518 if v.voter_type == 'human':
519 human_weight = context_rules['human_weight'] if context_rules else 1.0
520 weight = human_weight
521 human_votes += 1
522 else:
523 agent_weight = context_rules['agent_weight'] if context_rules else 1.0
524 weight = v.confidence * agent_weight
525 agent_votes += 1
527 weighted_sum += v.vote_value * weight
528 total_weight += weight
530 if v.vote_value > 0:
531 total_for += weight
532 elif v.vote_value < 0:
533 total_against += weight
535 if v.suggestion:
536 suggestions.append({
537 'voter_id': v.voter_id,
538 'voter_type': v.voter_type,
539 'suggestion': v.suggestion,
540 })
542 weighted_score = weighted_sum / total_weight if total_weight > 0 else 0.0
543 threshold = context_rules['approval_threshold'] if context_rules else 0.5
545 return {
546 'experiment_id': experiment_id,
547 'total_votes': len(votes),
548 'human_votes': human_votes,
549 'agent_votes': agent_votes,
550 'total_for': round(total_for, 2),
551 'total_against': round(total_against, 2),
552 'weighted_score': round(weighted_score, 4),
553 'total_weight': round(total_weight, 2),
554 'suggestions': suggestions,
555 'decision_context': decision_context,
556 'approval_threshold': threshold,
557 'decision_recommendation': (
558 'approve' if weighted_score > threshold
559 else 'reject' if weighted_score < -threshold
560 else 'inconclusive'
561 ),
562 }
    @staticmethod
    def decide(db: Session, experiment_id: str,
               decision_text: str) -> Optional[Dict]:
        """Record final decision for an experiment.

        Transitions to 'decided' status. Feeds outcome to WorldModelBridge.
        Steward-required contexts block decision until steward has voted.

        Returns the updated experiment dict, an {'error': ...} dict when a
        required steward vote is missing, or None when not found.
        """
        from .models import ThoughtExperiment, ExperimentVote

        experiment = db.query(ThoughtExperiment).filter_by(
            id=experiment_id).first()
        if not experiment:
            return None

        # Steward gate: certain contexts require steward vote before decision.
        # Skipped entirely when the optional voting_rules module is absent.
        try:
            from .voting_rules import get_voter_rules, classify_decision_context
            exp_dict = experiment.to_dict()
            context = exp_dict.get('decision_context') or \
                classify_decision_context(exp_dict)
            rules = get_voter_rules(context)
            if rules.get('steward_required'):
                # NOTE(review): steward votes are identified by the literal
                # voter_id 'steward' — confirm this matches how stewards
                # actually cast votes.
                steward_voted = db.query(ExperimentVote).filter_by(
                    experiment_id=experiment_id,
                    voter_id='steward',
                ).first()
                if not steward_voted:
                    return {'error': 'steward_vote_required',
                            'context': context,
                            'message': 'Steward must vote before decision on security contexts'}
        except ImportError:
            pass

        # Snapshot the tally and agent evaluations into the rationale so the
        # decision record is self-contained even if votes change later.
        tally = ThoughtExperimentService.tally_votes(db, experiment_id)
        experiment.status = 'decided'
        experiment.decision_outcome = decision_text
        experiment.decision_rationale = {
            'tally': tally,
            'agent_evaluations': experiment.agent_evaluations_json or [],
            'decided_at': datetime.utcnow().isoformat(),
        }
        db.flush()

        # Feed to WorldModelBridge (RL-EF); best-effort, never blocks.
        try:
            from integrations.agent_engine.world_model_bridge import get_world_model_bridge
            bridge = get_world_model_bridge()
            if bridge:
                bridge.submit_correction({
                    'type': 'thought_experiment_outcome',
                    'experiment_id': experiment_id,
                    'hypothesis': experiment.hypothesis,
                    'outcome': decision_text,
                    'tally': tally,
                })
        except Exception:
            pass

        return experiment.to_dict()
625 @staticmethod
626 def close_experiment(db: Session, experiment_id: str) -> Optional[Dict]:
627 """Archive a decided experiment."""
628 from .models import ThoughtExperiment
630 experiment = db.query(ThoughtExperiment).filter_by(
631 id=experiment_id).first()
632 if not experiment:
633 return None
635 experiment.status = 'archived'
636 db.flush()
637 return experiment.to_dict()
639 # ─── Queries ───
641 @staticmethod
642 def get_active_experiments(db: Session, status: str = None,
643 limit: int = 50) -> List[Dict]:
644 """List experiments filtered by status."""
645 from .models import ThoughtExperiment
647 query = db.query(ThoughtExperiment)
648 if status:
649 query = query.filter_by(status=status)
650 else:
651 query = query.filter(
652 ThoughtExperiment.status != 'archived')
654 experiments = query.order_by(
655 desc(ThoughtExperiment.created_at)
656 ).limit(min(limit, 200)).all()
658 return [e.to_dict() for e in experiments]
660 @staticmethod
661 def get_experiment_detail(db: Session, experiment_id: str) -> Optional[Dict]:
662 """Get full experiment with votes and timeline."""
663 from .models import ThoughtExperiment, ExperimentVote
665 experiment = db.query(ThoughtExperiment).filter_by(
666 id=experiment_id).first()
667 if not experiment:
668 return None
670 votes = db.query(ExperimentVote).filter_by(
671 experiment_id=experiment_id
672 ).order_by(ExperimentVote.created_at).all()
674 result = experiment.to_dict()
675 result['votes'] = [v.to_dict() for v in votes]
676 result['tally'] = ThoughtExperimentService.tally_votes(
677 db, experiment_id)
678 return result
680 @staticmethod
681 def get_experiment_votes(db: Session, experiment_id: str) -> List[Dict]:
682 """Get all votes for an experiment."""
683 from .models import ExperimentVote
685 votes = db.query(ExperimentVote).filter_by(
686 experiment_id=experiment_id
687 ).order_by(ExperimentVote.created_at).all()
688 return [v.to_dict() for v in votes]
690 @staticmethod
691 def get_core_ip_experiments(db: Session) -> List[Dict]:
692 """List experiments flagged as core IP."""
693 from .models import ThoughtExperiment
695 experiments = db.query(ThoughtExperiment).filter_by(
696 is_core_ip=True
697 ).order_by(desc(ThoughtExperiment.created_at)).all()
698 return [e.to_dict() for e in experiments]
700 @staticmethod
701 def get_experiment_timeline(db: Session, experiment_id: str) -> Optional[Dict]:
702 """Get lifecycle timeline for an experiment."""
703 from .models import ThoughtExperiment
705 experiment = db.query(ThoughtExperiment).filter_by(
706 id=experiment_id).first()
707 if not experiment:
708 return None
710 now = datetime.utcnow()
711 return {
712 'experiment_id': experiment_id,
713 'status': experiment.status,
714 'created_at': experiment.created_at.isoformat() if experiment.created_at else None,
715 'voting_opens_at': experiment.voting_opens_at.isoformat() if experiment.voting_opens_at else None,
716 'voting_closes_at': experiment.voting_closes_at.isoformat() if experiment.voting_closes_at else None,
717 'evaluation_deadline': experiment.evaluation_deadline.isoformat() if experiment.evaluation_deadline else None,
718 'is_voting_open': (
719 experiment.voting_opens_at and experiment.voting_closes_at
720 and experiment.voting_opens_at <= now <= experiment.voting_closes_at
721 ),
722 'time_until_voting': (
723 (experiment.voting_opens_at - now).total_seconds()
724 if experiment.voting_opens_at and now < experiment.voting_opens_at
725 else 0
726 ),
727 }