""" |
|
|
LangGraph State Definitions for SPARKNET |
|
|
Defines state schema, enums, and output models for workflows |
|
|
""" |
|
|
|
|
|
from typing import TypedDict, Annotated, Sequence, Dict, Any, List, Optional
from enum import Enum
from datetime import datetime

from pydantic import BaseModel, Field
from langchain_core.messages import BaseMessage
from langgraph.graph.message import add_messages


class ScenarioType(str, Enum):
    """
    VISTA/Horizon EU scenario types for Technology Transfer Office (TTO) automation.
    Each scenario has a dedicated multi-agent workflow aligned with TTO operations.

    Coverage status:
    - FULLY COVERED (3): Patent Wake-Up, Agreement Safety, Partner Matching
    - PARTIALLY COVERED (5): License Compliance, Award Identification, IP Portfolio, Due Diligence, Reporting
    - NOT COVERED (2): Grant Writing, Negotiation Support
    """

    # Fully covered scenarios
    PATENT_WAKEUP = "patent_wakeup"
    AGREEMENT_SAFETY = "agreement_safety"
    PARTNER_MATCHING = "partner_matching"

    # Partially covered scenarios
    LICENSE_COMPLIANCE = "license_compliance"
    AWARD_IDENTIFICATION = "award_identification"

    IP_PORTFOLIO = "ip_portfolio"
    DUE_DILIGENCE = "due_diligence"
    REPORTING = "reporting"

    # Fallback for tasks that do not map to a specific scenario
    GENERAL = "general"


class TaskStatus(str, Enum):
    """Task execution status throughout the workflow."""

    PENDING = "pending"
    PLANNING = "planning"
    EXECUTING = "executing"
    VALIDATING = "validating"
    REFINING = "refining"
    COMPLETED = "completed"
    FAILED = "failed"


class AgentState(TypedDict):
    """
    LangGraph state for SPARKNET workflows.

    This state is passed between all agents in the workflow.
    Uses Annotated with add_messages for automatic message history management.
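
    A minimal sketch of a node update (the node name and message content are
    illustrative, not part of this module):

        from langchain_core.messages import AIMessage

        def planner_node(state: AgentState) -> dict:
            # Nodes return partial updates: add_messages appends to the
            # history, all other keys are overwritten.
            return {
                "messages": [AIMessage(content="Plan created")],
                "status": TaskStatus.PLANNING,
                "current_agent": "PlannerAgent",
            }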
    """

    # Conversation history (append-only via add_messages)
    messages: Annotated[Sequence[BaseMessage], add_messages]

    # Task identification
    task_id: str
    task_description: str
    scenario: ScenarioType
    status: TaskStatus

    # Workflow control
    current_agent: Optional[str]
    iteration_count: int
    max_iterations: int

    # Planning
    subtasks: Optional[List[Dict[str, Any]]]
    execution_order: Optional[List[List[str]]]

    # Agent results
    agent_outputs: Dict[str, Any]
    intermediate_results: List[Dict[str, Any]]

    # Validation (CriticAgent)
    validation_score: Optional[float]
    validation_feedback: Optional[str]
    validation_issues: List[str]
    validation_suggestions: List[str]

    # Context and inputs
    retrieved_context: List[Dict[str, Any]]
    document_metadata: Dict[str, Any]
    input_data: Dict[str, Any]

    # Final results
    final_output: Optional[Any]
    success: bool
    error: Optional[str]

    # Timing
    start_time: datetime
    end_time: Optional[datetime]
    execution_time_seconds: Optional[float]

    # Human-in-the-loop
    requires_human_approval: bool
    human_feedback: Optional[str]


class WorkflowOutput(BaseModel):
    """
    Structured output from SPARKNET workflows.
    Used for serialization and API responses.
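
    Serialization sketch (assumes Pydantic v2; ``state`` is a finished
    AgentState from a completed workflow run):

        output = state_to_output(state)
        payload = output.model_dump(mode="json")  # JSON-safe dict for API responses
        print(output.model_dump_json(indent=2))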
    """

    # Identification and status
    task_id: str = Field(..., description="Unique task identifier")
    scenario: ScenarioType = Field(..., description="Scenario type executed")
    status: TaskStatus = Field(..., description="Final task status")
    success: bool = Field(..., description="Whether task completed successfully")

    # Results
    output: Any = Field(..., description="Primary output/result")
    intermediate_results: List[Dict[str, Any]] = Field(
        default_factory=list,
        description="Intermediate results from agents"
    )

    # Validation
    quality_score: Optional[float] = Field(
        None,
        ge=0.0,
        le=1.0,
        description="Quality score from validation (0.0-1.0)"
    )
    validation_feedback: Optional[str] = Field(
        None,
        description="Feedback from CriticAgent"
    )

    # Execution metadata
    iterations_used: int = Field(..., description="Number of refinement iterations")
    execution_time_seconds: float = Field(..., description="Total execution time in seconds")
    agents_involved: List[str] = Field(
        default_factory=list,
        description="List of agents that participated"
    )

    # Planning and per-agent outputs
    subtasks: List[Dict[str, Any]] = Field(
        default_factory=list,
        description="Subtasks created during planning"
    )
    agent_outputs: Dict[str, Any] = Field(
        default_factory=dict,
        description="Outputs from individual agents"
    )

    message_count: int = Field(..., description="Number of messages exchanged")

    # Errors and warnings
    error: Optional[str] = Field(None, description="Error message if failed")
    warnings: List[str] = Field(default_factory=list, description="Warnings during execution")

    # Timing
    start_time: datetime = Field(..., description="Workflow start time")
    end_time: datetime = Field(..., description="Workflow end time")

    @property
    def validation_score(self) -> Optional[float]:
        """Alias for quality_score for backward compatibility."""
        return self.quality_score

    class Config:
        json_schema_extra = {
            "example": {
                "task_id": "task_12345",
                "scenario": "patent_wakeup",
                "status": "completed",
                "success": True,
                "output": {
                    "valorization_roadmap": "...",
                    "market_analysis": "...",
                    "stakeholder_matches": ["..."]
                },
                "quality_score": 0.92,
                "validation_feedback": "Excellent quality. All criteria met.",
                "iterations_used": 2,
                "execution_time_seconds": 45.3,
                "agents_involved": ["PlannerAgent", "DocumentAnalysisAgent", "MarketAnalysisAgent", "CriticAgent"],
                "message_count": 18,
                "start_time": "2025-11-04T10:00:00",
                "end_time": "2025-11-04T10:00:45"
            }
        }


class ValidationResult(BaseModel):
    """
    Structured validation result from CriticAgent.
    Compatible with the existing CriticAgent implementation.
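
    Illustrative mapping into AgentState (a sketch; how the feedback string is
    composed is an assumption, not part of this module):

        update = {
            "validation_score": result.overall_score,
            "validation_feedback": "; ".join(result.suggestions) or None,
            "validation_issues": result.issues,
            "validation_suggestions": result.suggestions,
        }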
    """

    valid: bool = Field(..., description="Whether output meets quality thresholds")
    overall_score: float = Field(..., ge=0.0, le=1.0, description="Overall quality score")
    dimension_scores: Dict[str, float] = Field(
        ...,
        description="Scores for individual quality dimensions"
    )
    issues: List[str] = Field(
        default_factory=list,
        description="List of identified issues"
    )
    suggestions: List[str] = Field(
        default_factory=list,
        description="Improvement suggestions"
    )
    details: Dict[str, Any] = Field(
        default_factory=dict,
        description="Additional validation details"
    )


class SubTask(BaseModel):
    """
    Individual subtask from PlannerAgent.
    Compatible with the existing PlannerAgent implementation.
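
    Illustrative plan fragment (a sketch; execution_order is read here as a
    list of sequential levels, which is an assumption about the planner):

        analyze = SubTask(id="t1", description="Extract claims", agent_type="DocumentAnalysisAgent")
        market = SubTask(
            id="t2",
            description="Assess market fit",
            agent_type="MarketAnalysisAgent",
            dependencies=["t1"],
        )
        execution_order = [["t1"], ["t2"]]  # t1 first, then t2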
    """

    id: str = Field(..., description="Unique subtask ID")
    description: str = Field(..., description="What needs to be done")
    agent_type: str = Field(..., description="Which agent should handle this")
    dependencies: List[str] = Field(
        default_factory=list,
        description="IDs of subtasks this depends on"
    )
    estimated_duration: float = Field(
        default=0.0,
        description="Estimated duration in seconds"
    )
    priority: int = Field(default=0, description="Priority level")
    parameters: Dict[str, Any] = Field(
        default_factory=dict,
        description="Agent-specific parameters"
    )
    status: TaskStatus = Field(
        default=TaskStatus.PENDING,
        description="Current status"
    )


def create_initial_state(
    task_id: str,
    task_description: str,
    scenario: ScenarioType = ScenarioType.GENERAL,
    max_iterations: int = 3,
    input_data: Optional[Dict[str, Any]] = None,
) -> AgentState:
    """
    Create the initial AgentState for a new workflow.

    Args:
        task_id: Unique task identifier
        task_description: Natural language task description
        scenario: VISTA scenario type
        max_iterations: Maximum refinement iterations
        input_data: Optional input data for the workflow (e.g., patent_path)

    Returns:
        Initialized AgentState
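
    Example (illustrative; the task description and file path are placeholders):

        state = create_initial_state(
            task_id="task_12345",
            task_description="Re-activate a dormant patent and map valorization options",
            scenario=ScenarioType.PATENT_WAKEUP,
            input_data={"patent_path": "path/to/patent.pdf"},
        )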
    """
    return AgentState(
        messages=[],
        task_id=task_id,
        task_description=task_description,
        scenario=scenario,
        status=TaskStatus.PENDING,
        current_agent=None,
        iteration_count=0,
        max_iterations=max_iterations,
        subtasks=None,
        execution_order=None,
        agent_outputs={},
        intermediate_results=[],
        validation_score=None,
        validation_feedback=None,
        validation_issues=[],
        validation_suggestions=[],
        retrieved_context=[],
        document_metadata={},
        input_data=input_data or {},
        final_output=None,
        success=False,
        error=None,
        start_time=datetime.now(),
        end_time=None,
        execution_time_seconds=None,
        requires_human_approval=False,
        human_feedback=None,
    )


def state_to_output(state: AgentState) -> WorkflowOutput:
    """
    Convert AgentState to WorkflowOutput for serialization.

    Args:
        state: Current workflow state

    Returns:
        WorkflowOutput model
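
    Example (illustrative; ``final_state`` is assumed to be the state returned
    by a completed workflow run):

        output = state_to_output(final_state)
        if output.success:
            print(f"{output.scenario.value} finished in {output.execution_time_seconds:.1f}s")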
    """
    end_time = state.get("end_time") or datetime.now()
    execution_time = (end_time - state["start_time"]).total_seconds()

    subtasks = state.get("subtasks")
    if subtasks is None:
        subtasks = []

    agent_outputs = state.get("agent_outputs")
    if agent_outputs is None:
        agent_outputs = {}

    return WorkflowOutput(
        task_id=state["task_id"],
        scenario=state["scenario"],
        status=state["status"],
        success=state["success"],
        output=state.get("final_output"),
        intermediate_results=state.get("intermediate_results") or [],
        quality_score=state.get("validation_score"),
        validation_feedback=state.get("validation_feedback"),
        iterations_used=state.get("iteration_count", 0),
        execution_time_seconds=execution_time,
        agents_involved=list(agent_outputs.keys()),
        subtasks=subtasks,
        agent_outputs=agent_outputs,
        message_count=len(state.get("messages") or []),
        error=state.get("error"),
        warnings=[],
        start_time=state["start_time"],
        end_time=end_time,
    )


# --- Patent analysis and valorization output models ---

class Claim(BaseModel):
    """Individual patent claim."""

    claim_number: int = Field(..., description="Claim number")
    claim_type: str = Field(..., description="independent or dependent")
    claim_text: str = Field(..., description="Full claim text")
    depends_on: Optional[int] = Field(None, description="Parent claim number if dependent")


class PatentAnalysis(BaseModel):
    """Complete patent analysis output from DocumentAnalysisAgent."""

    patent_id: str = Field(..., description="Patent identifier")
    title: str = Field(..., description="Patent title")
    abstract: str = Field(..., description="Patent abstract")

    # Claims
    independent_claims: List[Claim] = Field(default_factory=list, description="Independent claims")
    dependent_claims: List[Claim] = Field(default_factory=list, description="Dependent claims")
    total_claims: int = Field(..., description="Total number of claims")

    # Classification and innovation
    ipc_classification: List[str] = Field(default_factory=list, description="IPC codes")
    technical_domains: List[str] = Field(default_factory=list, description="Technology domains")
    key_innovations: List[str] = Field(default_factory=list, description="Key innovations")
    novelty_assessment: str = Field(..., description="Assessment of novelty")

    # Commercialization
    trl_level: int = Field(..., ge=1, le=9, description="Technology Readiness Level")
    trl_justification: str = Field(..., description="Reasoning for TRL assessment")
    commercialization_potential: str = Field(..., description="High, Medium, or Low")
    potential_applications: List[str] = Field(default_factory=list, description="Application areas")

    # Bibliographic data
    inventors: List[str] = Field(default_factory=list, description="Inventor names")
    assignees: List[str] = Field(default_factory=list, description="Assignee organizations")
    filing_date: Optional[str] = Field(None, description="Filing date")
    publication_date: Optional[str] = Field(None, description="Publication date")

    # Analysis quality
    confidence_score: float = Field(..., ge=0.0, le=1.0, description="Analysis confidence")
    extraction_completeness: float = Field(..., ge=0.0, le=1.0, description="Extraction completeness")


class MarketOpportunity(BaseModel):
    """Individual market opportunity."""

    sector: str = Field(..., description="Industry sector name")
    sector_description: str = Field(..., description="Sector description")
    market_size_usd: Optional[float] = Field(None, description="Market size in USD")
    growth_rate_percent: Optional[float] = Field(None, description="Annual growth rate in percent")
    technology_fit: str = Field(..., description="Excellent, Good, or Fair")
    market_gap: str = Field(..., description="Specific gap this technology fills")
    competitive_advantage: str = Field(..., description="Key competitive advantages")
    geographic_focus: List[str] = Field(default_factory=list, description="Target regions")
    time_to_market_months: int = Field(..., description="Estimated time to market in months")
    risk_level: str = Field(..., description="Low, Medium, or High")
    priority_score: float = Field(..., ge=0.0, le=1.0, description="Priority ranking")


class MarketAnalysis(BaseModel):
    """Complete market analysis output from MarketAnalysisAgent."""

    opportunities: List[MarketOpportunity] = Field(default_factory=list, description="Market opportunities")
    top_sectors: List[str] = Field(default_factory=list, description="Top 3 sectors by priority")

    # Market context
    total_addressable_market_usd: Optional[float] = Field(None, description="Total addressable market in USD")
    market_readiness: str = Field(..., description="Ready, Emerging, or Early")
    competitive_landscape: str = Field(..., description="Competitive landscape assessment")
    regulatory_considerations: List[str] = Field(default_factory=list, description="Regulatory issues")

    # Recommendations
    recommended_focus: str = Field(..., description="Recommended market focus")
    strategic_positioning: str = Field(..., description="Strategic positioning advice")
    go_to_market_strategy: str = Field(..., description="Go-to-market strategy")

    # Analysis quality
    confidence_score: float = Field(..., ge=0.0, le=1.0, description="Analysis confidence")
    research_depth: int = Field(..., description="Number of sources consulted")


class StakeholderMatch(BaseModel):
    """Match between a patent and a potential partner."""

    stakeholder_name: str = Field(..., description="Stakeholder name")
    stakeholder_type: str = Field(..., description="Investor, Company, University, etc.")

    location: str = Field(..., description="Geographic location")
    contact_info: Optional[Dict[str, Any]] = Field(None, description="Contact details")

    # Fit scores
    overall_fit_score: float = Field(..., ge=0.0, le=1.0, description="Overall match score")
    technical_fit: float = Field(..., ge=0.0, le=1.0, description="Technical capability match")
    market_fit: float = Field(..., ge=0.0, le=1.0, description="Market sector alignment")
    geographic_fit: float = Field(..., ge=0.0, le=1.0, description="Geographic compatibility")
    strategic_fit: float = Field(..., ge=0.0, le=1.0, description="Strategic alignment")

    # Match details
    match_rationale: str = Field(..., description="Why this is a good match")
    collaboration_opportunities: List[str] = Field(default_factory=list, description="Potential collaborations")
    potential_value: str = Field(..., description="High, Medium, or Low")

    # Outreach guidance
    recommended_approach: str = Field(..., description="How to approach this stakeholder")
    talking_points: List[str] = Field(default_factory=list, description="Key talking points")


class ValorizationBrief(BaseModel):
    """Complete valorization package from OutreachAgent."""

    patent_id: str = Field(..., description="Patent identifier")

    # Document artifacts
    content: str = Field(..., description="Full markdown content")
    pdf_path: str = Field(..., description="Path to generated PDF")

    # Sections
    executive_summary: str = Field(..., description="Executive summary")
    technology_overview: str = Field(..., description="Technology overview section")
    market_analysis_summary: str = Field(..., description="Market analysis summary")
    partner_recommendations: str = Field(..., description="Partner recommendations")

    # Highlights
    top_opportunities: List[str] = Field(default_factory=list, description="Top market opportunities")
    recommended_partners: List[str] = Field(default_factory=list, description="Top 5 partners")
    key_takeaways: List[str] = Field(default_factory=list, description="Key takeaways")

    # Metadata
    generated_date: str = Field(..., description="Generation date")
    version: str = Field(default="1.0", description="Document version")


# --- License compliance and revenue models ---

class ComplianceStatus(str, Enum):
    """License compliance status for monitoring."""

    COMPLIANT = "compliant"
    NON_COMPLIANT = "non_compliant"
    AT_RISK = "at_risk"
    PENDING_REVIEW = "pending_review"
    EXPIRED = "expired"


class LicenseComplianceAnalysis(BaseModel):
    """
    License compliance analysis output from LicenseComplianceAgent.

    GDPR Note: This model may contain references to personal data
    (licensee contacts, payment info). Implement appropriate access
    controls and data retention policies.
    """

    license_id: str = Field(..., description="License agreement identifier")
    agreement_name: str = Field(..., description="Name of the agreement")
    licensee: str = Field(..., description="Licensee organization name")

    # Overall assessment
    overall_status: ComplianceStatus = Field(..., description="Overall compliance status")
    compliance_score: float = Field(..., ge=0.0, le=1.0, description="Compliance score (0.0-1.0)")

    # Payments
    payments_current: bool = Field(..., description="All payments up to date")
    payments_overdue: int = Field(default=0, description="Number of overdue payments")
    total_outstanding: float = Field(default=0.0, description="Total outstanding amount")
    currency: str = Field(default="EUR", description="Currency code")

    # Milestones
    milestones_on_track: bool = Field(..., description="All milestones on track")
    milestones_overdue: int = Field(default=0, description="Number of overdue milestones")
    next_milestone_date: Optional[str] = Field(None, description="Next milestone due date")

    # Alerts and recommendations
    active_alerts: List[str] = Field(default_factory=list, description="Active compliance alerts")
    issues_identified: List[str] = Field(default_factory=list, description="Identified issues")
    recommendations: List[str] = Field(default_factory=list, description="Compliance recommendations")

    # Review metadata
    confidence_score: float = Field(..., ge=0.0, le=1.0, description="Analysis confidence")
    human_review_required: bool = Field(default=False, description="Requires human review")
    last_reviewed: Optional[str] = Field(None, description="Last human review date")


class RevenueReport(BaseModel):
    """Revenue report for a license portfolio."""

    report_id: str = Field(..., description="Report identifier")
    period_start: str = Field(..., description="Reporting period start")
    period_end: str = Field(..., description="Reporting period end")

    # Revenue breakdown
    total_revenue: float = Field(..., description="Total revenue in period")
    currency: str = Field(default="EUR", description="Currency code")
    by_license: Dict[str, float] = Field(default_factory=dict, description="Revenue by license")
    by_type: Dict[str, float] = Field(default_factory=dict, description="Revenue by type")

    # Comparisons
    vs_previous_period: Optional[float] = Field(None, description="% change vs previous period")
    vs_forecast: Optional[float] = Field(None, description="% vs forecast")

    confidence_score: float = Field(..., ge=0.0, le=1.0, description="Report confidence")


# --- Funding opportunity and award models ---

class FundingOpportunity(BaseModel):
    """
    Funding opportunity identified by the award scanning system.

    Represents grants, awards, and other funding opportunities
    matched to research capabilities.
    """

    opportunity_id: str = Field(..., description="Opportunity identifier")
    title: str = Field(..., description="Opportunity title")
    description: str = Field(..., description="Full description")

    # Funder
    funder: str = Field(..., description="Funding organization name")
    funder_type: str = Field(..., description="Type: government, EU, foundation, corporate")
    program_name: Optional[str] = Field(None, description="Funding program name")

    # Funding terms
    amount_min: Optional[float] = Field(None, description="Minimum funding amount")
    amount_max: Optional[float] = Field(None, description="Maximum funding amount")
    currency: str = Field(default="EUR", description="Currency code")
    funding_type: str = Field(..., description="Type: grant, award, prize, fellowship")

    # Timeline
    deadline: Optional[str] = Field(None, description="Application deadline")
    duration_months: Optional[int] = Field(None, description="Funding duration in months")
    decision_date: Optional[str] = Field(None, description="Expected decision date")

    # Match assessment
    match_score: float = Field(..., ge=0.0, le=1.0, description="Match score with capabilities")
    match_rationale: str = Field(..., description="Why this is a good match")
    eligibility_status: str = Field(..., description="eligible, ineligible, partial, unknown")
    eligibility_notes: List[str] = Field(default_factory=list, description="Eligibility details")

    # Recommendations
    recommended_action: str = Field(..., description="Recommended next step")
    application_effort: str = Field(..., description="Low, Medium, or High effort required")
    success_likelihood: str = Field(..., description="Low, Medium, or High likelihood")

    # References
    url: Optional[str] = Field(None, description="Opportunity URL")
    keywords: List[str] = Field(default_factory=list, description="Relevant keywords")
    research_areas: List[str] = Field(default_factory=list, description="Matching research areas")
    discovered_date: str = Field(..., description="When opportunity was discovered")

    confidence_score: float = Field(..., ge=0.0, le=1.0, description="Analysis confidence")


class AwardApplicationStatus(BaseModel):
    """Status tracking for award/grant applications."""

    application_id: str = Field(..., description="Application identifier")
    opportunity_id: str = Field(..., description="Target opportunity")

    # Status
    status: str = Field(..., description="draft, internal_review, submitted, under_review, awarded, rejected")
    submitted_date: Optional[str] = Field(None, description="Submission date")
    decision_date: Optional[str] = Field(None, description="Decision received date")

    # Document progress
    documents_completed: int = Field(default=0, description="Completed documents")
    documents_required: int = Field(default=0, description="Total required documents")
    documents_pending_review: int = Field(default=0, description="Documents pending review")

    # Quality and approval
    overall_score: Optional[float] = Field(None, ge=0.0, le=1.0, description="Application quality score")
    critic_validation: Optional[Dict[str, Any]] = Field(None, description="CriticAgent validation result")
    human_approved: bool = Field(default=False, description="Human approval received")

    # Notes
    internal_notes: List[str] = Field(default_factory=list, description="Internal notes")
    feedback: Optional[str] = Field(None, description="Feedback from funder, if received")


# --- Human-in-the-loop and verification models ---

class HumanDecisionPoint(BaseModel):
    """
    Human-in-the-loop decision point for workflow orchestration.

    Captures when and why human input is required, and tracks
    the decision made.
    """

    decision_id: str = Field(..., description="Decision point identifier")
    workflow_id: str = Field(..., description="Parent workflow ID")
    scenario: ScenarioType = Field(..., description="Scenario requiring decision")

    # Decision framing
    decision_type: str = Field(..., description="Type: approval, selection, verification, override")
    question: str = Field(..., description="Decision question for the human reviewer")
    context: str = Field(..., description="Context and background for the decision")
    options: List[str] = Field(default_factory=list, description="Available options")

    # AI recommendation
    ai_recommendation: Optional[str] = Field(None, description="AI recommended option")
    ai_confidence: Optional[float] = Field(None, ge=0.0, le=1.0, description="AI confidence in recommendation")
    ai_rationale: Optional[str] = Field(None, description="Rationale for AI recommendation")

    # Human decision
    human_decision: Optional[str] = Field(None, description="Human selected option")
    human_rationale: Optional[str] = Field(None, description="Human provided rationale")
    decided_by: Optional[str] = Field(None, description="User who made the decision")
    decided_at: Optional[str] = Field(None, description="Timestamp of decision")

    # Lifecycle
    status: str = Field(default="pending", description="pending, decided, expired, skipped")
    expires_at: Optional[str] = Field(None, description="When the decision times out")

    created_at: str = Field(..., description="When decision point was created")


class SourceVerification(BaseModel):
    """
    Source verification for hallucination mitigation.

    Tracks sources used by AI agents and their verification status.
    """

    verification_id: str = Field(..., description="Verification identifier")
    claim: str = Field(..., description="AI-generated claim to verify")

    # Sources
    sources: List[Dict[str, Any]] = Field(default_factory=list, description="Supporting sources")
    source_count: int = Field(default=0, description="Number of sources found")

    # Verification outcome
    verified: bool = Field(..., description="Whether the claim is supported by the sources")
    verification_score: float = Field(..., ge=0.0, le=1.0, description="Verification confidence")
    verification_method: str = Field(..., description="How verification was performed")

    # Findings
    discrepancies: List[str] = Field(default_factory=list, description="Discrepancies found")
    warnings: List[str] = Field(default_factory=list, description="Verification warnings")

    verified_at: str = Field(..., description="When verification was performed")