"""
LangGraph State Definitions for SPARKNET
Defines state schema, enums, and output models for workflows
"""

from typing import TypedDict, Annotated, Sequence, Dict, Any, List, Optional
from enum import Enum
from datetime import datetime
from pydantic import BaseModel, Field
from langchain_core.messages import BaseMessage
from langgraph.graph.message import add_messages


class ScenarioType(str, Enum):
    """
    VISTA/Horizon EU scenario types for Technology Transfer Office (TTO) automation.
    Each scenario has a dedicated multi-agent workflow aligned with TTO operations.

    Coverage Status:
    - FULLY COVERED (3): Patent Wake-Up, Agreement Safety, Partner Matching
    - PARTIALLY COVERED (2): License Compliance, Award Identification
    - NOT YET IMPLEMENTED (3): IP Portfolio, Due Diligence, Reporting
    - NOT COVERED (2): Grant Writing, Negotiation Support
    """
    # Fully Implemented Scenarios
    PATENT_WAKEUP = "patent_wakeup"  # Scenario 1: Dormant IP valorization
    AGREEMENT_SAFETY = "agreement_safety"  # Scenario 2: Legal agreement review
    PARTNER_MATCHING = "partner_matching"  # Scenario 5: Stakeholder matching

    # New Scenarios (Placeholder - Partially Implemented)
    LICENSE_COMPLIANCE = "license_compliance"  # Scenario 3: License tracking & compliance
    AWARD_IDENTIFICATION = "award_identification"  # Scenario 4: Funding & award opportunities

    # Future Scenarios (Not Yet Implemented)
    IP_PORTFOLIO = "ip_portfolio"  # IP portfolio management
    DUE_DILIGENCE = "due_diligence"  # Technology due diligence
    REPORTING = "reporting"  # TTO metrics and reporting

    # General Purpose
    GENERAL = "general"  # Custom/general purpose tasks


class TaskStatus(str, Enum):
    """
    Task execution status throughout workflow.
    """
    PENDING = "pending"
    PLANNING = "planning"
    EXECUTING = "executing"
    VALIDATING = "validating"
    REFINING = "refining"
    COMPLETED = "completed"
    FAILED = "failed"


class AgentState(TypedDict):
    """
    LangGraph state for SPARKNET workflows.

    This state is passed between all agents in the workflow.
    Uses Annotated with add_messages for automatic message history management.
    """

    # Message history (automatically managed by LangGraph)
    messages: Annotated[Sequence[BaseMessage], add_messages]

    # Task information
    task_id: str
    task_description: str
    scenario: ScenarioType
    status: TaskStatus

    # Workflow execution
    current_agent: Optional[str]  # Which agent is currently processing
    iteration_count: int  # Number of refinement iterations
    max_iterations: int  # Maximum allowed iterations

    # Planning stage outputs
    subtasks: Optional[List[Dict[str, Any]]]  # From PlannerAgent
    execution_order: Optional[List[List[str]]]  # Parallel execution layers

    # Execution stage outputs
    agent_outputs: Dict[str, Any]  # Outputs from each specialized agent
    intermediate_results: List[Dict[str, Any]]  # Intermediate results

    # Validation stage
    validation_score: Optional[float]  # Quality score from CriticAgent
    validation_feedback: Optional[str]  # Detailed feedback
    validation_issues: List[str]  # List of identified issues
    validation_suggestions: List[str]  # Improvement suggestions

    # Memory and context
    retrieved_context: List[Dict[str, Any]]  # From MemoryAgent
    document_metadata: Dict[str, Any]  # Metadata about input documents
    input_data: Dict[str, Any]  # Input data for the workflow (e.g., patent_path)

    # Final output
    final_output: Optional[Any]  # Final workflow result
    success: bool  # Whether workflow completed successfully
    error: Optional[str]  # Error message if failed

    # Metadata
    start_time: datetime
    end_time: Optional[datetime]
    execution_time_seconds: Optional[float]

    # Human-in-the-loop
    requires_human_approval: bool
    human_feedback: Optional[str]
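

# Illustrative sketch (not part of the workflow API): how the add_messages
# reducer behaves. Appending via add_messages accumulates history instead of
# overwriting it, which is why `messages` is annotated with it above. The
# message contents below are hypothetical.
def _demo_add_messages() -> None:
    """Minimal demonstration of message accumulation; safe to delete."""
    from langchain_core.messages import AIMessage, HumanMessage

    history = add_messages([], [HumanMessage(content="Analyze the uploaded patent")])
    history = add_messages(history, [AIMessage(content="Analysis started.")])
    assert len(history) == 2  # both messages retained, each with an assigned id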


class WorkflowOutput(BaseModel):
    """
    Structured output from SPARKNET workflows.
    Used for serialization and API responses.
    """

    task_id: str = Field(..., description="Unique task identifier")
    scenario: ScenarioType = Field(..., description="Scenario type executed")
    status: TaskStatus = Field(..., description="Final task status")
    success: bool = Field(..., description="Whether task completed successfully")

    # Results
    output: Any = Field(..., description="Primary output/result")
    intermediate_results: List[Dict[str, Any]] = Field(
        default_factory=list,
        description="Intermediate results from agents"
    )

    # Quality metrics
    quality_score: Optional[float] = Field(
        None,
        ge=0.0,
        le=1.0,
        description="Quality score from validation (0.0-1.0)"
    )
    validation_feedback: Optional[str] = Field(
        None,
        description="Feedback from CriticAgent"
    )

    # Execution metadata
    iterations_used: int = Field(..., description="Number of refinement iterations")
    execution_time_seconds: float = Field(..., description="Total execution time")
    agents_involved: List[str] = Field(
        default_factory=list,
        description="List of agents that participated"
    )

    # Workflow details
    subtasks: List[Dict[str, Any]] = Field(
        default_factory=list,
        description="Subtasks created during planning"
    )
    agent_outputs: Dict[str, Any] = Field(
        default_factory=dict,
        description="Outputs from individual agents"
    )

    # Validation score (alias for quality_score for compatibility)
    @property
    def validation_score(self) -> Optional[float]:
        """Alias for quality_score for backward compatibility."""
        return self.quality_score

    # Message history
    message_count: int = Field(..., description="Number of messages exchanged")

    # Error handling
    error: Optional[str] = Field(None, description="Error message if failed")
    warnings: List[str] = Field(default_factory=list, description="Warnings during execution")

    # Timestamps
    start_time: datetime = Field(..., description="Workflow start time")
    end_time: datetime = Field(..., description="Workflow end time")

    class Config:
        json_schema_extra = {
            "example": {
                "task_id": "task_12345",
                "scenario": "patent_wakeup",
                "status": "completed",
                "success": True,
                "output": {
                    "valorization_roadmap": "...",
                    "market_analysis": "...",
                    "stakeholder_matches": [...]
                },
                "quality_score": 0.92,
                "validation_feedback": "Excellent quality. All criteria met.",
                "iterations_used": 2,
                "execution_time_seconds": 45.3,
                "agents_involved": ["PlannerAgent", "DocumentAnalysisAgent", "MarketAnalysisAgent", "CriticAgent"],
                "message_count": 18,
                "start_time": "2025-11-04T10:00:00",
                "end_time": "2025-11-04T10:00:45"
            }
        }


class ValidationResult(BaseModel):
    """
    Structured validation result from CriticAgent.
    Compatible with existing CriticAgent implementation.
    """

    valid: bool = Field(..., description="Whether output meets quality thresholds")
    overall_score: float = Field(..., ge=0.0, le=1.0, description="Overall quality score")
    dimension_scores: Dict[str, float] = Field(
        ...,
        description="Scores for individual quality dimensions"
    )
    issues: List[str] = Field(
        default_factory=list,
        description="List of identified issues"
    )
    suggestions: List[str] = Field(
        default_factory=list,
        description="Improvement suggestions"
    )
    details: Dict[str, Any] = Field(
        default_factory=dict,
        description="Additional validation details"
    )
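

# Sketch: folding a CriticAgent ValidationResult back into AgentState. This is
# an illustrative helper; the real integration and any status transitions live
# in the workflow orchestrator and may differ.
def apply_validation(state: AgentState, result: ValidationResult) -> AgentState:
    """Copy critic scores, issues, and suggestions into state (mutates state)."""
    state["validation_score"] = result.overall_score
    state["validation_issues"] = list(result.issues)
    state["validation_suggestions"] = list(result.suggestions)
    # Flattening feedback into a single string is an assumption of this sketch.
    state["validation_feedback"] = "; ".join(result.issues + result.suggestions) or None
    return state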


class SubTask(BaseModel):
    """
    Individual subtask from PlannerAgent.
    Compatible with existing PlannerAgent implementation.
    """

    id: str = Field(..., description="Unique subtask ID")
    description: str = Field(..., description="What needs to be done")
    agent_type: str = Field(..., description="Which agent should handle this")
    dependencies: List[str] = Field(
        default_factory=list,
        description="IDs of subtasks this depends on"
    )
    estimated_duration: float = Field(
        default=0.0,
        description="Estimated duration in seconds"
    )
    priority: int = Field(default=0, description="Priority level")
    parameters: Dict[str, Any] = Field(
        default_factory=dict,
        description="Agent-specific parameters"
    )
    status: TaskStatus = Field(
        default=TaskStatus.PENDING,
        description="Current status"
    )
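

# Sketch: one way SubTask dependencies can be grouped into the parallel layers
# stored in AgentState["execution_order"]. Illustrative only; the PlannerAgent
# may use a different scheduling strategy.
def layer_subtasks(subtasks: List[SubTask]) -> List[List[str]]:
    """Group subtask IDs into layers; IDs within a layer can run in parallel."""
    remaining = {task.id: set(task.dependencies) for task in subtasks}
    layers: List[List[str]] = []
    while remaining:
        # A subtask is ready once all of its dependencies have been scheduled
        ready = sorted(tid for tid, deps in remaining.items() if not deps)
        if not ready:
            raise ValueError(f"Cyclic or unknown dependencies among: {sorted(remaining)}")
        layers.append(ready)
        for tid in ready:
            del remaining[tid]
        for deps in remaining.values():
            deps.difference_update(ready)
    return layers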


# Helper functions for state management

def create_initial_state(
    task_id: str,
    task_description: str,
    scenario: ScenarioType = ScenarioType.GENERAL,
    max_iterations: int = 3,
    input_data: Optional[Dict[str, Any]] = None,
) -> AgentState:
    """
    Create initial AgentState for a new workflow.

    Args:
        task_id: Unique task identifier
        task_description: Natural language task description
        scenario: VISTA scenario type
        max_iterations: Maximum refinement iterations
        input_data: Optional input data for workflow (e.g., patent_path)

    Returns:
        Initialized AgentState
    """
    return AgentState(
        messages=[],
        task_id=task_id,
        task_description=task_description,
        scenario=scenario,
        status=TaskStatus.PENDING,
        current_agent=None,
        iteration_count=0,
        max_iterations=max_iterations,
        subtasks=None,
        execution_order=None,
        agent_outputs={},
        intermediate_results=[],
        validation_score=None,
        validation_feedback=None,
        validation_issues=[],
        validation_suggestions=[],
        retrieved_context=[],
        document_metadata={},
        input_data=input_data or {},
        final_output=None,
        success=False,
        error=None,
        start_time=datetime.now(),
        end_time=None,
        execution_time_seconds=None,
        requires_human_approval=False,
        human_feedback=None,
    )


def state_to_output(state: AgentState) -> WorkflowOutput:
    """
    Convert AgentState to WorkflowOutput for serialization.

    Args:
        state: Current workflow state

    Returns:
        WorkflowOutput model
    """
    end_time = state.get("end_time") or datetime.now()
    execution_time = (end_time - state["start_time"]).total_seconds()

    # Handle None values by providing defaults
    subtasks = state.get("subtasks")
    if subtasks is None:
        subtasks = []

    agent_outputs = state.get("agent_outputs")
    if agent_outputs is None:
        agent_outputs = {}

    return WorkflowOutput(
        task_id=state["task_id"],
        scenario=state["scenario"],
        status=state["status"],
        success=state["success"],
        output=state.get("final_output"),
        intermediate_results=state.get("intermediate_results") or [],
        quality_score=state.get("validation_score"),
        validation_feedback=state.get("validation_feedback"),
        iterations_used=state.get("iteration_count", 0),
        execution_time_seconds=execution_time,
        agents_involved=list(agent_outputs.keys()),
        subtasks=subtasks,
        agent_outputs=agent_outputs,
        message_count=len(state.get("messages") or []),
        error=state.get("error"),
        warnings=[],  # Can be populated from validation_issues
        start_time=state["start_time"],
        end_time=end_time,
    )
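

# Usage sketch with hypothetical values: create a state, mark it complete, and
# convert it for serialization. WorkflowOutput is a Pydantic model, so an API
# layer can return result.model_dump_json() directly.
def _demo_state_roundtrip() -> WorkflowOutput:
    """Illustrative end-to-end use of the helpers above; safe to delete."""
    state = create_initial_state(
        task_id="task_demo",
        task_description="Draft a valorization brief for a dormant patent",
        scenario=ScenarioType.PATENT_WAKEUP,
    )
    state["status"] = TaskStatus.COMPLETED
    state["success"] = True
    state["final_output"] = {"summary": "..."}
    state["end_time"] = datetime.now()
    return state_to_output(state)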


# ============================================================================
# Patent Wake-Up Scenario Models (Scenario 1)
# ============================================================================

class Claim(BaseModel):
    """Individual patent claim"""
    claim_number: int = Field(..., description="Claim number")
    claim_type: str = Field(..., description="independent or dependent")
    claim_text: str = Field(..., description="Full claim text")
    depends_on: Optional[int] = Field(None, description="Parent claim number if dependent")


class PatentAnalysis(BaseModel):
    """Complete patent analysis output from DocumentAnalysisAgent"""
    patent_id: str = Field(..., description="Patent identifier")
    title: str = Field(..., description="Patent title")
    abstract: str = Field(..., description="Patent abstract")

    # Claims
    independent_claims: List[Claim] = Field(default_factory=list, description="Independent claims")
    dependent_claims: List[Claim] = Field(default_factory=list, description="Dependent claims")
    total_claims: int = Field(..., description="Total number of claims")

    # Technical details
    ipc_classification: List[str] = Field(default_factory=list, description="IPC codes")
    technical_domains: List[str] = Field(default_factory=list, description="Technology domains")
    key_innovations: List[str] = Field(default_factory=list, description="Key innovations")
    novelty_assessment: str = Field(..., description="Assessment of novelty")

    # Commercialization
    trl_level: int = Field(..., ge=1, le=9, description="Technology Readiness Level")
    trl_justification: str = Field(..., description="Reasoning for TRL assessment")
    commercialization_potential: str = Field(..., description="High, Medium, or Low")
    potential_applications: List[str] = Field(default_factory=list, description="Application areas")

    # Metadata
    inventors: List[str] = Field(default_factory=list, description="Inventor names")
    assignees: List[str] = Field(default_factory=list, description="Assignee organizations")
    filing_date: Optional[str] = Field(None, description="Filing date")
    publication_date: Optional[str] = Field(None, description="Publication date")

    # Analysis quality
    confidence_score: float = Field(..., ge=0.0, le=1.0, description="Analysis confidence")
    extraction_completeness: float = Field(..., ge=0.0, le=1.0, description="Extraction completeness")


class MarketOpportunity(BaseModel):
    """Individual market opportunity"""
    sector: str = Field(..., description="Industry sector name")
    sector_description: str = Field(..., description="Sector description")
    market_size_usd: Optional[float] = Field(None, description="Market size in USD")
    growth_rate_percent: Optional[float] = Field(None, description="Annual growth rate")
    technology_fit: str = Field(..., description="Excellent, Good, or Fair")
    market_gap: str = Field(..., description="Specific gap this technology fills")
    competitive_advantage: str = Field(..., description="Key competitive advantages")
    geographic_focus: List[str] = Field(default_factory=list, description="Target regions")
    time_to_market_months: int = Field(..., description="Estimated time to market")
    risk_level: str = Field(..., description="Low, Medium, or High")
    priority_score: float = Field(..., ge=0.0, le=1.0, description="Priority ranking")


class MarketAnalysis(BaseModel):
    """Complete market analysis output from MarketAnalysisAgent"""
    opportunities: List[MarketOpportunity] = Field(default_factory=list, description="Market opportunities")
    top_sectors: List[str] = Field(default_factory=list, description="Top 3 sectors by priority")

    # Overall assessment
    total_addressable_market_usd: Optional[float] = Field(None, description="Total addressable market")
    market_readiness: str = Field(..., description="Ready, Emerging, or Early")
    competitive_landscape: str = Field(..., description="Competitive landscape assessment")
    regulatory_considerations: List[str] = Field(default_factory=list, description="Regulatory issues")

    # Recommendations
    recommended_focus: str = Field(..., description="Recommended market focus")
    strategic_positioning: str = Field(..., description="Strategic positioning advice")
    go_to_market_strategy: str = Field(..., description="Go-to-market strategy")

    # Quality
    confidence_score: float = Field(..., ge=0.0, le=1.0, description="Analysis confidence")
    research_depth: int = Field(..., description="Number of sources consulted")


class StakeholderMatch(BaseModel):
    """Match between patent and potential partner"""
    stakeholder_name: str = Field(..., description="Stakeholder name")
    stakeholder_type: str = Field(..., description="Investor, Company, University, etc.")

    # Contact information
    location: str = Field(..., description="Geographic location")
    contact_info: Optional[Dict[str, Any]] = Field(None, description="Contact details")

    # Match scores
    overall_fit_score: float = Field(..., ge=0.0, le=1.0, description="Overall match score")
    technical_fit: float = Field(..., ge=0.0, le=1.0, description="Technical capability match")
    market_fit: float = Field(..., ge=0.0, le=1.0, description="Market sector alignment")
    geographic_fit: float = Field(..., ge=0.0, le=1.0, description="Geographic compatibility")
    strategic_fit: float = Field(..., ge=0.0, le=1.0, description="Strategic alignment")

    # Explanation
    match_rationale: str = Field(..., description="Why this is a good match")
    collaboration_opportunities: List[str] = Field(default_factory=list, description="Potential collaborations")
    potential_value: str = Field(..., description="High, Medium, or Low")

    # Next steps
    recommended_approach: str = Field(..., description="How to approach this stakeholder")
    talking_points: List[str] = Field(default_factory=list, description="Key talking points")


class ValorizationBrief(BaseModel):
    """Complete valorization package from OutreachAgent"""
    patent_id: str = Field(..., description="Patent identifier")

    # Document content
    content: str = Field(..., description="Full markdown content")
    pdf_path: str = Field(..., description="Path to generated PDF")

    # Key sections (extracted)
    executive_summary: str = Field(..., description="Executive summary")
    technology_overview: str = Field(..., description="Technology overview section")
    market_analysis_summary: str = Field(..., description="Market analysis summary")
    partner_recommendations: str = Field(..., description="Partner recommendations")

    # Highlights
    top_opportunities: List[str] = Field(default_factory=list, description="Top market opportunities")
    recommended_partners: List[str] = Field(default_factory=list, description="Top 5 partners")
    key_takeaways: List[str] = Field(default_factory=list, description="Key takeaways")

    # Metadata
    generated_date: str = Field(..., description="Generation date")
    version: str = Field(default="1.0", description="Document version")


# ============================================================================
# License Compliance Monitoring Models (Scenario 3)
# ============================================================================

class ComplianceStatus(str, Enum):
    """License compliance status for monitoring."""
    COMPLIANT = "compliant"
    NON_COMPLIANT = "non_compliant"
    AT_RISK = "at_risk"
    PENDING_REVIEW = "pending_review"
    EXPIRED = "expired"


class LicenseComplianceAnalysis(BaseModel):
    """
    License compliance analysis output from LicenseComplianceAgent.

    GDPR Note: This model may contain references to personal data
    (licensee contacts, payment info). Implement appropriate access
    controls and data retention policies.
    """
    license_id: str = Field(..., description="License agreement identifier")
    agreement_name: str = Field(..., description="Name of the agreement")
    licensee: str = Field(..., description="Licensee organization name")

    # Compliance status
    overall_status: ComplianceStatus = Field(..., description="Overall compliance status")
    compliance_score: float = Field(..., ge=0.0, le=1.0, description="Compliance score 0-1")

    # Payment compliance
    payments_current: bool = Field(..., description="All payments up to date")
    payments_overdue: int = Field(default=0, description="Number of overdue payments")
    total_outstanding: float = Field(default=0.0, description="Total outstanding amount")
    currency: str = Field(default="EUR", description="Currency code")

    # Milestone compliance
    milestones_on_track: bool = Field(..., description="All milestones on track")
    milestones_overdue: int = Field(default=0, description="Number of overdue milestones")
    next_milestone_date: Optional[str] = Field(None, description="Next milestone due date")

    # Alerts and issues
    active_alerts: List[str] = Field(default_factory=list, description="Active compliance alerts")
    issues_identified: List[str] = Field(default_factory=list, description="Identified issues")
    recommendations: List[str] = Field(default_factory=list, description="Compliance recommendations")

    # Confidence and validation
    confidence_score: float = Field(..., ge=0.0, le=1.0, description="Analysis confidence")
    human_review_required: bool = Field(default=False, description="Requires human review")
    last_reviewed: Optional[str] = Field(None, description="Last human review date")


class RevenueReport(BaseModel):
    """Revenue report for license portfolio."""
    report_id: str = Field(..., description="Report identifier")
    period_start: str = Field(..., description="Reporting period start")
    period_end: str = Field(..., description="Reporting period end")

    # Revenue summary
    total_revenue: float = Field(..., description="Total revenue in period")
    currency: str = Field(default="EUR", description="Currency code")
    by_license: Dict[str, float] = Field(default_factory=dict, description="Revenue by license")
    by_type: Dict[str, float] = Field(default_factory=dict, description="Revenue by type")

    # Comparisons
    vs_previous_period: Optional[float] = Field(None, description="% change vs previous period")
    vs_forecast: Optional[float] = Field(None, description="% vs forecast")

    # Analysis quality
    confidence_score: float = Field(..., ge=0.0, le=1.0, description="Report confidence")


# ============================================================================
# Award Identification Models (Scenario 4)
# ============================================================================

class FundingOpportunity(BaseModel):
    """
    Funding opportunity identified by the award scanning system.

    Represents grants, awards, and other funding opportunities
    matched to research capabilities.
    """
    opportunity_id: str = Field(..., description="Opportunity identifier")
    title: str = Field(..., description="Opportunity title")
    description: str = Field(..., description="Full description")

    # Funder information
    funder: str = Field(..., description="Funding organization name")
    funder_type: str = Field(..., description="Type: government, EU, foundation, corporate")
    program_name: Optional[str] = Field(None, description="Funding program name")

    # Funding details
    amount_min: Optional[float] = Field(None, description="Minimum funding amount")
    amount_max: Optional[float] = Field(None, description="Maximum funding amount")
    currency: str = Field(default="EUR", description="Currency code")
    funding_type: str = Field(..., description="Type: grant, award, prize, fellowship")

    # Timing
    deadline: Optional[str] = Field(None, description="Application deadline")
    duration_months: Optional[int] = Field(None, description="Funding duration in months")
    decision_date: Optional[str] = Field(None, description="Expected decision date")

    # Matching
    match_score: float = Field(..., ge=0.0, le=1.0, description="Match score with capabilities")
    match_rationale: str = Field(..., description="Why this is a good match")
    eligibility_status: str = Field(..., description="eligible, ineligible, partial, unknown")
    eligibility_notes: List[str] = Field(default_factory=list, description="Eligibility details")

    # Next steps
    recommended_action: str = Field(..., description="Recommended next step")
    application_effort: str = Field(..., description="Low, Medium, High effort required")
    success_likelihood: str = Field(..., description="Low, Medium, High likelihood")

    # Metadata
    url: Optional[str] = Field(None, description="Opportunity URL")
    keywords: List[str] = Field(default_factory=list, description="Relevant keywords")
    research_areas: List[str] = Field(default_factory=list, description="Matching research areas")
    discovered_date: str = Field(..., description="When opportunity was discovered")

    # Quality
    confidence_score: float = Field(..., ge=0.0, le=1.0, description="Analysis confidence")


class AwardApplicationStatus(BaseModel):
    """Status tracking for award/grant applications."""
    application_id: str = Field(..., description="Application identifier")
    opportunity_id: str = Field(..., description="Target opportunity")

    # Status
    status: str = Field(..., description="draft, internal_review, submitted, under_review, awarded, rejected")
    submitted_date: Optional[str] = Field(None, description="Submission date")
    decision_date: Optional[str] = Field(None, description="Decision received date")

    # Documents
    documents_completed: int = Field(default=0, description="Completed documents")
    documents_required: int = Field(default=0, description="Total required documents")
    documents_pending_review: int = Field(default=0, description="Documents pending review")

    # Quality
    overall_score: Optional[float] = Field(None, ge=0.0, le=1.0, description="Application quality score")
    critic_validation: Optional[Dict[str, Any]] = Field(None, description="CriticAgent validation result")
    human_approved: bool = Field(default=False, description="Human approval received")

    # Notes
    internal_notes: List[str] = Field(default_factory=list, description="Internal notes")
    feedback: Optional[str] = Field(None, description="Feedback from funder if received")


# ============================================================================
# Human-in-the-Loop Decision Models
# ============================================================================

class HumanDecisionPoint(BaseModel):
    """
    Human-in-the-loop decision point for workflow orchestration.

    Captures when and why human input is required, and tracks
    the decision made.
    """
    decision_id: str = Field(..., description="Decision point identifier")
    workflow_id: str = Field(..., description="Parent workflow ID")
    scenario: ScenarioType = Field(..., description="Scenario requiring decision")

    # Decision context
    decision_type: str = Field(..., description="Type: approval, selection, verification, override")
    question: str = Field(..., description="Decision question for human")
    context: str = Field(..., description="Context and background for decision")
    options: List[str] = Field(default_factory=list, description="Available options")

    # AI recommendation
    ai_recommendation: Optional[str] = Field(None, description="AI recommended option")
    ai_confidence: Optional[float] = Field(None, ge=0.0, le=1.0, description="AI confidence in recommendation")
    ai_rationale: Optional[str] = Field(None, description="Rationale for AI recommendation")

    # Human decision
    human_decision: Optional[str] = Field(None, description="Human selected option")
    human_rationale: Optional[str] = Field(None, description="Human provided rationale")
    decided_by: Optional[str] = Field(None, description="User who made decision")
    decided_at: Optional[str] = Field(None, description="Timestamp of decision")

    # Status
    status: str = Field(default="pending", description="pending, decided, expired, skipped")
    expires_at: Optional[str] = Field(None, description="When decision times out")

    # Audit
    created_at: str = Field(..., description="When decision point was created")
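

# Sketch: opening an approval gate when a workflow needs human sign-off. The
# option names and the 0.8 threshold are assumptions of this example, not a
# fixed policy of the orchestrator.
def open_approval_gate(state: AgentState) -> HumanDecisionPoint:
    """Create a pending HumanDecisionPoint from the current workflow state."""
    score = state.get("validation_score") or 0.0
    return HumanDecisionPoint(
        decision_id=f"{state['task_id']}-approval-1",
        workflow_id=state["task_id"],
        scenario=state["scenario"],
        decision_type="approval",
        question="Approve the generated output for release?",
        context=state.get("validation_feedback") or "No critic feedback available.",
        options=["approve", "request_changes", "reject"],
        ai_recommendation="approve" if score >= 0.8 else "request_changes",
        ai_confidence=state.get("validation_score"),
        created_at=datetime.now().isoformat(),
    )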


class SourceVerification(BaseModel):
    """
    Source verification for hallucination mitigation.

    Tracks sources used by AI agents and their verification status.
    """
    verification_id: str = Field(..., description="Verification identifier")
    claim: str = Field(..., description="AI-generated claim to verify")

    # Sources
    sources: List[Dict[str, Any]] = Field(default_factory=list, description="Supporting sources")
    source_count: int = Field(default=0, description="Number of sources found")

    # Verification
    verified: bool = Field(..., description="Claim is verified by sources")
    verification_score: float = Field(..., ge=0.0, le=1.0, description="Verification confidence")
    verification_method: str = Field(..., description="How verification was performed")

    # Issues
    discrepancies: List[str] = Field(default_factory=list, description="Discrepancies found")
    warnings: List[str] = Field(default_factory=list, description="Verification warnings")

    # Metadata
    verified_at: str = Field(..., description="When verification was performed")