"""
Debug script to test patent workflow execution
"""
import asyncio
import sys
from pathlib import Path
from loguru import logger

# Configure logger for debugging
logger.remove()
logger.add(sys.stdout, level="DEBUG")

async def test_document_analysis():
    """Test just the document analysis step"""
    from src.llm.langchain_ollama_client import LangChainOllamaClient
    from src.agents.scenario1 import DocumentAnalysisAgent

    logger.info("=" * 70)
    logger.info("Testing Document Analysis Agent")
    logger.info("=" * 70)

    # Initialize LLM client
    logger.info("Initializing LLM client...")
    llm_client = LangChainOllamaClient()

    # Initialize agent
    logger.info("Initializing DocumentAnalysisAgent...")
    agent = DocumentAnalysisAgent(llm_client=llm_client, memory_agent=None)

    # Test with a sample patent PDF (hardcoded local path used for debugging)
    patent_path = "/home/mhamdan/SPARKNET/Dataset/Microsoft July 2006.pdf"
    logger.info(f"Testing with patent: {patent_path}")

    try:
        logger.info("Starting patent analysis...")
        analysis = await agent.analyze_patent(patent_path)

        logger.success("✅ Analysis completed!")
        logger.info(f"Patent Title: {analysis.title}")
        logger.info(f"TRL Level: {analysis.trl_level}")
        logger.info(f"Key Innovations: {len(analysis.key_innovations)}")
        logger.info(f"Technical Domains: {analysis.technical_domains}")

        return analysis

    except Exception as e:
        logger.error(f"❌ Analysis failed: {e}")
        import traceback
        traceback.print_exc()
        return None

async def test_full_workflow():
    """Test the full workflow"""
    from src.llm.langchain_ollama_client import LangChainOllamaClient
    from src.agents.planner_agent import create_planner_agent
    from src.agents.critic_agent import create_critic_agent
    from src.agents.memory_agent import create_memory_agent
    from src.workflow.langgraph_workflow import create_workflow
    from src.workflow.langgraph_state import ScenarioType

    logger.info("=" * 70)
    logger.info("Testing Full Workflow")
    logger.info("=" * 70)

    # Initialize components
    logger.info("Initializing LLM client...")
    llm_client = LangChainOllamaClient()

    logger.info("Initializing agents...")
    planner = create_planner_agent(llm_client)
    critic = create_critic_agent(llm_client)
    memory = create_memory_agent(llm_client)

    logger.info("Creating workflow...")
    workflow = create_workflow(
        llm_client=llm_client,
        planner_agent=planner,
        critic_agent=critic,
        memory_agent=memory
    )

    # Test workflow
    patent_path = "/home/mhamdan/SPARKNET/Dataset/Microsoft July 2006.pdf"
    logger.info(f"Testing workflow with patent: {patent_path}")

    try:
        result = await workflow.run(
            task_description=f"Analyze patent: {Path(patent_path).name} and create valorization roadmap",
            scenario=ScenarioType.PATENT_WAKEUP,
            input_data={"patent_path": patent_path},
            task_id="debug_test_001"
        )

        logger.success("✅ Workflow completed!")
        logger.info(f"Success: {result.success}")
        logger.info(f"Quality Score: {result.quality_score}")
        logger.info(f"Iterations: {result.iterations_used}")
        logger.info(f"Execution Time: {result.execution_time_seconds:.2f}s")

        return result

    except Exception as e:
        logger.error(f"❌ Workflow failed: {e}")
        import traceback
        traceback.print_exc()
        return None

if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Debug SPARKNET workflow")
    parser.add_argument("--test", choices=["doc", "full"], default="doc",
                       help="Test to run: 'doc' for document analysis only, 'full' for full workflow")
    args = parser.parse_args()

    if args.test == "doc":
        asyncio.run(test_document_analysis())
    else:
        asyncio.run(test_full_workflow())