Spaces:
Sleeping
Sleeping
google-labs-jules[bot]
feat: implement AutoStream conversational AI sales agent with LangGraph
import pytest

from langchain_core.runnables import RunnableLambda

from agent.graph import app
from agent.nodes import IntentResponse, LeadExtractionResponse
from agent.state import AgentState
def simulate_conversation(messages, mock_llm_setup_func):
    """Drive the agent graph through a multi-turn conversation.

    For each message: installs the per-turn LLM mocks via
    ``mock_llm_setup_func(turn_index)``, runs one graph invocation, and
    records the user/assistant exchange in the conversation history.
    Returns the state after the final turn.
    """
    # Fresh, empty agent state — all lead fields unset.
    state = AgentState(
        conversation_history=[],
        current_message="",
        detected_intent=None,
        retrieved_documents=[],
        user_name=None,
        user_email=None,
        creator_platform=None,
        lead_ready=False,
        response="",
    )
    for turn_index, user_message in enumerate(messages):
        state["current_message"] = user_message
        mock_llm_setup_func(turn_index)
        state = app.invoke(state)
        # Append the turn's exchange after invocation, mirroring how the
        # graph sees only prior turns in the history.
        history = state["conversation_history"]
        history.append({"role": "user", "content": state["current_message"]})
        history.append({"role": "assistant", "content": state["response"]})
    return state
def test_agent_e2e(mocker):
    """End-to-end conversation: greeting -> pricing query -> progressive
    lead capture (platform, name, email) -> lead tool fires exactly once.

    The LLM is fully mocked; per-turn behavior is installed by
    ``setup_mocks_for_turn`` before each graph invocation.
    """
    mock_llm = mocker.MagicMock()
    mocker.patch('agent.nodes.get_llm', return_value=mock_llm)
    mocker.patch('agent.nodes.retrieve_documents', return_value=["We have Basic and Pro plans for $29 and $79."])
    mock_tool = mocker.patch('agent.nodes.mock_lead_capture')

    messages = [
        "Hi",
        "Tell me about pricing",
        "I want the Pro plan for my YouTube channel",
        "My name is Alex",
        "alex@email.com",
    ]

    def _intent_chain(intent):
        # Structured-output chain that classifies the turn as `intent`.
        return RunnableLambda(lambda _x: IntentResponse(intent=intent, confidence=0.99))

    def _lead_turn(user_name=None, user_email=None, creator_platform=None):
        # Build a with_structured_output side effect for a HIGH_INTENT_LEAD
        # turn: dispatches on the requested schema between intent
        # classification and lead-field extraction.
        def dispatch(schema):
            if schema.__name__ == "IntentResponse":
                return _intent_chain("HIGH_INTENT_LEAD")
            return RunnableLambda(
                lambda _x: LeadExtractionResponse(
                    user_name=user_name,
                    user_email=user_email,
                    creator_platform=creator_platform,
                )
            )
        return dispatch

    def setup_mocks_for_turn(idx):
        # Install the LLM behavior for turn `idx` (called before app.invoke).
        if idx == 0:
            mock_llm.with_structured_output.return_value = _intent_chain("GREETING")
        elif idx == 1:
            mock_llm.with_structured_output.return_value = _intent_chain("PRICING_QUERY")

            class FakeResponse:
                # Minimal stand-in for an LLM message: only `.content` is read.
                content = "We have Basic and Pro plans."

            mock_llm.invoke.return_value = FakeResponse()
        elif idx == 2:
            mock_llm.with_structured_output.side_effect = _lead_turn(creator_platform="YouTube")
        elif idx == 3:
            mock_llm.with_structured_output.side_effect = _lead_turn(user_name="Alex")
        elif idx == 4:
            mock_llm.with_structured_output.side_effect = _lead_turn(user_email="alex@email.com")

    final_state = simulate_conversation(messages, setup_mocks_for_turn)

    # All three lead fields must have been accumulated across turns 2-4,
    # and the capture tool called exactly once with the complete lead.
    assert final_state.get("user_name") == "Alex"
    assert final_state.get("user_email") == "alex@email.com"
    assert final_state.get("creator_platform") == "YouTube"
    assert final_state.get("lead_ready") is True
    mock_tool.assert_called_once_with("Alex", "alex@email.com", "YouTube")