| repo_id (string, 155 classes) | entity_id (string, 5–331 chars) | uri (string, 27–361 chars) | kind (string, 4 classes) | name (string, 1–295 chars) | path (string, 5–130 chars) | language (string, 1 class) | start_line (int32, 1–27k) | end_line (int32, 1–27k) | context_start_line (int32, 1–26.9k) | context_end_line (int32, 1–27k) | code (string, 0–20k chars) | source_hash (string, 64 chars) | truncated (bool, 2 classes) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
AgentLab | py:main | program://AgentLab/module/main#L1-L74 | module | main | main.py | python | 1 | 74 | 1 | 74 | """
Note: This script is a convenience script to launch experiments instead of using
the command line.
Copy this script and modify at will, but don't push your changes to the
repository.
"""
import logging
from agentlab.agents.generic_agent import (
AGENT_LLAMA3_70B,
AGENT_LLAMA31_70B,
RANDOM_SEARCH_AGEN... | cf52282bd08c6ac5ba93fcfc1ccacc25b04c07ed59f1f8a8ad630906d8499366 | false |
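The `code` cell above is truncated. As a hypothetical sketch of how such a convenience launcher typically continues, using only names visible in this table (`Study`, `AGENT_LLAMA3_70B`); the `Study` constructor and `run` signatures here are assumptions, not the repo's verbatim code:

```python
import logging

from agentlab.agents.generic_agent import AGENT_LLAMA3_70B
from agentlab.experiments.study import Study

logging.getLogger().setLevel(logging.INFO)

# Assumed constructor args and run() signature; the real main.py may differ.
study = Study([AGENT_LLAMA3_70B], benchmark="miniwob")
study.run(n_jobs=4)
```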
AgentLab | py:main_workarena_debug | program://AgentLab/module/main_workarena_debug#L1-L77 | module | main_workarena_debug | main_workarena_debug.py | python | 1 | 77 | 1 | 77 | """
Note: This script is a convenience script to launch experiments instead of using
the command line.
Copy this script and modify at will, but don't push your changes to the
repository.
"""
import logging
from copy import deepcopy
import bgym
from agentlab.agents.tool_use_agent.tool_use_agent import (
DEFAULT_... | 92a99e0d2b6b2fb8c18fb1060abf2c17ba8d9a100132f242959c7fb8ac18a4d8 | false |
AgentLab | py:add_study_to_repro_journal | program://AgentLab/module/add_study_to_repro_journal#L1-L18 | module | add_study_to_repro_journal | add_study_to_repro_journal.py | python | 1 | 18 | 1 | 18 | import os
from pathlib import Path
from agentlab.experiments.study import Study
base_dir = "/home/toolkit/ui_copilot_results"
exp_paths = [
"2025-01-31_22-08-34_genericagent-o3-mini-2025-01-31-on-workarena-l1",
# '2025-02-02_01-53-45_genericagent-openai-o1-mini-2024-09-12-on-workarena-l1',
"2025-02-02_0... | dc9b4b94f8f744a3656b875dd287be370af8551604785b276bd041bd6ba5b408 | false |
AgentLab | py:tests.test_main | program://AgentLab/module/tests.test_main#L1-L28 | module | tests.test_main | tests/test_main.py | python | 1 | 28 | 1 | 28 | import subprocess
import sys
from pathlib import Path
import pytest
@pytest.mark.pricy
def test_main_script_execution():
# this should trigger agent_4o_mini on miniwob_tiny_test unless this was
# reconfigured differently.
path = Path(__file__).parent.parent / "main.py"
sys.path.insert(0, str(path.pa... | 1c339f46889f59d74a302b20ab4529bc84b49bcb05ef73f1d278f62ac5758348 | false |
AgentLab | py:tests.test_main.test_main_script_execution | program://AgentLab/function/tests.test_main.test_main_script_execution#L9-L24 | function | test_main_script_execution | tests/test_main.py | python | 9 | 24 | 1 | 28 | import subprocess
import sys
from pathlib import Path
import pytest
@pytest.mark.pricy
def test_main_script_execution():
# this should trigger agent_4o_mini on miniwob_tiny_test unless this was
# reconfigured differently.
path = Path(__file__).parent.parent / "main.py"
sys.path.insert(0, str(path.pa... | 1c339f46889f59d74a302b20ab4529bc84b49bcb05ef73f1d278f62ac5758348 | false |
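The test body above is cut off mid-line. A minimal sketch of the subprocess-execution pattern it appears to use (a hypothetical completion, not the repo's exact assertions):

```python
import subprocess
import sys
from pathlib import Path

def test_main_script_execution():
    # Run main.py as a child process; fail the test on a non-zero exit code.
    path = Path(__file__).parent.parent / "main.py"
    result = subprocess.run([sys.executable, str(path)], capture_output=True, text=True)
    assert result.returncode == 0, result.stderr
```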
AgentLab | py:tests.test_ui_assistant | program://AgentLab/module/tests.test_ui_assistant#L1-L9 | module | tests.test_ui_assistant | tests/test_ui_assistant.py | python | 1 | 9 | 1 | 9 | from agentlab.ui_assistant import make_exp_args
from agentlab.agents.generic_agent import AGENT_4o
def test_make_exp_args():
"""Basic unit test to detect refactoring errors."""
exp_args = make_exp_args(AGENT_4o, "https://www.google.com")
assert exp_args.agent_args.flags.action.demo_mode == "default" | 3a084bf5c64c372102deec1fae59a53a3c7f99ae7b2bb77735960cb40568a15a | false |
AgentLab | py:tests.test_ui_assistant.test_make_exp_args | program://AgentLab/function/tests.test_ui_assistant.test_make_exp_args#L5-L9 | function | test_make_exp_args | tests/test_ui_assistant.py | python | 5 | 9 | 1 | 9 | from agentlab.ui_assistant import make_exp_args
from agentlab.agents.generic_agent import AGENT_4o
def test_make_exp_args():
"""Basic unit test to detect refactoring errors."""
exp_args = make_exp_args(AGENT_4o, "https://www.google.com")
assert exp_args.agent_args.flags.action.demo_mode == "default" | 3a084bf5c64c372102deec1fae59a53a3c7f99ae7b2bb77735960cb40568a15a | false |
AgentLab | py:tests.verify_rate_limit_anthropic | program://AgentLab/module/tests.verify_rate_limit_anthropic#L1-L89 | module | tests.verify_rate_limit_anthropic | tests/verify_rate_limit_anthropic.py | python | 1 | 89 | 1 | 89 | import os
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
import anthropic
client = anthropic.Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"])
def make_request(messages):
response = client.messages.create(
model="claude-3-5-sonnet-20241022", max_tokens=10, messages=messages... | a74fab2ec054dffa24e51f8b972c405769daca2f4af30651e2c9a3b558a387d8 | false |
AgentLab | py:tests.verify_rate_limit_anthropic.make_request | program://AgentLab/function/tests.verify_rate_limit_anthropic.make_request#L10-L14 | function | make_request | tests/verify_rate_limit_anthropic.py | python | 10 | 14 | 1 | 34 | import os
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
import anthropic
client = anthropic.Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"])
def make_request(messages):
response = client.messages.create(
model="claude-3-5-sonnet-20241022", max_tokens=10, messages=messages... | a74fab2ec054dffa24e51f8b972c405769daca2f4af30651e2c9a3b558a387d8 | false |
AgentLab | py:tests.verify_rate_limit_anthropic.make_message | program://AgentLab/function/tests.verify_rate_limit_anthropic.make_message#L17-L26 | function | make_message | tests/verify_rate_limit_anthropic.py | python | 17 | 26 | 1 | 46 | import os
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
import anthropic
client = anthropic.Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"])
def make_request(messages):
response = client.messages.create(
model="claude-3-5-sonnet-20241022", max_tokens=10, messages=messages... | a74fab2ec054dffa24e51f8b972c405769daca2f4af30651e2c9a3b558a387d8 | false |
AgentLab | py:tests.verify_rate_limit_anthropic.add_cache_control | program://AgentLab/function/tests.verify_rate_limit_anthropic.add_cache_control#L29-L30 | function | add_cache_control | tests/verify_rate_limit_anthropic.py | python | 29 | 30 | 9 | 50 |
def make_request(messages):
response = client.messages.create(
model="claude-3-5-sonnet-20241022", max_tokens=10, messages=messages
)
return response.usage
def make_message(text):
return {
"role": "user",
"content": [
{
"type": "text",
... | a74fab2ec054dffa24e51f8b972c405769daca2f4af30651e2c9a3b558a387d8 | false |
AgentLab | py:tests.verify_rate_limit_anthropic.remove_cache_control | program://AgentLab/function/tests.verify_rate_limit_anthropic.remove_cache_control#L33-L35 | function | remove_cache_control | tests/verify_rate_limit_anthropic.py | python | 33 | 35 | 13 | 55 | )
return response.usage
def make_message(text):
return {
"role": "user",
"content": [
{
"type": "text",
"text": text,
}
],
}
def add_cache_control(message: dict, cache_type="ephemeral"):
message["content"][0]["cache_... | a74fab2ec054dffa24e51f8b972c405769daca2f4af30651e2c9a3b558a387d8 | false |
AgentLab | py:tests.verify_rate_limit_anthropic.test_rate_limit_single | program://AgentLab/function/tests.verify_rate_limit_anthropic.test_rate_limit_single#L38-L63 | function | test_rate_limit_single | tests/verify_rate_limit_anthropic.py | python | 38 | 63 | 18 | 83 | return {
"role": "user",
"content": [
{
"type": "text",
"text": text,
}
],
}
def add_cache_control(message: dict, cache_type="ephemeral"):
message["content"][0]["cache_control"] = {"type": cache_type}
def remove_cache_contro... | a74fab2ec054dffa24e51f8b972c405769daca2f4af30651e2c9a3b558a387d8 | false |
AgentLab | py:tests.verify_rate_limit_anthropic.test_rate_limit_parallel | program://AgentLab/function/tests.verify_rate_limit_anthropic.test_rate_limit_parallel#L66-L76 | function | test_rate_limit_parallel | tests/verify_rate_limit_anthropic.py | python | 66 | 76 | 46 | 89 | messages = []
# Add all previous conversation turns
for i in range(5):
if i == 0:
messages.append(make_message(big_text))
t0 = time.time()
else:
messages.append(make_message(medium_text))
add_cache_control(messages[-1])
try:
us... | a74fab2ec054dffa24e51f8b972c405769daca2f4af30651e2c9a3b558a387d8 | false |
AgentLab | py:tests.verify_rate_limit_anthropic.test_rate_limit | program://AgentLab/function/tests.verify_rate_limit_anthropic.test_rate_limit#L79-L81 | function | test_rate_limit | tests/verify_rate_limit_anthropic.py | python | 79 | 81 | 59 | 89 | print(f"{dt:.2f}: Thread {thread_id}: {usage}")
except Exception as e:
print(f"Thread {thread_id}: Error - {e}")
break
remove_cache_control(messages[-1])
def test_rate_limit_parallel(num_threads=3):
print(f"Starting parallel rate limit test with {num_threads} th... | a74fab2ec054dffa24e51f8b972c405769daca2f4af30651e2c9a3b558a387d8 | false |
AgentLab | py:tests.analyze.test_overlay_utils | program://AgentLab/module/tests.analyze.test_overlay_utils#L1-L81 | module | tests.analyze.test_overlay_utils | tests/analyze/test_overlay_utils.py | python | 1 | 81 | 1 | 81 | from PIL import Image
from agentlab.analyze import overlay_utils
def test_parse_function_calls():
test_code = """
mouse_click(34, 59)
fill("a234", "test")
click('b123', button="right", modifiers=["Shift", "Control"])
select_option("c456", ["option1", "option2"])
"""
result = overlay_utils.parse_function_ca... | 9cc9b9688d8e5c68972d712ecf778467e597d4632f32b2585c4ee95da238a7e1 | false |
AgentLab | py:tests.analyze.test_overlay_utils.test_parse_function_calls | program://AgentLab/function/tests.analyze.test_overlay_utils.test_parse_function_calls#L6-L23 | function | test_parse_function_calls | tests/analyze/test_overlay_utils.py | python | 6 | 23 | 1 | 43 | from PIL import Image
from agentlab.analyze import overlay_utils
def test_parse_function_calls():
test_code = """
mouse_click(34, 59)
fill("a234", "test")
click('b123', button="right", modifiers=["Shift", "Control"])
select_option("c456", ["option1", "option2"])
"""
result = overlay_utils.parse_function_ca... | 9cc9b9688d8e5c68972d712ecf778467e597d4632f32b2585c4ee95da238a7e1 | false |
AgentLab | py:tests.analyze.test_overlay_utils.test_filtering_args | program://AgentLab/function/tests.analyze.test_overlay_utils.test_filtering_args#L26-L45 | function | test_filtering_args | tests/analyze/test_overlay_utils.py | python | 26 | 45 | 6 | 65 | def test_parse_function_calls():
test_code = """
mouse_click(34, 59)
fill("a234", "test")
click('b123', button="right", modifiers=["Shift", "Control"])
select_option("c456", ["option1", "option2"])
"""
result = overlay_utils.parse_function_calls(test_code)
assert result[1].function_name == "mouse_click"
... | 9cc9b9688d8e5c68972d712ecf778467e597d4632f32b2585c4ee95da238a7e1 | false |
AgentLab | py:tests.analyze.test_overlay_utils.manual_eval | program://AgentLab/function/tests.analyze.test_overlay_utils.manual_eval#L48-L77 | function | manual_eval | tests/analyze/test_overlay_utils.py | python | 48 | 77 | 28 | 81 | mouse_click(34, 59)
fill("a234", "test")
mouse_drag_and_drop(34, 59, to_x=100, to_y=200)
drag_and_drop("a123", "b456")
"""
result = overlay_utils.parse_function_calls(test_code)
args = overlay_utils.find_bids_and_xy_pairs(result)
assert len(args) == 6 # Expecting 4 args: 2 mouse clicks, 1 fill, 1 select_o... | 9cc9b9688d8e5c68972d712ecf778467e597d4632f32b2585c4ee95da238a7e1 | false |
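The overlay tests parse action strings such as `mouse_click(34, 59)` into structured calls. A self-contained illustration of that parsing idea using the standard-library `ast` module; `overlay_utils.parse_function_calls` itself may work differently:

```python
import ast

def parse_function_calls(code: str):
    """Collect (name, args, kwargs) for each top-level call in an action string."""
    calls = []
    for node in ast.walk(ast.parse(code)):
        if isinstance(node, ast.Call) and isinstance(node.func, ast.Name):
            args = [ast.literal_eval(a) for a in node.args]
            kwargs = {kw.arg: ast.literal_eval(kw.value) for kw in node.keywords}
            calls.append((node.func.id, args, kwargs))
    return calls

print(parse_function_calls('mouse_click(34, 59)\nfill("a234", "test")'))
# [('mouse_click', [34, 59], {}), ('fill', ['a234', 'test'], {})]
```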
AgentLab | py:tests.analyze.test_inspect_results | program://AgentLab/module/tests.analyze.test_inspect_results#L1-L35 | module | tests.analyze.test_inspect_results | tests/analyze/test_inspect_results.py | python | 1 | 35 | 1 | 35 | from pathlib import Path
import shutil
import tempfile
import pandas as pd
from agentlab.analyze.inspect_results import get_study_summary
def test_get_study_summary():
with tempfile.TemporaryDirectory() as tmp_dir:
study_dir = Path(tmp_dir) / "test_study"
study_dir_original = Path(__file__).par... | 1b1e25989eb1e81a99bbac77634a76f275aa278a331ac3e97b1ddea85e224bd9 | false |
AgentLab | py:tests.analyze.test_inspect_results.test_get_study_summary | program://AgentLab/function/tests.analyze.test_inspect_results.test_get_study_summary#L9-L31 | function | test_get_study_summary | tests/analyze/test_inspect_results.py | python | 9 | 31 | 1 | 35 | from pathlib import Path
import shutil
import tempfile
import pandas as pd
from agentlab.analyze.inspect_results import get_study_summary
def test_get_study_summary():
with tempfile.TemporaryDirectory() as tmp_dir:
study_dir = Path(tmp_dir) / "test_study"
study_dir_original = Path(__file__).par... | 1b1e25989eb1e81a99bbac77634a76f275aa278a331ac3e97b1ddea85e224bd9 | false |
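The summary test copies a checked-in study directory into a scratch directory before analyzing it. A hypothetical completion of that truncated pattern; the fixture path mirrors the `tests/data/test_study` directory referenced later in this table, and the `get_study_summary` call shape is assumed:

```python
import shutil
import tempfile
from pathlib import Path

from agentlab.analyze.inspect_results import get_study_summary

def test_get_study_summary():
    with tempfile.TemporaryDirectory() as tmp_dir:
        study_dir = Path(tmp_dir) / "test_study"
        study_dir_original = Path(__file__).parent.parent / "data" / "test_study"
        shutil.copytree(study_dir_original, study_dir)  # never mutate the fixture
        summary = get_study_summary(study_dir)  # assumed call shape
        assert summary is not None
```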
AgentLab | py:tests.llm.test_huggingface_utils | program://AgentLab/module/tests.llm.test_huggingface_utils#L1-L32 | module | tests.llm.test_huggingface_utils | tests/llm/test_huggingface_utils.py | python | 1 | 32 | 1 | 32 | import pytest
from agentlab.llm.chat_api import HuggingFaceURLChatModel, make_system_message, make_user_message
from agentlab.llm.llm_utils import download_and_save_model
from agentlab.llm.prompt_templates import STARCHAT_PROMPT_TEMPLATE
# TODO(optimass): figure out a good model for all tests
@pytest.mark.skip(reas... | edea9db32c39d22cd6c155ff848d563256bff43e05e0fc6f3282e583888fe9d3 | false |
AgentLab | py:tests.llm.test_huggingface_utils.test_CustomLLMChatbot_locally | program://AgentLab/function/tests.llm.test_huggingface_utils.test_CustomLLMChatbot_locally#L11-L24 | function | test_CustomLLMChatbot_locally | tests/llm/test_huggingface_utils.py | python | 11 | 24 | 1 | 32 | import pytest
from agentlab.llm.chat_api import HuggingFaceURLChatModel, make_system_message, make_user_message
from agentlab.llm.llm_utils import download_and_save_model
from agentlab.llm.prompt_templates import STARCHAT_PROMPT_TEMPLATE
# TODO(optimass): figure out a good model for all tests
@pytest.mark.skip(reas... | edea9db32c39d22cd6c155ff848d563256bff43e05e0fc6f3282e583888fe9d3 | false |
AgentLab | py:tests.llm.test_huggingface_utils.test_download_and_save_model | program://AgentLab/function/tests.llm.test_huggingface_utils.test_download_and_save_model#L28-L32 | function | test_download_and_save_model | tests/llm/test_huggingface_utils.py | python | 28 | 32 | 8 | 32 |
@pytest.mark.skip(reason="Requires a local model checkpoint")
def test_CustomLLMChatbot_locally():
# model_path = "google/flan-t5-base" # remote model on HuggingFace Hub
model_path = "/mnt/ui_copilot/data_rw/models/starcoderbase-1b-ft" # local model in shared volum
chatbot = HuggingFaceURLChatModel(mod... | edea9db32c39d22cd6c155ff848d563256bff43e05e0fc6f3282e583888fe9d3 | false |
AgentLab | py:tests.llm.test_llm_configs | program://AgentLab/module/tests.llm.test_llm_configs#L1-L8 | module | tests.llm.test_llm_configs | tests/llm/test_llm_configs.py | python | 1 | 8 | 1 | 8 | from agentlab.llm.llm_configs import CHAT_MODEL_ARGS_DICT
from agentlab.llm.chat_api import BaseModelArgs
def test_llm_configs():
for _, args in CHAT_MODEL_ARGS_DICT.items():
assert isinstance(args, BaseModelArgs) | c905bf93d4a57f7fd18cc7000a71dbf31077cd1603c5b7f5641bd6000502b880 | false |
AgentLab | py:tests.llm.test_llm_configs.test_llm_configs | program://AgentLab/function/tests.llm.test_llm_configs.test_llm_configs#L5-L8 | function | test_llm_configs | tests/llm/test_llm_configs.py | python | 5 | 8 | 1 | 8 | from agentlab.llm.llm_configs import CHAT_MODEL_ARGS_DICT
from agentlab.llm.chat_api import BaseModelArgs
def test_llm_configs():
for _, args in CHAT_MODEL_ARGS_DICT.items():
assert isinstance(args, BaseModelArgs) | c905bf93d4a57f7fd18cc7000a71dbf31077cd1603c5b7f5641bd6000502b880 | false |
AgentLab | py:tests.llm.test_tracking | program://AgentLab/module/tests.llm.test_tracking#L1-L177 | module | tests.llm.test_tracking | tests/llm/test_tracking.py | python | 1 | 177 | 1 | 177 | import os
import time
from functools import partial
import pytest
import agentlab.llm.tracking as tracking
from agentlab.llm.chat_api import (
AzureChatModel,
OpenAIChatModel,
OpenRouterChatModel,
make_system_message,
make_user_message,
)
def test_get_action_decorator():
action, agent_info =... | 216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813 | false |
AgentLab | py:tests.llm.test_tracking.test_get_action_decorator | program://AgentLab/function/tests.llm.test_tracking.test_get_action_decorator#L17-L24 | function | test_get_action_decorator | tests/llm/test_tracking.py | python | 17 | 24 | 1 | 44 | import os
import time
from functools import partial
import pytest
import agentlab.llm.tracking as tracking
from agentlab.llm.chat_api import (
AzureChatModel,
OpenAIChatModel,
OpenRouterChatModel,
make_system_message,
make_user_message,
)
def test_get_action_decorator():
action, agent_info =... | 216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813 | false |
AgentLab | py:tests.llm.test_tracking.test_get_pricing_openrouter | program://AgentLab/function/tests.llm.test_tracking.test_get_pricing_openrouter#L39-L46 | function | test_get_pricing_openrouter | tests/llm/test_tracking.py | python | 39 | 46 | 19 | 66 | assert action == "action"
assert agent_info["stats"] == {
"input_tokens": 1,
"output_tokens": 1,
"cost": 1.0,
}
OPENROUTER_API_KEY_AVAILABLE = os.environ.get("OPENROUTER_API_KEY") is not None
OPENROUTER_MODELS = (
"anthropic/claude-3.5-sonnet",
"meta-llama/llama-3.1-405b-i... | 216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813 | false |
AgentLab | py:tests.llm.test_tracking.test_get_pricing_openai | program://AgentLab/function/tests.llm.test_tracking.test_get_pricing_openai#L49-L54 | function | test_get_pricing_openai | tests/llm/test_tracking.py | python | 49 | 54 | 29 | 74 | OPENROUTER_MODELS = (
"anthropic/claude-3.5-sonnet",
"meta-llama/llama-3.1-405b-instruct",
"meta-llama/llama-3.1-70b-instruct",
"meta-llama/llama-3.1-8b-instruct",
"google/gemini-pro-1.5",
)
@pytest.mark.skipif(not OPENROUTER_API_KEY_AVAILABLE, reason="OpenRouter API key is not available")
def tes... | 216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813 | false |
AgentLab | py:tests.llm.test_tracking.call_llm | program://AgentLab/function/tests.llm.test_tracking.call_llm#L57-L62 | function | call_llm | tests/llm/test_tracking.py | python | 57 | 62 | 37 | 82 |
@pytest.mark.skipif(not OPENROUTER_API_KEY_AVAILABLE, reason="OpenRouter API key is not available")
def test_get_pricing_openrouter():
pricing = tracking.get_pricing_openrouter()
assert isinstance(pricing, dict)
assert all(isinstance(v, dict) for v in pricing.values())
for model in OPENROUTER_MODELS:
... | 216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813 | false |
AgentLab | py:tests.llm.test_tracking.test_tracker | program://AgentLab/function/tests.llm.test_tracking.test_tracker#L65-L69 | function | test_tracker | tests/llm/test_tracking.py | python | 65 | 69 | 45 | 89 | assert isinstance(pricing[model], dict)
assert all(isinstance(v, float) for v in pricing[model].values())
def test_get_pricing_openai():
pricing = tracking.get_pricing_openai()
assert isinstance(pricing, dict)
assert all("prompt" in pricing[model] and "completion" in pricing[model] for mod... | 216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813 | false |
AgentLab | py:tests.llm.test_tracking.test_imbricate_trackers | program://AgentLab/function/tests.llm.test_tracking.test_imbricate_trackers#L72-L85 | function | test_imbricate_trackers | tests/llm/test_tracking.py | python | 72 | 85 | 52 | 105 | assert all("prompt" in pricing[model] and "completion" in pricing[model] for model in pricing)
assert all(isinstance(pricing[model]["prompt"], float) for model in pricing)
assert all(isinstance(pricing[model]["completion"], float) for model in pricing)
def call_llm():
if hasattr(tracking.TRACKER, "ins... | 216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813 | false |
AgentLab | py:tests.llm.test_tracking.test_threaded_trackers | program://AgentLab/function/tests.llm.test_tracking.test_threaded_trackers#L88-L115 | function | test_threaded_trackers | tests/llm/test_tracking.py | python | 88 | 115 | 68 | 135 |
assert tracker.stats["cost"] == 1
def test_imbricate_trackers():
with tracking.set_tracker() as tracker4:
with tracking.set_tracker() as tracker1:
_, _ = call_llm()
with tracking.set_tracker() as tracker3:
_, _ = call_llm()
_, _ = call_llm()
wit... | 216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813 | false |
AgentLab | py:tests.llm.test_tracking.test_openai_chat_model | program://AgentLab/function/tests.llm.test_tracking.test_openai_chat_model#L123-L135 | function | test_openai_chat_model | tests/llm/test_tracking.py | python | 123 | 135 | 103 | 155 | results[1] = tracker.stats
results = [None] * 2
threads = [
threading.Thread(target=partial(thread_1, results=results)),
threading.Thread(target=partial(thread_2, results=results)),
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
... | 216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813 | false |
AgentLab | py:tests.llm.test_tracking.test_azure_chat_model | program://AgentLab/function/tests.llm.test_tracking.test_azure_chat_model#L148-L160 | function | test_azure_chat_model | tests/llm/test_tracking.py | python | 148 | 160 | 128 | 177 | messages = [
make_system_message("You are an helpful virtual assistant"),
make_user_message("Give the third prime number"),
]
with tracking.set_tracker() as tracker:
answer = chat_model(messages)
assert "5" in answer.get("content")
assert tracker.stats["cost"] > 0
AZURE_OPE... | 216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813 | false |
AgentLab | py:tests.llm.test_tracking.test_openrouter_chat_model | program://AgentLab/function/tests.llm.test_tracking.test_openrouter_chat_model#L165-L177 | function | test_openrouter_chat_model | tests/llm/test_tracking.py | python | 165 | 177 | 145 | 177 | @pytest.mark.skipif(
not AZURE_OPENAI_API_KEY_AVAILABLE, reason="Azure OpenAI API key is not available"
)
def test_azure_chat_model():
chat_model = AzureChatModel(model_name="gpt-4.1-nano", deployment_name="gpt-4.1-nano")
assert chat_model.input_cost > 0
assert chat_model.output_cost > 0
messages =... | 216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813 | false |
AgentLab | py:tests.llm.test_tracking.thread_1 | program://AgentLab/function/tests.llm.test_tracking.thread_1#L92-L97 | function | thread_1 | tests/llm/test_tracking.py | python | 92 | 97 | 72 | 117 | def test_imbricate_trackers():
with tracking.set_tracker() as tracker4:
with tracking.set_tracker() as tracker1:
_, _ = call_llm()
with tracking.set_tracker() as tracker3:
_, _ = call_llm()
_, _ = call_llm()
with tracking.set_tracker() as tracker1bis:
... | 216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813 | false |
AgentLab | py:tests.llm.test_tracking.thread_2 | program://AgentLab/function/tests.llm.test_tracking.thread_2#L99-L103 | function | thread_2 | tests/llm/test_tracking.py | python | 99 | 103 | 79 | 123 | with tracking.set_tracker() as tracker1bis:
_, _ = call_llm()
assert tracker1.stats["cost"] == 1
assert tracker1bis.stats["cost"] == 1
assert tracker3.stats["cost"] == 3
assert tracker4.stats["cost"] == 4
def test_threaded_trackers():
"""thread_2 occurs in the middle o... | 216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813 | false |
AgentLab | py:tests.llm.test_litellm_api | program://AgentLab/module/tests.llm.test_litellm_api#L1-L167 | module | tests.llm.test_litellm_api | tests/llm/test_litellm_api.py | python | 1 | 167 | 1 | 167 | import os
from functools import partial
import pytest
from agentlab.llm.litellm_api import LiteLLMModelArgs
from agentlab.llm.response_api import APIPayload, LLMOutput
chat_api_tools = [
{
"type": "function",
"name": "get_weather",
"description": "Get the current weather in a given locatio... | 3d536844800bbc74d0b13ec4b727a271d2cc56690613d4878114cf35d293b527 | false |
AgentLab | py:tests.llm.test_litellm_api.add_user_messages | program://AgentLab/function/tests.llm.test_litellm_api.add_user_messages#L58-L62 | function | add_user_messages | tests/llm/test_litellm_api.py | python | 58 | 62 | 38 | 82 | "description": "The location to get the time for.",
}
},
"required": ["location"],
},
},
]
# test_config (setting name, BaseModelArgs, model_name, tools)
tool_test_configs = [
("gpt-4.1", LiteLLMModelArgs, "openai/gpt-4.1-2025-04-14", chat_ap... | 3d536844800bbc74d0b13ec4b727a271d2cc56690613d4878114cf35d293b527 | false |
AgentLab | py:tests.llm.test_litellm_api.test_multi_action_tool_calls | program://AgentLab/function/tests.llm.test_litellm_api.test_multi_action_tool_calls#L67-L103 | function | test_multi_action_tool_calls | tests/llm/test_litellm_api.py | python | 67 | 103 | 47 | 123 | # test_config (setting name, BaseModelArgs, model_name, tools)
tool_test_configs = [
("gpt-4.1", LiteLLMModelArgs, "openai/gpt-4.1-2025-04-14", chat_api_tools),
# ("claude-3", LiteLLMModelArgs, "anthropic/claude-3-haiku-20240307", anthropic_tools), # fails for parallel tool calls
# ("claude-3.7", LiteLLMM... | 3d536844800bbc74d0b13ec4b727a271d2cc56690613d4878114cf35d293b527 | false |
AgentLab | py:tests.llm.test_litellm_api.test_single_tool_call | program://AgentLab/function/tests.llm.test_litellm_api.test_single_tool_call#L110-L137 | function | test_single_tool_call | tests/llm/test_litellm_api.py | python | 110 | 137 | 90 | 157 | APIPayload(messages=messages, tools=tools, tool_choice=tool_choice)
)
num_tool_calls = len(response.tool_calls) if response.tool_calls else 0
row = {
"model": name,
"checkpoint": checkpoint_name,
"tool_choice": t... | 3d536844800bbc74d0b13ec4b727a271d2cc56690613d4878114cf35d293b527 | false |
AgentLab | py:tests.llm.test_litellm_api.test_force_tool_call | program://AgentLab/function/tests.llm.test_litellm_api.test_force_tool_call#L142-L161 | function | test_force_tool_call | tests/llm/test_litellm_api.py | python | 142 | 161 | 122 | 167 | llm_class = partial(llm_class, use_only_first_toolcall=True)
model_args = llm_class(model_name=checkpoint_name, max_new_tokens=200, temperature=None)
llm, msg_builder = model_args.make_model(), model_args.get_message_builder()
messages = add_user_messages(msg_builder)
... | 3d536844800bbc74d0b13ec4b727a271d2cc56690613d4878114cf35d293b527 | false |
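The LiteLLM tests build their `tools` list from OpenAI-style function schemas like the truncated `get_weather` entry above. A complete, hypothetical example of one such schema in the flat format the rows show; the repo's exact descriptions and property lists may differ:

```python
get_weather_tool = {
    "type": "function",
    "name": "get_weather",
    "description": "Get the current weather in a given location.",
    "parameters": {
        "type": "object",
        "properties": {
            "location": {
                "type": "string",
                "description": "The location to get the weather for.",
            }
        },
        "required": ["location"],
    },
}
```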
AgentLab | py:tests.llm.test_response_api | program://AgentLab/module/tests.llm.test_response_api#L1-L803 | module | tests.llm.test_response_api | tests/llm/test_response_api.py | python | 1 | 803 | 1 | 803 | import os
from typing import Any, Dict, List, Optional
from unittest.mock import MagicMock, patch
import anthropic
import openai
import pytest
from agentlab.llm import tracking
from agentlab.llm.response_api import (
AnthropicAPIMessageBuilder,
APIPayload,
ClaudeResponseModelArgs,
LLMOutput,
OpenA... | 81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3 | true |
AgentLab | py:tests.llm.test_response_api.create_mock_openai_chat_completion | program://AgentLab/function/tests.llm.test_response_api.create_mock_openai_chat_completion#L23-L81 | function | create_mock_openai_chat_completion | tests/llm/test_response_api.py | python | 23 | 81 | 3 | 101 | from unittest.mock import MagicMock, patch
import anthropic
import openai
import pytest
from agentlab.llm import tracking
from agentlab.llm.response_api import (
AnthropicAPIMessageBuilder,
APIPayload,
ClaudeResponseModelArgs,
LLMOutput,
OpenAIChatCompletionAPIMessageBuilder,
OpenAIChatModelAr... | 81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3 | false |
AgentLab | py:tests.llm.test_response_api.create_mock_anthropic_response | program://AgentLab/function/tests.llm.test_response_api.create_mock_anthropic_response#L148-L173 | function | create_mock_anthropic_response | tests/llm/test_response_api.py | python | 148 | 173 | 128 | 193 | ]
anthropic_tools = [
{
"name": "get_weather",
"description": "Get the current weather in a given location.",
"input_schema": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "Th... | 81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3 | false |
AgentLab | py:tests.llm.test_response_api.create_mock_openai_responses_api_response | program://AgentLab/function/tests.llm.test_response_api.create_mock_openai_responses_api_response#L176-L218 | function | create_mock_openai_responses_api_response | tests/llm/test_response_api.py | python | 176 | 218 | 156 | 238 | if text_content:
text_block = MagicMock(spec=anthropic.types.TextBlock)
text_block.type = "text"
text_block.text = text_content
response.content.append(text_block)
if tool_use:
tool_use_block = MagicMock(spec=anthropic.types.ToolUseBlock)
tool_use_block.type = "to... | 81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3 | false |
AgentLab | py:tests.llm.test_response_api.test_openai_response_api_message_builder_text | program://AgentLab/function/tests.llm.test_response_api.test_openai_response_api_message_builder_text#L224-L230 | function | test_openai_response_api_message_builder_text | tests/llm/test_response_api.py | python | 224 | 230 | 204 | 250 | output_item_mock.summary.append(summary_text_mock)
response_mock.output.append(output_item_mock)
# Token usage for pricing tracking
response_mock.usage = MagicMock(spec=openai.types.responses.response.ResponseUsage)
response_mock.usage.input_tokens = input_tokens
respon... | 81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3 | false |
AgentLab | py:tests.llm.test_response_api.test_openai_response_api_message_builder_image | program://AgentLab/function/tests.llm.test_response_api.test_openai_response_api_message_builder_image#L233-L241 | function | test_openai_response_api_message_builder_image | tests/llm/test_response_api.py | python | 233 | 241 | 213 | 261 | response_mock.usage.completion_tokens = output_tokens
input_tokens_details_mock = MagicMock()
input_tokens_details_mock.cached_tokens = 0
response_mock.usage.input_tokens_details = input_tokens_details_mock
return response_mock
# --- Test MessageBuilders ---
def test_openai_response_api_message... | 81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3 | false |
AgentLab | py:tests.llm.test_response_api.test_anthropic_api_message_builder_text | program://AgentLab/function/tests.llm.test_response_api.test_anthropic_api_message_builder_text#L244-L250 | function | test_anthropic_api_message_builder_text | tests/llm/test_response_api.py | python | 244 | 250 | 224 | 270 | def test_openai_response_api_message_builder_text():
builder = OpenAIResponseAPIMessageBuilder.user()
builder.add_text("Hello, world!")
messages = builder.prepare_message()
assert len(messages) == 1
assert messages[0]["role"] == "user"
assert messages[0]["content"] == [{"type": "input_text", "te... | 81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3 | false |
AgentLab | py:tests.llm.test_response_api.test_anthropic_api_message_builder_image | program://AgentLab/function/tests.llm.test_response_api.test_anthropic_api_message_builder_image#L253-L264 | function | test_anthropic_api_message_builder_image | tests/llm/test_response_api.py | python | 253 | 264 | 233 | 284 | def test_openai_response_api_message_builder_image():
builder = OpenAIResponseAPIMessageBuilder.user()
builder.add_image("data:image/png;base64,SIMPLEBASE64STRING")
messages = builder.prepare_message()
assert len(messages) == 1
assert messages[0]["role"] == "user"
assert messages[0]["content"] =... | 81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3 | false |
AgentLab | py:tests.llm.test_response_api.test_openai_chat_completion_api_message_builder_text | program://AgentLab/function/tests.llm.test_response_api.test_openai_chat_completion_api_message_builder_text#L267-L274 | function | test_openai_chat_completion_api_message_builder_text | tests/llm/test_response_api.py | python | 267 | 274 | 247 | 294 | messages = builder.prepare_message()
assert len(messages) == 1
assert messages[0]["role"] == "user"
assert messages[0]["content"] == [{"type": "text", "text": "Hello, Anthropic!"}]
def test_anthropic_api_message_builder_image():
builder = AnthropicAPIMessageBuilder.user()
builder.add_image("da... | 81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3 | false |
AgentLab | py:tests.llm.test_response_api.test_openai_chat_completion_api_message_builder_image | program://AgentLab/function/tests.llm.test_response_api.test_openai_chat_completion_api_message_builder_image#L277-L286 | function | test_openai_chat_completion_api_message_builder_image | tests/llm/test_response_api.py | python | 277 | 286 | 257 | 306 | assert len(messages) == 1
assert messages[0]["role"] == "user"
assert len(messages[0]["content"]) == 1
image_content = messages[0]["content"][0]
assert image_content["type"] == "image"
assert image_content["source"]["type"] == "base64"
assert image_content["source"]["media_type"] == "image/p... | 81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3 | false |
AgentLab | py:tests.llm.test_response_api.test_openai_chat_completion_model_parse_and_cost | program://AgentLab/function/tests.llm.test_response_api.test_openai_chat_completion_model_parse_and_cost#L289-L328 | function | test_openai_chat_completion_model_parse_and_cost | tests/llm/test_response_api.py | python | 289 | 328 | 269 | 348 | builder.add_text("Hello, ChatCompletion!")
messages = builder.prepare_message()
assert len(messages) == 1
assert messages[0]["role"] == "user"
assert messages[0]["content"] == [{"type": "text", "text": "Hello, ChatCompletion!"}]
def test_openai_chat_completion_api_message_builder_image():
bui... | 81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3 | false |
AgentLab | py:tests.llm.test_response_api.test_claude_response_model_parse_and_cost | program://AgentLab/function/tests.llm.test_response_api.test_claude_response_model_parse_and_cost#L331-L357 | function | test_claude_response_model_parse_and_cost | tests/llm/test_response_api.py | python | 331 | 357 | 311 | 377 | ) as mock_create:
with tracking.set_tracker() as global_tracker:
messages = [
OpenAIChatCompletionAPIMessageBuilder.user().add_text(
"What's the weather in Paris?"
)
]
payload = APIPayload(messages=messages)
... | 81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3 | false |
AgentLab | py:tests.llm.test_response_api.test_openai_response_model_parse_and_cost | program://AgentLab/function/tests.llm.test_response_api.test_openai_response_model_parse_and_cost#L360-L401 | function | test_openai_response_model_parse_and_cost | tests/llm/test_response_api.py | python | 360 | 401 | 340 | 421 | )
with patch.object(
model.client.messages, "create", return_value=mock_anthropic_api_response
) as mock_create:
with tracking.set_tracker() as global_tracker:
messages = [AnthropicAPIMessageBuilder.user().add_text("Search for latest news")]
payload = APIPayload(mess... | 81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3 | false |
AgentLab | py:tests.llm.test_response_api.test_openai_chat_completion_model_pricy_call | program://AgentLab/function/tests.llm.test_response_api.test_openai_chat_completion_model_pricy_call#L409-L433 | function | test_openai_chat_completion_model_pricy_call | tests/llm/test_response_api.py | python | 409 | 433 | 389 | 453 | parsed_output = model(payload)
mock_create_method.assert_called_once()
fn_calls = [
content
for content in parsed_output.tool_calls.raw_calls.output
if content.type == "function_call"
]
assert parsed_output.action == "get_current_weather(location='Boston, MA', unit='... | 81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3 | false |
AgentLab | py:tests.llm.test_response_api.test_claude_response_model_pricy_call | program://AgentLab/function/tests.llm.test_response_api.test_claude_response_model_pricy_call#L438-L460 | function | test_claude_response_model_pricy_call | tests/llm/test_response_api.py | python | 438 | 460 | 418 | 480 | model = args.make_model()
with tracking.set_tracker() as global_tracker:
messages = [
OpenAIChatCompletionAPIMessageBuilder.user().add_text("What is the weather in Paris?")
]
payload = APIPayload(messages=messages, tools=tools, tool_choice="required")
parsed_output =... | 81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3 | false |
AgentLab | py:tests.llm.test_response_api.test_openai_response_model_pricy_call | program://AgentLab/function/tests.llm.test_response_api.test_openai_response_model_pricy_call#L465-L488 | function | test_openai_response_model_pricy_call | tests/llm/test_response_api.py | python | 465 | 488 | 445 | 508 | )
tools = anthropic_tools
model = args.make_model()
with tracking.set_tracker() as global_tracker:
messages = [AnthropicAPIMessageBuilder.user().add_text("What is the weather in Paris?")]
payload = APIPayload(messages=messages, tools=tools)
parsed_output = model(payload)
as... | 81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3 | false |
AgentLab | py:tests.llm.test_response_api.test_openai_response_model_with_multiple_messages_and_cost_tracking | program://AgentLab/function/tests.llm.test_response_api.test_openai_response_model_with_multiple_messages_and_cost_tracking#L493-L542 | function | test_openai_response_model_with_multiple_messages_and_cost_tracking | tests/llm/test_response_api.py | python | 493 | 542 | 473 | 562 | model = args.make_model()
with tracking.set_tracker() as global_tracker:
messages = [
OpenAIResponseAPIMessageBuilder.user().add_text("What is the weather in Paris?")
]
payload = APIPayload(messages=messages, tools=tools)
parsed_output = model(payload)
assert pa... | 81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3 | false |
AgentLab | py:tests.llm.test_response_api.test_openai_chat_completion_model_with_multiple_messages_and_cost_tracking | program://AgentLab/function/tests.llm.test_response_api.test_openai_chat_completion_model_with_multiple_messages_and_cost_tracking#L547-L617 | function | test_openai_chat_completion_model_with_multiple_messages_and_cost_tracking | tests/llm/test_response_api.py | python | 547 | 617 | 527 | 637 | delta_output = tracker.stats["output_tokens"] - prev_output
delta_cost = tracker.stats["cost"] - prev_cost
assert prev_input > 0
assert prev_output > 0
assert prev_cost > 0
assert parsed.raw_response is not None
assert (
parsed.action == """get_weather(location='Delhi', unit... | 81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3 | false |
AgentLab | py:tests.llm.test_response_api.test_claude_model_with_multiple_messages_pricy_call | program://AgentLab/function/tests.llm.test_response_api.test_claude_model_with_multiple_messages_pricy_call#L622-L684 | function | test_claude_model_with_multiple_messages_pricy_call | tests/llm/test_response_api.py | python | 622 | 684 | 602 | 704 | delta_output = tracker.stats["output_tokens"] - prev_output
delta_cost = tracker.stats["cost"] - prev_cost
assert prev_input > 0
assert prev_output > 0
assert prev_cost > 0
assert parsed.raw_response is not None
assert (
parsed.action == """get_weather(location='Delhi')"""
... | 81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3 | false |
AgentLab | py:tests.llm.test_response_api.test_multi_action_tool_calls | program://AgentLab/function/tests.llm.test_response_api.test_multi_action_tool_calls#L690-L750 | function | test_multi_action_tool_calls | tests/llm/test_response_api.py | python | 690 | 750 | 670 | 770 | delta_cost = global_tracker.stats["cost"] - prev_cost
assert prev_input > 0, "Expected previous input tokens to be greater than 0"
assert prev_output > 0, "Expected previous output tokens to be greater than 0"
assert prev_cost > 0, "Expected previous cost value to be greater than 0"
assert llm_... | 81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3 | false |
AgentLab | py:tests.llm.test_response_api.test_tool_call_to_python_code | program://AgentLab/function/tests.llm.test_response_api.test_tool_call_to_python_code#L791-L798 | function | test_tool_call_to_python_code | tests/llm/test_response_api.py | python | 791 | 798 | 771 | 803 | {
"infinity": float("inf"),
"nan": float("nan"),
"negative_zero": -0.0,
"scientific": 1.23e-45,
},
"complex_call(infinity=inf, nan=nan, negative_zero=-0.0, scientific=1.23e-45)",
),
# 4. Deeply nested structures that could stress repr()
... | 81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3 | false |
AgentLab | py:tests.llm.test_response_api.add_user_messages | program://AgentLab/function/tests.llm.test_response_api.add_user_messages#L710-L714 | function | add_user_messages | tests/llm/test_response_api.py | python | 710 | 714 | 690 | 734 | def test_multi_action_tool_calls():
"""
Test that the model can produce multiple tool calls in parallel.
Uncomment commented lines to see the full behaviour of models and tool choices.
"""
# test_config (setting name, BaseModelArgs, model_name, tools)
tool_test_configs = [
(
... | 81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3 | false |
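Several `test_response_api` rows patch the SDK client and return `MagicMock` responses so parsing and cost tracking can be tested offline. A self-contained sketch of that mocking pattern using only the standard library; the real tests mock `openai` and `anthropic` response types:

```python
from unittest.mock import MagicMock, patch

class Client:
    """Stand-in for an SDK client with a create() endpoint."""
    def create(self, **kwargs):
        raise RuntimeError("would hit the network")

client = Client()
mock_response = MagicMock()
mock_response.usage.input_tokens = 10
mock_response.usage.output_tokens = 5

with patch.object(client, "create", return_value=mock_response) as mock_create:
    response = client.create(model="some-model", messages=[])
    mock_create.assert_called_once()
    assert response.usage.input_tokens == 10  # cost is priced from mocked usage
```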
AgentLab | py:tests.llm.test_chat_api | program://AgentLab/module/tests.llm.test_chat_api#L1-L93 | module | tests.llm.test_chat_api | tests/llm/test_chat_api.py | python | 1 | 93 | 1 | 93 | import os
import pytest
from agentlab.llm.chat_api import (
AnthropicModelArgs,
AzureModelArgs,
OpenAIModelArgs,
make_system_message,
make_user_message,
)
# TODO(optimass): figure out a good model for all tests
if "AGENTLAB_LOCAL_TEST" in os.environ:
skip_tests = os.environ["AGENTLAB_LOCAL_... | 1827bbbe5c8ea015a003ddf56dcabfe4f2dadd506ae591ed9bd0a089f03d664b | false |
AgentLab | py:tests.llm.test_chat_api.test_api_model_args_azure | program://AgentLab/function/tests.llm.test_chat_api.test_api_model_args_azure#L27-L44 | function | test_api_model_args_azure | tests/llm/test_chat_api.py | python | 27 | 44 | 7 | 64 | AzureModelArgs,
OpenAIModelArgs,
make_system_message,
make_user_message,
)
# TODO(optimass): figure out a good model for all tests
if "AGENTLAB_LOCAL_TEST" in os.environ:
skip_tests = os.environ["AGENTLAB_LOCAL_TEST"] != "1"
else:
skip_tests = False
@pytest.mark.pricy
@pytest.mark.skipif(sk... | 1827bbbe5c8ea015a003ddf56dcabfe4f2dadd506ae591ed9bd0a089f03d664b | false |
AgentLab | py:tests.llm.test_chat_api.test_api_model_args_openai | program://AgentLab/function/tests.llm.test_chat_api.test_api_model_args_openai#L50-L66 | function | test_api_model_args_openai | tests/llm/test_chat_api.py | python | 50 | 66 | 30 | 86 | deployment_name="gpt-4.1-nano",
max_total_tokens=8192,
max_input_tokens=8192 - 512,
max_new_tokens=512,
temperature=1e-1,
)
model = model_args.make_model()
messages = [
make_system_message("You are an helpful virtual assistant"),
make_user_message("Gi... | 1827bbbe5c8ea015a003ddf56dcabfe4f2dadd506ae591ed9bd0a089f03d664b | false |
AgentLab | py:tests.llm.test_chat_api.test_api_model_args_anthropic | program://AgentLab/function/tests.llm.test_chat_api.test_api_model_args_anthropic#L74-L89 | function | test_api_model_args_anthropic | tests/llm/test_chat_api.py | python | 74 | 89 | 54 | 93 | max_input_tokens=8192 - 512,
max_new_tokens=512,
temperature=1e-1,
)
model = model_args.make_model()
messages = [
make_system_message("You are an helpful virtual assistant"),
make_user_message("Give the third prime number"),
]
answer = model(messages)
as... | 1827bbbe5c8ea015a003ddf56dcabfe4f2dadd506ae591ed9bd0a089f03d664b | false |
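The `test_chat_api` module gates expensive API tests behind the `AGENTLAB_LOCAL_TEST` environment variable plus a `pricy` marker. A condensed, hypothetical equivalent of that gating logic:

```python
import os

import pytest

# Equivalent to the two-branch logic in the rows above: tests run by default,
# and are skipped only when AGENTLAB_LOCAL_TEST is set to something other than "1".
skip_tests = os.environ.get("AGENTLAB_LOCAL_TEST", "1") != "1"

@pytest.mark.pricy  # custom marker; assumed to be registered in pytest config
@pytest.mark.skipif(skip_tests, reason="AGENTLAB_LOCAL_TEST is not enabled")
def test_expensive_api_call():
    ...
```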
AgentLab | py:tests.llm.test_llm_utils | program://AgentLab/module/tests.llm.test_llm_utils#L1-L280 | module | tests.llm.test_llm_utils | tests/llm/test_llm_utils.py | python | 1 | 280 | 1 | 280 | import warnings
from typing import Literal
from unittest.mock import Mock
import httpx
import pytest
from openai import RateLimitError
from agentlab.llm import llm_utils
from agentlab.llm.chat_api import make_system_message
yaml_str = """Analysis:
This is the analysis
Summary: This is the summary
Confidence Score:... | da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447 | false |
AgentLab | py:tests.llm.test_llm_utils.test_yaml_parser | program://AgentLab/function/tests.llm.test_llm_utils.test_yaml_parser#L21-L26 | function | test_yaml_parser | tests/llm/test_llm_utils.py | python | 21 | 26 | 1 | 46 | import warnings
from typing import Literal
from unittest.mock import Mock
import httpx
import pytest
from openai import RateLimitError
from agentlab.llm import llm_utils
from agentlab.llm.chat_api import make_system_message
yaml_str = """Analysis:
This is the analysis
Summary: This is the summary
Confidence Score:... | da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447 | false |
AgentLab | py:tests.llm.test_llm_utils.test_truncate_tokens | program://AgentLab/function/tests.llm.test_llm_utils.test_truncate_tokens#L29-L32 | function | test_truncate_tokens | tests/llm/test_llm_utils.py | python | 29 | 32 | 9 | 52 | from agentlab.llm import llm_utils
from agentlab.llm.chat_api import make_system_message
yaml_str = """Analysis:
This is the analysis
Summary: This is the summary
Confidence Score: 7
"""
def test_yaml_parser():
ans, _, _ = llm_utils.yaml_parser(yaml_str)
print(ans)
assert ans["Analysis"] == "This is th... | da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447 | false |
AgentLab | py:tests.llm.test_llm_utils.test_count_tokens | program://AgentLab/function/tests.llm.test_llm_utils.test_count_tokens#L35-L37 | function | test_count_tokens | tests/llm/test_llm_utils.py | python | 35 | 37 | 15 | 57 | Summary: This is the summary
Confidence Score: 7
"""
def test_yaml_parser():
ans, _, _ = llm_utils.yaml_parser(yaml_str)
print(ans)
assert ans["Analysis"] == "This is the analysis"
assert ans["Summary"] == "This is the summary"
assert ans["Confidence Score"] == 7
def test_truncate_tokens():
... | da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447 | false |
AgentLab | py:tests.llm.test_llm_utils.test_json_parser | program://AgentLab/function/tests.llm.test_llm_utils.test_json_parser#L40-L60 | function | test_json_parser | tests/llm/test_llm_utils.py | python | 40 | 60 | 20 | 80 |
def test_yaml_parser():
ans, _, _ = llm_utils.yaml_parser(yaml_str)
print(ans)
assert ans["Analysis"] == "This is the analysis"
assert ans["Summary"] == "This is the summary"
assert ans["Confidence Score"] == 7
def test_truncate_tokens():
text = "This is a simple test."
truncated = llm_ut... | da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447 | false |
AgentLab | py:tests.llm.test_llm_utils.test_compress_string | program://AgentLab/function/tests.llm.test_llm_utils.test_compress_string#L63-L91 | function | test_compress_string | tests/llm/test_llm_utils.py | python | 63 | 91 | 43 | 111 |
# deactivate warnings
warnings.filterwarnings("ignore")
value, valid, retry_message = llm_utils.json_parser(message)
assert value == {"test": "Hello, World!"}
assert valid == True
assert retry_message == ""
# Testing invalid JSON
message = '{"test": "Hello, World!"' # missing closing... | da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447 | false |
AgentLab | py:tests.llm.test_llm_utils.MockChatOpenAI | program://AgentLab/class/tests.llm.test_llm_utils.MockChatOpenAI#L95-L100 | class | MockChatOpenAI | tests/llm/test_llm_utils.py | python | 95 | 100 | 75 | 120 |
expected_output = """\
<definitions>
§-0:
This is a test
for paragraph.
¶-0:
This is a second test.
</definitions>
§-0
¶-0
hola
¶-0
§-0"""
compressed_text = llm_utils.compress_string(text)
assert compressed_text == expected_output
# Mock ChatOpenAI class
class MockChatOpenAI:
def call(self, messages... | da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447 | false |
AgentLab | py:tests.llm.test_llm_utils.mock_parser | program://AgentLab/function/tests.llm.test_llm_utils.mock_parser#L103-L107 | function | mock_parser | tests/llm/test_llm_utils.py | python | 103 | 107 | 83 | 127 | </definitions>
§-0
¶-0
hola
¶-0
§-0"""
compressed_text = llm_utils.compress_string(text)
assert compressed_text == expected_output
# Mock ChatOpenAI class
class MockChatOpenAI:
def call(self, messages):
return "mocked response"
def __call__(self, messages):
return self.call(messages)... | da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447 | false |
AgentLab | py:tests.llm.test_llm_utils.mock_rate_limit_error | program://AgentLab/function/tests.llm.test_llm_utils.mock_rate_limit_error#L110-L126 | function | mock_rate_limit_error | tests/llm/test_llm_utils.py | python | 110 | 126 | 90 | 146 | compressed_text = llm_utils.compress_string(text)
assert compressed_text == expected_output
# Mock ChatOpenAI class
class MockChatOpenAI:
def call(self, messages):
return "mocked response"
def __call__(self, messages):
return self.call(messages)
def mock_parser(answer):
if answe... | da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447 | false |
AgentLab | py:tests.llm.test_llm_utils.test_successful_parse_before_max_retries | program://AgentLab/function/tests.llm.test_llm_utils.test_successful_parse_before_max_retries#L173-L189 | function | test_successful_parse_before_max_retries | tests/llm/test_llm_utils.py | python | 173 | 189 | 153 | 209 | # side_effect=[
# mock_rate_limit_error("Rate limit reached. Please try again in 2s."),
# make_system_message("correct content"),
# ]
# )
# result = llm_utils.retry(
# mock_chat,
# [],
# n_retry=4,
# parser=mock_parser,
# rate_limi... | da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447 | false |
AgentLab | py:tests.llm.test_llm_utils.test_unsuccessful_parse_before_max_retries | program://AgentLab/function/tests.llm.test_llm_utils.test_unsuccessful_parse_before_max_retries#L192-L207 | function | test_unsuccessful_parse_before_max_retries | tests/llm/test_llm_utils.py | python | 192 | 207 | 172 | 227 | # Mock a successful parser response to test function exit before max retries
def test_successful_parse_before_max_retries():
mock_chat = MockChatOpenAI()
# mock a chat that returns the wrong content the first 2 time, but the right
# content on the 3rd time
mock_chat.call = Mock(
side_effect=[
... | da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447 | false |
AgentLab | py:tests.llm.test_llm_utils.test_retry_parse_raises | program://AgentLab/function/tests.llm.test_llm_utils.test_retry_parse_raises#L210-L216 | function | test_retry_parse_raises | tests/llm/test_llm_utils.py | python | 210 | 216 | 190 | 236 |
def test_unsuccessful_parse_before_max_retries():
mock_chat = MockChatOpenAI()
# mock a chat that returns the wrong content the first 2 time, but the right
# content on the 3rd time
mock_chat.call = Mock(
side_effect=[
make_system_message("wrong content"),
make_system... | da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447 | false |
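The retry tests drive a mocked chat model that answers wrongly a few times before succeeding, and expect `llm_utils.retry` to raise `ParseError` once retries are exhausted. A minimal, hypothetical retry loop with that contract; the real helper also handles rate limits and feeds retry messages back to the chat:

```python
class ParseError(Exception):
    pass

def retry(chat, messages, n_retry, parser):
    """Call chat() until parser() accepts the answer, or raise after n_retry tries."""
    for _ in range(n_retry):
        answer = chat(messages)
        try:
            return parser(answer)
        except ParseError:
            continue  # the real helper appends a retry message here
    raise ParseError(f"no parsable answer after {n_retry} retries")
```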
AgentLab | py:tests.llm.test_llm_utils.test_extract_code_blocks | program://AgentLab/function/tests.llm.test_llm_utils.test_extract_code_blocks#L219-L244 | function | test_extract_code_blocks | tests/llm/test_llm_utils.py | python | 219 | 244 | 199 | 264 | make_system_message("wrong content"),
make_system_message("wrong content"),
make_system_message("correct content"),
]
)
with pytest.raises(llm_utils.ParseError):
result = llm_utils.retry(mock_chat, llm_utils.Discussion(), 2, mock_parser)
assert mock_chat.... | da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447 | false |
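`extract_code_blocks` splits fenced blocks out of an LLM answer into `(language, code)` pairs. A hypothetical regex-based sketch of that behavior; the repo's implementation may differ (for instance, it also catches inline triple-backtick spans):

```python
import re

FENCE_RE = re.compile(r"```(\w*)\n(.*?)```", re.DOTALL)

def extract_code_blocks(text: str):
    return [(lang, body.strip()) for lang, body in FENCE_RE.findall(text)]

text = 'Intro\n```python\nprint("hi")\n```\nthen\n```\nplain text code\n```\n'
print(extract_code_blocks(text))
# [('python', 'print("hi")'), ('', 'plain text code')]
```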
AgentLab | py:tests.llm.test_llm_utils.test_message_merge_only_text | program://AgentLab/function/tests.llm.test_llm_utils.test_message_merge_only_text#L247-L254 | function | test_message_merge_only_text | tests/llm/test_llm_utils.py | python | 247 | 254 | 227 | 274 | ```
More code without a language.
```
Another block of code:
```javascript
console.log("Hello, world!");
```
An inline code block ```click()```
"""
expected_output = [
("python", 'def hello_world():\n print("Hello, world!")'),
("", "More code without a language."),
("javascript", 'consol... | da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447 | false |
AgentLab | py:tests.llm.test_llm_utils.test_message_merge_text_image | program://AgentLab/function/tests.llm.test_llm_utils.test_message_merge_text_image#L257-L270 | function | test_message_merge_text_image | tests/llm/test_llm_utils.py | python | 257 | 270 | 237 | 280 | expected_output = [
("python", 'def hello_world():\n print("Hello, world!")'),
("", "More code without a language."),
("javascript", 'console.log("Hello, world!");'),
("", "click()"),
]
assert llm_utils.extract_code_blocks(text) == expected_output
def test_message_merge... | da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447 | false |
AgentLab | py:tests.llm.test_llm_utils.call | program://AgentLab/function/tests.llm.test_llm_utils.call#L96-L97 | function | call | tests/llm/test_llm_utils.py | python | 96 | 97 | 76 | 117 | expected_output = """\
<definitions>
§-0:
This is a test
for paragraph.
¶-0:
This is a second test.
</definitions>
§-0
¶-0
hola
¶-0
§-0"""
compressed_text = llm_utils.compress_string(text)
assert compressed_text == expected_output
# Mock ChatOpenAI class
class MockChatOpenAI:
def call(self, messages)... | da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447 | false |
AgentLab | py:tests.llm.test_llm_utils.__call__ | program://AgentLab/function/tests.llm.test_llm_utils.__call__#L99-L100 | function | __call__ | tests/llm/test_llm_utils.py | python | 99 | 100 | 79 | 120 | This is a test
for paragraph.
¶-0:
This is a second test.
</definitions>
§-0
¶-0
hola
¶-0
§-0"""
compressed_text = llm_utils.compress_string(text)
assert compressed_text == expected_output
# Mock ChatOpenAI class
class MockChatOpenAI:
def call(self, messages):
return "mocked response"
def __... | da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447 | false |
AgentLab | py:tests.experiments.test_launch_exp | program://AgentLab/module/tests.experiments.test_launch_exp#L1-L128 | module | tests.experiments.test_launch_exp | tests/experiments/test_launch_exp.py | python | 1 | 128 | 1 | 128 | import math
import tempfile
from pathlib import Path
import pytest
from agentlab.agents.generic_agent.agent_configs import FLAGS_GPT_3_5, AGENT_4o_MINI
from agentlab.agents.generic_agent.generic_agent import GenericAgentArgs
from agentlab.analyze import inspect_results
from agentlab.experiments.launch_exp import (
... | 2928ffc5b8e30dcb8f46368d60abb04f9c1527c9ea2dbf790053a8f1308d9285 | false |
AgentLab | py:tests.experiments.test_launch_exp.test_relaunch_study | program://AgentLab/function/tests.experiments.test_launch_exp.test_relaunch_study#L20-L29 | function | test_relaunch_study | tests/experiments/test_launch_exp.py | python | 20 | 29 | 1 | 49 | import math
import tempfile
from pathlib import Path
import pytest
from agentlab.agents.generic_agent.agent_configs import FLAGS_GPT_3_5, AGENT_4o_MINI
from agentlab.agents.generic_agent.generic_agent import GenericAgentArgs
from agentlab.analyze import inspect_results
from agentlab.experiments.launch_exp import (
... | 2928ffc5b8e30dcb8f46368d60abb04f9c1527c9ea2dbf790053a8f1308d9285 | false |
AgentLab | py:tests.experiments.test_launch_exp._test_launch_system | program://AgentLab/function/tests.experiments.test_launch_exp._test_launch_system#L32-L81 | function | _test_launch_system | tests/experiments/test_launch_exp.py | python | 32 | 81 | 12 | 101 | non_dummy_count,
run_experiments,
)
from agentlab.experiments.loop import EnvArgs, ExpArgs
from agentlab.experiments.study import Study
from agentlab.llm.chat_api import CheatMiniWoBLLMArgs
def test_relaunch_study():
study_dir = Path(__file__).parent.parent / "data" / "test_study"
exp_args_list = find... | 2928ffc5b8e30dcb8f46368d60abb04f9c1527c9ea2dbf790053a8f1308d9285 | false |
AgentLab | py:tests.experiments.test_launch_exp.test_launch_system_joblib | program://AgentLab/function/tests.experiments.test_launch_exp.test_launch_system_joblib#L84-L85 | function | test_launch_system_joblib | tests/experiments/test_launch_exp.py | python | 84 | 85 | 64 | 105 |
for _, row in results_df.iterrows():
if row.stack_trace is not None:
print(row.stack_trace)
if cause_timeout:
# assert row.err_msg is not None
assert math.isnan(row.cum_reward) or row.cum_reward == 0
else:
asser... | 2928ffc5b8e30dcb8f46368d60abb04f9c1527c9ea2dbf790053a8f1308d9285 | false |
AgentLab | py:tests.experiments.test_launch_exp.test_launch_system_sequntial | program://AgentLab/function/tests.experiments.test_launch_exp.test_launch_system_sequntial#L88-L89 | function | test_launch_system_sequntial | tests/experiments/test_launch_exp.py | python | 88 | 89 | 68 | 109 | if cause_timeout:
# assert row.err_msg is not None
assert math.isnan(row.cum_reward) or row.cum_reward == 0
else:
assert row.err_msg is None
assert row.cum_reward == 1.0
study_summary = inspect_results.summarize_study(resul... | 2928ffc5b8e30dcb8f46368d60abb04f9c1527c9ea2dbf790053a8f1308d9285 | false |
AgentLab | py:tests.experiments.test_launch_exp.test_launch_system_ray | program://AgentLab/function/tests.experiments.test_launch_exp.test_launch_system_ray#L92-L93 | function | test_launch_system_ray | tests/experiments/test_launch_exp.py | python | 92 | 93 | 72 | 113 | assert row.err_msg is None
assert row.cum_reward == 1.0
study_summary = inspect_results.summarize_study(results_df)
assert len(study_summary) == 1
assert study_summary.std_err.iloc[0] == 0
if not cause_timeout:
assert study_summary.n_complete... | 2928ffc5b8e30dcb8f46368d60abb04f9c1527c9ea2dbf790053a8f1308d9285 | false |
AgentLab | py:tests.experiments.test_launch_exp.test_timeout_ray | program://AgentLab/function/tests.experiments.test_launch_exp.test_timeout_ray#L97-L98 | function | test_timeout_ray | tests/experiments/test_launch_exp.py | python | 97 | 98 | 77 | 118 | assert study_summary.std_err.iloc[0] == 0
if not cause_timeout:
assert study_summary.n_completed.iloc[0] == "3/3"
assert study_summary.avg_reward.iloc[0] == 1.0
def test_launch_system_joblib():
_test_launch_system(backend="joblib")
def test_launch_system_sequntial():
... | 2928ffc5b8e30dcb8f46368d60abb04f9c1527c9ea2dbf790053a8f1308d9285 | false |
AgentLab | py:tests.experiments.test_launch_exp.test_4o_mini_on_miniwob_tiny_test | program://AgentLab/function/tests.experiments.test_launch_exp.test_4o_mini_on_miniwob_tiny_test#L102-L121 | function | test_4o_mini_on_miniwob_tiny_test | tests/experiments/test_launch_exp.py | python | 102 | 121 | 82 | 128 |
def test_launch_system_joblib():
_test_launch_system(backend="joblib")
def test_launch_system_sequntial():
_test_launch_system(backend="sequential")
def test_launch_system_ray():
_test_launch_system(backend="ray")
@pytest.mark.pricy
def test_timeout_ray():
_test_launch_system(backend="ray", caus... | 2928ffc5b8e30dcb8f46368d60abb04f9c1527c9ea2dbf790053a8f1308d9285 | false |
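The launch tests wrap one `_test_launch_system` helper in a separate test per backend. An equivalent parametrized form, shown only as a hypothetical idiomatic alternative to the per-backend wrappers above:

```python
import pytest

@pytest.mark.parametrize("backend", ["joblib", "sequential", "ray"])
def test_launch_system(backend):
    _test_launch_system(backend=backend)  # helper defined in the module above
```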
AgentLab | py:tests.experiments.test_multi_server | program://AgentLab/module/tests.experiments.test_multi_server#L1-L37 | module | tests.experiments.test_multi_server | tests/experiments/test_multi_server.py | python | 1 | 37 | 1 | 37 | from agentlab.experiments.multi_server import WebArenaInstanceVars
from browsergym.webarena.instance import WebArenaInstance
def test_webarena_multiserver():
instance_1 = WebArenaInstanceVars(
base_url="http://webarena1.eastus.cloudapp.azure.com",
shopping="8082/",
shopping_admin="8083/ad... | a2e6320bd23ca7f9804927c7f751ae02a9d2ebc372b66404a1cc1c46ccf4d86f | false |
AgentLab | py:tests.experiments.test_multi_server.test_webarena_multiserver | program://AgentLab/function/tests.experiments.test_multi_server.test_webarena_multiserver#L5-L33 | function | test_webarena_multiserver | tests/experiments/test_multi_server.py | python | 5 | 33 | 1 | 37 | from agentlab.experiments.multi_server import WebArenaInstanceVars
from browsergym.webarena.instance import WebArenaInstance
def test_webarena_multiserver():
instance_1 = WebArenaInstanceVars(
base_url="http://webarena1.eastus.cloudapp.azure.com",
shopping="8082/",
shopping_admin="8083/ad... | a2e6320bd23ca7f9804927c7f751ae02a9d2ebc372b66404a1cc1c46ccf4d86f | false |
AgentLab | py:tests.experiments.test_ray | program://AgentLab/module/tests.experiments.test_ray#L1-L80 | module | tests.experiments.test_ray | tests/experiments/test_ray.py | python | 1 | 80 | 1 | 80 | import bgym
import pytest
import ray
from flaky import flaky
from agentlab.experiments.exp_utils import MockedExpArgs, add_dependencies
from agentlab.experiments.graph_execution_ray import execute_task_graph
TASK_TIME = 3
@flaky(max_runs=3, min_passes=1)
def test_execute_task_graph():
# Define a list of ExpArgs... | cf7a13d302b1156c30cdf415e6286413e1c5d07dc70e8f5cb8b04cc3ec89cae1 | false |
AgentLab | py:tests.experiments.test_ray.test_execute_task_graph | program://AgentLab/function/tests.experiments.test_ray.test_execute_task_graph#L13-L32 | function | test_execute_task_graph | tests/experiments/test_ray.py | python | 13 | 32 | 1 | 52 | import bgym
import pytest
import ray
from flaky import flaky
from agentlab.experiments.exp_utils import MockedExpArgs, add_dependencies
from agentlab.experiments.graph_execution_ray import execute_task_graph
TASK_TIME = 3
@flaky(max_runs=3, min_passes=1)
def test_execute_task_graph():
# Define a list of ExpArgs... | cf7a13d302b1156c30cdf415e6286413e1c5d07dc70e8f5cb8b04cc3ec89cae1 | false |
AgentLab | py:tests.experiments.test_ray.test_add_dependencies | program://AgentLab/function/tests.experiments.test_ray.test_add_dependencies#L47-L75 | function | test_add_dependencies | tests/experiments/test_ray.py | python | 47 | 75 | 27 | 80 |
# Verify that all tasks were executed in the proper order
assert exp_args_list[0].start_time < exp_args_list[1].start_time
assert exp_args_list[0].start_time < exp_args_list[2].start_time
assert exp_args_list[1].end_time < exp_args_list[3].start_time
assert exp_args_list[2].end_time < exp_args_list... | cf7a13d302b1156c30cdf415e6286413e1c5d07dc70e8f5cb8b04cc3ec89cae1 | false |