| sample_id (string, 21-196 chars) | text (string, 105-936k chars) | metadata (dict) | category (string, 6 classes) |
|---|---|---|---|
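The rows below all follow one fixed record shape. Before the rows, a minimal sketch of that shape, assuming only the fields visible in the records themselves; the `Record` dataclass and `parse_sample_id` helper are illustrative, not part of any dataset tooling:

```python
from dataclasses import dataclass
from typing import Any


@dataclass
class Record:
    sample_id: str            # "<repo_id>:<file_path>", e.g. "mlflow/mlflow:mlflow/demo/base.py"
    text: str                 # file contents; the previews below are truncated with "..."
    metadata: dict[str, Any]  # repo_id, file_path, license, lines, plus canary/PII fields
    category: str             # one of 6 classes, e.g. "test", "documentation", "function_simple"


def parse_sample_id(sample_id: str) -> tuple[str, str]:
    # The repo id and the file path are joined by the first ":".
    repo_id, _, file_path = sample_id.partition(":")
    return repo_id, file_path


assert parse_sample_id("mlflow/mlflow:mlflow/demo/base.py") == (
    "mlflow/mlflow",
    "mlflow/demo/base.py",
)
```

In every row shown here the canary and PII fields sit at their defaults (`-1` or empty strings), so the informative metadata reduces to `repo_id`, `file_path`, `license`, and `lines`.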
mlflow/mlflow:mlflow/demo/generators/traces.py | from __future__ import annotations
import hashlib
import logging
import random
import re
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from typing import Literal
import mlflow
from mlflow.demo.base import (
DEMO_EXPERIMENT_NAME,
DEMO_PROMPT_PREFIX,
BaseDemoGenerator,... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/demo/generators/traces.py",
"license": "Apache License 2.0",
"lines": 569,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/demo/test_demo_integration.py | """Integration tests for the demo data framework.
These tests run against a real MLflow tracking server to verify that demo data
is correctly persisted, retrieved, and cleaned up on version bumps.
"""
from pathlib import Path
import pytest
from mlflow import MlflowClient, set_tracking_uri
from mlflow.demo import ge... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/demo/test_demo_integration.py",
"license": "Apache License 2.0",
"lines": 295,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/demo/test_traces_generator.py | import pytest
from mlflow import MlflowClient, get_experiment_by_name, set_experiment
from mlflow.demo.base import DEMO_EXPERIMENT_NAME, DemoFeature, DemoResult
from mlflow.demo.generators.traces import (
DEMO_TRACE_TYPE_TAG,
DEMO_VERSION_TAG,
TracesDemoGenerator,
)
@pytest.fixture
def traces_generator()... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/demo/test_traces_generator.py",
"license": "Apache License 2.0",
"lines": 107,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/entities/test_job_status.py | import pytest
from mlflow.entities._job_status import JobStatus
from mlflow.protos.jobs_pb2 import JobStatus as ProtoJobStatus
@pytest.mark.parametrize(
("status", "expected_proto"),
[
(JobStatus.PENDING, ProtoJobStatus.JOB_STATUS_PENDING),
(JobStatus.RUNNING, ProtoJobStatus.JOB_STATUS_IN_PRO... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/entities/test_job_status.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/assistant/skill_installer.py | """
Manage skill installation
Skills are maintained in the mlflow/assistant/skills subtree in the MLflow repository,
which points to the https://github.com/mlflow/skills repository.
"""
import shutil
from importlib import resources
from pathlib import Path
SKILL_MANIFEST_FILE = "SKILL.md"
def _find_skill_directori... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/assistant/skill_installer.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:tests/assistant/test_skill_installer.py | from mlflow.assistant.skill_installer import install_skills, list_installed_skills
def test_install_skills_copies_to_destination(tmp_path):
destination = tmp_path / "skills"
installed = install_skills(destination)
assert destination.exists()
assert "agent-evaluation" in installed
assert (destinat... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/assistant/test_skill_installer.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/demo/base.py | import logging
from abc import ABC, abstractmethod
from dataclasses import dataclass
from enum import Enum
from mlflow.tracking._tracking_service.utils import _get_store
_logger = logging.getLogger(__name__)
DEMO_EXPERIMENT_NAME = "MLflow Demo"
DEMO_PROMPT_PREFIX = "mlflow-demo"
class DemoFeature(str, Enum):
"... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/demo/base.py",
"license": "Apache License 2.0",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/demo/registry.py | from __future__ import annotations
from typing import TYPE_CHECKING
from mlflow.demo.base import DemoFeature
if TYPE_CHECKING:
from mlflow.demo.base import BaseDemoGenerator
class DemoRegistry:
"""Registry for demo data generators.
Provides registration and lookup of BaseDemoGenerator subclasses by na... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/demo/registry.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:tests/demo/test_base.py | import pytest
from mlflow.demo.base import (
DEMO_EXPERIMENT_NAME,
DEMO_PROMPT_PREFIX,
BaseDemoGenerator,
DemoFeature,
DemoResult,
)
def test_demo_feature_enum():
assert DemoFeature.TRACES == "traces"
assert DemoFeature.EVALUATION == "evaluation"
assert isinstance(DemoFeature.TRACES, ... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/demo/test_base.py",
"license": "Apache License 2.0",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/demo/test_generate.py | import threading
from unittest import mock
from mlflow.demo import generate_all_demos
from mlflow.demo.base import BaseDemoGenerator, DemoFeature, DemoResult
from mlflow.environment_variables import MLFLOW_WORKSPACE
from mlflow.utils.workspace_context import (
clear_server_request_workspace,
get_request_worksp... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/demo/test_generate.py",
"license": "Apache License 2.0",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/demo/test_registry.py | import pytest
from mlflow.demo.base import BaseDemoGenerator, DemoFeature
def test_register_and_get(fresh_registry, stub_generator):
fresh_registry.register(stub_generator)
assert fresh_registry.get(DemoFeature.TRACES) is stub_generator
def test_register_duplicate_raises(fresh_registry, stub_generator):
... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/demo/test_registry.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/utils/test_providers.py | from unittest import mock
from mlflow.utils.providers import (
_normalize_provider,
get_all_providers,
get_models,
)
def test_normalize_provider_normalizes_vertex_ai_variants():
assert _normalize_provider("vertex_ai") == "vertex_ai"
assert _normalize_provider("vertex_ai-anthropic") == "vertex_ai"... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/utils/test_providers.py",
"license": "Apache License 2.0",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/judges/utils/telemetry_utils.py | from __future__ import annotations
import logging
_logger = logging.getLogger(__name__)
def _record_judge_model_usage_success_databricks_telemetry(
*,
request_id: str | None,
model_provider: str,
endpoint_name: str,
num_prompt_tokens: int | None,
num_completion_tokens: int | None,
) -> None:... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/utils/telemetry_utils.py",
"license": "Apache License 2.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/genai/judges/optimizers/gepa.py | """GEPA alignment optimizer implementation."""
import logging
from typing import Any, Callable, Collection
from mlflow.exceptions import MlflowException
from mlflow.genai.judges.optimizers.dspy import DSPyAlignmentOptimizer
from mlflow.genai.judges.optimizers.dspy_utils import create_gepa_metric_adapter
from mlflow.p... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/optimizers/gepa.py",
"license": "Apache License 2.0",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:tests/genai/judges/optimizers/test_gepa.py | from importlib import reload
from unittest.mock import MagicMock, patch
import dspy
import pytest
from mlflow.exceptions import MlflowException
from mlflow.genai.judges.optimizers import GEPAAlignmentOptimizer
from tests.genai.judges.optimizers.conftest import create_mock_judge_invocator
def test_dspy_optimize_no_... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/judges/optimizers/test_gepa.py",
"license": "Apache License 2.0",
"lines": 147,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/docker/test_integrations.py | import os
from datetime import timedelta
import pytest
from testcontainers.compose import DockerCompose
from testcontainers.core.wait_strategies import HttpWaitStrategy
import mlflow
@pytest.mark.parametrize(
"compose_file",
[
"docker-compose.mssql-test.yaml",
"docker-compose.mysql-test.yaml... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/docker/test_integrations.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/server/jobs/logging_utils.py | """Shared logging utilities for MLflow job consumers."""
import logging
from mlflow.utils.logging_utils import get_mlflow_log_level
def configure_logging_for_jobs() -> None:
"""Configure Python logging for job consumers to reduce noise for log levels above DEBUG."""
# Suppress noisy alembic INFO logs (e.g.,... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/server/jobs/logging_utils.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/genai/scorers/ragas/scorers/agentic_metrics.py | from __future__ import annotations
from typing import ClassVar
from mlflow.genai.judges.builtin import _MODEL_API_DOC
from mlflow.genai.scorers.ragas import RagasScorer
from mlflow.utils.annotations import experimental
from mlflow.utils.docstring_utils import format_docstring
@experimental(version="3.9.0")
@format_... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/scorers/ragas/scorers/agentic_metrics.py",
"license": "Apache License 2.0",
"lines": 158,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/genai/optimize/job.py | import logging
from dataclasses import asdict, dataclass
from enum import Enum
from typing import Any, Callable
from mlflow.exceptions import MlflowException
from mlflow.genai.datasets import get_dataset
from mlflow.genai.optimize import optimize_prompts
from mlflow.genai.optimize.optimizers import (
BasePromptOpt... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/optimize/job.py",
"license": "Apache License 2.0",
"lines": 271,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/genai/optimize/test_job.py | """
Unit tests for the optimize_prompts_job wrapper.
These tests focus on the helper functions and job function logic without
requiring a full job execution infrastructure.
"""
import sys
from unittest import mock
import pytest
import mlflow
from mlflow.exceptions import MlflowException
from mlflow.genai.optimize.j... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/optimize/test_job.py",
"license": "Apache License 2.0",
"lines": 254,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/src/clint/rules/use_gh_token.py | import ast
from clint.resolver import Resolver
from clint.rules.base import Rule
class UseGhToken(Rule):
def _message(self) -> str:
return "Use GH_TOKEN instead of GITHUB_TOKEN for the environment variable name."
@staticmethod
def check(node: ast.Call, resolver: Resolver) -> bool:
"""
... | {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/use_gh_token.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/tests/rules/test_use_gh_token.py | from pathlib import Path
import pytest
from clint.config import Config
from clint.linter import lint_file
from clint.rules.use_gh_token import UseGhToken
@pytest.mark.parametrize(
"code",
[
pytest.param(
'import os\n\ntoken = os.getenv("GITHUB_TOKEN")',
id="os.getenv",
... | {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_use_gh_token.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/tracing/otel/translation/spring_ai.py | """
Translation utilities for Spring AI semantic conventions.
Spring AI uses OpenTelemetry GenAI semantic conventions but stores
prompt/completion content in events rather than attributes:
- gen_ai.content.prompt event with gen_ai.prompt attribute
- gen_ai.content.completion event with gen_ai.completion attribute
Ref... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/tracing/otel/translation/spring_ai.py",
"license": "Apache License 2.0",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/assistant/cli.py | """MLflow CLI commands for Assistant integration."""
import shutil
import sys
import threading
import time
from pathlib import Path
import click
from mlflow.assistant.config import AssistantConfig, ProjectConfig, SkillsConfig
from mlflow.assistant.providers import AssistantProvider, list_providers
from mlflow.assist... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/assistant/cli.py",
"license": "Apache License 2.0",
"lines": 428,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/assistant/config.py | from pathlib import Path
from typing import Literal
from pydantic import BaseModel, Field
MLFLOW_ASSISTANT_HOME = Path.home() / ".mlflow" / "assistant"
CONFIG_PATH = MLFLOW_ASSISTANT_HOME / "config.json"
class PermissionsConfig(BaseModel):
"""Permission settings for the assistant provider."""
allow_edit_fi... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/assistant/config.py",
"license": "Apache License 2.0",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/assistant/providers/base.py | from abc import ABC, abstractmethod
from functools import lru_cache
from pathlib import Path
from typing import Any, AsyncGenerator, Callable
from mlflow.assistant.config import AssistantConfig, ProviderConfig
@lru_cache(maxsize=10)
def load_config(name: str) -> ProviderConfig:
cfg = AssistantConfig.load()
i... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/assistant/providers/base.py",
"license": "Apache License 2.0",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/assistant/providers/claude_code.py | """
Claude Code provider for MLflow Assistant.
This module provides the Claude Code integration for the assistant API,
enabling AI-powered trace analysis through the Claude Code CLI.
"""
import asyncio
import json
import logging
import os
import shutil
import subprocess
from pathlib import Path
from typing import Any... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/assistant/providers/claude_code.py",
"license": "Apache License 2.0",
"lines": 497,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/assistant/types.py | import json
from enum import Enum
from typing import Any, Literal
from pydantic import BaseModel, Field
# Message interface between assistant providers and the assistant client
# Inspired by https://github.com/anthropics/claude-agent-sdk-python/blob/29c12cd80b256e88f321b2b8f1f5a88445077aa5/src/claude_agent_sdk/types.... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/assistant/types.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/server/assistant/api.py | """
Assistant API endpoints for MLflow Server.
This module provides endpoints for integrating AI assistants with MLflow UI,
enabling AI-powered helper through a chat interface.
"""
import ipaddress
import uuid
from pathlib import Path
from typing import Any, AsyncGenerator, Literal
from fastapi import APIRouter, Dep... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/server/assistant/api.py",
"license": "Apache License 2.0",
"lines": 297,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/server/assistant/session.py | import json
import os
import signal
import tempfile
import uuid
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any
from mlflow.assistant.types import Message
SESSION_DIR = Path(tempfile.gettempdir()) / "mlflow-assistant-sessions"
@dataclass
class Session:
"""Session state f... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/server/assistant/session.py",
"license": "Apache License 2.0",
"lines": 187,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/assistant/providers/test_claude_code_provider.py | from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from mlflow.assistant.providers.claude_code import ClaudeCodeProvider
from mlflow.assistant.types import EventType
class AsyncIterator:
"""Helper to mock async stdout iteration."""
def __init__(self, items):
self.items = iter(items... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/assistant/providers/test_claude_code_provider.py",
"license": "Apache License 2.0",
"lines": 228,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/assistant/test_cli.py | import os
from unittest import mock
import pytest
from click.testing import CliRunner
from mlflow.assistant.cli import commands
from mlflow.assistant.config import ProviderConfig
@pytest.fixture
def runner():
return CliRunner()
def test_assistant_help(runner):
result = runner.invoke(commands, ["--help"])
... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/assistant/test_cli.py",
"license": "Apache License 2.0",
"lines": 169,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/server/assistant/test_api.py | import os
import shutil
import subprocess
from pathlib import Path
from typing import Any
from unittest.mock import MagicMock, patch
import pytest
from fastapi import FastAPI, HTTPException
from fastapi.testclient import TestClient
from mlflow.assistant.config import AssistantConfig, ProjectConfig
from mlflow.assista... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/server/assistant/test_api.py",
"license": "Apache License 2.0",
"lines": 311,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/server/assistant/test_session.py | import shutil
import uuid
import pytest
from mlflow.assistant.types import Message
from mlflow.server.assistant.session import Session, SessionManager
def test_session_add_message():
session = Session()
session.add_message("user", "Hello")
assert len(session.messages) == 1
assert session.messages[0... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/server/assistant/test_session.py",
"license": "Apache License 2.0",
"lines": 137,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/tracing/fixtures/flask_tracing_server.py | """Flask server for distributed tracing tests."""
import sys
import requests
from flask import Flask, jsonify, request
import mlflow
from mlflow.tracing.distributed import (
get_tracing_context_headers_for_http_request,
set_tracing_context_from_http_request_headers,
)
REQUEST_TIMEOUT = 20
app = Flask(__nam... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/tracing/fixtures/flask_tracing_server.py",
"license": "Apache License 2.0",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/tracing/test_distributed.py | import re
import subprocess
import sys
import time
from contextlib import contextmanager
from pathlib import Path
from typing import Iterator
import requests
import mlflow
from mlflow.tracing.distributed import (
get_tracing_context_headers_for_http_request,
set_tracing_context_from_http_request_headers,
)
f... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/tracing/test_distributed.py",
"license": "Apache License 2.0",
"lines": 153,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/scorers/phoenix/models.py | from __future__ import annotations
from mlflow.genai.judges.adapters.databricks_managed_judge_adapter import (
call_chat_completions,
)
from mlflow.genai.judges.constants import _DATABRICKS_DEFAULT_JUDGE_MODEL
from mlflow.genai.scorers.phoenix.utils import _NoOpRateLimiter, check_phoenix_installed
from mlflow.metr... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/scorers/phoenix/models.py",
"license": "Apache License 2.0",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/genai/scorers/phoenix/registry.py | from __future__ import annotations
from mlflow.exceptions import MlflowException
from mlflow.genai.scorers.phoenix.utils import check_phoenix_installed
_METRIC_REGISTRY = {
"Hallucination": "HallucinationEvaluator",
"Relevance": "RelevanceEvaluator",
"Toxicity": "ToxicityEvaluator",
"QA": "QAEvaluator... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/scorers/phoenix/registry.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/genai/scorers/phoenix/utils.py | from __future__ import annotations
from typing import Any
from mlflow.entities.trace import Trace
from mlflow.exceptions import MlflowException
from mlflow.genai.utils.trace_utils import (
extract_retrieval_context_from_trace,
parse_inputs_to_str,
parse_outputs_to_str,
resolve_expectations_from_trace,... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/scorers/phoenix/utils.py",
"license": "Apache License 2.0",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/genai/scorers/phoenix/test_models.py | from unittest.mock import Mock, patch
import phoenix.evals as phoenix_evals
import pytest
from mlflow.exceptions import MlflowException
from mlflow.genai.scorers.phoenix.models import (
DatabricksPhoenixModel,
create_phoenix_model,
)
@pytest.fixture
def mock_call_chat_completions():
with patch("mlflow.g... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/scorers/phoenix/test_models.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/genai/scorers/phoenix/test_phoenix.py | from unittest.mock import Mock, patch
import phoenix.evals as phoenix_evals
import pytest
from mlflow.entities.assessment import Feedback
@pytest.fixture
def mock_model():
mock = Mock()
mock._verbose = False
mock._rate_limiter = Mock()
mock._rate_limiter._verbose = False
return mock
@pytest.ma... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/scorers/phoenix/test_phoenix.py",
"license": "Apache License 2.0",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/genai/scorers/phoenix/test_registry.py | from unittest import mock
import pytest
from mlflow.exceptions import MlflowException
phoenix_evals = pytest.importorskip("phoenix.evals")
@pytest.mark.parametrize(
("metric_name", "evaluator_name"),
[
("Hallucination", "HallucinationEvaluator"),
("Relevance", "RelevanceEvaluator"),
... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/scorers/phoenix/test_registry.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/genai/scorers/phoenix/test_utils.py | import json
import sys
import time
from unittest.mock import patch
import pytest
from opentelemetry.sdk.trace import ReadableSpan as OTelReadableSpan
from mlflow.entities.span import Span
from mlflow.entities.trace import Trace, TraceData, TraceInfo
from mlflow.entities.trace_location import TraceLocation
from mlflow... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/scorers/phoenix/test_utils.py",
"license": "Apache License 2.0",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/server/jobs/_periodic_tasks_consumer.py | """
This module is used for launching the periodic tasks Huey consumer.
This is a dedicated consumer that only runs periodic tasks (like the online scoring scheduler).
It is launched by the job runner and runs in a separate process from job execution consumers.
"""
import threading
from mlflow.server.jobs.logging_ut... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/server/jobs/_periodic_tasks_consumer.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/genai/judges/optimizers/memalign/optimizer.py | import copy
import logging
from dataclasses import asdict
from typing import TYPE_CHECKING, Any
import mlflow
from mlflow.entities.assessment import Assessment, AssessmentSource, Feedback
from mlflow.entities.assessment_source import AssessmentSourceType
from mlflow.entities.trace import Trace
from mlflow.exceptions i... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/optimizers/memalign/optimizer.py",
"license": "Apache License 2.0",
"lines": 572,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/genai/judges/optimizers/memalign/prompts.py | DISTILLATION_PROMPT_TEMPLATE = """You are helping improve an LLM judge with the \
following instructions:
{{ judge_instructions }}
Given a set of examples and a user's judgement of their quality, your task is to \
distill a set of guidelines from the judgements to model this user's perspective, \
which can be used to ... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/optimizers/memalign/prompts.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/genai/judges/optimizers/memalign/utils.py | import json
import logging
from concurrent.futures import ThreadPoolExecutor, as_completed
from functools import lru_cache
from typing import TYPE_CHECKING, Any
from pydantic import BaseModel
# Try to import jinja2 at module level
try:
from jinja2 import Template
_JINJA2_AVAILABLE = True
except ImportError:
... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/optimizers/memalign/utils.py",
"license": "Apache License 2.0",
"lines": 403,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/genai/judges/optimizers/memalign/test_optimizer.py | import json
from contextlib import contextmanager
from unittest.mock import MagicMock, patch
import pytest
import mlflow
from mlflow.entities.assessment import Assessment, AssessmentSource, Feedback
from mlflow.entities.assessment_source import AssessmentSourceType
from mlflow.exceptions import MlflowException
from m... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/judges/optimizers/memalign/test_optimizer.py",
"license": "Apache License 2.0",
"lines": 515,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/genai/judges/optimizers/memalign/test_utils.py | from unittest.mock import MagicMock, patch
import dspy
import pytest
import mlflow
from mlflow.genai.judges.optimizers.memalign.utils import (
_count_tokens,
_create_batches,
distill_guidelines,
get_default_embedding_model,
retrieve_relevant_examples,
truncate_to_token_limit,
value_to_embe... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/judges/optimizers/memalign/test_utils.py",
"license": "Apache License 2.0",
"lines": 491,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/scorers/online/session_processor.py | """Session-level online scoring processor for executing scorers on completed sessions."""
import logging
from concurrent.futures import ThreadPoolExecutor, as_completed
from dataclasses import dataclass, field
from mlflow.entities.assessment import Assessment
from mlflow.environment_variables import MLFLOW_ONLINE_SCO... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/scorers/online/session_processor.py",
"license": "Apache License 2.0",
"lines": 290,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/genai/scorers/online/test_session_processor.py | import json
import uuid
from unittest.mock import MagicMock, patch
import pytest
from mlflow.entities import Trace, TraceData, TraceInfo
from mlflow.entities.assessment import Assessment
from mlflow.entities.trace_location import (
MlflowExperimentLocation,
TraceLocation,
TraceLocationType,
)
from mlflow.... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/scorers/online/test_session_processor.py",
"license": "Apache License 2.0",
"lines": 601,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/scorers/online/session_checkpointer.py | """Checkpoint management for session-level online scoring."""
import json
import logging
import time
from dataclasses import asdict, dataclass
from mlflow.entities.experiment_tag import ExperimentTag
from mlflow.environment_variables import (
MLFLOW_ONLINE_SCORING_DEFAULT_SESSION_COMPLETION_BUFFER_SECONDS,
)
from... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/scorers/online/session_checkpointer.py",
"license": "Apache License 2.0",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:tests/genai/scorers/online/test_session_checkpointer.py | import time
from unittest.mock import MagicMock
import pytest
from mlflow.environment_variables import (
MLFLOW_ONLINE_SCORING_DEFAULT_SESSION_COMPLETION_BUFFER_SECONDS,
)
from mlflow.genai.scorers.online.constants import MAX_LOOKBACK_MS
from mlflow.genai.scorers.online.session_checkpointer import (
OnlineSes... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/scorers/online/test_session_checkpointer.py",
"license": "Apache License 2.0",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/optimize/optimizers/metaprompt_optimizer.py | import json
import logging
import re
from contextlib import nullcontext
from typing import Any
import mlflow
from mlflow.entities.span import SpanType
from mlflow.exceptions import MlflowException
from mlflow.genai.optimize.optimizers.base import BasePromptOptimizer, _EvalFunc
from mlflow.genai.optimize.types import E... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/optimize/optimizers/metaprompt_optimizer.py",
"license": "Apache License 2.0",
"lines": 584,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/genai/optimize/optimizers/test_metaprompt_optimizer.py | import json
import sys
from typing import Any
from unittest.mock import Mock, patch
import pytest
from mlflow.exceptions import MlflowException
from mlflow.genai.optimize.optimizers.metaprompt_optimizer import MetaPromptOptimizer
from mlflow.genai.optimize.types import EvaluationResultRecord, PromptOptimizerOutput
... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/optimize/optimizers/test_metaprompt_optimizer.py",
"license": "Apache License 2.0",
"lines": 379,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/pyfunc/custom_model/transitive_test/model_with_transitive.py | from custom_model.transitive_test.transitive_dependency import some_function
from mlflow.pyfunc import PythonModel
class ModelWithTransitiveDependency(PythonModel):
def predict(self, context, model_input, params=None):
result = some_function()
return [result] * len(model_input)
| {
"repo_id": "mlflow/mlflow",
"file_path": "tests/pyfunc/custom_model/transitive_test/model_with_transitive.py",
"license": "Apache License 2.0",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:.claude/skills/src/skills/commands/fetch_unresolved_comments.py | # ruff: noqa: T201
"""Fetch unresolved PR review comments using GitHub GraphQL API."""
from __future__ import annotations
import argparse
import asyncio
from typing import Any
from pydantic import BaseModel
from skills.github import GitHubClient, parse_pr_url
from skills.github.types import ReviewComment, ReviewThr... | {
"repo_id": "mlflow/mlflow",
"file_path": ".claude/skills/src/skills/commands/fetch_unresolved_comments.py",
"license": "Apache License 2.0",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:.claude/skills/src/skills/github/client.py | from collections.abc import AsyncIterator
from typing import Any, cast
import aiohttp
from typing_extensions import Self
from skills.github.types import Job, JobRun, PullRequest
from skills.github.utils import get_github_token
class GitHubClient:
def __init__(self, token: str | None = None) -> None:
sel... | {
"repo_id": "mlflow/mlflow",
"file_path": ".claude/skills/src/skills/github/client.py",
"license": "Apache License 2.0",
"lines": 120,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:.claude/skills/src/skills/github/types.py | from pydantic import BaseModel
class GitRef(BaseModel):
sha: str
ref: str
class PullRequest(BaseModel):
title: str
body: str | None
head: GitRef
class ReviewComment(BaseModel):
id: int
body: str
author: str
createdAt: str
class ReviewThread(BaseModel):
thread_id: str
... | {
"repo_id": "mlflow/mlflow",
"file_path": ".claude/skills/src/skills/github/types.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:.claude/skills/src/skills/github/utils.py | # ruff: noqa: T201
import os
import re
import subprocess
import sys
def get_github_token() -> str:
if token := os.environ.get("GH_TOKEN"):
return token
try:
return subprocess.check_output(["gh", "auth", "token"], text=True).strip()
except (subprocess.CalledProcessError, FileNotFoundError):... | {
"repo_id": "mlflow/mlflow",
"file_path": ".claude/skills/src/skills/github/utils.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:.claude/skills/src/skills/cli.py | import argparse
from skills.commands import analyze_ci, fetch_diff, fetch_unresolved_comments
def main() -> None:
parser = argparse.ArgumentParser(prog="skills")
subparsers = parser.add_subparsers(dest="command", required=True)
analyze_ci.register(subparsers)
fetch_diff.register(subparsers)
fetc... | {
"repo_id": "mlflow/mlflow",
"file_path": ".claude/skills/src/skills/cli.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:tests/server/jobs/helpers.py | """Shared test helpers for job execution tests."""
import os
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
from mlflow.entities._job_status import JobStatus
from mlflow.server import (
ARTIFACT_ROOT_ENV_VAR,
BACKEND_STORE_URI_ENV_VAR,
HUEY_STORAGE_PATH_ENV_VAR,
... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/server/jobs/helpers.py",
"license": "Apache License 2.0",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/server/jobs/test_online_scoring_jobs.py | import json
import os
import uuid
from dataclasses import asdict
from pathlib import Path
from typing import Any
from unittest.mock import MagicMock, patch
import pytest
from mlflow.entities._job_status import JobStatus
from mlflow.genai.judges import make_judge
from mlflow.genai.scorers.base import Scorer
from mlflo... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/server/jobs/test_online_scoring_jobs.py",
"license": "Apache License 2.0",
"lines": 239,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/scorers/online/trace_processor.py | """Online scoring processor for executing scorers on traces."""
import logging
from concurrent.futures import ThreadPoolExecutor, as_completed
from dataclasses import dataclass
from mlflow.entities import Trace
from mlflow.environment_variables import MLFLOW_ONLINE_SCORING_MAX_WORKER_THREADS
from mlflow.genai.scorers... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/scorers/online/trace_processor.py",
"license": "Apache License 2.0",
"lines": 237,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/genai/scorers/online/test_trace_processor.py | import json
import uuid
from unittest.mock import MagicMock, patch
import pytest
from mlflow.entities import Trace, TraceData, TraceInfo
from mlflow.genai.scorers.builtin_scorers import Completeness
from mlflow.genai.scorers.online.entities import OnlineScorer, OnlineScoringConfig
from mlflow.genai.scorers.online.sam... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/scorers/online/test_trace_processor.py",
"license": "Apache License 2.0",
"lines": 372,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/scorers/online/constants.py | """Constants for online scoring."""
from mlflow.tracing.constant import TraceMetadataKey
# Maximum lookback period to prevent getting stuck on old failing traces (1 hour)
MAX_LOOKBACK_MS = 60 * 60 * 1000
# Maximum traces to include in a single scoring job
MAX_TRACES_PER_JOB = 500
# Maximum sessions to include in a ... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/scorers/online/constants.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/genai/scorers/online/sampler.py | """Dense sampling strategy for online scoring."""
import hashlib
import logging
from collections import defaultdict
from typing import TYPE_CHECKING
from mlflow.genai.scorers.base import Scorer
if TYPE_CHECKING:
from mlflow.genai.scorers.online.entities import OnlineScorer
_logger = logging.getLogger(__name__)
... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/scorers/online/sampler.py",
"license": "Apache License 2.0",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:mlflow/genai/scorers/online/trace_checkpointer.py | """Checkpoint management for trace-level online scoring."""
import json
import logging
import time
from dataclasses import asdict, dataclass
from mlflow.entities.experiment_tag import ExperimentTag
from mlflow.genai.scorers.online.constants import MAX_LOOKBACK_MS
from mlflow.store.tracking.abstract_store import Abstr... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/scorers/online/trace_checkpointer.py",
"license": "Apache License 2.0",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/genai/scorers/online/trace_loader.py | """Trace loading utilities for online scoring."""
import logging
from mlflow.entities import Trace, TraceInfo
from mlflow.store.tracking.abstract_store import AbstractStore
_logger = logging.getLogger(__name__)
class OnlineTraceLoader:
def __init__(self, tracking_store: AbstractStore):
self._tracking_s... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/scorers/online/trace_loader.py",
"license": "Apache License 2.0",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:tests/genai/scorers/online/test_sampler.py | import json
import uuid
import pytest
from mlflow.genai.scorers.builtin_scorers import Completeness, ConversationCompleteness
from mlflow.genai.scorers.online.entities import OnlineScorer, OnlineScoringConfig
from mlflow.genai.scorers.online.sampler import OnlineScorerSampler
def make_online_scorer(
scorer,
... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/scorers/online/test_sampler.py",
"license": "Apache License 2.0",
"lines": 108,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/genai/scorers/online/test_trace_checkpointer.py | import time
from unittest.mock import MagicMock
import pytest
from mlflow.genai.scorers.online.constants import MAX_LOOKBACK_MS
from mlflow.genai.scorers.online.trace_checkpointer import (
OnlineTraceCheckpointManager,
OnlineTraceScoringCheckpoint,
)
from mlflow.utils.mlflow_tags import MLFLOW_LATEST_ONLINE_S... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/scorers/online/test_trace_checkpointer.py",
"license": "Apache License 2.0",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/genai/scorers/online/test_trace_loader.py | from unittest.mock import MagicMock
import pytest
from mlflow.entities import Trace, TraceInfo
from mlflow.genai.scorers.online.trace_loader import OnlineTraceLoader
@pytest.fixture
def mock_store():
return MagicMock()
@pytest.fixture
def trace_loader(mock_store):
return OnlineTraceLoader(mock_store)
@p... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/scorers/online/test_trace_loader.py",
"license": "Apache License 2.0",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/scorers/online/entities.py | """
Online scorer entities and configuration.
This module contains entities for online scorer configuration used by the store layer
and online scoring infrastructure.
"""
from dataclasses import dataclass
@dataclass
class OnlineScoringConfig:
"""
Internal entity representing the online configuration for a s... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/scorers/online/entities.py",
"license": "Apache License 2.0",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/genai/simulators/prompts.py | DEFAULT_PERSONA = "You are an inquisitive user having a natural conversation."
INITIAL_USER_PROMPT = """Instructions:
You are role-playing as a real user interacting with an AI assistant.
- Write like a human user, not like an assistant or expert. Do not act as the helper or expert:
NEVER answer the goal yourself, e... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/simulators/prompts.py",
"license": "Apache License 2.0",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/genai/simulators/simulator.py | from __future__ import annotations
import inspect
import logging
import math
import time
import uuid
from abc import ABC, abstractmethod
from concurrent.futures import ThreadPoolExecutor, as_completed
from contextlib import contextmanager
from dataclasses import dataclass, field
from threading import Lock
from typing ... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/simulators/simulator.py",
"license": "Apache License 2.0",
"lines": 713,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/genai/simulators/test_simulator.py | import re
from unittest.mock import Mock, patch
import pandas as pd
import pytest
import mlflow
from mlflow.exceptions import MlflowException
from mlflow.genai.datasets.evaluation_dataset import EvaluationDataset
from mlflow.genai.simulators import (
BaseSimulatedUserAgent,
ConversationSimulator,
Simulate... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/simulators/test_simulator.py",
"license": "Apache License 2.0",
"lines": 807,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/judges/prompts/conversational_guidelines.py | CONVERSATIONAL_GUIDELINES_ASSESSMENT_NAME = "conversational_guidelines"
CONVERSATIONAL_GUIDELINES_PROMPT = """\
Consider the following conversation history between a user and an assistant.
Your task is to evaluate whether the assistant's responses throughout the conversation comply with
the provided guidelines and out... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/prompts/conversational_guidelines.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:dev/clint/src/clint/rules/prefer_dict_union.py | import ast
from clint.rules.base import Rule
def _is_simple_name_or_attribute(node: ast.expr) -> bool:
"""
Check if a node is a simple name (e.g., `a`) or a chain of attribute
accesses on a simple name (e.g., `obj.attr` or `a.b.c`).
"""
if isinstance(node, ast.Name):
return True
if is... | {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/prefer_dict_union.py",
"license": "Apache License 2.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:dev/clint/tests/rules/test_prefer_dict_union.py | from pathlib import Path
import pytest
from clint.config import Config
from clint.linter import lint_file
from clint.rules import PreferDictUnion
@pytest.mark.parametrize(
"code",
[
pytest.param("{**dict1, **dict2}", id="two_dict_unpacks"),
pytest.param("{**dict1, **dict2, **dict3}", id="thre... | {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_prefer_dict_union.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/gateway/providers/test_fallback.py | from typing import Any
from unittest import mock
import pytest
from fastapi import HTTPException
from mlflow.entities.gateway_endpoint import FallbackStrategy
from mlflow.gateway.config import EndpointConfig
from mlflow.gateway.exceptions import AIGatewayException
from mlflow.gateway.providers.base import FallbackPro... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/gateway/providers/test_fallback.py",
"license": "Apache License 2.0",
"lines": 251,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/scorers/job.py | """Huey job functions for async scorer invocation."""
import logging
import os
import random
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
from contextlib import nullcontext
from dataclasses import asdict, dataclass, field
from typing import Any
from mlflow.entiti... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/scorers/job.py",
"license": "Apache License 2.0",
"lines": 417,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/server/jobs/test_scorer_invocation.py | """
E2E integration tests for async scorer invocation via the MLflow server.
These tests spin up a real MLflow server with job execution enabled and test
the full flow of invoking scorers on traces asynchronously.
The MLflow AI Gateway is mocked to avoid real LLM calls during testing.
"""
import json
import os
impor... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/server/jobs/test_scorer_invocation.py",
"license": "Apache License 2.0",
"lines": 479,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/models/test_container.py | """
Tests for mlflow.models.container module.
Includes security tests for command injection prevention.
"""
import os
from unittest import mock
import pytest
import yaml
from mlflow.models.container import _install_model_dependencies_to_env
from mlflow.utils import env_manager as em
def _create_model_artifact(mod... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/models/test_container.py",
"license": "Apache License 2.0",
"lines": 171,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/tracking/context/jupyter_notebook_context.py | import json
import os
from collections.abc import Generator
from functools import lru_cache
from pathlib import Path
from typing import Any
from urllib.request import urlopen
from mlflow.entities import SourceType
from mlflow.tracking.context.abstract_context import RunContextProvider
from mlflow.utils.databricks_util... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/tracking/context/jupyter_notebook_context.py",
"license": "Apache License 2.0",
"lines": 170,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/tracking/context/test_jupyter_notebook_context.py | import json
from unittest import mock
import pytest
from mlflow.entities import SourceType
from mlflow.tracking.context.jupyter_notebook_context import (
JupyterNotebookRunContext,
_get_kernel_id,
_get_notebook_name,
_get_notebook_path_from_sessions,
_get_running_servers,
_get_sessions_noteboo... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/tracking/context/test_jupyter_notebook_context.py",
"license": "Apache License 2.0",
"lines": 284,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/genai/scorers/test_scorer_telemetry.py | """Tests for scorer telemetry behavior, specifically testing that nested scorer calls
skip telemetry recording while top-level calls record telemetry correctly.
"""
import asyncio
import json
import threading
from typing import Callable
from unittest import mock
import pytest
from pydantic import PrivateAttr
from ml... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/scorers/test_scorer_telemetry.py",
"license": "Apache License 2.0",
"lines": 218,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/entities/trace_metrics.py | from dataclasses import dataclass
from enum import Enum
from mlflow.entities._mlflow_object import _MlflowObject
from mlflow.protos import service_pb2 as pb
class MetricViewType(str, Enum):
TRACES = "TRACES"
SPANS = "SPANS"
ASSESSMENTS = "ASSESSMENTS"
def __str__(self) -> str:
return self.va... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/entities/trace_metrics.py",
"license": "Apache License 2.0",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/store/tracking/utils/sql_trace_metrics_utils.py | import json
from dataclasses import dataclass
from datetime import datetime, timezone
import sqlalchemy
from sqlalchemy import Column, and_, case, exists, func, literal_column
from sqlalchemy.orm.query import Query
from mlflow.entities.trace_metrics import (
AggregationType,
MetricAggregation,
MetricDataP... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/store/tracking/utils/sql_trace_metrics_utils.py",
"license": "Apache License 2.0",
"lines": 749,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/entities/test_trace_metrics.py | import pytest
from mlflow.entities.trace_metrics import (
AggregationType,
MetricAggregation,
MetricDataPoint,
MetricViewType,
)
from mlflow.protos import service_pb2 as pb
@pytest.mark.parametrize(
("view_type", "expected_proto"),
zip(MetricViewType, pb.MetricViewType.values(), strict=True),... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/entities/test_trace_metrics.py",
"license": "Apache License 2.0",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:tests/store/tracking/test_sqlalchemy_store_query_trace_metrics.py | import json
import uuid
from dataclasses import asdict
from datetime import datetime, timezone
import numpy as np
import pytest
from opentelemetry import trace as trace_api
from mlflow.entities import (
Assessment,
AssessmentSource,
AssessmentSourceType,
Expectation,
Feedback,
trace_location,
... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/store/tracking/test_sqlalchemy_store_query_trace_metrics.py",
"license": "Apache License 2.0",
"lines": 4232,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:dev/clint/src/clint/rules/forbidden_make_judge_in_builtin_scorers.py | import ast
from pathlib import Path
from clint.resolver import Resolver
from clint.rules.base import Rule
class ForbiddenMakeJudgeInBuiltinScorers(Rule):
"""Ensure make_judge is not used in builtin_scorers.py.
After switching to InstructionsJudge in builtin_scorers.py, this rule
prevents future regressi... | {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/src/clint/rules/forbidden_make_judge_in_builtin_scorers.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:dev/clint/tests/rules/test_forbidden_make_judge_in_builtin_scorers.py | from pathlib import Path
from clint.config import Config
from clint.linter import lint_file
from clint.rules.forbidden_make_judge_in_builtin_scorers import (
ForbiddenMakeJudgeInBuiltinScorers,
)
def test_forbidden_make_judge_in_builtin_scorers(index_path: Path) -> None:
code = """
from mlflow.genai.judges.m... | {
"repo_id": "mlflow/mlflow",
"file_path": "dev/clint/tests/rules/test_forbidden_make_judge_in_builtin_scorers.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/judges/prompts/knowledge_retention.py | # NB: User-facing name for the knowledge retention assessment.
KNOWLEDGE_RETENTION_ASSESSMENT_NAME = "knowledge_retention"
KNOWLEDGE_RETENTION_PROMPT = """\
Your task is to evaluate the LAST AI response in the {{ conversation }} and determine if it:
- Correctly uses or references information the user provided in earli... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/prompts/knowledge_retention.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/genai/judges/prompts/tool_call_correctness.py | import json
from typing import TYPE_CHECKING
from mlflow.genai.judges.utils.formatting_utils import (
format_available_tools,
format_tools_called,
)
from mlflow.genai.prompts.utils import format_prompt
if TYPE_CHECKING:
from mlflow.genai.utils.type import FunctionCall
from mlflow.types.chat import Cha... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/prompts/tool_call_correctness.py",
"license": "Apache License 2.0",
"lines": 170,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/genai/judges/utils/formatting_utils.py | import logging
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from mlflow.genai.utils.type import FunctionCall
from mlflow.types.chat import ChatTool
_logger = logging.getLogger(__name__)
def format_available_tools(available_tools: list["ChatTool"]) -> str:
"""Format available tools with description... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/utils/formatting_utils.py",
"license": "Apache License 2.0",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
mlflow/mlflow:tests/genai/judges/utils/test_formatting_utils.py | import pytest
from mlflow.genai.judges.utils.formatting_utils import format_available_tools, format_tools_called
from mlflow.genai.utils.type import FunctionCall
from mlflow.types.chat import (
ChatTool,
FunctionParams,
FunctionToolDefinition,
ParamProperty,
)
@pytest.mark.parametrize(
("tools", ... | {
"repo_id": "mlflow/mlflow",
"file_path": "tests/genai/judges/utils/test_formatting_utils.py",
"license": "Apache License 2.0",
"lines": 219,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
mlflow/mlflow:mlflow/genai/judges/prompts/tool_call_efficiency.py | from typing import TYPE_CHECKING
from mlflow.genai.judges.utils.formatting_utils import (
format_available_tools,
format_tools_called,
)
from mlflow.genai.prompts.utils import format_prompt
if TYPE_CHECKING:
from mlflow.genai.utils.type import FunctionCall
from mlflow.types.chat import ChatTool
# NB:... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/judges/prompts/tool_call_efficiency.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/genai/utils/type.py | from __future__ import annotations
from typing import Any
from mlflow.types.chat import Function
class FunctionCall(Function):
arguments: str | dict[str, Any] | None = None
outputs: Any | None = None
exception: str | None = None
| {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/utils/type.py",
"license": "Apache License 2.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
mlflow/mlflow:mlflow/genai/utils/prompts/available_tools_extraction.py | from typing import TYPE_CHECKING
if TYPE_CHECKING:
from mlflow.types.llm import ChatMessage
AVAILABLE_TOOLS_EXTRACTION_SYSTEM_PROMPT = """You are an expert in analyzing agent execution traces.
Your task is to examine an MLflow trace and identify all tools or functions that were
available to the LLM, not which too... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/genai/utils/prompts/available_tools_extraction.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
mlflow/mlflow:mlflow/gateway/providers/litellm.py | from __future__ import annotations
import json
from typing import Any, AsyncIterable
from mlflow.gateway.config import EndpointConfig, LiteLLMConfig
from mlflow.gateway.providers.base import BaseProvider, PassthroughAction, ProviderAdapter
from mlflow.gateway.schemas import chat, embeddings
from mlflow.gateway.utils ... | {
"repo_id": "mlflow/mlflow",
"file_path": "mlflow/gateway/providers/litellm.py",
"license": "Apache License 2.0",
"lines": 469,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |