Columns:
sample_id — string (lengths 21 to 196 characters)
text — string (lengths 105 to 936k characters)
metadata — dict
category — string (6 classes)
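Each row below is one record with these four fields. As a minimal sketch of working with this record structure (assuming the records are exported as JSON Lines; the file name "samples.jsonl" and the helper "iter_samples" are hypothetical, not part of the dump), one could tally samples per category like this:

import json
from collections import Counter

# Assumed JSON Lines export of the records shown below; the dump itself
# does not name a source file, so this path is illustrative only.
SAMPLES_PATH = "samples.jsonl"

def iter_samples(path):
    """Yield one record per line; each has sample_id, text, metadata, category."""
    with open(path, encoding="utf-8") as f:
        for line in f:
            yield json.loads(line)

if __name__ == "__main__":
    # Count how many samples fall into each of the six categories.
    counts = Counter(record["category"] for record in iter_samples(SAMPLES_PATH))
    for category, n in counts.most_common():
        print(f"{category}: {n}")
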
mlflow/mlflow:tests/gateway/providers/test_litellm.py
from unittest import mock import pytest from mlflow.gateway.config import EndpointConfig from mlflow.gateway.providers.base import PassthroughAction from mlflow.gateway.providers.litellm import LiteLLMAdapter, LiteLLMProvider from mlflow.gateway.schemas import chat, embeddings TEST_MESSAGE = "This is a test" def c...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/gateway/providers/test_litellm.py", "license": "Apache License 2.0", "lines": 728, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/genai/scorers/ragas/scorers/comparison_metrics.py
from __future__ import annotations from typing import ClassVar from mlflow.genai.judges.builtin import _MODEL_API_DOC from mlflow.genai.scorers.ragas import RagasScorer from mlflow.utils.annotations import experimental from mlflow.utils.docstring_utils import format_docstring @experimental(version="3.8.0") @format_...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/scorers/ragas/scorers/comparison_metrics.py", "license": "Apache License 2.0", "lines": 124, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
mlflow/mlflow:mlflow/genai/scorers/ragas/scorers/rag_metrics.py
from __future__ import annotations from typing import ClassVar from ragas.embeddings.base import Embeddings from mlflow.genai.judges.builtin import _MODEL_API_DOC from mlflow.genai.scorers.ragas import RagasScorer from mlflow.utils.annotations import experimental from mlflow.utils.docstring_utils import format_docst...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/scorers/ragas/scorers/rag_metrics.py", "license": "Apache License 2.0", "lines": 165, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
mlflow/mlflow:tests/genai/judges/adapters/test_utils.py
from unittest import mock import pytest from mlflow.exceptions import MlflowException from mlflow.genai.judges.adapters.databricks_managed_judge_adapter import ( DatabricksManagedJudgeAdapter, ) from mlflow.genai.judges.adapters.gateway_adapter import GatewayAdapter from mlflow.genai.judges.adapters.litellm_adapt...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/judges/adapters/test_utils.py", "license": "Apache License 2.0", "lines": 94, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/genai/judges/prompts/fluency.py
# NB: User-facing name for the fluency assessment. FLUENCY_ASSESSMENT_NAME = "fluency" FLUENCY_PROMPT = """\ You are a linguistic expert evaluating the Fluency of AI-generated text in {{ outputs }}. Definition: Fluency measures the grammatical correctness, natural flow, and linguistic quality of the text, regardless ...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/judges/prompts/fluency.py", "license": "Apache License 2.0", "lines": 12, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
mlflow/mlflow:mlflow/genai/scorers/ragas/models.py
from __future__ import annotations import json import typing as t import instructor import litellm from openai import AsyncOpenAI from pydantic import BaseModel from ragas.embeddings import OpenAIEmbeddings from ragas.llms import InstructorBaseRagasLLM from ragas.llms.litellm_llm import LiteLLMStructuredLLM from mlf...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/scorers/ragas/models.py", "license": "Apache License 2.0", "lines": 80, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:mlflow/genai/scorers/ragas/registry.py
from __future__ import annotations from dataclasses import dataclass from mlflow.exceptions import MlflowException @dataclass(frozen=True) class MetricConfig: classpath: str is_agentic_or_multiturn: bool = False requires_embeddings: bool = False requires_llm_in_constructor: bool = True requires_...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/scorers/ragas/registry.py", "license": "Apache License 2.0", "lines": 133, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:mlflow/genai/scorers/ragas/utils.py
from __future__ import annotations from typing import Any from mlflow.entities.trace import Trace from mlflow.exceptions import MlflowException from mlflow.genai.scorers.scorer_utils import parse_tool_call_expectations from mlflow.genai.utils.trace_utils import ( extract_retrieval_context_from_trace, extract_...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/scorers/ragas/utils.py", "license": "Apache License 2.0", "lines": 251, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:tests/genai/scorers/ragas/test_models.py
from unittest.mock import Mock, patch import pytest from pydantic import BaseModel from mlflow.exceptions import MlflowException from mlflow.genai.scorers.ragas.models import DatabricksRagasLLM, create_ragas_model class DummyResponseModel(BaseModel): answer: str score: int @pytest.fixture def mock_call_ch...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/scorers/ragas/test_models.py", "license": "Apache License 2.0", "lines": 44, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/genai/scorers/ragas/test_ragas_scorer.py
from unittest.mock import MagicMock, patch import pytest from ragas.embeddings.base import BaseRagasEmbedding import mlflow from mlflow.entities.assessment import Feedback from mlflow.entities.assessment_source import AssessmentSourceType from mlflow.exceptions import MlflowException from mlflow.genai.judges.utils im...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/scorers/ragas/test_ragas_scorer.py", "license": "Apache License 2.0", "lines": 306, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/genai/scorers/ragas/test_registry.py
from unittest import mock import pytest from mlflow.exceptions import MlflowException from mlflow.genai.scorers.ragas.registry import ( get_metric_class, is_agentic_or_multiturn_metric, requires_args_from_placeholders, requires_embeddings, requires_llm_at_score_time, requires_llm_in_constructo...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/scorers/ragas/test_registry.py", "license": "Apache License 2.0", "lines": 84, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/genai/scorers/ragas/test_utils.py
import pytest from langchain_core.documents import Document from ragas.dataset_schema import MultiTurnSample, SingleTurnSample from ragas.messages import AIMessage, HumanMessage, ToolCall import mlflow from mlflow.entities.span import SpanType from mlflow.genai.scorers.ragas.utils import ( create_mlflow_error_mess...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/scorers/ragas/test_utils.py", "license": "Apache License 2.0", "lines": 212, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/genai/judges/prompts/conversational_role_adherence.py
# NB: User-facing name for the conversational role adherence assessment. CONVERSATIONAL_ROLE_ADHERENCE_ASSESSMENT_NAME = "conversational_role_adherence" CONVERSATIONAL_ROLE_ADHERENCE_PROMPT = """\ Consider the following conversation history between a user and an assistant. \ Your task is to evaluate whether the assist...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/judges/prompts/conversational_role_adherence.py", "license": "Apache License 2.0", "lines": 24, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
mlflow/mlflow:mlflow/server/gateway_api.py
""" Database-backed Gateway API endpoints for MLflow Server. This module provides dynamic gateway endpoints that are configured from the database rather than from a static YAML configuration file. It integrates the AI Gateway functionality directly into the MLflow tracking server. """ import functools import logging ...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/server/gateway_api.py", "license": "Apache License 2.0", "lines": 792, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:tests/server/test_gateway_api.py
import json from pathlib import Path from typing import Any from unittest import mock from unittest.mock import AsyncMock, MagicMock, patch import pytest from fastapi import HTTPException from fastapi.responses import StreamingResponse import mlflow from mlflow.entities import ( FallbackConfig, FallbackStrate...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/server/test_gateway_api.py", "license": "Apache License 2.0", "lines": 2487, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/tracing/export/span_batcher.py
import atexit import logging import threading from collections import defaultdict from queue import Queue from typing import Callable from mlflow.entities.span import Span from mlflow.environment_variables import ( MLFLOW_ASYNC_TRACE_LOGGING_MAX_INTERVAL_MILLIS, MLFLOW_ASYNC_TRACE_LOGGING_MAX_SPAN_BATCH_SIZE, ...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/tracing/export/span_batcher.py", "license": "Apache License 2.0", "lines": 102, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/genai/scorers/deepeval/scorers/agentic_metrics.py
"""Agentic metrics for evaluating AI agent performance.""" from __future__ import annotations from typing import ClassVar from mlflow.genai.judges.builtin import _MODEL_API_DOC from mlflow.genai.scorers.deepeval import DeepEvalScorer from mlflow.utils.annotations import experimental from mlflow.utils.docstring_utils...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/scorers/deepeval/scorers/agentic_metrics.py", "license": "Apache License 2.0", "lines": 123, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
mlflow/mlflow:mlflow/genai/scorers/deepeval/scorers/conversational_metrics.py
"""Conversational metrics for evaluating multi-turn dialogue performance.""" from __future__ import annotations from typing import ClassVar from mlflow.genai.judges.builtin import _MODEL_API_DOC from mlflow.genai.scorers.deepeval import DeepEvalScorer from mlflow.utils.annotations import experimental from mlflow.uti...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/scorers/deepeval/scorers/conversational_metrics.py", "license": "Apache License 2.0", "lines": 152, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
mlflow/mlflow:mlflow/genai/scorers/deepeval/scorers/rag_metrics.py
"""RAG (Retrieval-Augmented Generation) metrics for DeepEval integration.""" from __future__ import annotations from typing import ClassVar from mlflow.genai.judges.builtin import _MODEL_API_DOC from mlflow.genai.scorers.deepeval import DeepEvalScorer from mlflow.utils.annotations import experimental from mlflow.uti...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/scorers/deepeval/scorers/rag_metrics.py", "license": "Apache License 2.0", "lines": 109, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
mlflow/mlflow:mlflow/genai/scorers/deepeval/scorers/safety_metrics.py
"""Safety and responsible AI metrics for content evaluation.""" from __future__ import annotations from typing import ClassVar from mlflow.genai.judges.builtin import _MODEL_API_DOC from mlflow.genai.scorers.deepeval import DeepEvalScorer from mlflow.utils.annotations import experimental from mlflow.utils.docstring_...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/scorers/deepeval/scorers/safety_metrics.py", "license": "Apache License 2.0", "lines": 134, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
mlflow/mlflow:mlflow/utils/providers.py
import importlib.util from typing import Any, TypedDict from typing_extensions import NotRequired _PROVIDER_BACKEND_AVAILABLE = importlib.util.find_spec("litellm") is not None _SUPPORTED_MODEL_MODES = ("chat", "completion", "embedding", None) class FieldDict(TypedDict): name: str description: str secre...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/utils/providers.py", "license": "Apache License 2.0", "lines": 517, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/genai/judges/prompts/conversational_tool_call_efficiency.py
# NB: User-facing name for the conversational tool call efficiency assessment. CONVERSATIONAL_TOOL_CALL_EFFICIENCY_ASSESSMENT_NAME = "conversational_tool_call_efficiency" CONVERSATIONAL_TOOL_CALL_EFFICIENCY_PROMPT = """\ Consider the following conversation history between a user and an assistant, including tool calls ...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/judges/prompts/conversational_tool_call_efficiency.py", "license": "Apache License 2.0", "lines": 21, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
mlflow/mlflow:mlflow/server/constants.py
""" Constants used for internal server-to-worker communication. These are internal environment variables (prefixed with _MLFLOW_SERVER_) used for communication between the MLflow CLI and forked server processes (gunicorn/uvicorn workers). They are set by the server and read by workers, and should not be set by end use...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/server/constants.py", "license": "Apache License 2.0", "lines": 57, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/store/tracking/_secret_cache.py
""" Server-side encrypted cache for secrets management. Implements time-bucketed ephemeral encryption for cached secrets to provide defense-in-depth and satisfy CWE-316 (https://cwe.mitre.org/data/definitions/316.html). Security Model and Limitations: This cache protects against accidental exposure of secrets in log...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/store/tracking/_secret_cache.py", "license": "Apache License 2.0", "lines": 210, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:tests/store/tracking/test_secret_cache.py
import json import time from concurrent.futures import ThreadPoolExecutor import pytest # Commented out pending integration with rest branch: # from mlflow.entities import SecretResourceType from mlflow.store.tracking._secret_cache import ( _MAX_TTL, _MIN_TTL, EphemeralCacheEncryption, SecretCache, ) ...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/store/tracking/test_secret_cache.py", "license": "Apache License 2.0", "lines": 381, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/pyfunc/utils.py
import json import os from typing import TYPE_CHECKING from fastapi.testclient import TestClient import mlflow from mlflow.pyfunc import scoring_server if TYPE_CHECKING: import httpx def score_model_in_process(model_uri: str, data: str, content_type: str) -> "httpx.Response": """Score a model using in-proc...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/pyfunc/utils.py", "license": "Apache License 2.0", "lines": 31, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/genai/scorers/deepeval/models.py
from __future__ import annotations import json from deepeval.models import LiteLLMModel from deepeval.models.base_model import DeepEvalBaseLLM from pydantic import ValidationError from mlflow.genai.judges.adapters.databricks_managed_judge_adapter import ( call_chat_completions, ) from mlflow.genai.judges.constan...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/scorers/deepeval/models.py", "license": "Apache License 2.0", "lines": 59, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/genai/scorers/deepeval/registry.py
from __future__ import annotations from mlflow.exceptions import MlflowException from mlflow.genai.scorers.deepeval.utils import DEEPEVAL_NOT_INSTALLED_ERROR_MESSAGE # Registry format: metric_name -> (classpath, is_deterministic) _METRIC_REGISTRY = { # RAG Metrics "AnswerRelevancy": ("deepeval.metrics.AnswerR...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/scorers/deepeval/registry.py", "license": "Apache License 2.0", "lines": 77, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:mlflow/genai/scorers/deepeval/utils.py
"""Utility functions and constants for DeepEval integration.""" from __future__ import annotations from typing import Any from mlflow.entities.span import SpanAttributeKey, SpanType from mlflow.entities.trace import Trace from mlflow.exceptions import MlflowException from mlflow.genai.utils.trace_utils import ( ...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/scorers/deepeval/utils.py", "license": "Apache License 2.0", "lines": 183, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:tests/genai/scorers/deepeval/test_deepeval_scorer.py
from unittest.mock import Mock, patch import pytest import mlflow from mlflow.entities.assessment import Feedback from mlflow.entities.assessment_source import AssessmentSourceType from mlflow.genai.judges.utils import CategoricalRating from mlflow.genai.scorers import FRAMEWORK_METADATA_KEY from mlflow.genai.scorers...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/scorers/deepeval/test_deepeval_scorer.py", "license": "Apache License 2.0", "lines": 278, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/genai/scorers/deepeval/test_models.py
from unittest.mock import Mock, patch import pytest from mlflow.genai.scorers.deepeval.models import DatabricksDeepEvalLLM @pytest.fixture def mock_call_chat_completions(): with patch("mlflow.genai.scorers.deepeval.models.call_chat_completions") as mock: result = Mock() result.output = "Test out...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/scorers/deepeval/test_models.py", "license": "Apache License 2.0", "lines": 18, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/genai/scorers/deepeval/test_registry.py
from unittest import mock import pytest from mlflow.exceptions import MlflowException from mlflow.genai.scorers.deepeval.registry import get_metric_class, is_deterministic_metric def test_get_metric_class_returns_valid_class(): metric_class = get_metric_class("AnswerRelevancy") assert metric_class.__name__ ...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/scorers/deepeval/test_registry.py", "license": "Apache License 2.0", "lines": 27, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/genai/scorers/deepeval/test_utils.py
from unittest.mock import Mock import pytest from mlflow.entities.span import Span, SpanAttributeKey, SpanType from mlflow.exceptions import MlflowException from mlflow.genai.scorers.deepeval.models import create_deepeval_model from mlflow.genai.scorers.deepeval.utils import ( _convert_to_deepeval_tool_calls, ...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/scorers/deepeval/test_utils.py", "license": "Apache License 2.0", "lines": 107, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/genai/judges/adapters/base_adapter.py
from __future__ import annotations from abc import ABC, abstractmethod from dataclasses import dataclass from typing import TYPE_CHECKING, Any import pydantic if TYPE_CHECKING: from mlflow.entities.trace import Trace from mlflow.types.llm import ChatMessage from mlflow.entities.assessment import Feedback ...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/judges/adapters/base_adapter.py", "license": "Apache License 2.0", "lines": 94, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
mlflow/mlflow:mlflow/genai/judges/adapters/utils.py
"""Utility functions for judge adapters.""" from __future__ import annotations from typing import TYPE_CHECKING if TYPE_CHECKING: from mlflow.genai.judges.adapters.base_adapter import BaseJudgeAdapter from mlflow.types.llm import ChatMessage from mlflow.exceptions import MlflowException from mlflow.genai.ju...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/judges/adapters/utils.py", "license": "Apache License 2.0", "lines": 42, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:mlflow/genai/judges/prompts/summarization.py
# NB: User-facing name for the summarization assessment. SUMMARIZATION_ASSESSMENT_NAME = "summarization" SUMMARIZATION_PROMPT = """\ Consider the following source document and candidate summary. You must decide whether the summary is an acceptable summary of the document. Output only "yes" or "no" based on whether the...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/judges/prompts/summarization.py", "license": "Apache License 2.0", "lines": 20, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
mlflow/mlflow:mlflow/store/tracking/gateway/rest_mixin.py
"""REST Gateway Store Mixin - Gateway API implementation for REST-based tracking stores.""" from __future__ import annotations from typing import Any from mlflow.entities import ( GatewayEndpoint, GatewayEndpointBinding, GatewayEndpointModelConfig, GatewayEndpointModelMapping, GatewayEndpointTag,...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/store/tracking/gateway/rest_mixin.py", "license": "Apache License 2.0", "lines": 624, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:tests/entities/test_gateway_endpoint.py
from mlflow.entities import ( GatewayEndpoint, GatewayEndpointBinding, GatewayEndpointModelMapping, GatewayModelDefinition, GatewayModelLinkageType, GatewayResourceType, ) def test_model_definition_creation_full(): model_def = GatewayModelDefinition( model_definition_id="model-def-...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/entities/test_gateway_endpoint.py", "license": "Apache License 2.0", "lines": 347, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/entities/test_gateway_secrets.py
from mlflow.entities import GatewaySecretInfo def test_secret_creation_full(): secret = GatewaySecretInfo( secret_id="test-secret-id", secret_name="my_api_key", masked_values={"api_key": "sk-...abc123"}, created_at=1234567890000, last_updated_at=1234567890000, provi...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/entities/test_gateway_secrets.py", "license": "Apache License 2.0", "lines": 168, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/store/tracking/gateway/config_resolver.py
""" Server-side only configuration resolver for Gateway endpoints. This module provides functions to retrieve decrypted endpoint configurations for resources. These functions are privileged operations that should only be called server-side and never exposed to clients via MlflowClient. """ import json from mlflow.ex...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/store/tracking/gateway/config_resolver.py", "license": "Apache License 2.0", "lines": 203, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/store/tracking/gateway/sqlalchemy_mixin.py
from __future__ import annotations import json import os import uuid from typing import Any from sqlalchemy import func from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import joinedload from mlflow.entities import ( FallbackConfig, GatewayEndpoint, GatewayEndpointBinding, GatewayEndpoin...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/store/tracking/gateway/sqlalchemy_mixin.py", "license": "Apache License 2.0", "lines": 1179, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:tests/store/tracking/test_gateway_sql_store.py
import json import uuid from pathlib import Path import pytest from mlflow.entities import ( BudgetAction, BudgetDurationUnit, BudgetTargetScope, BudgetUnit, FallbackConfig, FallbackStrategy, GatewayBudgetPolicy, GatewayEndpoint, GatewayEndpointBinding, GatewayEndpointModelConf...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/store/tracking/test_gateway_sql_store.py", "license": "Apache License 2.0", "lines": 1999, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/store/tracking/gateway/abstract_mixin.py
from typing import Any from mlflow.entities import ( FallbackConfig, GatewayEndpoint, GatewayEndpointBinding, GatewayEndpointModelConfig, GatewayEndpointModelMapping, GatewayEndpointTag, GatewayModelDefinition, GatewaySecretInfo, RoutingStrategy, ) from mlflow.entities.gateway_budge...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/store/tracking/gateway/abstract_mixin.py", "license": "Apache License 2.0", "lines": 467, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
mlflow/mlflow:mlflow/genai/judges/prompts/conversational_safety.py
# NB: User-facing name for the conversational safety assessment. CONVERSATIONAL_SAFETY_ASSESSMENT_NAME = "conversational_safety" CONVERSATIONAL_SAFETY_PROMPT = """\ Consider the following conversation history between a user and an assistant. Your task is to \ evaluate the safety of the assistant's responses only and o...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/judges/prompts/conversational_safety.py", "license": "Apache License 2.0", "lines": 21, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
mlflow/mlflow:mlflow/entities/gateway_endpoint.py
from __future__ import annotations from dataclasses import dataclass, field from enum import Enum from mlflow.entities._mlflow_object import _MlflowObject from mlflow.protos.service_pb2 import FallbackConfig as ProtoFallbackConfig from mlflow.protos.service_pb2 import FallbackStrategy as ProtoFallbackStrategy from ml...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/entities/gateway_endpoint.py", "license": "Apache License 2.0", "lines": 402, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/entities/gateway_secrets.py
from dataclasses import dataclass from typing import Any from mlflow.entities._mlflow_object import _MlflowObject from mlflow.protos.service_pb2 import GatewaySecretInfo as ProtoGatewaySecretInfo from mlflow.utils.workspace_utils import resolve_entity_workspace_name @dataclass(frozen=True) class GatewaySecretInfo(_M...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/entities/gateway_secrets.py", "license": "Apache License 2.0", "lines": 78, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/store/tracking/gateway/entities.py
from dataclasses import dataclass, field from typing import Any from mlflow.entities.gateway_endpoint import ( FallbackConfig, GatewayModelLinkageType, RoutingStrategy, ) @dataclass class GatewayModelConfig: """ Model configuration with decrypted credentials for runtime use. This entity cont...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/store/tracking/gateway/entities.py", "license": "Apache License 2.0", "lines": 57, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
mlflow/mlflow:tests/genai/utils/test_prompt_cache.py
import threading import time import pytest from mlflow.prompt.registry_utils import PromptCache, PromptCacheKey @pytest.fixture(autouse=True) def reset_cache(): """Reset the prompt cache before and after each test.""" PromptCache._reset_instance() yield PromptCache._reset_instance() def test_singl...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/utils/test_prompt_cache.py", "license": "Apache License 2.0", "lines": 146, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/tracing/otel/translation/voltagent.py
import json from typing import Any from mlflow.entities.span import SpanType from mlflow.tracing.otel.translation.base import OtelSchemaTranslator class VoltAgentTranslator(OtelSchemaTranslator): """ Translator for VoltAgent semantic conventions. VoltAgent provides clean chat-formatted messages in `agen...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/tracing/otel/translation/voltagent.py", "license": "Apache License 2.0", "lines": 102, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:tests/tracing/otel/test_voltagent_translator.py
import json from unittest import mock import pytest from mlflow.entities.span import Span, SpanType from mlflow.tracing.constant import SpanAttributeKey from mlflow.tracing.otel.translation import ( translate_span_type_from_otel, translate_span_when_storing, ) from mlflow.tracing.otel.translation.voltagent im...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/tracing/otel/test_voltagent_translator.py", "license": "Apache License 2.0", "lines": 200, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:dev/clint/src/clint/rules/use_walrus_operator.py
import ast from clint.rules.base import Rule class UseWalrusOperator(Rule): def _message(self) -> str: return ( "Use the walrus operator `:=` when a variable is assigned and only used " "within an `if` block that tests its truthiness. " "For example, replace `a = ...; ...
{ "repo_id": "mlflow/mlflow", "file_path": "dev/clint/src/clint/rules/use_walrus_operator.py", "license": "Apache License 2.0", "lines": 136, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:dev/clint/tests/rules/test_use_walrus_operator.py
from pathlib import Path from clint.config import Config from clint.linter import Position, Range, lint_file from clint.rules import UseWalrusOperator def test_basic_walrus_pattern(index_path: Path) -> None: code = """ def f(): a = func() if a: use(a) """ config = Config(select={UseWalrusOper...
{ "repo_id": "mlflow/mlflow", "file_path": "dev/clint/tests/rules/test_use_walrus_operator.py", "license": "Apache License 2.0", "lines": 354, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:dev/clint/src/clint/rules/assign_before_append.py
import ast from clint.rules.base import Rule class AssignBeforeAppend(Rule): def _message(self) -> str: return ( "Avoid unnecessary assignment before appending to a list. " "Use a list comprehension instead." ) @staticmethod def check(node: ast.For, prev_stmt: ast...
{ "repo_id": "mlflow/mlflow", "file_path": "dev/clint/src/clint/rules/assign_before_append.py", "license": "Apache License 2.0", "lines": 56, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:dev/clint/tests/rules/test_assign_before_append.py
from pathlib import Path from clint.config import Config from clint.linter import Position, Range, lint_file from clint.rules import AssignBeforeAppend def test_assign_before_append_basic(index_path: Path) -> None: code = """ items = [] for x in data: item = transform(x) items.append(item) """ config...
{ "repo_id": "mlflow/mlflow", "file_path": "dev/clint/tests/rules/test_assign_before_append.py", "license": "Apache License 2.0", "lines": 123, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/genai/evaluate/test_entities.py
from mlflow.entities.dataset_record_source import DatasetRecordSource, DatasetRecordSourceType from mlflow.genai.evaluation.entities import EvalItem def test_eval_item_from_dataset_row_extracts_source(): source = DatasetRecordSource( source_type=DatasetRecordSourceType.TRACE, source_data={"trace_i...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/evaluate/test_entities.py", "license": "Apache License 2.0", "lines": 28, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/genai/judges/prompts/completeness.py
# NB: User-facing name for the completeness assessment. COMPLETENESS_ASSESSMENT_NAME = "completeness" COMPLETENESS_PROMPT = """\ Consider the following user prompt and assistant response. You must decide whether the assistant successfully addressed all explicit requests in the user's prompt. Output only "yes" or "no" ...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/judges/prompts/completeness.py", "license": "Apache License 2.0", "lines": 17, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
mlflow/mlflow:mlflow/genai/judges/prompts/conversation_completeness.py
# NB: User-facing name for the conversation completeness assessment. CONVERSATION_COMPLETENESS_ASSESSMENT_NAME = "conversation_completeness" CONVERSATION_COMPLETENESS_PROMPT = """\ Consider the following conversation history between a user and an assistant. Your task is to output exactly one label: "yes" or "no" based...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/judges/prompts/conversation_completeness.py", "license": "Apache License 2.0", "lines": 16, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
mlflow/mlflow:mlflow/genai/judges/prompts/user_frustration.py
# NB: User-facing name for the user frustration assessment. USER_FRUSTRATION_ASSESSMENT_NAME = "user_frustration" USER_FRUSTRATION_PROMPT = """\ Consider the following conversation history between a user and an assistant. Your task is to determine the user's emotional trajectory and output exactly one of the following...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/judges/prompts/user_frustration.py", "license": "Apache License 2.0", "lines": 15, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
mlflow/mlflow:mlflow/genai/evaluation/session_utils.py
"""Utilities for session-level (multi-turn) evaluation.""" import traceback from collections import defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed from typing import TYPE_CHECKING, Any from mlflow.entities.assessment import Feedback from mlflow.entities.assessment_error import AssessmentE...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/evaluation/session_utils.py", "license": "Apache License 2.0", "lines": 138, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:tests/genai/evaluate/test_session_utils.py
from unittest.mock import Mock, patch import pytest import mlflow from mlflow.entities import TraceData, TraceInfo, TraceLocation, TraceState from mlflow.entities.assessment import Feedback from mlflow.entities.assessment_source import AssessmentSource, AssessmentSourceType from mlflow.entities.trace import Trace fro...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/evaluate/test_session_utils.py", "license": "Apache License 2.0", "lines": 423, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/tracing/utils/test_copy.py
import time import pytest import mlflow from mlflow.tracing.utils.copy import copy_trace_to_experiment from tests.tracing.helper import purge_traces def _create_test_span_dict(request_id="test-trace", parent_id=None): """Helper to create a minimal valid span dict for testing""" return { "name": "ro...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/tracing/utils/test_copy.py", "license": "Apache License 2.0", "lines": 97, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:.claude/hooks/lint.py
""" Lightweight hook for validating code written by Claude Code. """ import ast import json import os import re import subprocess import sys from dataclasses import dataclass from pathlib import Path from typing import Literal KILL_SWITCH_ENV_VAR = "CLAUDE_LINT_HOOK_DISABLED" @dataclass class LintError: file: P...
{ "repo_id": "mlflow/mlflow", "file_path": ".claude/hooks/lint.py", "license": "Apache License 2.0", "lines": 123, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/agno/autolog_v2.py
""" Autologging logic for Agno V2 (>= 2.0.0) using OpenTelemetry instrumentation. """ import importlib.metadata as _meta import logging from packaging.version import Version import mlflow from mlflow.exceptions import MlflowException from mlflow.tracing.utils.otlp import build_otlp_headers _logger = logging.getLogg...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/agno/autolog_v2.py", "license": "Apache License 2.0", "lines": 74, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:tests/langchain/sample_code/simple_runnable.py
from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import PromptTemplate from langchain_openai import ChatOpenAI import mlflow prompt = PromptTemplate( input_variables=["product"], template="What is {product}?", ) llm = ChatOpenAI(temperature=0.1, stream_usage=True) chain = ...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/langchain/sample_code/simple_runnable.py", "license": "Apache License 2.0", "lines": 11, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/telemetry/installation_id.py
import json import os import threading import uuid from datetime import datetime, timezone from pathlib import Path from mlflow.utils.os import is_windows from mlflow.version import VERSION _KEY_INSTALLATION_ID = "installation_id" _CACHE_LOCK = threading.RLock() _INSTALLATION_ID_CACHE: str | None = None def get_or_...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/telemetry/installation_id.py", "license": "Apache License 2.0", "lines": 72, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:tests/telemetry/test_installation_id.py
import json import uuid from unittest import mock import pytest import mlflow from mlflow.telemetry.client import get_telemetry_client, set_telemetry_client from mlflow.telemetry.installation_id import get_or_create_installation_id from mlflow.utils.os import is_windows from mlflow.version import VERSION @pytest.fi...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/telemetry/test_installation_id.py", "license": "Apache License 2.0", "lines": 71, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:dev/clint/src/clint/rules/subprocess_check_call.py
import ast from clint.resolver import Resolver from clint.rules.base import Rule class SubprocessCheckCall(Rule): def _message(self) -> str: return ( "Use `subprocess.check_call(...)` instead of `subprocess.run(..., check=True)` " "for better readability. Only applies when check=T...
{ "repo_id": "mlflow/mlflow", "file_path": "dev/clint/src/clint/rules/subprocess_check_call.py", "license": "Apache License 2.0", "lines": 32, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:dev/clint/tests/rules/test_subprocess_check_call.py
from pathlib import Path from clint.config import Config from clint.linter import Position, Range, lint_file from clint.rules import SubprocessCheckCall def test_subprocess_check_call(index_path: Path) -> None: code = """ import subprocess # Bad subprocess.run(["echo", "hello"], check=True) # Good - has other ...
{ "repo_id": "mlflow/mlflow", "file_path": "dev/clint/tests/rules/test_subprocess_check_call.py", "license": "Apache License 2.0", "lines": 21, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/tracing/otel/translation/vercel_ai.py
import json from typing import Any from mlflow.entities.span import SpanType from mlflow.tracing.constant import SpanAttributeKey from mlflow.tracing.otel.translation.base import OtelSchemaTranslator class VercelAITranslator(OtelSchemaTranslator): """Translator for Vercel AI SDK spans.""" # https://ai-sdk.d...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/tracing/otel/translation/vercel_ai.py", "license": "Apache License 2.0", "lines": 80, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:tests/tracing/otel/test_vercel_ai_translator.py
import json from unittest import mock import pytest from mlflow.entities.span import Span from mlflow.tracing.constant import SpanAttributeKey from mlflow.tracing.otel.translation import translate_span_when_storing @pytest.mark.parametrize( ("attributes", "expected_inputs", "expected_outputs"), [ # ...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/tracing/otel/test_vercel_ai_translator.py", "license": "Apache License 2.0", "lines": 194, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:dev/clint/src/clint/rules/version_major_check.py
import ast import re from typing import TYPE_CHECKING from clint.rules.base import Rule if TYPE_CHECKING: from clint.resolver import Resolver class MajorVersionCheck(Rule): def _message(self) -> str: return ( "Use `.major` field for major version comparisons instead of full version strin...
{ "repo_id": "mlflow/mlflow", "file_path": "dev/clint/src/clint/rules/version_major_check.py", "license": "Apache License 2.0", "lines": 45, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:dev/clint/tests/rules/test_version_major_check.py
from pathlib import Path from clint.config import Config from clint.linter import lint_file from clint.rules.version_major_check import MajorVersionCheck def test_version_major_check(index_path: Path) -> None: code = """ from packaging.version import Version Version("0.9.0") >= Version("1.0.0") Version("1.2.3")...
{ "repo_id": "mlflow/mlflow", "file_path": "dev/clint/tests/rules/test_version_major_check.py", "license": "Apache License 2.0", "lines": 34, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:dev/check_whitespace_only.py
""" Detect files where all changes are whitespace-only. This helps avoid unnecessary commit history noise from whitespace-only changes. """ import argparse import json import os import sys import urllib.request from typing import cast BYPASS_LABEL = "allow-whitespace-only" def github_api_request(url: str, accept: ...
{ "repo_id": "mlflow/mlflow", "file_path": "dev/check_whitespace_only.py", "license": "Apache License 2.0", "lines": 93, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/tracing/otel/translation/google_adk.py
from mlflow.tracing.otel.translation.base import OtelSchemaTranslator class GoogleADKTranslator(OtelSchemaTranslator): """ Translator for Google ADK semantic conventions. Google ADK mostly uses OpenTelemetry semantic conventions, but with some custom inputs and outputs attributes. """ # Input...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/tracing/otel/translation/google_adk.py", "license": "Apache License 2.0", "lines": 11, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
mlflow/mlflow:tests/store/tracking/test_plugin_validation.py
import subprocess import sys from mlflow.store.tracking.sqlalchemy_store import SqlAlchemyStore def test_sqlalchemy_store_import_does_not_cause_circular_import(): """ Regression test for circular import issue (https://github.com/mlflow/mlflow/issues/18386). Store plugins that inherit from SqlAlchemyStor...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/store/tracking/test_plugin_validation.py", "license": "Apache License 2.0", "lines": 75, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/genai/judges/adapters/databricks_managed_judge_adapter.py
from __future__ import annotations import inspect import json import logging from typing import TYPE_CHECKING, Any, Callable, TypeVar if TYPE_CHECKING: import litellm from mlflow.entities.trace import Trace from mlflow.types.llm import ChatMessage, ToolDefinition T = TypeVar("T") # Generic type for age...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/judges/adapters/databricks_managed_judge_adapter.py", "license": "Apache License 2.0", "lines": 324, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/genai/judges/adapters/gateway_adapter.py
from __future__ import annotations import json from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from mlflow.types.llm import ChatMessage from mlflow.entities.assessment import Feedback from mlflow.entities.assessment_source import AssessmentSource, AssessmentSourceType from mlflow.exceptions import Mlflow...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/judges/adapters/gateway_adapter.py", "license": "Apache License 2.0", "lines": 98, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/genai/judges/adapters/litellm_adapter.py
from __future__ import annotations import json import logging import re import threading from contextlib import ContextDecorator from dataclasses import dataclass from typing import TYPE_CHECKING, Any import pydantic if TYPE_CHECKING: import litellm from mlflow.entities.trace import Trace from mlflow.ty...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/judges/adapters/litellm_adapter.py", "license": "Apache License 2.0", "lines": 511, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/genai/judges/utils/invocation_utils.py
"""Main invocation utilities for judge models.""" from __future__ import annotations import json import logging from typing import TYPE_CHECKING, Any import pydantic if TYPE_CHECKING: from mlflow.entities.trace import Trace from mlflow.types.llm import ChatMessage from mlflow.entities.assessment import Fee...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/judges/utils/invocation_utils.py", "license": "Apache License 2.0", "lines": 202, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
mlflow/mlflow:mlflow/genai/judges/utils/parsing_utils.py
"""Response parsing utilities for judge models.""" def _strip_markdown_code_blocks(response: str) -> str: """ Strip markdown code blocks from LLM responses. Some legacy models wrap JSON responses in markdown code blocks (```json...```). This function removes those wrappers to extract the raw JSON con...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/judges/utils/parsing_utils.py", "license": "Apache License 2.0", "lines": 26, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:mlflow/genai/judges/utils/prompt_utils.py
"""Prompt formatting and manipulation utilities for judge models.""" from __future__ import annotations import re from typing import TYPE_CHECKING, NamedTuple from mlflow.exceptions import MlflowException from mlflow.protos.databricks_pb2 import BAD_REQUEST if TYPE_CHECKING: from mlflow.genai.judges.base import...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/judges/utils/prompt_utils.py", "license": "Apache License 2.0", "lines": 74, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/genai/judges/utils/tool_calling_utils.py
"""Tool calling support for judge models.""" from __future__ import annotations import json from dataclasses import asdict, is_dataclass from typing import TYPE_CHECKING, NoReturn if TYPE_CHECKING: import litellm from mlflow.entities.trace import Trace from mlflow.types.llm import ToolCall from mlflow....
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/judges/utils/tool_calling_utils.py", "license": "Apache License 2.0", "lines": 103, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:tests/genai/judges/adapters/test_litellm_adapter.py
from unittest import mock import litellm import pytest from litellm import RetryPolicy from litellm.types.utils import ModelResponse from pydantic import BaseModel, Field from mlflow.entities.trace import Trace from mlflow.entities.trace_info import TraceInfo from mlflow.entities.trace_location import TraceLocation f...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/judges/adapters/test_litellm_adapter.py", "license": "Apache License 2.0", "lines": 505, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/genai/judges/utils/test_invocation_utils.py
import json from unittest import mock import litellm import pytest from litellm import RetryPolicy from litellm.types.utils import ModelResponse, Usage from pydantic import BaseModel, Field from mlflow.entities.assessment import AssessmentSourceType from mlflow.entities.trace import Trace from mlflow.entities.trace_i...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/judges/utils/test_invocation_utils.py", "license": "Apache License 2.0", "lines": 946, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/genai/judges/utils/test_parsing_utils.py
from mlflow.genai.judges.utils.parsing_utils import ( _sanitize_justification, _strip_markdown_code_blocks, ) def test_strip_markdown_no_markdown_returns_unchanged(): response = '{"result": "yes", "rationale": "looks good"}' assert _strip_markdown_code_blocks(response) == response def test_strip_mar...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/judges/utils/test_parsing_utils.py", "license": "Apache License 2.0", "lines": 105, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/genai/judges/utils/test_prompt_utils.py
import pytest from mlflow.genai.judges.base import Judge from mlflow.genai.judges.utils.prompt_utils import add_output_format_instructions from mlflow.genai.prompts.utils import format_prompt def test_add_output_format_instructions(): output_fields = Judge.get_output_fields() simple_prompt = "Evaluate this ...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/judges/utils/test_prompt_utils.py", "license": "Apache License 2.0", "lines": 51, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/genai/judges/utils/test_tool_calling_utils.py
import json from dataclasses import dataclass from unittest import mock import litellm import pytest from mlflow.entities.trace import Trace from mlflow.entities.trace_info import TraceInfo from mlflow.entities.trace_location import TraceLocation from mlflow.entities.trace_state import TraceState from mlflow.genai.ju...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/judges/utils/test_tool_calling_utils.py", "license": "Apache License 2.0", "lines": 122, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/genai/agent_server/server.py
import argparse import functools import inspect import json import logging import os import posixpath from typing import Any, AsyncGenerator, Callable, Literal, ParamSpec, TypeVar import httpx import uvicorn from fastapi import FastAPI, HTTPException, Request from fastapi.responses import Response, StreamingResponse ...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/agent_server/server.py", "license": "Apache License 2.0", "lines": 353, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/genai/agent_server/utils.py
import logging import os import subprocess from contextvars import ContextVar from mlflow.tracking.fluent import _set_active_model # Context-isolated storage for request headers # ensuring thread-safe access across async execution contexts _request_headers: ContextVar[dict[str, str]] = ContextVar[dict[str, str]]( ...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/agent_server/utils.py", "license": "Apache License 2.0", "lines": 36, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
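The utils module above keeps request headers in a ContextVar so that each async request context sees only the headers set within it. Below is a minimal sketch of that pattern, assuming illustrative helper names (set_request_headers / get_request_headers); it is not the module's actual API.

# Minimal sketch of the ContextVar isolation pattern described in the module above.
# Helper names are illustrative assumptions, not mlflow's actual functions.
import asyncio
from contextvars import ContextVar

_request_headers: ContextVar[dict[str, str]] = ContextVar("request_headers", default={})


def set_request_headers(headers: dict[str, str]) -> None:
    # The value set here is visible only within the current (task-local) context.
    _request_headers.set(headers)


def get_request_headers() -> dict[str, str]:
    return _request_headers.get()


async def handle_request(request_id: str) -> None:
    set_request_headers({"x-request-id": request_id})
    await asyncio.sleep(0)  # yield so the two tasks interleave
    # Each task still sees only the headers it set itself.
    assert get_request_headers()["x-request-id"] == request_id


async def main() -> None:
    await asyncio.gather(handle_request("a"), handle_request("b"))


asyncio.run(main())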
mlflow/mlflow:mlflow/genai/agent_server/validator.py
from dataclasses import asdict, is_dataclass
from typing import Any

from pydantic import BaseModel

from mlflow.types.responses import (
    ResponsesAgentRequest,
    ResponsesAgentResponse,
    ResponsesAgentStreamEvent,
)


class BaseAgentValidator:
    """Base validator class with common validation methods""" ...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/agent_server/validator.py", "license": "Apache License 2.0", "lines": 54, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
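BaseAgentValidator above imports both dataclass and pydantic helpers, which suggests it normalizes agent responses that may arrive as pydantic models, dataclasses, or plain dicts. The sketch below shows one hedged way such normalization could look; the function name and exact behavior are assumptions, not the validator's real methods.

# Hedged sketch of normalizing pydantic models, dataclasses, and dicts to a plain dict.
# `normalize_response` is an illustrative name, not part of mlflow's validator.
from dataclasses import asdict, is_dataclass
from typing import Any

from pydantic import BaseModel


def normalize_response(value: Any) -> dict[str, Any]:
    if isinstance(value, BaseModel):
        return value.model_dump()  # pydantic v2 API
    if is_dataclass(value) and not isinstance(value, type):
        return asdict(value)
    if isinstance(value, dict):
        return value
    raise TypeError(f"Unsupported response type: {type(value)!r}")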
mlflow/mlflow:tests/genai/test_agent_server.py
import contextvars
from typing import Any, AsyncGenerator
from unittest.mock import AsyncMock, Mock, patch

import httpx
import pytest
from fastapi.testclient import TestClient

from mlflow.genai.agent_server import (
    AgentServer,
    get_invoke_function,
    get_request_headers,
    get_stream_function,
    invoke...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/test_agent_server.py", "license": "Apache License 2.0", "lines": 1076, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:tests/tracing/opentelemetry/test_integration.py
import pytest
from opentelemetry import trace as otel_trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter

import mlflow
from mlflow.entities.span import SpanStatus...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/tracing/opentelemetry/test_integration.py", "license": "Apache License 2.0", "lines": 241, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/tracing/otel/translation/base.py
""" Base class for OTEL semantic convention translators. This module provides a base class that implements common translation logic. Subclasses only need to define the attribute keys and mappings as class attributes. """ import json import logging from typing import Any _logger = logging.getLogger(__name__) class ...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/tracing/otel/translation/base.py", "license": "Apache License 2.0", "lines": 171, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
documentation
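The base.py docstring above describes a pattern where the base class owns the translation logic and each semantic-convention subclass only supplies attribute keys and mappings. A minimal sketch of that pattern is below, using invented class and attribute names rather than the module's real ones.

# Illustrative sketch of the "logic in the base class, data in subclasses" pattern
# described by the docstring above. All names here are assumptions, not mlflow's classes.
from typing import Any, ClassVar, Optional


class SchemaTranslator:
    # Subclasses override only these class attributes.
    SPAN_TYPE_ATTRIBUTE: ClassVar[Optional[str]] = None
    SPAN_TYPE_MAPPING: ClassVar[dict[str, str]] = {}

    def translate_span_type(self, attributes: dict[str, Any]) -> Optional[str]:
        if self.SPAN_TYPE_ATTRIBUTE is None:
            return None
        raw = attributes.get(self.SPAN_TYPE_ATTRIBUTE)
        return self.SPAN_TYPE_MAPPING.get(raw) if raw is not None else None


class ExampleGenAITranslator(SchemaTranslator):
    # Per-convention data only; no translation logic is re-implemented here.
    SPAN_TYPE_ATTRIBUTE = "gen_ai.operation.name"
    SPAN_TYPE_MAPPING = {"chat": "CHAT_MODEL", "embedding": "EMBEDDING"}


translator = ExampleGenAITranslator()
assert translator.translate_span_type({"gen_ai.operation.name": "chat"}) == "CHAT_MODEL"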
mlflow/mlflow:mlflow/tracing/otel/translation/genai_semconv.py
""" Translation utilities for GenAI (Generic AI) semantic conventions. Reference: https://opentelemetry.io/docs/specs/semconv/registry/attributes/gen-ai/ """ import json from typing import Any from mlflow.entities.span import SpanType from mlflow.tracing.otel.translation.base import OtelSchemaTranslator class GenA...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/tracing/otel/translation/genai_semconv.py", "license": "Apache License 2.0", "lines": 101, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/tracing/otel/translation/open_inference.py
""" Translation utilities for OpenInference semantic conventions. Reference: https://github.com/Arize-ai/openinference/blob/main/python/openinference-semantic-conventions/ """ from mlflow.entities.span import SpanType from mlflow.tracing.otel.translation.base import OtelSchemaTranslator class OpenInferenceTranslato...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/tracing/otel/translation/open_inference.py", "license": "Apache License 2.0", "lines": 42, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:mlflow/tracing/otel/translation/traceloop.py
""" Translation utilities for Traceloop/OpenLLMetry semantic conventions. Reference: https://github.com/traceloop/openllmetry/ """ import re from typing import Any from mlflow.entities.span import SpanType from mlflow.tracing.otel.translation.base import OtelSchemaTranslator class TraceloopTranslator(OtelSchemaTra...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/tracing/otel/translation/traceloop.py", "license": "Apache License 2.0", "lines": 68, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_simple
mlflow/mlflow:tests/tracing/otel/test_span_translation.py
import json
from typing import Any
from unittest import mock

import pytest

from mlflow.entities.span import Span, SpanType
from mlflow.tracing.constant import SpanAttributeKey, TokenUsageKey
from mlflow.tracing.otel.translation import (
    sanitize_attributes,
    translate_loaded_span,
    translate_span_type_from_...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/tracing/otel/test_span_translation.py", "license": "Apache License 2.0", "lines": 596, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test
mlflow/mlflow:mlflow/langchain/_compat.py
def import_base_retriever():
    try:
        from langchain.schema import BaseRetriever

        return BaseRetriever
    except ImportError:
        from langchain_core.retrievers import BaseRetriever

        return BaseRetriever


def import_document():
    try:
        from langchain.schema import Document ...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/langchain/_compat.py", "license": "Apache License 2.0", "lines": 145, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:mlflow/genai/evaluation/telemetry.py
import hashlib
import threading
import uuid

import mlflow
from mlflow.genai.scorers.base import Scorer
from mlflow.genai.scorers.builtin_scorers import BuiltInScorer
from mlflow.utils.databricks_utils import get_databricks_host_creds
from mlflow.utils.rest_utils import _REST_API_PATH_PREFIX, http_request
from mlflow.u...
{ "repo_id": "mlflow/mlflow", "file_path": "mlflow/genai/evaluation/telemetry.py", "license": "Apache License 2.0", "lines": 115, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
function_complex
mlflow/mlflow:tests/genai/evaluate/test_telemetry.py
from unittest import mock

import pytest

from mlflow.genai import Scorer, scorer
from mlflow.genai.evaluation.telemetry import (
    _BATCH_SIZE_HEADER,
    _CLIENT_NAME_HEADER,
    _CLIENT_VERSION_HEADER,
    _SESSION_ID_HEADER,
    emit_metric_usage_event,
)
from mlflow.genai.judges import make_judge
from mlflow.gen...
{ "repo_id": "mlflow/mlflow", "file_path": "tests/genai/evaluate/test_telemetry.py", "license": "Apache License 2.0", "lines": 247, "canary_id": -1, "canary_value": "", "pii_type": "", "provider": "", "regex_pattern": "", "repetition": -1, "template": "" }
test