Dataset Viewer
Auto-converted to Parquet Duplicate
task_id
stringlengths
15
15
repo
stringlengths
11
23
file_path
stringlengths
16
49
function_name
stringlengths
4
33
qualified_name
stringlengths
4
37
function_type
stringclasses
2 values
class_name
stringclasses
8 values
prompt
stringlengths
198
10.1k
signature
stringlengths
11
792
docstring
stringlengths
0
549
canonical_solution
stringlengths
106
2.37k
full_function
stringlengths
129
2.67k
tests
stringlengths
563
4.68M
setup
stringlengths
201
225
metadata
stringlengths
74
78
validation
stringlengths
36
72
original_task_id
stringlengths
15
15
full_context
stringlengths
422
16.4k
repo_patch/0001
Comfy-Org/ComfyUI
comfy_execution/jobs.py
normalize_output_item
normalize_output_item
function
null
""" Job utilities for the /api/jobs endpoint. Provides normalization and helper functions for job status tracking. """ from typing import Optional from comfy_api.internal import prune_dict class JobStatus: """Job status constants.""" PENDING = 'pending' IN_PROGRESS = 'in_progress' COMPLETED = 'compl...
def normalize_output_item(item): """Normalize a single output list item for the jobs API. Returns the normalized item, or None to exclude it. String items with 3D extensions become {filename, type, subfolder} dicts. """
Normalize a single output list item for the jobs API. Returns the normalized item, or None to exclude it. String items with 3D extensions become {filename, type, subfolder} dicts.
if item is None: return None if isinstance(item, str): if has_3d_extension(item): return {'filename': item, 'type': 'output', 'subfolder': '', 'mediaType': '3d'} return None if isinstance(item, dict): return item return None
def normalize_output_item(item): """Normalize a single output list item for the jobs API. Returns the normalized item, or None to exclude it. String items with 3D extensions become {filename, type, subfolder} dicts. """ if item is None: return None if isinstance(item, str): if h...
[{"test_file": "tests/execution/test_jobs.py", "test_function": "TestNormalizeOutputItem.test_none_returns_none", "test_content": "\"\"\"Unit tests for comfy_execution/jobs.py\"\"\"\n\nfrom comfy_execution.jobs import (\n JobStatus,\n is_previewable,\n normalize_queue_item,\n normalize_history_item,\n no...
{"repo_url": "https://github.com/Comfy-Org/ComfyUI", "install_cmd": "pip install -e .", "commit_sha": "dff0a4a15887383c90a031e3fd48ebc41f6928e7", "frozen_requirements": "frozen_requirements/Comfy-Org_ComfyUI.txt"}
{"body_lines": 9, "file_lines": 390, "has_docstring": true, "num_tests": 6}
{"status": "passed", "tests_run": 6}
repo_patch/0001
""" Job utilities for the /api/jobs endpoint. Provides normalization and helper functions for job status tracking. """ from typing import Optional from comfy_api.internal import prune_dict class JobStatus: """Job status constants.""" PENDING = 'pending' IN_PROGRESS = 'in_progress' COMPLETED = 'compl...
repo_patch/0002
Comfy-Org/ComfyUI
comfy_execution/jobs.py
normalize_queue_item
normalize_queue_item
function
null
"\"\"\"\nJob utilities for the /api/jobs endpoint.\nProvides normalization and helper functions for (...TRUNCATED)
"def normalize_queue_item(item: tuple, status: str) -> dict:\n \"\"\"Convert queue item tuple to (...TRUNCATED)
"Convert queue item tuple to unified job dict.\n\nExpects item with sensitive data already removed ((...TRUNCATED)
" priority, prompt_id, _, extra_data, _ = item\n create_time, workflow_id = _extract_job_metad(...TRUNCATED)
"def normalize_queue_item(item: tuple, status: str) -> dict:\n \"\"\"Convert queue item tuple to (...TRUNCATED)
"[{\"test_file\": \"tests/execution/test_jobs.py\", \"test_function\": \"TestNormalizeQueueItem.test(...TRUNCATED)
"{\"repo_url\": \"https://github.com/Comfy-Org/ComfyUI\", \"install_cmd\": \"pip install -e .\", \"c(...TRUNCATED)
{"body_lines": 10, "file_lines": 390, "has_docstring": true, "num_tests": 1}
{"status": "passed", "tests_run": 1}
repo_patch/0002
"\"\"\"\nJob utilities for the /api/jobs endpoint.\nProvides normalization and helper functions for (...TRUNCATED)
repo_patch/0003
Comfy-Org/ComfyUI
comfy_api/feature_flags.py
get_connection_feature
get_connection_feature
function
null
"\"\"\"\nFeature flags module for ComfyUI WebSocket protocol negotiation.\n\nThis module handles cap(...TRUNCATED)
"def get_connection_feature(\n sockets_metadata: dict[str, dict[str, Any]],\n sid: str,\n f(...TRUNCATED)
"Get a feature flag value for a specific connection.\n\nArgs:\n sockets_metadata: Dictionary of s(...TRUNCATED)
" if sid not in sockets_metadata:\n return default\n\n return sockets_metadata[sid].get(...TRUNCATED)
"def get_connection_feature(\n sockets_metadata: dict[str, dict[str, Any]],\n sid: str,\n f(...TRUNCATED)
"[{\"test_file\": \"tests-unit/feature_flags_test.py\", \"test_function\": \"TestFeatureFlags.test_g(...TRUNCATED)
"{\"repo_url\": \"https://github.com/Comfy-Org/ComfyUI\", \"install_cmd\": \"pip install -e .\", \"c(...TRUNCATED)
{"body_lines": 3, "file_lines": 72, "has_docstring": true, "num_tests": 5}
{"status": "passed", "tests_run": 5}
repo_patch/0003
"\"\"\"\nFeature flags module for ComfyUI WebSocket protocol negotiation.\n\nThis module handles cap(...TRUNCATED)
repo_patch/0004
Comfy-Org/ComfyUI
comfy_execution/jobs.py
apply_sorting
apply_sorting
function
null
"\"\"\"\nJob utilities for the /api/jobs endpoint.\nProvides normalization and helper functions for (...TRUNCATED)
"def apply_sorting(jobs: list[dict], sort_by: str, sort_order: str) -> list[dict]:\n \"\"\"Sort j(...TRUNCATED)
Sort jobs list by specified field and order.
" reverse = (sort_order == 'desc')\n\n if sort_by == 'execution_duration':\n def get_so(...TRUNCATED)
"def apply_sorting(jobs: list[dict], sort_by: str, sort_order: str) -> list[dict]:\n \"\"\"Sort j(...TRUNCATED)
"[{\"test_file\": \"tests/execution/test_jobs.py\", \"test_function\": \"TestApplySorting.test_sort_(...TRUNCATED)
"{\"repo_url\": \"https://github.com/Comfy-Org/ComfyUI\", \"install_cmd\": \"pip install -e .\", \"c(...TRUNCATED)
{"body_lines": 10, "file_lines": 390, "has_docstring": true, "num_tests": 4}
{"status": "passed", "tests_run": 4}
repo_patch/0004
"\"\"\"\nJob utilities for the /api/jobs endpoint.\nProvides normalization and helper functions for (...TRUNCATED)
repo_patch/0005
Comfy-Org/ComfyUI
comfy_execution/jobs.py
get_outputs_summary
get_outputs_summary
function
null
"\"\"\"\nJob utilities for the /api/jobs endpoint.\nProvides normalization and helper functions for (...TRUNCATED)
"def get_outputs_summary(outputs: dict) -> tuple[int, Optional[dict]]:\n \"\"\"\n Count output(...TRUNCATED)
"Count outputs and find preview in a single pass.\nReturns (outputs_count, preview_output).\n\nPrevi(...TRUNCATED)
" count = 0\n preview_output = None\n fallback_preview = None\n\n for node_id, node_outp(...TRUNCATED)
"def get_outputs_summary(outputs: dict) -> tuple[int, Optional[dict]]:\n \"\"\"\n Count output(...TRUNCATED)
"[{\"test_file\": \"tests/execution/test_jobs.py\", \"test_function\": \"TestGetOutputsSummary.test_(...TRUNCATED)
"{\"repo_url\": \"https://github.com/Comfy-Org/ComfyUI\", \"install_cmd\": \"pip install -e .\", \"c(...TRUNCATED)
{"body_lines": 49, "file_lines": 390, "has_docstring": true, "num_tests": 13}
{"status": "passed", "tests_run": 13}
repo_patch/0005
"\"\"\"\nJob utilities for the /api/jobs endpoint.\nProvides normalization and helper functions for (...TRUNCATED)
repo_patch/0006
Comfy-Org/ComfyUI
comfy_execution/jobs.py
normalize_outputs
normalize_outputs
function
null
"\"\"\"\nJob utilities for the /api/jobs endpoint.\nProvides normalization and helper functions for (...TRUNCATED)
"def normalize_outputs(outputs: dict) -> dict:\n \"\"\"Normalize raw node outputs for the jobs AP(...TRUNCATED)
"Normalize raw node outputs for the jobs API.\n\nTransforms string 3D filenames into file output dic(...TRUNCATED)
" normalized = {}\n for node_id, node_outputs in outputs.items():\n if not isinstance(n(...TRUNCATED)
"def normalize_outputs(outputs: dict) -> dict:\n \"\"\"Normalize raw node outputs for the jobs AP(...TRUNCATED)
"[{\"test_file\": \"tests/execution/test_jobs.py\", \"test_function\": \"TestNormalizeOutputs.test_e(...TRUNCATED)
"{\"repo_url\": \"https://github.com/Comfy-Org/ComfyUI\", \"install_cmd\": \"pip install -e .\", \"c(...TRUNCATED)
{"body_lines": 19, "file_lines": 390, "has_docstring": true, "num_tests": 6}
{"status": "passed", "tests_run": 6}
repo_patch/0006
"\"\"\"\nJob utilities for the /api/jobs endpoint.\nProvides normalization and helper functions for (...TRUNCATED)
repo_patch/0007
Comfy-Org/ComfyUI
comfy_execution/jobs.py
is_previewable
is_previewable
function
null
"\"\"\"\nJob utilities for the /api/jobs endpoint.\nProvides normalization and helper functions for (...TRUNCATED)
"def is_previewable(media_type: str, item: dict) -> bool:\n \"\"\"\n Check if an output item i(...TRUNCATED)
"Check if an output item is previewable.\nMatches frontend logic in ComfyUI_frontend/src/stores/queu(...TRUNCATED)
" if media_type in PREVIEWABLE_MEDIA_TYPES:\n return True\n\n # Check format field (MIM(...TRUNCATED)
"def is_previewable(media_type: str, item: dict) -> bool:\n \"\"\"\n Check if an output item i(...TRUNCATED)
"[{\"test_file\": \"tests/execution/test_jobs.py\", \"test_function\": \"TestIsPreviewable.test_prev(...TRUNCATED)
"{\"repo_url\": \"https://github.com/Comfy-Org/ComfyUI\", \"install_cmd\": \"pip install -e .\", \"c(...TRUNCATED)
{"body_lines": 12, "file_lines": 390, "has_docstring": true, "num_tests": 7}
{"status": "passed", "tests_run": 7}
repo_patch/0007
"\"\"\"\nJob utilities for the /api/jobs endpoint.\nProvides normalization and helper functions for (...TRUNCATED)
repo_patch/0008
Comfy-Org/ComfyUI
middleware/cache_middleware.py
cache_control
cache_control
function
null
"\"\"\"Cache control middleware for ComfyUI server\"\"\"\n\nfrom aiohttp import web\nfrom typing imp(...TRUNCATED)
"async def cache_control(\n request: web.Request, handler: Callable[[web.Request], Awaitable[web.(...TRUNCATED)
"Cache control middleware that sets appropriate cache headers based on file type and response status(...TRUNCATED)
" response: web.Response = await handler(request)\n\n path_filename = request.path.rsplit(\"/\(...TRUNCATED)
"async def cache_control(\n request: web.Request, handler: Callable[[web.Request], Awaitable[web.(...TRUNCATED)
"[{\"test_file\": \"tests-unit/server_test/test_cache_control.py\", \"test_function\": \"TestCacheCo(...TRUNCATED)
"{\"repo_url\": \"https://github.com/Comfy-Org/ComfyUI\", \"install_cmd\": \"pip install -e .\", \"c(...TRUNCATED)
{"body_lines": 22, "file_lines": 54, "has_docstring": true, "num_tests": 9}
{"status": "passed", "tests_run": 9}
repo_patch/0008
"\"\"\"Cache control middleware for ComfyUI server\"\"\"\n\nfrom aiohttp import web\nfrom typing imp(...TRUNCATED)
repo_patch/0009
docling-project/docling
docling/datamodel/asr_model_specs.py
_get_whisper_base_model
_get_whisper_base_model
function
null
"import logging\nfrom enum import Enum\n\nfrom pydantic import (\n AnyUrl,\n)\n\nfrom docling.dat(...TRUNCATED)
"def _get_whisper_base_model():\n \"\"\"\n Get the best Whisper Base model for the current har(...TRUNCATED)
"Get the best Whisper Base model for the current hardware.\n\nAutomatically selects MLX Whisper Base(...TRUNCATED)
" try:\n import torch\n\n has_mps = torch.backends.mps.is_built() and torch.backend(...TRUNCATED)
"def _get_whisper_base_model():\n \"\"\"\n Get the best Whisper Base model for the current har(...TRUNCATED)
"[{\"test_file\": \"tests/test_asr_mlx_whisper.py\", \"test_function\": \"TestMlxWhisperIntegration.(...TRUNCATED)
"{\"repo_url\": \"https://github.com/docling-project/docling\", \"install_cmd\": \"pip install -e .\(...TRUNCATED)
{"body_lines": 34, "file_lines": 495, "has_docstring": true, "num_tests": 2}
{"status": "passed", "tests_run": 2}
repo_patch/0009
"import logging\nfrom enum import Enum\n\nfrom pydantic import (\n AnyUrl,\n)\n\nfrom docling.dat(...TRUNCATED)
repo_patch/0010
docling-project/docling
docling/datamodel/asr_model_specs.py
_get_whisper_tiny_model
_get_whisper_tiny_model
function
null
"import logging\nfrom enum import Enum\n\nfrom pydantic import (\n AnyUrl,\n)\n\nfrom docling.dat(...TRUNCATED)
"def _get_whisper_tiny_model():\n \"\"\"\n Get the best Whisper Tiny model for the current har(...TRUNCATED)
"Get the best Whisper Tiny model for the current hardware.\n\nAutomatically selects MLX Whisper Tiny(...TRUNCATED)
" try:\n import torch\n\n has_mps = torch.backends.mps.is_built() and torch.backend(...TRUNCATED)
"def _get_whisper_tiny_model():\n \"\"\"\n Get the best Whisper Tiny model for the current har(...TRUNCATED)
"[{\"test_file\": \"tests/test_asr_mlx_whisper.py\", \"test_function\": \"TestMlxWhisperIntegration.(...TRUNCATED)
"{\"repo_url\": \"https://github.com/docling-project/docling\", \"install_cmd\": \"pip install -e .\(...TRUNCATED)
{"body_lines": 34, "file_lines": 495, "has_docstring": true, "num_tests": 1}
{"status": "passed", "tests_run": 1}
repo_patch/0010
"import logging\nfrom enum import Enum\n\nfrom pydantic import (\n AnyUrl,\n)\n\nfrom docling.dat(...TRUNCATED)
End of preview. Expand in Data Studio

CodeDP Repo-Patch Benchmark (CPT-friendly)

Repository-level code completion benchmark for evaluating continual pre-training (CPT) models. It contains 55 tasks drawn from 12 real-world repositories; each task requires the model to generate a function body given file-level context.

Prompt Format

The prompt field contains the file context up to the target function's signature and docstring, truncated at the original "# TODO: Implement this function" marker. This format is directly usable by base/completion models — the model simply continues generating the function body.

The full_context field preserves the original full-file prompt (including code after the target function) for reference or fill-in-the-middle approaches.

Base/Completion Models

from datasets import load_dataset
ds = load_dataset("melihcatal/codedp-bench-repo-patch-cpt", split="train")

# The prompt is ready for completion: it ends with the function signature and docstring.
prompt = ds[0]["prompt"]
# Model generates the function body from here

Instruction Models

For chat/instruction models, wrap the prompt in a chat template:

msg = f"Complete the implementation of `{ds[0]['function_name']}`. Return ONLY the function body.\n\n```python\n{ds[0]['prompt']}\n```"

Fields

Field Description
prompt File context up to function signature + docstring (CPT-ready)
full_context Full file with # TODO marker and downstream code
canonical_solution Reference function body
signature Function signature
docstring Function docstring (empty for 22/55 tasks)
function_name Target function name
class_name Enclosing class (if method, else null)
tests JSON list of pytest test cases
setup JSON with repo URL, install command, commit SHA
full_function Complete function (signature + docstring + body)
metadata JSON with body_lines, file_lines, has_docstring, num_tests
validation Test validation status

Statistics

  • 55 tasks from 12 repositories
  • 13 class methods, 42 standalone functions
  • 33 with docstrings, 22 without
  • Prompt lengths (after truncation): median ~2,800 chars (vs ~6,300 before)
  • Reference body lengths: median 437 chars

Metrics

Reference-based metrics (no repo setup needed):

  • BLEU-4: Token-level BLEU score
  • CodeBLEU: Syntax-aware code similarity
  • Edit Similarity: 1 - normalized Levenshtein distance
  • Exact Match: Normalized whitespace comparison

Evaluation

python -m evaluation.utility.run_repo_patch \
    --model_path ./output/model/checkpoint-final \
    --benchmark_path melihcatal/codedp-bench-repo-patch-cpt \
    --output_dir results/repo_patch/model/variant \
    --devices auto --batch_size 4

# For instruction models, add --chat_template
Downloads last month
95