diff --git a/.azure/gpu-test.yml b/.azure/gpu-test.yml new file mode 100644 index 0000000000000000000000000000000000000000..0fcefec661e234620987bfe6a00c73de3c01ced3 --- /dev/null +++ b/.azure/gpu-test.yml @@ -0,0 +1,114 @@ +name: GPU tests + +trigger: + branches: + include: + - "main" + - "wip" + +pr: + branches: + include: + - "main" + - "wip" + +jobs: + - job: testing + strategy: + matrix: + "ordinary": + #image: "pytorchlightning/pytorch_lightning:base-cuda-py3.10-torch2.7-cuda12.6.3" + dependency: "" + "w. Thunder": + #image: "pytorchlightning/pytorch_lightning:base-cuda-py3.10-torch2.7-cuda12.6.3" + dependency: "compiler" + variables: + DEVICES: $( python -c 'print("$(Agent.Name)".split("_")[-1])' ) + RUN_ONLY_CUDA_TESTS: "1" + TRANSFORMERS_CACHE: "/var/tmp/hf/transformers" + HF_HOME: "/var/tmp/hf/home" + HF_HUB_CACHE: "/var/tmp/hf/hub" + SKIP_WITH_CI: "1" + NCCL_DEBUG: "INFO" + PYTHON_VERSION: "3.10" + CUDA_VERSION: "12.6.3" + TORCH_VERSION: "2.7.1" + CUDNN_FRONTEND_VERSION: "1.10.0" + container: + # image: "pytorchlightning/pytorch_lightning:base-cuda-py$(PYTHON_VERSION)-torch$(TORCH_VERSION)-cuda$(CUDA_VERSION)" + # pytorchlightning/lightning-thunder:ubuntu22.04-cuda12.1.1-cudnn-fe1.5.0-py3.10-pt_main-dev + image: "pytorchlightning/lightning-thunder:ubuntu24.04-cuda$(CUDA_VERSION)-cudnn-fe$(CUDNN_FRONTEND_VERSION)-py$(PYTHON_VERSION)-pt_$(TORCH_VERSION)-dev" + options: "--gpus=all --shm-size=8gb -v /var/tmp:/var/tmp" + workspace: + clean: all + pool: "lit-rtx-3090" + timeoutInMinutes: "35" + cancelTimeoutInMinutes: "2" + steps: + - bash: | + echo "##vso[task.setvariable variable=CUDA_VISIBLE_DEVICES]$(DEVICES)" + displayName: "set env. vars" + + - bash: | + echo $(DEVICES) + echo $CUDA_VISIBLE_DEVICES + dpkg-query -W -f='${Package} ${Version}\n' libnccl2 libnccl-dev + whereis nvidia + nvidia-smi + which python && which pip + python --version + pip --version + pip list + displayName: "Image info & NVIDIA" + + - script: | + pip install --upgrade pip + pip install '.[extra,test]' "torch==${TORCH_VERSION}" cffi -U + displayName: "Install package & dependencies" + + - script: | + set -e + pip uninstall -y torchvision torchaudio + pip install '.[compiler,extra,test]' "torch==${TORCH_VERSION}" + python -c "from thunder.executors import nvfuser_available ; assert nvfuser_available(), 'nvFuser is missing!'" + python -c "from thunder.executors.triton_utils import triton_version ; assert triton_version() is not None, 'triton is missing!'" + condition: eq(variables['dependency'], 'compiler') + displayName: "Install `compiler` [nvFuser & Thunder]" + + - bash: | + set -e + pip list + python -c "import torch ; mgpu = torch.cuda.device_count() ; assert mgpu == 2, f'GPU: {mgpu}'" + python -c "from torch import __version__ as ver ; assert str(ver).split('+')[0] == '$(TORCH_VERSION)', f'PyTorch: installed {ver} but expected $(TORCH_VERSION)'" + displayName: "Env details" + + - bash: pytest -v --durations=100 + displayName: "All tests" + timeoutInMinutes: "15" + + - bash: | + wget https://raw.githubusercontent.com/Lightning-AI/utilities/main/scripts/run_standalone_tests.sh + bash run_standalone_tests.sh "tests" + displayName: "Standalone tests" + env: + PL_RUN_STANDALONE_TESTS: "1" + # NUM_PARALLEL_TESTS: "10" + NCCL_IGNORE_DISABLED_P2P: "1" + NCCL_DEBUG: "INFO" + timeoutInMinutes: "10" + + - bash: | + pip uninstall -y lightning-thunder + # install thunder from source, so that, thunder.tests will be available + pip install -U "lightning-thunder[test] @ 
git+https://github.com/Lightning-AI/lightning-thunder.git" "torch==${TORCH_VERSION}" + displayName: "Re-install Thunder [main branch]" + condition: eq(variables['dependency'], 'compiler') + + - bash: | + # without env var, it filters out all tests + RUN_ONLY_CUDA_TESTS=0 pytest tests/ext_thunder/test_thunder_networks.py -v --durations=50 + displayName: "Extra tests for Thunder [main branch]" + condition: eq(variables['dependency'], 'compiler') + env: + TORCHDYNAMO_VERBOSE: "1" + timeoutInMinutes: "10" diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..cd79ea053bca87603b3e5c2397d0428d6b2e7d18 --- /dev/null +++ b/.devcontainer/Dockerfile @@ -0,0 +1,9 @@ +# See here for image contents: https://github.com/devcontainers/images/blob/main/src/python/.devcontainer/Dockerfile + +# [Choice] Python version (use -bookworm or -bullseye variants on local arm64/Apple Silicon): 3, 3.12, 3.11, 3.10, 3.9, 3.8, 3-bookworm, 3.12-bookworm, 3.11-bookworm, 3.10-bookworm, 3.9-bookworm, 3.8-bookworm, 3-bullseye, 3.12-bullseye, 3.11-bullseye, 3.10-bullseye, 3.9-bullseye, 3.8-bullseye, 3-buster, 3.12-buster, 3.11-buster, 3.10-buster, 3.9-buster, 3.8-buster +ARG VARIANT=3-bookworm +FROM mcr.microsoft.com/devcontainers/python:1-${VARIANT} + +# Temporary: Upgrade python packages due to https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-40897 +# They are installed by the base image (python) which does not have the patch. +RUN python3 -m pip install --upgrade pip setuptools diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000000000000000000000000000000000000..ddb9eea5f494c7ad6004c5df7d7796c19c44d268 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,105 @@ +// For format details, see https://aka.ms/devcontainer.json. For config options, see the README at: +// https://github.com/microsoft/vscode-dev-containers/tree/v0.194.0/containers/python-3 +{ + "name": "Python 3 (litgpt)", + "build": { + "dockerfile": "Dockerfile", + "context": "..", + "args": { + "VARIANT": "3.11-bookworm" + } + }, + "runArgs": [ + // Enable GPU passthrough, requires WSL2 on Windows + //"--gpus=all", + // One of the following options is required for torch multiprocessing + //"--ipc=host", + //"--shm-size=4gb", + ], + // Features to add to the dev container. More info: https://containers.dev/features. + "features": { + "ghcr.io/devcontainers/features/git:1": {}, + "ghcr.io/devcontainers/features/git-lfs:1": {}, + //"ghcr.io/devcontainers/features/nvidia-cuda:1": {}, + "ghcr.io/devcontainers-extra/features/actionlint:1": {}, + "ghcr.io/devcontainers-extra/features/pre-commit:2": {}, + "ghcr.io/dhoeric/features/act:1": {}, + "ghcr.io/devcontainers/features/docker-in-docker:2": { + "version": "latest", + "moby": true + } + }, + // Set *default* container specific settings.json values on container create. 
+ "customizations": { + "vscode": { + "settings": { + "editor.tabSize": 4, + "editor.renderWhitespace": "all", + "editor.formatOnSave": true, + "editor.rulers": [120], + "files.exclude": { + "**/__pycache__": true + }, + "python.pythonPath": "/usr/local/bin/python", + "python.defaultInterpreterPath": "/usr/local/bin/python", + "python.languageServer": "Pylance", + "python.analysis.autoImportCompletions": true, + "python.analysis.completeFunctionParens": true, + "python.analysis.autoSearchPaths": true, + "python.testing.pytestArgs": ["tests"], + "python.testing.unittestEnabled": false, + "python.testing.pytestEnabled": true, + "code-eol.highlightNonDefault": true, + "code-eol.highlightExtraWhitespace": true, + "autoDocstring.docstringFormat": "google-notypes", + "autoDocstring.guessTypes": true, + "autoDocstring.generateDocstringOnEnter": true, + "autoDocstring.startOnNewLine": true, + "telemetry.telemetryLevel": "off", + "[python]": { + "editor.formatOnSave": true, + "editor.defaultFormatter": "charliermarsh.ruff", + "editor.codeActionsOnSave": { + "source.organizeImports": "always", + "source.fixAll": "always" + } + } + }, + // Add the IDs of extensions you want installed when the container is created. + "extensions": [ + "ms-python.python", + "ms-python.vscode-pylance", + "ms-toolsai.jupyter", + "GitHub.copilot", + "GitHub.copilot-chat", + "github.vscode-github-actions", + "SanjulaGanepola.github-local-actions", + "charliermarsh.ruff", + "esbenp.prettier-vscode", + "ms-vscode.test-adapter-converter", + "njqdev.vscode-python-typehint", + "KevinRose.vsc-python-indent", + "medo64.render-crlf", + "shardulm94.trailing-spaces", + "nhoizey.gremlins", + "wayou.vscode-todo-highlight", + "Gruntfuggly.todo-tree", + "njpwerner.autodocstring", + "rodolphebarbanneau.python-docstring-highlighter", + "mechatroner.rainbow-csv", + "uctakeoff.vscode-counter", + "bierner.github-markdown-preview", + "yahyabatulu.vscode-markdown-alert", + "ms-vscode-remote.vscode-remote-extensionpack", + "ms-azuretools.vscode-docker", + "redhat.vscode-yaml" + ] + } + }, + // Use 'forwardPorts' to make a list of ports inside the container available locally. + // "forwardPorts": [], + // Use 'postCreateCommand' to run commands after the container is created. + "postCreateCommand": "pre-commit install && pip install '.[extra,compiler,test]' -U", + // Comment out connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root. 
+ "remoteUser": "vscode" +} diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000000000000000000000000000000000000..732fa941df3f721e49143d6cdbaff368c9c8c01d --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,2 @@ +* @lantiga @t-vi @borda +/README.md @williamfalcon @lantiga diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000000000000000000000000000000000..4d20187a8c05ba90f05cf9a2f6eed47383295288 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,41 @@ +# Basic dependabot.yml file with +# minimum configuration for two package managers + +version: 2 +updates: + # Enable version updates for python + - package-ecosystem: "pip" + # Look for a `requirements` in the `root` directory + directory: "/" + # Check for updates once a week + schedule: + interval: "monthly" + # Labels on pull requests for version updates only + labels: + - "dependencies" + pull-request-branch-name: + # Separate sections of the branch name with a hyphen + # for example, `dependabot-npm_and_yarn-next_js-acorn-6.4.1` + separator: "-" + # Allow up to 5 open pull requests for pip dependencies + open-pull-requests-limit: 3 + + # Enable version updates for GitHub Actions + - package-ecosystem: "github-actions" + directory: "/" + # Check for updates once a week + schedule: + interval: "weekly" + # Labels on pull requests for version updates only + labels: + - "CI / actions" + pull-request-branch-name: + # Separate sections of the branch name with a hyphen + # for example, `dependabot-npm_and_yarn-next_js-acorn-6.4.1` + separator: "-" + # Allow up to 5 open pull requests for GitHub Actions + open-pull-requests-limit: 1 + groups: + GHA-updates: + patterns: + - "*" diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..8c416e1b37676625c8d197feff7a92f2df820950 --- /dev/null +++ b/.gitignore @@ -0,0 +1,24 @@ +.ipynb_checkpoints/ +__pycache__ +.idea +.DS_Store +*.egg-info +build +dist +.venv +.vscode + +# data +data +datasets +!litgpt/data +!tests/data +checkpoints +out +wandb +events.out.tfevents* + +# test artifacts from tests/test_readme.py +**/custom_finetuning_dataset.json +client.py +**/custom_texts/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2051126fd1a2ec2d6aa5e0e22ed7a92a7639626d --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,89 @@ +# Copyright The Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +default_language_version: + python: python3 + +ci: + autofix_prs: true + autoupdate_commit_msg: "[pre-commit.ci] pre-commit suggestions" + autoupdate_schedule: quarterly + # submodules: true + +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 + hooks: + - id: end-of-file-fixer + - id: trailing-whitespace + exclude: README.md + - id: check-yaml + - id: check-toml + #- id: check-docstring-first + #- id: check-executables-have-shebangs + - id: check-case-conflict + - id: check-added-large-files + args: ["--maxkb=250", "--enforce-all"] + - id: detect-private-key + + - repo: https://github.com/codespell-project/codespell + rev: v2.4.1 + hooks: + - id: codespell + additional_dependencies: [tomli] + args: ["--write-changes"] + exclude: pyproject.toml + + #- repo: https://github.com/crate-ci/typos + # rev: dictgen-v0.3.1 + # hooks: + # - id: typos + # args: [] # empty to do not write fixes + # exclude: pyproject.toml + + #- repo: https://github.com/executablebooks/mdformat + # rev: 0.7.21 + # hooks: + # - id: mdformat + # args: ["--number"] + # additional_dependencies: + # - mdformat-gfm + # - mdformat-black + # - mdformat_frontmatter + + - repo: https://github.com/pre-commit/mirrors-prettier + rev: v3.1.0 + hooks: + - id: prettier + files: \.(json|yml|yaml|toml) + # https://prettier.io/docs/en/options.html#print-width + args: ["--print-width=140"] + + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.12.2 + hooks: + - id: ruff + args: ["--fix"] + - id: ruff-format + - id: ruff + + - repo: https://github.com/tox-dev/pyproject-fmt + rev: v2.6.0 + hooks: + - id: pyproject-fmt + additional_dependencies: [tox] + - repo: https://github.com/abravalheri/validate-pyproject + rev: v0.24.1 + hooks: + - id: validate-pyproject diff --git a/CITATION.cff b/CITATION.cff new file mode 100644 index 0000000000000000000000000000000000000000..fae8fe40f850bdc7c788a8d214663c88b58069df --- /dev/null +++ b/CITATION.cff @@ -0,0 +1,9 @@ +cff-version: 1.2.0 +message: "If you use this software, you can cite it as shown below." +title: "LitGPT" +abstract: "20+ high-performance LLMs with recipes to pretrain, finetune and deploy at scale." +date-released: 2023-03-22 +authors: + - name: "The Lightning AI team" +license: "Apache-2.0" +url: "https://github.com/Lightning-AI/litgpt" diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..2e28ea12b6b637520874dd433aad821b1ba3f2ef --- /dev/null +++ b/Dockerfile @@ -0,0 +1,17 @@ +FROM ubuntu:22.04 + +# 设置 UTF-8(重要,否则 python/其他程序可能乱码) +ENV LANG=C.UTF-8 +ENV LC_ALL=C.UTF-8 + +# 安装常用工具(可选) +RUN apt-get update && apt-get install -y \ + python3 python3-pip vim git wget curl \ + && apt-get clean + +# 把你的整个文件夹复制进镜像 +COPY . /workspace + +# 设置默认工作目录 +WORKDIR /workspace + diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..fe60df99e7ff3db486c7722fe98e7739614e7a0f --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [2023] Lightning AI + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/README.md b/README.md index 7be5fc7f47d5db027d120b8024982df93db95b74..f5c139e97558fcd4551913b2ef32e73ec05a6d9b 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,722 @@ ---- -license: mit ---- +
+ + +# ⚡ LitGPT + +**20+ high-performance LLMs with recipes to pretrain, finetune, and deploy at scale.** + +
+✅ From scratch implementations      ✅ No abstractions         ✅ Beginner friendly
+   ✅ Flash attention                   ✅ FSDP                    ✅ LoRA, QLoRA, Adapter
+✅ Reduce GPU memory (fp4/8/16/32)   ✅ 1-1000+ GPUs/TPUs       ✅ 20+ LLMs         
+
+ + +--- + + +![PyPI - Python Version](https://img.shields.io/pypi/pyversions/pytorch-lightning) +![cpu-tests](https://github.com/lightning-AI/lit-stablelm/actions/workflows/cpu-tests.yml/badge.svg) [![license](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/Lightning-AI/lit-stablelm/blob/master/LICENSE) [![Discord](https://img.shields.io/discord/1077906959069626439)](https://discord.gg/VptPCZkGNa) + +

+ Quick start • + Models • + Finetune • + Deploy • + All workflows • + Features • + Recipes (YAML) • + Lightning AI • + Tutorials +

+ +  + + + Get started + + +  + +
+ +# Use, finetune, pretrain, and deploy LLMs Lightning fast ⚡⚡ +Every LLM is implemented from scratch with **no abstractions** and **full control**, making them blazing fast, minimal, and performant at enterprise scale. + +✅ **Enterprise ready -** Apache 2.0 for unlimited enterprise use.
+✅ **Developer friendly -** Easy debugging with no abstraction layers and single file implementations.
+✅ **Optimized performance -** Models designed to maximize performance, reduce costs, and speed up training.
+✅ **Proven recipes -** Highly-optimized training/finetuning recipes tested at enterprise scale.
+ +  + +# Quick start +Install LitGPT +``` +pip install 'litgpt[extra]' +``` + +Load and use any of the [20+ LLMs](#choose-from-20-llms): +```python +from litgpt import LLM + +llm = LLM.load("microsoft/phi-2") +text = llm.generate("Fix the spelling: Every fall, the family goes to the mountains.") +print(text) +# Corrected Sentence: Every fall, the family goes to the mountains. +``` + +  + +✅ Optimized for fast inference
+✅ Quantization
+✅ Runs on low-memory GPUs
+✅ No layers of internal abstractions
+✅ Optimized for production scale
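+
+As a minimal sketch of tuning generation through the same Python API (the keyword names `max_new_tokens`, `temperature`, and `top_k` are assumptions here, not guaranteed; check the Python API docs linked below for the exact signature):
+
+```python
+from litgpt import LLM
+
+llm = LLM.load("microsoft/phi-2")
+text = llm.generate(
+    "Explain LoRA finetuning in one sentence.",
+    max_new_tokens=64,   # cap the completion length
+    temperature=0.2,     # lower values give more deterministic output
+    top_k=50,            # sample only from the 50 most likely tokens
+)
+print(text)
+```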
+ +
+ Advanced install options + +Install from source: + +```bash +git clone https://github.com/Lightning-AI/litgpt +cd litgpt +pip install -e '.[all]' +``` +
+ +[Explore the full Python API docs](tutorials/python-api.md). + +  + +--- +# Choose from 20+ LLMs +Every model is written from scratch to maximize performance and remove layers of abstraction: + +| Model | Model size | Author | Reference | +|----|----|----|----| +| Llama 3, 3.1, 3.2, 3.3 | 1B, 3B, 8B, 70B, 405B | Meta AI | [Meta AI 2024](https://github.com/meta-llama/llama3) | +| Code Llama | 7B, 13B, 34B, 70B | Meta AI | [Rozière et al. 2023](https://arxiv.org/abs/2308.12950) | +| CodeGemma | 7B | Google | [Google Team, Google Deepmind](https://ai.google.dev/gemma/docs/codegemma) | +| Gemma 2 | 2B, 9B, 27B | Google | [Google Team, Google Deepmind](https://storage.googleapis.com/deepmind-media/gemma/gemma-2-report.pdf) | +| Phi 4 | 14B | Microsoft Research | [Abdin et al. 2024](https://arxiv.org/abs/2412.08905) | +| Qwen2.5 | 0.5B, 1.5B, 3B, 7B, 14B, 32B, 72B | Alibaba Group | [Qwen Team 2024](https://qwenlm.github.io/blog/qwen2.5/) | +| Qwen2.5 Coder | 0.5B, 1.5B, 3B, 7B, 14B, 32B | Alibaba Group | [Hui, Binyuan et al. 2024](https://arxiv.org/abs/2409.12186) | +| R1 Distill Llama | 8B, 70B | DeepSeek AI | [DeepSeek AI 2025](https://github.com/deepseek-ai/DeepSeek-R1/blob/main/DeepSeek_R1.pdf) | +| ... | ... | ... | ... | + +
+ See full list of 20+ LLMs + +  + +#### All models + +| Model | Model size | Author | Reference | +|----|----|----|----| +| CodeGemma | 7B | Google | [Google Team, Google Deepmind](https://ai.google.dev/gemma/docs/codegemma) | +| Code Llama | 7B, 13B, 34B, 70B | Meta AI | [Rozière et al. 2023](https://arxiv.org/abs/2308.12950) | +| Falcon | 7B, 40B, 180B | TII UAE | [TII 2023](https://falconllm.tii.ae) | +| Falcon 3 | 1B, 3B, 7B, 10B | TII UAE | [TII 2024](https://huggingface.co/blog/falcon3) | +| FreeWilly2 (Stable Beluga 2) | 70B | Stability AI | [Stability AI 2023](https://stability.ai/blog/stable-beluga-large-instruction-fine-tuned-models) | +| Function Calling Llama 2 | 7B | Trelis | [Trelis et al. 2023](https://huggingface.co/Trelis/Llama-2-7b-chat-hf-function-calling-v2) | +| Gemma | 2B, 7B | Google | [Google Team, Google Deepmind](https://storage.googleapis.com/deepmind-media/gemma/gemma-report.pdf) | +| Gemma 2 | 9B, 27B | Google | [Google Team, Google Deepmind](https://storage.googleapis.com/deepmind-media/gemma/gemma-2-report.pdf) | +| Gemma 3 | 1B, 4B, 12B, 27B | Google | [Google Team, Google Deepmind](https://arxiv.org/pdf/2503.19786) | +| Llama 2 | 7B, 13B, 70B | Meta AI | [Touvron et al. 2023](https://arxiv.org/abs/2307.09288) | +| Llama 3.1 | 8B, 70B | Meta AI | [Meta AI 2024](https://github.com/meta-llama/llama3) | +| Llama 3.2 | 1B, 3B | Meta AI | [Meta AI 2024](https://ai.meta.com/blog/llama-3-2-connect-2024-vision-edge-mobile-devices/) | +| Llama 3.3 | 70B | Meta AI | [Meta AI 2024](https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct) | +| Mathstral | 7B | Mistral AI | [Mistral AI 2024](https://mistral.ai/news/mathstral/) | +| MicroLlama | 300M | Ken Wang | [MicroLlama repo](https://github.com/keeeeenw/MicroLlama) | +| Mixtral MoE | 8x7B | Mistral AI | [Mistral AI 2023](https://mistral.ai/news/mixtral-of-experts/) | +| Mistral | 7B, 123B | Mistral AI | [Mistral AI 2023](https://mistral.ai/news/announcing-mistral-7b/) | +| Mixtral MoE | 8x22B | Mistral AI | [Mistral AI 2024](https://mistral.ai/news/mixtral-8x22b/) | +| OLMo | 1B, 7B | Allen Institute for AI (AI2) | [Groeneveld et al. 2024](https://aclanthology.org/2024.acl-long.841/) | +| OpenLLaMA | 3B, 7B, 13B | OpenLM Research | [Geng & Liu 2023](https://github.com/openlm-research/open_llama) | +| Phi 1.5 & 2 | 1.3B, 2.7B | Microsoft Research | [Li et al. 2023](https://arxiv.org/abs/2309.05463) | +| Phi 3 | 3.8B | Microsoft Research | [Abdin et al. 2024](https://arxiv.org/abs/2404.14219) | +| Phi 4 | 14B | Microsoft Research | [Abdin et al. 2024](https://arxiv.org/abs/2412.08905) | +| Phi 4 Mini Instruct | 3.8B | Microsoft Research | [Microsoft 2025](https://arxiv.org/abs/2503.01743) | +| Phi 4 Mini Reasoning | 3.8B | Microsoft Research | [Xu, Peng et al. 2025](https://arxiv.org/abs/2504.21233) | +| Phi 4 Reasoning | 3.8B | Microsoft Research | [Abdin et al. 2025](https://arxiv.org/abs/2504.21318) | +| Phi 4 Reasoning Plus | 3.8B | Microsoft Research | [Abdin et al. 2025](https://arxiv.org/abs/2504.21318) | +| Platypus | 7B, 13B, 70B | Lee et al. | [Lee, Hunter, and Ruiz 2023](https://arxiv.org/abs/2308.07317) | +| Pythia | {14,31,70,160,410}M, {1,1.4,2.8,6.9,12}B | EleutherAI | [Biderman et al. 2023](https://arxiv.org/abs/2304.01373) | +| Qwen2.5 | 0.5B, 1.5B, 3B, 7B, 14B, 32B, 72B | Alibaba Group | [Qwen Team 2024](https://qwenlm.github.io/blog/qwen2.5/) | +| Qwen2.5 Coder | 0.5B, 1.5B, 3B, 7B, 14B, 32B | Alibaba Group | [Hui, Binyuan et al. 
2024](https://arxiv.org/abs/2409.12186) | +| Qwen2.5 1M (Long Context) | 7B, 14B | Alibaba Group | [Qwen Team 2025](https://qwenlm.github.io/blog/qwen2.5-1m/) | +| Qwen2.5 Math | 1.5B, 7B, 72B | Alibaba Group | [An, Yang et al. 2024](https://arxiv.org/abs/2409.12122) | +| QwQ | 32B | Alibaba Group | [Qwen Team 2025](https://qwenlm.github.io/blog/qwq-32b/) | +| QwQ-Preview | 32B | Alibaba Group | [Qwen Team 2024](https://qwenlm.github.io/blog/qwq-32b-preview/) | +| Qwen3 | 0.6B, 1.7B, 4B, 8B, 14B, 32B | Alibaba Group | [Qwen Team 2025](https://arxiv.org/abs/2505.09388/) | +| Qwen3 MoE | 30B, 235B | Alibaba Group | [Qwen Team 2025](https://arxiv.org/abs/2505.09388/) | +| R1 Distill Llama | 8B, 70B | DeepSeek AI | [DeepSeek AI 2025](https://github.com/deepseek-ai/DeepSeek-R1/blob/main/DeepSeek_R1.pdf) | +| SmolLM2 | 135M, 360M, 1.7B | Hugging Face | [Hugging Face 2024](https://github.com/huggingface/smollm) | +| Salamandra | 2B, 7B | Barcelona Supercomputing Centre | [BSC-LTC 2024](https://github.com/BSC-LTC/salamandra) | +| StableCode | 3B | Stability AI | [Stability AI 2023](https://stability.ai/blog/stablecode-llm-generative-ai-coding) | +| StableLM | 3B, 7B | Stability AI | [Stability AI 2023](https://github.com/Stability-AI/StableLM) | +| StableLM Zephyr | 3B | Stability AI | [Stability AI 2023](https://stability.ai/blog/stablecode-llm-generative-ai-coding) | +| TinyLlama | 1.1B | Zhang et al. | [Zhang et al. 2023](https://github.com/jzhang38/TinyLlama) | + + +**Tip**: You can list all available models by running the `litgpt download list` command. + + +
+ +  + +--- + +# Workflows + +

+ Finetune • + Pretrain • + Continued pretraining • + Evaluate • + Deploy • + Test +

+ +  + +Use the command line interface to run advanced workflows such as pretraining or finetuning on your own data. + + +## All workflows +After installing LitGPT, select the model and workflow to run (finetune, pretrain, evaluate, deploy, etc...): + +```bash +# litgpt [action] [model] +litgpt serve meta-llama/Llama-3.2-3B-Instruct +litgpt finetune meta-llama/Llama-3.2-3B-Instruct +litgpt pretrain meta-llama/Llama-3.2-3B-Instruct +litgpt chat meta-llama/Llama-3.2-3B-Instruct +litgpt evaluate meta-llama/Llama-3.2-3B-Instruct +``` + +  + +---- + +## Finetune an LLM + +
+ + Run on Studios + +
+ +  + +Finetuning is the process of taking a pretrained AI model and further training it on a smaller, specialized dataset tailored to a specific task or application. + + +  + +```bash +# 0) setup your dataset +curl -L https://huggingface.co/datasets/ksaw008/finance_alpaca/resolve/main/finance_alpaca.json -o my_custom_dataset.json + +# 1) Finetune a model (auto downloads weights) +litgpt finetune microsoft/phi-2 \ + --data JSON \ + --data.json_path my_custom_dataset.json \ + --data.val_split_fraction 0.1 \ + --out_dir out/custom-model + +# 2) Test the model +litgpt chat out/custom-model/final + +# 3) Deploy the model +litgpt serve out/custom-model/final +``` + +[Read the full finetuning docs](tutorials/finetune.md) + +  + +---- + +## Deploy an LLM + +
+ + Deploy on Studios + +
+
+&nbsp;
+
+Deploy a pretrained or finetuned LLM to use it in real-world applications. Deploying automatically sets up a web server that can be accessed by a website or app.
+
+```bash
+# deploy an out-of-the-box LLM
+litgpt serve microsoft/phi-2
+
+# deploy your own trained model
+litgpt serve path/to/microsoft/phi-2/checkpoint
+```
+
+ Show code to query server: + +  + +Test the server in a separate terminal and integrate the model API into your AI product: +```python +# 3) Use the server (in a separate Python session) +import requests, json +response = requests.post( + "http://127.0.0.1:8000/predict", + json={"prompt": "Fix typos in the following sentence: Example input"} +) +print(response.json()["output"]) +``` +
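+
+You can also query the running server from the shell; this mirrors the Python snippet above (same `/predict` route and JSON payload):
+
+```bash
+# assumes the server started by `litgpt serve` is listening on the default 127.0.0.1:8000
+curl -X POST http://127.0.0.1:8000/predict \
+  -H "Content-Type: application/json" \
+  -d '{"prompt": "Fix typos in the following sentence: Example input"}'
+```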
+
+[Read the full deploy docs](tutorials/deploy.md).
+
+&nbsp;
+
+----
+
+## Evaluate an LLM
+Evaluate an LLM to test its performance on various tasks to see how well it understands and generates text. Simply put, we can evaluate things like how well it would do in college-level chemistry, coding, etc... (MMLU, TruthfulQA, etc...)
+
+```bash
+litgpt evaluate microsoft/phi-2 --tasks 'truthfulqa_mc2,mmlu'
+```
+
+[Read the full evaluation docs](tutorials/evaluation.md).
+
+&nbsp;
+
+----
+
+## Test an LLM
+
+ + Run on Studios + +
+ +  + +Test how well the model works via an interactive chat. Use the `chat` command to chat, extract embeddings, etc... + +Here's an example showing how to use the Phi-2 LLM: +```bash +litgpt chat microsoft/phi-2 + +>> Prompt: What do Llamas eat? +``` + +
+ Full code: + +  + +```bash +# 1) List all supported LLMs +litgpt download list + +# 2) Use a model (auto downloads weights) +litgpt chat microsoft/phi-2 + +>> Prompt: What do Llamas eat? +``` + +The download of certain models requires an additional access token. You can read more about this in the [download](tutorials/download_model_weights.md#specific-models-and-access-tokens) documentation. + +
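+
+For gated checkpoints (for example the Meta Llama models), a hedged sketch of the token flow looks like this; the `--access_token` option name is an assumption based on the download documentation above, and `HF_TOKEN` is the standard Hugging Face Hub environment variable:
+
+```bash
+# Hypothetical sketch -- verify the exact flag in the download docs.
+litgpt download meta-llama/Llama-3.2-3B-Instruct --access_token hf_...
+
+# Most Hugging Face tooling also picks the token up from the environment:
+export HF_TOKEN=hf_...
+litgpt chat meta-llama/Llama-3.2-3B-Instruct
+```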
+ +[Read the full chat docs](tutorials/inference.md). + +  + +---- + +## Pretrain an LLM + +
+ + Run on Studios + +
+ +  + +Pretraining is the process of teaching an AI model by exposing it to a large amount of data before it is fine-tuned for specific tasks. + +
+ Show code: + +  + +```bash +mkdir -p custom_texts +curl https://www.gutenberg.org/cache/epub/24440/pg24440.txt --output custom_texts/book1.txt +curl https://www.gutenberg.org/cache/epub/26393/pg26393.txt --output custom_texts/book2.txt + +# 1) Download a tokenizer +litgpt download EleutherAI/pythia-160m \ + --tokenizer_only True + +# 2) Pretrain the model +litgpt pretrain EleutherAI/pythia-160m \ + --tokenizer_dir EleutherAI/pythia-160m \ + --data TextFiles \ + --data.train_data_path "custom_texts/" \ + --train.max_tokens 10_000_000 \ + --out_dir out/custom-model + +# 3) Test the model +litgpt chat out/custom-model/final +``` +
+ +[Read the full pretraining docs](tutorials/pretrain.md) + +  + +---- + +## Continue pretraining an LLM + +
+ + Run on Studios + +
+ +  + +Continued pretraining is another way of finetuning that specializes an already pretrained model by training on custom data: + +
+ Show code: + +  + +```bash +mkdir -p custom_texts +curl https://www.gutenberg.org/cache/epub/24440/pg24440.txt --output custom_texts/book1.txt +curl https://www.gutenberg.org/cache/epub/26393/pg26393.txt --output custom_texts/book2.txt + +# 1) Continue pretraining a model (auto downloads weights) +litgpt pretrain EleutherAI/pythia-160m \ + --tokenizer_dir EleutherAI/pythia-160m \ + --initial_checkpoint_dir EleutherAI/pythia-160m \ + --data TextFiles \ + --data.train_data_path "custom_texts/" \ + --train.max_tokens 10_000_000 \ + --out_dir out/custom-model + +# 2) Test the model +litgpt chat out/custom-model/final +``` + +
+ +[Read the full continued pretraining docs](tutorials/pretrain.md#continued-pretraining-on-custom-data) + +  + +---- + +# State-of-the-art features + +✅ State-of-the-art optimizations: Flash Attention v2, multi-GPU support via fully-sharded data parallelism, [optional CPU offloading](tutorials/oom.md#do-sharding-across-multiple-gpus), and [TPU and XLA support](extensions/xla).
+✅ [Pretrain](tutorials/pretrain.md), [finetune](tutorials/finetune.md), and [deploy](tutorials/inference.md)
+✅ Reduce compute requirements with low-precision settings: FP16, BF16, and FP16/FP32 mixed.
+✅ Lower memory requirements with [quantization](tutorials/quantize.md): 4-bit floats, 8-bit integers, and double quantization (see the sketch after this list).
+✅ [Configuration files](config_hub) for great out-of-the-box performance.
+✅ Parameter-efficient finetuning: [LoRA](tutorials/finetune_lora.md), [QLoRA](tutorials/finetune_lora.md), [Adapter](tutorials/finetune_adapter.md), and [Adapter v2](tutorials/finetune_adapter.md).
+✅ [Exporting](tutorials/convert_lit_models.md) to other popular model weight formats.
+✅ Many popular datasets for [pretraining](tutorials/pretrain.md) and [finetuning](tutorials/prepare_dataset.md), and [support for custom datasets](tutorials/prepare_dataset.md#preparing-custom-datasets-for-instruction-finetuning).
+✅ Readable and easy-to-modify code to experiment with the latest research ideas.
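+
+As a concrete illustration of the quantization and parameter-efficient finetuning items above, a QLoRA-style run can be sketched as follows; the `--quantize` and `--precision` flags mirror the `quantize` and `precision` keys of the LoRA config shown in the Training recipes section below, so treat the exact spelling as an assumption and see the quantization and LoRA finetuning tutorials for details:
+
+```bash
+# Sketch: LoRA adapters plus 4-bit (nf4) quantization of the frozen base weights.
+litgpt finetune microsoft/phi-2 \
+  --quantize bnb.nf4 \
+  --precision bf16-true \
+  --lora_r 8 \
+  --out_dir out/phi-2-qlora
+```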
+ +  + +--- + +# Training recipes + +LitGPT comes with validated recipes (YAML configs) to train models under different conditions. We've generated these recipes based on the parameters we found to perform the best for different training conditions. + +Browse all training recipes [here](config_hub). + +### Example + +```bash +litgpt finetune \ + --config https://raw.githubusercontent.com/Lightning-AI/litgpt/main/config_hub/finetune/llama-2-7b/lora.yaml +``` +
+ ✅ Use configs to customize training + +Configs let you customize training for all granular parameters like: + +```yaml +# The path to the base model's checkpoint directory to load for finetuning. (type: , default: checkpoints/stabilityai/stablelm-base-alpha-3b) +checkpoint_dir: checkpoints/meta-llama/Llama-2-7b-hf + +# Directory in which to save checkpoints and logs. (type: , default: out/lora) +out_dir: out/finetune/qlora-llama2-7b + +# The precision to use for finetuning. Possible choices: "bf16-true", "bf16-mixed", "32-true". (type: Optional[str], default: null) +precision: bf16-true + +... +``` +
+ +
+ ✅ Example: LoRA finetuning config + +  + +```yaml +# The path to the base model's checkpoint directory to load for finetuning. (type: , default: checkpoints/stabilityai/stablelm-base-alpha-3b) +checkpoint_dir: checkpoints/meta-llama/Llama-2-7b-hf + +# Directory in which to save checkpoints and logs. (type: , default: out/lora) +out_dir: out/finetune/qlora-llama2-7b + +# The precision to use for finetuning. Possible choices: "bf16-true", "bf16-mixed", "32-true". (type: Optional[str], default: null) +precision: bf16-true + +# If set, quantize the model with this algorithm. See ``tutorials/quantize.md`` for more information. (type: Optional[Literal['nf4', 'nf4-dq', 'fp4', 'fp4-dq', 'int8-training']], default: null) +quantize: bnb.nf4 + +# How many devices/GPUs to use. (type: Union[int, str], default: 1) +devices: 1 + +# How many nodes to use. (type: int, default: 1) +num_nodes: 1 + +# The LoRA rank. (type: int, default: 8) +lora_r: 32 + +# The LoRA alpha. (type: int, default: 16) +lora_alpha: 16 + +# The LoRA dropout value. (type: float, default: 0.05) +lora_dropout: 0.05 + +# Whether to apply LoRA to the query weights in attention. (type: bool, default: True) +lora_query: true + +# Whether to apply LoRA to the key weights in attention. (type: bool, default: False) +lora_key: false + +# Whether to apply LoRA to the value weights in attention. (type: bool, default: True) +lora_value: true + +# Whether to apply LoRA to the output projection in the attention block. (type: bool, default: False) +lora_projection: false + +# Whether to apply LoRA to the weights of the MLP in the attention block. (type: bool, default: False) +lora_mlp: false + +# Whether to apply LoRA to output head in GPT. (type: bool, default: False) +lora_head: false + +# Data-related arguments. If not provided, the default is ``litgpt.data.Alpaca``. +data: + class_path: litgpt.data.Alpaca2k + init_args: + mask_prompt: false + val_split_fraction: 0.05 + prompt_style: alpaca + ignore_index: -100 + seed: 42 + num_workers: 4 + download_dir: data/alpaca2k + +# Training-related arguments. See ``litgpt.args.TrainArgs`` for details +train: + + # Number of optimizer steps between saving checkpoints (type: Optional[int], default: 1000) + save_interval: 200 + + # Number of iterations between logging calls (type: int, default: 1) + log_interval: 1 + + # Number of samples between optimizer steps across data-parallel ranks (type: int, default: 128) + global_batch_size: 8 + + # Number of samples per data-parallel rank (type: int, default: 4) + micro_batch_size: 2 + + # Number of iterations with learning rate warmup active (type: int, default: 100) + lr_warmup_steps: 10 + + # Number of epochs to train on (type: Optional[int], default: 5) + epochs: 4 + + # Total number of tokens to train on (type: Optional[int], default: null) + max_tokens: + + # Limits the number of optimizer steps to run (type: Optional[int], default: null) + max_steps: + + # Limits the length of samples (type: Optional[int], default: null) + max_seq_length: 512 + + # Whether to tie the embedding weights with the language modeling head weights (type: Optional[bool], default: null) + tie_embeddings: + + # (type: float, default: 0.0003) + learning_rate: 0.0002 + + # (type: float, default: 0.02) + weight_decay: 0.0 + + # (type: float, default: 0.9) + beta1: 0.9 + + # (type: float, default: 0.95) + beta2: 0.95 + + # (type: Optional[float], default: null) + max_norm: + + # (type: float, default: 6e-05) + min_lr: 6.0e-05 + +# Evaluation-related arguments. 
See ``litgpt.args.EvalArgs`` for details +eval: + + # Number of optimizer steps between evaluation calls (type: int, default: 100) + interval: 100 + + # Number of tokens to generate (type: Optional[int], default: 100) + max_new_tokens: 100 + + # Number of iterations (type: int, default: 100) + max_iters: 100 + +# The name of the logger to send metrics to. (type: Literal['wandb', 'tensorboard', 'csv'], default: csv) +logger_name: csv + +# The random seed to use for reproducibility. (type: int, default: 1337) +seed: 1337 +``` +
+ +
+ ✅ Override any parameter in the CLI: + +```bash +litgpt finetune \ + --config https://raw.githubusercontent.com/Lightning-AI/litgpt/main/config_hub/finetune/llama-2-7b/lora.yaml \ + --lora_r 4 +``` +
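+
+Nested config sections can be overridden the same way with dotted flags, just like `--data.json_path` and `--train.max_tokens` in the workflow examples above; the values below are purely illustrative:
+
+```bash
+litgpt finetune \
+  --config https://raw.githubusercontent.com/Lightning-AI/litgpt/main/config_hub/finetune/llama-2-7b/lora.yaml \
+  --train.micro_batch_size 1 \
+  --train.lr_warmup_steps 20 \
+  --eval.interval 50
+```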
+ +  + +---- + +# Project highlights + +LitGPT powers many great AI projects, initiatives, challenges and of course enterprises. Please submit a pull request to be considered for a feature. + +
+ 📊 SAMBA: Simple Hybrid State Space Models for Efficient Unlimited Context Language Modeling + +The [Samba](https://github.com/microsoft/Samba) project by researchers at Microsoft is built on top of the LitGPT code base and combines state space models with sliding window attention, which outperforms pure state space models. + +
+ +
+ 🏆 NeurIPS 2023 Large Language Model Efficiency Challenge: 1 LLM + 1 GPU + 1 Day + +The LitGPT repository was the official starter kit for the [NeurIPS 2023 LLM Efficiency Challenge](https://llm-efficiency-challenge.github.io), which is a competition focused on finetuning an existing non-instruction tuned LLM for 24 hours on a single GPU. + +
+ +
+ 🦙 TinyLlama: An Open-Source Small Language Model + + +LitGPT powered the [TinyLlama project](https://github.com/jzhang38/TinyLlama) and [TinyLlama: An Open-Source Small Language Model](https://arxiv.org/abs/2401.02385) research paper. + +
+ +
+ 🍪 MicroLlama: MicroLlama-300M + +[MicroLlama](https://github.com/keeeeenw/MicroLlama) is a 300M Llama model pretrained on 50B tokens powered by TinyLlama and LitGPT. +
+ +
+ 🔬 Pre-training Small Base LMs with Fewer Tokens + +The research paper ["Pre-training Small Base LMs with Fewer Tokens"](https://arxiv.org/abs/2404.08634), which utilizes LitGPT, develops smaller base language models by inheriting a few transformer blocks from larger models and training on a tiny fraction of the data used by the larger models. It demonstrates that these smaller models can perform comparably to larger models despite using significantly less training data and resources. + +
+ +  + +---- + +# Community + +We welcome all individual contributors, regardless of their level of experience or hardware. Your contributions are valuable, and we are excited to see what you can accomplish in this collaborative and supportive environment. + +- [Request a feature](https://github.com/Lightning-AI/litgpt/issues) +- [Submit your first contribution](https://lightning.ai/pages/community/tutorial/how-to-contribute-to-litgpt/) +- [Join our Discord](https://discord.gg/VptPCZkGNa) + +  + +# Tutorials + +🚀 [Get started](tutorials/0_to_litgpt.md)
+⚡️ [Finetuning, incl. LoRA, QLoRA, and Adapters](tutorials/finetune.md)
+🤖 [Pretraining](tutorials/pretrain.md)
+💬 [Model evaluation](tutorials/evaluation.md)
+📘 [Supported and custom datasets](tutorials/prepare_dataset.md)
+🧹 [Quantization](tutorials/quantize.md)
+🤯 [Tips for dealing with out-of-memory (OOM) errors](tutorials/oom.md)
+🧑🏽‍💻 [Using cloud TPUs](extensions/xla)
+ +  + +---- + +### Acknowledgments + +This implementation extends on [Lit-LLaMA](https://github.com/lightning-AI/lit-llama) and [nanoGPT](https://github.com/karpathy/nanoGPT), and it's **powered by [Lightning Fabric](https://lightning.ai/docs/fabric/stable/) ⚡**. + +- [@karpathy](https://github.com/karpathy) for [nanoGPT](https://github.com/karpathy/nanoGPT) +- [@EleutherAI](https://github.com/EleutherAI) for [GPT-NeoX](https://github.com/EleutherAI/gpt-neox) and the [Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness) +- [@TimDettmers](https://github.com/TimDettmers) for [bitsandbytes](https://github.com/TimDettmers/bitsandbytes) +- [@Microsoft](https://github.com/microsoft) for [LoRA](https://github.com/microsoft/LoRA) +- [@tridao](https://github.com/tridao) for [Flash Attention 2](https://github.com/Dao-AILab/flash-attention) + +### License + +LitGPT is released under the [Apache 2.0](https://github.com/Lightning-AI/litgpt/blob/main/LICENSE) license. + +### Citation + +If you use LitGPT in your research, please cite the following work: + +```bibtex +@misc{litgpt-2023, + author = {Lightning AI}, + title = {LitGPT}, + howpublished = {\url{https://github.com/Lightning-AI/litgpt}}, + year = {2023}, +} +``` + +  diff --git a/check.py b/check.py new file mode 100644 index 0000000000000000000000000000000000000000..4eac4ad9830a868812b8b659789e17f867fe82a7 --- /dev/null +++ b/check.py @@ -0,0 +1,34 @@ +import os +import json +import argparse + +def check_file(path): + with open(path, "r", encoding="utf-8") as f: + data = json.load(f) # 这里假设每个文件是一个 JSON 数组 + total = len(data) + # 转换成字符串来判断重复(保证字典可哈希) + unique = len({json.dumps(item, sort_keys=True) for item in data}) + duplicates = total - unique + return total, duplicates + +def main(): + parser = argparse.ArgumentParser(description="Check JSON array dataset files for row count and duplicates.") + parser.add_argument("folder", help="Folder containing the JSON files") + args = parser.parse_args() + + folder = args.folder + for fname in sorted(os.listdir(folder)): + fpath = os.path.join(folder, fname) + if not os.path.isfile(fpath): + continue + if not fname.endswith(".json"): + continue + try: + total, duplicates = check_file(fpath) + print(f"{fname:25} rows={total:6} duplicates={duplicates:4}") + except Exception as e: + print(f"{fname:25} [ERROR: {e}]") + +if __name__ == "__main__": + main() + diff --git a/cmd_start_for.ini b/cmd_start_for.ini new file mode 100644 index 0000000000000000000000000000000000000000..a36c164fb9754e8b05bb155675c44b46c3182a48 --- /dev/null +++ b/cmd_start_for.ini @@ -0,0 +1,2 @@ +[cmd_start_info] +game_id=26 diff --git a/delete.py b/delete.py new file mode 100644 index 0000000000000000000000000000000000000000..ec2748aaa134421889e6839d1aa24ae675e16a8e --- /dev/null +++ b/delete.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import argparse +import pathlib +import re +import shutil + +def remove_explanation_fields(text: str) -> str: + """ + 删除 JSON 文本中键为 "explanation" 的键值对(值为字符串), + 不使用 json 解析,使用正则在词法层面安全处理转义字符。 + 同时正确处理前后逗号,保持 JSON 仍然有效。 + """ + + # JSON 字符串匹配:支持任意数量的转义字符(如 \" \\ \n ...) + # 说明:JSON 字符串不允许出现未转义的换行,因此不需 DOTALL。 + json_string = r'"(?:\\.|[^"\\])*"' + + # 情况 A:explanation 在中间/开头,后面跟逗号 + # ... "explanation": "......", ... + pattern_after = re.compile( + rf'\s*"explanation"\s*:\s*{json_string}\s*,' + ) + + # 情况 B:explanation 在中间/末尾,前面有逗号 + # ..., "explanation": "......" 
+ pattern_before = re.compile( + rf',\s*"explanation"\s*:\s*{json_string}' + ) + + # 先删“后跟逗号”的情况,再删“前有逗号”的情况 + new_text = pattern_after.sub('', text) + new_text = pattern_before.sub('', new_text) + + # 情况 C:对象里只有一个键值对(既没有前逗号也没有后逗号) + # 比如:{ "explanation": "..." } + # 这会在 A/B 后仍残留:{ }(已是合法 JSON),无需额外处理。 + # 若出现花括号内部多余空白也不影响合法性。 + + return new_text + +def process_file(p: pathlib.Path, dry_run: bool = False) -> bool: + original = p.read_text(encoding='utf-8') + repaired = remove_explanation_fields(original) + if repaired != original: + if not dry_run: + # 备份 + backup = p.with_suffix(p.suffix + '.bak') + shutil.copyfile(p, backup) + # 覆盖 + p.write_text(repaired, encoding='utf-8') + return True + return False + +def main(): + ap = argparse.ArgumentParser( + description="在不使用 json 解析的前提下,删除所有 JSON 文件中的 \"explanation\": \"...\" 字段(安全处理转义字符与逗号)。" + ) + ap.add_argument("folder", type=str, help="包含 .json 文件的文件夹路径") + ap.add_argument("--dry-run", action="store_true", help="仅显示将要修改的文件,不写回") + args = ap.parse_args() + + root = pathlib.Path(args.folder) + if not root.is_dir(): + raise SystemExit(f"路径不存在或不是文件夹:{root}") + + changed = 0 + total = 0 + for p in sorted(root.glob("*.json")): + total += 1 + if process_file(p, dry_run=args.dry_run): + changed += 1 + print(f"[UPDATED] {p}") + else: + print(f"[SKIP ] {p}(无 explanation 字段或无需修改)") + + print(f"\n完成:扫描 {total} 个 .json 文件,修改 {changed} 个。") + if not args.dry_run: + print("已为修改过的文件生成 .bak 备份。") + +if __name__ == "__main__": + main() diff --git a/environment.yml b/environment.yml new file mode 100644 index 0000000000000000000000000000000000000000..e5448ddf3191ca0e57100be31e2f698344f2119e --- /dev/null +++ b/environment.yml @@ -0,0 +1,261 @@ +name: /mnt/data/llmtcl +channels: + - defaults +dependencies: + - _libgcc_mutex=0.1=main + - _openmp_mutex=5.1=1_gnu + - bzip2=1.0.8=h5eee18b_6 + - ca-certificates=2025.2.25=h06a4308_0 + - expat=2.7.1=h6a678d5_0 + - ld_impl_linux-64=2.40=h12ee557_0 + - libffi=3.4.4=h6a678d5_1 + - libgcc-ng=11.2.0=h1234567_1 + - libgomp=11.2.0=h1234567_1 + - libstdcxx-ng=11.2.0=h1234567_1 + - libuuid=1.41.5=h5eee18b_0 + - libxcb=1.17.0=h9b100fa_0 + - ncurses=6.5=h7934f7d_0 + - openssl=3.0.17=h5eee18b_0 + - pip=25.1=pyhc872135_2 + - pthread-stubs=0.3=h0ce48e5_1 + - python=3.10.18=h1a3bd86_0 + - readline=8.2=h5eee18b_0 + - setuptools=78.1.1=py310h06a4308_0 + - sqlite=3.50.2=hb25bd0a_1 + - tk=8.6.14=h993c535_1 + - wheel=0.45.1=py310h06a4308_0 + - xorg-libx11=1.8.12=h9b100fa_1 + - xorg-libxau=1.0.12=h9b100fa_0 + - xorg-libxdmcp=1.1.5=h9b100fa_0 + - xorg-xorgproto=2024.1=h5eee18b_1 + - xz=5.6.4=h5eee18b_1 + - zlib=1.2.13=h5eee18b_1 + - pip: + - absl-py==2.3.1 + - accelerate==1.10.0 + - aiohappyeyeballs==2.6.1 + - aiohttp==3.12.15 + - aiosignal==1.4.0 + - annotated-types==0.7.0 + - anyio==4.10.0 + - astor==0.8.1 + - async-timeout==5.0.1 + - attrs==25.3.0 + - blake3==1.0.5 + - boto3==1.40.1 + - botocore==1.40.1 + - cachetools==6.2.0 + - cbor2==5.7.0 + - certifi==2025.8.3 + - cffi==2.0.0 + - cfgv==3.4.0 + - chardet==5.2.0 + - charset-normalizer==3.4.2 + - click==8.2.1 + - cloudpickle==3.1.1 + - colorama==0.4.6 + - compressed-tensors==0.10.2 + - contourpy==1.3.2 + - coverage==7.10.6 + - cupy-cuda12x==13.6.0 + - cycler==0.12.1 + - dataproperty==1.1.0 + - datasets==3.6.0 + - depyf==0.19.0 + - dill==0.3.8 + - diskcache==5.6.3 + - distlib==0.4.0 + - distro==1.9.0 + - dnspython==2.8.0 + - docstring-parser==0.17.0 + - einops==0.8.1 + - email-validator==2.3.0 + - et-xmlfile==2.0.0 + - evaluate==0.4.5 + - exceptiongroup==1.3.0 + - execnet==2.1.1 + - 
fastapi==0.116.1 + - fastapi-cli==0.0.11 + - fastapi-cloud-cli==0.1.5 + - fastrlock==0.8.3 + - filelock==3.18.0 + - fonttools==4.59.0 + - frozenlist==1.7.0 + - fsspec==2025.3.0 + - gguf==0.17.1 + - grpcio==1.74.0 + - h11==0.16.0 + - hf-transfer==0.1.9 + - hf-xet==1.1.5 + - httpcore==1.0.9 + - httptools==0.6.4 + - httpx==0.28.1 + - huggingface-hub==0.34.4 + - identify==2.6.14 + - idna==3.10 + - importlib-resources==6.5.2 + - iniconfig==2.1.0 + - interegular==0.3.3 + - jinja2==3.1.6 + - jiter==0.10.0 + - jmespath==1.0.1 + - joblib==1.5.1 + - jsonargparse==4.40.1 + - jsonlines==4.0.0 + - jsonschema==4.25.1 + - jsonschema-specifications==2025.9.1 + - kiwisolver==1.4.9 + - lark==1.2.2 + - lightning==2.5.2 + - lightning-utilities==0.15.0 + - litdata==0.2.51 + - litgpt==0.5.9 + - llguidance==0.7.30 + - llvmlite==0.44.0 + - lm-eval==0.4.9.1 + - lm-format-enforcer==0.10.12 + - lxml==6.0.0 + - markdown==3.8.2 + - markdown-it-py==4.0.0 + - markupsafe==3.0.2 + - matplotlib==3.10.5 + - mbstrdecoder==1.1.4 + - mdurl==0.1.2 + - mistral-common==1.8.4 + - more-itertools==10.7.0 + - mpmath==1.3.0 + - msgpack==1.1.1 + - msgspec==0.19.0 + - multidict==6.6.3 + - multiprocess==0.70.16 + - networkx==3.4.2 + - ninja==1.13.0 + - nltk==3.9.1 + - nodeenv==1.9.1 + - numba==0.61.2 + - numexpr==2.11.0 + - numpy==2.2.6 + - nvidia-cublas-cu12==12.6.4.1 + - nvidia-cuda-cupti-cu12==12.6.80 + - nvidia-cuda-nvrtc-cu12==12.6.77 + - nvidia-cuda-runtime-cu12==12.6.77 + - nvidia-cudnn-cu12==9.5.1.17 + - nvidia-cufft-cu12==11.3.0.4 + - nvidia-cufile-cu12==1.11.1.6 + - nvidia-curand-cu12==10.3.7.77 + - nvidia-cusolver-cu12==11.7.1.2 + - nvidia-cusparse-cu12==12.5.4.2 + - nvidia-cusparselt-cu12==0.6.3 + - nvidia-nccl-cu12==2.26.2 + - nvidia-nvjitlink-cu12==12.6.85 + - nvidia-nvtx-cu12==12.6.77 + - obstore==0.7.3 + - openai==1.107.0 + - openai-harmony==0.0.4 + - opencv-python-headless==4.12.0.88 + - openpyxl==3.1.5 + - outlines-core==0.2.10 + - packaging==25.0 + - pandas==2.3.1 + - partial-json-parser==0.2.1.1.post6 + - pathvalidate==3.3.1 + - peft==0.17.0 + - pillow==11.3.0 + - platformdirs==4.4.0 + - pluggy==1.6.0 + - portalocker==3.2.0 + - pre-commit==4.3.0 + - prometheus-client==0.22.1 + - prometheus-fastapi-instrumentator==7.1.0 + - propcache==0.3.2 + - protobuf==6.31.1 + - psutil==7.0.0 + - py-cpuinfo==9.0.0 + - pyarrow==21.0.0 + - pybase64==1.4.2 + - pybind11==3.0.0 + - pycountry==24.6.1 + - pycparser==2.23 + - pydantic==2.11.7 + - pydantic-core==2.33.2 + - pydantic-extra-types==2.10.5 + - pygments==2.19.2 + - pyparsing==3.2.3 + - pytablewriter==1.2.1 + - pytest==8.4.2 + - pytest-cov==6.3.0 + - pytest-xdist==3.8.0 + - python-dateutil==2.9.0.post0 + - python-dotenv==1.1.1 + - python-json-logger==3.3.0 + - python-multipart==0.0.20 + - pytorch-lightning==2.5.2 + - pytz==2025.2 + - pyyaml==6.0.2 + - pyzmq==27.1.0 + - ray==2.49.1 + - referencing==0.36.2 + - regex==2025.7.34 + - requests==2.32.4 + - rich==14.1.0 + - rich-toolkit==0.15.1 + - rignore==0.6.4 + - rouge-score==0.1.2 + - rpds-py==0.27.1 + - s3transfer==0.13.1 + - sacrebleu==2.5.1 + - safetensors==0.5.3 + - scikit-learn==1.7.1 + - scipy==1.15.3 + - seaborn==0.13.2 + - sentencepiece==0.2.0 + - sentry-sdk==2.37.1 + - setproctitle==1.3.7 + - shellingham==1.5.4 + - six==1.17.0 + - sniffio==1.3.1 + - soundfile==0.13.1 + - soxr==1.0.0 + - sqlitedict==2.1.0 + - starlette==0.47.3 + - sympy==1.14.0 + - tabledata==1.3.4 + - tabulate==0.9.0 + - tcolorpy==0.1.7 + - tenacity==9.1.2 + - tensorboard==2.20.0 + - tensorboard-data-server==0.7.2 + - threadpoolctl==3.6.0 + - 
tifffile==2025.5.10 + - tiktoken==0.11.0 + - tokenizers==0.21.4 + - tomli==2.2.1 + - torch==2.7.1 + - torchaudio==2.7.1 + - torchmetrics==1.8.0 + - torchvision==0.22.1 + - tqdm==4.67.1 + - tqdm-multiprocess==0.0.11 + - transformers==4.55.0 + - triton==3.3.1 + - typepy==1.3.4 + - typer==0.17.4 + - typeshed-client==2.8.2 + - typing-extensions==4.14.1 + - typing-inspection==0.4.1 + - tzdata==2025.2 + - urllib3==2.5.0 + - uvicorn==0.35.0 + - uvloop==0.21.0 + - virtualenv==20.34.0 + - vllm==0.10.1.1 + - watchfiles==1.1.0 + - websockets==15.0.1 + - werkzeug==3.1.3 + - word2number==1.1 + - xformers==0.0.31 + - xgrammar==0.1.21 + - xxhash==3.5.0 + - yarl==1.20.1 + - zstandard==0.23.0 +prefix: /mnt/data/llmtcl diff --git a/fix.py b/fix.py new file mode 100644 index 0000000000000000000000000000000000000000..3100c28eac9f79083c3cc9d70ac407dc5f40590e --- /dev/null +++ b/fix.py @@ -0,0 +1,45 @@ +import re +import pathlib +import shutil +import argparse + +def escape_backslashes_in_questions(text: str) -> str: + # 匹配 "question": "..." + json_string = r'"(?:\\.|[^"\\])*"' + pattern = re.compile(rf'("question"\s*:\s*)({json_string})') + + def replacer(m): + prefix, raw = m.groups() + # 去掉外层引号 + inner = raw[1:-1] + # 把单反斜杠替换成双反斜杠 + inner_fixed = inner.replace("\\", "\\\\") + return f'{prefix}"{inner_fixed}"' + + return pattern.sub(replacer, text) + +def process_file(p: pathlib.Path): + original = p.read_text(encoding="utf-8") + fixed = escape_backslashes_in_questions(original) + if fixed != original: + shutil.copyfile(p, p.with_suffix(p.suffix + ".bak")) + p.write_text(fixed, encoding="utf-8") + print(f"[UPDATED] {p}") + else: + print(f"[SKIP ] {p}") + +def main(): + ap = argparse.ArgumentParser(description="Fix backslashes in 'question' fields of JSON files") + ap.add_argument("folder", type=str, help="Directory containing .json files") + args = ap.parse_args() + + root = pathlib.Path(args.folder) + if not root.is_dir(): + print(f"[ERROR] {args.folder} is not a valid directory") + return + + for p in root.glob("*.json"): + process_file(p) + +if __name__ == "__main__": + main() diff --git a/litgpt.egg-info/PKG-INFO b/litgpt.egg-info/PKG-INFO new file mode 100644 index 0000000000000000000000000000000000000000..f7a31ab67893626d673a93cfe0712050cfe9f3c0 --- /dev/null +++ b/litgpt.egg-info/PKG-INFO @@ -0,0 +1,977 @@ +Metadata-Version: 2.4 +Name: litgpt +Version: 0.5.9 +Summary: Hackable implementation of state-of-the-art open-source LLMs +Author-email: Lightning AI +License: Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [2023] Lightning AI + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +Project-URL: documentation, https://github.com/lightning-AI/litgpt/tutorials +Project-URL: homepage, https://github.com/lightning-AI/litgpt +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: huggingface-hub<0.33,>=0.23.5 +Requires-Dist: jsonargparse[signatures]<=4.32.1,>=4.30.1; python_version <= "3.9" +Requires-Dist: jsonargparse[signatures]>=4.37; python_version > "3.9" +Requires-Dist: lightning>=2.5 +Requires-Dist: psutil==7 +Requires-Dist: safetensors>=0.4.3 +Requires-Dist: tokenizers>=0.15.2 +Requires-Dist: torch>=2.5 +Requires-Dist: tqdm>=4.66 +Provides-Extra: compiler +Requires-Dist: lightning-thunder>=0.2.0.dev20250119; (python_version >= "3.10" and sys_platform == "linux") and extra == "compiler" +Provides-Extra: extra +Requires-Dist: bitsandbytes<0.43,>=0.42; sys_platform == "darwin" and extra == "extra" +Requires-Dist: bitsandbytes<0.45.5,>=0.45.2; (sys_platform == "linux" or sys_platform == "win32") and extra == "extra" +Requires-Dist: datasets<4,>=2.18; extra == "extra" +Requires-Dist: huggingface-hub[hf-transfer]>=0.21; extra == "extra" +Requires-Dist: litdata==0.2.51; extra == "extra" +Requires-Dist: litserve>0.2; extra == "extra" +Requires-Dist: lm-eval>=0.4.2; extra == "extra" +Requires-Dist: pandas>=1.9; extra == "extra" +Requires-Dist: pyarrow>=15.0.2; extra == "extra" +Requires-Dist: requests>=2.31; extra == "extra" +Requires-Dist: sentencepiece>=0.2; extra == "extra" +Requires-Dist: tensorboard>=2.14; extra == "extra" +Requires-Dist: torchmetrics>=1.3.1; extra == "extra" +Requires-Dist: transformers<4.52,>=4.51.3; extra == "extra" +Requires-Dist: uvloop>=0.2; sys_platform != "win32" and extra == "extra" +Requires-Dist: zstandard>=0.22; extra == "extra" +Provides-Extra: test +Requires-Dist: einops>=0.7; extra == "test" +Requires-Dist: protobuf>=4.23.4; extra == "test" +Requires-Dist: pytest>=8.1.1; extra == "test" +Requires-Dist: pytest-benchmark>=5.1; extra == "test" +Requires-Dist: pytest-dependency>=0.6; extra == "test" +Requires-Dist: pytest-rerunfailures>=14; extra == "test" +Requires-Dist: pytest-timeout>=2.3.1; extra == "test" +Dynamic: license-file + +
+ + +# ⚡ LitGPT + +**20+ high-performance LLMs with recipes to pretrain, finetune, and deploy at scale.** + +
+✅ From scratch implementations      ✅ No abstractions         ✅ Beginner friendly
+   ✅ Flash attention                   ✅ FSDP                    ✅ LoRA, QLoRA, Adapter
+✅ Reduce GPU memory (fp4/8/16/32)   ✅ 1-1000+ GPUs/TPUs       ✅ 20+ LLMs         
+
+ + +--- + + +![PyPI - Python Version](https://img.shields.io/pypi/pyversions/pytorch-lightning) +![cpu-tests](https://github.com/lightning-AI/lit-stablelm/actions/workflows/cpu-tests.yml/badge.svg) [![license](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/Lightning-AI/lit-stablelm/blob/master/LICENSE) [![Discord](https://img.shields.io/discord/1077906959069626439)](https://discord.gg/VptPCZkGNa) + +

+ Quick start • + Models • + Finetune • + Deploy • + All workflows • + Features • + Recipes (YAML) • + Lightning AI • + Tutorials +

+ +  + + + Get started + + +  + +
+ +# Use, finetune, pretrain, and deploy LLMs Lightning fast ⚡⚡ +Every LLM is implemented from scratch with **no abstractions** and **full control**, making them blazing fast, minimal, and performant at enterprise scale. + +✅ **Enterprise ready -** Apache 2.0 for unlimited enterprise use.
+✅ **Developer friendly -** Easy debugging with no abstraction layers and single file implementations.
+✅ **Optimized performance -** Models designed to maximize performance, reduce costs, and speed up training.
+✅ **Proven recipes -** Highly-optimized training/finetuning recipes tested at enterprise scale.
+ +  + +# Quick start +Install LitGPT +``` +pip install 'litgpt[extra]' +``` + +Load and use any of the [20+ LLMs](#choose-from-20-llms): +```python +from litgpt import LLM + +llm = LLM.load("microsoft/phi-2") +text = llm.generate("Fix the spelling: Every fall, the family goes to the mountains.") +print(text) +# Corrected Sentence: Every fall, the family goes to the mountains. +``` + +  + +✅ Optimized for fast inference
+✅ Quantization
+✅ Runs on low-memory GPUs
+✅ No layers of internal abstractions
+✅ Optimized for production scale
+ +
+ Advanced install options + +Install from source: + +```bash +git clone https://github.com/Lightning-AI/litgpt +cd litgpt +pip install -e '.[all]' +``` +
+ +[Explore the full Python API docs](tutorials/python-api.md). + +  + +--- +# Choose from 20+ LLMs +Every model is written from scratch to maximize performance and remove layers of abstraction: + +| Model | Model size | Author | Reference | +|----|----|----|----| +| Llama 3, 3.1, 3.2, 3.3 | 1B, 3B, 8B, 70B, 405B | Meta AI | [Meta AI 2024](https://github.com/meta-llama/llama3) | +| Code Llama | 7B, 13B, 34B, 70B | Meta AI | [Rozière et al. 2023](https://arxiv.org/abs/2308.12950) | +| CodeGemma | 7B | Google | [Google Team, Google Deepmind](https://ai.google.dev/gemma/docs/codegemma) | +| Gemma 2 | 2B, 9B, 27B | Google | [Google Team, Google Deepmind](https://storage.googleapis.com/deepmind-media/gemma/gemma-2-report.pdf) | +| Phi 4 | 14B | Microsoft Research | [Abdin et al. 2024](https://arxiv.org/abs/2412.08905) | +| Qwen2.5 | 0.5B, 1.5B, 3B, 7B, 14B, 32B, 72B | Alibaba Group | [Qwen Team 2024](https://qwenlm.github.io/blog/qwen2.5/) | +| Qwen2.5 Coder | 0.5B, 1.5B, 3B, 7B, 14B, 32B | Alibaba Group | [Hui, Binyuan et al. 2024](https://arxiv.org/abs/2409.12186) | +| R1 Distill Llama | 8B, 70B | DeepSeek AI | [DeepSeek AI 2025](https://github.com/deepseek-ai/DeepSeek-R1/blob/main/DeepSeek_R1.pdf) | +| ... | ... | ... | ... | + +
+ See full list of 20+ LLMs + +  + +#### All models + +| Model | Model size | Author | Reference | +|----|----|----|----| +| CodeGemma | 7B | Google | [Google Team, Google Deepmind](https://ai.google.dev/gemma/docs/codegemma) | +| Code Llama | 7B, 13B, 34B, 70B | Meta AI | [Rozière et al. 2023](https://arxiv.org/abs/2308.12950) | +| Falcon | 7B, 40B, 180B | TII UAE | [TII 2023](https://falconllm.tii.ae) | +| Falcon 3 | 1B, 3B, 7B, 10B | TII UAE | [TII 2024](https://huggingface.co/blog/falcon3) | +| FreeWilly2 (Stable Beluga 2) | 70B | Stability AI | [Stability AI 2023](https://stability.ai/blog/stable-beluga-large-instruction-fine-tuned-models) | +| Function Calling Llama 2 | 7B | Trelis | [Trelis et al. 2023](https://huggingface.co/Trelis/Llama-2-7b-chat-hf-function-calling-v2) | +| Gemma | 2B, 7B | Google | [Google Team, Google Deepmind](https://storage.googleapis.com/deepmind-media/gemma/gemma-report.pdf) | +| Gemma 2 | 9B, 27B | Google | [Google Team, Google Deepmind](https://storage.googleapis.com/deepmind-media/gemma/gemma-2-report.pdf) | +| Gemma 3 | 1B, 4B, 12B, 27B | Google | [Google Team, Google Deepmind](https://arxiv.org/pdf/2503.19786) | +| Llama 2 | 7B, 13B, 70B | Meta AI | [Touvron et al. 2023](https://arxiv.org/abs/2307.09288) | +| Llama 3.1 | 8B, 70B | Meta AI | [Meta AI 2024](https://github.com/meta-llama/llama3) | +| Llama 3.2 | 1B, 3B | Meta AI | [Meta AI 2024](https://ai.meta.com/blog/llama-3-2-connect-2024-vision-edge-mobile-devices/) | +| Llama 3.3 | 70B | Meta AI | [Meta AI 2024](https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct) | +| Mathstral | 7B | Mistral AI | [Mistral AI 2024](https://mistral.ai/news/mathstral/) | +| MicroLlama | 300M | Ken Wang | [MicroLlama repo](https://github.com/keeeeenw/MicroLlama) | +| Mixtral MoE | 8x7B | Mistral AI | [Mistral AI 2023](https://mistral.ai/news/mixtral-of-experts/) | +| Mistral | 7B, 123B | Mistral AI | [Mistral AI 2023](https://mistral.ai/news/announcing-mistral-7b/) | +| Mixtral MoE | 8x22B | Mistral AI | [Mistral AI 2024](https://mistral.ai/news/mixtral-8x22b/) | +| OLMo | 1B, 7B | Allen Institute for AI (AI2) | [Groeneveld et al. 2024](https://aclanthology.org/2024.acl-long.841/) | +| OpenLLaMA | 3B, 7B, 13B | OpenLM Research | [Geng & Liu 2023](https://github.com/openlm-research/open_llama) | +| Phi 1.5 & 2 | 1.3B, 2.7B | Microsoft Research | [Li et al. 2023](https://arxiv.org/abs/2309.05463) | +| Phi 3 | 3.8B | Microsoft Research | [Abdin et al. 2024](https://arxiv.org/abs/2404.14219) | +| Phi 4 | 14B | Microsoft Research | [Abdin et al. 2024](https://arxiv.org/abs/2412.08905) | +| Phi 4 Mini Instruct | 3.8B | Microsoft Research | [Microsoft 2025](https://arxiv.org/abs/2503.01743) | +| Phi 4 Mini Reasoning | 3.8B | Microsoft Research | [Xu, Peng et al. 2025](https://arxiv.org/abs/2504.21233) | +| Phi 4 Reasoning | 3.8B | Microsoft Research | [Abdin et al. 2025](https://arxiv.org/abs/2504.21318) | +| Phi 4 Reasoning Plus | 3.8B | Microsoft Research | [Abdin et al. 2025](https://arxiv.org/abs/2504.21318) | +| Platypus | 7B, 13B, 70B | Lee et al. | [Lee, Hunter, and Ruiz 2023](https://arxiv.org/abs/2308.07317) | +| Pythia | {14,31,70,160,410}M, {1,1.4,2.8,6.9,12}B | EleutherAI | [Biderman et al. 2023](https://arxiv.org/abs/2304.01373) | +| Qwen2.5 | 0.5B, 1.5B, 3B, 7B, 14B, 32B, 72B | Alibaba Group | [Qwen Team 2024](https://qwenlm.github.io/blog/qwen2.5/) | +| Qwen2.5 Coder | 0.5B, 1.5B, 3B, 7B, 14B, 32B | Alibaba Group | [Hui, Binyuan et al. 
2024](https://arxiv.org/abs/2409.12186) | +| Qwen2.5 1M (Long Context) | 7B, 14B | Alibaba Group | [Qwen Team 2025](https://qwenlm.github.io/blog/qwen2.5-1m/) | +| Qwen2.5 Math | 1.5B, 7B, 72B | Alibaba Group | [An, Yang et al. 2024](https://arxiv.org/abs/2409.12122) | +| QwQ | 32B | Alibaba Group | [Qwen Team 2025](https://qwenlm.github.io/blog/qwq-32b/) | +| QwQ-Preview | 32B | Alibaba Group | [Qwen Team 2024](https://qwenlm.github.io/blog/qwq-32b-preview/) | +| Qwen3 | 0.6B, 1.7B, 4B, 8B, 14B, 32B | Alibaba Group | [Qwen Team 2025](https://arxiv.org/abs/2505.09388/) | +| Qwen3 MoE | 30B, 235B | Alibaba Group | [Qwen Team 2025](https://arxiv.org/abs/2505.09388/) | +| R1 Distill Llama | 8B, 70B | DeepSeek AI | [DeepSeek AI 2025](https://github.com/deepseek-ai/DeepSeek-R1/blob/main/DeepSeek_R1.pdf) | +| SmolLM2 | 135M, 360M, 1.7B | Hugging Face | [Hugging Face 2024](https://github.com/huggingface/smollm) | +| Salamandra | 2B, 7B | Barcelona Supercomputing Centre | [BSC-LTC 2024](https://github.com/BSC-LTC/salamandra) | +| StableCode | 3B | Stability AI | [Stability AI 2023](https://stability.ai/blog/stablecode-llm-generative-ai-coding) | +| StableLM | 3B, 7B | Stability AI | [Stability AI 2023](https://github.com/Stability-AI/StableLM) | +| StableLM Zephyr | 3B | Stability AI | [Stability AI 2023](https://stability.ai/blog/stablecode-llm-generative-ai-coding) | +| TinyLlama | 1.1B | Zhang et al. | [Zhang et al. 2023](https://github.com/jzhang38/TinyLlama) | + + +**Tip**: You can list all available models by running the `litgpt download list` command. + + +
+ +  + +--- + +# Workflows + +

+ Finetune • + Pretrain • + Continued pretraining • + Evaluate • + Deploy • + Test +

+ +  + +Use the command line interface to run advanced workflows such as pretraining or finetuning on your own data. + + +## All workflows +After installing LitGPT, select the model and workflow to run (finetune, pretrain, evaluate, deploy, etc...): + +```bash +# litgpt [action] [model] +litgpt serve meta-llama/Llama-3.2-3B-Instruct +litgpt finetune meta-llama/Llama-3.2-3B-Instruct +litgpt pretrain meta-llama/Llama-3.2-3B-Instruct +litgpt chat meta-llama/Llama-3.2-3B-Instruct +litgpt evaluate meta-llama/Llama-3.2-3B-Instruct +``` + +  + +---- + +## Finetune an LLM + +
+ + Run on Studios + +
+ +  + +Finetuning is the process of taking a pretrained AI model and further training it on a smaller, specialized dataset tailored to a specific task or application. + + +  + +```bash +# 0) setup your dataset +curl -L https://huggingface.co/datasets/ksaw008/finance_alpaca/resolve/main/finance_alpaca.json -o my_custom_dataset.json + +# 1) Finetune a model (auto downloads weights) +litgpt finetune microsoft/phi-2 \ + --data JSON \ + --data.json_path my_custom_dataset.json \ + --data.val_split_fraction 0.1 \ + --out_dir out/custom-model + +# 2) Test the model +litgpt chat out/custom-model/final + +# 3) Deploy the model +litgpt serve out/custom-model/final +``` + +[Read the full finetuning docs](tutorials/finetune.md) + +  + +---- + +## Deploy an LLM + +
+ + Deploy on Studios + +
+ +  + +Deploy a pretrained or finetune LLM to use it in real-world applications. Deploy, automatically sets up a web server that can be accessed by a website or app. + +```bash +# deploy an out-of-the-box LLM +litgpt serve microsoft/phi-2 + +# deploy your own trained model +litgpt serve path/to/microsoft/phi-2/checkpoint +``` + +
+ Show code to query server: + +  + +Test the server in a separate terminal and integrate the model API into your AI product: +```python +# 3) Use the server (in a separate Python session) +import requests, json +response = requests.post( + "http://127.0.0.1:8000/predict", + json={"prompt": "Fix typos in the following sentence: Example input"} +) +print(response.json()["output"]) +``` +
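+
+For a quick smoke test from the shell, the same request can also be sent with `curl`. This is a minimal sketch that simply mirrors the Python call above (same default address and `/predict` route):
+
+```bash
+# Query the running LitGPT server from the command line,
+# using the same endpoint and JSON payload as the Python example above.
+curl -X POST http://127.0.0.1:8000/predict \
+  -H "Content-Type: application/json" \
+  -d '{"prompt": "Fix typos in the following sentence: Example input"}'
+```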
+ +[Read the full deploy docs](tutorials/deploy.md). + +  + +---- + +## Evaluate an LLM +Evaluate an LLM to test its performance on various tasks to see how well it understands and generates text. Simply put, we can evaluate things like how well would it do in college-level chemistry, coding, etc... (MMLU, Truthful QA, etc...) + +```bash +litgpt evaluate microsoft/phi-2 --tasks 'truthfulqa_mc2,mmlu' +``` + +[Read the full evaluation docs](tutorials/evaluation.md). + +  + +---- + +## Test an LLM + +
+ + Run on Studios + +
+ +  + +Test how well the model works via an interactive chat. Use the `chat` command to chat, extract embeddings, etc... + +Here's an example showing how to use the Phi-2 LLM: +```bash +litgpt chat microsoft/phi-2 + +>> Prompt: What do Llamas eat? +``` + +
+ Full code: + +  + +```bash +# 1) List all supported LLMs +litgpt download list + +# 2) Use a model (auto downloads weights) +litgpt chat microsoft/phi-2 + +>> Prompt: What do Llamas eat? +``` + +The download of certain models requires an additional access token. You can read more about this in the [download](tutorials/download_model_weights.md#specific-models-and-access-tokens) documentation. + +
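+
+For gated checkpoints (for example, the Meta Llama models), the required access token can be supplied when downloading the weights. A minimal sketch, assuming the `--access_token` option described in the download documentation linked above (the token value is a placeholder):
+
+```bash
+# Download a gated model by supplying a Hugging Face access token.
+# Replace the placeholder token with your own.
+litgpt download meta-llama/Llama-3.2-3B-Instruct --access_token hf_your_token_here
+```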
+ +[Read the full chat docs](tutorials/inference.md). + +  + +---- + +## Pretrain an LLM + +
+ + Run on Studios + +
+ +  + +Pretraining is the process of teaching an AI model by exposing it to a large amount of data before it is fine-tuned for specific tasks. + +
+ Show code: + +  + +```bash +mkdir -p custom_texts +curl https://www.gutenberg.org/cache/epub/24440/pg24440.txt --output custom_texts/book1.txt +curl https://www.gutenberg.org/cache/epub/26393/pg26393.txt --output custom_texts/book2.txt + +# 1) Download a tokenizer +litgpt download EleutherAI/pythia-160m \ + --tokenizer_only True + +# 2) Pretrain the model +litgpt pretrain EleutherAI/pythia-160m \ + --tokenizer_dir EleutherAI/pythia-160m \ + --data TextFiles \ + --data.train_data_path "custom_texts/" \ + --train.max_tokens 10_000_000 \ + --out_dir out/custom-model + +# 3) Test the model +litgpt chat out/custom-model/final +``` +
+ +[Read the full pretraining docs](tutorials/pretrain.md) + +  + +---- + +## Continue pretraining an LLM + +
+ + Run on Studios + +
+ +  + +Continued pretraining is another way of finetuning that specializes an already pretrained model by training on custom data: + +
+ Show code: + +  + +```bash +mkdir -p custom_texts +curl https://www.gutenberg.org/cache/epub/24440/pg24440.txt --output custom_texts/book1.txt +curl https://www.gutenberg.org/cache/epub/26393/pg26393.txt --output custom_texts/book2.txt + +# 1) Continue pretraining a model (auto downloads weights) +litgpt pretrain EleutherAI/pythia-160m \ + --tokenizer_dir EleutherAI/pythia-160m \ + --initial_checkpoint_dir EleutherAI/pythia-160m \ + --data TextFiles \ + --data.train_data_path "custom_texts/" \ + --train.max_tokens 10_000_000 \ + --out_dir out/custom-model + +# 2) Test the model +litgpt chat out/custom-model/final +``` + +
+ +[Read the full continued pretraining docs](tutorials/pretrain.md#continued-pretraining-on-custom-data) + +  + +---- + +# State-of-the-art features + +✅ State-of-the-art optimizations: Flash Attention v2, multi-GPU support via fully-sharded data parallelism, [optional CPU offloading](tutorials/oom.md#do-sharding-across-multiple-gpus), and [TPU and XLA support](extensions/xla).
+✅ [Pretrain](tutorials/pretrain.md), [finetune](tutorials/finetune.md), and [deploy](tutorials/inference.md)
+✅ Reduce compute requirements with low-precision settings: FP16, BF16, and FP16/FP32 mixed.
+✅ Lower memory requirements with [quantization](tutorials/quantize.md): 4-bit floats, 8-bit integers, and double quantization.
+✅ [Configuration files](config_hub) for great out-of-the-box performance.
+✅ Parameter-efficient finetuning: [LoRA](tutorials/finetune_lora.md), [QLoRA](tutorials/finetune_lora.md), [Adapter](tutorials/finetune_adapter.md), and [Adapter v2](tutorials/finetune_adapter.md).
+✅ [Exporting](tutorials/convert_lit_models.md) to other popular model weight formats.
+✅ Many popular datasets for [pretraining](tutorials/pretrain.md) and [finetuning](tutorials/prepare_dataset.md), and [support for custom datasets](tutorials/prepare_dataset.md#preparing-custom-datasets-for-instruction-finetuning).
+✅ Readable and easy-to-modify code to experiment with the latest research ideas.
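+
+For example, the low-precision and quantization options above can be combined from the command line. A minimal sketch, assuming the `--precision` and `--quantize` flags that mirror the config fields shown in the recipes below (exact values depend on your GPU):
+
+```bash
+# QLoRA-style finetuning: bf16 compute precision plus 4-bit NF4 quantization
+# to reduce GPU memory usage on a single consumer GPU.
+litgpt finetune microsoft/phi-2 \
+  --precision bf16-true \
+  --quantize bnb.nf4
+```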
+ +  + +--- + +# Training recipes + +LitGPT comes with validated recipes (YAML configs) to train models under different conditions. We've generated these recipes based on the parameters we found to perform the best for different training conditions. + +Browse all training recipes [here](config_hub). + +### Example + +```bash +litgpt finetune \ + --config https://raw.githubusercontent.com/Lightning-AI/litgpt/main/config_hub/finetune/llama-2-7b/lora.yaml +``` +
+ ✅ Use configs to customize training + +Configs let you customize training for all granular parameters like: + +```yaml +# The path to the base model's checkpoint directory to load for finetuning. (type: , default: checkpoints/stabilityai/stablelm-base-alpha-3b) +checkpoint_dir: checkpoints/meta-llama/Llama-2-7b-hf + +# Directory in which to save checkpoints and logs. (type: , default: out/lora) +out_dir: out/finetune/qlora-llama2-7b + +# The precision to use for finetuning. Possible choices: "bf16-true", "bf16-mixed", "32-true". (type: Optional[str], default: null) +precision: bf16-true + +... +``` +
+ +
+ ✅ Example: LoRA finetuning config + +  + +```yaml +# The path to the base model's checkpoint directory to load for finetuning. (type: , default: checkpoints/stabilityai/stablelm-base-alpha-3b) +checkpoint_dir: checkpoints/meta-llama/Llama-2-7b-hf + +# Directory in which to save checkpoints and logs. (type: , default: out/lora) +out_dir: out/finetune/qlora-llama2-7b + +# The precision to use for finetuning. Possible choices: "bf16-true", "bf16-mixed", "32-true". (type: Optional[str], default: null) +precision: bf16-true + +# If set, quantize the model with this algorithm. See ``tutorials/quantize.md`` for more information. (type: Optional[Literal['nf4', 'nf4-dq', 'fp4', 'fp4-dq', 'int8-training']], default: null) +quantize: bnb.nf4 + +# How many devices/GPUs to use. (type: Union[int, str], default: 1) +devices: 1 + +# How many nodes to use. (type: int, default: 1) +num_nodes: 1 + +# The LoRA rank. (type: int, default: 8) +lora_r: 32 + +# The LoRA alpha. (type: int, default: 16) +lora_alpha: 16 + +# The LoRA dropout value. (type: float, default: 0.05) +lora_dropout: 0.05 + +# Whether to apply LoRA to the query weights in attention. (type: bool, default: True) +lora_query: true + +# Whether to apply LoRA to the key weights in attention. (type: bool, default: False) +lora_key: false + +# Whether to apply LoRA to the value weights in attention. (type: bool, default: True) +lora_value: true + +# Whether to apply LoRA to the output projection in the attention block. (type: bool, default: False) +lora_projection: false + +# Whether to apply LoRA to the weights of the MLP in the attention block. (type: bool, default: False) +lora_mlp: false + +# Whether to apply LoRA to output head in GPT. (type: bool, default: False) +lora_head: false + +# Data-related arguments. If not provided, the default is ``litgpt.data.Alpaca``. +data: + class_path: litgpt.data.Alpaca2k + init_args: + mask_prompt: false + val_split_fraction: 0.05 + prompt_style: alpaca + ignore_index: -100 + seed: 42 + num_workers: 4 + download_dir: data/alpaca2k + +# Training-related arguments. See ``litgpt.args.TrainArgs`` for details +train: + + # Number of optimizer steps between saving checkpoints (type: Optional[int], default: 1000) + save_interval: 200 + + # Number of iterations between logging calls (type: int, default: 1) + log_interval: 1 + + # Number of samples between optimizer steps across data-parallel ranks (type: int, default: 128) + global_batch_size: 8 + + # Number of samples per data-parallel rank (type: int, default: 4) + micro_batch_size: 2 + + # Number of iterations with learning rate warmup active (type: int, default: 100) + lr_warmup_steps: 10 + + # Number of epochs to train on (type: Optional[int], default: 5) + epochs: 4 + + # Total number of tokens to train on (type: Optional[int], default: null) + max_tokens: + + # Limits the number of optimizer steps to run (type: Optional[int], default: null) + max_steps: + + # Limits the length of samples (type: Optional[int], default: null) + max_seq_length: 512 + + # Whether to tie the embedding weights with the language modeling head weights (type: Optional[bool], default: null) + tie_embeddings: + + # (type: float, default: 0.0003) + learning_rate: 0.0002 + + # (type: float, default: 0.02) + weight_decay: 0.0 + + # (type: float, default: 0.9) + beta1: 0.9 + + # (type: float, default: 0.95) + beta2: 0.95 + + # (type: Optional[float], default: null) + max_norm: + + # (type: float, default: 6e-05) + min_lr: 6.0e-05 + +# Evaluation-related arguments. 
See ``litgpt.args.EvalArgs`` for details +eval: + + # Number of optimizer steps between evaluation calls (type: int, default: 100) + interval: 100 + + # Number of tokens to generate (type: Optional[int], default: 100) + max_new_tokens: 100 + + # Number of iterations (type: int, default: 100) + max_iters: 100 + +# The name of the logger to send metrics to. (type: Literal['wandb', 'tensorboard', 'csv'], default: csv) +logger_name: csv + +# The random seed to use for reproducibility. (type: int, default: 1337) +seed: 1337 +``` +
+ +
+ ✅ Override any parameter in the CLI: + +```bash +litgpt finetune \ + --config https://raw.githubusercontent.com/Lightning-AI/litgpt/main/config_hub/finetune/llama-2-7b/lora.yaml \ + --lora_r 4 +``` +
+ +  + +---- + +# Project highlights + +LitGPT powers many great AI projects, initiatives, challenges and of course enterprises. Please submit a pull request to be considered for a feature. + +
+ 📊 SAMBA: Simple Hybrid State Space Models for Efficient Unlimited Context Language Modeling + +The [Samba](https://github.com/microsoft/Samba) project by researchers at Microsoft is built on top of the LitGPT code base and combines state space models with sliding window attention, which outperforms pure state space models. + +
+ +
+ 🏆 NeurIPS 2023 Large Language Model Efficiency Challenge: 1 LLM + 1 GPU + 1 Day + +The LitGPT repository was the official starter kit for the [NeurIPS 2023 LLM Efficiency Challenge](https://llm-efficiency-challenge.github.io), which is a competition focused on finetuning an existing non-instruction tuned LLM for 24 hours on a single GPU. + +
+ +
+ 🦙 TinyLlama: An Open-Source Small Language Model + + +LitGPT powered the [TinyLlama project](https://github.com/jzhang38/TinyLlama) and [TinyLlama: An Open-Source Small Language Model](https://arxiv.org/abs/2401.02385) research paper. + +
+ +
+ 🍪 MicroLlama: MicroLlama-300M + +[MicroLlama](https://github.com/keeeeenw/MicroLlama) is a 300M Llama model pretrained on 50B tokens powered by TinyLlama and LitGPT. +
+ +
+ 🔬 Pre-training Small Base LMs with Fewer Tokens + +The research paper ["Pre-training Small Base LMs with Fewer Tokens"](https://arxiv.org/abs/2404.08634), which utilizes LitGPT, develops smaller base language models by inheriting a few transformer blocks from larger models and training on a tiny fraction of the data used by the larger models. It demonstrates that these smaller models can perform comparably to larger models despite using significantly less training data and resources. + +
+ +  + +---- + +# Community + +We welcome all individual contributors, regardless of their level of experience or hardware. Your contributions are valuable, and we are excited to see what you can accomplish in this collaborative and supportive environment. + +- [Request a feature](https://github.com/Lightning-AI/litgpt/issues) +- [Submit your first contribution](https://lightning.ai/pages/community/tutorial/how-to-contribute-to-litgpt/) +- [Join our Discord](https://discord.gg/VptPCZkGNa) + +  + +# Tutorials + +🚀 [Get started](tutorials/0_to_litgpt.md)
+⚡️ [Finetuning, incl. LoRA, QLoRA, and Adapters](tutorials/finetune.md)
+🤖 [Pretraining](tutorials/pretrain.md)
+💬 [Model evaluation](tutorials/evaluation.md)
+📘 [Supported and custom datasets](tutorials/prepare_dataset.md)
+🧹 [Quantization](tutorials/quantize.md)
+🤯 [Tips for dealing with out-of-memory (OOM) errors](tutorials/oom.md)
+🧑🏽‍💻 [Using cloud TPUs](extensions/xla)
+ +  + +---- + +### Acknowledgments + +This implementation extends on [Lit-LLaMA](https://github.com/lightning-AI/lit-llama) and [nanoGPT](https://github.com/karpathy/nanoGPT), and it's **powered by [Lightning Fabric](https://lightning.ai/docs/fabric/stable/) ⚡**. + +- [@karpathy](https://github.com/karpathy) for [nanoGPT](https://github.com/karpathy/nanoGPT) +- [@EleutherAI](https://github.com/EleutherAI) for [GPT-NeoX](https://github.com/EleutherAI/gpt-neox) and the [Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness) +- [@TimDettmers](https://github.com/TimDettmers) for [bitsandbytes](https://github.com/TimDettmers/bitsandbytes) +- [@Microsoft](https://github.com/microsoft) for [LoRA](https://github.com/microsoft/LoRA) +- [@tridao](https://github.com/tridao) for [Flash Attention 2](https://github.com/Dao-AILab/flash-attention) + +### License + +LitGPT is released under the [Apache 2.0](https://github.com/Lightning-AI/litgpt/blob/main/LICENSE) license. + +### Citation + +If you use LitGPT in your research, please cite the following work: + +```bibtex +@misc{litgpt-2023, + author = {Lightning AI}, + title = {LitGPT}, + howpublished = {\url{https://github.com/Lightning-AI/litgpt}}, + year = {2023}, +} +``` + +  diff --git a/litgpt.egg-info/SOURCES.txt b/litgpt.egg-info/SOURCES.txt new file mode 100644 index 0000000000000000000000000000000000000000..2bfaeb5b013c45ee665a7e2a99feed8cdffe9116 --- /dev/null +++ b/litgpt.egg-info/SOURCES.txt @@ -0,0 +1,89 @@ +LICENSE +README.md +pyproject.toml +litgpt/__init__.py +litgpt/__main__.py +litgpt/adapter.py +litgpt/adapter_v2.py +litgpt/api.py +litgpt/args.py +litgpt/config.py +litgpt/lora.py +litgpt/model.py +litgpt/pretrain.py +litgpt/prompts.py +litgpt/tokenizer.py +litgpt/utils.py +litgpt.egg-info/PKG-INFO +litgpt.egg-info/SOURCES.txt +litgpt.egg-info/dependency_links.txt +litgpt.egg-info/entry_points.txt +litgpt.egg-info/requires.txt +litgpt.egg-info/top_level.txt +litgpt/chat/__init__.py +litgpt/chat/base.py +litgpt/data/__init__.py +litgpt/data/alpaca.py +litgpt/data/alpaca_2k.py +litgpt/data/alpaca_gpt4.py +litgpt/data/base.py +litgpt/data/deita.py +litgpt/data/flan.py +litgpt/data/json_data.py +litgpt/data/lima.py +litgpt/data/lit_data.py +litgpt/data/longform.py +litgpt/data/microllama.py +litgpt/data/openwebtext.py +litgpt/data/prepare_slimpajama.py +litgpt/data/prepare_starcoder.py +litgpt/data/text_files.py +litgpt/data/tinyllama.py +litgpt/data/tinystories.py +litgpt/deploy/__init__.py +litgpt/deploy/serve.py +litgpt/eval/evaluate.py +litgpt/finetune/__init__.py +litgpt/finetune/adapter.py +litgpt/finetune/adapter_v2.py +litgpt/finetune/full.py +litgpt/finetune/lora.py +litgpt/generate/__init__.py +litgpt/generate/adapter.py +litgpt/generate/adapter_v2.py +litgpt/generate/base.py +litgpt/generate/full.py +litgpt/generate/sequentially.py +litgpt/generate/speculative_decoding.py +litgpt/generate/tp.py +litgpt/scripts/__init__.py +litgpt/scripts/convert_hf_checkpoint.py +litgpt/scripts/convert_lit_checkpoint.py +litgpt/scripts/convert_pretrained_checkpoint.py +litgpt/scripts/download.py +litgpt/scripts/merge_lora.py +tests/test_adapter.py +tests/test_adapter_v2.py +tests/test_api.py +tests/test_args.py +tests/test_batch.py +tests/test_chat.py +tests/test_ci.py +tests/test_cli.py +tests/test_config.py +tests/test_config_hub.py +tests/test_distributed.py +tests/test_evaluate.py +tests/test_full.py +tests/test_generate_speculatively.py +tests/test_lora.py +tests/test_merge_lora.py +tests/test_model.py 
+tests/test_pretrain.py +tests/test_prompts.py +tests/test_readme.py +tests/test_rope.py +tests/test_serve.py +tests/test_tokenizer.py +tests/test_trainer_support.py +tests/test_utils.py \ No newline at end of file diff --git a/litgpt.egg-info/dependency_links.txt b/litgpt.egg-info/dependency_links.txt new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/litgpt.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/litgpt.egg-info/entry_points.txt b/litgpt.egg-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..cc0f6b344212fa3699ceac6d35049007919c71bb --- /dev/null +++ b/litgpt.egg-info/entry_points.txt @@ -0,0 +1,2 @@ +[console_scripts] +litgpt = litgpt.__main__:main diff --git a/litgpt.egg-info/requires.txt b/litgpt.egg-info/requires.txt new file mode 100644 index 0000000000000000000000000000000000000000..83d4545190cb687d4c6848c502a50d438e2e8a36 --- /dev/null +++ b/litgpt.egg-info/requires.txt @@ -0,0 +1,51 @@ +huggingface-hub<0.33,>=0.23.5 +lightning>=2.5 +psutil==7 +safetensors>=0.4.3 +tokenizers>=0.15.2 +torch>=2.5 +tqdm>=4.66 + +[:python_version <= "3.9"] +jsonargparse[signatures]<=4.32.1,>=4.30.1 + +[:python_version > "3.9"] +jsonargparse[signatures]>=4.37 + +[compiler] + +[compiler:python_version >= "3.10" and sys_platform == "linux"] +lightning-thunder>=0.2.0.dev20250119 + +[extra] +datasets<4,>=2.18 +huggingface-hub[hf-transfer]>=0.21 +litdata==0.2.51 +litserve>0.2 +lm-eval>=0.4.2 +pandas>=1.9 +pyarrow>=15.0.2 +requests>=2.31 +sentencepiece>=0.2 +tensorboard>=2.14 +torchmetrics>=1.3.1 +transformers<4.52,>=4.51.3 +zstandard>=0.22 + +[extra:sys_platform != "win32"] +uvloop>=0.2 + +[extra:sys_platform == "darwin"] +bitsandbytes<0.43,>=0.42 + +[extra:sys_platform == "linux" or sys_platform == "win32"] +bitsandbytes<0.45.5,>=0.45.2 + +[test] +einops>=0.7 +protobuf>=4.23.4 +pytest>=8.1.1 +pytest-benchmark>=5.1 +pytest-dependency>=0.6 +pytest-rerunfailures>=14 +pytest-timeout>=2.3.1 diff --git a/litgpt.egg-info/top_level.txt b/litgpt.egg-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..52fb5d563e5764c2a4bcaf01a455c41e0468ef13 --- /dev/null +++ b/litgpt.egg-info/top_level.txt @@ -0,0 +1 @@ +litgpt diff --git a/litgpt/__init__.py b/litgpt/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6c8dc3d8206e7d92b9bbe051757645fb3c88acb3 --- /dev/null +++ b/litgpt/__init__.py @@ -0,0 +1,20 @@ +# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file. 
+ +import logging +import re + +from litgpt.api import LLM +from litgpt.config import Config +from litgpt.model import GPT # needs to be imported before config +from litgpt.prompts import PromptStyle +from litgpt.tokenizer import Tokenizer + +# Suppress excessive warnings, see https://github.com/pytorch/pytorch/issues/111632 +pattern = re.compile(".*Profiler function .* will be ignored") +logging.getLogger("torch._dynamo.variables.torch").addFilter(lambda record: not pattern.search(record.getMessage())) + +# Avoid printing state-dict profiling output at the WARNING level when saving a checkpoint +logging.getLogger("torch.distributed.fsdp._optim_utils").disabled = True +logging.getLogger("torch.distributed.fsdp._debug_utils").disabled = True + +__all__ = ["LLM", "GPT", "Config", "PromptStyle", "Tokenizer"] diff --git a/litgpt/__main__.py b/litgpt/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..3b045473bd405629738c84975979f9ec29a7a8a5 --- /dev/null +++ b/litgpt/__main__.py @@ -0,0 +1,75 @@ +# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file. + +import warnings + +import torch +from jsonargparse import CLI, set_config_read_mode, set_docstring_parse_options + +from litgpt.chat.base import main as chat_fn +from litgpt.deploy.serve import run_server as serve_fn +from litgpt.eval.evaluate import convert_and_evaluate as evaluate_fn +from litgpt.finetune.adapter import setup as finetune_adapter_fn +from litgpt.finetune.adapter_v2 import setup as finetune_adapter_v2_fn +from litgpt.finetune.full import setup as finetune_full_fn +from litgpt.finetune.lora import setup as finetune_lora_fn +from litgpt.generate.adapter import main as generate_adapter_fn +from litgpt.generate.adapter_v2 import main as generate_adapter_v2_fn +from litgpt.generate.base import main as generate_base_fn +from litgpt.generate.full import main as generate_full_fn +from litgpt.generate.sequentially import main as generate_sequentially_fn +from litgpt.generate.speculative_decoding import main as generate_speculatively_fn +from litgpt.generate.tp import main as generate_tp_fn +from litgpt.pretrain import setup as pretrain_fn +from litgpt.perplexity import setup as perplexity_fn +from litgpt.scripts.convert_hf_checkpoint import convert_hf_checkpoint as convert_hf_checkpoint_fn +from litgpt.scripts.convert_lit_checkpoint import convert_lit_checkpoint as convert_lit_checkpoint_fn +from litgpt.scripts.convert_pretrained_checkpoint import ( + convert_pretrained_checkpoint as convert_pretrained_checkpoint_fn, +) +from litgpt.scripts.download import download_from_hub as download_fn +from litgpt.scripts.merge_lora import merge_lora as merge_lora_fn + + +def main() -> None: + parser_data = { + "download": download_fn, + "chat": chat_fn, + "finetune": finetune_lora_fn, + "finetune_lora": finetune_lora_fn, + "finetune_full": finetune_full_fn, + "finetune_adapter": finetune_adapter_fn, + "finetune_adapter_v2": finetune_adapter_v2_fn, + "pretrain": pretrain_fn, + "perplexity": perplexity_fn, + "generate": generate_base_fn, + "generate_full": generate_full_fn, + "generate_adapter": generate_adapter_fn, + "generate_adapter_v2": generate_adapter_v2_fn, + "generate_sequentially": generate_sequentially_fn, + "generate_speculatively": generate_speculatively_fn, + "generate_tp": generate_tp_fn, + "convert_to_litgpt": convert_hf_checkpoint_fn, + "convert_from_litgpt": convert_lit_checkpoint_fn, + "convert_pretrained_checkpoint": convert_pretrained_checkpoint_fn, + "merge_lora": 
merge_lora_fn, + "evaluate": evaluate_fn, + "serve": serve_fn, + } + + set_docstring_parse_options(attribute_docstrings=True) + set_config_read_mode(urls_enabled=True) + + # PyTorch bug that raises a false-positive warning + # More info: https://github.com/Lightning-AI/litgpt/issues/1561 + warning_message = r"The epoch parameter in `scheduler.step\(\)` was not necessary and is being deprecated.*" + + warnings.filterwarnings( + action="ignore", message=warning_message, category=UserWarning, module=r".*torch\.optim\.lr_scheduler.*" + ) + + torch.set_float32_matmul_precision("high") + CLI(parser_data) + + +if __name__ == "__main__": + main() diff --git a/litgpt/adapter.py b/litgpt/adapter.py new file mode 100644 index 0000000000000000000000000000000000000000..5297df4eb354f5c9f6c8b5ddd494c78030bad3c4 --- /dev/null +++ b/litgpt/adapter.py @@ -0,0 +1,129 @@ +# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file. + +"""Implementation of the paper: + +LLaMA-Adapter: Efficient Fine-tuning of Language Models with Zero-init Attention +https://arxiv.org/abs/2303.16199 + +Port for LitGPT +""" + +from dataclasses import dataclass +from typing import Any, Dict, Optional, Tuple + +import torch +import torch.nn as nn +from typing_extensions import Self + +from litgpt.config import Config as BaseConfig +from litgpt.model import GPT as BaseModel +from litgpt.model import Block as BaseBlock +from litgpt.model import CausalSelfAttention as BaseCausalSelfAttention + + +@dataclass +class Config(BaseConfig): + adapter_prompt_length: int = 10 + adapter_start_layer: int = 2 + + +class GPT(BaseModel): + # Copy & paste from :class:`model.GPT`. Note that :class:`Block` is new here. + def __init__(self, config: Config) -> None: + nn.Module.__init__(self) + assert config.padded_vocab_size is not None + self.config = config + + self.lm_head = nn.Linear(config.n_embd, config.padded_vocab_size, bias=config.lm_head_bias) + self.transformer = nn.ModuleDict( + dict( + wte=nn.Embedding(config.padded_vocab_size, config.n_embd), + h=nn.ModuleList(Block(config, block_idx) for block_idx in range(config.n_layer)), + ln_f=config.norm_class(config.n_embd, eps=config.norm_eps), + ) + ) + self.mask_cache: Optional[torch.Tensor] = None + self.max_seq_length = self.config.block_size + + @classmethod + def from_name(cls, name: str, **kwargs: Any) -> Self: + return cls(Config.from_name(name, **kwargs)) + + def _init_weights(self, module: nn.Module) -> None: + """Meant to be used with `gpt.apply(gpt._init_weights)`. 
Unused method left for completeness.""" + super()._init_weights(module) + if isinstance(module, CausalSelfAttention): + module.reset_parameters() + + +class Block(BaseBlock): + def __init__(self, config: Config, block_idx: int) -> None: + super().__init__(config, block_idx) + self.attn = CausalSelfAttention(config, block_idx) + + +class CausalSelfAttention(BaseCausalSelfAttention): + """A modification of `litgpt.model.CausalSelfAttention` that adds the attention + over the adaption prompt.""" + + def __init__(self, config: Config, block_idx: int) -> None: + super().__init__(config, block_idx) + if block_idx >= config.adapter_start_layer: + # adapter embedding layer + self.adapter_wte = nn.Embedding(config.adapter_prompt_length, config.n_embd) + # gate for adaption + self.gating_factor = torch.nn.Parameter(torch.zeros(1, 1, config.n_head, 1)) + # kv cache for inference + self.adapter_kv_cache: Optional[Tuple[torch.Tensor, torch.Tensor]] = None + + def scaled_dot_product_attention( + self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, mask: Optional[torch.Tensor] = None + ) -> torch.Tensor: + y = super().scaled_dot_product_attention(q, k, v, mask) + if self.block_idx < self.config.adapter_start_layer: + return y + + aT = self.config.adapter_prompt_length + if self.adapter_kv_cache is not None: + # since this uses the wte weights as the prefix and the kv cache is only used during inference, ak and av + # are the same every call + ak, av = self.adapter_kv_cache + else: + prefix = self.adapter_wte.weight.reshape(1, aT, self.config.n_embd) + aqkv = self.qkv(prefix) + q_per_kv = self.config.n_head // self.config.n_query_groups + aqkv = aqkv.view(1, aT, self.config.n_query_groups, q_per_kv + 2, self.config.head_size) + aqkv = aqkv.permute(0, 2, 3, 1, 4) + _, ak, av = aqkv.split((q_per_kv, 1, 1), dim=2) + if self.config.n_query_groups != 1: + # for MHA this is a no-op + ak = ak.repeat_interleave(q_per_kv, dim=2) + av = av.repeat_interleave(q_per_kv, dim=2) + ak = ak.view(1, -1, aT, self.config.head_size) # (1, nh_ak, aT, hs) + av = av.view(1, -1, aT, self.config.head_size) # (1, nh_av, aT, hs) + self.adapter_kv_cache = (ak, av) + + T = q.size(2) + amask = torch.ones(T, aT, dtype=torch.bool, device=q.device) + ay = super().scaled_dot_product_attention(q, ak, av, amask) + return y + self.gating_factor * ay + + def reset_parameters(self) -> None: + if hasattr(self, "gating_factor"): + torch.nn.init.zeros_(self.gating_factor) + + def _load_from_state_dict(self, state_dict: Dict, prefix: str, *args: Any, **kwargs: Any) -> None: + """For compatibility with older checkpoints.""" + if (key := prefix + "gating_factor") in state_dict and state_dict[key].size(1) == self.config.n_head: + state_dict[key] = state_dict[key].permute(0, 2, 1, 3) + super()._load_from_state_dict(state_dict, prefix, *args, **kwargs) + + +def mark_only_adapter_as_trainable(model: GPT) -> None: + """Sets `requires_grad=False` for all non-adapter weights.""" + for name, param in model.named_parameters(): + param.requires_grad = adapter_filter(name, param) + + +def adapter_filter(key: str, value: Any) -> bool: + return "adapter_wte" in key or "gating_factor" in key diff --git a/litgpt/adapter_v2.py b/litgpt/adapter_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..fb4d12ae08b2f9a9adc7268a503e800d0c321219 --- /dev/null +++ b/litgpt/adapter_v2.py @@ -0,0 +1,210 @@ +# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file. 
+ +"""Implementation of the paper: + +LLaMA-Adapter V2: Parameter-Efficient Visual Instruction Model +https://arxiv.org/abs/2304.15010 + +Port for LitGPT +""" + +from dataclasses import dataclass +from typing import Any, Dict, Optional, Type + +import torch +import torch.nn as nn +from typing_extensions import Self + +import litgpt +from litgpt.adapter import GPT as BaseModel +from litgpt.adapter import CausalSelfAttention as BaseCausalSelfAttention +from litgpt.adapter import Config as BaseConfig +from litgpt.model import Block as BaseBlock +from litgpt.scripts.convert_hf_checkpoint import qkv_reassemble +from litgpt.utils import map_old_state_dict_weights + + +@dataclass +class Config(BaseConfig): + @property + def mlp_class(self) -> Type: + return getattr(litgpt.adapter_v2, self.mlp_class_name) + + +def adapter_filter(key: str, value: Any) -> bool: + adapter_substrings = ( + # regular adapter v1 parameters + "adapter_wte", + "gating_factor", + # adapter v2: new bias and scale used in Linear + "adapter_scale", + "adapter_bias", + # adapter v2: Norm parameters are now trainable + "norm_1", + "norm_2", + "ln_f", + ) + return any(s in key for s in adapter_substrings) + + +class AdapterV2Linear(torch.nn.Module): + def __init__(self, in_features: int, out_features: int, **kwargs) -> None: + super().__init__() + self.linear = torch.nn.Linear(in_features, out_features, **kwargs) + self.adapter_bias = torch.nn.Parameter(torch.zeros(out_features), requires_grad=False) + self.adapter_scale = torch.nn.Parameter(torch.ones(out_features), requires_grad=False) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return self.adapter_scale * (self.linear(x) + self.adapter_bias) + + def reset_parameters(self) -> None: + nn.init.zeros_(self.adapter_bias) + nn.init.ones_(self.adapter_scale) + + +class GPT(BaseModel): + # Copy & paste from :class:`model.GPT`. Note that :class:`Block` is new here. + def __init__(self, config: Config) -> None: + nn.Module.__init__(self) + assert config.padded_vocab_size is not None + self.config = config + + self.lm_head = AdapterV2Linear(config.n_embd, config.padded_vocab_size, bias=config.lm_head_bias) + self.transformer = nn.ModuleDict( + dict( + wte=nn.Embedding(config.padded_vocab_size, config.n_embd), + h=nn.ModuleList(Block(config, block_idx) for block_idx in range(config.n_layer)), + ln_f=config.norm_class(config.n_embd, eps=config.norm_eps), + ) + ) + self.mask_cache: Optional[torch.Tensor] = None + self.max_seq_length = self.config.block_size + + @classmethod + def from_name(cls, name: str, **kwargs: Any) -> Self: + return cls(Config.from_name(name, **kwargs)) + + def _init_weights(self, module: nn.Module) -> None: + """Meant to be used with `gpt.apply(gpt._init_weights)`. 
Unused method left for completeness.""" + super()._init_weights(module) + if isinstance(module, AdapterV2Linear): + module.reset_parameters() + + def _load_from_state_dict(self, state_dict: Dict, prefix: str, *args: Any, **kwargs: Any) -> None: + """For compatibility with base checkpoints.""" + mapping = {"lm_head.weight": "lm_head.linear.weight", "lm_head.bias": "lm_head.linear.bias"} + state_dict = map_old_state_dict_weights(state_dict, mapping, prefix) + super()._load_from_state_dict(state_dict, prefix, *args, **kwargs) + + +class Block(BaseBlock): + def __init__(self, config: Config, block_idx: int) -> None: + super().__init__(config, block_idx) + self.attn = CausalSelfAttention(config, block_idx) + self.mlp = config.mlp_class(config) + + +class CausalSelfAttention(BaseCausalSelfAttention): + """A modification of `litgpt.adapter.CausalSelfAttention` that uses the Adapter V2 Linear class""" + + # Copy&paste from :class:`model.CausalSelfAttention` + def __init__(self, config: Config, block_idx: int) -> None: + super().__init__(config, block_idx) + # key, query, value projections for all heads, but in a batch + shape = (config.n_head + 2 * config.n_query_groups) * config.head_size + self.qkv = AdapterV2Linear(in_features=config.n_embd, out_features=shape, bias=config.bias or config.attn_bias) + # output projection + self.proj = AdapterV2Linear(config.head_size * config.n_head, config.n_embd, bias=config.bias) + + def _load_from_state_dict(self, state_dict: Dict, prefix: str, *args: Any, **kwargs: Any) -> None: + """For compatibility with base and/or legacy checkpoints.""" + mapping = { + "qkv.weight": "qkv.linear.weight", + "qkv.bias": "qkv.linear.bias", + "proj.weight": "proj.linear.weight", + "proj.bias": "proj.linear.bias", + } + state_dict = map_old_state_dict_weights(state_dict, mapping, prefix) + # For compatibility with older checkpoints + if (key := prefix + "gating_factor") in state_dict and state_dict[key].size(1) == self.config.n_head: + state_dict[key] = state_dict[key].permute(0, 2, 1, 3) + + for attr in ("weight", "bias"): + legacy_key = f"{prefix}attn.linear.{attr}" + current_key = f"{prefix}qkv.linear.{attr}" + if legacy_key in state_dict: + state_dict[current_key] = qkv_reassemble(state_dict.pop(legacy_key), self.config) + + super()._load_from_state_dict(state_dict, prefix, *args, **kwargs) + + +class GptNeoxMLP(litgpt.model.GptNeoxMLP): + def __init__(self, config: Config) -> None: + nn.Module.__init__(self) + self.fc = AdapterV2Linear(config.n_embd, config.intermediate_size, bias=config.bias) + self.proj = AdapterV2Linear(config.intermediate_size, config.n_embd, bias=config.bias) + self.config = config + + def _load_from_state_dict(self, state_dict: Dict, prefix: str, *args: Any, **kwargs: Any) -> None: + """For compatibility with base checkpoints.""" + mapping = { + "fc.weight": "fc.linear.weight", + "fc.bias": "fc.linear.bias", + "proj.weight": "proj.linear.weight", + "proj.bias": "proj.linear.bias", + } + state_dict = map_old_state_dict_weights(state_dict, mapping, prefix) + super()._load_from_state_dict(state_dict, prefix, *args, **kwargs) + + +class LLaMAMLP(litgpt.model.LLaMAMLP): + def __init__(self, config: Config, intermediate_size: Optional[int] = None) -> None: + nn.Module.__init__(self) + self.intermediate_size = intermediate_size or config.intermediate_size + self.fc_1 = AdapterV2Linear(config.n_embd, self.intermediate_size, bias=config.bias) + self.fc_2 = AdapterV2Linear(config.n_embd, self.intermediate_size, bias=config.bias) + self.proj = 
AdapterV2Linear(self.intermediate_size, config.n_embd, bias=config.bias) + self.config = config + + def _load_from_state_dict(self, state_dict: Dict, prefix: str, *args: Any, **kwargs: Any) -> None: + """For compatibility with base checkpoints.""" + mapping = { + "fc_1.weight": "fc_1.linear.weight", + "fc_1.bias": "fc_1.linear.bias", + "fc_2.weight": "fc_2.linear.weight", + "fc_2.bias": "fc_2.linear.bias", + "proj.weight": "proj.linear.weight", + "proj.bias": "proj.linear.bias", + } + state_dict = map_old_state_dict_weights(state_dict, mapping, prefix) + super()._load_from_state_dict(state_dict, prefix, *args, **kwargs) + + +class GemmaMLP(LLaMAMLP): + def forward(self, x: torch.Tensor) -> torch.Tensor: + x_fc_1 = self.fc_1(x) + x_fc_2 = self.fc_2(x) + x = torch.nn.functional.gelu(x_fc_1, approximate=self.config.gelu_approximate) * x_fc_2 + return self.proj(x) + + +class LLaMAMoE(litgpt.model.LLaMAMoE): + def __init__(self, config: Config) -> None: + nn.Module.__init__(self) + self.gate = AdapterV2Linear(config.n_embd, config.n_expert, bias=False) + self.experts = nn.ModuleList( + LLaMAMLP(config, intermediate_size=config.moe_intermediate_size) for _ in range(config.n_expert) + ) + self.config = config + + def _load_from_state_dict(self, state_dict: Dict, prefix: str, *args: Any, **kwargs: Any) -> None: + """For compatibility with base checkpoints.""" + mapping = {"gate.weight": "gate.linear.weight"} + state_dict = map_old_state_dict_weights(state_dict, mapping, prefix) + super()._load_from_state_dict(state_dict, prefix, *args, **kwargs) + + +def mark_only_adapter_v2_as_trainable(model: GPT) -> None: + """Sets requires_grad=False for all non-adapter weights""" + for name, param in model.named_parameters(): + param.requires_grad = adapter_filter(name, param) diff --git a/litgpt/api.py b/litgpt/api.py new file mode 100644 index 0000000000000000000000000000000000000000..32cc1966032b64a2846056209b4e87b1a05b0669 --- /dev/null +++ b/litgpt/api.py @@ -0,0 +1,734 @@ +# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file. 
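+#
+# Quick usage sketch (the model name below is only an example):
+#
+#     from litgpt.api import LLM
+#
+#     # single-device: load() places the model automatically
+#     llm = LLM.load("microsoft/phi-2")
+#     print(llm.generate("What do Llamas eat?", top_k=1))
+#
+#     # multi-GPU: defer placement with distribute=None, then shard explicitly
+#     llm = LLM.load("microsoft/phi-2", distribute=None)
+#     llm.distribute(devices=2, generate_strategy="sequential")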
+# +# This file implements the LitGPT Python API +import sys +import time +from pathlib import Path +from typing import Any, Callable, List, Literal, Optional, Tuple, Union + +import lightning as L +import numpy as np +import torch +from lightning.fabric.accelerators import CUDAAccelerator +from lightning.fabric.plugins import BitsandbytesPrecision +from tqdm import tqdm + +from litgpt.chat.base import generate as stream_generate_fn +from litgpt.config import Config, name_to_config +from litgpt.generate.base import generate as generate_fn +from litgpt.generate.sequentially import sequential +from litgpt.generate.tp import tensor_parallel +from litgpt.model import GPT +from litgpt.prompts import PromptStyle, has_prompt_style, load_prompt_style, save_prompt_style +from litgpt.tokenizer import Tokenizer +from litgpt.utils import ( + auto_download_checkpoint, + check_file_size_on_cpu_and_warn, + check_nvlink_connectivity, + chunked_cross_entropy, + copy_config_files, + extend_checkpoint_dir, + get_default_supported_precision, + load_checkpoint, + save_config, +) + + +class LLM(torch.nn.Module): + def __init__( + self, + model: GPT, + preprocessor=None, + prompt_style: PromptStyle = None, + devices: Union[int, List[int]] = None, + config: Config = None, + checkpoint_dir: Path = None, + fabric: L.Fabric = None, + generate_strategy: Optional[Literal["sequential", "tensor_parallel"]] = None, + kv_cache_initialized: bool = False, + fixed_kv_cache_size: Union[int, Literal["max_model_supported"], None] = None, + ) -> None: + super().__init__() + self.model = model + self.preprocessor = preprocessor + self.devices = devices + self.prompt_style = prompt_style + self.config = config + self.checkpoint_dir = checkpoint_dir + self.fabric = fabric + self.generate_strategy = generate_strategy + self.kv_cache_initialized = kv_cache_initialized + self.fixed_kv_cache_size = fixed_kv_cache_size + self.prev_generated_seq_length = 0 + + """ + LLM model class for inference, pretraining, and finetuning. + + Example: + from litgpt.api import LLM + + llm = LLM.load("microsoft/phi-2") + text = llm.generate("What do Llamas eat?", top_k=1) + print(text) + """ + + @property + def tokenizer(self): + return self.preprocessor.tokenizer + + def state_dict(self, destination=None, prefix="", keep_vars=False): + return self.model.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars) + + def load_state_dict(self, state_dict, strict=True): + return self.model.load_state_dict(state_dict, strict=strict) + + def forward( + self, + input_ids: torch.Tensor, + target_ids: Optional[torch.Tensor] = None, + loss_fn: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None, + ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: + logits = self.model(input_ids) + if target_ids is not None: + if loss_fn is None: + loss_fn = chunked_cross_entropy + loss = loss_fn(logits[..., :-1, :], target_ids[..., 1:]) + return logits, loss + else: + return logits + + def trainer_setup(self, trainer_ckpt: Optional[Path] = None) -> None: + """Initializes the model checkpoint for PyTorch Lightning Trainer contexts""" + self.model = GPT(self.config) + + if trainer_ckpt is not None: + # strip the object name key from the state_dict + state_dict = torch.load(trainer_ckpt, weights_only=True)["state_dict"] + first_key = next(iter(state_dict)) + prefix = first_key.split(".")[0] + "." 
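+            # the first dotted component of the keys (e.g. "model.") is the attribute name the
+            # Lightning wrapper stored the GPT under; strip it so the remaining keys match this
+            # module's own state_dict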
+ keys_to_modify = [key for key in state_dict if key.startswith(prefix)] + for key in keys_to_modify: + new_key = key.replace(prefix, "", 1) + state_dict[new_key] = state_dict.pop(key) + + self.load_state_dict(state_dict, strict=True) + + elif self.checkpoint_dir is not None: + state_dict = torch.load(self.checkpoint_dir / "lit_model.pth", weights_only=False) + self.load_state_dict(state_dict, strict=False) + + else: + raise ValueError( + "No checkpoint found. Either provide a valid path via `trainer_ckpt` " + "or ensure that `self.checkpoint_dir` points to a folder containing a `lit_model.pth` weight file." + ) + + def save(self, out_dir: Optional[Path] = None, prompt_style: Optional[PromptStyle] = None) -> None: + out_dir = Path(out_dir) + save_path = out_dir / "lit_model.pth" + save_path.parent.mkdir(parents=True, exist_ok=True) + + if prompt_style is None: + prompt_style = PromptStyle.from_config(self.config) + if self.fabric is None: + torch.save(self.state_dict(), save_path) + else: + self.fabric.save(save_path, self.state_dict()) + + if self.fabric is None or self.fabric.global_rank == 0: + # If initialization a model with random weights, the checkpoint dir can be none + if self.checkpoint_dir is not None: + copy_config_files(Path(self.checkpoint_dir), save_path.parent) + else: + save_config(self.config, out_dir) + + save_prompt_style(prompt_style, save_path.parent) + + @classmethod + def load( + cls, + model: str, + init: Optional[Literal["pretrained", "random"]] = "pretrained", + tokenizer_dir: Optional[Path] = None, + access_token: Optional[str] = None, + distribute: Optional[Literal["auto"]] = "auto", + ) -> "LLM": + """ + Loads the LLM from a local directory or model hub. + + Arguments + model: A local path to a directory containing the model weights or a valid model name. + You can get a list of valid model names via the `litgpt download list` command line argument. + init: If "pretrained" (default), downloads the model from the HF Hub if a local model can't be found at the `model` + directory name; otherwise loads the model from the local directory. + If "random", initializes the `model` with random weights. + tokenizer_dir: An optional tokenizer directory if `model` is not a checkpoint directory, or if a user + wants to use a different tokenizer instead. + access_token: Optional API token to access models with restrictions when using `init="pretrained"`. + distribute: If "auto" (default), initializes the model on a single GPU if available and otherwise on the CPU. + To have more control over the model distribution strategy and utilize multiple GPUs, you can set + `llm = LLM.load(..., distribute=None)` and call `llm.distribute(...)` manually. + """ + + allowed_init = {"pretrained", "random"} + + if init == "pretrained": + checkpoint_dir = auto_download_checkpoint( + model_name=model, access_token=access_token, ignore_tokenizer_files=tokenizer_dir is not None + ) + config = Config.from_file(checkpoint_dir / "model_config.yaml") + + elif init == "random": + checkpoint_dir = None + try: + config = Config.from_name(model) + except ValueError: + print(f"Model name {model} is not supported.\n") + available_models = "\n".join(sorted(name_to_config)) + print(f"Available values:\n{available_models}") + return + + else: + raise ValueError(f"Invalid init option: {init}. 
Must be one of {allowed_init}") + + torch.set_float32_matmul_precision("high") + + if tokenizer_dir is not None: + tokenizer_dir = extend_checkpoint_dir(Path(tokenizer_dir)) + tokenizer = Tokenizer(tokenizer_dir) + elif checkpoint_dir is not None: + tokenizer = Tokenizer(checkpoint_dir) + else: + raise ValueError("Provide a path to a tokenizer directory via the `tokenizer_dir` setting.") + + if checkpoint_dir is not None: + prompt_style = ( + load_prompt_style(checkpoint_dir) + if has_prompt_style(checkpoint_dir) + else PromptStyle.from_config(config) + ) + else: + prompt_style = PromptStyle.from_config(config) + + if distribute == "auto": + if torch.cuda.is_available(): + accelerator = "cuda" + elif torch.backends.mps.is_available(): + accelerator = "mps" + else: + accelerator = "cpu" + + fabric = L.Fabric( + accelerator=accelerator, + devices=1, + precision=get_default_supported_precision(training=False), + ) + + with fabric.init_module(empty_init=False): + model = GPT(config) + model.eval() + preprocessor = Preprocessor(tokenizer, device=fabric.device) + + if checkpoint_dir is not None: + checkpoint_path = checkpoint_dir / "lit_model.pth" + check_file_size_on_cpu_and_warn(checkpoint_path, fabric.device) + load_checkpoint(fabric, model, checkpoint_path) + + model = fabric.setup_module(model) + + else: + preprocessor = Preprocessor(tokenizer, device="cuda" if torch.cuda.is_available() else "cpu") + model = None + fabric = None + + return cls( + model=model, + preprocessor=preprocessor, + prompt_style=prompt_style, + config=config, + checkpoint_dir=checkpoint_dir, + fabric=fabric, + generate_strategy=None, + kv_cache_initialized=False, + fixed_kv_cache_size=False, + ) + + def distribute( + self, + accelerator: Literal["cpu", "cuda", "auto"] = "auto", + devices: Union[int, Literal["auto"]] = "auto", + precision: Optional[Any] = None, + quantize: Optional[Literal["bnb.nf4", "bnb.nf4-dq", "bnb.fp4", "bnb.fp4-dq", "bnb.int8"]] = None, + generate_strategy: Optional[Literal["sequential", "tensor_parallel"]] = None, + fixed_kv_cache_size: Union[int, Literal["max_model_supported"], None] = None, + ) -> None: + """ + Moves the model onto specified devices for single-GPU or multi-GPU inference + + accelerator: Which device type to load the model on ("cpu", "gpu", "mps", "cuda", or "auto") + devices: The number of devices (1, 2, etc.) or "auto", which uses all available devices + quantize: Whether to quantize the model and using which method: + - bnb.nf4, bnb.nf4-dq, bnb.fp4, bnb.fp4-dq: 4-bit quantization from bitsandbytes + - bnb.int8: 8-bit quantization from bitsandbytes + for more details, see https://github.com/Lightning-AI/litgpt/blob/main/tutorials/quantize.md + precision: Indicates the Fabric precision setting to use. + For instance, "32-true", "16-mixed", "16-true", "bf16-mixed", "bf16-true". + For more details, see https://lightning.ai/docs/fabric/stable/api/fabric_args.html#precision + generate_strategy: Whether to use a sequential model generation strategy. The "sequential" settings allows running + models that wouldn't fit in a single card by partitioning the transformer blocks across + all devices and running them sequentially. Sequential generation may be slower but allows using larger models. + Note that sequential generation sets `fixed_kv_cache_size="max_model_supported"`. You can set it to a lower integer + value, `fixed_kv_cache_size=256` to reduce memory. The `fixed_kv_cache_size` value determines the maximum number + of tokens that can be returned via `llm.generate(...)`. 
+ fixed_kv_cache_size: If set to an integer value or "max_model_supported" is set, the kv-cache won't be resized dynamically + during `llm.generate` calls. Use this setting if you plan to compile the model or use `generate_strategy="sequential`. + Note that the chosen `fixed_kv_cache_size` value determines the maximum number of tokens that can be returned in `llm.generate(...)`. + """ + + if self.checkpoint_dir is None: + raise NotImplementedError( + "The LLM was initialized with init='random' but .distribute() " + "currently only supports pretrained weights." + ) + + allowed_accelerators = {"cpu", "gpu", "cuda", "mps", "auto"} + if accelerator not in allowed_accelerators: + raise ValueError(f"Invalid accelerator: {accelerator}. Must be one of {allowed_accelerators}.") + + if accelerator == "auto": + if torch.cuda.is_available(): + accelerator = "cuda" + elif torch.backends.mps.is_available(): + accelerator = "mps" + else: + accelerator = "cpu" + + if generate_strategy in ("sequential", "tensor_parallel") and accelerator not in ("cuda", "gpu"): + raise NotImplementedError( + f"generate_strategy='{generate_strategy}' is only supported for accelerator='cuda'|'gpu'." + ) + + if devices == "auto": + if generate_strategy in ("sequential", "tensor_parallel"): + total_devices = CUDAAccelerator.auto_device_count() + else: + total_devices = 1 + elif isinstance(devices, int) and accelerator == "cuda": + use_devices = calculate_number_of_devices(devices) + total_devices = CUDAAccelerator.auto_device_count() + if use_devices > total_devices: + raise ValueError( + f"You selected more devices ({use_devices}) than available in your system ({total_devices})." + ) + else: + total_devices = use_devices + + if total_devices > 1 and generate_strategy not in ("sequential", "tensor_parallel"): + raise NotImplementedError( + "Support for multiple devices is currently only implemented for generate_strategy='sequential'|'tensor_parallel'." 
+ ) + elif accelerator == "cpu" or accelerator == "mps": + total_devices = 1 + + else: + raise ValueError(f"devices argument must be an integer or 'auto', got {devices}") + + print(f"Using {total_devices} device(s)", file=sys.stderr) + + if precision is None: + precision = get_default_supported_precision(training=False) + + print("Precision set", file=sys.stderr) + + plugins = None + if quantize is not None and quantize.startswith("bnb."): + if "mixed" in precision: + raise ValueError("The combination of quantization and mixed precision is not supported.") + dtype = {"16-true": torch.float16, "bf16-true": torch.bfloat16, "32-true": torch.float32}[precision] + plugins = BitsandbytesPrecision(quantize[4:], dtype) + precision = None + + # set "ddp" as the strategy for the launching functionality, but there's no data-parallelism + if generate_strategy != "tensor_parallel": + fabric = L.Fabric( + accelerator=accelerator, + devices=1, # Otherwise sequential wouldn't work, see litgpt/generate/sequentially.py + # devices=devices, + precision=precision, + plugins=plugins, + ) + else: + fabric = L.Fabric( + accelerator=accelerator, devices=total_devices, strategy="ddp", precision=precision, plugins=plugins + ) + if torch.cuda.is_available() and fabric.accelerator.auto_device_count() > 1: + check_nvlink_connectivity(fabric) + fabric.launch() + + print("Fabric launched", file=sys.stderr) + + self.kv_cache_initialized = False + if generate_strategy is None: + with fabric.init_module(empty_init=(total_devices > 1)): + model = GPT(self.config) + model.eval() + + if self.checkpoint_dir is not None: + load_checkpoint(fabric, model, self.checkpoint_dir / "lit_model.pth") + + model = fabric.setup_module(model) + + if fixed_kv_cache_size is not None: + if fixed_kv_cache_size is None or fixed_kv_cache_size == "max_model_supported": + kv_cache_size = model.max_seq_length + else: + kv_cache_size = fixed_kv_cache_size + model.set_kv_cache(batch_size=1, max_seq_length=kv_cache_size, device=fabric.device) + self.kv_cache_initialized = True + self.fixed_kv_cache_size = fixed_kv_cache_size + + elif generate_strategy in ("sequential", "tensor_parallel"): + with fabric.init_tensor(), torch.device("meta"): + model = GPT(self.config) + model.eval() + + if generate_strategy == "sequential": + state_dict = torch.load( + str(self.checkpoint_dir / "lit_model.pth"), mmap=True, map_location="cpu", weights_only=False + ) + model.load_state_dict(state_dict, assign=True) + model = fabric.setup_module(model, move_to_device=False) + + if fixed_kv_cache_size is None: + fixed_kv_cache_size = "max_model_supported" + if fixed_kv_cache_size == "max_model_supported": + kv_cache_size = model.max_seq_length + else: + kv_cache_size = fixed_kv_cache_size + + model = sequential(model, fabric.device, kv_cache_size, total_devices) + self.fixed_kv_cache_size = fixed_kv_cache_size + + elif generate_strategy == "tensor_parallel": + if fabric.global_rank == 0: + pbar = tqdm(total=fabric.world_size, desc="Loading model weights") + for rank in range(fabric.world_size): + if fabric.global_rank == rank: + state_dict = torch.load( + str(self.checkpoint_dir / "lit_model.pth"), + mmap=True, + map_location="cpu", + weights_only=False, + ) + model.load_state_dict(state_dict, assign=True) + + # cannot use `.setup_module` because it will wrap with DDP + model = fabric._precision.convert_module(model) + model = tensor_parallel(fabric, model) + + with fabric.init_tensor(): + if fixed_kv_cache_size is None: + fixed_kv_cache_size = "max_model_supported" + if 
fixed_kv_cache_size == "max_model_supported": + kv_cache_size = model.max_seq_length + else: + kv_cache_size = fixed_kv_cache_size + model.max_seq_length = kv_cache_size + # the rope cache which is on meta device + model.cos, model.sin = model.rope_cache() + # enable the kv cache + model.set_kv_cache(batch_size=1) + model.eval() + model = fabric.to_device(model) + + fabric.barrier() + if fabric.global_rank == 0: + pbar.update(1) + + if fabric.global_rank == 0: + pbar.close() + + self.kv_cache_initialized = True + + else: + raise ValueError(f"Unsupported generate_strategy: {generate_strategy}") + + self.model = model + self.fabric = fabric + self.preprocessor.device = fabric.device + + @torch.inference_mode() + def generate( + self, + prompt: str, + sys_prompt: Optional[str] = None, + max_new_tokens: int = 50, + temperature: float = 1.0, + top_k: Optional[int] = None, + top_p: float = 1.0, + return_as_token_ids: bool = False, + stream: bool = False, + ) -> Union[str, torch.Tensor]: + """ + Takes a conditioning sequence (prompt) as input and continues to generate as many tokens as requested. + + Arguments: + model: The model to use. + prompt: The prompt string to use for generating the samples. + sys_prompt: The system prompt string to use for generating the samples. + The system prompt allows the user to provide additional instructions to shape all responses by providing additional context, behavioral guidelines, style, and constraints. + max_new_tokens: The maximum number of new tokens to return. + temperature: Scales the predicted logits by 1 / temperature. + top_k: If specified, only sample among the tokens with the k highest probabilities. + top_p: If specified, it represents the cumulative probability threshold to consider in the sampling process. + In top-p sampling, the next token is sampled from the highest probability tokens + whose cumulative probability exceeds the threshold `top_p`. When specified, + it must be `0 <= top_p <= 1`. Here, `top_p=0` is equivalent + to sampling the most probable token, while `top_p=1` samples from the whole distribution. + It can be used in conjunction with `top_k` and `temperature` with the following order + of application: + + 1. `top_k` sampling + 2. `temperature` scaling + 3. `top_p` sampling + + For more details, see https://arxiv.org/abs/1904.09751 + or https://huyenchip.com/2024/01/16/sampling.html#top_p + return_as_token_ids: If True, returns the token IDs as a torch.Tensor. Otherwise, returns the decoded text as a string. + stream: If True, returns a generator that yields tokens as they are generated. + At the moment, this setting is slower and may use more memory than the non-streaming version. + We plan to resolve this in the future. + """ + if self.model is None: + raise AttributeError( + "The model is not initialized yet; use the .distribute() " + "or .trainer_setup() method to initialize the model." 
+ ) + input_ids = self._text_to_token_ids(prompt, sys_prompt) + prompt_length = input_ids.size(0) + max_returned_tokens = prompt_length + max_new_tokens + + if not self.kv_cache_initialized: + if self.fabric is not None: + device = self.fabric.device + else: + device = self.preprocessor.device + self.model.set_kv_cache(batch_size=1, max_seq_length=max_returned_tokens, device=device) + self.kv_cache_initialized = True + + # Dynamically grow the kv cache size if necessary + if not self.fixed_kv_cache_size and self.prev_generated_seq_length < max_returned_tokens: + tmp_device = self.model.mask_cache.device + self.model.clear_kv_cache() + self.model.set_kv_cache(batch_size=1, max_seq_length=max_returned_tokens, device=tmp_device) + + else: + for block in self.model.transformer.h: + block.attn.kv_cache.reset_parameters() + + self.prev_generated_seq_length = max_returned_tokens + self.model.eval() + + def iterator(): + outputs = stream_generate_fn( + model=self.model, + prompt=input_ids, + max_returned_tokens=max_returned_tokens, + temperature=temperature, + top_k=top_k, + top_p=top_p, + stop_tokens=([self.preprocessor.tokenizer.eos_id],), + ) + if return_as_token_ids: + yield from outputs + else: + for output in outputs: + yield self.preprocessor.decode(output) + return + + if stream: + outputs = iterator() + else: + outputs = generate_fn( + model=self.model, + prompt=input_ids, + max_returned_tokens=max_returned_tokens, + temperature=temperature, + top_k=top_k, + top_p=top_p, + eos_id=self.preprocessor.tokenizer.eos_id, + include_prompt=False, + ) + + if stream: + return outputs + elif return_as_token_ids: + return outputs + else: + return self.preprocessor.decode(outputs) + + def _text_to_token_ids(self, prompt: str, sys_prompt: Optional[str] = None) -> torch.Tensor: + """Utility method to convert a prompt text to token IDs""" + prompt = self.prompt_style.apply(prompt, sys_prompt=sys_prompt) + input_ids = self.preprocessor.encode(prompt) + return input_ids + + def benchmark(self, num_iterations=1, **kwargs): + """ + A wrapper around the .generate() method to calculate runtime performance. + + Arguments: + num_iterations: How often the `.generate()` call is repeated. + kwargs: Keyword arguments that are passed to the .generate() method. + """ + benchmark_dict = {} + + for i in range(num_iterations): + time_to_first_token = None + t0 = time.perf_counter() + outputs = self.generate(**kwargs) + + if kwargs.get("stream", False): + gen_outputs = [] + for e in outputs: + if time_to_first_token is None: + t1 = time.perf_counter() + time_to_first_token = t1 - t0 + gen_outputs.append(e) + outputs = "".join(gen_outputs) + else: + outputs = self.generate( + **kwargs, + ) + benchmark_dict.setdefault("Seconds total", []).append(time.perf_counter() - t0) + + benchmark_dict.setdefault("Seconds to first token", []).append(time_to_first_token) + tokens_generated = self.preprocessor.encode(outputs).size(0) + benchmark_dict.setdefault("Tokens generated", []).append(tokens_generated) + benchmark_dict.setdefault("Inference speed in tokens/sec", []).append( + benchmark_dict["Tokens generated"][-1] / benchmark_dict["Seconds total"][-1] + ) + if self.fabric is not None and self.fabric.device.type == "cuda": + benchmark_dict.setdefault("Total GPU memory allocated in GB", []).append( + torch.cuda.max_memory_allocated() / 1e9 + ) + + return outputs, benchmark_dict + + +class Preprocessor: + """ + Preprocessor class for tokenization and de-tokenization. 
+ """ + + def __init__(self, tokenizer: Tokenizer, device: str = "cpu") -> None: + self.tokenizer = tokenizer + self.device = device + + def encode(self, text: str) -> torch.Tensor: + return self.tokenizer.encode(text, device=self.device) + + def decode(self, token_ids: torch.Tensor) -> str: + return self.tokenizer.decode(token_ids) + + +def calculate_number_of_devices(devices): + """ + Utility function to calculate the number of devices. + """ + num_devices = devices if isinstance(devices, int) else len(devices) if isinstance(devices, list) else 0 + return num_devices + + +def benchmark_dict_to_markdown_table(data): + """ + Converts .benchmark() outputs to a markdown table + """ + markdown_table = ( + "| Metric | Mean | Std Dev |\n" + ) + markdown_table += ( + "|-------------------------------------|-----------------------------|-----------------------------|\n" + ) + + for key, values in data.items(): + mean_value = np.mean(values) + std_dev_value = np.std(values, ddof=1) + + formatted_mean = f"{mean_value:.2f}" + formatted_std_dev = f"{std_dev_value:.2f}" + + markdown_table += f"| {key.ljust(35)} | {formatted_mean.ljust(27)} | {formatted_std_dev.ljust(27)} |\n" + + return markdown_table + + +def pull_request_benchmark_util(model_name="microsoft/phi-2", num_iterations=6): + def print_table(header, data): + print(f"\n### {header}\n") + markdown_table = ( + f"| Metric | First Iteration | " + f"Iter 2-{num_iterations} Mean | Iter 2-{num_iterations} Standard Dev. |\n" + f"|--------------------------------------|-----------------|" + f"-------------------|-------------------------|\n" + ) + + for key, value in data.items(): + first_iteration = f"{value[0]:.2f}" if value[0] is not None else "N/A" + clean_values = [v for v in value[1:] if v is not None] + + if clean_values: + mean_value = np.mean(clean_values) + std_dev_value = np.std(clean_values, ddof=1) + mean_str = f"{mean_value:.2f}" + std_dev_str = f"{std_dev_value:.2f}" + else: + mean_str = "N/A" + std_dev_str = "N/A" + + markdown_table += f"| {key:<36} | {first_iteration:<15} | {mean_str:<17} | {std_dev_str:<23} |\n" + print(markdown_table) + + import subprocess + + try: + g_hash = subprocess.run( + ["git", "rev-parse", "--short", "HEAD"], capture_output=True, text=True, check=True + ).stdout.strip() + print(f"Git Commit Hash: {g_hash}") + except subprocess.CalledProcessError: + print("Git Commit Hash: N/A") + print(f"PyTorch version: {torch.__version__}") + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print(f"Device: {device}\n") + + # 1st table + llm = LLM.load( + model=model_name, + ) + text, bench_d = llm.benchmark(num_iterations=num_iterations, prompt="What do llamas eat?", top_k=1) + print_table(f"Defaults ({model_name}), 1st time", bench_d) + del llm + + # 2nd table + llm = LLM.load( + model=model_name, + ) + text, bench_d = llm.benchmark(num_iterations=num_iterations, prompt="What do llamas eat?", top_k=1) + print_table(f"Defaults ({model_name}), 2nd time", bench_d) + del llm + + # 3rd table + llm = LLM.load( + model=model_name, + ) + text, bench_d = llm.benchmark(num_iterations=num_iterations, prompt="What do llamas eat?", top_k=1, stream=True) + print_table("stream=True", bench_d) + del llm + + # 4th table + llm = LLM.load(model=model_name, distribute=None) + llm.distribute(fixed_kv_cache_size=500) + + text, bench_d = llm.benchmark(num_iterations=num_iterations, prompt="What do llamas eat?", top_k=1, stream=True) + print_table("stream=True + fixed_kv_cache=500", bench_d) diff --git a/litgpt/args.py 
b/litgpt/args.py new file mode 100644 index 0000000000000000000000000000000000000000..ee0d99e2e600368cd11a9d5ae8c8831176e0d8df --- /dev/null +++ b/litgpt/args.py @@ -0,0 +1,104 @@ +# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file. +import math +import warnings +from dataclasses import dataclass +from typing import Optional, Union + + +@dataclass +class TrainArgs: + """Training-related arguments""" + + save_interval: Optional[int] = 1000 + """Number of optimizer steps between saving checkpoints""" + log_interval: int = 1 + """Number of iterations between logging calls""" + global_batch_size: int = 64 + """Number of samples between optimizer steps across data-parallel ranks""" + micro_batch_size: int = 4 + """Number of samples per data-parallel rank""" + lr_warmup_steps: Optional[int] = 100 + """Number of iterations with learning rate warmup active""" + lr_warmup_fraction: Optional[float] = None + """The fraction of an epoch to use for learning rate warmup""" + epochs: Optional[int] = None + """Number of epochs to train on""" + # TODO: `pretrain` is the only script using `max_tokens` explicitly. replace it with epoch_size*epochs? + max_tokens: Optional[int] = None + """Total number of tokens to train on""" + max_steps: Optional[int] = None + """Limits the number of optimizer steps to run""" + max_seq_length: Optional[int] = None + """Limits the length of samples""" + tie_embeddings: Optional[bool] = None + """Whether to tie the embedding weights with the language modeling head weights""" + + # Optimization args + max_norm: Optional[float] = None + min_lr: float = 6e-5 + + def __post_init__(self) -> None: + if self.lr_warmup_fraction and self.lr_warmup_steps: + raise ValueError( + "Can't provide both `--train.lr_warmup_fraction` and `--train.lr_warmup_steps`. Choose one." + ) + if self.lr_warmup_fraction and not (0 <= self.lr_warmup_fraction <= 1): + raise ValueError("`--train.lr_warmup_fraction` must be between 0 and 1.") + + if self.lr_warmup_steps and self.max_steps and (self.lr_warmup_steps >= self.max_steps): + warnings.warn( + "`--train.lr_warmup_steps` should be less than `--train.max_steps`." 
+ f" Got {self.lr_warmup_steps} lr_warmup_steps and {self.max_steps} max_steps.", + UserWarning, + ) + + def gradient_accumulation_iters(self, devices: int, num_nodes: int = 1) -> int: + """Number of iterations between gradient synchronizations""" + gradient_accumulation_iters = self.batch_size(devices, num_nodes) // self.micro_batch_size + assert gradient_accumulation_iters > 0 + return gradient_accumulation_iters + + def batch_size(self, devices: int, num_nodes: int = 1) -> int: + """Number of samples between optimizer steps per data-parallel rank""" + batch_size = self.global_batch_size // (devices * num_nodes) + assert batch_size > 0 + return batch_size + + def warmup_iters(self, devices: int, num_nodes: int, max_iters: int, train_dataloader) -> int: + """Number of iterations to warm up the learning rate.""" + if self.lr_warmup_fraction: + return min(max_iters, math.ceil(self.lr_warmup_fraction * len(train_dataloader))) + if self.lr_warmup_steps: + return min(max_iters, self.lr_warmup_steps * self.gradient_accumulation_iters(devices, num_nodes)) + return 0 + + +@dataclass +class EvalArgs: + """Evaluation-related arguments""" + + interval: int = 600 + """Number of optimizer steps between evaluation calls""" + max_new_tokens: Optional[int] = None + """Number of tokens to generate""" + max_iters: int = 100 + """Number of iterations""" + initial_validation: bool = False + """Whether to evaluate on the validation set at the beginning of the training""" + final_validation: bool = True + """Whether to evaluate on the validation set at the end of the training""" + evaluate_example: Union[str, int] = "first" + """How to pick an example instruction to evaluate periodically during training. + Can be "first", "random", or an integer index to pick a specific example.""" + + +@dataclass +class LogArgs: + """Logging-related arguments""" + + project: Optional[str] = None + """Project name""" + run: Optional[str] = None + """Run name""" + group: Optional[str] = None + """Group name""" diff --git a/litgpt/config.py b/litgpt/config.py new file mode 100644 index 0000000000000000000000000000000000000000..9398627f91dfd71dfe579a513e1c92fdbeca2521 --- /dev/null +++ b/litgpt/config.py @@ -0,0 +1,3087 @@ +# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file. + +from copy import deepcopy +from dataclasses import dataclass, field +from pathlib import Path +from typing import Any, List, Literal, Optional, Type, Union + +import yaml +from typing_extensions import Self + + +def find_multiple(n: int, k: int) -> int: + """Utility function for finding the nearest value to n which is a multiple of k. + + NOTE: We define this function in this module rather than `litgpt.utils` so that users can import + this file to do configuration manipulations in Python environments which do not include all the dependencies + demanded by `litgpt.utils`. 
+ """ + assert k > 0 + if n % k == 0: + return n + return n + k - (n % k) + + +@dataclass +class Config: + name: str = "" + hf_config: dict = field(default_factory=dict) + # General size parameters + block_size: int = 4096 + n_layer: int = 16 + n_embd: int = 4096 + vocab_size: int = 50254 + padding_multiple: int = 512 + padded_vocab_size: Optional[int] = None + # Transformer block (structure, normalizations) + norm_class_name: Literal["LayerNorm", "RMSNorm"] = "LayerNorm" + norm_eps: float = 1e-5 + norm_qk: bool = False + norm_qk_type: Literal["default", "olmo2"] = "default" + post_attention_norm: bool = False + post_mlp_norm: bool = False + parallel_residual: bool = True + shared_attention_norm: bool = False + # Transformer block (self-attention) + n_head: int = 32 + head_size: Optional[int] = None + # to use multi-head attention (MHA), set this to `n_head` (default) + # to use multi-query attention (MQA), set this to 1 + # to use grouped-query attention (GQA), set this to a value in between + # Example with `n_head=4` + # ┌───┐┌───┐┌───┐┌───┐ ┌───┐ ┌───┐ ┌───┐ + # │ v ││ v ││ v ││ v │ │ v │ │ v │ │ v │ + # └───┘└───┘└───┘└───┘ └───┘ └───┘ └───┘ + # │ │ │ │ │ │ │ + # ┌───┐┌───┐┌───┐┌───┐ ┌───┐ ┌───┐ ┌───┐ + # │ k ││ k ││ k ││ k │ │ k │ │ k │ │ k │ + # └───┘└───┘└───┘└───┘ └───┘ └───┘ └───┘ + # │ │ │ │ ┌──┴──┐ ┌──┴──┐ ┌────┬──┴─┬────┐ + # ┌───┐┌───┐┌───┐┌───┐ ┌───┐┌───┐┌───┐┌───┐ ┌───┐┌───┐┌───┐┌───┐ + # │ q ││ q ││ q ││ q │ │ q ││ q ││ q ││ q │ │ q ││ q ││ q ││ q │ + # └───┘└───┘└───┘└───┘ └───┘└───┘└───┘└───┘ └───┘└───┘└───┘└───┘ + # ◀──────────────────▶ ◀──────────────────▶ ◀──────────────────▶ + # MHA GQA MQA + # n_query_groups=4 n_query_groups=2 n_query_groups=1 + # + # credit https://arxiv.org/pdf/2305.13245.pdf + n_query_groups: Optional[int] = None + attn_bias: bool = False + attention_scores_scalar: Optional[int] = None + sliding_window_size: Optional[int] = None + sliding_window_indices: Optional[List] = None + # if `attention_logit_softcapping` is used, cannot use optimized + # `torch.nn.functional.scaled_dot_product_attention` (which implements + # Flash attention), may result in higher memory and runtime footprint. + attention_logit_softcapping: Optional[float] = None + # Rotary position embedding (RoPE) + rope_base: int = 10000 + rotary_percentage: float = 0.25 + rope_condense_ratio: int = 1 + rope_adjustments: Optional[dict] = None + # Transformer block (MLP) + intermediate_size: Optional[int] = None + moe_intermediate_size: Optional[int] = None + bias: bool = True + mlp_class_name: Literal["GptNeoxMLP", "LLaMAMLP", "GemmaMLP", "LLaMAMoE"] = "GptNeoxMLP" + gelu_approximate: str = "none" + n_expert: int = 0 + n_expert_per_token: int = 0 + # GPT before/after blocks + scale_embeddings: bool = False + lm_head_bias: bool = False + final_logit_softcapping: Optional[float] = None + norm_1: bool = True + norm_2: bool = True + # The base period of the RoPE embeddings for local attention. + # If not provided, rope_theta will be used for both local and global attention. + rope_local_base_freq: Optional[float] = None + rope_indices: Optional[List] = None + + def __post_init__(self): + if not self.name: + self.name = self.hf_config.get("name", self.name) + + if self.head_size is None: + assert self.n_embd % self.n_head == 0 + self.head_size = self.n_embd // self.n_head + + # vocab size should be a power of 2 to be optimal on hardware. 
compute the closest value + if self.padded_vocab_size is None: + self.padded_vocab_size = find_multiple(self.vocab_size, self.padding_multiple) + else: + # vocab size shouldn't be larger than padded vocab size + self.vocab_size = min(self.vocab_size, self.padded_vocab_size) + + # compute the number of query groups + if self.n_query_groups is not None: + assert self.n_head % self.n_query_groups == 0 + else: + self.n_query_groups = self.n_head + + # compute the intermediate size for MLP if not set + if self.intermediate_size is None: + if self.mlp_class_name == "LLaMAMLP": + raise ValueError(f"The config {self.name!r}, needs to set the `intermediate_size`") + self.intermediate_size = 4 * self.n_embd + + self.rope_n_elem = int(self.rotary_percentage * self.head_size) + + if self.sliding_window_size is not None and self.sliding_window_indices is None: + self.sliding_window_indices = [1] * self.n_layer + + if self.rope_local_base_freq is not None and self.rope_indices is None: + self.rope_indices = [1] * self.n_layer + + @classmethod + def from_name(cls, name: str, **kwargs: Any) -> Optional[Self]: + if name not in name_to_config: + # search through all `config['hf_config']['name']` + try: + conf_dict = next( + config + for config in configs + if name == config["hf_config"]["name"] + or config["hf_config"]["org"] + "/" + config["hf_config"]["name"] == name + ) + except StopIteration: + raise ValueError(f"{name!r} is not a supported config name") + else: + conf_dict = name_to_config[name] + + conf_dict = conf_dict.copy() + conf_dict.update(kwargs) + return cls(**conf_dict) + + @classmethod + def from_file(cls, path: Union[str, Path], **kwargs: Any) -> Self: + with open(path, encoding="utf-8") as fp: + file_kwargs = yaml.safe_load(fp) + if file_kwargs is None: + raise ValueError(f"{path} is empty which is likely unexpected.") + file_kwargs.update(kwargs) + return cls(**file_kwargs) + + @classmethod + def from_checkpoint(cls, path: Path, **kwargs: Any) -> Self: + """Automatically load `model_config.yaml` and if it doesn't exist - a matching config from `litgpt/config.py`.""" + if (config_path := path / "model_config.yaml").is_file(): + return cls.from_file(config_path, **kwargs) + if (model_name := path.name) in name_to_config: + return cls.from_name(model_name, **kwargs) + raise FileNotFoundError(f"For {str(path)!r} neither 'model_config.yaml' nor matching config exists.") + + @property + def mlp_class(self) -> Type: + # `self.mlp_class_name` cannot be the type to keep the config serializable + import litgpt.model + + return getattr(litgpt.model, self.mlp_class_name) + + @property + def norm_class(self) -> Type: + # `self.norm_class_name` cannot be the type to keep the config serializable + + from functools import partial + + import torch # Torch import is lazy to make config loading faster + + if self.norm_class_name == "RMSNorm": + from litgpt.model import RMSNorm + + return partial(RMSNorm, add_unit_offset="Gemma" in self.name) + + if self.norm_class_name == "LayerNorm" and "OLMo" in self.name: + # this makes it equivalent to `torch.nn.functional.layer_norm` + # that is used by OLMo + # Table 5 caption in the OLMo paper shows this - https://aclanthology.org/2024.acl-long.841 + return partial(torch.nn.LayerNorm, elementwise_affine=False) + + return getattr(torch.nn, self.norm_class_name) + + +######################## +# Stability AI StableLM +######################## +configs = [ + # https://huggingface.co/stabilityai/stablelm-base-alpha-3b/blob/main/config.json + 
dict(name="stablelm-base-alpha-3b", hf_config=dict(org="stabilityai", name="stablelm-base-alpha-3b")), + # https://huggingface.co/stabilityai/stablelm-base-alpha-7b/blob/main/config.json + dict( + name="stablelm-base-alpha-7b", + hf_config=dict(org="stabilityai", name="stablelm-base-alpha-7b"), + n_head=48, + n_embd=6144, + padding_multiple=256, + ), + # https://huggingface.co/stabilityai/stablelm-tuned-alpha-3b/blob/main/config.json + dict(name="stablelm-tuned-alpha-3b", hf_config=dict(org="stabilityai", name="stablelm-tuned-alpha-3b"), n_head=32), + # https://huggingface.co/stabilityai/stablelm-tuned-alpha-7b/blob/main/config.json + dict( + name="stablelm-tuned-alpha-7b", + hf_config=dict(org="stabilityai", name="stablelm-tuned-alpha-7b"), + n_head=48, + n_embd=6144, + padding_multiple=256, + ), + # https://huggingface.co/stabilityai/stablelm-3b-4e1t/blob/main/config.json + dict( + name="stablelm-3b-4e1t", + hf_config=dict(org="stabilityai", name="stablelm-3b-4e1t"), + padded_vocab_size=50304, + n_layer=32, + n_head=32, + n_embd=2560, + parallel_residual=False, + bias=False, + mlp_class_name="LLaMAMLP", + intermediate_size=6912, + ), + # https://huggingface.co/stabilityai/stablelm-zephyr-3b/blob/main/config.json + dict( + name="stablelm-zephyr-3b", + hf_config=dict(org="stabilityai", name="stablelm-zephyr-3b"), + padded_vocab_size=50304, + n_layer=32, + n_head=32, + n_embd=2560, + parallel_residual=False, + bias=False, + mlp_class_name="LLaMAMLP", + intermediate_size=6912, + ), +] + + +########################## +# Stability AI StableCode +########################## +stablecode = [ + # https://huggingface.co/stabilityai/stablecode-completion-alpha-3b/blob/main/config.json + dict( + name="stablecode-completion-alpha-3b", + hf_config=dict(org="stabilityai", name="stablecode-completion-alpha-3b"), + block_size=16384, + vocab_size=49152, + n_layer=32, + n_embd=2560, + ), + # https://huggingface.co/stabilityai/stablecode-completion-alpha-3b-4k/blob/main/config.json + dict( + name="stablecode-completion-alpha-3b-4k", + hf_config=dict(org="stabilityai", name="stablecode-completion-alpha-3b-4k"), + vocab_size=49152, + n_layer=32, + n_embd=2560, + ), + # https://huggingface.co/stabilityai/stablecode-instruct-alpha-3b/blob/main/config.json + dict( + name="stablecode-instruct-alpha-3b", + hf_config=dict(org="stabilityai", name="stablecode-instruct-alpha-3b"), + vocab_size=49152, + n_layer=32, + n_embd=2560, + ), + # https://huggingface.co/stabilityai/stable-code-3b/blob/main/config.json + dict( + name="stable-code-3b", + hf_config=dict(org="stabilityai", name="stable-code-3b"), + padded_vocab_size=50304, + n_layer=32, + n_embd=2560, + block_size=16384, + parallel_residual=False, + bias=False, + mlp_class_name="LLaMAMLP", + intermediate_size=6912, + ), +] +configs.extend(stablecode) + + +#################### +# EleutherAI Pythia +#################### +pythia = [ + # https://huggingface.co/EleutherAI/pythia-14m/blob/main/config.json + dict( + name="pythia-14m", + hf_config=dict(org="EleutherAI", name="pythia-14m"), + block_size=512, + n_layer=6, + n_embd=128, + n_head=4, + padding_multiple=128, + ), + # https://huggingface.co/EleutherAI/pythia-31m/blob/main/config.json + dict( + name="pythia-31m", + hf_config=dict(org="EleutherAI", name="pythia-31m"), + block_size=1024, + n_layer=6, + n_embd=256, + n_head=8, + padding_multiple=128, + ), + # https://huggingface.co/EleutherAI/pythia-70m/blob/main/config.json + dict( + name="pythia-70m", + hf_config=dict(org="EleutherAI", name="pythia-70m"), + 
block_size=2048, + n_layer=6, + n_embd=512, + n_head=8, + padding_multiple=128, + ), + # https://huggingface.co/EleutherAI/pythia-160m/blob/main/config.json + dict( + name="pythia-160m", + hf_config=dict(org="EleutherAI", name="pythia-160m"), + block_size=2048, + n_layer=12, + n_embd=768, + n_head=12, + padding_multiple=128, + ), + # https://huggingface.co/EleutherAI/pythia-410m/blob/main/config.json + dict( + name="pythia-410m", + hf_config=dict(org="EleutherAI", name="pythia-410m"), + block_size=2048, + n_layer=24, + n_embd=1024, + n_head=16, + padding_multiple=128, + ), + # https://huggingface.co/EleutherAI/pythia-1b/blob/main/config.json + dict( + name="pythia-1b", + hf_config=dict(org="EleutherAI", name="pythia-1b"), + block_size=2048, + n_embd=2048, + n_head=8, + padding_multiple=128, + ), + # https://huggingface.co/EleutherAI/pythia-1.4b/blob/main/config.json + dict( + name="pythia-1.4b", + hf_config=dict(org="EleutherAI", name="pythia-1.4b"), + block_size=2048, + n_layer=24, + n_embd=2048, + n_head=16, + padding_multiple=128, + ), + # https://huggingface.co/EleutherAI/pythia-2.8b/blob/main/config.json + dict( + name="pythia-2.8b", + hf_config=dict(org="EleutherAI", name="pythia-2.8b"), + block_size=2048, + n_layer=32, + n_embd=2560, + padding_multiple=128, + ), + # https://huggingface.co/EleutherAI/pythia-6.9b/blob/main/config.json + dict( + name="pythia-6.9b", + hf_config=dict(org="EleutherAI", name="pythia-6.9b"), + block_size=2048, + n_layer=32, + padding_multiple=256, + ), + # https://huggingface.co/EleutherAI/pythia-12b/blob/main/config.json + dict( + name="pythia-12b", + hf_config=dict(org="EleutherAI", name="pythia-12b"), + block_size=2048, + n_layer=36, + n_embd=5120, + n_head=40, + ), +] +configs.extend(pythia) +for c in pythia: + # "pythia-14m" and "pythia-31m" don't have deduped version + if c["name"] in ("pythia-14m", "pythia-31m"): + continue + copy = deepcopy(c) + copy["name"] = f"{c['name']}-deduped" + copy["hf_config"]["name"] = f"{c['hf_config']['name']}-deduped" + configs.append(copy) + + +################# +# TII UAE Falcon +################# +falcon = [ + # https://huggingface.co/tiiuae/falcon-7b/blob/main/config.json + dict( + name="falcon-7b{}", + hf_config=dict(org="tiiuae", name="falcon-7b{}"), + block_size=2048, + vocab_size=65024, + padded_vocab_size=65024, + n_layer=32, + n_head=71, + n_embd=4544, + rotary_percentage=1.0, + n_query_groups=1, + bias=False, + # this is not in the config, but in the original model implementation, only for this config + shared_attention_norm=True, + ), + # https://huggingface.co/tiiuae/falcon-40b/blob/main/config.json + dict( + name="falcon-40b{}", + hf_config=dict(org="tiiuae", name="falcon-40b{}"), + block_size=2048, + vocab_size=65024, + padded_vocab_size=65024, + n_layer=60, + n_head=128, + n_embd=8192, + rotary_percentage=1.0, + n_query_groups=8, + bias=False, + ), +] +for c in falcon: + for kind in ("", "-instruct"): + copy = deepcopy(c) + copy["name"] = c["name"].format(kind) + copy["hf_config"]["name"] = c["hf_config"]["name"].format(kind) + configs.append(copy) + +# https://huggingface.co/tiiuae/falcon-180b/blob/main/config.json +falcon180b = dict( + name="falcon-180B{}", + hf_config=dict(org="tiiuae", name="falcon-180B{}"), + block_size=2048, + vocab_size=65024, + padded_vocab_size=65024, + n_layer=80, + n_head=232, + n_embd=14848, + rotary_percentage=1.0, + n_query_groups=8, + bias=False, +) + +for kind in ("", "-chat"): + copy = deepcopy(falcon180b) + copy["name"] = falcon180b["name"].format(kind) + 
copy["hf_config"]["name"] = falcon180b["hf_config"]["name"].format(kind) + configs.append(copy) + +falcon3 = [ + # https://huggingface.co/tiiuae/Falcon3-1B-Base/blob/main/config.json + dict( + name="Falcon3-1B{}", + hf_config=dict(org="tiiuae", name="Falcon3-1B{}"), + block_size=4096, + vocab_size=131072, + padded_vocab_size=131072, + n_layer=18, + n_head=8, + n_query_groups=4, + n_embd=2048, + rotary_percentage=1.0, + parallel_residual=False, + rope_base=1000042, + norm_eps=1e-6, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=8192, + ), + # https://huggingface.co/tiiuae/Falcon3-3B-Base/blob/main/config.json + dict( + name="Falcon3-3B{}", + hf_config=dict(org="tiiuae", name="Falcon3-3B{}"), + block_size=32768, + vocab_size=131072, + padded_vocab_size=131072, + n_layer=22, + n_head=12, + n_query_groups=4, + n_embd=3072, + rotary_percentage=1.0, + parallel_residual=False, + rope_base=1000042, + norm_eps=1e-6, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=9216, + ), + # https://huggingface.co/tiiuae/Falcon3-7B-Base/blob/main/config.json + dict( + name="Falcon3-7B{}", + hf_config=dict(org="tiiuae", name="Falcon3-7B{}"), + block_size=32768, + vocab_size=131072, + padded_vocab_size=131072, + n_layer=28, + n_head=12, + n_query_groups=4, + n_embd=3072, + rotary_percentage=1.0, + parallel_residual=False, + rope_base=1000042, + norm_eps=1e-6, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=23040, + ), + # https://huggingface.co/tiiuae/Falcon3-10B-Base/blob/main/config.json + dict( + name="Falcon3-10B{}", + hf_config=dict(org="tiiuae", name="Falcon3-10B{}"), + block_size=32768, + vocab_size=131072, + padded_vocab_size=131072, + n_layer=40, + n_head=12, + n_query_groups=4, + n_embd=3072, + rotary_percentage=1.0, + parallel_residual=False, + rope_base=1000042, + norm_eps=1e-6, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=23040, + ), +] +for c in falcon3: + for kind in ("-Base", "-Instruct"): + copy = deepcopy(c) + copy["name"] = c["name"].format(kind) + copy["hf_config"]["name"] = c["hf_config"]["name"].format(kind) + configs.append(copy) + + +############################# +# OpenLM Research Open LLaMA +############################# +open_LLaMA = [ + # https://huggingface.co/openlm-research/open_llama_3b/blob/main/config.json + dict( + name="open_llama_3b", + hf_config=dict(org="openlm-research", name="open_llama_3b"), + block_size=2048, + vocab_size=32000, + padding_multiple=64, + n_layer=26, + n_embd=3200, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + norm_eps=1e-6, + mlp_class_name="LLaMAMLP", + intermediate_size=8640, + ), + # https://huggingface.co/openlm-research/open_llama_7b/blob/main/config.json + dict( + name="open_llama_7b", + hf_config=dict(org="openlm-research", name="open_llama_7b"), + block_size=2048, + vocab_size=32000, + padding_multiple=64, + n_layer=32, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + norm_eps=1e-6, + mlp_class_name="LLaMAMLP", + intermediate_size=11008, + ), + # https://huggingface.co/openlm-research/open_llama_13b/blob/main/config.json + dict( + name="open_llama_13b", + hf_config=dict(org="openlm-research", name="open_llama_13b"), + block_size=2048, + vocab_size=32000, + padding_multiple=64, + n_layer=40, + n_head=40, + n_embd=5120, + rotary_percentage=1.0, + parallel_residual=False, + 
bias=False, + norm_class_name="RMSNorm", + norm_eps=1e-6, + mlp_class_name="LLaMAMLP", + intermediate_size=13824, + ), +] +configs.extend(open_LLaMA) + +############### +# Meta LLaMA 2 +############### +llama_2 = [ + # https://huggingface.co/meta-llama/Llama-2-7b-hf/blob/main/config.json + dict( + name="Llama-2-7b{}-hf", + hf_config=dict(org="meta-llama", name="Llama-2-7b{}-hf"), + vocab_size=32000, + padding_multiple=64, + n_layer=32, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=11008, + ), + # https://huggingface.co/meta-llama/Llama-2-13b-hf/blob/main/config.json + dict( + name="Llama-2-13b{}-hf", + hf_config=dict(org="meta-llama", name="Llama-2-13b{}-hf"), + vocab_size=32000, + padding_multiple=64, + n_layer=40, + n_head=40, + n_embd=5120, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=13824, + ), + # https://huggingface.co/meta-llama/Llama-2-70b-hf/blob/main/config.json + dict( + name="Llama-2-70b{}-hf", + hf_config=dict(org="meta-llama", name="Llama-2-70b{}-hf"), + vocab_size=32000, + padding_multiple=64, + n_layer=80, + n_head=64, + n_embd=8192, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=28672, + ), +] +for c in llama_2: + for kind in ("", "-chat"): + copy = deepcopy(c) + copy["name"] = c["name"].format(kind) + copy["hf_config"]["name"] = c["hf_config"]["name"].format(kind) + configs.append(copy) + + +############### +# Meta LLaMA 3 +############### +llama_3 = [ + # https://huggingface.co/meta-llama/Meta-Llama-3-8B/blob/main/config.json + dict( + name="Llama-3-8B{}", + hf_config=dict(org="meta-llama", name="Meta-Llama-3-8B{}"), + block_size=8192, + vocab_size=128000, + padded_vocab_size=128256, + n_layer=32, + n_head=32, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=14336, + rope_base=500000, + ), + # https://huggingface.co/meta-llama/Meta-Llama-3.1-8B/blob/main/config.json + dict( + name="Llama-3.1-8B{}", + hf_config=dict(org="meta-llama", name="Meta-Llama-3.1-8B{}"), + block_size=131072, + vocab_size=128000, + padded_vocab_size=128256, + n_layer=32, + n_head=32, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=14336, + rope_base=500000, + rope_adjustments=dict(factor=8.0, low_freq_factor=1.0, high_freq_factor=4.0, original_max_seq_len=8192), + ), + # https://huggingface.co/meta-llama/Meta-Llama-3-70B/blob/main/config.json + dict( + name="Llama-3-70B{}", + hf_config=dict(org="meta-llama", name="Meta-Llama-3-70B{}"), + block_size=8192, + vocab_size=128000, + padded_vocab_size=128256, + n_layer=80, + n_head=64, + n_embd=8192, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=28672, + rope_base=500000, + ), + # https://huggingface.co/meta-llama/Meta-Llama-3.1-70B/blob/main/config.json + dict( + name="Llama-3.1-70B{}", + hf_config=dict(org="meta-llama", name="Meta-Llama-3.1-70B{}"), + block_size=131072, + vocab_size=128000, + padded_vocab_size=128256, + n_layer=80, + n_head=64, + n_embd=8192, + n_query_groups=8, + 
rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=28672, + rope_base=500000, + rope_adjustments=dict(factor=8.0, low_freq_factor=1.0, high_freq_factor=4.0, original_max_seq_len=8192), + ), + # https://huggingface.co/meta-llama/Meta-Llama-3.1-405B/blob/main/config.json + dict( + name="Llama-3.1-405B{}", + hf_config=dict(org="meta-llama", name="Meta-Llama-3.1-405B{}"), + block_size=131072, + vocab_size=128000, + padded_vocab_size=128256, + n_layer=126, + n_head=128, + n_embd=16384, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=53248, + rope_base=500000, + rope_adjustments=dict(factor=8.0, low_freq_factor=1.0, high_freq_factor=4.0, original_max_seq_len=8192), + ), + # https://huggingface.co/meta-llama/Llama-3.2-1B/blob/main/config.json + dict( + name="Llama-3.2-1B{}", + hf_config=dict(org="meta-llama", name="Llama-3.2-1B{}"), + block_size=131072, + vocab_size=128000, + padded_vocab_size=128256, + n_layer=16, + n_embd=2048, + n_head=32, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=8192, + rope_base=500000, + rope_adjustments=dict(factor=32.0, low_freq_factor=1.0, high_freq_factor=4.0, original_max_seq_len=8192), + ), + # https://huggingface.co/meta-llama/Llama-3.2-3B/blob/main/config.json + dict( + name="Llama-3.2-3B{}", + hf_config=dict(org="meta-llama", name="Llama-3.2-3B{}"), + block_size=131072, + vocab_size=128000, + padded_vocab_size=128256, + n_layer=28, + n_embd=3072, + n_head=24, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=8192, + rope_base=500000, + rope_adjustments=dict(factor=32.0, low_freq_factor=1.0, high_freq_factor=4.0, original_max_seq_len=8192), + ), + # https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct/blob/main/config.json + dict( + name="Llama-3.3-70B-Instruct", + hf_config=dict(org="meta-llama", name="Llama-3.3-70B-Instruct"), + block_size=131072, + vocab_size=128000, + padded_vocab_size=128256, + n_layer=80, + n_head=64, + n_embd=8192, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=28672, + rope_base=500000, + rope_adjustments=dict(factor=8.0, low_freq_factor=1.0, high_freq_factor=4.0, original_max_seq_len=8192), + ), +] +for c in llama_3: + if c["name"] == "Llama-3.3-70B-Instruct": + configs.append(c) + continue + for kind in ("", "-Instruct"): + copy = deepcopy(c) + copy["name"] = c["name"].format(kind) + copy["hf_config"]["name"] = c["hf_config"]["name"].format(kind) + configs.append(copy) + +######################### +# NVIDIA Llama Nemotron +######################### +configs.append( + dict( + name="Llama-3.1-Nemotron-70B-Instruct-HF", + hf_config=dict(org="nvidia", name="Llama-3.1-Nemotron-70B-Instruct-HF"), + block_size=131072, + vocab_size=128000, + padded_vocab_size=128256, + n_layer=80, + n_head=64, + n_embd=8192, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=28672, + rope_base=500000, + rope_adjustments=dict(factor=8.0, low_freq_factor=1.0, high_freq_factor=4.0, 
original_max_seq_len=8192), + ), +) + +################# +# Allen AI OLMo +################# +olmo = [ + # https://huggingface.co/allenai/OLMo-1B-hf/blob/main/config.json + dict( + name="OLMo-1B-hf", + hf_config=dict(org="allenai", name="OLMo-1B-hf"), + vocab_size=50280, + padded_vocab_size=50304, + block_size=2048, + n_embd=2048, + n_layer=16, + n_head=16, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="LayerNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=8192, + ), + # https://huggingface.co/allenai/OLMo-7B-hf/blob/main/config.json + dict( + name="OLMo-7B-hf", + hf_config=dict(org="allenai", name="OLMo-7B-hf"), + vocab_size=50280, + padded_vocab_size=50304, + block_size=2048, + n_layer=32, + n_head=32, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="LayerNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=11008, + ), + # https://huggingface.co/allenai/OLMo-7B-Instruct-hf/blob/main/config.json + dict( + name="OLMo-7B-Instruct-hf", + hf_config=dict(org="allenai", name="OLMo-7B-Instruct-hf"), + vocab_size=50280, + padded_vocab_size=50304, + block_size=2048, + n_layer=32, + n_head=32, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="LayerNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=11008, + ), +] + +configs.extend(olmo) + +olmo2 = [ + # https://huggingface.co/allenai/OLMo-2-1124-7B/blob/main/config.json + dict( + name="OLMo-2-1124-7B{}", + hf_config=dict(org="allenai", name="OLMo-2-1124-7B{}"), + vocab_size=100278, + padded_vocab_size=100352, + block_size=4096, + n_embd=4096, + n_layer=32, + n_head=32, + n_query_groups=32, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + norm_eps=1e-06, + intermediate_size=11008, + rope_base=500000, + norm_qk=True, + post_mlp_norm=True, + norm_1=False, + norm_2=False, + norm_qk_type="olmo2", + post_attention_norm=True, + ), + # https://huggingface.co/allenai/OLMo-2-1124-13B/blob/main/config.json + dict( + name="OLMo-2-1124-13B{}", + hf_config=dict(org="allenai", name="OLMo-2-1124-13B{}"), + vocab_size=100278, + padded_vocab_size=100352, + block_size=4096, + n_embd=5120, + n_layer=40, + n_head=40, + n_query_groups=40, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + norm_eps=1e-06, + intermediate_size=13824, + rope_base=500000, + norm_qk=True, + post_mlp_norm=True, + norm_1=False, + norm_2=False, + norm_qk_type="olmo2", + post_attention_norm=True, + ), +] + +for c in olmo2: + for kind in ("", "-SFT", "-DPO", "-Instruct"): + copy = deepcopy(c) + copy["name"] = c["name"].format(kind) + copy["hf_config"]["name"] = c["hf_config"]["name"].format(kind) + configs.append(copy) + +############### +# Google Gemma +############### +gemma = [ + # https://huggingface.co/google/gemma-2b/blob/main/config.json + dict( + name="Gemma-2b", + hf_config=dict(org="google", name="gemma-2b"), + scale_embeddings=True, + vocab_size=256000, + padding_multiple=64, + n_embd=2048, + n_layer=18, + n_head=8, + n_query_groups=1, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="GemmaMLP", + gelu_approximate="tanh", + intermediate_size=16384, + ), + # https://huggingface.co/google/gemma-7b/blob/main/config.json + dict( + name="Gemma-7b", + hf_config=dict(org="google", name="gemma-7b"), + scale_embeddings=True, + vocab_size=256000, + 
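
Editorial aside: the OLMo 2 entries above flip several normalization switches relative to the rest of this file — `norm_1`/`norm_2` are disabled while `post_attention_norm`, `post_mlp_norm` and `norm_qk` are enabled. A toy reading of what that ordering means, assuming the flags map onto "normalize the sublayer output inside the residual"; this is a sketch, not litgpt's actual `Block`:

    import torch.nn as nn

    class PostNormBlockSketch(nn.Module):
        """Illustrative contrast with the usual pre-norm layout `x + attn(norm_1(x))`."""
        def __init__(self, attn: nn.Module, mlp: nn.Module, dim: int, eps: float = 1e-6):
            super().__init__()
            self.attn, self.mlp = attn, mlp
            self.post_attention_norm = nn.RMSNorm(dim, eps=eps)
            self.post_mlp_norm = nn.RMSNorm(dim, eps=eps)

        def forward(self, x):
            x = x + self.post_attention_norm(self.attn(x))  # norm_1 off, norm applied after attention
            return x + self.post_mlp_norm(self.mlp(x))      # norm_2 off, norm applied after the MLP
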
padding_multiple=64, + n_embd=3072, + n_layer=28, + n_head=16, + head_size=256, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="GemmaMLP", + gelu_approximate="tanh", + intermediate_size=24576, + ), + # https://huggingface.co/google/gemma-2-2b/blob/main/config.json + dict( + name="Gemma-2-2b", + hf_config=dict(org="google", name="gemma-2-2b"), + scale_embeddings=True, + attention_scores_scalar=256, + vocab_size=256000, + block_size=8192, + sliding_window_size=4096, + # only layer with idx 0, 2, 4, ... have sliding window attention + sliding_window_indices=[1 if i % 2 == 0 else 0 for i in range(26)], + intermediate_size=9216, + n_embd=2304, + n_layer=26, + n_head=8, + n_query_groups=4, + head_size=256, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="GemmaMLP", + gelu_approximate="tanh", + post_attention_norm=True, + post_mlp_norm=True, + attention_logit_softcapping=50.0, + final_logit_softcapping=30.0, + ), + # https://huggingface.co/google/gemma-2-9b/blob/main/config.json + dict( + name="Gemma-2-9b", + hf_config=dict(org="google", name="gemma-2-9b"), + scale_embeddings=True, + attention_scores_scalar=256, + vocab_size=256000, + block_size=8192, + sliding_window_size=4096, + # only layer with idx 0, 2, 4, ... have sliding window attention + sliding_window_indices=[1 if i % 2 == 0 else 0 for i in range(42)], + intermediate_size=14336, + n_embd=3584, + n_layer=42, + n_head=16, + n_query_groups=8, + head_size=256, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="GemmaMLP", + gelu_approximate="tanh", + post_attention_norm=True, + post_mlp_norm=True, + attention_logit_softcapping=50.0, + final_logit_softcapping=30.0, + ), + # https://huggingface.co/google/gemma-2-27b/blob/main/config.json + dict( + name="Gemma-2-27b", + hf_config=dict(org="google", name="gemma-2-27b"), + scale_embeddings=True, + # In Gemma 2 27B attention scores are scaled not by `sqrt(head_size)` (11.31), + # but by `sqrt(n_emb // n_head)` = sqrt(4608 // 32) = 12 + attention_scores_scalar=144, + vocab_size=256000, + block_size=8192, + sliding_window_size=4096, + # only layer with idx 0, 2, 4, ... 
have sliding window attention + sliding_window_indices=[1 if i % 2 == 0 else 0 for i in range(46)], + intermediate_size=36864, + n_embd=4608, + n_layer=46, + n_head=32, + n_query_groups=16, + head_size=128, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="GemmaMLP", + gelu_approximate="tanh", + post_attention_norm=True, + post_mlp_norm=True, + attention_logit_softcapping=50.0, + final_logit_softcapping=30.0, + ), +] +configs.extend(gemma) +for c in gemma: + copy = deepcopy(c) + copy["name"] = f"{c['name']}-it" + copy["hf_config"]["name"] = f"{c['hf_config']['name']}-it" + configs.append(copy) + +################## +# Google Gemma 3 +################## +gemma3 = [ + # https://huggingface.co/google/gemma-3-1b-it/blob/main/config.json + dict( + name="Gemma-3-1b-it", + hf_config=dict(org="google", name="gemma-3-1b-it"), + scale_embeddings=True, + attention_scores_scalar=256, + vocab_size=262144, + block_size=131072, + sliding_window_size=512, + # 5 local layers for every global layer + sliding_window_indices=[0 if (i + 1) % 6 == 0 else 1 for i in range(26)], + intermediate_size=6912, + n_embd=1152, + n_layer=26, + n_head=4, + n_query_groups=1, + head_size=256, + rotary_percentage=1.0, + rope_adjustments=None, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="GemmaMLP", + gelu_approximate="tanh", + post_attention_norm=True, + post_mlp_norm=True, + norm_qk=True, + rope_base=1000000, + rope_local_base_freq=10000, + # 5 local layers for every global layer + rope_indices=[0 if (i + 1) % 6 == 0 else 1 for i in range(26)], + ), + # https://huggingface.co/google/gemma-3-4b-it/blob/main/config.json + dict( + name="Gemma-3-4b-it", + hf_config=dict(org="google", name="gemma-3-4b-it"), + scale_embeddings=True, + attention_scores_scalar=256, + vocab_size=262144, + block_size=131072, + sliding_window_size=1024, + # 5 local layers for every global layer + sliding_window_indices=[0 if (i + 1) % 6 == 0 else 1 for i in range(34)], + intermediate_size=10240, + n_embd=2560, + n_layer=34, + n_head=8, + n_query_groups=4, + head_size=256, + rotary_percentage=1.0, + rope_adjustments=dict(factor=8.0), + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="GemmaMLP", + gelu_approximate="tanh", + post_attention_norm=True, + post_mlp_norm=True, + norm_qk=True, + rope_base=1000000, + rope_local_base_freq=10000, + # 5 local layers for every global layer + rope_indices=[0 if (i + 1) % 6 == 0 else 1 for i in range(34)], + ), + # https://huggingface.co/google/gemma-3-12b-it/blob/main/config.json + dict( + name="Gemma-3-12b-it", + hf_config=dict(org="google", name="gemma-3-12b-it"), + scale_embeddings=True, + attention_scores_scalar=256, + vocab_size=262144, + block_size=131072, + sliding_window_size=1024, + # 5 local layers for every global layer + sliding_window_indices=[0 if (i + 1) % 6 == 0 else 1 for i in range(48)], + intermediate_size=15360, + n_embd=3840, + n_layer=48, + n_head=16, + n_query_groups=8, + head_size=256, + rotary_percentage=1.0, + rope_adjustments=dict(factor=8.0), + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="GemmaMLP", + gelu_approximate="tanh", + post_attention_norm=True, + post_mlp_norm=True, + norm_qk=True, + rope_base=1000000, + rope_local_base_freq=10000, + # 5 local layers for every global layer + rope_indices=[0 if (i + 1) % 6 == 0 else 1 for i in range(48)], + ), + # 
https://huggingface.co/google/gemma-3-27b-it/blob/main/config.json + dict( + name="Gemma-3-27b-it", + hf_config=dict(org="google", name="gemma-3-27b-it"), + scale_embeddings=True, + attention_scores_scalar=168, + vocab_size=262144, + block_size=131072, + sliding_window_size=1024, + # 5 local layers for every global layer + sliding_window_indices=[0 if (i + 1) % 6 == 0 else 1 for i in range(62)], + intermediate_size=21504, + n_embd=5376, + n_layer=62, + n_head=32, + n_query_groups=16, + head_size=128, + rotary_percentage=1.0, + rope_adjustments=dict(factor=8.0), + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="GemmaMLP", + gelu_approximate="tanh", + post_attention_norm=True, + post_mlp_norm=True, + norm_qk=True, + rope_base=1000000, + rope_local_base_freq=10000, + # 5 local layers for every global layer + rope_indices=[0 if (i + 1) % 6 == 0 else 1 for i in range(62)], + ), +] +configs.extend(gemma3) + +################## +# Google CodeGemma +################## +codegemma = [ + # https://huggingface.co/google/codegemma-7b-it/blob/main/config.json + dict( + name="CodeGemma-7b-it", + hf_config=dict(org="google", name="codegemma-7b-it"), + scale_embeddings=True, + vocab_size=256000, + padding_multiple=64, + n_embd=3072, + n_layer=28, + n_head=16, + head_size=256, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="GemmaMLP", + gelu_approximate="tanh", + intermediate_size=24576, + ), +] +configs.extend(codegemma) + + +########################## +# Stability AI FreeWilly2 +########################## +freewilly_2 = [ + # https://huggingface.co/stabilityai/FreeWilly2/blob/main/config.json + dict( + name="FreeWilly2", + hf_config=dict(org="stabilityai", name="FreeWilly2"), + vocab_size=32000, + padding_multiple=64, + n_layer=80, + n_head=64, + n_embd=8192, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=28672, + ) +] +configs.extend(freewilly_2) + + +################## +# Meta Code Llama +################## +code_llama = [ + # https://huggingface.co/codellama/CodeLlama-7b-hf/blob/main/config.json + dict( + name="CodeLlama-7b-hf", + hf_config=dict(org="codellama", name="CodeLlama-7b-hf"), + block_size=16384, + vocab_size=32016, + padding_multiple=16, + n_layer=32, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + norm_eps=1e-05, + mlp_class_name="LLaMAMLP", + intermediate_size=11008, + rope_base=1000000, + ), + # https://huggingface.co/codellama/CodeLlama-13b-hf/blob/main/config.json + dict( + name="CodeLlama-13b-hf", + hf_config=dict(org="codellama", name="CodeLlama-13b-hf"), + block_size=16384, + vocab_size=32016, + padding_multiple=16, + n_layer=40, + n_head=40, + n_embd=5120, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + norm_eps=1e-05, + mlp_class_name="LLaMAMLP", + intermediate_size=13824, + rope_base=1000000, + ), + # https://huggingface.co/codellama/CodeLlama-34b-hf/blob/main/config.json + dict( + name="CodeLlama-34b-hf", + hf_config=dict(org="codellama", name="CodeLlama-34b-hf"), + block_size=16384, + vocab_size=32000, + padded_vocab_size=32000, + n_layer=48, + n_head=64, + n_embd=8192, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + norm_eps=1e-05, + mlp_class_name="LLaMAMLP", + intermediate_size=22016, + 
rope_base=1000000, + ), + # https://huggingface.co/codellama/CodeLlama-70b-hf/blob/main/config.json + dict( + name="CodeLlama-70b-hf", + hf_config=dict(org="codellama", name="CodeLlama-70b-hf"), + block_size=16384, + vocab_size=32016, + padding_multiple=16, + n_layer=80, + n_head=64, + n_embd=8192, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + norm_eps=1e-05, + mlp_class_name="LLaMAMLP", + intermediate_size=28672, + rope_base=1000000, + ), + # https://huggingface.co/codellama/CodeLlama-7b-Python-hf/blob/main/config.json + dict( + name="CodeLlama-7b-Python-hf", + hf_config=dict(org="codellama", name="CodeLlama-7b-Python-hf"), + block_size=16384, + vocab_size=32000, + padded_vocab_size=32000, + n_layer=32, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + norm_eps=1e-05, + mlp_class_name="LLaMAMLP", + intermediate_size=11008, + rope_base=1000000, + ), + # https://huggingface.co/codellama/CodeLlama-13b-Python-hf/blob/main/config.json + dict( + name="CodeLlama-13b-Python-hf", + hf_config=dict(org="codellama", name="CodeLlama-13b-Python-hf"), + block_size=16384, + vocab_size=32000, + padded_vocab_size=32000, + n_layer=40, + n_head=40, + n_embd=5120, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + norm_eps=1e-05, + mlp_class_name="LLaMAMLP", + intermediate_size=13824, + rope_base=1000000, + ), + # https://huggingface.co/codellama/CodeLlama-34b-Python-hf/blob/main/config.json + dict( + name="CodeLlama-34b-Python-hf", + hf_config=dict(org="codellama", name="CodeLlama-34b-Python-hf"), + block_size=16384, + vocab_size=32000, + padded_vocab_size=32000, + n_layer=48, + n_head=64, + n_embd=8192, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + norm_eps=1e-05, + mlp_class_name="LLaMAMLP", + intermediate_size=22016, + rope_base=1000000, + ), + # https://huggingface.co/codellama/CodeLlama-70b-Python-hf/blob/main/config.json + dict( + name="CodeLlama-70b-Python-hf", + hf_config=dict(org="codellama", name="CodeLlama-70b-Python-hf"), + block_size=16384, + vocab_size=32016, + padding_multiple=16, + n_layer=80, + n_head=64, + n_embd=8192, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + norm_eps=1e-05, + mlp_class_name="LLaMAMLP", + intermediate_size=28672, + rope_base=1000000, + ), + # https://huggingface.co/codellama/CodeLlama-7b-Instruct-hf/blob/main/config.json + dict( + name="CodeLlama-7b-Instruct-hf", + hf_config=dict(org="codellama", name="CodeLlama-7b-Instruct-hf"), + block_size=16384, + vocab_size=32016, + padding_multiple=16, + n_layer=32, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + norm_eps=1e-05, + mlp_class_name="LLaMAMLP", + intermediate_size=11008, + rope_base=1000000, + ), + # https://huggingface.co/codellama/CodeLlama-13b-Instruct-hf/blob/main/config.json + dict( + name="CodeLlama-13b-Instruct-hf", + hf_config=dict(org="codellama", name="CodeLlama-13b-Instruct-hf"), + block_size=2048, + vocab_size=32016, + padding_multiple=16, + n_layer=40, + n_head=40, + n_embd=5120, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + norm_eps=1e-05, + mlp_class_name="LLaMAMLP", + intermediate_size=13824, + rope_base=1000000, + ), + # https://huggingface.co/codellama/CodeLlama-34b-Instruct-hf/blob/main/config.json + 
dict( + name="CodeLlama-34b-Instruct-hf", + hf_config=dict(org="codellama", name="CodeLlama-34b-Instruct-hf"), + block_size=16384, + vocab_size=32000, + padded_vocab_size=32000, + n_layer=48, + n_head=64, + n_embd=8192, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + norm_eps=1e-05, + mlp_class_name="LLaMAMLP", + intermediate_size=22016, + rope_base=1000000, + ), + # https://huggingface.co/codellama/CodeLlama-70b-Instruct-hf/blob/main/config.json + dict( + name="CodeLlama-70b-Instruct-hf", + hf_config=dict(org="codellama", name="CodeLlama-70b-Instruct-hf"), + block_size=16384, + # 32016 is an added token, so not reported in vocab_size + # https://huggingface.co/codellama/CodeLlama-70b-Instruct-hf/blob/main/tokenizer_config.json + vocab_size=32015, + padding_multiple=16, + n_layer=80, + n_head=64, + n_embd=8192, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + norm_eps=1e-05, + mlp_class_name="LLaMAMLP", + intermediate_size=28672, + rope_base=1000000, + ), +] +configs.extend(code_llama) + + +######################## +# garage-bAInd Platypus +######################## +platypus = [ + # https://huggingface.co/garage-bAInd/Platypus-30B/blob/main/config.json + dict( + name="Platypus-30B", + hf_config=dict(org="garage-bAInd", name="Platypus-30B"), + block_size=2048, + padded_vocab_size=32000, + n_layer=60, + n_head=52, + n_embd=6656, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + norm_eps=1e-06, + mlp_class_name="LLaMAMLP", + intermediate_size=17920, + ), + # https://huggingface.co/garage-bAInd/Platypus2-7B/blob/main/config.json + dict( + name="Platypus2-7B", + hf_config=dict(org="garage-bAInd", name="Platypus2-7B"), + padded_vocab_size=32000, + n_layer=32, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + norm_eps=1e-05, + mlp_class_name="LLaMAMLP", + intermediate_size=11008, + ), + # https://huggingface.co/garage-bAInd/Platypus2-13B/blob/main/config.json + dict( + name="Platypus2-13B", + hf_config=dict(org="garage-bAInd", name="Platypus2-13B"), + padded_vocab_size=32000, + n_layer=40, + n_head=40, + n_embd=5120, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + norm_eps=1e-05, + mlp_class_name="LLaMAMLP", + intermediate_size=13824, + ), + # https://huggingface.co/garage-bAInd/Platypus2-70B/blob/main/config.json + dict( + name="Platypus2-70B", + hf_config=dict(org="garage-bAInd", name="Platypus2-70B"), + padded_vocab_size=32000, + n_layer=80, + n_head=64, + n_embd=8192, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=28672, + ), + # https://huggingface.co/garage-bAInd/Camel-Platypus2-13B/blob/main/config.json + dict( + name="Camel-Platypus2-13B", + hf_config=dict(org="garage-bAInd", name="Camel-Platypus2-13B"), + padded_vocab_size=32000, + n_layer=40, + n_head=40, + n_embd=5120, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=13824, + ), + # https://huggingface.co/garage-bAInd/Camel-Platypus2-70B/blob/main/config.json + dict( + name="Camel-Platypus2-70B", + hf_config=dict(org="garage-bAInd", name="Camel-Platypus2-70B"), + padded_vocab_size=32000, + n_layer=80, + n_head=64, + n_embd=8192, + n_query_groups=8, + rotary_percentage=1.0, 
+ parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=28672, + ), + # https://huggingface.co/garage-bAInd/Stable-Platypus2-13B/blob/main/config.json + dict( + name="Stable-Platypus2-13B", + hf_config=dict(org="garage-bAInd", name="Stable-Platypus2-13B"), + padded_vocab_size=32000, + n_layer=40, + n_head=40, + n_embd=5120, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=13824, + ), + # https://huggingface.co/garage-bAInd/Platypus2-70B-instruct/blob/main/config.json + dict( + name="Platypus2-70B-instruct", + hf_config=dict(org="garage-bAInd", name="Platypus2-70B-instruct"), + padded_vocab_size=32000, + n_layer=80, + n_head=64, + n_embd=8192, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=28672, + ), +] +configs.extend(platypus) + + +################################## +# togethercomputer LLaMA-2-7B-32K +################################## +together_llama2_32k = [ + # https://huggingface.co/togethercomputer/LLaMA-2-7B-32K/blob/main/config.json + dict( + name="LLaMA-2-7B-32K", + hf_config=dict(org="togethercomputer", name="LLaMA-2-7B-32K"), + vocab_size=32000, + padding_multiple=64, + n_layer=32, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=11008, + rope_condense_ratio=8, + ) +] +configs.extend(together_llama2_32k) + + +################ +# Microsoft Phi +################ +phi = [ + # https://huggingface.co/microsoft/phi-1_5/blob/main/config.json + dict( + name="phi-1_5", + hf_config=dict(org="microsoft", name="phi-1_5"), + vocab_size=50257, + padded_vocab_size=51200, + block_size=2048, + n_embd=2048, + n_layer=24, + rotary_percentage=0.5, # 32 / (n_embd / n_head) = 32 / 64 + shared_attention_norm=True, + lm_head_bias=True, + gelu_approximate="tanh", + ), + # https://huggingface.co/microsoft/phi-2/blob/main/config.json + dict( + name="phi-2", + hf_config=dict(org="microsoft", name="phi-2"), + vocab_size=50257, + padded_vocab_size=51200, + block_size=2048, + n_embd=2560, + n_layer=32, + rotary_percentage=0.4, # 32 / (n_embd / n_head) = 32 / 80 + shared_attention_norm=True, + lm_head_bias=True, + gelu_approximate="tanh", + ), + # https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/config.json + dict( + name="Phi-3-mini-4k-instruct", + hf_config=dict(org="microsoft", name="Phi-3-mini-4k-instruct"), + vocab_size=32000, + padded_vocab_size=32064, + block_size=4096, + n_embd=3072, + n_layer=32, + rotary_percentage=1.0, + bias=False, + norm_class_name="RMSNorm", + intermediate_size=8192, + mlp_class_name="LLaMAMLP", + parallel_residual=False, + sliding_window_size=2048, + ), + # https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/blob/main/config.json + dict( + name="Phi-3-mini-128k-instruct", + hf_config=dict(org="microsoft", name="Phi-3-mini-128k-instruct"), + vocab_size=32000, + padded_vocab_size=32064, + block_size=131072, + n_embd=3072, + n_layer=32, + rotary_percentage=1.0, + bias=False, + norm_class_name="RMSNorm", + intermediate_size=8192, + mlp_class_name="LLaMAMLP", + parallel_residual=False, + sliding_window_size=262145, + ), + # https://huggingface.co/microsoft/Phi-3.5-mini-instruct/blob/main/config.json + dict( + name="Phi-3.5-mini-instruct", + hf_config=dict(org="microsoft", 
name="Phi-3.5-mini-instruct"), + vocab_size=32000, + padded_vocab_size=32064, + block_size=4096, + n_embd=3072, + n_layer=32, + rotary_percentage=1.0, + bias=False, + norm_class_name="RMSNorm", + intermediate_size=8192, + mlp_class_name="LLaMAMLP", + parallel_residual=False, + ), + # https://huggingface.co/microsoft/phi-4/blob/main/config.json + dict( + name="phi-4", + hf_config=dict(org="microsoft", name="phi-4"), + vocab_size=100352, + padded_vocab_size=100352, + block_size=16384, + n_embd=5120, + n_layer=40, + n_head=40, + n_query_groups=10, + rotary_percentage=1.0, + bias=False, + norm_class_name="RMSNorm", + intermediate_size=17920, + rope_base=250000, + mlp_class_name="LLaMAMLP", + parallel_residual=False, + ), + # https://huggingface.co/microsoft/Phi-4-reasoning/blob/main/config.json + dict( + name="Phi-4-reasoning", + hf_config=dict(org="microsoft", name="Phi-4-reasoning"), + vocab_size=100352, + padded_vocab_size=100352, + block_size=32768, + n_embd=5120, + n_layer=40, + n_head=40, + n_query_groups=10, + rotary_percentage=1.0, + bias=False, + norm_class_name="RMSNorm", + intermediate_size=17920, + rope_base=500000, + mlp_class_name="LLaMAMLP", + parallel_residual=False, + ), + # https://huggingface.co/microsoft/Phi-4-reasoning-plus/blob/main/config.json + dict( + name="Phi-4-reasoning-plus", + hf_config=dict(org="microsoft", name="Phi-4-reasoning-plus"), + vocab_size=100352, + padded_vocab_size=100352, + block_size=32768, + n_embd=5120, + n_layer=40, + n_head=40, + n_query_groups=10, + rotary_percentage=1.0, + bias=False, + norm_class_name="RMSNorm", + intermediate_size=17920, + rope_base=500000, + mlp_class_name="LLaMAMLP", + parallel_residual=False, + ), + # https://huggingface.co/microsoft/Phi-4-mini-instruct/blob/main/config.json + dict( + name="Phi-4-mini-instruct", + hf_config=dict(org="microsoft", name="Phi-4-mini-instruct"), + vocab_size=200019, + padded_vocab_size=200064, + block_size=131072, + n_embd=3072, + n_layer=32, + n_head=24, + n_query_groups=8, + rotary_percentage=0.75, + bias=False, + norm_class_name="RMSNorm", + intermediate_size=8192, + mlp_class_name="LLaMAMLP", + parallel_residual=False, + sliding_window_size=262145, + ), + # https://huggingface.co/microsoft/Phi-4-mini-reasoning/blob/main/config.json + dict( + name="Phi-4-mini-reasoning", + hf_config=dict(org="microsoft", name="Phi-4-mini-reasoning"), + vocab_size=200019, + padded_vocab_size=200064, + block_size=131072, + n_embd=3072, + n_layer=32, + n_head=24, + n_query_groups=8, + rotary_percentage=0.75, + bias=False, + norm_class_name="RMSNorm", + intermediate_size=8192, + mlp_class_name="LLaMAMLP", + parallel_residual=False, + sliding_window_size=262145, + ), +] +configs.extend(phi) + + +############# +# Mistral AI +############# + +configs.append( + # https://huggingface.co/mistralai/mathstral-7B-v0.1/blob/main/config.json + dict( + name="Mathstral-7B-v0.1", + hf_config=dict(org="mistralai", name="mathstral-7B-v0.1"), + padded_vocab_size=32768, + block_size=32768, + n_layer=32, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + norm_eps=1e-05, + mlp_class_name="LLaMAMLP", + intermediate_size=14336, + sliding_window_size=4096, + ) +) + +mistral = [ + # https://huggingface.co/mistralai/Mistral-7B-v0.1/blob/main/config.json + dict( + name="Mistral-7B-{}v0.1", + hf_config=dict(org="mistralai", name="Mistral-7B-{}v0.1"), + padded_vocab_size=32000, + block_size=4096, # should be 32768 but sliding window attention is not implemented + 
n_layer=32, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + norm_eps=1e-05, + mlp_class_name="LLaMAMLP", + intermediate_size=14336, + sliding_window_size=4096, + ), + # https://huggingface.co/mistralai/Mixtral-8x7B-v0.1/blob/main/config.json + dict( + name="Mixtral-8x7B-{}v0.1", + hf_config=dict(org="mistralai", name="Mixtral-8x7B-{}v0.1"), + padded_vocab_size=32000, + block_size=32768, + n_layer=32, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + norm_eps=1e-05, + mlp_class_name="LLaMAMoE", + intermediate_size=14336, + rope_base=1000000, + n_expert=8, + n_expert_per_token=2, + ), + # https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1/blob/main/config.json + dict( + name="Mixtral-8x22B-{}v0.1", + hf_config=dict(org="mistralai", name="Mixtral-8x22B-{}v0.1"), + padded_vocab_size=32768, + block_size=65536, + n_layer=56, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + norm_eps=1e-05, + mlp_class_name="LLaMAMoE", + intermediate_size=16384, + n_head=48, + n_embd=6144, + rope_base=1000000, + n_expert=8, + n_expert_per_token=2, + ), +] +for c in mistral: + for kind in ("", "Instruct-"): + copy = deepcopy(c) + copy["name"] = c["name"].format(kind) + copy["hf_config"]["name"] = c["hf_config"]["name"].format(kind) + configs.append(copy) +configs.append( + # https://huggingface.co/unsloth/mistral-7b-v0.2/blob/main/config.json + dict( + name="Mistral-7B-v0.2", + hf_config=dict(org="unsloth", name="Mistral-7B-v0.2"), + padded_vocab_size=32000, + block_size=32768, + n_layer=32, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + norm_eps=1e-05, + mlp_class_name="LLaMAMLP", + intermediate_size=14336, + ) +) +configs.append( + # https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/blob/main/config.json + dict( + name="Mistral-7B-Instruct-v0.2", + hf_config=dict(org="mistralai", name="Mistral-7B-Instruct-v0.2"), + padded_vocab_size=32000, + block_size=32768, + n_layer=32, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + norm_eps=1e-05, + mlp_class_name="LLaMAMLP", + intermediate_size=14336, + ) +) +configs.append( + # https://huggingface.co/mistralai/Mistral-7B-v0.3/blob/main/config.json + dict( + name="Mistral-7B-v0.3", + hf_config=dict(org="mistralai", name="Mistral-7B-v0.3"), + padded_vocab_size=32768, + block_size=32768, + n_layer=32, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + norm_eps=1e-05, + mlp_class_name="LLaMAMLP", + intermediate_size=14336, + ) +) +configs.append( + # https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3/blob/main/config.json + dict( + name="Mistral-7B-Instruct-v0.3", + hf_config=dict(org="mistralai", name="Mistral-7B-Instruct-v0.3"), + padded_vocab_size=32768, + block_size=32768, + n_layer=32, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + norm_eps=1e-05, + mlp_class_name="LLaMAMLP", + intermediate_size=14336, + ) +) +configs.append( + # https://huggingface.co/mistralai/Mistral-Large-Instruct-2407/blob/main/config.json + dict( + name="Mistral-Large-Instruct-2407", + hf_config=dict(org="mistralai", name="Mistral-Large-Instruct-2407"), + padded_vocab_size=32768, + block_size=32768, + 
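
Editorial aside: the Mixtral entries above are the first in this block to combine `mlp_class_name="LLaMAMoE"` with `n_expert` and `n_expert_per_token`. A toy sketch of what those two numbers control (top-k routing over a pool of experts); this is generic mixture-of-experts routing for illustration, not litgpt's `LLaMAMoE`:

    import torch

    n_expert, n_expert_per_token = 8, 2           # Mixtral-style: 8 experts, 2 active per token
    router_logits = torch.randn(4, n_expert)      # router scores for a batch of 4 tokens
    topk_logits, chosen_experts = torch.topk(router_logits, n_expert_per_token, dim=-1)
    mixing_weights = torch.softmax(topk_logits, dim=-1)
    # Each token's MLP output is the weighted sum of its chosen experts' outputs;
    # the remaining experts are skipped for that token, keeping the active
    # parameter count far below the total parameter count.
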
n_layer=88, + n_head=96, + n_embd=12288, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + norm_eps=1e-05, + mlp_class_name="LLaMAMLP", + intermediate_size=28672, + ) +) +configs.append( + # https://huggingface.co/mistralai/Mistral-Large-Instruct-2411/blob/main/config.json + dict( + name="Mistral-Large-Instruct-2411", + hf_config=dict(org="mistralai", name="Mistral-Large-Instruct-2411"), + padded_vocab_size=32768, + block_size=32768, + n_layer=88, + n_head=96, + n_embd=12288, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + norm_eps=1e-05, + mlp_class_name="LLaMAMLP", + intermediate_size=28672, + ) +) + + +############ +# TinyLlama +############ +tiny_llama = [ + dict( + name="tiny-llama-1.1b{}", + hf_config=dict(org="TinyLlama", name="TinyLlama-1.1B{}"), + block_size=2048, + vocab_size=32000, + padding_multiple=64, + n_layer=22, + n_head=32, + n_embd=2048, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", # original TinyLlama use FusedRMSNorm + norm_eps=1e-5, + mlp_class_name="LLaMAMLP", + intermediate_size=5632, + n_query_groups=4, + ) +] +for c in tiny_llama: + for kind, hf_postfix in (("", "-intermediate-step-1431k-3T"), ("-chat", "-Chat-v1.0")): + copy = deepcopy(c) + copy["name"] = c["name"].format(kind) + copy["hf_config"]["name"] = c["hf_config"]["name"].format(hf_postfix) + configs.append(copy) + + +############ +# MicroLlama +############ +micro_llama = [ + dict( + name="micro-llama-300M", + hf_config=dict(org="keeeeenw", name="MicroLlama"), + block_size=2048, + vocab_size=32000, + padding_multiple=64, + n_layer=12, + n_head=16, + n_embd=1024, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", # original TinyLlama and MicroLlama use FusedRMSNorm + norm_eps=1e-5, + mlp_class_name="LLaMAMLP", + intermediate_size=5632, + n_query_groups=4, + ) +] +configs.extend(micro_llama) + + +########################## +# Trelis Function Calling +########################## +llama_2_function_calling = [ + # https://huggingface.co/Trelis/Llama-2-7b-chat-hf-function-calling-v2/blob/main/config.json + dict( + name="Llama-2-7b-chat-hf-function-calling-v2", + hf_config=dict(org="Trelis", name="Llama-2-7b-chat-hf-function-calling-v2"), + padding_multiple=64, + n_layer=32, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=11008, + norm_eps=1e-6, + block_size=4096, + vocab_size=32000, + n_head=32, + n_embd=4096, + rope_base=10000, + ) +] + +configs.extend(llama_2_function_calling) + +########################## +# Qwen2 +########################## +qwen_2 = [ + # https://huggingface.co/Trelis/Llama-2-7b-chat-hf-function-calling-v2/blob/main/config.json + dict( + name="Qwen2-7B", + hf_config=dict(org="Qwen", name="Qwen2-7B"), + block_size=131072, + vocab_size=151643, + padded_vocab_size=152064, + n_layer=28, + n_head=28, + n_embd=3584, + n_query_groups=4, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + attn_bias=True, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=18944, + norm_eps=1e-6, + rope_base=1000000, + ), +dict( + name="Qwen2-0.5B", + hf_config=dict(org="Qwen", name="Qwen2-0.5B"), + block_size=32768, + vocab_size=151643, + padded_vocab_size=151936, + n_layer=24, + n_head=14, + n_embd=896, + n_query_groups=2, + rotary_percentage=1.0, + 
parallel_residual=False, + bias=False, + attn_bias=True, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=4864, + norm_eps=1e-6, + rope_base=1000000, + ), +] + +configs.extend(qwen_2) + +########## +# Qwen2.5 +########## +qwen_2_5 = [ + # https://huggingface.co/Qwen/Qwen2.5-0.5B/blob/main/config.json + dict( + name="Qwen2.5-0.5B{}", + hf_config=dict(org="Qwen", name="Qwen2.5-0.5B{}"), + block_size=32768, + vocab_size=151643, + padded_vocab_size=151936, + n_layer=24, + n_head=14, + n_embd=896, + n_query_groups=2, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + attn_bias=True, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=4864, + norm_eps=1e-6, + rope_base=1000000, + ), + # https://huggingface.co/Qwen/Qwen2.5-1.5B/blob/main/config.json + dict( + name="Qwen2.5-1.5B{}", + hf_config=dict(org="Qwen", name="Qwen2.5-1.5B{}"), + block_size=131072, + vocab_size=151643, + padded_vocab_size=151936, + n_layer=28, + n_head=12, + n_embd=1536, + n_query_groups=2, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + attn_bias=True, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=8960, + norm_eps=1e-6, + rope_base=1000000, + ), + # https://huggingface.co/Qwen/Qwen2.5-3B/blob/main/config.json + dict( + name="Qwen2.5-3B{}", + hf_config=dict(org="Qwen", name="Qwen2.5-3B{}"), + block_size=32768, + vocab_size=151643, + padded_vocab_size=151936, + n_layer=36, + n_head=16, + n_embd=2048, + n_query_groups=2, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + attn_bias=True, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=11008, + norm_eps=1e-6, + rope_base=1000000, + ), + # https://huggingface.co/Qwen/Qwen2.5-7B/blob/main/config.json + dict( + name="Qwen2.5-7B{}", + hf_config=dict(org="Qwen", name="Qwen2.5-7B{}"), + block_size=131072, + vocab_size=151643, + padded_vocab_size=152064, + n_layer=28, + n_head=28, + n_embd=3584, + n_query_groups=4, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + attn_bias=True, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=18944, + norm_eps=1e-6, + rope_base=1000000, + ), + # https://huggingface.co/Qwen/Qwen2.5-14B/blob/main/config.json + dict( + name="Qwen2.5-14B{}", + hf_config=dict(org="Qwen", name="Qwen2.5-14B{}"), + block_size=131072, + vocab_size=151643, + padded_vocab_size=152064, + n_layer=48, + n_head=40, + n_embd=5120, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + attn_bias=True, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=13824, + norm_eps=1e-5, + rope_base=1000000, + ), + # https://huggingface.co/Qwen/Qwen2.5-32B/blob/main/config.json + dict( + name="Qwen2.5-32B{}", + hf_config=dict(org="Qwen", name="Qwen2.5-32B{}"), + block_size=131072, + vocab_size=151643, + padded_vocab_size=152064, + n_layer=64, + n_head=40, + n_embd=5120, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + attn_bias=True, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=27648, + norm_eps=1e-5, + rope_base=1000000, + ), + # https://huggingface.co/Qwen/Qwen2.5-72B/blob/main/config.json + dict( + name="Qwen2.5-72B{}", + hf_config=dict(org="Qwen", name="Qwen2.5-72B{}"), + block_size=131072, + vocab_size=151643, + padded_vocab_size=152064, + n_layer=80, + n_head=64, + n_embd=8192, + n_query_groups=8, + rotary_percentage=1.0, + 
parallel_residual=False, + bias=False, + attn_bias=True, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=29568, + norm_eps=1e-5, + rope_base=1000000, + ), +] + +qwen_2_5_coder = [ + # https://huggingface.co/Qwen/Qwen2.5-Coder-0.5B/blob/main/config.json + dict( + name="Qwen2.5-Coder-0.5B{}", + hf_config=dict(org="Qwen", name="Qwen2.5-Coder-0.5B{}"), + block_size=32768, + vocab_size=151643, + padded_vocab_size=151936, + n_layer=24, + n_head=14, + n_embd=896, + n_query_groups=2, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + attn_bias=True, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=4864, + norm_eps=1e-6, + rope_base=1000000, + ), + # https://huggingface.co/Qwen/Qwen2.5-Coder-1.5B/blob/main/config.json + dict( + name="Qwen2.5-Coder-1.5B{}", + hf_config=dict(org="Qwen", name="Qwen2.5-Coder-1.5B{}"), + block_size=32768, + vocab_size=151643, + padded_vocab_size=151936, + n_layer=28, + n_head=12, + n_embd=1536, + n_query_groups=2, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + attn_bias=True, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=8960, + norm_eps=1e-6, + rope_base=1000000, + ), + # https://huggingface.co/Qwen/Qwen2.5-Coder-3B/blob/main/config.json + dict( + name="Qwen2.5-Coder-3B{}", + hf_config=dict(org="Qwen", name="Qwen2.5-Coder-3B{}"), + block_size=32768, + vocab_size=151643, + padded_vocab_size=151936, + n_layer=36, + n_head=16, + n_embd=2048, + n_query_groups=2, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + attn_bias=True, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=11008, + norm_eps=1e-6, + rope_base=1000000, + ), + # https://huggingface.co/Qwen/Qwen2.5-Coder-7B/blob/main/config.json + dict( + name="Qwen2.5-Coder-7B{}", + hf_config=dict(org="Qwen", name="Qwen2.5-Coder-7B{}"), + block_size=32768, + vocab_size=151643, + padded_vocab_size=152064, + n_layer=28, + n_head=28, + n_embd=3584, + n_query_groups=4, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + attn_bias=True, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=18944, + norm_eps=1e-6, + rope_base=1000000, + ), + # https://huggingface.co/Qwen/Qwen2.5-Coder-14B/blob/main/config.json + dict( + name="Qwen2.5-Coder-14B{}", + hf_config=dict(org="Qwen", name="Qwen2.5-Coder-14B{}"), + block_size=32768, + vocab_size=151643, + padded_vocab_size=152064, + n_layer=48, + n_head=40, + n_embd=5120, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + attn_bias=True, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=13824, + norm_eps=1e-5, + rope_base=1000000, + ), + # https://huggingface.co/Qwen/Qwen2.5-Coder-32B/blob/main/config.json + dict( + name="Qwen2.5-Coder-32B{}", + hf_config=dict(org="Qwen", name="Qwen2.5-Coder-32B{}"), + block_size=32768, + vocab_size=151643, + padded_vocab_size=152064, + n_layer=64, + n_head=40, + n_embd=5120, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + attn_bias=True, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=27648, + norm_eps=1e-5, + rope_base=1000000, + ), +] + +qwen_2_5.extend(qwen_2_5_coder) + +qwen_2_5_math = [ + # https://huggingface.co/Qwen/Qwen2.5-Math-1.5B/blob/main/config.json + dict( + name="Qwen2.5-Math-1.5B{}", + hf_config=dict(org="Qwen", name="Qwen2.5-Math-1.5B{}"), + block_size=4096, + vocab_size=151643, + 
padded_vocab_size=151936, + n_layer=28, + n_head=12, + n_embd=1536, + n_query_groups=2, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + attn_bias=True, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=8960, + norm_eps=1e-6, + rope_base=10000, + ), + # https://huggingface.co/Qwen/Qwen2.5-Math-7B/blob/main/config.json + dict( + name="Qwen2.5-Math-7B{}", + hf_config=dict(org="Qwen", name="Qwen2.5-Math-7B{}"), + block_size=4096, + vocab_size=151643, + padded_vocab_size=152064, + n_layer=28, + n_head=28, + n_embd=3584, + n_query_groups=4, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + attn_bias=True, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=18944, + norm_eps=1e-6, + rope_base=10000, + ), + # https://huggingface.co/Qwen/Qwen2.5-Math-72B/blob/main/config.json + dict( + name="Qwen2.5-Math-72B{}", + hf_config=dict(org="Qwen", name="Qwen2.5-Math-72B{}"), + block_size=4096, + vocab_size=151643, + padded_vocab_size=152064, + n_layer=80, + n_head=64, + n_embd=8192, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + attn_bias=True, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=29568, + norm_eps=1e-5, + rope_base=10000, + ), +] + +qwen_2_5.extend(qwen_2_5_math) + +for c in qwen_2_5: + for kind in ("", "-Instruct"): + copy = deepcopy(c) + copy["name"] = c["name"].format(kind) + copy["hf_config"]["name"] = c["hf_config"]["name"].format(kind) + configs.append(copy) + +qwen_2_5_1m = [ + # https://huggingface.co/Qwen/Qwen2.5-7B-Instruct-1M/blob/main/config.json + dict( + name="Qwen2.5-7B-Instruct-1M", + hf_config=dict(org="Qwen", name="Qwen2.5-7B-Instruct-1M"), + block_size=1010000, + vocab_size=151643, + padded_vocab_size=152064, + n_layer=28, + n_head=28, + n_embd=3584, + n_query_groups=4, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + attn_bias=True, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=18944, + norm_eps=1e-5, + rope_base=10000000, + ), + # https://huggingface.co/Qwen/Qwen2.5-14B-Instruct-1M/blob/main/config.json + dict( + name="Qwen2.5-14B-Instruct-1M", + hf_config=dict(org="Qwen", name="Qwen2.5-14B-Instruct-1M"), + block_size=1010000, + vocab_size=151643, + padded_vocab_size=152064, + n_layer=48, + n_head=40, + n_embd=5120, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + attn_bias=True, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=13824, + norm_eps=1e-5, + rope_base=10000000, + ), +] + +configs.extend(qwen_2_5_1m) + +########## +# QwQ +########## +qwq = [ + # https://huggingface.co/Qwen/QwQ-32B/blob/main/config.json + dict( + name="QwQ-32B", + hf_config=dict(org="Qwen", name="QwQ-32B"), + block_size=131072, + vocab_size=151643, + padded_vocab_size=152064, + n_layer=64, + n_head=40, + n_embd=5120, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + attn_bias=True, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=27648, + norm_eps=1e-5, + rope_base=1000000, + ), + # https://huggingface.co/Qwen/QwQ-32B-Preview/blob/main/config.json + dict( + name="QwQ-32B-Preview", + hf_config=dict(org="Qwen", name="QwQ-32B-Preview"), + block_size=32768, + vocab_size=151643, + padded_vocab_size=152064, + n_layer=64, + n_head=40, + n_embd=5120, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + attn_bias=True, + 
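
Editorial aside: all of the Qwen 2.5 / Coder / Math entries above are written as name templates with a `{}` placeholder and then expanded by the small loop that follows them. For clarity, this is what that expansion produces for one entry — the snippet simply mirrors the loop in this file:

    from copy import deepcopy

    template = dict(name="Qwen2.5-0.5B{}", hf_config=dict(org="Qwen", name="Qwen2.5-0.5B{}"))
    expanded = []
    for kind in ("", "-Instruct"):
        variant = deepcopy(template)
        variant["name"] = template["name"].format(kind)
        variant["hf_config"]["name"] = template["hf_config"]["name"].format(kind)
        expanded.append(variant)
    # -> names "Qwen2.5-0.5B" and "Qwen2.5-0.5B-Instruct", each a separate registry entry
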
norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=27648, + norm_eps=1e-5, + rope_base=1000000, + ), +] + +configs.extend(qwq) + +########## +# Qwen3 +########## +qwen_3 = [ + # https://huggingface.co/Qwen/Qwen3-0.6B/blob/main/config.json + dict( + name="Qwen3-0.6B{}", + hf_config=dict(org="Qwen", name="Qwen3-0.6B{}"), + block_size=40960, + vocab_size=151643, + padded_vocab_size=151936, + n_layer=28, + n_head=16, + n_embd=1024, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=3072, + norm_eps=1e-6, + rope_base=1000000, + head_size=128, + norm_qk=True, + ), + # https://huggingface.co/Qwen/Qwen3-1.7B/blob/main/config.json + dict( + name="Qwen3-1.7B{}", + hf_config=dict(org="Qwen", name="Qwen3-1.7B{}"), + block_size=40960, + vocab_size=151643, + padded_vocab_size=151936, + n_layer=28, + n_head=16, + n_embd=2048, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=6144, + norm_eps=1e-6, + rope_base=1000000, + norm_qk=True, + ), + # https://huggingface.co/Qwen/Qwen3-4B/blob/main/config.json + dict( + name="Qwen3-4B{}", + hf_config=dict(org="Qwen", name="Qwen3-4B{}"), + block_size=40960, + vocab_size=151643, + padded_vocab_size=151936, + n_layer=36, + n_head=32, + n_embd=2560, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=9728, + norm_eps=1e-6, + rope_base=1000000, + head_size=128, + norm_qk=True, + ), + # https://huggingface.co/Qwen/Qwen3-8B/blob/main/config.json + dict( + name="Qwen3-8B{}", + hf_config=dict(org="Qwen", name="Qwen3-8B{}"), + block_size=40960, + vocab_size=151643, + padded_vocab_size=151936, + n_layer=36, + n_head=32, + n_embd=4096, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=12288, + norm_eps=1e-6, + rope_base=1000000, + norm_qk=True, + ), + # https://huggingface.co/Qwen/Qwen3-14B/blob/main/config.json + dict( + name="Qwen3-14B{}", + hf_config=dict(org="Qwen", name="Qwen3-14B{}"), + block_size=40960, + vocab_size=151643, + padded_vocab_size=151936, + n_layer=40, + n_head=40, + n_embd=5120, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=17408, + norm_eps=1e-6, + rope_base=1000000, + norm_qk=True, + ), +] +for c in qwen_3: + for kind in ("", "-Base"): + copy = deepcopy(c) + copy["name"] = c["name"].format(kind) + copy["hf_config"]["name"] = c["hf_config"]["name"].format(kind) + configs.append(copy) +qwen_3_32b = [ + # https://huggingface.co/Qwen/Qwen3-32B/blob/main/config.json + dict( + name="Qwen3-32B", + hf_config=dict(org="Qwen", name="Qwen3-32B"), + block_size=40960, + vocab_size=151643, + padded_vocab_size=151936, + n_layer=64, + n_head=64, + n_embd=5120, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=25600, + norm_eps=1e-6, + rope_base=1000000, + head_size=128, + norm_qk=True, + ), +] +configs.extend(qwen_3_32b) + +qwen_3_moe = [ + # https://huggingface.co/Qwen/Qwen3-30B-A3B/blob/main/config.json + dict( + name="Qwen3-30B-A3B", + hf_config=dict(org="Qwen", 
name="Qwen3-30B-A3B"), + block_size=40960, + head_size=128, + vocab_size=151643, + padded_vocab_size=151936, + n_layer=48, + n_head=32, + n_embd=2048, + n_query_groups=4, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMoE", + intermediate_size=6144, + moe_intermediate_size=768, + norm_eps=1e-6, + rope_base=1000000, + norm_qk=True, + n_expert=128, + n_expert_per_token=8, + ), + # https://huggingface.co/Qwen/Qwen3-30B-A3B-Base/blob/main/config.json + dict( + name="Qwen3-30B-A3B-Base", + hf_config=dict(org="Qwen", name="Qwen3-30B-A3B-Base"), + block_size=40960, + head_size=128, + vocab_size=151643, + padded_vocab_size=151936, + n_layer=48, + n_head=32, + n_embd=2048, + n_query_groups=4, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMoE", + intermediate_size=6144, + moe_intermediate_size=768, + norm_eps=1e-6, + rope_base=1000000, + norm_qk=True, + n_expert=128, + n_expert_per_token=8, + ), + # https://huggingface.co/Qwen/Qwen3-235B-A22B/blob/main/config.json + dict( + name="Qwen3-235B-A22B", + hf_config=dict(org="Qwen", name="Qwen3-235B-A22B"), + block_size=40960, + head_size=128, + vocab_size=151643, + padded_vocab_size=151936, + n_layer=94, + n_head=64, + n_embd=4096, + n_query_groups=4, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMoE", + intermediate_size=12288, + moe_intermediate_size=1536, + norm_eps=1e-6, + rope_base=1000000, + norm_qk=True, + n_expert=128, + n_expert_per_token=8, + ), +] +configs.extend(qwen_3_moe) + + +############# +# Salamandra +############# +salamandra = [ + # https://huggingface.co/BSC-LT/salamandra-2b-instruct/blob/main/config.json + dict( + name="salamandra-2b{}", + hf_config=dict(org="BSC-LT", name="salamandra-2b{}"), + block_size=8192, + vocab_size=256000, + padded_vocab_size=256000, + n_layer=24, + n_head=16, + n_embd=2048, + n_query_groups=16, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=5440, + norm_eps=1e-5, + rope_base=10000, + ), + # https://huggingface.co/BSC-LT/salamandra-7b-instruct/blob/main/config.json + dict( + name="salamandra-7b{}", + hf_config=dict(org="BSC-LT", name="salamandra-7b{}"), + block_size=8192, + vocab_size=256000, + padded_vocab_size=256000, + n_layer=32, + n_head=32, + n_embd=4096, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=11008, + norm_eps=1e-6, + rope_base=10000, + ), +] + +for c in salamandra: + for kind in ("", "-instruct"): + copy = deepcopy(c) + copy["name"] = c["name"].format(kind) + copy["hf_config"]["name"] = c["hf_config"]["name"].format(kind) + configs.append(copy) + + +############### +# SmolLM2 +############### +smollm2 = [ + # https://huggingface.co/HuggingFaceTB/SmolLM2-135M/blob/main/config.json + dict( + name="SmolLM2-135M{}", + hf_config=dict(org="HuggingFaceTB", name="SmolLM2-135M{}"), + block_size=8192, + vocab_size=49152, + padded_vocab_size=49152, + n_layer=30, + n_head=9, + n_embd=576, + n_query_groups=3, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=1536, + rope_base=100000, + norm_eps=1e-5, + ), + # https://huggingface.co/HuggingFaceTB/SmolLM2-360M/blob/main/config.json + dict( + 
name="SmolLM2-360M{}", + hf_config=dict(org="HuggingFaceTB", name="SmolLM2-360M{}"), + block_size=8192, + vocab_size=49152, + padded_vocab_size=49152, + n_layer=32, + n_head=15, + n_embd=960, + n_query_groups=5, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=2560, + rope_base=100000, + norm_eps=1e-5, + ), + # https://huggingface.co/HuggingFaceTB/SmolLM2-1.7B/blob/main/config.json + dict( + name="SmolLM2-1.7B{}", + hf_config=dict(org="HuggingFaceTB", name="SmolLM2-1.7B{}"), + block_size=8192, + vocab_size=49152, + padded_vocab_size=49152, + n_layer=24, + n_head=32, + n_embd=2048, + n_query_groups=32, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=8192, + rope_base=130000, + norm_eps=1e-5, + ), +] + +for c in smollm2: + for kind in ("", "-Instruct"): + copy = deepcopy(c) + copy["name"] = c["name"].format(kind) + copy["hf_config"]["name"] = c["hf_config"]["name"].format(kind) + configs.append(copy) + +############### +# DeepSeek R1 Distill +############### + +r1_distill_llama = [ + # https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-8B/blob/main/config.json + dict( + name="R1-Distill-Llama-8B", + hf_config=dict(org="deepseek-ai", name="DeepSeek-R1-Distill-Llama-8B"), + block_size=131072, + vocab_size=128000, + padded_vocab_size=128256, + n_layer=32, + n_head=32, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=14336, + rope_base=500000, + rope_adjustments=dict(factor=8.0, low_freq_factor=1.0, high_freq_factor=4.0, original_max_seq_len=8192), + ), + # https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-70B/blob/main/config.json + dict( + name="R1-Distill-Llama-70B", + hf_config=dict(org="deepseek-ai", name="DeepSeek-R1-Distill-Llama-70B"), + block_size=131072, + vocab_size=128000, + padded_vocab_size=128256, + n_layer=80, + n_head=64, + n_embd=8192, + n_query_groups=8, + rotary_percentage=1.0, + parallel_residual=False, + bias=False, + norm_class_name="RMSNorm", + mlp_class_name="LLaMAMLP", + intermediate_size=28672, + rope_base=500000, + rope_adjustments=dict(factor=8.0, low_freq_factor=1.0, high_freq_factor=4.0, original_max_seq_len=8192), + ), +] + +configs.extend(r1_distill_llama) + +name_to_config = {config["name"]: config for config in configs} diff --git a/litgpt/lora.py b/litgpt/lora.py new file mode 100644 index 0000000000000000000000000000000000000000..6739b5b040f1bf3b5b1350889fafb12ebbf0eee3 --- /dev/null +++ b/litgpt/lora.py @@ -0,0 +1,662 @@ +# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file. + +# Derived from https://github.com/microsoft/LoRA +# ------------------------------------------------------------------------------------------ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information. +# ------------------------------------------------------------------------------------------ + +r""" + Low Ranking Adaptation for LLMs scheme. 
+ + ┌───────────────────┐ + ┆ h ┆ + └───────────────────┘ + ▲ + | + + + / \ + ┌─────────────────┐ ╭───────────────╮ Matrix initialization: + ┆ ┆ \ B / B = 0 + ┆ pretrained ┆ \ r*d / A = N(0, sigma^2) + ┆ weights ┆ ╰─────────╯ + ┆ ┆ | r | r - rank + ┆ W e R^(d*d) ┆ | ◀─────▶ | + ┆ ┆ ╭─────────╮ + └─────────────────┘ / A \ + ▲ / d*r \ + \ ╰───────────────╯ + \ ▲ + \ / + \ / + ┌───────────────────┐ + ┆ x ┆ + └───────────────────┘ + +With LoRA (Low Ranking Adaptation: https://arxiv.org/abs/2106.09685) instead of learning weights of size d*d, +we can freeze the pretrained weights and instead learn two matrices of size d*r and r*d (they will store weight updates +for the pretrained weights): the number of parameters in this case will be reduced drastically (depending on the rank of +course) yet after multiplication of matrices d*r and r*d we will get a matrix d*d which we can sum with frozen +pretrained weights and thus fine-tune the model. + +The goal of this approach is to move weight updates into a separate matrix which is decomposed with +two matrices of a lower rank. +""" + +import math +from dataclasses import dataclass +from typing import Any, Dict, Optional, Tuple, Type, Union + +import torch +import torch.nn as nn +from torch.nn import functional as F +from typing_extensions import Self + +import litgpt +from litgpt.config import Config as BaseConfig +from litgpt.model import GPT as BaseModel +from litgpt.model import Block as BaseBlock +from litgpt.model import CausalSelfAttention as BaseCausalSelfAttention +from litgpt.scripts.convert_hf_checkpoint import qkv_reassemble +from litgpt.utils import map_old_state_dict_weights + + +class LoRALayer(nn.Module): + def __init__(self, r: int, lora_alpha: int, lora_dropout: float): + """Store LoRA specific attributes in a class. + + Args: + r: rank of the weight update matrices. To make sense of using LoRA the rank should be smaller than the rank of + the weights of the model. The rank can be as low as 1: https://arxiv.org/pdf/2106.09685.pdf (section 7.2) + lora_alpha: alpha is needed for scaling updates as alpha/r + "This scaling helps to reduce the need to retune hyperparameters when we vary r" + https://arxiv.org/pdf/2106.09685.pdf (section 4.1) + lora_dropout: dropout that is applied on the input in the LoRA branch (before multiplying by matrix A) + """ + super().__init__() + assert r >= 0 + self.r = r + self.lora_alpha = lora_alpha + # Optional dropout + if lora_dropout > 0.0: + self.lora_dropout = nn.Dropout(p=lora_dropout) + else: + self.lora_dropout = lambda x: x + # Mark the weight as unmerged + self.merged = False + + +class LoRALinear(LoRALayer): + # LoRA implemented in a dense layer + def __init__( + self, + # ↓ this part is for pretrained weights + in_features: int, + out_features: int, + # ↓ the remaining part is for LoRA + r: int = 0, + lora_alpha: int = 1, + lora_dropout: float = 0.0, + **kwargs: Any, + ): + """LoRA wrapper around linear class. + + This class has three weight matrices: + 1. Pretrained weights are stored as `self.linear.weight` + 2. LoRA A matrix as `self.lora_A` + 3. LoRA B matrix as `self.lora_B` + Only LoRA's A and B matrices are updated, pretrained weights stay frozen. + + Args: + in_features: number of input features of the pretrained weights + out_features: number of output features of the pretrained weights + r: rank of the weight update matrices. To make sense of using LoRA the rank should be smaller than the rank of + the weights of the model. 
The rank can be as low as 1: https://arxiv.org/pdf/2106.09685.pdf (section 7.2) + lora_alpha: alpha is needed for scaling updates as alpha/r + "This scaling helps to reduce the need to retune hyperparameters when we vary r" + https://arxiv.org/pdf/2106.09685.pdf (section 4.1) + lora_dropout: dropout that is applied on the input in the LoRA branch (before multiplying by matrix A) + """ + super().__init__(r=r, lora_alpha=lora_alpha, lora_dropout=lora_dropout) + self.linear = torch.nn.Linear(in_features, out_features, **kwargs) + + # Actual trainable parameters + if r > 0: + self.lora_A = nn.Parameter(torch.empty((r, in_features))) + self.lora_B = nn.Parameter(torch.empty((out_features, r))) + self.scaling = self.lora_alpha / self.r + self.reset_parameters() + + def reset_parameters(self) -> None: + """Reset all the weights, even including pretrained ones.""" + if hasattr(self, "lora_A"): + # initialize A the same way as the default for nn.Linear and B to zero + # Wondering why 'a' is equal to math.sqrt(5)?: https://github.com/pytorch/pytorch/issues/15314 + nn.init.kaiming_uniform_(self.lora_A, a=math.sqrt(5)) + nn.init.zeros_(self.lora_B) + + def get_lora_AB(self) -> torch.Tensor: + """Return merged lora_A and lora_B matrices with the same shape as the pretrained weights.""" + return (self.lora_B @ self.lora_A) * self.scaling + + def merge(self) -> None: + """Merges the LoRA weights into the full-rank weights (W = W + delta_W).""" + if self.r > 0 and not self.merged: + pretrained_dtype = self.linear.weight.data.dtype + lora_data = self.get_lora_AB() + # if only the pretrained are in quantized form - dequantize, sum with LoRA and quantize the result + if pretrained_dtype == torch.uint8: + import bitsandbytes as bnb + + weight = self.linear.weight + # dequantize the pretrained weights + weight_data = bnb.functional.dequantize_4bit(weight.data, weight.quant_state).to(lora_data.dtype) + # add pretrained and LoRA weights + weight_data += lora_data + # assign updated weights and quantize by moving to CUDA device + self.linear.weight = bnb.nn.Params4bit(weight_data, requires_grad=False, **weight.__dict__) + self.linear.weight.cuda(weight.device) + else: + # self.linear might be on CPU and lora_data on CUDA + # the inplace add will preserve the dtype of linear.weight + self.linear.weight.data += lora_data.to(device=self.linear.weight.data.device) + self.merged = True + + def forward(self, x: torch.Tensor) -> torch.Tensor: + # if weights are merged or rank is less or equal to zero (LoRA is disabled) - it's only a regular nn.Linear forward pass; + # otherwise in addition do the forward pass with LoRA weights and add it's output to the output from pretrained weights + pretrained = self.linear(x) + if self.r == 0 or self.merged: + return pretrained + lora = (self.lora_dropout(x) @ self.lora_A.transpose(0, 1) @ self.lora_B.transpose(0, 1)) * self.scaling + return pretrained + lora + + +class LoRAQKVLinear(LoRALinear): + # LoRA implemented in a dense layer + def __init__( + self, + # ↓ this part is for pretrained weights + in_features: int, + out_features: int, + # ↓ the remaining part is for LoRA + head_size: int, + n_head: int, + n_query_groups: int, + r: int = 0, + lora_alpha: int = 1, + lora_dropout: float = 0.0, + enable_lora: Union[bool, Tuple[bool, bool, bool]] = False, + **kwargs: Any, + ): + """LoRA wrapper around linear class that is used for calculation of q, k and v matrices. + + This class has three weight matrices: + 1. Pretrained weights are stored as `self.linear.weight` + 2. 
LoRA A matrix as `self.lora_A` + 3. LoRA B matrix as `self.lora_B` + Only LoRA's A and B matrices are updated, pretrained weights stay frozen. + + Args: + in_features: number of input features of the pretrained weights + out_features: number of output features of the pretrained weights + head_size: size of a single attention head + n_head: number of attention heads + n_query_groups: number of query groups (see diagram in `litgpt/config.py`) + r: rank of the weight update matrices. To make sense of using LoRA the rank should be smaller than the rank of + the weights of the model. The rank can be as low as 1: https://arxiv.org/pdf/2106.09685.pdf (section 7.2) + lora_alpha: alpha is needed for scaling updates as alpha/r + "This scaling helps to reduce the need to retune hyperparameters when we vary r" + https://arxiv.org/pdf/2106.09685.pdf (section 4.1) + lora_dropout: dropout that is applied on the input in the LoRA branch (before multiplying by matrix A) + enable_lora: MergeLinear class is for attention mechanism where qkv are calculated with a single weight matrix. If we + don't want to apply LoRA we can set it as False. For example if we want to apply LoRA only to `query` + and `value` but keep `key` without weight updates we should pass `[True, False, True]` + """ + super(LoRALinear, self).__init__(r=r, lora_alpha=lora_alpha, lora_dropout=lora_dropout) + self.linear = torch.nn.Linear(in_features, out_features, **kwargs) + self.head_size = head_size + self.n_head = n_head + self.n_query_groups = n_query_groups + if isinstance(enable_lora, bool): + enable_lora = [enable_lora] * 3 + assert len(enable_lora) == 3 + self.enable_lora = enable_lora + + # Actual trainable parameters + # To better understand initialization let's imagine that we have such parameters: + # ⚬ in_features: 128 (embeddings_size) + # ⚬ out_features: 384 (3 * embedding_size) + # ⚬ r: 2 + # ⚬ enable_lora: [True, False, True] + if r > 0 and any(enable_lora): + self.lora_A = nn.Parameter(torch.empty((r * sum(enable_lora), in_features))) # (4, 128) + enable_q, enable_k, enable_v = enable_lora + # qkv_shapes will be used to split a tensor with weights correctly + qkv_shapes = ( + # if `head_size` is explicitly specified in the config, `n_embd` (or `in_features`) + # might not be equal to `head_size * n_head`, thus we use it directly here + head_size * n_head * enable_q, + head_size * n_query_groups * enable_k, + head_size * n_query_groups * enable_v, + ) + self.qkv_shapes = [s for s in qkv_shapes if s] + self.lora_B = nn.Parameter(torch.empty(sum(self.qkv_shapes), r)) # (256, 2)) + # Notes about shapes above + # - self.lora_A has shape (4, 128): 4 because rank is 2 and LoRA is applied only to two matrices; + # 128 is the input size of the x (embedding size). (4, 128) and not (128, 4) because later on in + # F.linear function weights are automatically transposed. In addition conv1d requires channels to + # be before seq length + # - self.lora_B has shape (256, 2): 256 because LoRA is applied only to two matrices, so the output is + # 128*2; 2 tells to have two channels per group for group convolution + + # Scaling: + # This balances the pretrained model`s knowledge and the new task-specific adaptation + # https://lightning.ai/pages/community/tutorial/lora-llm/ + # So, set alpha to 1.0 to fully add LoRA. If the LoRA seems to have too much effect (i.e., overfitted), set + # alpha to lower value. If the LoRA seems to have too little effect, set alpha to higher than 1.0. You can + # tune these values to your needs. 
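    # (Toy example: with r = 8 and lora_alpha = 16, the assignment below gives
    #  scaling = 16 / 8 = 2.0, so the LoRA update is weighted twice as strongly as
    #  with the common alpha == r choice, which gives scaling = 1.0.)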
This value can be even slightly greater than 1.0! + # https://github.com/cloneofsimo/lora + self.scaling = self.lora_alpha / self.r + + self.reset_parameters() + + @property + def lora_ind(self) -> torch.Tensor: + """Lazy creation of a buffer with LoRA indices to overcome the limitation when FSDP with meta device is used.""" + # Indices are needed to properly pad weight updates with zeros. + if not hasattr(self, "_lora_ind"): + enable_q, enable_k, enable_v = self.enable_lora + kv_embd_size = self.linear.in_features // (self.n_head // self.n_query_groups) + lora_ind = [] + if enable_q: + lora_ind.extend(range(0, self.linear.in_features)) + if enable_k: + lora_ind.extend(range(self.linear.in_features, self.linear.in_features + kv_embd_size)) + if enable_v: + lora_ind.extend(range(self.linear.in_features + kv_embd_size, self.linear.out_features)) + self.register_buffer( + "_lora_ind", torch.tensor(lora_ind, device=self.linear.weight.device), persistent=False + ) + + return self._lora_ind + + def zero_pad(self, x: torch.Tensor) -> torch.Tensor: + """Properly pad the last dimension of weight updates with zeros. + + If, based on `self.enable_lora`, we want to fine-tune queries and values, but not keys, + then the weights update should be: + + [[ΔW,ΔW,ΔW, ..., 0,0,0, ..., ΔW,ΔW,ΔW,], + [....................................], + [ΔW,ΔW,ΔW, ..., 0,0,0, ..., ΔW,ΔW,ΔW,]] + ↑ ↑ ↑ + ________________________________________ + | query | key | value | + ---------------------------------------- + + Args: + x: tensor with weights update that will be padded with zeros if necessary + + Returns: + A tensor with weight updates and zeros for deselected q, k or v + """ + # we need to do zero padding only if LoRA is disabled for one of QKV matrices + if all(self.enable_lora): + return x + + # Let's image that: + # ⚬ input x has shape (64, 64, 256): (batch_size, sequence_length, embeddings_size) + # ⚬ embeddings_size: 128 + # ⚬ self.linear.out_features: 384 (3 * embeddings_size) + # ⚬ enable_lora: [True, False, True] + # Then x has embeddings_size of 256 (2 * 128 as enable_lora only for query and value, not keys) and expected + # embeddings_size is 384 (self.linear.out_features), so that means that we need to pad from 256 to 384 with zeros, but + # only for key updates (this is where self.lora_ind comes in handy) + + result = x.new_zeros(*x.shape[:-1], self.linear.out_features) # (64, 64, 384) + if result.device.type == "mps": + result[..., self.lora_ind] = x + return result + else: + return result.index_copy_(dim=-1, index=self.lora_ind, source=x) # (64, 64, 384) + + def conv1d(self, input: torch.Tensor, weight: torch.Tensor) -> torch.Tensor: + """An extension of the `torch.nn.functional.conv1d` function with a logic specific to grouped queries. + + If the number of heads is equal to the number of query groups - grouped queries are disabled + (see scheme in `litgpt/config.py:Config`). In this case the combined QKV matrix consists of equally sized + query, key and value parts, which means we can utilize `groups` argument from `conv1d`: with this argument the + input and weight matrices will be split in equally sized parts and applied separately (like having multiple + conv layers side by side). + + Otherwise QKV matrix consists of unequally sized parts and thus we have to split input and weight matrices manually, + apply each part of the weight matrix to the corresponding input's part and concatenate the result. + + Args: + input: input matrix of shape (B, C, T) + weight: weight matrix of shape (C_output, rank, 1). 
+ "C_output" is defined as a sum of embedding sizes for each enabled LoRA layer (see init method of the class). + + Returns: + A tensor with a shape (B, C_output, T) + + """ + if self.n_head == self.n_query_groups: + return F.conv1d(input, weight, groups=sum(self.enable_lora)) # (B, C_output, T) + + # Notation: + # ⚬ N: number of enabled LoRA layers (self.enable_lora) + # ⚬ C_output': embeddings size for each LoRA layer (not equal in size) + # ⚬ r: rank of all LoRA layers (equal in size) + + input_splitted = input.chunk(sum(self.enable_lora), dim=1) # N * (B, C // N, T) + weight_splitted = weight.split(self.qkv_shapes) # N * (C_output', r, 1) + return torch.cat( + [F.conv1d(a, b) for a, b in zip(input_splitted, weight_splitted)], + dim=1, # (B, C_output', T) + ) # (B, C_output, T) + + def get_lora_AB(self) -> torch.Tensor: + """Return merged lora_A and lora_B matrices with the same shape as the pretrained weights.""" + # Let's assume that: + # ⚬ self.linear.weight.data: (384, 128) or (3 * embedding_size, embedding_size) + # ⚬ self.lora_A.data: (4, 128) + # ⚬ self.lora_B.data: (256, 2) + lora = self.conv1d( + self.lora_A.data.unsqueeze(0), # (4, 128) -> (1, 4, 128) + self.lora_B.data.unsqueeze(-1), # (256, 2) -> (256, 2, 1) + ).squeeze(0) # (1, 4, 128) @ (256, 2, 1) -> (1, 256, 128) -> (256, 128) + return self.zero_pad(lora.T * self.scaling).T # (256, 128) after zero_pad (384, 128) + + def merge(self) -> None: + """Merges the LoRA weights into the full-rank weights (W = W + delta_W).""" + if self.r > 0 and any(self.enable_lora) and not self.merged: + super().merge() + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Do the forward pass. + + If LoRA's weights are merged with pretrained ones then it's a simple matrix multiplication. + If not, then multiply pretrained weights with input, apply LoRA on input and do summation. + + Args: + x: input tensor of shape (batch_size, context_length, embedding_size) + + Returns: + Output tensor of shape (batch_size, context_length, 3 * embedding_size) + """ + + # Let's assume that: + # ⚬ x: (64, 64, 128) or (batch_size, context_length, embedding_size) + # ⚬ self.linear.weight: (384, 128) or (3 * embedding_size, embedding_size) + # ⚬ self.lora_A.data: (4, 128) + # ⚬ self.lora_B.data: (256, 2) + + # if weights are merged or LoRA is disabled (r <= 0 or all `enable_lora` are False) - it's only a regular nn.Linear forward pass; + # otherwise in addition do the forward pass with LoRA weights and add it's output to the output from pretrained weights + pretrained = self.linear(x) + if self.r == 0 or not any(self.enable_lora) or self.merged: + return pretrained + after_A = F.linear(self.lora_dropout(x), self.lora_A) # (64, 64, 128) @ (4, 128) -> (64, 64, 4) + # For F.conv1d: + # ⚬ input: input tensor of shape (mini-batch, in_channels, iW) + # ⚬ weight: filters of shape (out_channels, in_channels/groups, kW) + after_B = self.conv1d( + after_A.transpose(-2, -1), # (64, 64, 4) -> (64, 4, 64) + self.lora_B.unsqueeze(-1), # (256, 2) -> (256, 2, 1) + ).transpose(-2, -1) # (64, 4, 64) @ (256, 2, 1) -> (64, 256, 64) -> (64, 64, 256) + lora = self.zero_pad(after_B) * self.scaling # (64, 64, 256) after zero_pad (64, 64, 384) + return pretrained + lora + + +def mark_only_lora_as_trainable(model: nn.Module, bias: str = "none") -> None: + """Freeze all modules except LoRA's and depending on 'bias' value unfreezes bias weights. 
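    Stepping back to the `zero_pad` logic above for a moment, here is a toy, self-contained sketch of the padding pattern (hypothetical column counts; query/key/value slices of 8/2/2):

    import torch

    out_features = 12                    # query + key + value columns: 8 + 2 + 2
    # with enable_lora = (True, False, True) the LoRA branch only produces the 8 query
    # and 2 value columns; lora_ind lists where they belong in the full output
    lora_ind = torch.tensor(list(range(0, 8)) + list(range(10, 12)))
    update = torch.arange(10.0)          # the 10 columns coming out of the LoRA branch
    padded = torch.zeros(out_features).index_copy_(-1, lora_ind, update)
    # padded[8:10] (the key slice) stays zero, mirroring LoRAQKVLinear.zero_pad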
+ + Args: + model: model with LoRA layers + bias: + ``"none"``: all bias weights will be frozen, + ``"lora_only"``: only bias weight for LoRA layers will be unfrozen, + ``"all"``: all bias weights will be unfrozen. + + Raises: + NotImplementedError: if `bias` not in ["none", "lora_only", "all"] + """ + # freeze all layers except LoRA's + for n, p in model.named_parameters(): + if "lora_" not in n: + p.requires_grad = False + + # depending on the `bias` value unfreeze bias weights + if bias == "none": + return + if bias == "all": + for n, p in model.named_parameters(): + if "bias" in n: + p.requires_grad = True + elif bias == "lora_only": + for m in model.modules(): + if isinstance(m, LoRALayer) and hasattr(m, "bias") and m.bias is not None: + m.bias.requires_grad = True + else: + raise NotImplementedError + + +def lora_filter(key: str, value: Any) -> bool: + return "lora_" in key + + +@dataclass +class Config(BaseConfig): + """ + Args: + lora_r: rank of the weight update matrices. To make sense of using LoRA the rank should be smaller than the rank of + the weights of the model. The rank can be as low as 1: https://arxiv.org/pdf/2106.09685.pdf (section 7.2) + lora_alpha: alpha is needed for scaling updates as alpha/r + "This scaling helps to reduce the need to retune hyperparameters when we vary r" + https://arxiv.org/pdf/2106.09685.pdf (section 4.1) + lora_dropout: dropout that is applied on the input in the LoRA branch (before multiplying by matrix A) + lora_*: whether to apply LoRA to the specified weights or not + """ + + lora_r: int = 0 + lora_alpha: int = 1 + lora_dropout: float = 0.0 + lora_query: bool = False + lora_key: bool = False + lora_value: bool = False + lora_projection: bool = False + lora_mlp: bool = False + lora_head: bool = False + + @property + def mlp_class(self) -> Type: + return getattr(litgpt.lora, self.mlp_class_name) + + +class GPT(BaseModel): + # Copy & paste from :class:`model.GPT`. Note that :class:`Block` is new here. + def __init__(self, config: Config) -> None: + nn.Module.__init__(self) + assert config.padded_vocab_size is not None + self.config = config + + self.lm_head = create_lora_linear( + config, + config.n_embd, + config.padded_vocab_size, + bias=config.lm_head_bias, + use_r=config.lora_head, + ) + self.transformer = nn.ModuleDict( + dict( + wte=nn.Embedding(config.padded_vocab_size, config.n_embd), + h=nn.ModuleList(Block(config, block_idx) for block_idx in range(config.n_layer)), + ln_f=config.norm_class(config.n_embd, eps=config.norm_eps), + ) + ) + self.mask_cache: Optional[torch.Tensor] = None + self.max_seq_length = self.config.block_size + + @classmethod + def from_name(cls, name: str, **kwargs: Any) -> Self: + return cls(Config.from_name(name, **kwargs)) + + def _init_weights(self, module: nn.Module) -> None: + """Meant to be used with `gpt.apply(gpt._init_weights)`. 
Unused method left for completeness.""" + super()._init_weights(module) + if isinstance(module, LoRALinear): + module.reset_parameters() + + def _load_from_state_dict(self, state_dict: Dict, prefix: str, *args: Any, **kwargs: Any) -> None: + """For compatibility with base checkpoints.""" + mapping = {"lm_head.weight": "lm_head.linear.weight", "lm_head.bias": "lm_head.linear.bias"} + state_dict = map_old_state_dict_weights(state_dict, mapping, prefix) + super()._load_from_state_dict(state_dict, prefix, *args, **kwargs) + + +class Block(BaseBlock): + def __init__(self, config: Config, block_idx: int) -> None: + super().__init__(config, block_idx) + self.attn = CausalSelfAttention(config, block_idx) + self.mlp = config.mlp_class(config) + + +class CausalSelfAttention(BaseCausalSelfAttention): + def __init__(self, config: Config, block_idx: int) -> None: + super().__init__(config, block_idx) + # key, query, value projections for all heads, but in a batch + shape = (config.n_head + 2 * config.n_query_groups) * config.head_size + self.qkv = LoRAQKVLinear( + in_features=config.n_embd, + out_features=shape, + r=config.lora_r, + lora_alpha=config.lora_alpha, + lora_dropout=config.lora_dropout, + enable_lora=(config.lora_query, config.lora_key, config.lora_value), + bias=config.bias or config.attn_bias, + # for MQA/GQA support + head_size=config.head_size, + n_head=config.n_head, + n_query_groups=config.n_query_groups, + ) + # output projection + self.proj = create_lora_linear( + config, + config.head_size * config.n_head, + config.n_embd, + use_r=config.lora_projection, + ) + + def _load_from_state_dict(self, state_dict: Dict, prefix: str, *args: Any, **kwargs: Any) -> None: + """For compatibility with base and/or legacy checkpoints.""" + mapping = { + "qkv.weight": "qkv.linear.weight", + "qkv.bias": "qkv.linear.bias", + "proj.weight": "proj.linear.weight", + "proj.bias": "proj.linear.bias", + } + state_dict = map_old_state_dict_weights(state_dict, mapping, prefix) + + for attr in ("weight", "bias"): + legacy_key = f"{prefix}attn.linear.{attr}" + current_key = f"{prefix}qkv.linear.{attr}" + if legacy_key in state_dict: + state_dict[current_key] = qkv_reassemble(state_dict.pop(legacy_key), self.config) + + super()._load_from_state_dict(state_dict, prefix, *args, **kwargs) + + +def create_lora_linear( + config: Config, + in_size: int, + out_size: int, + bias: Optional[Union[float, bool]] = None, + use_r: Optional[bool] = None, +) -> LoRALinear: + if bias is None: + bias = config.bias + if use_r is None: + use_r = config.lora_mlp + return LoRALinear( + in_size, + out_size, + bias=bias, + r=(config.lora_r if use_r else 0), + lora_alpha=config.lora_alpha, + lora_dropout=config.lora_dropout, + ) + + +class GptNeoxMLP(litgpt.model.GptNeoxMLP): + def __init__(self, config: Config) -> None: + nn.Module.__init__(self) + self.fc = create_lora_linear(config, config.n_embd, config.intermediate_size) + self.proj = create_lora_linear(config, config.intermediate_size, config.n_embd) + self.config = config + + def _load_from_state_dict(self, state_dict: Dict, prefix: str, *args: Any, **kwargs: Any) -> None: + """For compatibility with base checkpoints.""" + mapping = { + "fc.weight": "fc.linear.weight", + "fc.bias": "fc.linear.bias", + "proj.weight": "proj.linear.weight", + "proj.bias": "proj.linear.bias", + } + state_dict = map_old_state_dict_weights(state_dict, mapping, prefix) + super()._load_from_state_dict(state_dict, prefix, *args, **kwargs) + + +class LLaMAMLP(litgpt.model.LLaMAMLP): + def __init__(self, 
config: Config, intermediate_size: Optional[int] = None) -> None: + nn.Module.__init__(self) + self.intermediate_size = intermediate_size or config.intermediate_size + self.fc_1 = create_lora_linear(config, config.n_embd, self.intermediate_size) + self.fc_2 = create_lora_linear(config, config.n_embd, self.intermediate_size) + self.proj = create_lora_linear(config, self.intermediate_size, config.n_embd) + self.config = config + + def _load_from_state_dict(self, state_dict: Dict, prefix: str, *args: Any, **kwargs: Any) -> None: + """For compatibility with base checkpoints.""" + mapping = { + "fc_1.weight": "fc_1.linear.weight", + "fc_1.bias": "fc_1.linear.bias", + "fc_2.weight": "fc_2.linear.weight", + "fc_2.bias": "fc_2.linear.bias", + "proj.weight": "proj.linear.weight", + "proj.bias": "proj.linear.bias", + } + state_dict = map_old_state_dict_weights(state_dict, mapping, prefix) + super()._load_from_state_dict(state_dict, prefix, *args, **kwargs) + + +class GemmaMLP(LLaMAMLP): + def forward(self, x: torch.Tensor) -> torch.Tensor: + x_fc_1 = self.fc_1(x) + x_fc_2 = self.fc_2(x) + x = torch.nn.functional.gelu(x_fc_1, approximate=self.config.gelu_approximate) * x_fc_2 + return self.proj(x) + + +class LLaMAMoE(litgpt.model.LLaMAMoE): + def __init__(self, config: Config) -> None: + nn.Module.__init__(self) + self.gate = create_lora_linear(config, config.n_embd, config.n_expert, bias=False) + self.experts = nn.ModuleList( + LLaMAMLP(config, intermediate_size=config.moe_intermediate_size) for _ in range(config.n_expert) + ) + self.config = config + + def _load_from_state_dict(self, state_dict: Dict, prefix: str, *args: Any, **kwargs: Any) -> None: + """For compatibility with base checkpoints.""" + mapping = {"gate.weight": "gate.linear.weight"} + state_dict = map_old_state_dict_weights(state_dict, mapping, prefix) + super()._load_from_state_dict(state_dict, prefix, *args, **kwargs) + + +def merge_lora_weights(model: GPT) -> None: + """Merge LoRA weights into the full-rank weights to speed up inference.""" + for module in model.modules(): + if isinstance(module, LoRALinear): + module.merge() diff --git a/litgpt/model.py b/litgpt/model.py new file mode 100644 index 0000000000000000000000000000000000000000..c3c1833db90f9f41ba09309a031a23617877b5f0 --- /dev/null +++ b/litgpt/model.py @@ -0,0 +1,876 @@ +# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file. + +"""Full definition of a decoder-only transformer-based language model, all of it in this single file. + +Based on the nanoGPT implementation: https://github.com/karpathy/nanoGPT and +https://github.com/EleutherAI/gpt-neox/tree/main/megatron/model. 
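Before the model definition continues, a rough sketch of how the LoRA pieces above fit together in practice (illustrative only; the actual fine-tuning entry points elsewhere in litgpt wire this up with Fabric, data loading, and checkpointing):

from litgpt.lora import GPT, Config, lora_filter, mark_only_lora_as_trainable, merge_lora_weights

# "SmolLM2-135M" is just one of the names registered in litgpt/config.py above
config = Config.from_name("SmolLM2-135M", lora_r=8, lora_alpha=16, lora_query=True, lora_value=True)
model = GPT(config)
mark_only_lora_as_trainable(model)   # freeze everything except lora_A / lora_B
# ... run the fine-tuning loop; only the LoRA tensors receive gradients ...
lora_state = {k: v for k, v in model.state_dict().items() if lora_filter(k, v)}  # LoRA-only checkpoint
merge_lora_weights(model)            # fold B @ A * scaling into the frozen weights for plain inference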
+""" + +import math +from functools import partial +from typing import Any, List, Optional, Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing_extensions import Self + +from litgpt.config import Config +from litgpt.scripts.convert_hf_checkpoint import qkv_reassemble + + +class GPT(nn.Module): + def __init__(self, config: Config) -> None: + super().__init__() + assert config.padded_vocab_size is not None + self.config = config + + self.lm_head = nn.Linear(config.n_embd, config.padded_vocab_size, bias=config.lm_head_bias) + self.transformer = nn.ModuleDict( + dict( + wte=nn.Embedding(config.padded_vocab_size, config.n_embd), + h=nn.ModuleList(Block(config, block_idx) for block_idx in range(config.n_layer)), + ln_f=config.norm_class(config.n_embd, eps=config.norm_eps), + ) + ) + self.mask_cache: Optional[torch.Tensor] = None + self.max_seq_length = self.config.block_size + + @property + def max_seq_length(self) -> int: + return self._max_seq_length + + @max_seq_length.setter + def max_seq_length(self, value: int) -> None: + """ + When doing inference, the sequences used might be shorter than the model's context length. + This allows setting a smaller number to avoid allocating unused memory + """ + if value > self.config.block_size: + raise ValueError( + f"Cannot attend to {value}, block size is only {self.config.block_size}." + " This is likely because the input text exceeds the supported context length of this model." + ) + self._max_seq_length = value + if not hasattr(self, "cos"): + # first call + cos, sin = self.rope_cache() + self.register_buffer("cos", cos, persistent=False) + self.register_buffer("sin", sin, persistent=False) + # override + elif value != self.cos.size(0): + self.cos, self.sin = self.rope_cache(device=self.cos.device) + # the mask and kv cache size will get updated on `set_kv_cache`. we cannot update it here because we don't know + # if the kv cache is expected + if self.mask_cache is not None and self.mask_cache.shape[-1] < value: + print( + f"Warning: KV cache has length {self.mask_cache.shape[-1]} < {value} = max_seq_length. Call 'set_kv_cache' before doing any forwards!" + ) + + def reset_parameters(self) -> None: + # Trigger resetting the rope-cache + self.cos, self.sin = self.rope_cache(device=self.cos.device) + + def _init_weights(self, module: nn.Module) -> None: + """Meant to be used with `gpt.apply(gpt._init_weights)`.""" + if isinstance(module, nn.Linear): + torch.nn.init.normal_(module.weight, mean=0.0, std=0.02) + if module.bias is not None: + torch.nn.init.zeros_(module.bias) + elif isinstance(module, nn.Embedding): + torch.nn.init.normal_(module.weight, mean=0.0, std=0.02) + + def forward( + self, + idx: torch.Tensor, + input_pos: Optional[torch.Tensor] = None, + input_pos_maxp1: Optional[int] = None, + lm_head_chunk_size: int = 0, + ) -> Union[torch.Tensor, List[torch.Tensor]]: + """ + If `input_pos` is provided, the KV cache uses K and V vectors for + positions smaller than entries in `input_pos`. For efficiency, pass + `input_pos_maxp1` as `max(input_pos) + 1` if already available from + your forward algorithm. This slices the KV cache buffers and speeds + up multi-head attention. + + Without `input_pos_maxp1`, the computation uses the full KV cache + (`max_seq_length`) with masking applied. Note that inferring + `input_pos_maxp1` from `input_pos` causes graph breaks and prevents + compilation. + + Args: + idx: Token indices of input sequences, shape `(B, T)`, where `B` + is batch size. 
+ input_pos: Optional. Positions of input tokens. The default is + `arange(T)`. Can have shape `(T,)` or `(B, T)` (batched index). + input_pos_maxp1: Optional. See above. + lm_head_chunk_size: Optional. If `lm_head_chunk_size > 0`, the final + `lm_head` computation is done in chunks of this size. + + Returns: + Logit outputs, shape `(B, T, config.padded_vocab_size)`. If + `lm_head_chunk_size > 0`, this is a list of chunks of shape + `(B, lm_head_chunk_size, config.padded_vocab_size)`, the final + entry can be shorter. + + """ + T = idx.size(1) + if self.max_seq_length < T: + raise ValueError(f"Cannot forward sequence of length {T}, max seq length is only {self.max_seq_length}.") + + if input_pos is not None: # use the kv cache + if input_pos.dim() > 2: + # otherwise, things go wrong in `apply_rope` + raise ValueError(f"input_pos must have 1 or 2 dimensions, input_pos.shape = {input_pos.shape}") + if input_pos.shape[-1] != T: + raise ValueError(f"input_pos.shape[-1] = {input_pos.shape[-1]} != {T} = idx.shape[1], must be the same") + cos = batched_index_select(self.cos, 0, input_pos) + sin = batched_index_select(self.sin, 0, input_pos) + if input_pos.dim() == 1: + cos = cos.unsqueeze(0) + sin = sin.unsqueeze(0) + if self.mask_cache is None: + raise TypeError("You need to call `gpt.set_kv_cache()`") + mask = batched_index_select(self.mask_cache, 2, input_pos) + if mask.dim() > 4: + # the mask cache has a batch dim of 1 in addition to the one + # we get if input_pos has a batch dimension + mask = mask.view(*(mask.shape[0:1] + mask.shape[2:])) + if input_pos_maxp1 is not None: + # Shorten final dimension so it just covers all `input_pos` entries + if input_pos_maxp1 > self.max_seq_length: + raise ValueError(f"Positions in 'input_pos' must be in [0,{self.max_seq_length})") + mask = mask[..., :input_pos_maxp1] + else: + # unsqueeze to have a batch dimension + cos = self.cos[:T].unsqueeze(0) + sin = self.sin[:T].unsqueeze(0) + # `cos`, `sin` have shape (1, T, config.rope_n_elem) + mask = None # defaults to causal mask + input_pos_maxp1 = None + + x = self.transformer.wte(idx) # token embeddings of shape (B, T, n_embd) + if self.config.scale_embeddings: + x = x * torch.tensor(self.config.n_embd**0.5, dtype=x.dtype) + + for block_idx, block in enumerate(self.transformer.h): + if self.config.rope_indices is not None: + x = block( + x, + cos[..., self.config.rope_indices[block_idx]], + sin[..., self.config.rope_indices[block_idx]], + mask, + input_pos, + input_pos_maxp1, + ) + else: + x = block(x, cos, sin, mask, input_pos, input_pos_maxp1) + x = self.transformer.ln_f(x) + clamp_head = ( + partial(do_softcapping, thresh=self.config.final_logit_softcapping) + if self.config.final_logit_softcapping is not None + else nn.Identity() + ) + if lm_head_chunk_size > 0: + # chunk the lm head logits to reduce the peak memory used by autograd + return [clamp_head(self.lm_head(x_i)) for x_i in x.split(lm_head_chunk_size, dim=1)] + else: + return clamp_head(self.lm_head(x)) # (B, T, padded_vocab_size) + + @classmethod + def from_name(cls, name: str, **kwargs: Any) -> Self: + return cls(Config.from_name(name, **kwargs)) + + def rope_cache(self, device: Optional[torch.device] = None) -> Tuple[torch.Tensor, torch.Tensor]: + if self.config.rope_adjustments is None: + extra_config = None + + else: + adjusted_params_required = ["factor", "low_freq_factor", "high_freq_factor", "original_max_seq_len"] + params_present = [param in self.config.rope_adjustments for param in adjusted_params_required] + num_params_present = 
sum(params_present) + + if num_params_present == 0: + extra_config = None # uses standard RoPE + elif num_params_present == 4: + # These parameters should always be used together so that we don't interfere with standard rope + extra_config = {name: self.config.rope_adjustments[name] for name in adjusted_params_required} + elif "factor" in self.config.rope_adjustments: + # linear RoPE + adjusted_params_required = ["factor"] + extra_config = {name: self.config.rope_adjustments[name] for name in adjusted_params_required} + else: + # Some but not all parameters are specified; raise an error + missing_params = [ + param for param, present in zip(adjusted_params_required, params_present) if not present + ] + raise ValueError( + f"The following adjusted RoPE parameters are missing in rope_adjustments: {', '.join(missing_params)}. " + "All adjusted RoPE parameters must be specified together." + ) + + return build_rope_cache( + seq_len=self.max_seq_length, + n_elem=self.config.rope_n_elem, + device=device, + condense_ratio=self.config.rope_condense_ratio, + base=self.config.rope_base, + extra_config=extra_config, + rope_local_base_freq=self.config.rope_local_base_freq, + ) + + def set_kv_cache( + self, + batch_size: int, + max_seq_length: Optional[int] = None, + rope_cache_length: Optional[int] = None, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ) -> None: + if rope_cache_length is None: + if len(self.cos.shape) == 2: + rope_cache_length = self.cos.size(-1) + else: + rope_cache_length = self.cos[..., 0].size(-1) + + if max_seq_length is None: + max_seq_length = self.max_seq_length + + # initialize the kv cache for all blocks + for block in self.transformer.h: + block.attn.kv_cache = block.attn.build_kv_cache( + batch_size, + max_seq_length, + rope_cache_length, + device, + dtype, + ) + + if self.mask_cache is None or self.mask_cache.size(3) != max_seq_length: + # passing `attn_mask` to SDPA disables the flash implementation. since we only need the mask + # for the kv-cache support (only during inference), we only create it in that situation + self.mask_cache = build_mask_cache(max_seq_length, device) + + def clear_kv_cache(self) -> None: + self.mask_cache = None + for block in self.transformer.h: + block.attn.kv_cache = None + + +class Block(nn.Module): + def __init__( + self, + config: Config, + block_idx: int, + ) -> None: + super().__init__() + if not config.parallel_residual and config.shared_attention_norm: + raise NotImplementedError( + "No checkpoint amongst the ones we support uses this configuration" + " (non-parallel residual and shared attention norm)." 
+ ) + + self.norm_1 = nn.Identity() if not config.norm_1 else config.norm_class(config.n_embd, eps=config.norm_eps) + self.attn = CausalSelfAttention(config, block_idx) + self.post_attention_norm = ( + config.norm_class(config.n_embd, eps=config.norm_eps) if config.post_attention_norm else nn.Identity() + ) + self.norm_2 = ( + nn.Identity() + if not config.norm_2 + else (None if config.shared_attention_norm else config.norm_class(config.n_embd, eps=config.norm_eps)) + ) + self.mlp = config.mlp_class(config) + self.post_mlp_norm = ( + config.norm_class(config.n_embd, eps=config.norm_eps) if config.post_mlp_norm else nn.Identity() + ) + + self.config = config + + def forward( + self, + x: torch.Tensor, + cos: torch.Tensor, + sin: torch.Tensor, + mask: Optional[torch.Tensor] = None, + input_pos: Optional[torch.Tensor] = None, + input_pos_maxp1: Optional[int] = None, + ) -> torch.Tensor: + """ + Non-parallel residual Parallel residual + ┌─ x ┌─ x ──────────────────┐ Note: if `shared_attention_norm` is True, + │ ↓ │ ↓ ↓ the output from `norm_1` is reused + │ norm_1 │ norm_1 ───────► norm_2 + │ ↓ │ ↓ ↓ + │ attn │ attn MLP + │ ↓ │ ↓ ↓ + | post_attn_norm | post_attn_norm post_mlp_norm + | ↓ | ↓ ↓ + ┌─ └► + └► + ◄─────────────────┘ + | ↓ + │ norm_2 + │ ↓ + │ MLP + │ ↓ + | post_mlp_norm + | ↓ + └───► + + """ + + x_normed = self.norm_1(x) + attention_output = self.attn(x_normed, cos, sin, mask, input_pos, input_pos_maxp1) + attention_output = self.post_attention_norm(attention_output) + + if self.config.parallel_residual: + if not self.config.shared_attention_norm: + x_normed = self.norm_2(x) + x = attention_output + x + else: + x = attention_output + x + x_normed = self.norm_2(x) + + return self.post_mlp_norm(self.mlp(x_normed)) + x + + +class CausalSelfAttention(nn.Module): + def __init__(self, config: Config, block_idx: int) -> None: + super().__init__() + # key, query and value projections for all heads, but in a batch + self.qkv = nn.Linear( + config.n_embd, + (config.n_head + 2 * config.n_query_groups) * config.head_size, # support for grouped/multi queries + bias=config.bias or config.attn_bias, + ) + # output projection + self.proj = nn.Linear(config.head_size * config.n_head, config.n_embd, bias=config.bias) + # disabled by default + self.kv_cache: Optional[KVCache] = None + self.apply_sliding_window_attention = False + if config.sliding_window_size is not None and config.sliding_window_indices is not None: + self.apply_sliding_window_attention = config.sliding_window_indices[block_idx] + + if config.norm_qk: + norm_q_size = config.n_head * config.head_size if config.norm_qk_type == "olmo2" else config.head_size + norm_k_size = ( + config.n_query_groups * config.head_size if config.norm_qk_type == "olmo2" else config.head_size + ) + self.norm_q = config.norm_class(norm_q_size, eps=config.norm_eps) + self.norm_k = config.norm_class(norm_k_size, eps=config.norm_eps) + else: + self.norm_q = self.norm_k = None + + self.config = config + self.block_idx = block_idx + + def forward( + self, + x: torch.Tensor, + cos: torch.Tensor, + sin: torch.Tensor, + mask: Optional[torch.Tensor] = None, + input_pos: Optional[torch.Tensor] = None, + input_pos_maxp1: Optional[int] = None, + ) -> torch.Tensor: + # Notation: + # - B | batch size + # - T | time-step (sequence length) + # - C | model's embeddings size (n_embd) + # - C* | attentions's embeddings size + # - hs | head size + # - nh_(q,k,v) | number of heads for query, key and value + # - n_query_groups = nh_k = nh_v | number of query groups sharing key 
and value heads + # alternative notation: num_kv_groups = n_query_groups + # ┌───┐┌───┐┌───┐┌───┐ ┌───┐ ┌───┐ ┌───┐ + # │ v ││ v ││ v ││ v │ │ v │ │ v │ │ v │ + # └───┘└───┘└───┘└───┘ └───┘ └───┘ └───┘ + # │ │ │ │ │ │ │ + # ┌───┐┌───┐┌───┐┌───┐ ┌───┐ ┌───┐ ┌───┐ + # │ k ││ k ││ k ││ k │ │ k │ │ k │ │ k │ + # └───┘└───┘└───┘└───┘ └───┘ └───┘ └───┘ + # │ │ │ │ ┌──┴──┐ ┌──┴──┐ ┌────┬──┴─┬────┐ + # ┌───┐┌───┐┌───┐┌───┐ ┌───┐┌───┐┌───┐┌───┐ ┌───┐┌───┐┌───┐┌───┐ + # │ q ││ q ││ q ││ q │ │ q ││ q ││ q ││ q │ │ q ││ q ││ q ││ q │ + # └───┘└───┘└───┘└───┘ └───┘└───┘└───┘└───┘ └───┘└───┘└───┘└───┘ + # ◀──────────────────▶ ◀──────────────────▶ ◀──────────────────▶ + # MHA GQA MQA + # n_query_groups=4 n_query_groups=2 n_query_groups=1 + # + # credit https://arxiv.org/pdf/2305.13245.pdf + head_size = self.config.head_size + n_head = self.config.n_head + n_query_groups = self.config.n_query_groups + rope_n_elem = self.config.rope_n_elem + B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd) + + # Perform a single multiplication operation using a combined QKV matrix to calculate `query`, `key`, and `value` + # instead of individually multiplying the input `x` with the respective weight matrices. + qkv = self.qkv(x) # (B, T, 3xC*) + + # Define query, key and value sizes. + # If grouped/multi query is enabled, these sizes are not equal (see the diagram above). + query_size = n_head * head_size + key_size = value_size = n_query_groups * head_size + # Split qkv into query, key and value matrices. + q, k, v = qkv.split((query_size, key_size, value_size), dim=-1) # 3x(B, T, C*) + + if self.config.norm_qk and self.config.norm_qk_type == "olmo2": + q = self.norm_q(q) + k = self.norm_k(k) + + # To place the num_heads (nh) dimension right after the batch (B) dimension, the first step is to decouple the + # embedding size (C) into num_heads (nh) and head_size (hs). + + # The original GQA paper is followed here and the term query groups is used. + # alternative notation: Query groups are also referred to as KV groups. + q = q.view(B, T, n_head, head_size) # (B, T, nh_q, hs) + k = k.view(B, T, n_query_groups, head_size) # (B, T, n_query_groups, hs) + v = v.view(B, T, n_query_groups, head_size) # (B, T, n_query_groups, hs) + + # The tensors `query`, `key`, and `value` are now accurately structured: within each batch element (B), there are + # multiple heads (nh), and within each head, there is a sequence of elements (T), each represented by a vector + # of size `hs`. + q = q.transpose(1, 2) # (B, nh_q, T, hs) + k = k.transpose(1, 2) # (B, nh_k, T, hs) + v = v.transpose(1, 2) # (B, nh_v, T, hs) + + if self.config.norm_qk and self.config.norm_qk_type == "default": + q = self.norm_q(q) + k = self.norm_k(k) + + # Unlike standard positional embeddings rotary embeddings must be applied at every layer. + q_roped = apply_rope(q[..., :rope_n_elem], cos, sin) + k_roped = apply_rope(k[..., :rope_n_elem], cos, sin) + q = torch.cat((q_roped, q[..., rope_n_elem:]), dim=-1) # (B, nh_q, T, hs) + k = torch.cat((k_roped, k[..., rope_n_elem:]), dim=-1) # (B, nh_k, T, hs) + + # Apply kv-cache during inference. 
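        # (Toy walk-through: with max_seq_length = 8, decoding a single token at position 5
        #  means input_pos = tensor([5]); the KV cache returns buffers covering all 8 positions,
        #  and passing input_pos_maxp1 = 6 slices them down to the 6 positions filled so far,
        #  so the attention below scores 6 keys instead of 8 mostly-masked ones.)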
+ if input_pos is not None: + if not isinstance(self.kv_cache, KVCache): + raise TypeError("You need to call `gpt.set_kv_cache()`") + k, v = self.kv_cache(input_pos, k, v) + if input_pos_maxp1 is not None: + # Subselect along sequence dimension + k = k[..., :input_pos_maxp1, :] + v = v[..., :input_pos_maxp1, :] + # k, v: (B, nh_k, input_pos_maxp1, hs) + # If input_pos_maxp1 is None -> max_seq_length + + # Grouped queries: balance the number of heads across all three matrices. + # NOTE: flash attention requires it in training mode. + # Multi-query: this step can be skipped since there is only 1 head, allowing us to use broadcasting. + if n_query_groups != n_head and (input_pos is None or n_query_groups != 1): + q_per_kv = n_head // n_query_groups + k = k.repeat_interleave(q_per_kv, dim=1) # (B, nh_q, T, hs) + v = v.repeat_interleave(q_per_kv, dim=1) # (B, nh_q, T, hs) + + if self.apply_sliding_window_attention: + """ + Global Window Sliding window Sliding window + attention mask + bias = attention mask + ┌────────────────────────┐ ┌───────────────────────┐ ┌─────────────────────────┐ + │ True False False False │ │ True True True True │ │ True False False False │ + │ True True False False │ │ True True True True │ │ True True False False │ + │ True True True False │ │ False True True True │ │ False True True False │ + │ True True True True │ │ False False True True │ │ False False True True │ + └────────────────────────┘ └───────────────────────┘ └─────────────────────────┘ + """ + if mask is None: + mask = torch.ones(T, T, dtype=q.dtype, device=q.device).triu(diagonal=1) + mask.masked_fill_(mask.bool(), float("-inf")) + mask = mask.view(1, 1, *mask.shape) + sliding_window_bias = torch.ones_like(mask).tril(diagonal=-self.config.sliding_window_size) + sliding_window_bias.masked_fill_(sliding_window_bias.bool(), float("-inf")) + mask += sliding_window_bias + + # Efficient attention using Flash Attention CUDA kernels. + # NOTE: efficient implementation is disabled if `mask` is not None or softcapping is enabled. + # ↓ (B, nh, T, hs) @ (B, nh, T, hs).mT --> (B, nh, T, T) @ (B, nh, T, hs) --> (B, nh, T, hs) + y = self.scaled_dot_product_attention(q, k, v, mask) + + # Re-assemble all head outputs side by side. + y = y.reshape(B, T, head_size * n_head) + + # Output projection. 
+ return self.proj(y) # (B, T, C) + + def scaled_dot_product_attention( + self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, mask: Optional[torch.Tensor] = None + ) -> torch.Tensor: + scale = 1.0 / math.sqrt(self.config.attention_scores_scalar or self.config.head_size) + + # with softcapping we cannot use SDPA + if self.config.attention_logit_softcapping is not None: + scores = q @ k.mT * scale + scores = do_softcapping(scores, self.config.attention_logit_softcapping) + if mask is None: + mask = torch.ones(q.size(2), q.size(2), dtype=q.dtype, device=q.device).triu(diagonal=1) + mask.masked_fill_(mask.bool(), torch.finfo(q.dtype).min) + scores = scores + mask + scores = F.softmax(scores, dim=-1, dtype=torch.float).to(dtype=q.dtype) + y = scores @ v + else: + y = F.scaled_dot_product_attention( + q, k, v, attn_mask=mask, dropout_p=0.0, scale=scale, is_causal=mask is None + ) + return y.transpose(1, 2) + + def build_kv_cache( + self, + batch_size: int, + max_seq_length: int, + rope_cache_length: Optional[int] = None, + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ) -> "KVCache": + v_shape = (batch_size, self.config.n_query_groups, max_seq_length, self.config.head_size) + if rope_cache_length is None: + if self.config.rotary_percentage != 1.0: + raise TypeError("Please pass the `rope_cache_length=gpt.cos.size(-1)` value") + k_shape = v_shape + else: + k_shape = ( + batch_size, + self.config.n_query_groups, + max_seq_length, + rope_cache_length + self.config.head_size - self.config.rope_n_elem, + ) + return KVCache(k_shape, v_shape, device=device, dtype=dtype) + + def _load_from_state_dict(self, state_dict: dict, prefix: str, *args: Any, **kwargs: Any) -> None: + """For compatibility with legacy checkpoints.""" + + for attr in ("weight", "bias"): + legacy_key = f"{prefix}attn.{attr}" + current_key = f"{prefix}qkv.{attr}" + if legacy_key in state_dict: + state_dict[current_key] = qkv_reassemble(state_dict.pop(legacy_key), self.config) + + super()._load_from_state_dict(state_dict, prefix, *args, **kwargs) + + +class GptNeoxMLP(nn.Module): + def __init__(self, config: Config, intermediate_size: Optional[int] = None) -> None: + super().__init__() + self.intermediate_size = intermediate_size or config.intermediate_size + self.fc = nn.Linear(config.n_embd, self.intermediate_size, bias=config.bias) + self.proj = nn.Linear(self.intermediate_size, config.n_embd, bias=config.bias) + self.config = config + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.fc(x) + x = F.gelu(x, approximate=self.config.gelu_approximate) + return self.proj(x) + + +class LLaMAMLP(nn.Module): + def __init__(self, config: Config, intermediate_size: Optional[int] = None) -> None: + super().__init__() + self.intermediate_size = intermediate_size or config.intermediate_size + self.fc_1 = nn.Linear(config.n_embd, self.intermediate_size, bias=config.bias) + self.fc_2 = nn.Linear(config.n_embd, self.intermediate_size, bias=config.bias) + self.proj = nn.Linear(self.intermediate_size, config.n_embd, bias=config.bias) + self.config = config + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x_fc_1 = self.fc_1(x) + x_fc_2 = self.fc_2(x) + x = F.silu(x_fc_1) * x_fc_2 + return self.proj(x) + + +class GemmaMLP(LLaMAMLP): + def forward(self, x: torch.Tensor) -> torch.Tensor: + x_fc_1 = self.fc_1(x) + x_fc_2 = self.fc_2(x) + x = F.gelu(x_fc_1, approximate=self.config.gelu_approximate) * x_fc_2 + return self.proj(x) + + +class LLaMAMoE(nn.Module): + def __init__(self, config: 
Config) -> None: + super().__init__() + self.gate = nn.Linear(config.n_embd, config.n_expert, bias=False) + self.experts = nn.ModuleList( + LLaMAMLP(config, intermediate_size=config.moe_intermediate_size) for _ in range(config.n_expert) + ) + self.config = config + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Derived from: https://github.com/mistralai/mistral-src/blob/b46d6/moe_one_file_ref.py#L203-L219 + See also figure 1 in https://arxiv.org/abs/2211.15841 + """ + B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd) + x = x.view(-1, C) # (B*T, C) + router = self.gate(x) # (B*T, n_expert) + probs, indices = torch.topk(router, self.config.n_expert_per_token) # (B*T, n_expert_per_token) + probs = probs.softmax(dim=1, dtype=torch.float).to(dtype=x.dtype) + masks = indices.unsqueeze(-1) == torch.arange(self.config.n_expert, device=x.device) + masks = masks.permute(2, 0, 1) # (n_expert, B*T, n_expert_per_token) + y = torch.zeros_like(x) # (B*T, C) + for mask, expert in zip(masks, self.experts): + token_idx, expert_idx = torch.where(mask) + y[token_idx] += probs[token_idx, expert_idx, None] * expert(x[token_idx]) + return y.view(B, T, C) + + +def build_rope_cache( + seq_len: int, + n_elem: int, + device: Optional[torch.device] = None, + base: int = 10000, + condense_ratio: int = 1, + extra_config: Optional[dict] = None, + rope_local_base_freq: Optional[float] = None, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Enhanced Transformer with Rotary Position Embedding. + + Args: + seq_len (int): Sequence length. + n_elem (int): Number of elements (head dimension). + device (torch.device, optional): Device for tensor allocations. + base (int, optional): Base for computing inverse frequencies. + condense_ratio (int, optional): Ratio to condense the position indices. + extra_config (dict, optional): Configuration parameters for frequency adjustments (used by Llama 3.1 and 3.2) + + Returns: + Tuple[torch.Tensor, torch.Tensor]: Cosine and sine caches for RoPE. + Shapes are `(seq_len, n_elem)`. + """ + + # Compute the inverse frequencies theta + theta = 1.0 / (base ** (torch.arange(0, n_elem, 2, device=device).float() / n_elem)) + + if extra_config is not None: + factor = extra_config["factor"] + if "original_max_seq_len" in extra_config: + orig_context_len = extra_config["original_max_seq_len"] + low_freq_factor = extra_config["low_freq_factor"] + high_freq_factor = extra_config["high_freq_factor"] + + wavelen = 2 * torch.pi / theta + ratio = orig_context_len / wavelen + smooth_factor = (ratio - low_freq_factor) / (high_freq_factor - low_freq_factor) + smooth_factor = torch.clamp(smooth_factor, min=0.0, max=1.0) + + # Compute adjusted_theta without masked indexing + adjusted_theta = (1 - smooth_factor) * (theta / factor) + smooth_factor * theta + theta = adjusted_theta + else: + theta = theta / factor + + # Create position indices `[0, 1, ..., seq_len - 1]` + seq_idx = torch.arange(seq_len, device=device) / condense_ratio + + # Calculate the product of position index and $\theta_i$ + idx_theta = torch.outer(seq_idx, theta).repeat(1, 2) + # If `n_elem` is odd, the final dimension of `idx_theta` has size + # `n_elem + 1`, so need to cut something off. + # Due to a current bug in Hugging Face, in the case `n_elem == 1`, we leave + # `idx_theta`, `cos`, `sin` as is. Things work out in `apply_rope` due to + # broadcasting. If we shorten `idx_theta`, unit tests comparing to + # Hugging Face fail. 
+ # https://github.com/huggingface/transformers/issues/35233 + if idx_theta.shape[-1] > n_elem > 1: + idx_theta = idx_theta[..., :n_elem] + + # if rope_local_base_freq is given, have a separate rope value for local embedding + # For now, we use default RoPE for local embedding + if rope_local_base_freq is not None: + local_theta = 1.0 / (rope_local_base_freq ** (torch.arange(0, n_elem, 2, device=device).float() / n_elem)) + local_idx_theta = torch.outer(seq_idx, local_theta) + local_idx_theta = local_idx_theta.repeat(1, 2) + if local_idx_theta.shape[-1] > n_elem > 1: + local_idx_theta = local_idx_theta[..., :n_elem] + + idx_theta = torch.stack((idx_theta, local_idx_theta), dim=-1) + + return torch.cos(idx_theta), torch.sin(idx_theta) + + +def batched_index_select(t, dim, idx): + """index_select for batched index and unbatched t""" + if idx.dim() == 1: + return torch.index_select(t, dim, idx) + + *batch_shape, idx_size = idx.shape + res = torch.index_select(t, dim, idx.reshape(-1)) # flat index + # split out single batch idx + res = res.view(*t.shape[:dim], -1, idx_size, *t.shape[dim + 1 :]) + if dim > 0: + # move batch dim to front, this is np.rollaxis(res, dim, 0) for tensors + dims = [dim] + list(range(res.dim())) + del dims[dim + 1] + res = res.permute(dims) + # unflatten batch dims + res = res.view(*batch_shape, *res.shape[1:]) + return res + + +def batched_index_copy_(t, dim, idx, val): + """Index copy for batched t, idx, val""" + + if t.device.type == "mps": + # Normalize negative dimensions + if dim < 0: + dim = t.dim() + dim + if idx.dim() == 1: + idx_shape = [1] * val.dim() + idx_shape[dim] = -1 + idx_expanded = idx.view(*idx_shape) + idx_expanded = idx_expanded.expand_as(val) + t.scatter_(dim, idx_expanded, val) + return t + + elif idx.dim() == 2: + assert dim != 0, "Cannot index the batch dimension" + batch_size = idx.size(0) + idx_size = idx.size(1) + assert batch_size == t.size(0) == val.size(0) + + idx_shape = [batch_size] + [1] * (val.dim() - 1) + idx_shape[dim] = idx_size + idx_expanded = idx.view(*idx_shape) + idx_expanded = idx_expanded.expand_as(val) + + t.scatter_(dim, idx_expanded, val) + return t + else: + raise NotImplementedError(f"idx.dim() == {idx.dim()} not supported") + + else: + if idx.dim() == 1: + return t.index_copy_(dim, idx, val) + + assert idx.dim() == 2, f"multiple batch dims not yet {idx.shape=}" + assert dim != 0, f"cannot index batch dim {dim=}" + batch_size, idx_size = idx.shape + assert batch_size == t.size(0) + assert batch_size == val.size(0) + + # if we can view the batch and indexed dimensions together, we could + # do index trickery. This is, sadly, not the case for kvcache so we + # fall back to for loop + for i in range(batch_size): + unbatched_dim = dim if dim < 0 else dim - 1 + t[i].index_copy_(unbatched_dim, idx[i], val[i]) + return t + + +def apply_rope(x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor: + """ + Applies RoPE transform to `x`. Note that `cos`, `sin` need to have a batch + dimension. 
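    A minimal usage sketch combining `build_rope_cache` above with this function (toy sizes; both names imported from litgpt.model):

    import torch
    from litgpt.model import apply_rope, build_rope_cache

    cos, sin = build_rope_cache(seq_len=16, n_elem=8)            # each of shape (16, 8)
    q = torch.randn(2, 4, 16, 8)                                 # (B, n_head, T, head_size)
    q_roped = apply_rope(q, cos.unsqueeze(0), sin.unsqueeze(0))  # add the required batch dim
    assert q_roped.shape == q.shape                              # the rotation preserves shapes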
+ + Args: + x: Input tensor, `(B, ..., T, head_size)` + cos: Cached cosines, `(B, T, head_size)` or `(1, T, head_size)` + sin: Cached sines, `(B, T, head_size)` or `(1, T, head_size)` + + Returns: + Encoded tensor, `(B, ..., T, head_size)` + """ + if cos.dim() != 3: + raise ValueError(f"cos must be three-dimensional, but shape is {cos.shape}") + if cos.shape != sin.shape: + raise ValueError(f"cos, sin must have same shape, but cos.shape={cos.shape}, sin.shape={sin.shape}") + head_size_half = x.size(-1) // 2 + x1 = x[..., :head_size_half] # (B, ..., T, head_size/2) + x2 = x[..., head_size_half:] # (B, ..., T, head_size/2) + rotated = torch.cat((-x2, x1), dim=-1) # (B, ..., T, head_size) + dims_diff = x.dim() - cos.dim() + if dims_diff > 0: + # Ensure that shapes of `x`, `cos`, `sin` align + new_shape = cos.shape[0:1] + (1,) * dims_diff + cos.shape[1:] + cos = cos.view(*new_shape) + sin = sin.view(*new_shape) + + roped = (x * cos) + (rotated * sin) + return roped.to(dtype=x.dtype) + + +def do_softcapping(x: torch.Tensor, thresh: float) -> torch.Tensor: + return torch.tanh(x / thresh) * thresh + + +class KVCache(nn.Module): + """ + Buffers `k`, `v` have shape + `(batch_size, n_query_groups, max_seq_length, head_size)`. + """ + + def __init__( + self, + k_shape: Tuple[int, int, int, int], + v_shape: Tuple[int, int, int, int], + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ) -> None: + super().__init__() + self.register_buffer("k", torch.zeros(k_shape, device=device, dtype=dtype), persistent=False) + self.register_buffer("v", torch.zeros(v_shape, device=device, dtype=dtype), persistent=False) + + def forward(self, input_pos: torch.Tensor, k: torch.Tensor, v: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Writes new values `k` and `v` into the cache at the positions specified + by `input_pos` along the sequence dimension (`max_seq_length`). The batch + size of `k` and `v` (`bs`) must be smaller or equal to `KVCache` batch + size. Returns the full buffers, adjusted to the batch size `bs`. + + Args: + input_pos: Position index, `(bs, T)` or `(T,)` + k: New values, `(bs, n_query_groups, T, head_size)` + v: New values, `(bs, n_query_groups, T, head_size)` + + Returns: + k_full, v_full, `(bs, n_query_groups, max_seq_length, head_size)` + + """ + # move the buffer to the activation dtype for when AMP is used + if self.k.dtype != k.dtype: + self.k = self.k.to(k.dtype) + if self.v.dtype != v.dtype: + self.v = self.v.to(v.dtype) + # update the cache + bs = k.size(0) + k = batched_index_copy_(self.k[:bs, ...], -2, input_pos, k) + v = batched_index_copy_(self.v[:bs, ...], -2, input_pos, v) + return k, v + + def reset_parameters(self) -> None: + torch.nn.init.zeros_(self.k) + torch.nn.init.zeros_(self.v) + + +def build_mask_cache(max_seq_length: int, device: Optional[torch.device] = None) -> torch.Tensor: + ones = torch.ones((max_seq_length, max_seq_length), device=device, dtype=torch.bool) + return torch.tril(ones).unsqueeze(0).unsqueeze(0) + + +class RMSNorm(torch.nn.Module): + """Root Mean Square Layer Normalization. + + Derived from https://github.com/bzhangGo/rmsnorm/blob/master/rmsnorm_torch.py. BSD 3-Clause License: + https://github.com/bzhangGo/rmsnorm/blob/master/LICENSE. 
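    A small sanity sketch of the formula this class implements (fresh module, so the learnable weight is still all ones):

    import torch
    from litgpt.model import RMSNorm

    x = torch.randn(3, 8)
    norm = RMSNorm(size=8, eps=1e-6)
    manual = x * torch.rsqrt(x.pow(2).mean(dim=-1, keepdim=True) + 1e-6)
    assert torch.allclose(norm(x), manual, atol=1e-5)  # y = x / sqrt(mean(x^2) + eps) * weight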
+ """ + + def __init__(self, size: int, dim: int = -1, eps: float = 1e-6, add_unit_offset: bool = False) -> None: + super().__init__() + self.weight = torch.nn.Parameter(torch.ones(size)) + self.eps = eps + self.dim = dim + self.add_unit_offset = add_unit_offset + + def forward(self, x: torch.Tensor) -> torch.Tensor: + dtype = x.dtype + x = x.float() + # NOTE: the original RMSNorm paper implementation is not equivalent + norm_x = torch.mean(x * x, dim=self.dim, keepdim=True) + x_normed = x * torch.rsqrt(norm_x + self.eps) + weight = (1 + self.weight) if self.add_unit_offset else self.weight + return (x_normed * weight.float()).to(dtype=dtype) + + def reset_parameters(self) -> None: + torch.nn.init.ones_(self.weight) diff --git a/litgpt/perplexity.py b/litgpt/perplexity.py new file mode 100644 index 0000000000000000000000000000000000000000..243c79849dff13737785e6539a8fc66bd33b1010 --- /dev/null +++ b/litgpt/perplexity.py @@ -0,0 +1,513 @@ +# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file. + +import math +import pprint +import time +import os +import json +from dataclasses import asdict +from datetime import timedelta +from functools import partial +from pathlib import Path +from typing import Dict, Optional, Tuple, Union + +import lightning as L +import torch +import torch.nn as nn +from lightning.fabric.strategies import FSDPStrategy +from lightning.fabric.utilities.throughput import ThroughputMonitor, measure_flops +from torch.utils.data import DataLoader +from torchmetrics.aggregation import RunningMean +from typing_extensions import Literal + +from litgpt import Tokenizer +from litgpt.args import EvalArgs, LogArgs, TrainArgs +from litgpt.config import name_to_config +from litgpt.data import DataModule, TinyLlama, Arxiv +from litgpt.model import GPT, Block, CausalSelfAttention, Config, LLaMAMLP +from litgpt.utils import ( + CycleIterator, + capture_hparams, + check_nvlink_connectivity, + choose_logger, + chunked_cross_entropy, + copy_config_files, + extend_checkpoint_dir, + find_resume_path, + get_default_supported_precision, + init_out_dir, + instantiate_torch_optimizer, + num_parameters, + parse_devices, + reset_parameters, + save_config, + save_hyperparameters, +) + + +def setup( + model_name: str, + model_config: Optional[Config] = None, + out_dir: Path = Path("out/pretrain"), + precision: Literal["bf16-true", "bf16-mixed", "32-true", None] = None, + initial_checkpoint_dir: Optional[Path] = None, + resume: Union[bool, Literal["auto"], Path] = False, + data: Optional[DataModule] = None, + data_dir: Path = None, + train: TrainArgs = TrainArgs( + save_interval=1000, + log_interval=1, + global_batch_size=512, + micro_batch_size=4, + max_tokens=int(3e12), # 3 trillion + max_norm=1.0, + min_lr=4e-5, + lr_warmup_steps=2000, + tie_embeddings=False, + ), + eval: EvalArgs = EvalArgs(interval=1000, max_iters=100), + log: LogArgs = LogArgs(), + optimizer: Union[str, Dict] = "AdamW", + devices: Union[int, str] = "auto", + num_nodes: int = 1, + tokenizer_dir: Optional[Path] = None, + logger_name: Literal["wandb", "tensorboard", "csv", "mlflow"] = "tensorboard", + seed: int = 42, + multi_month: bool = False, +): + """Pretrain a model. + + Arguments: + model_name: The name of the model to pretrain. Choose from names in ``litgpt.config``. Use "list" to list the supported models. + model_config: A ``litgpt.Config`` object to define the model architecture. Mutually exclusive with + ``model_config``. Overrides the `model_name` if specified. 
+ out_dir: Directory in which to save checkpoints and logs. If running in a Lightning Studio Job, look for it in + /teamspace/jobs//share. + precision: The precision to use for finetuning. Determines a compatible precision setting by default. + initial_checkpoint_dir: Optional path to a checkpoint directory to initialize the model from. + Useful for continued pretraining. Mutually exclusive with ``resume``. + resume: Path to a checkpoint directory to resume from in case training was interrupted, or ``True`` to resume + from the latest checkpoint in ``out_dir``. An error will be raised if no checkpoint is found. Passing + ``'auto'`` will resume from the latest checkpoint but not error if no checkpoint exists. + data: Data-related arguments. If not provided, the default is ``litgpt.data.TinyLlama``. + train: Training-related arguments. See ``litgpt.args.TrainArgs`` for details. + eval: Evaluation-related arguments. See ``litgpt.args.EvalArgs`` for details. + optimizer: An optimizer name (such as "AdamW") or config. + + devices: How many devices/GPUs to use. Uses all GPUs by default. + num_nodes: How many nodes the code is being run on. + tokenizer_dir: Optional path to the tokenizer dir that was used for preprocessing the dataset. Only some data + module require this. + logger_name: The name of the logger to send metrics to. + seed: The random seed to use for reproducibility. + """ + if model_name == "list": + available_models = "\n".join(sorted(name_to_config)) + print(f"Available values:\n{available_models}") + quit() + + if initial_checkpoint_dir is not None: + initial_checkpoint_dir = extend_checkpoint_dir(initial_checkpoint_dir) + + if tokenizer_dir is not None: + tokenizer_dir = extend_checkpoint_dir(tokenizer_dir) + + if model_config is None: + # Support both model_name options: meta-llama/Meta-Llama-3-8B & Meta-Llama-3-8B + try: + model_config = Config.from_name(model_name) + except ValueError: + print(f"Model name {model_name} is not supported.\n") + available_models = "\n".join(sorted(name_to_config)) + print(f"Available values:\n{available_models}") + quit() + + hparams = capture_hparams() + + config = Config.from_name(model_name) if model_config is None else model_config + precision = precision or get_default_supported_precision(training=True) + devices = parse_devices(devices) + out_dir = init_out_dir(out_dir) + # in case the dataset requires the Tokenizer + tokenizer = Tokenizer(tokenizer_dir) if tokenizer_dir is not None else None + + logger = choose_logger( + logger_name, + out_dir, + name=f"pretrain-{config.name}", + resume=bool(resume), + log_interval=train.log_interval, + log_args=asdict(log), + ) + + if devices * num_nodes > 1: + strategy = FSDPStrategy(auto_wrap_policy={Block}, state_dict_type="full", sharding_strategy="HYBRID_SHARD") + else: + strategy = "auto" + + fabric = L.Fabric(devices=devices, num_nodes=num_nodes, strategy=strategy, precision=precision, loggers=[logger]) + + if torch.cuda.is_available() and devices > 1: + check_nvlink_connectivity(fabric) + + fabric.launch() + + fabric.print(pprint.pformat(hparams)) + if logger_name in ("tensorboard", "wandb", "mlflow"): + fabric.logger.log_hyperparams(hparams) + + main( + fabric=fabric, + devices=devices, + seed=seed, + initial_checkpoint_dir=initial_checkpoint_dir, + resume=resume, + config=config, + data=data, + data_dir=data_dir, + out_dir=out_dir, + tokenizer=tokenizer, + train=train, + eval=eval, + optimizer=optimizer, + multi_month=multi_month, + ) + + +def main( + fabric: L.Fabric, + devices: int, + seed: 
int,
+    initial_checkpoint_dir: Optional[Path],
+    resume: Union[bool, Literal["auto"], Path],
+    config: Config,
+    data: DataModule,
+    data_dir: Path,
+    out_dir: Path,
+    tokenizer: Optional[Tokenizer],
+    train: TrainArgs,
+    eval: EvalArgs,
+    optimizer: Union[str, Dict],
+    multi_month: bool = False,
+) -> None:
+    validate_args(train, eval, initial_checkpoint_dir, resume)
+
+    if fabric.global_rank == 0:
+        out_dir.mkdir(parents=True, exist_ok=True)
+
+    fabric.seed_everything(seed)  # same seed for every process to init model (FSDP)
+
+    t0 = time.perf_counter()
+    with fabric.init_module(empty_init=True):
+        model = GPT(config)
+
+    initialize_weights(fabric, model, n_layer=config.n_layer, n_embd=config.n_embd)
+
+    if train.tie_embeddings:
+        model.transformer.wte.weight = model.lm_head.weight
+    if train.max_seq_length:
+        model.max_seq_length = train.max_seq_length
+
+    fabric.print(f"Time to instantiate model: {time.perf_counter() - t0:.02f} seconds.")
+    fabric.print(f"Total parameters: {num_parameters(model):,}")
+
+    model = torch.compile(model)
+    model = fabric.setup(model)
+
+    extra_kwargs = {"fused": fabric.device.type == "cuda"}
+    optimizer = instantiate_torch_optimizer(optimizer, model.parameters(), **extra_kwargs)
+    optimizer = fabric.setup_optimizers(optimizer)
+
+    if initial_checkpoint_dir:
+        ckpt_path = initial_checkpoint_dir / "lit_model.pth"
+
+        if fabric.global_rank == 0:
+            try:
+                obj = torch.load(ckpt_path, map_location="cpu")
+            except Exception as e:
+                raise RuntimeError(f"[load] Unable to read {ckpt_path}: {e}")
+
+            if isinstance(obj, dict) and "model" in obj:
+                print(f"[fix] {ckpt_path} contains a full training state; extracting 'model' weights and writing them back", flush=True)
+                sd = obj["model"]
+                # strip a possible 'module.' prefix
+                if len(sd) and next(iter(sd)).startswith("module."):
+                    sd = {k[7:]: v for k, v in sd.items()}
+
+                tmp = ckpt_path.with_suffix(".pth.tmp")
+                torch.save(sd, tmp)
+                os.replace(tmp, ckpt_path)  # atomic replace, avoids a half-written file
+                try:
+                    os.sync()
+                except Exception:
+                    pass
+                print(f"[fix] rewrote checkpoint to model weights only: {ckpt_path}", flush=True)
+            else:
+                print(f"[ok] {ckpt_path} already contains model weights only", flush=True)
+
+        fabric.barrier()
+
+        fabric.load_raw(ckpt_path, model, strict=False)
+
+    from litgpt.data import Arxiv
+
+    if not multi_month:
+        if isinstance(data, Arxiv):
+            data.arxiv_train = str(data_dir).rstrip("/") + "/train"
+            data.arxiv_val = str(data_dir).rstrip("/") + "/train"
+        else:
+            raise NotImplementedError()
+        train_dataloader, val_dataloader = get_dataloaders(fabric, data, tokenizer, train, model.max_seq_length)
+        train_dataloader, val_dataloader = fabric.setup_dataloaders(train_dataloader, val_dataloader)
+
+        state = {
+            "model": model,
+            "optimizer": optimizer,
+            "train_dataloader": train_dataloader,
+            "iter_num": 0,
+            "step_count": 0,
+        }
+
+        resume = find_resume_path(resume, out_dir)
+        if resume:
+            fabric.print(f"Resuming training from {resume}")
+            fabric.load(resume, state)
+
+        train_time = time.perf_counter()
+
+        # work around PyTorch issue https://github.com/pytorch/pytorch/issues/152162
+        # which does not like the lazy initialization to be called in dynamo.
+        # Happens with PyTorch 2.7.
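+        # The call below eagerly runs FSDP's lazy root pre-forward once (with empty
+        # args), so the compiled module does not trigger that initialization while
+        # dynamo is tracing.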
+ if ( + torch.__version__.startswith("2.7.") + and (model._forward_module.__class__.__name__ == "OptimizedModule") + and (model._forward_module._orig_mod.__class__.__name__ == "FullyShardedDataParallel") + ): + from torch.distributed.fsdp._runtime_utils import _root_pre_forward + + _root_pre_forward(model._forward_module._orig_mod, model._forward_module._orig_mod, [], {}) + + ppl( + fabric=fabric, + state=state, + val_dataloader=val_dataloader, + out_dir=out_dir, + train=train, + eval=eval, + ) + + + total_tokens = state["iter_num"] * train.micro_batch_size * model.max_seq_length * fabric.world_size + + # Print formatted output + separator = "-" * 40 + fabric.print(separator) + fabric.print("| Performance") + fabric.print(f"| - Total tokens : {total_tokens:,}") + fabric.print(f"| - Training Time : {(time.perf_counter() - train_time):.2f} s") + fabric.print(f"| - Tok/sec : {total_tokens / train_time:.2f} tok/s") + fabric.print("| " + "-" * 40) + + if fabric.device.type == "cuda": + memory_used = torch.cuda.max_memory_allocated() / 1e9 + fabric.print("| Memory Usage") + fabric.print(f"| - Memory Used : {memory_used:.2f} GB") + fabric.print(separator) + + else: + months = [ + "2407", "2408", "2409", "2410", "2411", "2412", + "2501", "2502", "2503", "2504", "2505", "2506" + ] + for month in months: + if isinstance(data, Arxiv): + data.arxiv_train = str(data_dir).rstrip("/") + f"/{month}/train" + data.arxiv_val = str(data_dir).rstrip("/") + f"/{month}/train" + else: + NotImplementedError() + train_dataloader, val_dataloader = get_dataloaders(fabric, data, tokenizer, train, model.max_seq_length) + train_dataloader, val_dataloader = fabric.setup_dataloaders(train_dataloader, val_dataloader) + + state = { + "model": model, + "optimizer": optimizer, + "train_dataloader": train_dataloader, + "iter_num": 0, + "step_count": 0, + } + + resume = find_resume_path(resume, out_dir) + if resume: + fabric.print(f"Resuming training from {resume}") + fabric.load(resume, state) + + train_time = time.perf_counter() + + # work around PyTorch issue https://github.com/pytorch/pytorch/issues/152162 + # which does not like the lazy initialization to be called in dynamo. + # Happens with PyTorch 2.7. 
+ if ( + torch.__version__.startswith("2.7.") + and (model._forward_module.__class__.__name__ == "OptimizedModule") + and (model._forward_module._orig_mod.__class__.__name__ == "FullyShardedDataParallel") + ): + from torch.distributed.fsdp._runtime_utils import _root_pre_forward + + _root_pre_forward(model._forward_module._orig_mod, model._forward_module._orig_mod, [], {}) + + ppl( + fabric=fabric, + state=state, + val_dataloader=val_dataloader, + out_dir=out_dir/month, + train=train, + eval=eval, + month=month, + ) + + total_tokens = state["iter_num"] * train.micro_batch_size * model.max_seq_length * fabric.world_size + + # Print formatted output + separator = "-" * 40 + fabric.print(separator) + fabric.print("| Performance") + fabric.print(f"| - Total tokens : {total_tokens:,}") + fabric.print(f"| - Training Time : {(time.perf_counter() - train_time):.2f} s") + fabric.print(f"| - Tok/sec : {total_tokens / train_time:.2f} tok/s") + fabric.print("| " + "-" * 40) + + if fabric.device.type == "cuda": + memory_used = torch.cuda.max_memory_allocated() / 1e9 + fabric.print("| Memory Usage") + fabric.print(f"| - Memory Used : {memory_used:.2f} GB") + fabric.print(separator) + + +def ppl( + fabric: L.Fabric, + state: dict, + val_dataloader: DataLoader, + out_dir: Path, + train: TrainArgs, + eval: EvalArgs, + month: Optional[str] = None, +) -> None: + model = state["model"] + + with torch.device("meta"): + meta_model = GPT(model.config) + x = torch.randint(0, 1, (train.micro_batch_size, meta_model.max_seq_length)) + model_fwd = lambda: meta_model(x) # noqa: F821 + model_loss = lambda y: chunked_cross_entropy(y, x, chunk_size=0) # noqa: F821 + measured_flops = measure_flops(meta_model, model_fwd, model_loss) + fabric.print(f"Measured TFLOPs: {measured_flops * fabric.world_size / 1e12:.2f}") + del meta_model, x + + val_loss = validate(fabric, model, val_dataloader, max_iters=eval.max_iters) + metrics = {"val_loss": val_loss, "val_ppl": math.exp(val_loss)} + fabric.log_dict(metrics, step=state["iter_num"]) + fabric.print(f"Final evaluation | val loss: {val_loss.item():.3f} | val ppl: {math.exp(val_loss):.3f}") + + if month is not None: + metrics = { + "month": month, + "val_loss": float(val_loss), + "val_ppl": float(math.exp(val_loss)), + "rank": fabric.global_rank, + } + else: + metrics = { + "val_loss": float(val_loss), + "val_ppl": float(math.exp(val_loss)), + } + jsonl_path = out_dir.parent / "ppl_metrics.jsonl" + os.makedirs(out_dir, exist_ok=True) + with open(jsonl_path, "a", encoding="utf-8") as f: + f.write(json.dumps(metrics, ensure_ascii=False) + "\n") + + +@torch.no_grad() +def validate( + fabric: L.Fabric, model: nn.Module, val_dataloader: DataLoader, max_iters: int, verbose: bool = True +) -> torch.Tensor: + fabric.barrier() + if verbose: + fabric.print("Validating ...") + model.eval() + + losses = [] + for k, batch in enumerate(val_dataloader): + if k >= max_iters: + break + input_ids = batch[:, 0 : model.max_seq_length].contiguous().long() + targets = batch[:, 1 : (model.max_seq_length + 1)].contiguous().long() + logits = model(input_ids) + loss = chunked_cross_entropy(logits, targets) + losses.append(loss) + + val_loss = torch.stack(losses).mean() + model.train() + fabric.barrier() + return val_loss + + +def get_dataloaders( + fabric: L.Fabric, data: DataModule, tokenizer: Tokenizer, train: TrainArgs, block_size: int +) -> Tuple[DataLoader, DataLoader]: + data.connect(tokenizer=tokenizer, batch_size=train.micro_batch_size, max_seq_length=block_size) + with fabric.rank_zero_first(): + 
data.prepare_data() + data.setup() + train_dataloader = data.train_dataloader() + val_dataloader = data.val_dataloader() + return train_dataloader, val_dataloader + + +def save_checkpoint(fabric, state, tokenizer_dir, checkpoint_file): + model = state["model"] + checkpoint_file.parent.mkdir(parents=True, exist_ok=True) + fabric.print(f"Saving checkpoint to {str(checkpoint_file)!r}") + fabric.save(checkpoint_file, state) + if fabric.global_rank == 0: + save_hyperparameters(setup, checkpoint_file.parent) + if tokenizer_dir is not None: + copy_config_files(tokenizer_dir, checkpoint_file.parent) + save_config(model.config, checkpoint_file.parent) + +def initialize_weights(fabric: L.Fabric, model: GPT, n_layer: int, n_embd: int) -> None: + """GPT-NeoX weight initialization (https://arxiv.org/abs/2204.06745).""" + # Adapted from https://github.com/jzhang38/TinyLlama + + def init_weights(module, std): + nn.init.normal_(module.weight, mean=0.0, std=std) + if getattr(module, "bias", None) is not None: + nn.init.zeros_(module.bias) + + for mod in model.modules(): + if isinstance(mod, (nn.Embedding, nn.Linear)): + mod.reset_parameters = partial(init_weights, mod, std=math.sqrt(2.0 / 5 / n_embd)) + + # need a separate loop because `mod.proj` below is a `nn.Linear` too + for mod in model.modules(): + if isinstance(mod, (LLaMAMLP, CausalSelfAttention)): + mod.proj.reset_parameters = partial(init_weights, mod.proj, std=(1 / math.sqrt(n_embd) / n_layer)) + + if not isinstance(fabric.strategy, FSDPStrategy): + reset_parameters(model) + +def validate_args(train: TrainArgs, eval: EvalArgs, initial_checkpoint_dir, resume) -> None: + issues = [] + unsupported = [(train, ["max_steps", "epochs"]), (eval, ["max_new_tokens"])] + for args, names in unsupported: + for name in names: + if getattr(args, name) is not None: + issues.append(f"{__file__} doesn't support the {name!r} argument. This is set in {args}") + required = [(train, ["max_tokens", "max_norm"])] + for args, names in required: + for name in names: + if getattr(args, name) is None: + issues.append(f"{__file__} requires the {name!r} argument. This is set in {args}") + if initial_checkpoint_dir and resume: + issues.append("Can't provide both `--resume` and `--initial_checkpoint_dir`. Choose one.") + if issues: + raise ValueError("\n".join(issues)) diff --git a/litgpt/pretrain.py b/litgpt/pretrain.py new file mode 100644 index 0000000000000000000000000000000000000000..32832574e82f621b3aa23469bcc5a1af10aedc70 --- /dev/null +++ b/litgpt/pretrain.py @@ -0,0 +1,564 @@ +# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file. 
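+"""Pretraining script: builds a ``GPT`` model from a ``Config``, sets up Fabric
+(FSDP when more than one device is used), and runs the training loop in ``fit``."""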
+ +import math +import pprint +import time +import os +import json +from dataclasses import asdict +from datetime import timedelta +from functools import partial +from pathlib import Path +from typing import Dict, Optional, Tuple, Union + +import lightning as L +import torch +import torch.nn as nn +from lightning.fabric.strategies import FSDPStrategy +from lightning.fabric.utilities.throughput import ThroughputMonitor, measure_flops +from torch.utils.data import DataLoader +from torchmetrics.aggregation import RunningMean +from typing_extensions import Literal + +from litgpt import Tokenizer +from litgpt.args import EvalArgs, LogArgs, TrainArgs +from litgpt.config import name_to_config +from litgpt.data import DataModule, TinyLlama, Arxiv +from litgpt.model import GPT, Block, CausalSelfAttention, Config, LLaMAMLP +from litgpt.utils import ( + CycleIterator, + capture_hparams, + check_nvlink_connectivity, + choose_logger, + chunked_cross_entropy, + copy_config_files, + extend_checkpoint_dir, + find_resume_path, + get_default_supported_precision, + init_out_dir, + instantiate_torch_optimizer, + num_parameters, + parse_devices, + reset_parameters, + save_config, + save_hyperparameters, +) + + +def setup( + model_name: str, + model_config: Optional[Config] = None, + out_dir: Path = Path("out/pretrain"), + precision: Literal["bf16-true", "bf16-mixed", "32-true", None] = None, + initial_checkpoint_dir: Optional[Path] = None, + resume: Union[bool, Literal["auto"], Path] = False, + data: Optional[DataModule] = None, + data_dir: Path = None, + train: TrainArgs = TrainArgs( + save_interval=1000, + log_interval=1, + global_batch_size=512, + micro_batch_size=4, + max_tokens=int(3e12), # 3 trillion + max_norm=1.0, + min_lr=4e-5, + lr_warmup_steps=2000, + tie_embeddings=False, + ), + eval: EvalArgs = EvalArgs(interval=1000, max_iters=100), + log: LogArgs = LogArgs(), + optimizer: Union[str, Dict] = "AdamW", + devices: Union[int, str] = "auto", + num_nodes: int = 1, + tokenizer_dir: Optional[Path] = None, + logger_name: Literal["wandb", "tensorboard", "csv", "mlflow"] = "tensorboard", + seed: int = 42, +): + """Pretrain a model. + + Arguments: + model_name: The name of the model to pretrain. Choose from names in ``litgpt.config``. Use "list" to list the supported models. + model_config: A ``litgpt.Config`` object to define the model architecture. Mutually exclusive with + ``model_config``. Overrides the `model_name` if specified. + out_dir: Directory in which to save checkpoints and logs. If running in a Lightning Studio Job, look for it in + /teamspace/jobs//share. + precision: The precision to use for finetuning. Determines a compatible precision setting by default. + initial_checkpoint_dir: Optional path to a checkpoint directory to initialize the model from. + Useful for continued pretraining. Mutually exclusive with ``resume``. + resume: Path to a checkpoint directory to resume from in case training was interrupted, or ``True`` to resume + from the latest checkpoint in ``out_dir``. An error will be raised if no checkpoint is found. Passing + ``'auto'`` will resume from the latest checkpoint but not error if no checkpoint exists. + data: Data-related arguments. If not provided, the default is ``litgpt.data.TinyLlama``. + train: Training-related arguments. See ``litgpt.args.TrainArgs`` for details. + eval: Evaluation-related arguments. See ``litgpt.args.EvalArgs`` for details. + optimizer: An optimizer name (such as "AdamW") or config. + + devices: How many devices/GPUs to use. 
Uses all GPUs by default. + num_nodes: How many nodes the code is being run on. + tokenizer_dir: Optional path to the tokenizer dir that was used for preprocessing the dataset. Only some data + module require this. + logger_name: The name of the logger to send metrics to. + seed: The random seed to use for reproducibility. + """ + if model_name == "list": + available_models = "\n".join(sorted(name_to_config)) + print(f"Available values:\n{available_models}") + quit() + + if initial_checkpoint_dir is not None: + initial_checkpoint_dir = extend_checkpoint_dir(initial_checkpoint_dir) + + if tokenizer_dir is not None: + tokenizer_dir = extend_checkpoint_dir(tokenizer_dir) + + if model_config is None: + # Support both model_name options: meta-llama/Meta-Llama-3-8B & Meta-Llama-3-8B + try: + model_config = Config.from_name(model_name) + except ValueError: + print(f"Model name {model_name} is not supported.\n") + available_models = "\n".join(sorted(name_to_config)) + print(f"Available values:\n{available_models}") + quit() + + hparams = capture_hparams() + + from litgpt.data import Arxiv + if isinstance(data, Arxiv): + data.arxiv_train = str(data_dir).rstrip("/") + "/train" + data.arxiv_val = str(data_dir).rstrip("/") + "/train" + else: + data = TinyLlama() if data is None else data + + config = Config.from_name(model_name) if model_config is None else model_config + precision = precision or get_default_supported_precision(training=True) + devices = parse_devices(devices) + out_dir = init_out_dir(out_dir) + # in case the dataset requires the Tokenizer + tokenizer = Tokenizer(tokenizer_dir) if tokenizer_dir is not None else None + + logger = choose_logger( + logger_name, + out_dir, + name=f"pretrain-{config.name}", + resume=bool(resume), + log_interval=train.log_interval, + log_args=asdict(log), + ) + + if devices * num_nodes > 1: + strategy = FSDPStrategy(auto_wrap_policy={Block}, state_dict_type="full", sharding_strategy="HYBRID_SHARD") + else: + strategy = "auto" + + fabric = L.Fabric(devices=devices, num_nodes=num_nodes, strategy=strategy, precision=precision, loggers=[logger]) + + if torch.cuda.is_available() and devices > 1: + check_nvlink_connectivity(fabric) + + fabric.launch() + + fabric.print(pprint.pformat(hparams)) + if logger_name in ("tensorboard", "wandb", "mlflow"): + fabric.logger.log_hyperparams(hparams) + + main( + fabric=fabric, + devices=devices, + num_nodes=num_nodes, + seed=seed, + initial_checkpoint_dir=initial_checkpoint_dir, + resume=resume, + config=config, + data=data, + out_dir=out_dir, + tokenizer_dir=tokenizer_dir, + tokenizer=tokenizer, + train=train, + eval=eval, + optimizer=optimizer, + ) + + +def main( + fabric: L.Fabric, + devices: int, + seed: int, + initial_checkpoint_dir: Optional[Path], + resume: Union[bool, Literal["auto"], Path], + config: Config, + data: DataModule, + out_dir: Path, + tokenizer_dir: Optional[Path], + tokenizer: Optional[Tokenizer], + train: TrainArgs, + eval: EvalArgs, + optimizer: Union[str, Dict], + num_nodes: int = 1, +) -> None: + validate_args(train, eval, initial_checkpoint_dir, resume) + + if fabric.global_rank == 0: + out_dir.mkdir(parents=True, exist_ok=True) + + fabric.seed_everything(seed) # same seed for every process to init model (FSDP) + + t0 = time.perf_counter() + with fabric.init_module(empty_init=True): + model = GPT(config) + + initialize_weights(fabric, model, n_layer=config.n_layer, n_embd=config.n_embd) + + if train.tie_embeddings: + model.transformer.wte.weight = model.lm_head.weight + if train.max_seq_length: + 
model.max_seq_length = train.max_seq_length
+
+    fabric.print(f"Time to instantiate model: {time.perf_counter() - t0:.02f} seconds.")
+    fabric.print(f"Total parameters: {num_parameters(model):,}")
+
+    model = torch.compile(model)
+    model = fabric.setup(model)
+
+    extra_kwargs = {"fused": fabric.device.type == "cuda"}
+    optimizer = instantiate_torch_optimizer(optimizer, model.parameters(), **extra_kwargs)
+    optimizer = fabric.setup_optimizers(optimizer)
+
+    train_dataloader, val_dataloader = get_dataloaders(fabric, data, tokenizer, train, model.max_seq_length)
+    train_dataloader, val_dataloader = fabric.setup_dataloaders(train_dataloader, val_dataloader)
+
+    if initial_checkpoint_dir:
+        ckpt_path = initial_checkpoint_dir / "lit_model.pth"
+
+        if fabric.global_rank == 0:
+            try:
+                obj = torch.load(ckpt_path, map_location="cpu")
+            except Exception as e:
+                raise RuntimeError(f"[load] Unable to read {ckpt_path}: {e}")
+
+            if isinstance(obj, dict) and "model" in obj:
+                print(f"[fix] {ckpt_path} contains a full training state; extracting 'model' weights and writing them back", flush=True)
+                sd = obj["model"]
+                # strip a possible 'module.' prefix
+                if len(sd) and next(iter(sd)).startswith("module."):
+                    sd = {k[7:]: v for k, v in sd.items()}
+
+                tmp = ckpt_path.with_suffix(".pth.tmp")
+                torch.save(sd, tmp)
+                os.replace(tmp, ckpt_path)  # atomic replace, avoids a half-written file
+                try:
+                    os.sync()  # flush to disk (if permitted)
+                except Exception:
+                    pass
+                print(f"[fix] rewrote checkpoint to model weights only: {ckpt_path}", flush=True)
+            else:
+                print(f"[ok] {ckpt_path} already contains model weights only", flush=True)
+
+        fabric.barrier()
+
+        fabric.load_raw(ckpt_path, model, strict=False)
+
+    state = {
+        "model": model,
+        "optimizer": optimizer,
+        "train_dataloader": train_dataloader,
+        "iter_num": 0,
+        "step_count": 0,
+    }
+
+    resume = find_resume_path(resume, out_dir)
+    if resume:
+        fabric.print(f"Resuming training from {resume}")
+        fabric.load(resume, state)
+
+    train_time = time.perf_counter()
+
+    # work around PyTorch issue https://github.com/pytorch/pytorch/issues/152162
+    # which does not like the lazy initialization to be called in dynamo.
+    # Happens with PyTorch 2.7.
+ if ( + torch.__version__.startswith("2.7.") + and (model._forward_module.__class__.__name__ == "OptimizedModule") + and (model._forward_module._orig_mod.__class__.__name__ == "FullyShardedDataParallel") + ): + from torch.distributed.fsdp._runtime_utils import _root_pre_forward + + _root_pre_forward(model._forward_module._orig_mod, model._forward_module._orig_mod, [], {}) + + fit( + fabric=fabric, + devices=devices, + num_nodes=num_nodes, + state=state, + train_dataloader=train_dataloader, + val_dataloader=val_dataloader, + out_dir=out_dir, + tokenizer_dir=tokenizer_dir, + train=train, + eval=eval, + ) + + # Save final checkpoi + save_checkpoint(fabric, state, tokenizer_dir, out_dir / "final" / "lit_model.pth") + + total_tokens = state["iter_num"] * train.micro_batch_size * model.max_seq_length * fabric.world_size + + # Print formatted output + separator = "-" * 40 + fabric.print(separator) + fabric.print("| Performance") + fabric.print(f"| - Total tokens : {total_tokens:,}") + fabric.print(f"| - Training Time : {(time.perf_counter() - train_time):.2f} s") + fabric.print(f"| - Tok/sec : {total_tokens / train_time:.2f} tok/s") + fabric.print("| " + "-" * 40) + + if fabric.device.type == "cuda": + memory_used = torch.cuda.max_memory_allocated() / 1e9 + fabric.print("| Memory Usage") + fabric.print(f"| - Memory Used : {memory_used:.2f} GB") + fabric.print(separator) + + +def fit( + fabric: L.Fabric, + devices: int, + state: dict, + train_dataloader: DataLoader, + val_dataloader: DataLoader, + out_dir: Path, + tokenizer_dir: Optional[Path], + train: TrainArgs, + eval: EvalArgs, + num_nodes: int = 1, +) -> None: + model = state["model"] + optimizer = state["optimizer"] + + if eval.initial_validation: + val_loss = validate(fabric, model, val_dataloader, max_iters=eval.max_iters) + val_loss = f"{val_loss:.3f}" + else: + fabric.print("Verifying settings ...") + validate(fabric, model, val_dataloader, max_iters=2, verbose=False) # sanity check + val_loss = "n/a" + + throughput = ThroughputMonitor(fabric, window_size=5) + + with torch.device("meta"): + meta_model = GPT(model.config) + x = torch.randint(0, 1, (train.micro_batch_size, meta_model.max_seq_length)) + model_fwd = lambda: meta_model(x) # noqa: F821 + model_loss = lambda y: chunked_cross_entropy(y, x, chunk_size=0) # noqa: F821 + measured_flops = measure_flops(meta_model, model_fwd, model_loss) + fabric.print(f"Measured TFLOPs: {measured_flops * fabric.world_size / 1e12:.2f}") + del meta_model, x + + + max_tokens_per_device = train.max_tokens // fabric.world_size + tokens_per_iter = train.micro_batch_size * model.max_seq_length + max_iters = max_tokens_per_device // tokens_per_iter + log_iter_interval = train.log_interval * train.gradient_accumulation_iters(devices, num_nodes) + initial_iter = state["iter_num"] + train_iterator = CycleIterator(train_dataloader) + + running_loss = RunningMean(window=train.gradient_accumulation_iters(devices, num_nodes), sync_on_compute=False).to( + fabric.device + ) + fabric.barrier() + total_t0 = time.perf_counter() + + warmup_iters = train.warmup_iters(devices, num_nodes, max_iters, train_dataloader) + + for train_data in train_iterator: + if state["iter_num"] >= max_iters: + break + + # determine and set the learning rate for this iteration + lr = get_lr(optimizer.defaults["lr"], state["iter_num"], warmup_iters, max_iters, train.min_lr) + for param_group in optimizer.param_groups: + param_group["lr"] = lr + + state["iter_num"] += 1 + iter_t0 = time.perf_counter() + + input_ids = train_data[:, 0 : 
model.max_seq_length].contiguous().long() + targets = train_data[:, 1 : (model.max_seq_length + 1)].contiguous().long() + + is_accumulating = state["iter_num"] % train.gradient_accumulation_iters(devices, num_nodes) != 0 + with fabric.no_backward_sync(model, enabled=is_accumulating): + logits = model(input_ids) + loss = chunked_cross_entropy(logits, targets) + fabric.backward(loss / train.gradient_accumulation_iters(devices, num_nodes)) + + running_loss.update(loss.detach()) + + if not is_accumulating: + fabric.clip_gradients(model, optimizer, max_norm=train.max_norm) + optimizer.step() + optimizer.zero_grad() + state["step_count"] += 1 + + if state["iter_num"] % log_iter_interval == 0: + loss = running_loss.compute().item() # expensive device-to-host synchronization + t1 = time.perf_counter() + throughput.update( + time=(t1 - total_t0), + flops=(measured_flops * log_iter_interval), + batches=state["iter_num"], + samples=(state["iter_num"] * train.micro_batch_size), + lengths=(state["iter_num"] * train.micro_batch_size * model.max_seq_length), + ) + metrics = { + "loss": loss, + "iter": state["iter_num"], + "step": state["step_count"], + "epoch": train_iterator.epoch, + "iter_time": t1 - iter_t0, + "remaining_time": ( + (t1 - total_t0) / (state["iter_num"] - initial_iter) * (max_iters - state["iter_num"]) + ), + "tokens": state["iter_num"] * train.micro_batch_size * model.max_seq_length, + "total_tokens": (state["iter_num"] * train.micro_batch_size * model.max_seq_length * fabric.world_size), + "learning_rate": lr, + } + if isinstance(val_loss, float): + val_loss = f"{val_loss:.3f}" + fabric.print( + f"Epoch {metrics['epoch'] + 1} | iter {metrics['iter']} step {metrics['step']} |" + f" loss train: {metrics['loss']:.3f}," + f" val: {val_loss} |" + f" iter time: {metrics['iter_time'] * 1000:.2f} ms" + f"{' (step)' if not is_accumulating else ''}" + f" remaining time: {timedelta(seconds=int(metrics['remaining_time']))!s}", + flush=True + ) + + throughput_metrics = throughput.compute() + metrics.update(throughput_metrics) + fabric.log_dict(metrics, step=state["iter_num"] - 1) + + if val_dataloader is not None and not is_accumulating and state["step_count"] % eval.interval == 0: + t0 = time.perf_counter() + val_loss = validate(fabric, model, val_dataloader, max_iters=eval.max_iters) + val_loss = val_loss.item() + td = time.perf_counter() - t0 + + fabric.print(f"iter {state['iter_num']}: val loss {val_loss:.4f}, val time: {td * 1000:.2f} ms") + metrics = {"val_loss": val_loss, "val_ppl": math.exp(val_loss)} + fabric.log_dict(metrics, step=state["iter_num"] - 1) + fabric.barrier() + + if train.save_interval is not None and not is_accumulating and state["step_count"] % train.save_interval == 0: + save_checkpoint(fabric, state, tokenizer_dir, out_dir / f"step-{state['step_count']:08d}" / "lit_model.pth") + + # Final validation + if eval.final_validation: + val_loss = validate(fabric, model, val_dataloader, max_iters=eval.max_iters) + metrics = {"val_loss": val_loss, "val_ppl": math.exp(val_loss)} + fabric.log_dict(metrics, step=state["iter_num"]) + fabric.print(f"Final evaluation | val loss: {val_loss.item():.3f} | val ppl: {math.exp(val_loss):.3f}") + + +@torch.no_grad() +def validate( + fabric: L.Fabric, model: nn.Module, val_dataloader: DataLoader, max_iters: int, verbose: bool = True +) -> torch.Tensor: + fabric.barrier() + if verbose: + fabric.print("Validating ...") + model.eval() + + losses = [] + for k, batch in enumerate(val_dataloader): + if k >= max_iters: + break + input_ids = batch[:, 
0 : model.max_seq_length].contiguous().long() + targets = batch[:, 1 : (model.max_seq_length + 1)].contiguous().long() + logits = model(input_ids) + loss = chunked_cross_entropy(logits, targets) + losses.append(loss) + + val_loss = torch.stack(losses).mean() + model.train() + fabric.barrier() + return val_loss + + +def get_dataloaders( + fabric: L.Fabric, data: DataModule, tokenizer: Tokenizer, train: TrainArgs, block_size: int +) -> Tuple[DataLoader, DataLoader]: + data.connect(tokenizer=tokenizer, batch_size=train.micro_batch_size, max_seq_length=block_size) + with fabric.rank_zero_first(): + data.prepare_data() + data.setup() + train_dataloader = data.train_dataloader() + val_dataloader = data.val_dataloader() + return train_dataloader, val_dataloader + + +# learning rate decay scheduler (cosine with linear warmup) +def get_lr(learning_rate: float, it: int, warmup_iters: int, max_iters: int, min_lr: float) -> float: + # 1) linear warmup for warmup_iters steps + if it < warmup_iters: + return learning_rate * it / warmup_iters + # 2) if it > max_iters, return min learning rate + if it > max_iters: + return min_lr + # 3) in between, use cosine decay down to min learning rate + decay_ratio = (it - warmup_iters) / (max_iters - warmup_iters) + assert 0 <= decay_ratio <= 1 + coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio)) # coeff ranges 0..1 + return min_lr + coeff * (learning_rate - min_lr) + + +def initialize_weights(fabric: L.Fabric, model: GPT, n_layer: int, n_embd: int) -> None: + """GPT-NeoX weight initialization (https://arxiv.org/abs/2204.06745).""" + # Adapted from https://github.com/jzhang38/TinyLlama + + def init_weights(module, std): + nn.init.normal_(module.weight, mean=0.0, std=std) + if getattr(module, "bias", None) is not None: + nn.init.zeros_(module.bias) + + for mod in model.modules(): + if isinstance(mod, (nn.Embedding, nn.Linear)): + mod.reset_parameters = partial(init_weights, mod, std=math.sqrt(2.0 / 5 / n_embd)) + + # need a separate loop because `mod.proj` below is a `nn.Linear` too + for mod in model.modules(): + if isinstance(mod, (LLaMAMLP, CausalSelfAttention)): + mod.proj.reset_parameters = partial(init_weights, mod.proj, std=(1 / math.sqrt(n_embd) / n_layer)) + + if not isinstance(fabric.strategy, FSDPStrategy): + reset_parameters(model) + + +def save_checkpoint(fabric, state, tokenizer_dir, checkpoint_file): + model = state["model"] + checkpoint_file.parent.mkdir(parents=True, exist_ok=True) + fabric.print(f"Saving checkpoint to {str(checkpoint_file)!r}") + fabric.save(checkpoint_file, state) + if fabric.global_rank == 0: + save_hyperparameters(setup, checkpoint_file.parent) + if tokenizer_dir is not None: + copy_config_files(tokenizer_dir, checkpoint_file.parent) + save_config(model.config, checkpoint_file.parent) + + +def validate_args(train: TrainArgs, eval: EvalArgs, initial_checkpoint_dir, resume) -> None: + issues = [] + unsupported = [(train, ["max_steps", "epochs"]), (eval, ["max_new_tokens"])] + for args, names in unsupported: + for name in names: + if getattr(args, name) is not None: + issues.append(f"{__file__} doesn't support the {name!r} argument. This is set in {args}") + required = [(train, ["max_tokens", "max_norm"])] + for args, names in required: + for name in names: + if getattr(args, name) is None: + issues.append(f"{__file__} requires the {name!r} argument. This is set in {args}") + if initial_checkpoint_dir and resume: + issues.append("Can't provide both `--resume` and `--initial_checkpoint_dir`. 
Choose one.") + if issues: + raise ValueError("\n".join(issues)) diff --git a/litgpt/prompts.py b/litgpt/prompts.py new file mode 100644 index 0000000000000000000000000000000000000000..f0b9168866872e30eb0668f67e2396900d5496fd --- /dev/null +++ b/litgpt/prompts.py @@ -0,0 +1,541 @@ +# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file. +import importlib +import re +from abc import abstractmethod +from json import dumps +from pathlib import Path +from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Type, Union + +import yaml + +from litgpt.config import Config + +if TYPE_CHECKING: + from litgpt import Tokenizer + + +class PromptStyle: + """Base interface for prompt styles.""" + + @abstractmethod + def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str: + return prompt + + def stop_tokens(self, tokenizer: "Tokenizer") -> Tuple[List[int], ...]: + return ([tokenizer.eos_id],) + + @classmethod + def from_name(cls, name: str) -> "PromptStyle": + return prompt_styles[name]() + + @classmethod + def from_config(cls, config: Config) -> "PromptStyle": + return model_name_to_prompt_style(config.name) + + +class Default(PromptStyle): + def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str: + return prompt + + def stop_tokens(self, tokenizer: "Tokenizer") -> Tuple[List[int], ...]: + return ([tokenizer.eos_id],) + + +class Alpaca(PromptStyle): + def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str: + if kwargs.get("input"): + sys_prompt = sys_prompt or ( + "Below is an instruction that describes a task, paired with an input that provides further context. " + "Write a response that appropriately completes the request.\n\n" + ) + return f"{sys_prompt}### Instruction:\n{prompt}\n\n### Input:\n{kwargs['input']}\n\n### Response:\n" + + sys_prompt = sys_prompt or ( + "Below is an instruction that describes a task. " + "Write a response that appropriately completes the request.\n\n" + ) + return f"{sys_prompt}### Instruction:\n{prompt}\n\n### Response:\n" + + +class FLAN(PromptStyle): + def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str: + sys_prompt = sys_prompt or ( + "Below is an instruction that describes a task. " + "Write a response that appropriately completes the request.\n\n" + ) + return f"{sys_prompt}### Instruction:\n{prompt}\n\n### Response:\n" + + +class Longform(PromptStyle): + def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str: + sys_prompt = sys_prompt or ( + "Below is an instruction that describes a task, paired with an input that provides further context. " + "Write a response that appropriately completes the request.\n\n" + ) + return f"{sys_prompt}### Instruction:\n{prompt}\n\n### Response:\n" + + +class StableLMAlpha(PromptStyle): + def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str: + sys_prompt = sys_prompt or ( + "# StableLM Tuned (Alpha version)\n- StableLM is a helpful and harmless open-source AI language" + " model developed by StabilityAI.\n- StableLM is excited to be able to help the user, but will refuse to do" + " anything that could be considered harmful to the user.\n- StableLM is more than just an information" + " source, StableLM is also able to write poetry, short stories, and make jokes.\n- StableLM will refuse to" + " participate in anything that could harm a human." 
+        )
+        return f"<|SYSTEM|>{sys_prompt}<|USER|>{prompt}<|ASSISTANT|>"
+
+    def stop_tokens(self, tokenizer: "Tokenizer") -> Tuple[List[int], ...]:
+        return (
+            [tokenizer.eos_id],
+            [tokenizer.token_to_id("<|SYSTEM|>")],
+            [tokenizer.token_to_id("<|ASSISTANT|>")],
+            [tokenizer.token_to_id("<|USER|>")],
+        )
+
+
+class StableLMZephyr(PromptStyle):
+    def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
+        return f"<|user|>\n{prompt}<|endoftext|>\n<|assistant|>\n"
+
+
+class Falcon(PromptStyle):
+    def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
+        return f"{prompt}\nAnswer:"
+
+    def stop_tokens(self, tokenizer: "Tokenizer") -> Tuple[List[int], ...]:
+        return (
+            [tokenizer.eos_id],
+            # the model rarely emits the eos token and instead outputs newlines, but we cannot use them
+            # to stop or else things like code generation wouldn't work
+            [tokenizer.token_to_id("User"), tokenizer.token_to_id(":")],
+            [193, tokenizer.token_to_id("User")],  # 193: '\n'
+        )
+
+
+class Falcon3(PromptStyle):
+    def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
+        return f"<|user|>\n{prompt}<|endoftext|>\n<|assistant|>\n"
+
+    def stop_tokens(self, tokenizer: "Tokenizer") -> Tuple[List[int], ...]:
+        return (
+            [tokenizer.eos_id],
+            [tokenizer.token_to_id("<|endoftext|>")],
+        )
+
+
+class Llama2FunctionCalling(PromptStyle):
+    def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
+        # Has to be before the llama config
+        b_func, e_func = "<FUNCTIONS>", "</FUNCTIONS>\n\n"
+        b_inst, e_inst = "[INST]", "[/INST]"
+        b_sys, e_sys = "<<SYS>>\n", "\n<</SYS>>\n\n"
+        # This is an example for how to format functions for the model
+        function_metadata = {
+            "function": "search_bing",
+            "description": (
+                "Search the web for content on Bing. This allows users to search online/the internet/the web for"
+                " content."
+            ),
+            "arguments": [{"name": "query", "type": "string", "description": "The search query string"}],
+        }
+
+        system_prompt = sys_prompt or (
+            "You are a helpful, respectful and honest assistant. Always answer as helpfully as"
+            " possible. Your only response should be JSON formatted functions"
+        )
+        # replace the curly braces with double curly braces to escape them
+        function_list = dumps(function_metadata).replace("{", "{{").replace("}", "}}")
+        return (
+            f"{b_func}{function_list.strip()}{e_func}{b_inst}{b_sys}{system_prompt.strip()}{e_sys}{prompt}{e_inst}\n\n"
+        )
+
+
+class Llama2(PromptStyle):
+    def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
+        b_inst, e_inst = "[INST]", "[/INST]"
+        b_sys, e_sys = "<<SYS>>\n", "\n<</SYS>>\n\n"
+        sys_prompt = sys_prompt or (
+            "You are a helpful, respectful and honest assistant. Always answer as helpfully as"
+            " possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist,"
+            " toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and"
+            " positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why"
+            " instead of answering something not correct. If you don't know the answer to a question, please don't"
+            " share false information."
+        )
+        return f"{b_inst} {b_sys}{sys_prompt}{e_sys} {prompt} {e_inst} "
+
+
+class Llama3(PromptStyle):
+    def apply(
+        self, prompt: Union[str, List[Dict[str, str]]], *, sys_prompt: Optional[str] = None, **kwargs: str
+    ) -> str:
+        default_system_prompt = sys_prompt or "You are a helpful assistant."
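+        # `prompt` may be a plain string or a chat-style list of
+        # {"role": ..., "content": ...} dicts, e.g. (illustrative):
+        #   Llama3().apply([{"role": "user", "content": "Hi"}])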
+ + # https://github.com/meta-llama/llama3/blob/359887376f0aaf30e433f23e25df858d8c2a9833/llama/tokenizer.py#L202-L229 + if isinstance(prompt, str): + return ( + "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n" + f"{default_system_prompt}<|eot_id|>" # No newline + "<|start_header_id|>user<|end_header_id|>\n\n" + f"{prompt}<|eot_id|>" # No newline + "<|start_header_id|>assistant<|end_header_id|>\n\n" + ) + elif isinstance(prompt, list): + + def encode_header(role: str) -> List[str]: + return [f"<|start_header_id|>{role}<|end_header_id|>\n\n"] + + def encode_message(message: Dict[str, str]) -> List[str]: + tokens = encode_header(message["role"]) + # NOTE: Meta stripped this. I'm not sure I agree, but who am I to argue? + tokens.append(message["content"].strip()) + tokens.append("<|eot_id|>") + return tokens + + def has_system_prompt(messages: List[Dict[str, str]]) -> bool: + return messages[0].get("role", "") == "system" if len(messages) else False + + tokens = ["<|begin_of_text|>"] + if not has_system_prompt(prompt): + tokens.extend(encode_message({"role": "system", "content": default_system_prompt})) + for i, message in enumerate(prompt): + if i != 0 and message["role"] == "system": + raise ValueError("'system' role is only allowed at the beginning of the conversation list.") + if message["role"] not in ["assistant", "user", "system"]: + raise ValueError( + f"Unknown role: '{message['role']}'. Supported roles are 'assistant', 'user', and 'system'." + ) + tokens.extend(encode_message(message)) + tokens.extend(encode_header("assistant")) + return "".join(tokens) + else: + raise ValueError(f"Unsupported prompt type: {type(prompt)}") + + def stop_tokens(self, tokenizer: "Tokenizer") -> Tuple[List[int], ...]: + return ( + [tokenizer.eos_id], + [tokenizer.token_to_id("<|eot_id|>")], + ) + + +class R1Base(PromptStyle): + def apply( + self, prompt: Union[str, List[Dict[str, str]]], *, sys_prompt: Optional[str] = None, **kwargs: str + ) -> str: + default_system_prompt = sys_prompt or "" + + bos_token = "<|begin▁of▁sentence|>" + eos_token = "" + + if isinstance(prompt, str): + return f"{default_system_prompt}<|User|>{prompt}<|Assistant|>" # Prepares for assistant response + elif isinstance(prompt, list): + + def encode_message(message: Dict[str, str]) -> str: + role = message["role"] + content = message["content"].strip() + + if role == "system": + return content # System prompt is prepended at the start + elif role == "user": + return f"<|User|>{content}" + elif role == "assistant": + return f"<|Assistant|>{content}{eos_token}" + else: + raise ValueError(f"Unknown role: '{role}'. 
Supported roles are 'assistant', 'user', and 'system'.")
+
+            # Extract system prompt (if any)
+            system_prompt = ""
+            if prompt[0].get("role") == "system":
+                system_prompt = prompt[0]["content"]
+                prompt = prompt[1:]  # Remove system message from the list
+
+            # Construct the formatted prompt
+            formatted_prompt = system_prompt
+            for message in prompt:
+                formatted_prompt += encode_message(message)
+
+            formatted_prompt += "<|Assistant|>"  # Prepares for assistant response
+            return formatted_prompt
+        else:
+            raise ValueError(f"Unsupported prompt type: {type(prompt)}")
+
+    def stop_tokens(self, tokenizer: "Tokenizer") -> Tuple[List[int], ...]:
+        return (
+            [tokenizer.eos_id],
+            [tokenizer.token_to_id("<|end▁of▁sentence|>")],
+        )
+
+
+class FreeWilly2(PromptStyle):
+    def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
+        sys_prompt = sys_prompt or "This is a system prompt, please behave and help the user."
+        return f"### System:\n{sys_prompt}\n\n### User:\n{prompt}\n\n### Assistant:\n"
+
+
+class Platypus(PromptStyle):
+    def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
+        return f"### Instruction:\n\n{prompt}\n\n### Response:\n"
+
+
+class StableCode(PromptStyle):
+    def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
+        return f"###Instruction\n{prompt}###Response\n"
+
+
+class CodeLlama(PromptStyle):
+    def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
+        # for CodeLLama, we don't set a default system prompt, but it is supported:
+        # https://huggingface.co/blog/codellama#conversational-instructions
+        # Mistral does not: https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1#instruction-format
+        b_inst, e_inst = "[INST]", "[/INST]"
+        if sys_prompt:
+            b_sys, e_sys = "<<SYS>>\n", "\n<</SYS>>\n\n"
+            return f"{b_inst} {b_sys}{sys_prompt}{e_sys}{prompt} {e_inst}"
+        return f"{b_inst} {prompt} {e_inst}"
+
+
+class Phi1(PromptStyle):
+    def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
+        return f"{prompt}\n\nAnswer:"
+
+    def stop_tokens(self, tokenizer: "Tokenizer") -> Tuple[List[int], ...]:
+        return (
+            [tokenizer.eos_id],
+            [tokenizer.token_to_id("Answer"), tokenizer.token_to_id(":")],
+            [198, tokenizer.token_to_id("Answer"), tokenizer.token_to_id(":")],
+            # the model rarely emits the eos token and instead outputs newlines, but we cannot use them
+            # to stop or else things like code generation wouldn't work
+            # [198, 198],  # '\n', '\n'
+        )
+
+
+class Phi2(PromptStyle):
+    def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
+        return f"Instruct: {prompt}\nOutput:"
+
+
+class Phi3(PromptStyle):
+    def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
+        sys_prompt = sys_prompt or "You are a helpful assistant."
+        return f"<|system|>\n{sys_prompt}<|end|>\n<|user|>\n{prompt}<|end|>\n<|assistant|>\n"
+
+
+class Phi4(PromptStyle):
+    def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
+        res = ""
+        if sys_prompt:
+            res += f"<|im_start|>system<|im_sep|>{sys_prompt}<|im_end|>"
+        res += f"<|im_start|>user<|im_sep|>{prompt}<|im_end|><|im_start|>assistant<|im_sep|>"
+        return res
+
+
+class Phi4Reasoning(PromptStyle):
+    def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
+        sys_prompt = (
+            sys_prompt
+            or "You are Phi, a language model trained by Microsoft to help users. 
Your role as an assistant involves thoroughly exploring questions through a systematic thinking process before providing the final precise and accurate solutions. This requires engaging in a comprehensive cycle of analysis, summarizing, exploration, reassessment, reflection, backtracing, and iteration to develop well-considered thinking process. Please structure your response into two main sections: Thought and Solution using the specified format: <think> {Thought section} </think> {Solution section}. In the Thought section, detail your reasoning process in steps. Each step should include detailed considerations such as analysing questions, summarizing relevant findings, brainstorming new ideas, verifying the accuracy of the current steps, refining any errors, and revisiting previous steps. In the Solution section, based on various attempts, explorations, and reflections from the Thought section, systematically present the final solution that you deem correct. The Solution section should be logical, accurate, and concise and detail necessary steps needed to reach the conclusion. Now, try to solve the following question through the above guidelines:"
+        )
+        return f"<|im_start|>system<|im_sep|>{sys_prompt}<|im_end|><|im_start|>user<|im_sep|>{prompt}<|im_end|><|im_start|>assistant<|im_sep|>"
+
+
+class Phi4Mini(PromptStyle):
+    def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
+        res = ""
+        if sys_prompt:
+            res += f"<|system|>{sys_prompt}<|end|>"
+        res += f"<|user|>{prompt}<|end|><|assistant|>"
+        return res
+
+
+class Phi4MiniReasoning(PromptStyle):
+    def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
+        sys_prompt = sys_prompt or "Your name is Phi, an AI math expert developed by Microsoft."
+        return f"<|system|>{sys_prompt}<|end|><|user|>{prompt}<|end|><|assistant|>"
+
+
+class TinyLlama(PromptStyle):
+    def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
+        sys_prompt = sys_prompt or "You are a friendly chatbot who always gives helpful, detailed, and polite answers."
+        return f"<|system|>\n{sys_prompt}\n<|user|>\n{prompt}\n<|assistant|>\n"
+
+
+class Gemma(PromptStyle):
+    def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
+        return f"<start_of_turn>user\n{prompt}<end_of_turn>\n<start_of_turn>model\n"
+
+
+class OLMo(PromptStyle):
+    def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
+        return f"<|endoftext|><|user|>\n{prompt}\n<|assistant|>\n"
+
+
+class ChatML(PromptStyle):
+    def __init__(self, system_message: Optional[str] = None):
+        self.system_message = system_message
+
+    def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
+        sys_prompt = sys_prompt or self.system_message
+        return (
+            f"<|im_start|>system\n{sys_prompt}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant\n"
+        )
+
+
+class Qwen2_5(ChatML):
+    def __init__(self):
+        super().__init__("You are Qwen, created by Alibaba Cloud. You are a helpful assistant.")
+
+
+class Qwen2_5_Math(ChatML):
+    def __init__(self):
+        super().__init__("Please reason step by step, and put your final answer within \\boxed{}.")
+
+
+class QwQ(ChatML):
+    def __init__(self):
+        super().__init__(
+            "You are a helpful and harmless assistant. You are Qwen developed by Alibaba. You should think step-by-step."
+ ) + + +class Qwen3(ChatML): + def __init__(self): + super().__init__() + + +class SmolLM2(ChatML): + def __init__(self): + super().__init__("You are a helpful AI assistant named SmolLM, trained by Hugging Face") + + +class Salamandra(ChatML): + def __init__(self): + super().__init__( + "I am Salamandra, an AI language model developed at the Barcelona Supercomputing Centre (BSC) by the Language Technologies Unit. My knowledge base was last updated on August 2023. Today Date: 2024-09-30\nSoy Salamandra, un modelo lingüístico de IA desarrollado en el Barcelona Supercomputing Centre (BSC) por la Language Technologies Unit. Mi base de conocimientos se actualizó por última vez en agosto de 2023.\nSoc Salamandra, un model de llenguatge d'IA desenvolupat al Barcelona Supercomputing Centre (BSC) per la Language Technologies Unit." + ) + + +# Maps prompt style names to PromptStyle classes +prompt_styles: Dict[str, Type[PromptStyle]] = { + # Dataset-specific prompt styles + "default": Default, + "alpaca": Alpaca, + "flan": FLAN, + "longform": Longform, + # Model-specific prompt styles + "stablelm-alpha": StableLMAlpha, + "stablelm-zephyr": StableLMZephyr, + "falcon": Falcon, + "llama2-function-calling": Llama2FunctionCalling, + "llama2": Llama2, + "freewilly2": FreeWilly2, + "platypus": Platypus, + "stablecode": StableCode, + "codellama": CodeLlama, + "phi-1": Phi1, + "phi-2": Phi2, + "phi-3": Phi3, + "phi-4": Phi4, + "phi-4-reasoning": Phi4Reasoning, + "phi-4-mini": Phi4Mini, + "phi-4-mini-reasoning": Phi4MiniReasoning, + "tinyllama": TinyLlama, + "gemma": Gemma, + "llama3": Llama3, + "olmo": OLMo, + "qwen2.5": Qwen2_5, + "qwen2.5-math": Qwen2_5_Math, + "qwq": QwQ, + "qwen3": Qwen3, + "smollm2": SmolLM2, + "salamandra": Salamandra, +} + + +def model_name_to_prompt_style(model_name: str) -> PromptStyle: + if re.search(r"stablelm-tuned-alpha", model_name): + return StableLMAlpha() + if re.search(r"stablelm-zephyr-3b", model_name): + return StableLMZephyr() + if re.search("stablecode-instruct", model_name): + return StableCode() + if re.search(r"Falcon3.*-Instruct", model_name): + return Falcon3() + if re.search(r"falcon.*-instruct", model_name): + return Falcon() + if re.search("Llama-2-7b-chat-hf-function-calling-v2", model_name): + return Llama2FunctionCalling() + if re.search("Llama-2.*-chat", model_name): + return Llama2() + if re.search("Llama-3.*-Instruct", model_name): + return Llama3() + if re.search("Llama-3.*-Instruct-*", model_name): + return Llama3() + if re.search("OLMo-2.*-(Instruct|SFT|DPO)", model_name): + return Llama3() + if re.search("R1", model_name): + return R1Base() + if re.search("FreeWilly2", model_name): + return FreeWilly2() + if re.search("Platypus", model_name): + return Platypus() + if re.search("CodeLlama|Mi[sx]tral.*Instruct", model_name): + return CodeLlama() + if re.search("phi-1", model_name): + return Phi1() + if re.search("phi-2", model_name): + return Phi2() + if re.search("Phi-3", model_name): + return Phi3() + if re.search("Phi-4-reasoning", model_name): + return Phi4Reasoning() + if re.search("Phi-4-mini-reasoning", model_name): + return Phi4MiniReasoning() + if re.search("Phi-4-mini", model_name): + return Phi4Mini() + if re.search("phi-4", model_name): + return Phi4() + if re.search(r"tiny-llama.*chat", model_name): + return TinyLlama() + if re.search(r"(Code)?Gemma.*-it", model_name): + return Gemma() + if re.search(r"OLMo.*-hf", model_name): + return OLMo() + if re.search(r"Qwen2\.5-Math-.*", model_name): + return Qwen2_5_Math() + if 
re.search(r"Qwen2\.5-.*", model_name): + return Qwen2_5() + if re.search(r"QwQ-.*", model_name): + return QwQ() + if re.search(r"Qwen3-.*", model_name): + return Qwen3() + if re.search(r"SmolLM2.*-Instruct", model_name): + return SmolLM2() + if re.search(r"salamandra-.*-instruct", model_name): + return Salamandra() + return Default() + + +def save_prompt_style(style: Union[str, PromptStyle], checkpoint_dir: Path) -> None: + style = PromptStyle.from_name(style) if isinstance(style, str) else style + cls = type(style) + # Allow saving the full module path for user-defined prompt classes + config = {"class_path": f"{cls.__module__}.{cls.__name__}"} + with open(checkpoint_dir / "prompt_style.yaml", "w", encoding="utf-8") as file: + yaml.dump(config, file) + + +def load_prompt_style(checkpoint_dir: Path) -> PromptStyle: + with open(checkpoint_dir / "prompt_style.yaml", encoding="utf-8") as file: + config = yaml.safe_load(file) + # Support loading the full module path for user-defined prompt classes + full_module_path, cls_name = config["class_path"].rsplit(".", 1) + module = importlib.import_module(full_module_path) + cls = getattr(module, cls_name) + return cls() + + +def has_prompt_style(checkpoint_dir: Path) -> bool: + return (checkpoint_dir / "prompt_style.yaml").is_file() diff --git a/litgpt/tokenizer.py b/litgpt/tokenizer.py new file mode 100644 index 0000000000000000000000000000000000000000..fa263c6e01a5263f0328bbf8e1692cca0377ff67 --- /dev/null +++ b/litgpt/tokenizer.py @@ -0,0 +1,182 @@ +# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file. + +import json +from pathlib import Path +from typing import Iterable, Iterator, Optional, Union + +import torch + +from litgpt.utils import fix_and_load_json + + +class Tokenizer: + def __init__(self, checkpoint_dir: Union[Path, str]) -> None: + checkpoint_dir = Path(checkpoint_dir) + if not checkpoint_dir.exists(): + raise NotADirectoryError(f"The checkpoint directory does not exist: {str(checkpoint_dir)}") + + self.model_name = checkpoint_dir.stem + self.use_bos = self.check_if_bos_token_used(checkpoint_dir) + self.bos_id = None + self.eos_id = None + + # some checkpoints have both files, `.json` takes precedence + if (vocabulary_path := checkpoint_dir / "tokenizer.json").is_file(): + from tokenizers import Tokenizer as HFTokenizer + + self.processor = HFTokenizer.from_file(str(vocabulary_path)) + self.backend = "huggingface" + + if (special_tokens_path := checkpoint_dir / "tokenizer_config.json").is_file(): + with open(special_tokens_path, encoding="utf-8") as fp: + config = json.load(fp) + bos_token = config.get("bos_token") + eos_token = config.get("eos_token") + if bos_token is not None and isinstance(bos_token, dict): + bos_token = bos_token.get("content") + if eos_token is not None and isinstance(eos_token, dict): + eos_token = eos_token.get("content") + self.bos_id = self.token_to_id(bos_token) if bos_token is not None else None + self.eos_id = self.token_to_id(eos_token) if eos_token is not None else None + if (special_tokens_path := checkpoint_dir / "generation_config.json").is_file(): + try: + with open(special_tokens_path, encoding="utf-8") as fp: + config = json.load(fp) + except json.JSONDecodeError: # Some files like the Llama 3.2 one have bugs + with open(special_tokens_path, encoding="utf-8") as fp: + json_string = fp.read() + config = fix_and_load_json(json_string) + if self.bos_id is None: + self.bos_id = config.get("bos_token_id") + if self.eos_id is None: + self.eos_id = config.get("eos_token_id") 
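Editorial usage sketch, not part of the patch, looking back at litgpt/prompts.py above: it assumes the litgpt package from this diff is installed and that PromptStyle.from_name resolves names through the prompt_styles registry shown earlier.

from pathlib import Path
from tempfile import TemporaryDirectory

from litgpt.prompts import (
    PromptStyle,
    has_prompt_style,
    load_prompt_style,
    model_name_to_prompt_style,
    save_prompt_style,
)

# Resolve a style by registry key and by model name; both yield ChatML-style formatting here.
style = PromptStyle.from_name("qwen2.5")
print(style.apply("What is 2 + 2?"))
print(type(model_name_to_prompt_style("Qwen2.5-7B-Instruct")).__name__)  # Qwen2_5

# Round-trip a style through prompt_style.yaml, as finetuning checkpoints do.
with TemporaryDirectory() as tmp:
    checkpoint_dir = Path(tmp)
    save_prompt_style("llama3", checkpoint_dir)
    assert has_prompt_style(checkpoint_dir)
    print(type(load_prompt_style(checkpoint_dir)).__name__)  # Llama3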
+ + elif (vocabulary_path := checkpoint_dir / "tokenizer.model").is_file(): + from sentencepiece import SentencePieceProcessor + + self.processor = SentencePieceProcessor(model_file=str(vocabulary_path)) + self.backend = "sentencepiece" + self.bos_id = self.processor.bos_id() + self.eos_id = self.processor.eos_id() + else: + raise NotImplementedError + + # NOTE: A temporary fix until it's resolved on Tokenizers side. + # LlaMA tokenizer strips leading spaces if to decode a single token at a time. + # https://github.com/huggingface/transformers/issues/31643 + self.apply_decoding_fix = None + if (config_path := checkpoint_dir / "tokenizer_config.json").is_file(): + with open(config_path, encoding="utf-8") as fp: + self.apply_decoding_fix = "LlamaTokenizer" in json.load(fp)["tokenizer_class"] + + @property + def vocab_size(self) -> int: + if self.backend == "huggingface": + return self.processor.get_vocab_size(with_added_tokens=False) + if self.backend == "sentencepiece": + return self.processor.vocab_size() + raise RuntimeError + + def token_to_id(self, token: str) -> int: + if self.backend == "huggingface": + id_ = self.processor.token_to_id(token) + elif self.backend == "sentencepiece": + id_ = self.processor.piece_to_id(token) + else: + raise RuntimeError + if id_ is None: + raise ValueError(f"token {token!r} not found in the collection.") + return id_ + + def check_if_bos_token_used(self, checkpoint_dir: Path) -> bool: + if not (tokenizer_config_path := checkpoint_dir / "tokenizer_config.json").is_file(): + return False + with open(tokenizer_config_path, encoding="utf-8") as fp: + config = json.load(fp) + # for LlaMA-3 tokenizer there is no `add_bos_token` at all and `tokenizer_class` is only + # `PreTrainedTokenizerFast` + if checkpoint_dir.stem.startswith(("Meta-Llama-3", "Llama-3")): + return True + if checkpoint_dir.stem.startswith("SmolLM2") and checkpoint_dir.name.endswith("Instruct"): + return True + if "add_bos_token" in config: + return config["add_bos_token"] + # if `add_bos_token` isn't in the config file, but LLaMA tokenizer is used - return True. 
+ # ex: https://huggingface.co/stabilityai/StableBeluga2/blob/main/tokenizer_config.json#L2 + return config.get("tokenizer_class") == "LlamaTokenizer" + + def encode( + self, + string: str, + device: Optional[torch.device] = None, + bos: Optional[bool] = None, + eos: bool = False, + max_length: int = -1, + ) -> torch.Tensor: + if self.backend == "huggingface": + tokens = self.processor.encode(string).ids + elif self.backend == "sentencepiece": + tokens = self.processor.encode(string) + else: + raise RuntimeError(f"`{self.backend}` is not supported.") + if tokens is None: + raise ValueError("`self.processor` returned tokens of None value.") + + if bos or (bos is None and self.use_bos): + if self.bos_id is None: + raise NotImplementedError("This tokenizer does not have a defined bos token.") + if not tokens or tokens[0] != self.bos_id: + tokens = [self.bos_id] + tokens + # if the processor misbehaves and adds `bos` token no matter what + elif tokens and tokens[0] == self.bos_id: + tokens = tokens[1:] + + if eos and (not tokens or tokens[-1] != self.eos_id): + tokens = tokens + [self.eos_id] + # if the processor misbehaves and adds `eos` token no matter what + elif tokens and tokens[-1] == self.eos_id: + tokens = tokens[:-1] + + if max_length > 0: + tokens = tokens[:max_length] + return torch.tensor(tokens, dtype=torch.int, device=device) + + def decode(self, tensor: torch.Tensor) -> str: + tokens = [tensor.item()] if tensor.ndim == 0 else tensor.tolist() + if len(tokens) == 1 and self.apply_decoding_fix: + dummy_token_id = 33 # \x1e + dummy_token = self.processor.decode([dummy_token_id]) + if dummy_token != "\x1e": + dummy_token_id = 165 # \x1e is different in salamandra tokenizers + dummy_token = self.processor.decode([dummy_token_id]) + return self.processor.decode([dummy_token_id] + tokens)[len(dummy_token) :] + return self.processor.decode(tokens) + + def decode_stream( + self, token_stream: Iterable[torch.Tensor], device: Optional[torch.device] = None + ) -> Iterator[str]: + if self.backend == "huggingface": + try: + for token in token_stream: + yield self.decode(token) + except KeyboardInterrupt: + return + elif self.backend == "sentencepiece": + # TODO: Is there a way to not have to do this? + # This may actually affect our tokens per second. + + # sentencepiece does not support decoding token-by-token because it adds spaces based on the surrounding tokens + # meaning that we need to decode everything each time + so_far = torch.tensor([], dtype=torch.long, device=device) + decoded_so_far = "" + try: + for token in token_stream: + so_far = so_far.to(device=token.device) + so_far = torch.cat((so_far, token.view(-1))) + decoded_new = self.decode(so_far) + yield decoded_new[len(decoded_so_far) :] + decoded_so_far = decoded_new + except KeyboardInterrupt: + return + else: + raise NotImplementedError(self.backend) diff --git a/litgpt/utils.py b/litgpt/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..af97fa2f11d74613adf8b3f77a73ee6725f71d0b --- /dev/null +++ b/litgpt/utils.py @@ -0,0 +1,875 @@ +# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file. 
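Before the utilities module, a usage sketch for the Tokenizer class in litgpt/tokenizer.py above (editorial example, not part of the patch). The checkpoint path is hypothetical and assumes a model was downloaded first.

from litgpt.tokenizer import Tokenizer

# Hypothetical path; requires a checkpoint fetched beforehand, e.g. with
# `litgpt download TinyLlama/TinyLlama-1.1B-Chat-v1.0`.
tokenizer = Tokenizer("checkpoints/TinyLlama/TinyLlama-1.1B-Chat-v1.0")

ids = tokenizer.encode("Hello world", bos=True, eos=True)  # 1-D tensor of token ids
print(ids.tolist())
print(tokenizer.decode(ids))  # round-trips back to the input text (plus special tokens)
print(tokenizer.vocab_size, tokenizer.bos_id, tokenizer.eos_id)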
+ +"""Utility functions for training and inference.""" + +import inspect +import json +import math +import os +import pickle +import random +import re +import shutil +import subprocess +import sys +import warnings +from dataclasses import asdict, is_dataclass +from io import BytesIO +from pathlib import Path +from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Literal, Mapping, Optional, TypeVar, Union + +import lightning as L +import psutil +import torch +import torch.nn as nn +import torch.utils._device +import yaml +from lightning.fabric.loggers import CSVLogger, TensorBoardLogger +from lightning.fabric.strategies import FSDPStrategy +from lightning.fabric.utilities.load import _lazy_load as lazy_load +from lightning.pytorch.cli import instantiate_class +from lightning.pytorch.loggers import MLFlowLogger, WandbLogger +from lightning_utilities.core.imports import module_available +from packaging import version +from torch.serialization import normalize_storage_type +from typing_extensions import Self + +if TYPE_CHECKING: + from litgpt import GPT, Config + +_THUNDER_AVAILABLE = module_available("thunder") +_TRITON_AVAILABLE = module_available("triton") + + +def init_out_dir(out_dir: Path) -> Path: + if not isinstance(out_dir, Path): + out_dir = Path(out_dir) + if not out_dir.is_absolute() and "LIGHTNING_ARTIFACTS_DIR" in os.environ: + return Path(os.getenv("LIGHTNING_ARTIFACTS_DIR")) / out_dir + return out_dir + + +def find_resume_path(resume: Union[bool, Literal["auto"], Path], out_dir: Path) -> Optional[Path]: + if not resume or isinstance(resume, Path): + return resume + + resume_path = max(out_dir.rglob("step-*/*.pth"), key=(lambda p: int(p.parent.name.split("-")[1])), default=None) + if resume == "auto": + return resume_path + if resume is True and resume_path is None: + raise FileNotFoundError( + f"You passed `--resume=True`, but no checkpoint file was found in `--out_dir={out_dir}`." 
+ ) + return resume_path + + +def num_parameters(module: nn.Module, requires_grad: Optional[bool] = None) -> int: + total = 0 + for p in module.parameters(): + if requires_grad is None or p.requires_grad == requires_grad: + if hasattr(p, "quant_state"): + # bitsandbytes 4bit layer support + total += math.prod(p.quant_state.shape) + else: + total += p.numel() + return total + + +def reset_parameters(module: nn.Module) -> None: + """Calls `reset_parameters` on the module and all its submodules.""" + for mod in module.modules(): + if callable(getattr(mod, "reset_parameters", None)): + mod.reset_parameters() + + +def check_valid_checkpoint_dir( + checkpoint_dir: Path, + model_filename: str = "lit_model.pth", + verbose: bool = True, + raise_error: bool = False, + ignore_tokenizer_files: bool = False, +) -> None: + files = { + model_filename: (checkpoint_dir / model_filename).is_file(), + "model_config.yaml": (checkpoint_dir / "model_config.yaml").is_file(), + } + if not ignore_tokenizer_files: + files.update( + { + "tokenizer.json OR tokenizer.model": (checkpoint_dir / "tokenizer.json").is_file() + or (checkpoint_dir / "tokenizer.model").is_file(), + "tokenizer_config.json": (checkpoint_dir / "tokenizer_config.json").is_file(), + } + ) + + if checkpoint_dir.is_dir(): + if all(files.values()): + # we're good + return + problem = f" is missing the files: {[f for f, exists in files.items() if not exists]!r}" + else: + problem = " is not a checkpoint directory" + + # list locally available checkpoints + available = list(Path("checkpoints").glob("*/*")) + if available: + options = "\n".join([""] + [repr(str(p.resolve())) for p in available]) + extra = f"\nYou have downloaded locally:{options}\n" + else: + extra = "" + + if verbose: + error_message = ( + f"checkpoint_dir {str(checkpoint_dir.absolute())!r}{problem}." + "\nFind download instructions at https://github.com/Lightning-AI/litgpt/blob/main/tutorials\n" + f"{extra}\nSee all download options by running:\n litgpt download" + ) + print(error_message, file=sys.stderr) + + if raise_error: + raise FileNotFoundError(f"checkpoint_dir {str(checkpoint_dir.absolute())!r}{problem}.") + else: + raise SystemExit(1) + + +class SavingProxyForStorage: + def __init__(self, obj, saver, protocol_version=5): + self.protocol_version = protocol_version + self.saver = saver + if not (isinstance(obj, torch.storage.TypedStorage) or torch.is_storage(obj)): + raise TypeError(f"expected storage, not {type(obj)}") + + # this logic is taken from PyTorch 2.0+ torch/serialization.py + if isinstance(obj, torch.storage.TypedStorage): + # PT upstream wants to deprecate this eventually... 
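A small self-contained check (editorial sketch, not part of the patch) of the num_parameters and reset_parameters helpers defined above; it only needs torch and an installed litgpt.

import torch.nn as nn

from litgpt.utils import num_parameters, reset_parameters

model = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 2))
model[0].weight.requires_grad = False  # freeze one weight matrix (32 parameters)

print(num_parameters(model))                        # 58 = (4*8 + 8) + (8*2 + 2)
print(num_parameters(model, requires_grad=True))    # 26
print(num_parameters(model, requires_grad=False))   # 32

reset_parameters(model)  # re-initializes every submodule that defines reset_parameters()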
+ storage = obj._untyped_storage + storage_type_str = obj._pickle_storage_type() + storage_type = getattr(torch, storage_type_str) + storage_numel = obj._size() + else: + storage = obj + storage_type = normalize_storage_type(type(obj)) + storage_numel = storage.nbytes() + + storage_key = saver._write_storage_and_return_key(storage) + location = torch.serialization.location_tag(storage) + + self.storage_info = ("storage", storage_type, storage_key, location, storage_numel) + + def __reduce_ex__(self, protocol_version): + assert False, "this should be handled with out of band" + + +class SavingProxyForTensor: + def __init__(self, tensor, saver, protocol_version=5): + self.protocol_version = protocol_version + self.reduce_ret_fn, reduce_args = tensor.__reduce_ex__(protocol_version) + if reduce_args[0] == torch._utils._rebuild_tensor_v2: + # for Tensors with Python attributes + (a0, a1, (storage, *a2_other), *other_reduce_args) = reduce_args + assert isinstance(storage, (torch.storage.TypedStorage, torch.storage.UntypedStorage)), ( + "Please check for updates" + ) + storage_proxy = SavingProxyForStorage(storage, saver, protocol_version=protocol_version) + self.reduce_args = (a0, a1, (storage_proxy, *a2_other), *other_reduce_args) + else: + (storage, *other_reduce_args) = reduce_args + assert isinstance(storage, (torch.storage.TypedStorage, torch.storage.UntypedStorage)), ( + "Please check for updates" + ) + storage_proxy = SavingProxyForStorage(storage, saver, protocol_version=protocol_version) + self.reduce_args = (storage_proxy, *other_reduce_args) + + def __reduce_ex__(self, protocol_version): + if protocol_version != self.protocol_version: + raise RuntimeError(f"Unexpected protocol version: expected {self.protocol_version}, got {protocol_version}") + return self.reduce_ret_fn, self.reduce_args + + +class IncrementalPyTorchPickler(pickle.Pickler): + def __init__(self, saver, *args, **kwargs): + super().__init__(*args, **kwargs) + self.storage_dtypes = {} + self.saver = saver + self.id_map = {} + + # this logic is taken from PyTorch 2.0+ torch/serialization.py + def persistent_id(self, obj): + # FIXME: the docs say that persistent_id should only return a string + # but torch store returns tuples. This works only in the binary protocol + # see + # https://docs.python.org/2/library/pickle.html#pickling-and-unpickling-external-objects + # https://github.com/python/cpython/blob/master/Lib/pickle.py#L527-L537 + if isinstance(obj, SavingProxyForStorage): + return obj.storage_info + + if isinstance(obj, torch.storage.TypedStorage) or torch.is_storage(obj): + if isinstance(obj, torch.storage.TypedStorage): + # TODO: Once we decide to break serialization FC, this case + # can be deleted + storage = obj._untyped_storage + storage_dtype = obj.dtype + storage_type_str = obj._pickle_storage_type() + storage_type = getattr(torch, storage_type_str) + storage_numel = obj._size() + + else: + storage = obj + storage_dtype = torch.uint8 + storage_type = normalize_storage_type(type(obj)) + storage_numel = storage.nbytes() + + # If storage is allocated, ensure that any other saved storages + # pointing to the same data all have the same dtype. 
If storage is + # not allocated, don't perform this check + if storage.data_ptr() != 0: + if storage.data_ptr() in self.storage_dtypes: + if storage_dtype != self.storage_dtypes[storage.data_ptr()]: + raise RuntimeError( + "Cannot save multiple tensors or storages that view the same data as different types" + ) + else: + self.storage_dtypes[storage.data_ptr()] = storage_dtype + + storage_key = self.id_map.get(storage._cdata) + if storage_key is None: + storage_key = self.saver._write_storage_and_return_key(storage) + self.id_map[storage._cdata] = storage_key + location = torch.serialization.location_tag(storage) + + return ("storage", storage_type, storage_key, location, storage_numel) + + return None + + +class incremental_save: + def __init__(self, name): + self.name = name + self.zipfile = torch._C.PyTorchFileWriter(str(name)) + self.has_saved = False + self.next_key = 0 + self.protocol_version = 2 + + def __enter__(self): + return self + + def store_early(self, tensor): + if isinstance(tensor, torch.Tensor): + return SavingProxyForTensor(tensor, self, protocol_version=self.protocol_version) + raise TypeError(f"can only store tensors early, not {type(tensor)}") + + def save(self, obj): + if self.has_saved: + raise RuntimeError("have already saved") + # Write the pickle data for `obj` + data_buf = BytesIO() + pickler = IncrementalPyTorchPickler(self, data_buf, protocol=self.protocol_version) + pickler.dump(obj) + data_value = data_buf.getvalue() + self.zipfile.write_record("data.pkl", data_value, len(data_value)) + self.has_saved = True + + def _write_storage_and_return_key(self, storage): + if self.has_saved: + raise RuntimeError("have already saved") + key = self.next_key + self.next_key += 1 + name = f"data/{key}" + if storage.device.type != "cpu": + storage = storage.cpu() + num_bytes = storage.nbytes() + + current_version = version.parse(torch.__version__) + threshold_version = version.parse("2.2.2") + if current_version <= threshold_version: + self.zipfile.write_record(name, storage.data_ptr(), num_bytes) + else: + self.zipfile.write_record(name, storage, num_bytes) + + return key + + def __exit__(self, type, value, traceback): + self.zipfile.write_end_of_file() + + +T = TypeVar("T") + + +def chunked_cross_entropy( + logits: Union[torch.Tensor, List[torch.Tensor]], + targets: torch.Tensor, + chunk_size: int = 128, + ignore_index: int = -100, +) -> torch.Tensor: + # with large max_sequence_lengths, the beginning of `backward` allocates a large memory chunk which can dominate + # the memory usage in fine-tuning settings with low number of parameters. 
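Editorial sketch, not part of the patch, for the incremental_save class above: tensors registered with store_early() are streamed into the zip archive as they are encountered, so the final pickle only records storage keys instead of holding a second in-memory copy.

import torch

from litgpt.utils import incremental_save

# "toy_checkpoint.pth" is a throwaway output path used only for this sketch.
with incremental_save("toy_checkpoint.pth") as saver:
    state_dict = {
        "linear.weight": saver.store_early(torch.randn(8, 4)),
        "linear.bias": saver.store_early(torch.zeros(8)),
    }
    saver.save({"model": state_dict})

# The result is a regular PyTorch zip checkpoint and loads with torch.load.
loaded = torch.load("toy_checkpoint.pth")
print(loaded["model"]["linear.weight"].shape)  # torch.Size([8, 4])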
+ # as a workaround hack, the cross entropy computation is chunked to force it to deallocate on the go, reducing + # the memory spike's magnitude + + # lm_head was chunked (we are fine-tuning) + if isinstance(logits, list): + # don't want to chunk cross entropy + if chunk_size == 0: + logits = torch.cat(logits, dim=1) + logits = logits.reshape(-1, logits.size(-1)) + targets = targets.reshape(-1) + return torch.nn.functional.cross_entropy(logits, targets, ignore_index=ignore_index) + + # chunk cross entropy + logit_chunks = [logit_chunk.reshape(-1, logit_chunk.size(-1)) for logit_chunk in logits] + target_chunks = [target_chunk.reshape(-1) for target_chunk in targets.split(logits[0].size(1), dim=1)] + loss_chunks = [ + torch.nn.functional.cross_entropy(logit_chunk, target_chunk, ignore_index=ignore_index, reduction="none") + for logit_chunk, target_chunk in zip(logit_chunks, target_chunks) + ] + non_masked_elems = (targets != ignore_index).sum() + # See [non_masked_elems div note] + return torch.cat(loss_chunks).sum() / non_masked_elems.maximum(torch.ones_like(non_masked_elems)) + + # no chunking at all + logits = logits.reshape(-1, logits.size(-1)) + targets = targets.reshape(-1) + if chunk_size == 0: + return torch.nn.functional.cross_entropy(logits, targets, ignore_index=ignore_index) + + # lm_head wasn't chunked, chunk cross entropy + logit_chunks = logits.split(chunk_size) + target_chunks = targets.split(chunk_size) + loss_chunks = [ + torch.nn.functional.cross_entropy(logit_chunk, target_chunk, ignore_index=ignore_index, reduction="none") + for logit_chunk, target_chunk in zip(logit_chunks, target_chunks) + ] + non_masked_elems = (targets != ignore_index).sum() + # [non_masked_elems div note]: + # max(1, non_masked_elems) would be more ergonomic to avoid a division by zero. However that + # results in a python int which is then passed back to torch division. By using the + # `x.maximum(torch.ones_like(x))` pattern we avoid a cudaStreamSynchronize. + return torch.cat(loss_chunks).sum() / non_masked_elems.maximum(torch.ones_like(non_masked_elems)) + + +def map_old_state_dict_weights(state_dict: Dict, mapping: Mapping, prefix: str) -> Dict: + for checkpoint_name, attribute_name in mapping.items(): + full_checkpoint_name = prefix + checkpoint_name + if full_checkpoint_name in state_dict: + full_attribute_name = prefix + attribute_name + state_dict[full_attribute_name] = state_dict.pop(full_checkpoint_name) + return state_dict + + +def get_default_supported_precision(training: bool) -> str: + """ + Return the default precision that is supported by the hardware: either `bf16` or `16`. + + Args: + training: If True, returns '-mixed' version of the precision; if False, returns '-true' version. + + Returns: + The default precision that is suitable for the task and is supported by the hardware. 
+ """ + import torch + + if torch.cuda.is_available(): + if torch.cuda.is_bf16_supported(): + return "bf16-mixed" if training else "bf16-true" + else: + return "16-mixed" if training else "16-true" + return "bf16-mixed" if training else "bf16-true" + + +def load_checkpoint(fabric: L.Fabric, model: nn.Module, checkpoint_path: Path, strict: bool = True) -> None: + if isinstance(fabric.strategy, FSDPStrategy): + fabric.load_raw(checkpoint_path, model, strict=strict) + else: + state_dict = lazy_load(checkpoint_path) + state_dict = state_dict.get("model", state_dict) + model.load_state_dict(state_dict, strict=strict) + + +def load_checkpoint_update( + fabric: L.Fabric, adapter_path: Path, model: nn.Module, checkpoint_path: Path, strict: bool = True +) -> None: + if isinstance(fabric.strategy, FSDPStrategy): + fabric.load_raw(checkpoint_path, model, strict=strict) + else: + state_dict = lazy_load(checkpoint_path) + state_dict = state_dict.get("model", state_dict) + adapter_cp = lazy_load(adapter_path) + state_dict.update(adapter_cp) + model.load_state_dict(state_dict, strict=strict) + + +def flops_per_param(max_seq_length: int, n_layer: int, n_embd: int, n_params: int) -> int: + flops_per_token = 2 * n_params # each parameter is used for a MAC (2 FLOPS) per network operation + # this assumes that all samples have a fixed length equal to the block size + # which is most likely false during finetuning + flops_per_seq = flops_per_token * max_seq_length + attn_flops_per_seq = n_layer * 2 * 2 * (n_embd * (max_seq_length**2)) + return flops_per_seq + attn_flops_per_seq + + +def estimate_flops(model: "GPT", training: bool) -> int: + """Measures estimated FLOPs for MFU. + + Refs: + * https://ar5iv.labs.arxiv.org/html/2205.05198#A1 + * https://ar5iv.labs.arxiv.org/html/2204.02311#A2 + """ + # using all parameters for this is a naive over estimation because not all model parameters actually contribute to + # this FLOP computation (e.g. embedding, norm). For this reason, the result will be higher by a fixed percentage + # (~10%) compared to the measured FLOPs, making those lower but more realistic. + # For a proper estimate, this needs a more fine-grained calculation as in Appendix A of the paper. + n_trainable_params = num_parameters(model, requires_grad=True) + trainable_flops = flops_per_param( + model.max_seq_length, model.config.n_layer, model.config.n_embd, n_trainable_params + ) + # forward + backward + gradients (assumes no gradient accumulation) + ops_per_step = 3 if training else 1 + n_frozen_params = num_parameters(model, requires_grad=False) + frozen_flops = flops_per_param(model.max_seq_length, model.config.n_layer, model.config.n_embd, n_frozen_params) + # forward + backward + frozen_ops_per_step = 2 if training else 1 + return ops_per_step * trainable_flops + frozen_ops_per_step * frozen_flops + + +class CycleIterator: + """An iterator that cycles through an iterable indefinitely. + + Example: + >>> iterator = CycleIterator([1, 2, 3]) + >>> [next(iterator) for _ in range(5)] + [1, 2, 3, 1, 2] + + Note: + Unlike ``itertools.cycle``, this iterator does not cache the values of the iterable. 
+ """ + + def __init__(self, iterable: Iterable) -> None: + self.iterable = iterable + self.epoch = 0 + self._iterator = None + + def __next__(self) -> Any: + if self._iterator is None: + self._iterator = iter(self.iterable) + try: + return next(self._iterator) + except StopIteration: + self._iterator = iter(self.iterable) + self.epoch += 1 + return next(self._iterator) + + def __iter__(self) -> Self: + return self + + +def copy_config_files(source_dir: Path, out_dir: Path) -> None: + """Copies the specified configuration and tokenizer files into the output directory.""" + + config_files = ["config.json", "generation_config.json", "model_config.yaml"] + tokenizer_files = ["tokenizer.json", "tokenizer.model", "tokenizer_config.json"] + + for file_name in config_files + tokenizer_files: + src_path = source_dir / file_name + if src_path.exists(): + shutil.copy(src_path, out_dir) + + +def CLI(*args: Any, **kwargs: Any) -> Any: + from jsonargparse import CLI, set_config_read_mode, set_docstring_parse_options + + set_docstring_parse_options(attribute_docstrings=True) + set_config_read_mode(urls_enabled=True) + + return CLI(*args, **kwargs) + + +def capture_hparams() -> Dict[str, Any]: + """Captures the local variables ('hyperparameters') from where this function gets called.""" + caller_frame = inspect.currentframe().f_back + locals_of_caller = caller_frame.f_locals + hparams = {} + for name, value in locals_of_caller.items(): + if value is None or isinstance(value, (int, float, str, bool, Path)): + hparams[name] = value + elif is_dataclass(value): + hparams[name] = asdict(value) + else: + hparams[name] = str(value) + return hparams + + +def save_hyperparameters(function: callable, checkpoint_dir: Path) -> None: + """Captures the CLI parameters passed to `function` without running `function` and saves them to the checkpoint.""" + from jsonargparse import capture_parser + + # TODO: Make this more robust + # This hack strips away the subcommands from the top-level CLI + # to parse the file as if it was called as a script + known_commands = [ + ("finetune_full",), # For subcommands, use `("finetune", "full")` etc + ("finetune_lora",), + ("finetune_adapter",), + ("finetune_adapter_v2",), + ("finetune",), + ("pretrain",), + ] + for known_command in known_commands: + unwanted = slice(1, 1 + len(known_command)) + if tuple(sys.argv[unwanted]) == known_command: + sys.argv[unwanted] = [] + + parser = capture_parser(lambda: CLI(function)) + config = parser.parse_args() + parser.save(config, checkpoint_dir / "hyperparameters.yaml", overwrite=True) + + +def save_config(config: "Config", checkpoint_dir: Path) -> None: + config_dict = asdict(config) + with open(checkpoint_dir / "model_config.yaml", "w", encoding="utf-8") as fp: + yaml.dump(config_dict, fp) + + +def parse_devices(devices: Union[str, int]) -> int: + if devices in (-1, "auto"): + return torch.cuda.device_count() or 1 + if isinstance(devices, int) and devices > 0: + return devices + raise ValueError(f"Devices must be 'auto' or a positive integer, got: {devices!r}") + + +def choose_logger( + logger_name: Literal["csv", "tensorboard", "wandb", "mlflow"], + out_dir: Path, + name: str, + log_interval: int = 1, + log_args: Optional[Dict] = None, + resume: Optional[bool] = None, + **kwargs: Any, +): + if logger_name == "csv": + return CSVLogger(root_dir=(out_dir / "logs"), name="csv", flush_logs_every_n_steps=log_interval, **kwargs) + if logger_name == "tensorboard": + return TensorBoardLogger(root_dir=(out_dir / "logs"), name="tensorboard", **kwargs) + 
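Looking back at chunked_cross_entropy defined earlier in this file, a quick numerical check (editorial sketch, not part of the patch) that both chunked paths agree with a plain cross-entropy over the flattened logits, up to floating-point noise.

import torch
import torch.nn.functional as F

from litgpt.utils import chunked_cross_entropy

batch, seq_len, vocab = 2, 16, 50
logits = torch.randn(batch, seq_len, vocab)
targets = torch.randint(0, vocab, (batch, seq_len))
targets[0, :4] = -100  # ignored positions, e.g. the prompt part of a sample

reference = F.cross_entropy(logits.reshape(-1, vocab), targets.reshape(-1), ignore_index=-100)
chunked = chunked_cross_entropy(logits, targets, chunk_size=5)
# The list form mimics a chunked lm_head during fine-tuning.
chunked_list = chunked_cross_entropy(list(logits.split(4, dim=1)), targets, chunk_size=5)

print(torch.allclose(reference, chunked, atol=1e-5))       # True
print(torch.allclose(reference, chunked_list, atol=1e-5))  # True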
if logger_name == "wandb": + project = log_args.pop("project", name) + run = log_args.pop("run", os.environ.get("WANDB_RUN_NAME")) + group = log_args.pop("group", os.environ.get("WANDB_RUN_GROUP")) + return WandbLogger(project=project, name=run, group=group, resume=resume, **kwargs) + if logger_name == "mlflow": + return MLFlowLogger(experiment_name=name, **kwargs) + raise ValueError(f"`--logger_name={logger_name}` is not a valid option. Choose from 'csv', 'tensorboard', 'wandb'.") + + +def get_argument_names(cls): + sig = inspect.signature(cls.__init__) + return { + name + for name, param in sig.parameters.items() + if param.kind in [inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY] + } + + +def instantiate_bnb_optimizer(optimizer, model_parameters): + if (isinstance(optimizer, str) and "AdamW" not in optimizer) or ( + isinstance(optimizer, dict) and "AdamW" not in optimizer.get("class_path", "") + ): + raise ValueError("The chosen quantization format only supports the AdamW optimizer.") + + import bitsandbytes as bnb + + if isinstance(optimizer, str): + optimizer = bnb.optim.PagedAdamW(model_parameters) + else: + optim_args = get_argument_names(bnb.optim.PagedAdamW) + allowed_kwargs = {key: optimizer["init_args"][key] for key in optim_args & optimizer["init_args"].keys()} + optimizer = bnb.optim.PagedAdamW(model_parameters, **allowed_kwargs) + return optimizer + + +def instantiate_torch_optimizer(optimizer, model_parameters, **kwargs): + # Special care taken where some optimizers do not have some parameters referenced in some of the code, for example "fused" in the pretrain.py script: + # bnb.optim.AdamW8bit + # grokadamw.GrokAdamW + # torch.optim.RMSprop + + if isinstance(optimizer, str): + if "." in optimizer: + class_module, class_name = optimizer.rsplit(".", 1) + else: + class_module, class_name = "torch.optim", optimizer + + module = __import__(class_module, fromlist=[class_name]) + optimizer_cls = getattr(module, class_name) + + valid_params = set(inspect.signature(optimizer_cls).parameters) + kwargs = {key: value for key, value in dict(kwargs).items() if key in valid_params} + optimizer = optimizer_cls(model_parameters, **kwargs) + elif isinstance(optimizer, dict): + optimizer = dict(optimizer) + class_module, class_name = optimizer["class_path"].rsplit(".", 1) + module = __import__(class_module, fromlist=[class_name]) + optimizer_cls = getattr(module, class_name) + + valid_params = set(inspect.signature(optimizer_cls).parameters) + kwargs = {key: value for key, value in dict(kwargs).items() if key in valid_params} + + optimizer["init_args"].update(kwargs) + optimizer = instantiate_class(model_parameters, optimizer) + else: + raise ValueError(f'Unrecognized "optimizer" value: {optimizer}') + + return optimizer + + +def extend_checkpoint_dir(checkpoint_dir: Path) -> Path: + new_checkpoint_dir = "checkpoints" / checkpoint_dir + should_return_new_dir = ( + not checkpoint_dir.is_dir() + and checkpoint_dir.parts[0] != "checkpoints" + and not checkpoint_dir.is_absolute() + and new_checkpoint_dir.exists() + ) + return new_checkpoint_dir if should_return_new_dir else checkpoint_dir + + +def check_file_size_on_cpu_and_warn(checkpoint_path, device, size_limit=4_509_715_660): + """ + Checks the file size and raises a warning if it exceeds the size_limit. 
+ The default size limit is 4.2 GB, the size of TinyLlama 1.1B: 4.2 * 1024 * 1024 * 1024 = 4_509_715_660 + """ + size = 0.0 + if os.path.exists(checkpoint_path): + size = os.path.getsize(checkpoint_path) + if size > size_limit and str(device) == "cpu": + warnings.warn( + f"The file size of {checkpoint_path} is over {size_limit / 1024 / 1024 / 1024:.1f} GB. Using a model " + "with more than 1B parameters on a CPU can be slow, it is recommended to switch to a GPU." + ) + return size + + +def auto_download_checkpoint(model_name, access_token=None, ignore_tokenizer_files=False): + from litgpt.scripts.download import download_from_hub # moved here due to circular import issue + + checkpoint_dir = extend_checkpoint_dir(Path(model_name)) + try: + check_valid_checkpoint_dir( + checkpoint_dir, verbose=False, raise_error=True, ignore_tokenizer_files=ignore_tokenizer_files + ) + except FileNotFoundError as e: + if access_token is None: + access_token = os.getenv("HF_TOKEN") + + if checkpoint_dir.parts[0] != "checkpoints" and not checkpoint_dir.is_absolute(): + download_from_hub(repo_id=str(model_name), access_token=access_token) + checkpoint_dir = Path("checkpoints") / checkpoint_dir + else: + raise e + + return checkpoint_dir + + +def check_nvlink_connectivity(fabric=None): + """Checks GPU connectivity for both NVIDIA and AMD GPUs. + + This function delegates to vendor-specific implementations based on + the detected GPU vendor. + """ + if fabric is not None: + custom_print = fabric.print + else: + custom_print = print + + if os.getenv("RANK", "0") == "0": + try: + if torch.cuda.is_available(): + device_properties = torch.cuda.get_device_properties(0) + gpu_name = device_properties.name.lower() + if "nvidia" in gpu_name: + _check_nvidia_connectivity(custom_print) + elif "advanced micro devices" in gpu_name or "amd" in gpu_name: + _check_amd_connectivity(custom_print) + else: + custom_print(f"Unrecognized GPU vendor: {device_properties.name}") + else: + custom_print("No GPUs available") + except Exception as e: + custom_print(f"An error occurred while checking GPU connectivity: {e}") + + +def _check_nvidia_connectivity(custom_print): + """Checks NVLink connectivity on NVIDIA GPUs.""" + result = subprocess.run(["nvidia-smi", "topo", "-m"], stdout=subprocess.PIPE, text=True) + if result.returncode != 0: + custom_print("Failed to run nvidia-smi") + return + + lines = result.stdout.strip().split("\n") + start_index = next((i for i, line in enumerate(lines) if "GPU0" in line), None) + if start_index is None: + custom_print("Failed to parse nvidia-smi output") + return + + headers_line = lines[start_index] + headers = headers_line.split() + gpu_regex = re.compile(r"^GPU\d+$") + gpu_count = len([header for header in headers if gpu_regex.match(header)]) + + all_nvlink = True + for line in lines[start_index + 1 : start_index + 1 + gpu_count]: + columns = line.split() + connections = columns[1 : 1 + gpu_count] + if not all("NV" in conn for conn in connections if conn != "X"): + all_nvlink = False + break + + if all_nvlink: + custom_print("All GPUs are fully connected via NVLink.") + else: + custom_print( + "Warning: Not all GPUs are fully connected via NVLink. Some GPUs are connected via slower interfaces. " + "It is recommended to switch to a different machine with faster GPU connections for optimal multi-GPU training performance." 
+ ) + + +def _check_amd_connectivity(custom_print): + """Checks XGMI connectivity on AMD GPUs.""" + result = subprocess.run(["rocm-smi", "--showtopotype"], stdout=subprocess.PIPE, text=True) + if result.returncode != 0: + custom_print("Failed to run rocm-smi") + return + + lines = result.stdout.strip().split("\n") + gpu_header_index = next((i for i, line in enumerate(lines) if re.match(r"^\s*GPU0", line)), None) + if gpu_header_index is None or gpu_header_index == 0: + custom_print("Failed to parse rocm-smi output (no GPU headers found)") + return + + header_line = lines[gpu_header_index - 1] + headers = header_line.strip().split() + gpu_regex = re.compile(r"^GPU\d+$") + gpu_count = len([header for header in headers if gpu_regex.match(header)]) + + gpu_lines = [] + for line in lines[gpu_header_index : gpu_header_index + gpu_count]: + if re.match(r"^\s*GPU\d+", line): + gpu_lines.append(line.strip()) + if len(gpu_lines) != gpu_count: + custom_print("Mismatch in GPU count when parsing rocm-smi output") + return + + all_xgmi = True + for line in gpu_lines: + columns = line.split() + connections = columns[1 : 1 + gpu_count] + for conn in connections: + if conn not in ("XGMI", "0"): + all_xgmi = False + break + if not all_xgmi: + break + + if all_xgmi: + custom_print("All GPUs are fully connected via XGMI.") + else: + custom_print( + "Warning: Not all GPUs are fully connected via XGMI. Some GPUs are connected via slower interfaces. " + "It is recommended to switch to a different machine with faster GPU connections for optimal multi-GPU training performance." + ) + + +def fix_and_load_json(s): + # Remove trailing commas before } or ] + s = re.sub(r",(\s*[}\]])", r"\1", s) + + # Insert missing commas between properties + # Match positions where a value is followed by a newline and then a quote without a comma + pattern = r'(?<=[}\]0-9truefalsenull"])\s*(\n\s*)"' + replacement = r',\1"' + s = re.sub(pattern, replacement, s) + + # Now try to parse the JSON + try: + return json.loads(s) + except json.JSONDecodeError as e: + raise ValueError(f"Failed to parse JSON after fixing: {e}") + + +def create_finetuning_performance_report(training_time, token_counts, device_type): + tok_sec = token_counts["raw_tokens_plus_prompt_template_and_padding"] / training_time + output = f""" +| ------------------------------------------------------ +| Token Counts +| - Input Tokens : {token_counts["raw_tokens"]:>5} +| - Tokens w/ Prompt : {token_counts["raw_tokens_plus_prompt_template"]:>5} +| - Total Tokens (w/ Padding) : {token_counts["raw_tokens_plus_prompt_template_and_padding"]:>5} +| ----------------------------------------------------- +| Performance +| - Training Time : {training_time:.2f} s +| - Tok/sec : {tok_sec:.2f} tok/s +| ----------------------------------------------------- +""" + + if device_type == "cuda": + memory_used = torch.cuda.max_memory_allocated() / 1e9 + output += "| Memory Usage \n" + output += f"| - Memory Used : {memory_used:.02f} GB \n" + output += "-------------------------------------------------------\n" + + return output + + +def select_sft_generate_example(eval, data): + if eval.evaluate_example == "first": + if len(data.test_dataset.data): + instruction = data.test_dataset.data[0]["instruction"] + else: + instruction = data.train_dataset.data[0]["instruction"] + + elif eval.evaluate_example == "random": + if len(data.test_dataset.data): + random_idx = random.randint(0, len(data.test_dataset.data) - 1) + instruction = data.test_dataset.data[random_idx]["instruction"] + else: + 
random_idx = random.randint(0, len(data.train_dataset.data) - 1) + instruction = data.train_dataset.data[random_idx]["instruction"] + + elif isinstance(eval.evaluate_example, int): + index = eval.evaluate_example + if len(data.test_dataset.data) > index: + instruction = data.test_dataset.data[index]["instruction"] + elif len(data.train_dataset.data) > index: + instruction = data.train_dataset.data[index]["instruction"] + else: + raise IndexError(f"Index {index} is out of range for both test and training datasets.") + + else: + raise ValueError(f"Unknown evaluation example type: {eval.evaluate_example}") + return instruction + + +def _RunIf(thunder: bool = False, **kwargs): + import pytest + from lightning.fabric.utilities.testing import _runif_reasons + + reasons, marker_kwargs = _runif_reasons(**kwargs) + + if thunder and not module_available("thunder"): + # if we require Thunder, but it's not available, we should skip + reasons.append("Thunder") + + return pytest.mark.skipif(condition=len(reasons) > 0, reason=f"Requires: [{' + '.join(reasons)}]", **marker_kwargs) + + +def kill_process_tree(pid: int): + """ + Kill a process and all its child processes given the parent PID. + """ + try: + parent = psutil.Process(pid) + children = parent.children(recursive=True) + for child in children: + child.kill() + parent.kill() + except psutil.NoSuchProcess: + pass # Process already exited diff --git a/out/eval/tinyllama_benches/monthly_metrics.csv b/out/eval/tinyllama_benches/monthly_metrics.csv new file mode 100644 index 0000000000000000000000000000000000000000..358924670f2cc4fc77564a362c595ae2d7e7d2f7 --- /dev/null +++ b/out/eval/tinyllama_benches/monthly_metrics.csv @@ -0,0 +1,133 @@ +month,task,metric,value +2407,arc_challenge,acc,0.26621160409556316 +2408,arc_challenge,acc,0.25426621160409557 +2409,arc_challenge,acc,0.23890784982935154 +2410,arc_challenge,acc,0.24146757679180889 +2411,arc_challenge,acc,0.2380546075085324 +2412,arc_challenge,acc,0.23464163822525597 +2501,arc_challenge,acc,0.22525597269624573 +2502,arc_challenge,acc,0.21928327645051193 +2503,arc_challenge,acc,0.22013651877133106 +2504,arc_challenge,acc,0.22781569965870307 +2505,arc_challenge,acc,0.2175767918088737 +2506,arc_challenge,acc,0.23976109215017063 +2407,arc_challenge,acc_norm,0.3046075085324232 +2408,arc_challenge,acc_norm,0.2883959044368601 +2409,arc_challenge,acc_norm,0.2764505119453925 +2410,arc_challenge,acc_norm,0.26535836177474403 +2411,arc_challenge,acc_norm,0.27047781569965873 +2412,arc_challenge,acc_norm,0.25170648464163825 +2501,arc_challenge,acc_norm,0.2568259385665529 +2502,arc_challenge,acc_norm,0.24061433447098976 +2503,arc_challenge,acc_norm,0.26023890784982934 +2504,arc_challenge,acc_norm,0.24829351535836178 +2505,arc_challenge,acc_norm,0.26535836177474403 +2506,arc_challenge,acc_norm,0.2687713310580205 +2407,arc_easy,acc,0.5757575757575758 +2408,arc_easy,acc,0.5391414141414141 +2409,arc_easy,acc,0.5260942760942761 +2410,arc_easy,acc,0.5244107744107744 +2411,arc_easy,acc,0.5071548821548821 +2412,arc_easy,acc,0.48569023569023567 +2501,arc_easy,acc,0.4941077441077441 +2502,arc_easy,acc,0.47095959595959597 +2503,arc_easy,acc,0.4852693602693603 +2504,arc_easy,acc,0.4659090909090909 +2505,arc_easy,acc,0.4722222222222222 +2506,arc_easy,acc,0.47769360269360267 +2407,arc_easy,acc_norm,0.531986531986532 +2408,arc_easy,acc_norm,0.5004208754208754 +2409,arc_easy,acc_norm,0.49957912457912457 +2410,arc_easy,acc_norm,0.49326599326599324 +2411,arc_easy,acc_norm,0.4819023569023569 
+2412,arc_easy,acc_norm,0.4511784511784512 +2501,arc_easy,acc_norm,0.4583333333333333 +2502,arc_easy,acc_norm,0.4318181818181818 +2503,arc_easy,acc_norm,0.4537037037037037 +2504,arc_easy,acc_norm,0.44023569023569026 +2505,arc_easy,acc_norm,0.44191919191919193 +2506,arc_easy,acc_norm,0.45664983164983164 +2407,hellaswag,acc,0.416849233220474 +2408,hellaswag,acc,0.3974307906791476 +2409,hellaswag,acc,0.38498307110137425 +2410,hellaswag,acc,0.3686516630153356 +2411,hellaswag,acc,0.35610436168094006 +2412,hellaswag,acc,0.3496315475004979 +2501,hellaswag,acc,0.34405496912965544 +2502,hellaswag,acc,0.33409679346743676 +2503,hellaswag,acc,0.33738299143596895 +2504,hellaswag,acc,0.3301135232025493 +2505,hellaswag,acc,0.3280223063134834 +2506,hellaswag,acc,0.3253335988846843 +2407,hellaswag,acc_norm,0.5473013343955387 +2408,hellaswag,acc_norm,0.5153355905198168 +2409,hellaswag,acc_norm,0.489344752041426 +2410,hellaswag,acc_norm,0.46484763991236805 +2411,hellaswag,acc_norm,0.44343756223859787 +2412,hellaswag,acc_norm,0.4257120095598486 +2501,hellaswag,acc_norm,0.4239195379406493 +2502,hellaswag,acc_norm,0.4017128062139016 +2503,hellaswag,acc_norm,0.40201155148376816 +2504,hellaswag,acc_norm,0.39543915554670384 +2505,hellaswag,acc_norm,0.3903604859589723 +2506,hellaswag,acc_norm,0.38697470623381797 +2407,mmlu,acc,0.241917105825381 +2408,mmlu,acc,0.2318758011679248 +2409,mmlu,acc,0.22959692351516878 +2410,mmlu,acc,0.23807149978635522 +2411,mmlu,acc,0.23508047286711295 +2412,mmlu,acc,0.2352229027204102 +2501,mmlu,acc,0.2380002848597066 +2502,mmlu,acc,0.23593505198689646 +2503,mmlu,acc,0.2318758011679248 +2504,mmlu,acc,0.23251673550776242 +2505,mmlu,acc,0.23678963110667997 +2506,mmlu,acc,0.24291411479846176 +2407,sciq,acc,0.882 +2408,sciq,acc,0.87 +2409,sciq,acc,0.882 +2410,sciq,acc,0.866 +2411,sciq,acc,0.857 +2412,sciq,acc,0.855 +2501,sciq,acc,0.854 +2502,sciq,acc,0.836 +2503,sciq,acc,0.82 +2504,sciq,acc,0.833 +2505,sciq,acc,0.834 +2506,sciq,acc,0.847 +2407,sciq,acc_norm,0.844 +2408,sciq,acc_norm,0.804 +2409,sciq,acc_norm,0.819 +2410,sciq,acc_norm,0.811 +2411,sciq,acc_norm,0.822 +2412,sciq,acc_norm,0.811 +2501,sciq,acc_norm,0.802 +2502,sciq,acc_norm,0.781 +2503,sciq,acc_norm,0.771 +2504,sciq,acc_norm,0.778 +2505,sciq,acc_norm,0.786 +2506,sciq,acc_norm,0.795 +2407,truthfulqa_mc1,acc,0.2423500611995104 +2408,truthfulqa_mc1,acc,0.2594859241126071 +2409,truthfulqa_mc1,acc,0.2386780905752754 +2410,truthfulqa_mc1,acc,0.2521419828641371 +2411,truthfulqa_mc1,acc,0.2607099143206854 +2412,truthfulqa_mc1,acc,0.26805385556915545 +2501,truthfulqa_mc1,acc,0.27906976744186046 +2502,truthfulqa_mc1,acc,0.2974296205630355 +2503,truthfulqa_mc1,acc,0.26193390452876375 +2504,truthfulqa_mc1,acc,0.27050183598531213 +2505,truthfulqa_mc1,acc,0.26438188494492043 +2506,truthfulqa_mc1,acc,0.26805385556915545 +2407,truthfulqa_mc2,acc,0.41171296428698934 +2408,truthfulqa_mc2,acc,0.4127800755882328 +2409,truthfulqa_mc2,acc,0.40520433625245705 +2410,truthfulqa_mc2,acc,0.42608784023385377 +2411,truthfulqa_mc2,acc,0.42431987710612645 +2412,truthfulqa_mc2,acc,0.426638224947852 +2501,truthfulqa_mc2,acc,0.43294939737693783 +2502,truthfulqa_mc2,acc,0.45673716541282944 +2503,truthfulqa_mc2,acc,0.4308698731928784 +2504,truthfulqa_mc2,acc,0.4490762477503484 +2505,truthfulqa_mc2,acc,0.43051964732093856 +2506,truthfulqa_mc2,acc,0.4367369684704035 diff --git a/out/eval/tinyllama_full_arc_arxiv_mc/2407/config.json b/out/eval/tinyllama_full_arc_arxiv_mc/2407/config.json new file mode 100644 index 
0000000000000000000000000000000000000000..6a20305540fec9201e5c28b99dcd32c1000201fd --- /dev/null +++ b/out/eval/tinyllama_full_arc_arxiv_mc/2407/config.json @@ -0,0 +1,24 @@ +{ + "architectures": [ + "LlamaForCausalLM" + ], + "bos_token_id": 1, + "eos_token_id": 2, + "hidden_act": "silu", + "hidden_size": 2048, + "initializer_range": 0.02, + "intermediate_size": 5632, + "max_position_embeddings": 2048, + "model_type": "llama", + "num_attention_heads": 32, + "num_hidden_layers": 22, + "num_key_value_heads": 4, + "pretraining_tp": 1, + "rms_norm_eps": 1e-05, + "rope_scaling": null, + "tie_word_embeddings": false, + "torch_dtype": "float32", + "transformers_version": "4.31.0.dev0", + "use_cache": true, + "vocab_size": 32000 +} diff --git a/out/eval/tinyllama_full_arc_arxiv_mc/2407/generation_config.json b/out/eval/tinyllama_full_arc_arxiv_mc/2407/generation_config.json new file mode 100644 index 0000000000000000000000000000000000000000..89c1930ccf07b1ba0c1bf146b2ad2d2666761dfb --- /dev/null +++ b/out/eval/tinyllama_full_arc_arxiv_mc/2407/generation_config.json @@ -0,0 +1,7 @@ +{ + "bos_token_id": 1, + "eos_token_id": 2, + "pad_token_id": 0, + "max_length": 2048, + "transformers_version": "4.31.0.dev0" +} diff --git a/out/eval/tinyllama_full_arc_arxiv_mc/2407/log.txt b/out/eval/tinyllama_full_arc_arxiv_mc/2407/log.txt new file mode 100644 index 0000000000000000000000000000000000000000..535a37244f7312cc4054b10053b2a0b1b8824ab7 --- /dev/null +++ b/out/eval/tinyllama_full_arc_arxiv_mc/2407/log.txt @@ -0,0 +1,29 @@ +{'access_token': None, + 'batch_size': 4, + 'checkpoint_dir': PosixPath('out/finetune/tinyllama_full_arc/2407/final'), + 'device': None, + 'dtype': None, + 'force_conversion': False, + 'limit': None, + 'num_fewshot': None, + 'out_dir': PosixPath('out/eval/tinyllama_full_arc_arxiv_mc/2407'), + 'save_filepath': None, + 'seed': 1234, + 'tasks': 'arxiv_mc'} +INFO 09-24 14:45:19 [__init__.py:241] Automatically detected platform cuda. +Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher. 
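Stepping back to out/eval/tinyllama_benches/monthly_metrics.csv added above: the month column encodes YYMM (2407 is 2024-07, matching the task aliases in the eval results below), with one row per (month, task, metric). A quick way to view it as a month-by-task table, assuming pandas is available (editorial sketch, not part of the patch):

import pandas as pd

df = pd.read_csv("out/eval/tinyllama_benches/monthly_metrics.csv")
df["month"] = pd.to_datetime(df["month"].astype(str), format="%y%m")
table = df.pivot_table(index="month", columns=["task", "metric"], values="value")
print(table[("arc_easy", "acc")].round(3))  # one accuracy value per evaluated month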
+{'checkpoint_dir': PosixPath('out/finetune/tinyllama_full_arc/2407/final'), + 'output_dir': PosixPath('out/eval/tinyllama_full_arc_arxiv_mc/2407')} + 0%| | 0/100 [00:00 datasets.Dataset:\n def _process_doc(doc):\n answer_map = {\"A\": 0, \"B\": 1, \"C\": 2, \"D\": 3}\n out_doc = {\n \"question\": doc[\"question\"],\n \"choices\": doc[\"choices\"],\n \"answer\": answer_map[doc[\"answer\"]],\n }\n return out_doc\n\n return dataset.map(_process_doc)\n", + "doc_to_text": "Question:{{question.strip()}}\nAnswer:", + "doc_to_target": "answer", + "unsafe_code": false, + "doc_to_choice": "{{choices}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + }, + { + "metric": "acc_norm", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": {} + }, + "arxiv_mc_2408": { + "task": "arxiv_mc_2408", + "task_alias": "2024-08", + "tag": "arxiv_mc_tasks", + "dataset_path": "json", + "dataset_name": "arxiv_mc_2408", + "dataset_kwargs": { + "data_files": { + "test": "/mnt/data/lm-evaluation-harness/dataset/arxiv_mc/2408mc.json" + } + }, + "test_split": "test", + "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n answer_map = {\"A\": 0, \"B\": 1, \"C\": 2, \"D\": 3}\n out_doc = {\n \"question\": doc[\"question\"],\n \"choices\": doc[\"choices\"],\n \"answer\": answer_map[doc[\"answer\"]],\n }\n return out_doc\n\n return dataset.map(_process_doc)\n", + "doc_to_text": "Question:{{question.strip()}}\nAnswer:", + "doc_to_target": "answer", + "unsafe_code": false, + "doc_to_choice": "{{choices}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + }, + { + "metric": "acc_norm", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": {} + }, + "arxiv_mc_2409": { + "task": "arxiv_mc_2409", + "task_alias": "2024-09", + "tag": "arxiv_mc_tasks", + "dataset_path": "json", + "dataset_name": "arxiv_mc_2409", + "dataset_kwargs": { + "data_files": { + "test": "/mnt/data/lm-evaluation-harness/dataset/arxiv_mc/2409mc.json" + } + }, + "test_split": "test", + "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n answer_map = {\"A\": 0, \"B\": 1, \"C\": 2, \"D\": 3}\n out_doc = {\n \"question\": doc[\"question\"],\n \"choices\": doc[\"choices\"],\n \"answer\": answer_map[doc[\"answer\"]],\n }\n return out_doc\n\n return dataset.map(_process_doc)\n", + "doc_to_text": "Question:{{question.strip()}}\nAnswer:", + "doc_to_target": "answer", + "unsafe_code": false, + "doc_to_choice": "{{choices}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + }, + { + "metric": "acc_norm", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": {} + }, + "arxiv_mc_2410": { + "task": "arxiv_mc_2410", + "task_alias": "2024-10", + "tag": "arxiv_mc_tasks", + "dataset_path": "json", + "dataset_name": 
"arxiv_mc_2410", + "dataset_kwargs": { + "data_files": { + "test": "/mnt/data/lm-evaluation-harness/dataset/arxiv_mc/2410mc.json" + } + }, + "test_split": "test", + "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n answer_map = {\"A\": 0, \"B\": 1, \"C\": 2, \"D\": 3}\n out_doc = {\n \"question\": doc[\"question\"],\n \"choices\": doc[\"choices\"],\n \"answer\": answer_map[doc[\"answer\"]],\n }\n return out_doc\n\n return dataset.map(_process_doc)\n", + "doc_to_text": "Question:{{question.strip()}}\nAnswer:", + "doc_to_target": "answer", + "unsafe_code": false, + "doc_to_choice": "{{choices}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + }, + { + "metric": "acc_norm", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": {} + }, + "arxiv_mc_2411": { + "task": "arxiv_mc_2411", + "task_alias": "2024-11", + "tag": "arxiv_mc_tasks", + "dataset_path": "json", + "dataset_name": "arxiv_mc_2411", + "dataset_kwargs": { + "data_files": { + "test": "/mnt/data/lm-evaluation-harness/dataset/arxiv_mc/2411mc.json" + } + }, + "test_split": "test", + "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n answer_map = {\"A\": 0, \"B\": 1, \"C\": 2, \"D\": 3}\n out_doc = {\n \"question\": doc[\"question\"],\n \"choices\": doc[\"choices\"],\n \"answer\": answer_map[doc[\"answer\"]],\n }\n return out_doc\n\n return dataset.map(_process_doc)\n", + "doc_to_text": "Question:{{question.strip()}}\nAnswer:", + "doc_to_target": "answer", + "unsafe_code": false, + "doc_to_choice": "{{choices}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + }, + { + "metric": "acc_norm", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": {} + }, + "arxiv_mc_2412": { + "task": "arxiv_mc_2412", + "task_alias": "2024-12", + "tag": "arxiv_mc_tasks", + "dataset_path": "json", + "dataset_name": "arxiv_mc_2412", + "dataset_kwargs": { + "data_files": { + "test": "/mnt/data/lm-evaluation-harness/dataset/arxiv_mc/2412mc.json" + } + }, + "test_split": "test", + "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n answer_map = {\"A\": 0, \"B\": 1, \"C\": 2, \"D\": 3}\n out_doc = {\n \"question\": doc[\"question\"],\n \"choices\": doc[\"choices\"],\n \"answer\": answer_map[doc[\"answer\"]],\n }\n return out_doc\n\n return dataset.map(_process_doc)\n", + "doc_to_text": "Question:{{question.strip()}}\nAnswer:", + "doc_to_target": "answer", + "unsafe_code": false, + "doc_to_choice": "{{choices}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + }, + { + "metric": "acc_norm", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": {} + }, + "arxiv_mc_2501": { + "task": "arxiv_mc_2501", + "task_alias": "2025-01", + "tag": 
"arxiv_mc_tasks", + "dataset_path": "json", + "dataset_name": "arxiv_mc_2501", + "dataset_kwargs": { + "data_files": { + "test": "/mnt/data/lm-evaluation-harness/dataset/arxiv_mc/2501mc.json" + } + }, + "test_split": "test", + "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n answer_map = {\"A\": 0, \"B\": 1, \"C\": 2, \"D\": 3}\n out_doc = {\n \"question\": doc[\"question\"],\n \"choices\": doc[\"choices\"],\n \"answer\": answer_map[doc[\"answer\"]],\n }\n return out_doc\n\n return dataset.map(_process_doc)\n", + "doc_to_text": "Question:{{question.strip()}}\nAnswer:", + "doc_to_target": "answer", + "unsafe_code": false, + "doc_to_choice": "{{choices}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + }, + { + "metric": "acc_norm", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": {} + }, + "arxiv_mc_2502": { + "task": "arxiv_mc_2502", + "task_alias": "2025-02", + "tag": "arxiv_mc_tasks", + "dataset_path": "json", + "dataset_name": "arxiv_mc_2502", + "dataset_kwargs": { + "data_files": { + "test": "/mnt/data/lm-evaluation-harness/dataset/arxiv_mc/2502mc.json" + } + }, + "test_split": "test", + "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n answer_map = {\"A\": 0, \"B\": 1, \"C\": 2, \"D\": 3}\n out_doc = {\n \"question\": doc[\"question\"],\n \"choices\": doc[\"choices\"],\n \"answer\": answer_map[doc[\"answer\"]],\n }\n return out_doc\n\n return dataset.map(_process_doc)\n", + "doc_to_text": "Question:{{question.strip()}}\nAnswer:", + "doc_to_target": "answer", + "unsafe_code": false, + "doc_to_choice": "{{choices}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + }, + { + "metric": "acc_norm", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": {} + }, + "arxiv_mc_2503": { + "task": "arxiv_mc_2503", + "task_alias": "2025-03", + "tag": "arxiv_mc_tasks", + "dataset_path": "json", + "dataset_name": "arxiv_mc_2503", + "dataset_kwargs": { + "data_files": { + "test": "/mnt/data/lm-evaluation-harness/dataset/arxiv_mc/2503mc.json" + } + }, + "test_split": "test", + "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n answer_map = {\"A\": 0, \"B\": 1, \"C\": 2, \"D\": 3}\n out_doc = {\n \"question\": doc[\"question\"],\n \"choices\": doc[\"choices\"],\n \"answer\": answer_map[doc[\"answer\"]],\n }\n return out_doc\n\n return dataset.map(_process_doc)\n", + "doc_to_text": "Question:{{question.strip()}}\nAnswer:", + "doc_to_target": "answer", + "unsafe_code": false, + "doc_to_choice": "{{choices}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + }, + { + "metric": "acc_norm", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": {} + }, + "arxiv_mc_2504": { + "task": 
"arxiv_mc_2504", + "task_alias": "2025-04", + "tag": "arxiv_mc_tasks", + "dataset_path": "json", + "dataset_name": "arxiv_mc_2504", + "dataset_kwargs": { + "data_files": { + "test": "/mnt/data/lm-evaluation-harness/dataset/arxiv_mc/2504mc.json" + } + }, + "test_split": "test", + "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n answer_map = {\"A\": 0, \"B\": 1, \"C\": 2, \"D\": 3}\n out_doc = {\n \"question\": doc[\"question\"],\n \"choices\": doc[\"choices\"],\n \"answer\": answer_map[doc[\"answer\"]],\n }\n return out_doc\n\n return dataset.map(_process_doc)\n", + "doc_to_text": "Question:{{question.strip()}}\nAnswer:", + "doc_to_target": "answer", + "unsafe_code": false, + "doc_to_choice": "{{choices}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + }, + { + "metric": "acc_norm", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": {} + }, + "arxiv_mc_2505": { + "task": "arxiv_mc_2505", + "task_alias": "2025-05", + "tag": "arxiv_mc_tasks", + "dataset_path": "json", + "dataset_name": "arxiv_mc_2505", + "dataset_kwargs": { + "data_files": { + "test": "/mnt/data/lm-evaluation-harness/dataset/arxiv_mc/2505mc.json" + } + }, + "test_split": "test", + "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n answer_map = {\"A\": 0, \"B\": 1, \"C\": 2, \"D\": 3}\n out_doc = {\n \"question\": doc[\"question\"],\n \"choices\": doc[\"choices\"],\n \"answer\": answer_map[doc[\"answer\"]],\n }\n return out_doc\n\n return dataset.map(_process_doc)\n", + "doc_to_text": "Question:{{question.strip()}}\nAnswer:", + "doc_to_target": "answer", + "unsafe_code": false, + "doc_to_choice": "{{choices}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + }, + { + "metric": "acc_norm", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + "metadata": {} + }, + "arxiv_mc_2506": { + "task": "arxiv_mc_2506", + "task_alias": "2025-06", + "tag": "arxiv_mc_tasks", + "dataset_path": "json", + "dataset_name": "arxiv_mc_2506", + "dataset_kwargs": { + "data_files": { + "test": "/mnt/data/lm-evaluation-harness/dataset/arxiv_mc/2506mc.json" + } + }, + "test_split": "test", + "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n answer_map = {\"A\": 0, \"B\": 1, \"C\": 2, \"D\": 3}\n out_doc = {\n \"question\": doc[\"question\"],\n \"choices\": doc[\"choices\"],\n \"answer\": answer_map[doc[\"answer\"]],\n }\n return out_doc\n\n return dataset.map(_process_doc)\n", + "doc_to_text": "Question:{{question.strip()}}\nAnswer:", + "doc_to_target": "answer", + "unsafe_code": false, + "doc_to_choice": "{{choices}}", + "description": "", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "num_fewshot": 0, + "metric_list": [ + { + "metric": "acc", + "aggregation": "mean", + "higher_is_better": true + }, + { + "metric": "acc_norm", + "aggregation": "mean", + "higher_is_better": true + } + ], + "output_type": "multiple_choice", + "repeats": 1, + "should_decontaminate": false, + 
"metadata": {} + } + }, + "versions": { + "arxiv_mc": null, + "arxiv_mc_2407": "Yaml", + "arxiv_mc_2408": "Yaml", + "arxiv_mc_2409": "Yaml", + "arxiv_mc_2410": "Yaml", + "arxiv_mc_2411": "Yaml", + "arxiv_mc_2412": "Yaml", + "arxiv_mc_2501": "Yaml", + "arxiv_mc_2502": "Yaml", + "arxiv_mc_2503": "Yaml", + "arxiv_mc_2504": "Yaml", + "arxiv_mc_2505": "Yaml", + "arxiv_mc_2506": "Yaml" + }, + "n-shot": { + "arxiv_mc_2407": 0, + "arxiv_mc_2408": 0, + "arxiv_mc_2409": 0, + "arxiv_mc_2410": 0, + "arxiv_mc_2411": 0, + "arxiv_mc_2412": 0, + "arxiv_mc_2501": 0, + "arxiv_mc_2502": 0, + "arxiv_mc_2503": 0, + "arxiv_mc_2504": 0, + "arxiv_mc_2505": 0, + "arxiv_mc_2506": 0 + }, + "higher_is_better": { + "arxiv_mc": { + "acc": true, + "acc_norm": true + }, + "arxiv_mc_2407": { + "acc": true, + "acc_norm": true + }, + "arxiv_mc_2408": { + "acc": true, + "acc_norm": true + }, + "arxiv_mc_2409": { + "acc": true, + "acc_norm": true + }, + "arxiv_mc_2410": { + "acc": true, + "acc_norm": true + }, + "arxiv_mc_2411": { + "acc": true, + "acc_norm": true + }, + "arxiv_mc_2412": { + "acc": true, + "acc_norm": true + }, + "arxiv_mc_2501": { + "acc": true, + "acc_norm": true + }, + "arxiv_mc_2502": { + "acc": true, + "acc_norm": true + }, + "arxiv_mc_2503": { + "acc": true, + "acc_norm": true + }, + "arxiv_mc_2504": { + "acc": true, + "acc_norm": true + }, + "arxiv_mc_2505": { + "acc": true, + "acc_norm": true + }, + "arxiv_mc_2506": { + "acc": true, + "acc_norm": true + } + }, + "n-samples": { + "arxiv_mc_2407": { + "original": 100, + "effective": 100 + }, + "arxiv_mc_2408": { + "original": 100, + "effective": 100 + }, + "arxiv_mc_2409": { + "original": 100, + "effective": 100 + }, + "arxiv_mc_2410": { + "original": 100, + "effective": 100 + }, + "arxiv_mc_2411": { + "original": 100, + "effective": 100 + }, + "arxiv_mc_2412": { + "original": 100, + "effective": 100 + }, + "arxiv_mc_2501": { + "original": 100, + "effective": 100 + }, + "arxiv_mc_2502": { + "original": 100, + "effective": 100 + }, + "arxiv_mc_2503": { + "original": 100, + "effective": 100 + }, + "arxiv_mc_2504": { + "original": 100, + "effective": 100 + }, + "arxiv_mc_2505": { + "original": 100, + "effective": 100 + }, + "arxiv_mc_2506": { + "original": 100, + "effective": 100 + } + }, + "samples": { + "arxiv_mc_2407": [ + { + "doc_id": 0, + "doc": { + "question": "Which chunking method was chosen in the study for balancing simplicity and semantic preservation?", + "choices": [ + "Token-level Chunking", + "Sentence-level Chunking", + "Semantic-level Chunking", + "Paragraph-level Chunking" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which chunking method was chosen in the study for balancing simplicity and semantic preservation?\nAnswer:", + " Token-level Chunking" + ], + [ + "Question:Which chunking method was chosen in the study for balancing simplicity and semantic preservation?\nAnswer:", + " Sentence-level Chunking" + ], + [ + "Question:Which chunking method was chosen in the study for balancing simplicity and semantic preservation?\nAnswer:", + " Semantic-level Chunking" + ], + [ + "Question:Which chunking method was chosen in the study for balancing simplicity and semantic preservation?\nAnswer:", + " Paragraph-level Chunking" + ] + ], + "resps": [ + [ + [ + -14.86745834350586, + false + ] + ], + [ + [ + -12.75033950805664, + false + ] + ], + [ + [ + -18.18337059020996, + false + ] + ], + [ + [ + -15.440104484558105, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.86745834350586, + false + ], + [ + 
-12.75033950805664, + false + ], + [ + -18.18337059020996, + false + ], + [ + -15.440104484558105, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "112fbf650834f38f39bc57c35a7c8cc57f706e1daacf1cdd6a832275c5608ac3", + "prompt_hash": "4f21c252401f8a7f83cba5cd45a593221ca2fa1ec916980f2b415e1ac1d60783", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 1, + "doc": { + "question": "Which vector database was identified as the most comprehensive solution, meeting all key criteria including multiple index types, billion-scale support, hybrid search, and cloud-native capabilities?", + "choices": [ + "Weaviate", + "Faiss", + "Qdrant", + "Milvus" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:Which vector database was identified as the most comprehensive solution, meeting all key criteria including multiple index types, billion-scale support, hybrid search, and cloud-native capabilities?\nAnswer:", + " Weaviate" + ], + [ + "Question:Which vector database was identified as the most comprehensive solution, meeting all key criteria including multiple index types, billion-scale support, hybrid search, and cloud-native capabilities?\nAnswer:", + " Faiss" + ], + [ + "Question:Which vector database was identified as the most comprehensive solution, meeting all key criteria including multiple index types, billion-scale support, hybrid search, and cloud-native capabilities?\nAnswer:", + " Qdrant" + ], + [ + "Question:Which vector database was identified as the most comprehensive solution, meeting all key criteria including multiple index types, billion-scale support, hybrid search, and cloud-native capabilities?\nAnswer:", + " Milvus" + ] + ], + "resps": [ + [ + [ + -16.53425407409668, + false + ] + ], + [ + [ + -7.181585788726807, + false + ] + ], + [ + [ + -9.658040046691895, + false + ] + ], + [ + [ + -8.326963424682617, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.53425407409668, + false + ], + [ + -7.181585788726807, + false + ], + [ + -9.658040046691895, + false + ], + [ + -8.326963424682617, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "dc5641478241a96b0f12c3e9c434abd3b0971c0ed76a108c5f8298504f86d732", + "prompt_hash": "aedd8317881ad284c723d89028876ce36faf93c1b4b32fc269961413c7729440", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 2, + "doc": { + "question": "Which reranking model was recommended as balancing performance and efficiency?", + "choices": [ + "monoT5", + "RankLLaMA", + "TILDEv2", + "monoBERT" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which reranking model was recommended as balancing performance and efficiency?\nAnswer:", + " monoT5" + ], + [ + "Question:Which reranking model was recommended as balancing performance and efficiency?\nAnswer:", + " RankLLaMA" + ], + [ + "Question:Which reranking model was recommended as balancing performance and efficiency?\nAnswer:", + " TILDEv2" + ], + [ + "Question:Which reranking model was recommended as balancing performance and efficiency?\nAnswer:", + " monoBERT" + ] + ], + "resps": [ + [ + [ + -15.6542329788208, + false + ] + ], + [ + [ + -12.929773330688477, + false + ] + ], + [ + [ + -19.209312438964844, + false + ] + ], + [ + [ + -14.345467567443848, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.6542329788208, + 
false + ], + [ + -12.929773330688477, + false + ], + [ + -19.209312438964844, + false + ], + [ + -14.345467567443848, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "ec1881d1f0dd4d7a02eb312674cd46a64e082bcc50727d3d07966e690bcaaee0", + "prompt_hash": "1516fe9f99e098b69b5a14277865693e334db21041e6a088a001c7daf9ed2959", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 3, + "doc": { + "question": "In generator fine-tuning, which training strategy was found to enhance robustness by mixing relevant and random contexts?", + "choices": [ + "Training only with relevant documents (Dg)", + "Training only with random documents (Dr)", + "Training with both relevant and random documents (Dgr)", + "Training with duplicate relevant documents (Dgg)" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:In generator fine-tuning, which training strategy was found to enhance robustness by mixing relevant and random contexts?\nAnswer:", + " Training only with relevant documents (Dg)" + ], + [ + "Question:In generator fine-tuning, which training strategy was found to enhance robustness by mixing relevant and random contexts?\nAnswer:", + " Training only with random documents (Dr)" + ], + [ + "Question:In generator fine-tuning, which training strategy was found to enhance robustness by mixing relevant and random contexts?\nAnswer:", + " Training with both relevant and random documents (Dgr)" + ], + [ + "Question:In generator fine-tuning, which training strategy was found to enhance robustness by mixing relevant and random contexts?\nAnswer:", + " Training with duplicate relevant documents (Dgg)" + ] + ], + "resps": [ + [ + [ + -40.50318908691406, + false + ] + ], + [ + [ + -39.652557373046875, + false + ] + ], + [ + [ + -44.193763732910156, + false + ] + ], + [ + [ + -52.69440460205078, + false + ] + ] + ], + "filtered_resps": [ + [ + -40.50318908691406, + false + ], + [ + -39.652557373046875, + false + ], + [ + -44.193763732910156, + false + ], + [ + -52.69440460205078, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7173c9ca783290cdb4b8a10eb3a1dce044a749f837da2139a7a29d92d000cf85", + "prompt_hash": "384c9b52ef5231fd417565f14aea7a6a784241b6b1fb250deee33dbddf87fff7", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 4, + "doc": { + "question": "Which summarization method was recommended for its outstanding performance?", + "choices": [ + "Selective Context", + "LongLLMLingua", + "BM25 Extractive", + "Recomp" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:Which summarization method was recommended for its outstanding performance?\nAnswer:", + " Selective Context" + ], + [ + "Question:Which summarization method was recommended for its outstanding performance?\nAnswer:", + " LongLLMLingua" + ], + [ + "Question:Which summarization method was recommended for its outstanding performance?\nAnswer:", + " BM25 Extractive" + ], + [ + "Question:Which summarization method was recommended for its outstanding performance?\nAnswer:", + " Recomp" + ] + ], + "resps": [ + [ + [ + -20.990047454833984, + false + ] + ], + [ + [ + -18.77847671508789, + false + ] + ], + [ + [ + -23.54022216796875, + false + ] + ], + [ + [ + -14.204093933105469, + false + ] + ] + ], + "filtered_resps": [ + [ + -20.990047454833984, + false + ], + [ 
+ -18.77847671508789, + false + ], + [ + -23.54022216796875, + false + ], + [ + -14.204093933105469, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "a6b581adefddccee39d5247c99e1f0bde00414c0f883a8cf2547bdb539a9aa7b", + "prompt_hash": "9fa3215a12dc1fc19dc1c9d7fd30cd52340f16e534d409f05ea2262435ed51a1", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 5, + "doc": { + "question": "How many visual math problems are included in the proposed dataset?", + "choices": [ + "4.5K", + "5.0K", + "6.5K", + "7.2K" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many visual math problems are included in the proposed dataset?\nAnswer:", + " 4.5K" + ], + [ + "Question:How many visual math problems are included in the proposed dataset?\nAnswer:", + " 5.0K" + ], + [ + "Question:How many visual math problems are included in the proposed dataset?\nAnswer:", + " 6.5K" + ], + [ + "Question:How many visual math problems are included in the proposed dataset?\nAnswer:", + " 7.2K" + ] + ], + "resps": [ + [ + [ + -12.729255676269531, + false + ] + ], + [ + [ + -14.22647476196289, + false + ] + ], + [ + [ + -13.167308807373047, + false + ] + ], + [ + [ + -13.763832092285156, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.729255676269531, + false + ], + [ + -14.22647476196289, + false + ], + [ + -13.167308807373047, + false + ], + [ + -13.763832092285156, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "fb3604d749f5b7fcc83d359bdf0a18094bf32436d7242c0d61f39bc92a5575ec", + "prompt_hash": "e7b62eec87e03ee8506c86b240c326f3114a1ab0247407850fc920b4c2c2332b", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 6, + "doc": { + "question": "Into how many knowledge concepts are the problems in the dataset categorized?", + "choices": [ + "45", + "57", + "67", + "77" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Into how many knowledge concepts are the problems in the dataset categorized?\nAnswer:", + " 45" + ], + [ + "Question:Into how many knowledge concepts are the problems in the dataset categorized?\nAnswer:", + " 57" + ], + [ + "Question:Into how many knowledge concepts are the problems in the dataset categorized?\nAnswer:", + " 67" + ], + [ + "Question:Into how many knowledge concepts are the problems in the dataset categorized?\nAnswer:", + " 77" + ] + ], + "resps": [ + [ + [ + -8.812484741210938, + false + ] + ], + [ + [ + -9.466009140014648, + false + ] + ], + [ + [ + -9.849740982055664, + false + ] + ], + [ + [ + -10.170186996459961, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.812484741210938, + false + ], + [ + -9.466009140014648, + false + ], + [ + -9.849740982055664, + false + ], + [ + -10.170186996459961, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "9dbe00d7ad342d9d61a05b95909c8335e44a65600dbca6821fc9ff31d702aa79", + "prompt_hash": "b8c118c69992d28a3628ec73ea1ac12a01da856f97eef3b95a3339bb4e94a8fb", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 7, + "doc": { + "question": "Which four-dimensional metric is used to evaluate problem-solving abilities?", + "choices": [ + "Precision, Recall, Accuracy, F1", + "IK, IG, CM, RM", + "BLEU, ROUGE, METEOR, CIDEr", + 
"Speed, Memory, Efficiency, Robustness" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which four-dimensional metric is used to evaluate problem-solving abilities?\nAnswer:", + " Precision, Recall, Accuracy, F1" + ], + [ + "Question:Which four-dimensional metric is used to evaluate problem-solving abilities?\nAnswer:", + " IK, IG, CM, RM" + ], + [ + "Question:Which four-dimensional metric is used to evaluate problem-solving abilities?\nAnswer:", + " BLEU, ROUGE, METEOR, CIDEr" + ], + [ + "Question:Which four-dimensional metric is used to evaluate problem-solving abilities?\nAnswer:", + " Speed, Memory, Efficiency, Robustness" + ] + ], + "resps": [ + [ + [ + -24.218046188354492, + false + ] + ], + [ + [ + -40.70429611206055, + false + ] + ], + [ + [ + -28.812503814697266, + false + ] + ], + [ + [ + -38.256866455078125, + false + ] + ] + ], + "filtered_resps": [ + [ + -24.218046188354492, + false + ], + [ + -40.70429611206055, + false + ], + [ + -28.812503814697266, + false + ], + [ + -38.256866455078125, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "e7d482ff7ce85c9bb9f81ef47fda540bfc235fd3cdbb8cce2b61775e27c03d22", + "prompt_hash": "f32689e2b31b6aa425d21f4e02e4b2b1a6bfbbb6c61cbb7ddf7f0895b98e939d", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 8, + "doc": { + "question": "Which model achieved the best overall performance across different visual mathematics categories?", + "choices": [ + "GPT-4V", + "Gemini 1.5 Pro", + "GPT-4o", + "LLaVA-NeXT-110B" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which model achieved the best overall performance across different visual mathematics categories?\nAnswer:", + " GPT-4V" + ], + [ + "Question:Which model achieved the best overall performance across different visual mathematics categories?\nAnswer:", + " Gemini 1.5 Pro" + ], + [ + "Question:Which model achieved the best overall performance across different visual mathematics categories?\nAnswer:", + " GPT-4o" + ], + [ + "Question:Which model achieved the best overall performance across different visual mathematics categories?\nAnswer:", + " LLaVA-NeXT-110B" + ] + ], + "resps": [ + [ + [ + -9.522256851196289, + false + ] + ], + [ + [ + -8.766477584838867, + false + ] + ], + [ + [ + -7.747229099273682, + false + ] + ], + [ + [ + -21.87334442138672, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.522256851196289, + false + ], + [ + -8.766477584838867, + false + ], + [ + -7.747229099273682, + false + ], + [ + -21.87334442138672, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "8c452c6f79e5150462edd67e4195af4bd8b30a326a1993e322675fd8487a60fe", + "prompt_hash": "6c857a8e695bf66745f3a947b038b9297beda2f70ee0d8d18dbdf51c67e38e55", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 9, + "doc": { + "question": "From where were the 67 knowledge concept descriptions for Knowledge Concept Augmentation (KCA) constructed?", + "choices": [ + "Only Wikipedia", + "Only Euclid's Elements", + "Wikipedia and textbooks", + "Research papers only" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:From where were the 67 knowledge concept descriptions for Knowledge Concept Augmentation (KCA) constructed?\nAnswer:", + " Only Wikipedia" + ], + [ + "Question:From where were the 67 
knowledge concept descriptions for Knowledge Concept Augmentation (KCA) constructed?\nAnswer:", + " Only Euclid's Elements" + ], + [ + "Question:From where were the 67 knowledge concept descriptions for Knowledge Concept Augmentation (KCA) constructed?\nAnswer:", + " Wikipedia and textbooks" + ], + [ + "Question:From where were the 67 knowledge concept descriptions for Knowledge Concept Augmentation (KCA) constructed?\nAnswer:", + " Research papers only" + ] + ], + "resps": [ + [ + [ + -14.755115509033203, + false + ] + ], + [ + [ + -23.898681640625, + false + ] + ], + [ + [ + -15.336736679077148, + false + ] + ], + [ + [ + -14.167567253112793, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.755115509033203, + false + ], + [ + -23.898681640625, + false + ], + [ + -15.336736679077148, + false + ], + [ + -14.167567253112793, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "aac932a6dc506406fdc0373f66d9e474f5ea83351d26ca76893eba8dacb3de96", + "prompt_hash": "c41ce6c6bd641fdcf1c75204b06887e517a2df335ace0c502170795e268a6228", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 10, + "doc": { + "question": "Which humanoid robot used in the experiments is equipped with multi-finger hands?", + "choices": [ + "Fourier GR-1", + "Unitree H1", + "ALOHA", + "AnyTeleop" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which humanoid robot used in the experiments is equipped with multi-finger hands?\nAnswer:", + " Fourier GR-1" + ], + [ + "Question:Which humanoid robot used in the experiments is equipped with multi-finger hands?\nAnswer:", + " Unitree H1" + ], + [ + "Question:Which humanoid robot used in the experiments is equipped with multi-finger hands?\nAnswer:", + " ALOHA" + ], + [ + "Question:Which humanoid robot used in the experiments is equipped with multi-finger hands?\nAnswer:", + " AnyTeleop" + ] + ], + "resps": [ + [ + [ + -19.240524291992188, + false + ] + ], + [ + [ + -8.106757164001465, + false + ] + ], + [ + [ + -14.45346450805664, + false + ] + ], + [ + [ + -17.957672119140625, + false + ] + ] + ], + "filtered_resps": [ + [ + -19.240524291992188, + false + ], + [ + -8.106757164001465, + false + ], + [ + -14.45346450805664, + false + ], + [ + -17.957672119140625, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "03549faa0210f17c230f5e924730bcd50c2c8b8d86e30dd769837a95380f5495", + "prompt_hash": "513ea08ab58365f9c8d0dbe8082c93d5d5beaa6661c0cee8334ebe899b02c226", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 11, + "doc": { + "question": "What stereo camera is used with both robots to provide RGB streaming?", + "choices": [ + "Intel RealSense D435", + "ZED Mini", + "Azure Kinect", + "Leap Motion" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What stereo camera is used with both robots to provide RGB streaming?\nAnswer:", + " Intel RealSense D435" + ], + [ + "Question:What stereo camera is used with both robots to provide RGB streaming?\nAnswer:", + " ZED Mini" + ], + [ + "Question:What stereo camera is used with both robots to provide RGB streaming?\nAnswer:", + " Azure Kinect" + ], + [ + "Question:What stereo camera is used with both robots to provide RGB streaming?\nAnswer:", + " Leap Motion" + ] + ], + "resps": [ + [ + [ + -7.248941898345947, + false + ] + ], + [ + [ + 
-8.554323196411133, + false + ] + ], + [ + [ + -10.395401000976562, + false + ] + ], + [ + [ + -17.37678337097168, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.248941898345947, + false + ], + [ + -8.554323196411133, + false + ], + [ + -10.395401000976562, + false + ], + [ + -17.37678337097168, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "097bac5cbc7f405d0d2c5d54bb87a36fbc0b869e5ca0fb38460550e54b3c1deb", + "prompt_hash": "6e647c4b2048258098420c8acb5aed5a05834763fdf430aab1b860c12a68b87b", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 12, + "doc": { + "question": "In the Can Insertion task, what is the approximate diameter of the soda can slot?", + "choices": [ + "3.6 cm", + "5.6 cm", + "7.6 cm", + "9.6 cm" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:In the Can Insertion task, what is the approximate diameter of the soda can slot?\nAnswer:", + " 3.6 cm" + ], + [ + "Question:In the Can Insertion task, what is the approximate diameter of the soda can slot?\nAnswer:", + " 5.6 cm" + ], + [ + "Question:In the Can Insertion task, what is the approximate diameter of the soda can slot?\nAnswer:", + " 7.6 cm" + ], + [ + "Question:In the Can Insertion task, what is the approximate diameter of the soda can slot?\nAnswer:", + " 9.6 cm" + ] + ], + "resps": [ + [ + [ + -9.950165748596191, + false + ] + ], + [ + [ + -10.258798599243164, + false + ] + ], + [ + [ + -10.89891242980957, + false + ] + ], + [ + [ + -11.146050453186035, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.950165748596191, + false + ], + [ + -10.258798599243164, + false + ], + [ + -10.89891242980957, + false + ], + [ + -11.146050453186035, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "9a1a06df62b817e903f1359d4c025997f64cf35d891deaac93377a160228984a", + "prompt_hash": "6059a3748f485ec2ce37de889c2df78a6f8f5e469d1c7156c1c872b8278b5edd", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 13, + "doc": { + "question": "Which visual backbone replaced ResNet in the modified ACT algorithm?", + "choices": [ + "ViT-B/16", + "DinoV2", + "CLIP-ViT", + "Swin Transformer" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which visual backbone replaced ResNet in the modified ACT algorithm?\nAnswer:", + " ViT-B/16" + ], + [ + "Question:Which visual backbone replaced ResNet in the modified ACT algorithm?\nAnswer:", + " DinoV2" + ], + [ + "Question:Which visual backbone replaced ResNet in the modified ACT algorithm?\nAnswer:", + " CLIP-ViT" + ], + [ + "Question:Which visual backbone replaced ResNet in the modified ACT algorithm?\nAnswer:", + " Swin Transformer" + ] + ], + "resps": [ + [ + [ + -10.351160049438477, + false + ] + ], + [ + [ + -9.934405326843262, + false + ] + ], + [ + [ + -12.724019050598145, + false + ] + ], + [ + [ + -9.91457748413086, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.351160049438477, + false + ], + [ + -9.934405326843262, + false + ], + [ + -12.724019050598145, + false + ], + [ + -9.91457748413086, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7335d1d88d4bdc8ea82abb121524726484026f6508ee853f1e27fb73070421c3", + "prompt_hash": "55413e378ac34f5ac9b87eb75e3a8375c6d12a88b184f3919918f9cbeb5ffd03", + "target_hash": 
"6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 14, + "doc": { + "question": "What is the action dimension of the H1 robot in the imitation learning experiments?", + "choices": [ + "19", + "22", + "28", + "36" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the action dimension of the H1 robot in the imitation learning experiments?\nAnswer:", + " 19" + ], + [ + "Question:What is the action dimension of the H1 robot in the imitation learning experiments?\nAnswer:", + " 22" + ], + [ + "Question:What is the action dimension of the H1 robot in the imitation learning experiments?\nAnswer:", + " 28" + ], + [ + "Question:What is the action dimension of the H1 robot in the imitation learning experiments?\nAnswer:", + " 36" + ] + ], + "resps": [ + [ + [ + -7.213255882263184, + false + ] + ], + [ + [ + -7.448844909667969, + false + ] + ], + [ + [ + -7.832391738891602, + false + ] + ], + [ + [ + -7.059266567230225, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.213255882263184, + false + ], + [ + -7.448844909667969, + false + ], + [ + -7.832391738891602, + false + ], + [ + -7.059266567230225, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "814c5e04b3c52aee4763235c2a6be2412ce07724c00036756ee1a253a02d9677", + "prompt_hash": "afce1cbfccf449d047baf9ab43d4dfee10be79063fe8399c469b1df801b2e8a9", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 15, + "doc": { + "question": "What is the primary goal of Kimi as a Model-as-a-Service (MaaS) provider?", + "choices": [ + "To maximize overall effective throughput while meeting latency-related SLO requirements", + "To minimize GPU usage regardless of service quality", + "To reject as many requests as possible to save resources", + "To increase output length of all requests without constraints" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the primary goal of Kimi as a Model-as-a-Service (MaaS) provider?\nAnswer:", + " To maximize overall effective throughput while meeting latency-related SLO requirements" + ], + [ + "Question:What is the primary goal of Kimi as a Model-as-a-Service (MaaS) provider?\nAnswer:", + " To minimize GPU usage regardless of service quality" + ], + [ + "Question:What is the primary goal of Kimi as a Model-as-a-Service (MaaS) provider?\nAnswer:", + " To reject as many requests as possible to save resources" + ], + [ + "Question:What is the primary goal of Kimi as a Model-as-a-Service (MaaS) provider?\nAnswer:", + " To increase output length of all requests without constraints" + ] + ], + "resps": [ + [ + [ + -46.09083938598633, + false + ] + ], + [ + [ + -44.75458526611328, + false + ] + ], + [ + [ + -32.21599197387695, + false + ] + ], + [ + [ + -50.386390686035156, + false + ] + ] + ], + "filtered_resps": [ + [ + -46.09083938598633, + false + ], + [ + -44.75458526611328, + false + ], + [ + -32.21599197387695, + false + ], + [ + -50.386390686035156, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "357101440addaf6419788f163efe79352930f15a794edcda0260253127f3980e", + "prompt_hash": "61cb0bfc2625337ab48a23c909c4203ddc219f9d2ade56062d4f2d2ddc02cbc6", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 16, + "doc": { + "question": "In Mooncake's 
architecture, what does the prefill stage primarily focus on?", + "choices": [ + "Generating all output tokens at once", + "Processing all input tokens in parallel and generating the first output token", + "Rejecting requests before execution", + "Managing memory transfers between CPU and GPU" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:In Mooncake's architecture, what does the prefill stage primarily focus on?\nAnswer:", + " Generating all output tokens at once" + ], + [ + "Question:In Mooncake's architecture, what does the prefill stage primarily focus on?\nAnswer:", + " Processing all input tokens in parallel and generating the first output token" + ], + [ + "Question:In Mooncake's architecture, what does the prefill stage primarily focus on?\nAnswer:", + " Rejecting requests before execution" + ], + [ + "Question:In Mooncake's architecture, what does the prefill stage primarily focus on?\nAnswer:", + " Managing memory transfers between CPU and GPU" + ] + ], + "resps": [ + [ + [ + -23.025474548339844, + false + ] + ], + [ + [ + -34.40630340576172, + false + ] + ], + [ + [ + -33.791385650634766, + false + ] + ], + [ + [ + -24.586078643798828, + false + ] + ] + ], + "filtered_resps": [ + [ + -23.025474548339844, + false + ], + [ + -34.40630340576172, + false + ], + [ + -33.791385650634766, + false + ], + [ + -24.586078643798828, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "38ca07f4adc667f8133e57528e618095826332c29aa1c3319cdae66a99c6378a", + "prompt_hash": "18182cffc75f3b11fb711616fe233f9e643833bc77701108f1a5e3995d185645", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 17, + "doc": { + "question": "What is the main advantage of Early Rejection in Mooncake's overload scheduling?", + "choices": [ + "It increases the output length of requests", + "It reduces ineffective computations by rejecting requests before wasting prefill resources", + "It doubles the number of decoding nodes required", + "It guarantees zero fluctuations in system load" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the main advantage of Early Rejection in Mooncake's overload scheduling?\nAnswer:", + " It increases the output length of requests" + ], + [ + "Question:What is the main advantage of Early Rejection in Mooncake's overload scheduling?\nAnswer:", + " It reduces ineffective computations by rejecting requests before wasting prefill resources" + ], + [ + "Question:What is the main advantage of Early Rejection in Mooncake's overload scheduling?\nAnswer:", + " It doubles the number of decoding nodes required" + ], + [ + "Question:What is the main advantage of Early Rejection in Mooncake's overload scheduling?\nAnswer:", + " It guarantees zero fluctuations in system load" + ] + ], + "resps": [ + [ + [ + -27.2811222076416, + false + ] + ], + [ + [ + -61.370513916015625, + false + ] + ], + [ + [ + -38.169864654541016, + false + ] + ], + [ + [ + -31.666122436523438, + false + ] + ] + ], + "filtered_resps": [ + [ + -27.2811222076416, + false + ], + [ + -61.370513916015625, + false + ], + [ + -38.169864654541016, + false + ], + [ + -31.666122436523438, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "34b971e8ea67c807f3df941c16dce19d1c799012d68e6bc92335d7c2803a550e", + "prompt_hash": "1a16f6563d0113375ee1a225e7fa2fde0dd9fbeb6b356447607f396f66e4d5fd", + "target_hash": 
"6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 18, + "doc": { + "question": "Which cache eviction policy achieved the best performance under Mooncake's request trace dataset?", + "choices": [ + "FIFO (First In First Out)", + "LRU (Least Recently Used)", + "LFU (Least Frequently Used)", + "LengthAwareCache" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which cache eviction policy achieved the best performance under Mooncake's request trace dataset?\nAnswer:", + " FIFO (First In First Out)" + ], + [ + "Question:Which cache eviction policy achieved the best performance under Mooncake's request trace dataset?\nAnswer:", + " LRU (Least Recently Used)" + ], + [ + "Question:Which cache eviction policy achieved the best performance under Mooncake's request trace dataset?\nAnswer:", + " LFU (Least Frequently Used)" + ], + [ + "Question:Which cache eviction policy achieved the best performance under Mooncake's request trace dataset?\nAnswer:", + " LengthAwareCache" + ] + ], + "resps": [ + [ + [ + -12.678746223449707, + false + ] + ], + [ + [ + -10.518775939941406, + false + ] + ], + [ + [ + -17.02437973022461, + false + ] + ], + [ + [ + -18.03860092163086, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.678746223449707, + false + ], + [ + -10.518775939941406, + false + ], + [ + -17.02437973022461, + false + ], + [ + -18.03860092163086, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "fc4857a67b3c44de917f0962153f5561de3784f4837615bf36cf05223ce42f3e", + "prompt_hash": "fbd26fbc05e64de5a13e17eb8ddaac22222e0f3ecd4d93f15822c5194dc0f8eb", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 19, + "doc": { + "question": "In the evaluation of Mooncake versus vLLM on simulated long-context data, what was Mooncake's throughput improvement range?", + "choices": [ + "10% to 25%", + "30% to 75%", + "50% to 525%", + "600% to 1000%" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:In the evaluation of Mooncake versus vLLM on simulated long-context data, what was Mooncake's throughput improvement range?\nAnswer:", + " 10% to 25%" + ], + [ + "Question:In the evaluation of Mooncake versus vLLM on simulated long-context data, what was Mooncake's throughput improvement range?\nAnswer:", + " 30% to 75%" + ], + [ + "Question:In the evaluation of Mooncake versus vLLM on simulated long-context data, what was Mooncake's throughput improvement range?\nAnswer:", + " 50% to 525%" + ], + [ + "Question:In the evaluation of Mooncake versus vLLM on simulated long-context data, what was Mooncake's throughput improvement range?\nAnswer:", + " 600% to 1000%" + ] + ], + "resps": [ + [ + [ + -20.490446090698242, + false + ] + ], + [ + [ + -21.809951782226562, + false + ] + ], + [ + [ + -28.085920333862305, + false + ] + ], + [ + [ + -22.367385864257812, + false + ] + ] + ], + "filtered_resps": [ + [ + -20.490446090698242, + false + ], + [ + -21.809951782226562, + false + ], + [ + -28.085920333862305, + false + ], + [ + -22.367385864257812, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "d29b4d3b3018678369da35496faf2781c1cd2413e158c32855218e1cc61af667", + "prompt_hash": "996d24f8f92ece7fb356f59ef65a55f3015130f5e67c6565b61d20902e9c837c", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + 
"acc_norm": 0.0 + }, + { + "doc_id": 20, + "doc": { + "question": "What is the primary purpose of the FEVER dataset?", + "choices": [ + "To verify factual claims using evidence from Wikipedia", + "To evaluate conversational abilities of AI assistants", + "To generate multi-hop reasoning chains", + "To benchmark cross-lingual retrieval tasks" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the primary purpose of the FEVER dataset?\nAnswer:", + " To verify factual claims using evidence from Wikipedia" + ], + [ + "Question:What is the primary purpose of the FEVER dataset?\nAnswer:", + " To evaluate conversational abilities of AI assistants" + ], + [ + "Question:What is the primary purpose of the FEVER dataset?\nAnswer:", + " To generate multi-hop reasoning chains" + ], + [ + "Question:What is the primary purpose of the FEVER dataset?\nAnswer:", + " To benchmark cross-lingual retrieval tasks" + ] + ], + "resps": [ + [ + [ + -27.64024543762207, + false + ] + ], + [ + [ + -29.950502395629883, + false + ] + ], + [ + [ + -21.078857421875, + false + ] + ], + [ + [ + -24.49301528930664, + false + ] + ] + ], + "filtered_resps": [ + [ + -27.64024543762207, + false + ], + [ + -29.950502395629883, + false + ], + [ + -21.078857421875, + false + ], + [ + -24.49301528930664, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "cbe34ffff85183cd109608a0dcc923042e88190c33685761b9a3196d82235d94", + "prompt_hash": "7711b67fd53913f5056986c72fa2a4002ba3a489c2518ce1ec791fcccf202504", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 21, + "doc": { + "question": "How many questions are included in the MMLU-med dataset?", + "choices": [ + "1,273", + "4,183", + "1,089", + "1,000" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many questions are included in the MMLU-med dataset?\nAnswer:", + " 1,273" + ], + [ + "Question:How many questions are included in the MMLU-med dataset?\nAnswer:", + " 4,183" + ], + [ + "Question:How many questions are included in the MMLU-med dataset?\nAnswer:", + " 1,089" + ], + [ + "Question:How many questions are included in the MMLU-med dataset?\nAnswer:", + " 1,000" + ] + ], + "resps": [ + [ + [ + -13.105728149414062, + false + ] + ], + [ + [ + -14.59390926361084, + false + ] + ], + [ + [ + -12.9570894241333, + false + ] + ], + [ + [ + -8.63340950012207, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.105728149414062, + false + ], + [ + -14.59390926361084, + false + ], + [ + -12.9570894241333, + false + ], + [ + -8.63340950012207, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "1c2a3feddd0a24b3633fc2cfc1e6137d92c681e6bcb5ac792da9785f22329b9d", + "prompt_hash": "2849090bd6bbb607b5e19bb26891b41e60931e3fb66a3eb6384318e73d63ef7e", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 22, + "doc": { + "question": "What is the main characteristic of the PopQA dataset?", + "choices": [ + "It focuses on cross-lingual question answering", + "It targets long-tail entities with low Wikipedia page views", + "It verifies biomedical hypotheses using abstracts", + "It tests multi-hop reasoning across documents" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the main characteristic of the PopQA dataset?\nAnswer:", + " It focuses on cross-lingual question 
answering" + ], + [ + "Question:What is the main characteristic of the PopQA dataset?\nAnswer:", + " It targets long-tail entities with low Wikipedia page views" + ], + [ + "Question:What is the main characteristic of the PopQA dataset?\nAnswer:", + " It verifies biomedical hypotheses using abstracts" + ], + [ + "Question:What is the main characteristic of the PopQA dataset?\nAnswer:", + " It tests multi-hop reasoning across documents" + ] + ], + "resps": [ + [ + [ + -21.004573822021484, + false + ] + ], + [ + [ + -43.161983489990234, + false + ] + ], + [ + [ + -46.05027770996094, + false + ] + ], + [ + [ + -30.60395050048828, + false + ] + ] + ], + "filtered_resps": [ + [ + -21.004573822021484, + false + ], + [ + -43.161983489990234, + false + ], + [ + -46.05027770996094, + false + ], + [ + -30.60395050048828, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "402c31403bb0f6373c60bdbb8ae1740ddc46d18ede9eeb6c93717377641878ba", + "prompt_hash": "a55011ae2acadc35c2994a6c5e1267c86e1a7cd3c7062ac6d9d09d7d06d4ea21", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 23, + "doc": { + "question": "Which dataset comprises four-option multiple-choice questions derived from the US Medical Licensing Examination?", + "choices": [ + "MedQA", + "MedMCQA", + "PubMedQA", + "BioASQ" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which dataset comprises four-option multiple-choice questions derived from the US Medical Licensing Examination?\nAnswer:", + " MedQA" + ], + [ + "Question:Which dataset comprises four-option multiple-choice questions derived from the US Medical Licensing Examination?\nAnswer:", + " MedMCQA" + ], + [ + "Question:Which dataset comprises four-option multiple-choice questions derived from the US Medical Licensing Examination?\nAnswer:", + " PubMedQA" + ], + [ + "Question:Which dataset comprises four-option multiple-choice questions derived from the US Medical Licensing Examination?\nAnswer:", + " BioASQ" + ] + ], + "resps": [ + [ + [ + -12.405452728271484, + false + ] + ], + [ + [ + -13.451678276062012, + false + ] + ], + [ + [ + -11.517139434814453, + false + ] + ], + [ + [ + -15.579611778259277, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.405452728271484, + false + ], + [ + -13.451678276062012, + false + ], + [ + -11.517139434814453, + false + ], + [ + -15.579611778259277, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "ff39eacbadfde0220c466c7c4682e2f8dea522c56595934cfd1e44f27d98ab7e", + "prompt_hash": "b326f1ed4d75097c5ee290924fd998b1cd403d4c50c0f32066da74d9b18bfab0", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 24, + "doc": { + "question": "Which dataset consists of 4,183 questions from Indian medical entrance exams?", + "choices": [ + "MedQA", + "MedMCQA", + "MMLU-med", + "BioASQ" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which dataset consists of 4,183 questions from Indian medical entrance exams?\nAnswer:", + " MedQA" + ], + [ + "Question:Which dataset consists of 4,183 questions from Indian medical entrance exams?\nAnswer:", + " MedMCQA" + ], + [ + "Question:Which dataset consists of 4,183 questions from Indian medical entrance exams?\nAnswer:", + " MMLU-med" + ], + [ + "Question:Which dataset consists of 4,183 questions from Indian medical entrance 
exams?\nAnswer:", + " BioASQ" + ] + ], + "resps": [ + [ + [ + -10.04144287109375, + false + ] + ], + [ + [ + -11.477560997009277, + false + ] + ], + [ + [ + -17.35746955871582, + false + ] + ], + [ + [ + -13.449769973754883, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.04144287109375, + false + ], + [ + -11.477560997009277, + false + ], + [ + -17.35746955871582, + false + ], + [ + -13.449769973754883, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7d73c5a4d61656c245d61a96508c21f0938457c79aadfb3371e93093c71724c4", + "prompt_hash": "4aa67d3abbe709aca8112d6fa1f1b6c58886840955ee28a445dc0bfff11d2f1c", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 25, + "doc": { + "question": "What is the maximum context window size supported by InternLM-XComposer-2.5 (IXC-2.5) through positional encoding extrapolation?", + "choices": [ + "32K", + "64K", + "96K", + "128K" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the maximum context window size supported by InternLM-XComposer-2.5 (IXC-2.5) through positional encoding extrapolation?\nAnswer:", + " 32K" + ], + [ + "Question:What is the maximum context window size supported by InternLM-XComposer-2.5 (IXC-2.5) through positional encoding extrapolation?\nAnswer:", + " 64K" + ], + [ + "Question:What is the maximum context window size supported by InternLM-XComposer-2.5 (IXC-2.5) through positional encoding extrapolation?\nAnswer:", + " 96K" + ], + [ + "Question:What is the maximum context window size supported by InternLM-XComposer-2.5 (IXC-2.5) through positional encoding extrapolation?\nAnswer:", + " 128K" + ] + ], + "resps": [ + [ + [ + -8.073480606079102, + false + ] + ], + [ + [ + -7.711792945861816, + false + ] + ], + [ + [ + -10.791585922241211, + false + ] + ], + [ + [ + -8.099313735961914, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.073480606079102, + false + ], + [ + -7.711792945861816, + false + ], + [ + -10.791585922241211, + false + ], + [ + -8.099313735961914, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "0f2c2f84db9ec7c913471865cd0b8c41e8a1692ee8beaca699c03fc401f5a85d", + "prompt_hash": "3dfe147b1ecd03739fc0f37cd3ad9e7b56c59a19ca2448842d4d8fee8295cf70", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 26, + "doc": { + "question": "Which vision encoder resolution does IXC-2.5 employ to enhance ultra-high-resolution image understanding?", + "choices": [ + "448×448", + "490×490", + "512×512", + "560×560" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:Which vision encoder resolution does IXC-2.5 employ to enhance ultra-high-resolution image understanding?\nAnswer:", + " 448×448" + ], + [ + "Question:Which vision encoder resolution does IXC-2.5 employ to enhance ultra-high-resolution image understanding?\nAnswer:", + " 490×490" + ], + [ + "Question:Which vision encoder resolution does IXC-2.5 employ to enhance ultra-high-resolution image understanding?\nAnswer:", + " 512×512" + ], + [ + "Question:Which vision encoder resolution does IXC-2.5 employ to enhance ultra-high-resolution image understanding?\nAnswer:", + " 560×560" + ] + ], + "resps": [ + [ + [ + -10.27751350402832, + false + ] + ], + [ + [ + -17.577922821044922, + false + ] + ], + [ + [ + -10.534107208251953, + false + ] + ], + [ + [ + 
-14.884693145751953, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.27751350402832, + false + ], + [ + -17.577922821044922, + false + ], + [ + -10.534107208251953, + false + ], + [ + -14.884693145751953, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "48f11e12cf84425b320fe029b209d3d5bfe7b85c4a83f61df33fc5f241d880e4", + "prompt_hash": "1de30132ffb210ada2cef34d8a4a9f774dec474bff03da29088e916500d1200a", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 27, + "doc": { + "question": "On the MVBench benchmark, how much higher is IXC-2.5's score compared to VideoChat2-7B, the previous SOTA method?", + "choices": [ + "+5.3%", + "+8.7%", + "+12.4%", + "+15.1%" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:On the MVBench benchmark, how much higher is IXC-2.5's score compared to VideoChat2-7B, the previous SOTA method?\nAnswer:", + " +5.3%" + ], + [ + "Question:On the MVBench benchmark, how much higher is IXC-2.5's score compared to VideoChat2-7B, the previous SOTA method?\nAnswer:", + " +8.7%" + ], + [ + "Question:On the MVBench benchmark, how much higher is IXC-2.5's score compared to VideoChat2-7B, the previous SOTA method?\nAnswer:", + " +12.4%" + ], + [ + "Question:On the MVBench benchmark, how much higher is IXC-2.5's score compared to VideoChat2-7B, the previous SOTA method?\nAnswer:", + " +15.1%" + ] + ], + "resps": [ + [ + [ + -19.748226165771484, + false + ] + ], + [ + [ + -20.179574966430664, + false + ] + ], + [ + [ + -20.353487014770508, + false + ] + ], + [ + [ + -20.995635986328125, + false + ] + ] + ], + "filtered_resps": [ + [ + -19.748226165771484, + false + ], + [ + -20.179574966430664, + false + ], + [ + -20.353487014770508, + false + ], + [ + -20.995635986328125, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f84ae4d8ba1bd54bf160f301466be73ae54e58ea8ba4276dab4c2f131148aaca", + "prompt_hash": "d5e8d82f36af7daf364fd683f1b027a61bf29c34d889c1fd0a4112ed02cdc5be", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 28, + "doc": { + "question": "Which datasets were used in IXC-2.5's pre-training phase for general semantic alignment?", + "choices": [ + "ShareGPT4V-PT, COCO, Nocaps, TextCaps, LAION, SBU, CC 3M, ALLaVA", + "VQAv2, GQA, OK-VQA, ALLaVA-QA, MMDU", + "WebSight v0.1/v0.2, Stack v2, Tailwind CSS dataset", + "DVQA, ChartQA, CLEVR-MATH, DeepForm" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which datasets were used in IXC-2.5's pre-training phase for general semantic alignment?\nAnswer:", + " ShareGPT4V-PT, COCO, Nocaps, TextCaps, LAION, SBU, CC 3M, ALLaVA" + ], + [ + "Question:Which datasets were used in IXC-2.5's pre-training phase for general semantic alignment?\nAnswer:", + " VQAv2, GQA, OK-VQA, ALLaVA-QA, MMDU" + ], + [ + "Question:Which datasets were used in IXC-2.5's pre-training phase for general semantic alignment?\nAnswer:", + " WebSight v0.1/v0.2, Stack v2, Tailwind CSS dataset" + ], + [ + "Question:Which datasets were used in IXC-2.5's pre-training phase for general semantic alignment?\nAnswer:", + " DVQA, ChartQA, CLEVR-MATH, DeepForm" + ] + ], + "resps": [ + [ + [ + -93.22857666015625, + false + ] + ], + [ + [ + -47.38764953613281, + false + ] + ], + [ + [ + -66.16090393066406, + false + ] + ], + [ + [ + -52.975765228271484, + false + ] + ] + ], + 
"filtered_resps": [ + [ + -93.22857666015625, + false + ], + [ + -47.38764953613281, + false + ], + [ + -66.16090393066406, + false + ], + [ + -52.975765228271484, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "91e14a9eff8495d2aee9943461e779417887d1ecb4f0cc5754ae98a45ead996d", + "prompt_hash": "0fba08fe9bf2110d9ebd4867bd6f6281c78969c2efa62433145f3c25e8eaaff2", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 29, + "doc": { + "question": "What is the LoRA rank used in IXC-2.5's screenshot-to-code training phase?", + "choices": [ + "128", + "256", + "384", + "512" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What is the LoRA rank used in IXC-2.5's screenshot-to-code training phase?\nAnswer:", + " 128" + ], + [ + "Question:What is the LoRA rank used in IXC-2.5's screenshot-to-code training phase?\nAnswer:", + " 256" + ], + [ + "Question:What is the LoRA rank used in IXC-2.5's screenshot-to-code training phase?\nAnswer:", + " 384" + ], + [ + "Question:What is the LoRA rank used in IXC-2.5's screenshot-to-code training phase?\nAnswer:", + " 512" + ] + ], + "resps": [ + [ + [ + -4.809566497802734, + false + ] + ], + [ + [ + -5.269222259521484, + false + ] + ], + [ + [ + -8.795157432556152, + false + ] + ], + [ + [ + -5.778667449951172, + false + ] + ] + ], + "filtered_resps": [ + [ + -4.809566497802734, + false + ], + [ + -5.269222259521484, + false + ], + [ + -8.795157432556152, + false + ], + [ + -5.778667449951172, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "2b02af2e42eeeec4a8bfe1ebed1524093c46feab1666cf52aa334ed26fe24d71", + "prompt_hash": "d92d5eeefe98aef00d3cf8cf5a50a1e9838edc655619525802339940a0371418", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 30, + "doc": { + "question": "What is the primary purpose of the RelD discriminator introduced in the paper?", + "choices": [ + "To generate bilingual question-answering datasets", + "To detect hallucinations in LLM-generated answers", + "To improve sliding window segmentation of long texts", + "To train large language models from scratch" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the primary purpose of the RelD discriminator introduced in the paper?\nAnswer:", + " To generate bilingual question-answering datasets" + ], + [ + "Question:What is the primary purpose of the RelD discriminator introduced in the paper?\nAnswer:", + " To detect hallucinations in LLM-generated answers" + ], + [ + "Question:What is the primary purpose of the RelD discriminator introduced in the paper?\nAnswer:", + " To improve sliding window segmentation of long texts" + ], + [ + "Question:What is the primary purpose of the RelD discriminator introduced in the paper?\nAnswer:", + " To train large language models from scratch" + ] + ], + "resps": [ + [ + [ + -31.954849243164062, + false + ] + ], + [ + [ + -20.652280807495117, + false + ] + ], + [ + [ + -38.18414306640625, + false + ] + ], + [ + [ + -22.242000579833984, + false + ] + ] + ], + "filtered_resps": [ + [ + -31.954849243164062, + false + ], + [ + -20.652280807495117, + false + ], + [ + -38.18414306640625, + false + ], + [ + -22.242000579833984, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": 
"f8715d4609cb5bd0ab47ee0f64b797ae754515a5d357c96e9d8936537c91a1e7", + "prompt_hash": "b5f243aca35069a0a609e6a4b62a5094d9167bd082cf70e213cd81152e70a692", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 31, + "doc": { + "question": "How many samples does the RelQA dataset contain in total?", + "choices": [ + "274,426 samples", + "1,372,130 samples", + "743,910 samples", + "628,220 samples" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:How many samples does the RelQA dataset contain in total?\nAnswer:", + " 274,426 samples" + ], + [ + "Question:How many samples does the RelQA dataset contain in total?\nAnswer:", + " 1,372,130 samples" + ], + [ + "Question:How many samples does the RelQA dataset contain in total?\nAnswer:", + " 743,910 samples" + ], + [ + "Question:How many samples does the RelQA dataset contain in total?\nAnswer:", + " 628,220 samples" + ] + ], + "resps": [ + [ + [ + -21.363178253173828, + false + ] + ], + [ + [ + -22.405603408813477, + false + ] + ], + [ + [ + -23.08317756652832, + false + ] + ], + [ + [ + -22.19939613342285, + false + ] + ] + ], + "filtered_resps": [ + [ + -21.363178253173828, + false + ], + [ + -22.405603408813477, + false + ], + [ + -23.08317756652832, + false + ], + [ + -22.19939613342285, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "d3f58d5d62b77f94ea2d5697f862acb17f8f7c1e25b69c1d8c43e08af54e2f04", + "prompt_hash": "38f2ec70ddb9d7d428e15946c1d8056264f914f972aab69e57de31b99df853b3", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 32, + "doc": { + "question": "Which pre-trained language model serves as the backbone of RelD?", + "choices": [ + "BERT", + "RoBERTa", + "ELECTRA", + "DeBERTa" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which pre-trained language model serves as the backbone of RelD?\nAnswer:", + " BERT" + ], + [ + "Question:Which pre-trained language model serves as the backbone of RelD?\nAnswer:", + " RoBERTa" + ], + [ + "Question:Which pre-trained language model serves as the backbone of RelD?\nAnswer:", + " ELECTRA" + ], + [ + "Question:Which pre-trained language model serves as the backbone of RelD?\nAnswer:", + " DeBERTa" + ] + ], + "resps": [ + [ + [ + -4.401993751525879, + false + ] + ], + [ + [ + -5.599353790283203, + false + ] + ], + [ + [ + -8.7052583694458, + false + ] + ], + [ + [ + -7.099686145782471, + false + ] + ] + ], + "filtered_resps": [ + [ + -4.401993751525879, + false + ], + [ + -5.599353790283203, + false + ], + [ + -8.7052583694458, + false + ], + [ + -7.099686145782471, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7e3932664074d45608cc62d85b4eaf400fcbe60f05f32f31d1ccad4f3a7268e0", + "prompt_hash": "ce210da2c71a35dbf6414602d2fe6c56508da12047c61004b12c3c94875361cb", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 33, + "doc": { + "question": "Which four types of metrics are used to evaluate the reliability of LLM-generated answers in RelQA?", + "choices": [ + "Accuracy, F1, BLEU, and ROUGE", + "LLM-assessment, human, machine, and composite metrics", + "Similarity, diversity, overlap, and embedding metrics", + "Precision, recall, diversity, and entropy metrics" + ], + "answer": 1 + }, + "target": 1, + 
"arguments": [ + [ + "Question:Which four types of metrics are used to evaluate the reliability of LLM-generated answers in RelQA?\nAnswer:", + " Accuracy, F1, BLEU, and ROUGE" + ], + [ + "Question:Which four types of metrics are used to evaluate the reliability of LLM-generated answers in RelQA?\nAnswer:", + " LLM-assessment, human, machine, and composite metrics" + ], + [ + "Question:Which four types of metrics are used to evaluate the reliability of LLM-generated answers in RelQA?\nAnswer:", + " Similarity, diversity, overlap, and embedding metrics" + ], + [ + "Question:Which four types of metrics are used to evaluate the reliability of LLM-generated answers in RelQA?\nAnswer:", + " Precision, recall, diversity, and entropy metrics" + ] + ], + "resps": [ + [ + [ + -15.617055892944336, + false + ] + ], + [ + [ + -41.019859313964844, + false + ] + ], + [ + [ + -42.50098419189453, + false + ] + ], + [ + [ + -33.57671356201172, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.617055892944336, + false + ], + [ + -41.019859313964844, + false + ], + [ + -42.50098419189453, + false + ], + [ + -33.57671356201172, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "c00e773de89a2acc29af1502161379c84980fbbb886884125ab8cbda482d5ba2", + "prompt_hash": "c7f2336b716039b45f75c7bee32aa77a60218ce7f2aca6f12585b3c778aa3337", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 34, + "doc": { + "question": "What is the learning rate set for training RelD?", + "choices": [ + "1e-05", + "5e-05", + "2e-05", + "3e-04" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the learning rate set for training RelD?\nAnswer:", + " 1e-05" + ], + [ + "Question:What is the learning rate set for training RelD?\nAnswer:", + " 5e-05" + ], + [ + "Question:What is the learning rate set for training RelD?\nAnswer:", + " 2e-05" + ], + [ + "Question:What is the learning rate set for training RelD?\nAnswer:", + " 3e-04" + ] + ], + "resps": [ + [ + [ + -9.810700416564941, + false + ] + ], + [ + [ + -11.288911819458008, + false + ] + ], + [ + [ + -11.6030912399292, + false + ] + ], + [ + [ + -12.463712692260742, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.810700416564941, + false + ], + [ + -11.288911819458008, + false + ], + [ + -11.6030912399292, + false + ], + [ + -12.463712692260742, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "86aed700016830d9aeb0a64b4d9f1b0b79c0d532f85d4b81a31553de5527b86e", + "prompt_hash": "a227d0d87d2da3b0353bc9d72add06c0edb62611e79dcf170ccae53537461e24", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 35, + "doc": { + "question": "What is the main advantage of using supervised semantic tokens in CosyVoice?", + "choices": [ + "They reduce the size of the model", + "They improve content consistency and speaker similarity", + "They speed up the training process", + "They eliminate the need for Mel spectrograms" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the main advantage of using supervised semantic tokens in CosyVoice?\nAnswer:", + " They reduce the size of the model" + ], + [ + "Question:What is the main advantage of using supervised semantic tokens in CosyVoice?\nAnswer:", + " They improve content consistency and speaker similarity" + ], + [ + "Question:What is the 
main advantage of using supervised semantic tokens in CosyVoice?\nAnswer:", + " They speed up the training process" + ], + [ + "Question:What is the main advantage of using supervised semantic tokens in CosyVoice?\nAnswer:", + " They eliminate the need for Mel spectrograms" + ] + ], + "resps": [ + [ + [ + -16.160953521728516, + false + ] + ], + [ + [ + -23.697269439697266, + false + ] + ], + [ + [ + -14.877523422241211, + false + ] + ], + [ + [ + -23.25432777404785, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.160953521728516, + false + ], + [ + -23.697269439697266, + false + ], + [ + -14.877523422241211, + false + ], + [ + -23.25432777404785, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "32f9666275762353507dd197f4616bf5e893a724a0fdf6fbdae89cf1ad4752b6", + "prompt_hash": "a2fd5519d6723f41f66469ab69bdc64db9b62dee24c06d3f546dfa8ddd5dbe22", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 36, + "doc": { + "question": "Which technique does CosyVoice employ instead of diffusion probabilistic models (DDPM) for faster training and inference?", + "choices": [ + "Generative Adversarial Networks", + "Conditional Flow Matching", + "Recurrent Neural Networks", + "Variational Autoencoders" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which technique does CosyVoice employ instead of diffusion probabilistic models (DDPM) for faster training and inference?\nAnswer:", + " Generative Adversarial Networks" + ], + [ + "Question:Which technique does CosyVoice employ instead of diffusion probabilistic models (DDPM) for faster training and inference?\nAnswer:", + " Conditional Flow Matching" + ], + [ + "Question:Which technique does CosyVoice employ instead of diffusion probabilistic models (DDPM) for faster training and inference?\nAnswer:", + " Recurrent Neural Networks" + ], + [ + "Question:Which technique does CosyVoice employ instead of diffusion probabilistic models (DDPM) for faster training and inference?\nAnswer:", + " Variational Autoencoders" + ] + ], + "resps": [ + [ + [ + -7.09904146194458, + false + ] + ], + [ + [ + -17.3647518157959, + false + ] + ], + [ + [ + -10.57017707824707, + false + ] + ], + [ + [ + -10.066808700561523, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.09904146194458, + false + ], + [ + -17.3647518157959, + false + ], + [ + -10.57017707824707, + false + ], + [ + -10.066808700561523, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f90748ba30d980c60f68438996820d4d143ace3468c3d3363b6e9f8b67a555f8", + "prompt_hash": "b4be1101c4959c8e45f1584f97ab24176d0b2cf2ab9983d2f030b79fff120f29", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 37, + "doc": { + "question": "How many codes are included in the single codebook used by the supervised semantic speech tokenizer?", + "choices": [ + "1,024", + "2,048", + "4,096", + "8,192" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many codes are included in the single codebook used by the supervised semantic speech tokenizer?\nAnswer:", + " 1,024" + ], + [ + "Question:How many codes are included in the single codebook used by the supervised semantic speech tokenizer?\nAnswer:", + " 2,048" + ], + [ + "Question:How many codes are included in the single codebook used by the supervised semantic speech 
tokenizer?\nAnswer:", + " 4,096" + ], + [ + "Question:How many codes are included in the single codebook used by the supervised semantic speech tokenizer?\nAnswer:", + " 8,192" + ] + ], + "resps": [ + [ + [ + -10.077766418457031, + false + ] + ], + [ + [ + -11.457889556884766, + false + ] + ], + [ + [ + -10.916120529174805, + false + ] + ], + [ + [ + -14.03486442565918, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.077766418457031, + false + ], + [ + -11.457889556884766, + false + ], + [ + -10.916120529174805, + false + ], + [ + -14.03486442565918, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "6edd2ab81831c1f356526e910ee0f90df1e2589eb38db06f14ab76ab986e0f65", + "prompt_hash": "797df02bbcfc8d676a055ee518485d78dc7d9013519df369e7bfd8ddc9ad3112", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 38, + "doc": { + "question": "On the LibriTTS test-clean set, what word error rate (WER) does CosyVoice achieve after ASR re-ranking?", + "choices": [ + "2.89%", + "1.51%", + "3.17%", + "8.32%" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:On the LibriTTS test-clean set, what word error rate (WER) does CosyVoice achieve after ASR re-ranking?\nAnswer:", + " 2.89%" + ], + [ + "Question:On the LibriTTS test-clean set, what word error rate (WER) does CosyVoice achieve after ASR re-ranking?\nAnswer:", + " 1.51%" + ], + [ + "Question:On the LibriTTS test-clean set, what word error rate (WER) does CosyVoice achieve after ASR re-ranking?\nAnswer:", + " 3.17%" + ], + [ + "Question:On the LibriTTS test-clean set, what word error rate (WER) does CosyVoice achieve after ASR re-ranking?\nAnswer:", + " 8.32%" + ] + ], + "resps": [ + [ + [ + -14.48992919921875, + false + ] + ], + [ + [ + -14.496801376342773, + false + ] + ], + [ + [ + -14.643253326416016, + false + ] + ], + [ + [ + -14.531301498413086, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.48992919921875, + false + ], + [ + -14.496801376342773, + false + ], + [ + -14.643253326416016, + false + ], + [ + -14.531301498413086, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "d52e2878a69c9abff4521937fa3f88ff1cc0b707d0dd5a8a8e6cf741df6a6ceb", + "prompt_hash": "47292ce2c33805e39769dd3f1458ca53301685a873d6e66320ebd6d129b198bf", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 39, + "doc": { + "question": "Which emotion showed the greatest improvement in control accuracy when using CosyVoice-instruct compared to CosyVoice-base?", + "choices": [ + "Happy", + "Sad", + "Disgusted", + "Fearful" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which emotion showed the greatest improvement in control accuracy when using CosyVoice-instruct compared to CosyVoice-base?\nAnswer:", + " Happy" + ], + [ + "Question:Which emotion showed the greatest improvement in control accuracy when using CosyVoice-instruct compared to CosyVoice-base?\nAnswer:", + " Sad" + ], + [ + "Question:Which emotion showed the greatest improvement in control accuracy when using CosyVoice-instruct compared to CosyVoice-base?\nAnswer:", + " Disgusted" + ], + [ + "Question:Which emotion showed the greatest improvement in control accuracy when using CosyVoice-instruct compared to CosyVoice-base?\nAnswer:", + " Fearful" + ] + ], + "resps": [ + [ + [ + -7.383396148681641, + false + ] + 
], + [ + [ + -8.395179748535156, + false + ] + ], + [ + [ + -15.370882987976074, + false + ] + ], + [ + [ + -10.768058776855469, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.383396148681641, + false + ], + [ + -8.395179748535156, + false + ], + [ + -15.370882987976074, + false + ], + [ + -10.768058776855469, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b74c37d3171aa5ca64392e549cde202d5fb7ea554fc7574d3e69b67e5e0e0ff6", + "prompt_hash": "6ab38ced6f896b1247e9a87a0a715bb24483db0e6ba01b45e0d480ef27562f5b", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 40, + "doc": { + "question": "What is the main advantage of WTConv over standard depth-wise convolutions?", + "choices": [ + "It reduces the number of trainable parameters quadratically", + "It achieves a larger receptive field with logarithmic parameter growth", + "It uses attention mechanisms instead of convolutions", + "It converts the input entirely into the frequency domain" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the main advantage of WTConv over standard depth-wise convolutions?\nAnswer:", + " It reduces the number of trainable parameters quadratically" + ], + [ + "Question:What is the main advantage of WTConv over standard depth-wise convolutions?\nAnswer:", + " It achieves a larger receptive field with logarithmic parameter growth" + ], + [ + "Question:What is the main advantage of WTConv over standard depth-wise convolutions?\nAnswer:", + " It uses attention mechanisms instead of convolutions" + ], + [ + "Question:What is the main advantage of WTConv over standard depth-wise convolutions?\nAnswer:", + " It converts the input entirely into the frequency domain" + ] + ], + "resps": [ + [ + [ + -22.36676025390625, + false + ] + ], + [ + [ + -45.17839050292969, + false + ] + ], + [ + [ + -18.9272518157959, + false + ] + ], + [ + [ + -33.368980407714844, + false + ] + ] + ], + "filtered_resps": [ + [ + -22.36676025390625, + false + ], + [ + -45.17839050292969, + false + ], + [ + -18.9272518157959, + false + ], + [ + -33.368980407714844, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "bb9319f6f6523e6ef30fa02d44e6c00f46a83477073de301e0af4660f8332c1d", + "prompt_hash": "7c42e02ea55d1ec0818a0a5ad17920e6c29f4796dd7632d58084bb510f9888df", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 41, + "doc": { + "question": "Which wavelet basis is used in WTConv for efficiency?", + "choices": [ + "Fourier Transform", + "Haar Wavelet Transform", + "Daubechies Wavelet Transform", + "Discrete Cosine Transform" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which wavelet basis is used in WTConv for efficiency?\nAnswer:", + " Fourier Transform" + ], + [ + "Question:Which wavelet basis is used in WTConv for efficiency?\nAnswer:", + " Haar Wavelet Transform" + ], + [ + "Question:Which wavelet basis is used in WTConv for efficiency?\nAnswer:", + " Daubechies Wavelet Transform" + ], + [ + "Question:Which wavelet basis is used in WTConv for efficiency?\nAnswer:", + " Discrete Cosine Transform" + ] + ], + "resps": [ + [ + [ + -10.868124008178711, + false + ] + ], + [ + [ + -13.273340225219727, + false + ] + ], + [ + [ + -20.367637634277344, + false + ] + ], + [ + [ + -7.96662712097168, + false + ] + ] + ], + "filtered_resps": 
[ + [ + -10.868124008178711, + false + ], + [ + -13.273340225219727, + false + ], + [ + -20.367637634277344, + false + ], + [ + -7.96662712097168, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "8600813d0c358ab4185c7d49a3cb37e183c975e849c39676fde3a2728aec51e2", + "prompt_hash": "ec9b5a9e71d2ce56edf7cbb8827ec8527ac3518a2695db961815d6c6df323004", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 42, + "doc": { + "question": "What accuracy improvement does WTConvNeXt-T achieve over ConvNeXt-T in ImageNet-C mean corruption error (mCE)?", + "choices": [ + "53.2% to 52.0%", + "41.6% to 39.0%", + "55.0% to 50.0%", + "60.5% to 58.0%" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What accuracy improvement does WTConvNeXt-T achieve over ConvNeXt-T in ImageNet-C mean corruption error (mCE)?\nAnswer:", + " 53.2% to 52.0%" + ], + [ + "Question:What accuracy improvement does WTConvNeXt-T achieve over ConvNeXt-T in ImageNet-C mean corruption error (mCE)?\nAnswer:", + " 41.6% to 39.0%" + ], + [ + "Question:What accuracy improvement does WTConvNeXt-T achieve over ConvNeXt-T in ImageNet-C mean corruption error (mCE)?\nAnswer:", + " 55.0% to 50.0%" + ], + [ + "Question:What accuracy improvement does WTConvNeXt-T achieve over ConvNeXt-T in ImageNet-C mean corruption error (mCE)?\nAnswer:", + " 60.5% to 58.0%" + ] + ], + "resps": [ + [ + [ + -36.595645904541016, + false + ] + ], + [ + [ + -38.15071105957031, + false + ] + ], + [ + [ + -34.00459289550781, + false + ] + ], + [ + [ + -36.84385681152344, + false + ] + ] + ], + "filtered_resps": [ + [ + -36.595645904541016, + false + ], + [ + -38.15071105957031, + false + ], + [ + -34.00459289550781, + false + ], + [ + -36.84385681152344, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "6da62bf2f48179bd7ba682edf10f2964d3717e3a6cd687e2adf2a18abcdc82e7", + "prompt_hash": "c5bffff41cccf839939a88f2eba6f43dbda372e284f2e9876b56ee8d8ea5de3e", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 43, + "doc": { + "question": "How does WTConvNeXt affect shape-bias compared to ConvNeXt?", + "choices": [ + "It decreases the network's shape-bias", + "It keeps the shape-bias unchanged", + "It significantly increases the shape-bias", + "It eliminates texture-based decisions entirely" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How does WTConvNeXt affect shape-bias compared to ConvNeXt?\nAnswer:", + " It decreases the network's shape-bias" + ], + [ + "Question:How does WTConvNeXt affect shape-bias compared to ConvNeXt?\nAnswer:", + " It keeps the shape-bias unchanged" + ], + [ + "Question:How does WTConvNeXt affect shape-bias compared to ConvNeXt?\nAnswer:", + " It significantly increases the shape-bias" + ], + [ + "Question:How does WTConvNeXt affect shape-bias compared to ConvNeXt?\nAnswer:", + " It eliminates texture-based decisions entirely" + ] + ], + "resps": [ + [ + [ + -20.318492889404297, + false + ] + ], + [ + [ + -19.004671096801758, + false + ] + ], + [ + [ + -15.258134841918945, + false + ] + ], + [ + [ + -38.38514709472656, + false + ] + ] + ], + "filtered_resps": [ + [ + -20.318492889404297, + false + ], + [ + -19.004671096801758, + false + ], + [ + -15.258134841918945, + false + ], + [ + -38.38514709472656, + false + ] + ], + "filter": "none", + 
"metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "163c68f81c202eaa477d5551c1c8c8d183e8e1dc6ba0b7e692cf5f2546bd3469", + "prompt_hash": "aa579b89dca17c357b86632fbae490cab4afe1b2802815f55b17e112975080cd", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 44, + "doc": { + "question": "What is the primary reason WTConv achieves better robustness under image corruptions?", + "choices": [ + "It focuses entirely on high-frequency information", + "It integrates Vision Transformer attention mechanisms", + "It emphasizes low-frequency components through wavelet decomposition", + "It replaces convolutional layers with pooling layers" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the primary reason WTConv achieves better robustness under image corruptions?\nAnswer:", + " It focuses entirely on high-frequency information" + ], + [ + "Question:What is the primary reason WTConv achieves better robustness under image corruptions?\nAnswer:", + " It integrates Vision Transformer attention mechanisms" + ], + [ + "Question:What is the primary reason WTConv achieves better robustness under image corruptions?\nAnswer:", + " It emphasizes low-frequency components through wavelet decomposition" + ], + [ + "Question:What is the primary reason WTConv achieves better robustness under image corruptions?\nAnswer:", + " It replaces convolutional layers with pooling layers" + ] + ], + "resps": [ + [ + [ + -25.85192108154297, + false + ] + ], + [ + [ + -28.19028091430664, + false + ] + ], + [ + [ + -27.371051788330078, + false + ] + ], + [ + [ + -21.658126831054688, + false + ] + ] + ], + "filtered_resps": [ + [ + -25.85192108154297, + false + ], + [ + -28.19028091430664, + false + ], + [ + -27.371051788330078, + false + ], + [ + -21.658126831054688, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "6bc68414e1b2a06374963af51331f539c7fad1173ed20e9990a150fe18ffd896", + "prompt_hash": "3e8d462f256feaf421b3dee7d20bac60366c94311ce2b0a57250257b68dbc4d0", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 45, + "doc": { + "question": "Which model was used as the base model for all experiments in the study?", + "choices": [ + "GPT-4-0125-preview", + "Llama-2-70B-chat", + "PaLM-2", + "Claude-3" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which model was used as the base model for all experiments in the study?\nAnswer:", + " GPT-4-0125-preview" + ], + [ + "Question:Which model was used as the base model for all experiments in the study?\nAnswer:", + " Llama-2-70B-chat" + ], + [ + "Question:Which model was used as the base model for all experiments in the study?\nAnswer:", + " PaLM-2" + ], + [ + "Question:Which model was used as the base model for all experiments in the study?\nAnswer:", + " Claude-3" + ] + ], + "resps": [ + [ + [ + -11.283348083496094, + false + ] + ], + [ + [ + -16.040966033935547, + false + ] + ], + [ + [ + -14.582185745239258, + false + ] + ], + [ + [ + -12.037026405334473, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.283348083496094, + false + ], + [ + -16.040966033935547, + false + ], + [ + -14.582185745239258, + false + ], + [ + -12.037026405334473, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "1383145086897e76bb0d4776b6c1e6e80782890a7ed056a1f72b9c1316943d93", + 
"prompt_hash": "c7d8e5ee70ecb3ea4ac98b838f06b89f9fe0824e8eb73f877a757d10423cbcf7", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 46, + "doc": { + "question": "What was the achieved accuracy after distilling the 2-Step Rephrase and Respond (RaR) method into a System 1 Llama-2-70B-chat model on the last letter concatenation task?", + "choices": [ + "44.5%", + "56.1%", + "98.0%", + "69.5%" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What was the achieved accuracy after distilling the 2-Step Rephrase and Respond (RaR) method into a System 1 Llama-2-70B-chat model on the last letter concatenation task?\nAnswer:", + " 44.5%" + ], + [ + "Question:What was the achieved accuracy after distilling the 2-Step Rephrase and Respond (RaR) method into a System 1 Llama-2-70B-chat model on the last letter concatenation task?\nAnswer:", + " 56.1%" + ], + [ + "Question:What was the achieved accuracy after distilling the 2-Step Rephrase and Respond (RaR) method into a System 1 Llama-2-70B-chat model on the last letter concatenation task?\nAnswer:", + " 98.0%" + ], + [ + "Question:What was the achieved accuracy after distilling the 2-Step Rephrase and Respond (RaR) method into a System 1 Llama-2-70B-chat model on the last letter concatenation task?\nAnswer:", + " 69.5%" + ] + ], + "resps": [ + [ + [ + -15.09747314453125, + false + ] + ], + [ + [ + -14.72747802734375, + false + ] + ], + [ + [ + -13.923602104187012, + false + ] + ], + [ + [ + -14.50871467590332, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.09747314453125, + false + ], + [ + -14.72747802734375, + false + ], + [ + -13.923602104187012, + false + ], + [ + -14.50871467590332, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "e60644694d85dc759b2c978a6ff884c55b4a5e79508100ae0e6c472092b69dec", + "prompt_hash": "fb69b33d2d19e03a16ce11134296fc4d84787079fc51f3d1a2112d672dab78c3", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 47, + "doc": { + "question": "How does System 2 Attention (S2A) primarily improve reasoning performance?", + "choices": [ + "By adding more training data to reduce overfitting", + "By rewriting the input to remove biases or irrelevant information", + "By introducing more intermediate reasoning steps", + "By increasing the model size and token limit" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:How does System 2 Attention (S2A) primarily improve reasoning performance?\nAnswer:", + " By adding more training data to reduce overfitting" + ], + [ + "Question:How does System 2 Attention (S2A) primarily improve reasoning performance?\nAnswer:", + " By rewriting the input to remove biases or irrelevant information" + ], + [ + "Question:How does System 2 Attention (S2A) primarily improve reasoning performance?\nAnswer:", + " By introducing more intermediate reasoning steps" + ], + [ + "Question:How does System 2 Attention (S2A) primarily improve reasoning performance?\nAnswer:", + " By increasing the model size and token limit" + ] + ], + "resps": [ + [ + [ + -30.416637420654297, + false + ] + ], + [ + [ + -42.379478454589844, + false + ] + ], + [ + [ + -22.114347457885742, + false + ] + ], + [ + [ + -35.356021881103516, + false + ] + ] + ], + "filtered_resps": [ + [ + -30.416637420654297, + false + ], + [ + -42.379478454589844, + false + ], + [ + 
-22.114347457885742, + false + ], + [ + -35.356021881103516, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f6fc0d851d2fdff1716418a5330d89655fae039bfe7cfebe205dac52103a1eea", + "prompt_hash": "947a69f16e4b2bc4a7c5f9520c71e19f88c02112f65ee62363a8077b994d2cd5", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 48, + "doc": { + "question": "What technique was used to ensure the quality of distillation data for Branch-Solve-Merge (BSM)?", + "choices": [ + "Cross-entropy minimization", + "Majority voting across outputs", + "Self-consistency under input perturbations", + "Manual annotation of all samples" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What technique was used to ensure the quality of distillation data for Branch-Solve-Merge (BSM)?\nAnswer:", + " Cross-entropy minimization" + ], + [ + "Question:What technique was used to ensure the quality of distillation data for Branch-Solve-Merge (BSM)?\nAnswer:", + " Majority voting across outputs" + ], + [ + "Question:What technique was used to ensure the quality of distillation data for Branch-Solve-Merge (BSM)?\nAnswer:", + " Self-consistency under input perturbations" + ], + [ + "Question:What technique was used to ensure the quality of distillation data for Branch-Solve-Merge (BSM)?\nAnswer:", + " Manual annotation of all samples" + ] + ], + "resps": [ + [ + [ + -17.37027359008789, + false + ] + ], + [ + [ + -24.0562744140625, + false + ] + ], + [ + [ + -29.128202438354492, + false + ] + ], + [ + [ + -19.985612869262695, + false + ] + ] + ], + "filtered_resps": [ + [ + -17.37027359008789, + false + ], + [ + -24.0562744140625, + false + ], + [ + -29.128202438354492, + false + ], + [ + -19.985612869262695, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "36344b17e47b97610171de862ccd12375a23e6f878dc0a1d2cfe111f23838953", + "prompt_hash": "87290dac7772ef7346b3ef0d92b9f5bf5574ad93388b41a8ad56a49a56d96e45", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 49, + "doc": { + "question": "Which reasoning task showed poor performance when applying System 2 distillation, indicating that it could not be effectively distilled?", + "choices": [ + "Last letter concatenation task", + "Coin flip reasoning task", + "SycophancyEval biased QA task", + "GSM8k math problem-solving task" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:Which reasoning task showed poor performance when applying System 2 distillation, indicating that it could not be effectively distilled?\nAnswer:", + " Last letter concatenation task" + ], + [ + "Question:Which reasoning task showed poor performance when applying System 2 distillation, indicating that it could not be effectively distilled?\nAnswer:", + " Coin flip reasoning task" + ], + [ + "Question:Which reasoning task showed poor performance when applying System 2 distillation, indicating that it could not be effectively distilled?\nAnswer:", + " SycophancyEval biased QA task" + ], + [ + "Question:Which reasoning task showed poor performance when applying System 2 distillation, indicating that it could not be effectively distilled?\nAnswer:", + " GSM8k math problem-solving task" + ] + ], + "resps": [ + [ + [ + -26.82872772216797, + false + ] + ], + [ + [ + -20.605525970458984, + false + ] + ], + [ + [ + 
-49.651554107666016, + false + ] + ], + [ + [ + -26.793594360351562, + false + ] + ] + ], + "filtered_resps": [ + [ + -26.82872772216797, + false + ], + [ + -20.605525970458984, + false + ], + [ + -49.651554107666016, + false + ], + [ + -26.793594360351562, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "0c5a64651e593f7334dc76475ccce742ed0be1ae580af4895bb8dc289b0cda68", + "prompt_hash": "2c9437c3ff992b9dd1cb1a469649859a1c6719d05eb3060708ccfffa956091b7", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 50, + "doc": { + "question": "What is the precisely measured distance to pulsar PSR J0437−4715 based on radio timing observations?", + "choices": [ + "156.98 ± 0.15 pc", + "129.4 ± 0.5 pc", + "174.0 ± 0.2 pc", + "200.15 ± 1.0 pc" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the precisely measured distance to pulsar PSR J0437−4715 based on radio timing observations?\nAnswer:", + " 156.98 ± 0.15 pc" + ], + [ + "Question:What is the precisely measured distance to pulsar PSR J0437−4715 based on radio timing observations?\nAnswer:", + " 129.4 ± 0.5 pc" + ], + [ + "Question:What is the precisely measured distance to pulsar PSR J0437−4715 based on radio timing observations?\nAnswer:", + " 174.0 ± 0.2 pc" + ], + [ + "Question:What is the precisely measured distance to pulsar PSR J0437−4715 based on radio timing observations?\nAnswer:", + " 200.15 ± 1.0 pc" + ] + ], + "resps": [ + [ + [ + -23.42844009399414, + false + ] + ], + [ + [ + -19.135929107666016, + false + ] + ], + [ + [ + -20.670236587524414, + false + ] + ], + [ + [ + -28.07659149169922, + false + ] + ] + ], + "filtered_resps": [ + [ + -23.42844009399414, + false + ], + [ + -19.135929107666016, + false + ], + [ + -20.670236587524414, + false + ], + [ + -28.07659149169922, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "9136d143f6fbec7e4ca9165be4d452f05f6ad1a30173e9d1e6a91fe102584000", + "prompt_hash": "a50819b0366d5f70b4b331b6f66c1549975ffb36782f46e713520207c6b2cfda", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 51, + "doc": { + "question": "Which background modeling method used by NICER provides quantified statistical uncertainties in the net background spectrum?", + "choices": [ + "Space Weather estimate", + "3C50 background model", + "XMM-MOS1 background fit", + "Delta dataset filtering" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which background modeling method used by NICER provides quantified statistical uncertainties in the net background spectrum?\nAnswer:", + " Space Weather estimate" + ], + [ + "Question:Which background modeling method used by NICER provides quantified statistical uncertainties in the net background spectrum?\nAnswer:", + " 3C50 background model" + ], + [ + "Question:Which background modeling method used by NICER provides quantified statistical uncertainties in the net background spectrum?\nAnswer:", + " XMM-MOS1 background fit" + ], + [ + "Question:Which background modeling method used by NICER provides quantified statistical uncertainties in the net background spectrum?\nAnswer:", + " Delta dataset filtering" + ] + ], + "resps": [ + [ + [ + -23.84143829345703, + false + ] + ], + [ + [ + -15.207002639770508, + false + ] + ], + [ + [ + -34.21397018432617, + false + ] + ], + [ + [ + 
-29.099979400634766, + false + ] + ] + ], + "filtered_resps": [ + [ + -23.84143829345703, + false + ], + [ + -15.207002639770508, + false + ], + [ + -34.21397018432617, + false + ], + [ + -29.099979400634766, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "d29a9f90f4dd47e62853c3571f7346e637325b11296cc948f9f1ecdbda8a1ae4", + "prompt_hash": "7bd2c5d239ce1590ef0c12d0ebdcae61e4f703fada9defdf904df81a3da6d226", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 52, + "doc": { + "question": "What is the measured spin frequency of PSR J0437−4715?", + "choices": [ + "174 Hz", + "129 Hz", + "210 Hz", + "98 Hz" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the measured spin frequency of PSR J0437−4715?\nAnswer:", + " 174 Hz" + ], + [ + "Question:What is the measured spin frequency of PSR J0437−4715?\nAnswer:", + " 129 Hz" + ], + [ + "Question:What is the measured spin frequency of PSR J0437−4715?\nAnswer:", + " 210 Hz" + ], + [ + "Question:What is the measured spin frequency of PSR J0437−4715?\nAnswer:", + " 98 Hz" + ] + ], + "resps": [ + [ + [ + -11.427151679992676, + false + ] + ], + [ + [ + -10.656230926513672, + false + ] + ], + [ + [ + -10.553150177001953, + false + ] + ], + [ + [ + -10.735501289367676, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.427151679992676, + false + ], + [ + -10.656230926513672, + false + ], + [ + -10.553150177001953, + false + ], + [ + -10.735501289367676, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "a3317615d62bc3dac178890e48f3b4bac1a63795c3104864a252cde455139a6b", + "prompt_hash": "7b73b6789f9631e1f0f035a051058ba0607731cd7fb355c18416ec15184b1031", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 53, + "doc": { + "question": "Which software package is used to perform Bayesian parameter estimation by modeling X-ray emission from PSR J0437−4715?", + "choices": [ + "HEASoft", + "X-PSI", + "SAS", + "NSX" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which software package is used to perform Bayesian parameter estimation by modeling X-ray emission from PSR J0437−4715?\nAnswer:", + " HEASoft" + ], + [ + "Question:Which software package is used to perform Bayesian parameter estimation by modeling X-ray emission from PSR J0437−4715?\nAnswer:", + " X-PSI" + ], + [ + "Question:Which software package is used to perform Bayesian parameter estimation by modeling X-ray emission from PSR J0437−4715?\nAnswer:", + " SAS" + ], + [ + "Question:Which software package is used to perform Bayesian parameter estimation by modeling X-ray emission from PSR J0437−4715?\nAnswer:", + " NSX" + ] + ], + "resps": [ + [ + [ + -11.748787879943848, + false + ] + ], + [ + [ + -11.375994682312012, + false + ] + ], + [ + [ + -8.618194580078125, + false + ] + ], + [ + [ + -13.982128143310547, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.748787879943848, + false + ], + [ + -11.375994682312012, + false + ], + [ + -8.618194580078125, + false + ], + [ + -13.982128143310547, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "40c129c953387082b60e7d8a841f9c300b1e201c6b80864f06642485aa6ff0a4", + "prompt_hash": "17722b0305f01c9020f7b3f55e579eacc373f25594dd32a17c1693d7bcaa088c", + "target_hash": 
"6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 54, + "doc": { + "question": "In the NICER observations, what is the approximate contribution of the nearby AGN RX J0437.4−4711 to the 0.3–3 keV band?", + "choices": [ + "About 0.2 counts/s", + "About 1.5 counts/s", + "About 3.0 counts/s", + "About 5.0 counts/s" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:In the NICER observations, what is the approximate contribution of the nearby AGN RX J0437.4−4711 to the 0.3–3 keV band?\nAnswer:", + " About 0.2 counts/s" + ], + [ + "Question:In the NICER observations, what is the approximate contribution of the nearby AGN RX J0437.4−4711 to the 0.3–3 keV band?\nAnswer:", + " About 1.5 counts/s" + ], + [ + "Question:In the NICER observations, what is the approximate contribution of the nearby AGN RX J0437.4−4711 to the 0.3–3 keV band?\nAnswer:", + " About 3.0 counts/s" + ], + [ + "Question:In the NICER observations, what is the approximate contribution of the nearby AGN RX J0437.4−4711 to the 0.3–3 keV band?\nAnswer:", + " About 5.0 counts/s" + ] + ], + "resps": [ + [ + [ + -22.581018447875977, + false + ] + ], + [ + [ + -24.277639389038086, + false + ] + ], + [ + [ + -27.225370407104492, + false + ] + ], + [ + [ + -27.68436050415039, + false + ] + ] + ], + "filtered_resps": [ + [ + -22.581018447875977, + false + ], + [ + -24.277639389038086, + false + ], + [ + -27.225370407104492, + false + ], + [ + -27.68436050415039, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b32f1db2cede76d6f2cdf7c473fd967f1e38b84c0c10ee1f13c6bb56c16c79ba", + "prompt_hash": "7347866172412d992f39e909a8c48662828ecc18ea5b707099c207bbeb787234", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 55, + "doc": { + "question": "What is the primary purpose of EchoMimic in portrait animation?", + "choices": [ + "To generate portrait videos using both audio signals and facial landmarks", + "To synthesize 3D avatars from textual descriptions", + "To perform real-time lip synchronization using only 3D morphable models", + "To replace GAN-based approaches with Wav2Lip mechanisms" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the primary purpose of EchoMimic in portrait animation?\nAnswer:", + " To generate portrait videos using both audio signals and facial landmarks" + ], + [ + "Question:What is the primary purpose of EchoMimic in portrait animation?\nAnswer:", + " To synthesize 3D avatars from textual descriptions" + ], + [ + "Question:What is the primary purpose of EchoMimic in portrait animation?\nAnswer:", + " To perform real-time lip synchronization using only 3D morphable models" + ], + [ + "Question:What is the primary purpose of EchoMimic in portrait animation?\nAnswer:", + " To replace GAN-based approaches with Wav2Lip mechanisms" + ] + ], + "resps": [ + [ + [ + -35.45629119873047, + false + ] + ], + [ + [ + -22.518705368041992, + false + ] + ], + [ + [ + -41.64351272583008, + false + ] + ], + [ + [ + -52.94198226928711, + false + ] + ] + ], + "filtered_resps": [ + [ + -35.45629119873047, + false + ], + [ + -22.518705368041992, + false + ], + [ + -41.64351272583008, + false + ], + [ + -52.94198226928711, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "09fcb207ffa83b036d34b90fd8eb0c632f12ba8cfdc9f1187a854d65968ec3a8", + 
"prompt_hash": "86fb8321e982423113547efe693b5dcd70111ccc80a3075f4773c97e3a4aae1a", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 56, + "doc": { + "question": "Which architecture does EchoMimic's Denoising U-Net draw inspiration from?", + "choices": [ + "GAN-based MegaPortraits", + "Stable Diffusion v1.5", + "Transformer-based GPT architecture", + "3D Morphable Models" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which architecture does EchoMimic's Denoising U-Net draw inspiration from?\nAnswer:", + " GAN-based MegaPortraits" + ], + [ + "Question:Which architecture does EchoMimic's Denoising U-Net draw inspiration from?\nAnswer:", + " Stable Diffusion v1.5" + ], + [ + "Question:Which architecture does EchoMimic's Denoising U-Net draw inspiration from?\nAnswer:", + " Transformer-based GPT architecture" + ], + [ + "Question:Which architecture does EchoMimic's Denoising U-Net draw inspiration from?\nAnswer:", + " 3D Morphable Models" + ] + ], + "resps": [ + [ + [ + -43.4454345703125, + false + ] + ], + [ + [ + -12.865447044372559, + false + ] + ], + [ + [ + -22.89692497253418, + false + ] + ], + [ + [ + -18.237205505371094, + false + ] + ] + ], + "filtered_resps": [ + [ + -43.4454345703125, + false + ], + [ + -12.865447044372559, + false + ], + [ + -22.89692497253418, + false + ], + [ + -18.237205505371094, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "e6976e2db7c66944c31a7d101b4a1fc4100abb4916f120c3eca5653f40f362a9", + "prompt_hash": "6e50180c89a59c87f3e94f877679ad6ef88b3aec3d9cd05c5b8d2210e0ea89e6", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 57, + "doc": { + "question": "What role does the Audio Encoder play in the EchoMimic framework?", + "choices": [ + "It extracts motion vectors from reference images", + "It encodes facial landmarks into latent vectors", + "It captures pronunciation and tonality features using Wav2Vec embeddings", + "It generates synthetic audio for video narration" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What role does the Audio Encoder play in the EchoMimic framework?\nAnswer:", + " It extracts motion vectors from reference images" + ], + [ + "Question:What role does the Audio Encoder play in the EchoMimic framework?\nAnswer:", + " It encodes facial landmarks into latent vectors" + ], + [ + "Question:What role does the Audio Encoder play in the EchoMimic framework?\nAnswer:", + " It captures pronunciation and tonality features using Wav2Vec embeddings" + ], + [ + "Question:What role does the Audio Encoder play in the EchoMimic framework?\nAnswer:", + " It generates synthetic audio for video narration" + ] + ], + "resps": [ + [ + [ + -34.38471984863281, + false + ] + ], + [ + [ + -26.634109497070312, + false + ] + ], + [ + [ + -49.42478561401367, + false + ] + ], + [ + [ + -31.105899810791016, + false + ] + ] + ], + "filtered_resps": [ + [ + -34.38471984863281, + false + ], + [ + -26.634109497070312, + false + ], + [ + -49.42478561401367, + false + ], + [ + -31.105899810791016, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "9d677c7e270dbe2b16981def7e4de61fddd0be1ea7456d9731b5a34dee522568", + "prompt_hash": "b7e0dd728da0f3b6498eef1a70257eaff55685def4651d494e2ca39bfc11f50f", + "target_hash": 
"d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 58, + "doc": { + "question": "Which metric evaluates the structural similarity between generated and ground truth videos?", + "choices": [ + "FID", + "SSIM", + "E-FID", + "FVD" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which metric evaluates the structural similarity between generated and ground truth videos?\nAnswer:", + " FID" + ], + [ + "Question:Which metric evaluates the structural similarity between generated and ground truth videos?\nAnswer:", + " SSIM" + ], + [ + "Question:Which metric evaluates the structural similarity between generated and ground truth videos?\nAnswer:", + " E-FID" + ], + [ + "Question:Which metric evaluates the structural similarity between generated and ground truth videos?\nAnswer:", + " FVD" + ] + ], + "resps": [ + [ + [ + -6.555878639221191, + false + ] + ], + [ + [ + -6.250504493713379, + false + ] + ], + [ + [ + -14.620190620422363, + false + ] + ], + [ + [ + -7.614960670471191, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.555878639221191, + false + ], + [ + -6.250504493713379, + false + ], + [ + -14.620190620422363, + false + ], + [ + -7.614960670471191, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "886503ac67e97bca5a9de5a56393902b6df58329bb12d6031ef7d4013109082d", + "prompt_hash": "811a868f67d2ec860fff8a94061f5ec88bf6b45090c1b838da2763ff4bfd75ae", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 59, + "doc": { + "question": "On the HDTF dataset, what is EchoMimic's Fréchet Inception Distance (FID) score?", + "choices": [ + "41.535", + "37.659", + "29.136", + "53.143" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:On the HDTF dataset, what is EchoMimic's Fréchet Inception Distance (FID) score?\nAnswer:", + " 41.535" + ], + [ + "Question:On the HDTF dataset, what is EchoMimic's Fréchet Inception Distance (FID) score?\nAnswer:", + " 37.659" + ], + [ + "Question:On the HDTF dataset, what is EchoMimic's Fréchet Inception Distance (FID) score?\nAnswer:", + " 29.136" + ], + [ + "Question:On the HDTF dataset, what is EchoMimic's Fréchet Inception Distance (FID) score?\nAnswer:", + " 53.143" + ] + ], + "resps": [ + [ + [ + -15.374874114990234, + false + ] + ], + [ + [ + -15.217829704284668, + false + ] + ], + [ + [ + -15.107488632202148, + false + ] + ], + [ + [ + -16.111392974853516, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.374874114990234, + false + ], + [ + -15.217829704284668, + false + ], + [ + -15.107488632202148, + false + ], + [ + -16.111392974853516, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "91387f5df300c340c54a96119253cb331876a51247f16e1ee4a9b9cf43338efa", + "prompt_hash": "49cfff5c4848fe783889bc41c7a1a73832c0b168020a74022d71ead9be7badae", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 60, + "doc": { + "question": "Which of the following partial differential equations (PDEs) is included in the systematic review's scope?", + "choices": [ + "Korteweg–de Vries (KdV)", + "Darcy flow", + "Reaction-diffusion", + "Schrodinger equation" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which of the following partial differential equations (PDEs) is included in the 
systematic review's scope?\nAnswer:", + " Korteweg–de Vries (KdV)" + ], + [ + "Question:Which of the following partial differential equations (PDEs) is included in the systematic review's scope?\nAnswer:", + " Darcy flow" + ], + [ + "Question:Which of the following partial differential equations (PDEs) is included in the systematic review's scope?\nAnswer:", + " Reaction-diffusion" + ], + [ + "Question:Which of the following partial differential equations (PDEs) is included in the systematic review's scope?\nAnswer:", + " Schrodinger equation" + ] + ], + "resps": [ + [ + [ + -15.173296928405762, + false + ] + ], + [ + [ + -12.69983196258545, + false + ] + ], + [ + [ + -12.843111991882324, + false + ] + ], + [ + [ + -11.093381881713867, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.173296928405762, + false + ], + [ + -12.69983196258545, + false + ], + [ + -12.843111991882324, + false + ], + [ + -11.093381881713867, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7ae96dc5fbdda19d7b2f4085705065a1176fc56e5bd7815308596acc0bbad20e", + "prompt_hash": "5ef7dc1b26033a68d8db77326c0f3859286daabd58adfabec08b0aa7135b612c", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 61, + "doc": { + "question": "Why are physics-informed neural network (PINN) methods excluded from the systematic review?", + "choices": [ + "They are too slow and inefficient for any PDEs", + "The literature is too large and they rarely outperform standard methods", + "They cannot solve fluid-related PDEs", + "They only work for inverse problems" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Why are physics-informed neural network (PINN) methods excluded from the systematic review?\nAnswer:", + " They are too slow and inefficient for any PDEs" + ], + [ + "Question:Why are physics-informed neural network (PINN) methods excluded from the systematic review?\nAnswer:", + " The literature is too large and they rarely outperform standard methods" + ], + [ + "Question:Why are physics-informed neural network (PINN) methods excluded from the systematic review?\nAnswer:", + " They cannot solve fluid-related PDEs" + ], + [ + "Question:Why are physics-informed neural network (PINN) methods excluded from the systematic review?\nAnswer:", + " They only work for inverse problems" + ] + ], + "resps": [ + [ + [ + -35.40483093261719, + false + ] + ], + [ + [ + -45.47227096557617, + false + ] + ], + [ + [ + -27.10033416748047, + false + ] + ], + [ + [ + -22.625762939453125, + false + ] + ] + ], + "filtered_resps": [ + [ + -35.40483093261719, + false + ], + [ + -45.47227096557617, + false + ], + [ + -27.10033416748047, + false + ], + [ + -22.625762939453125, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "d15fcbecf4c315a4ff5d8f95930e335c8a24e7646c955eaf0dfa6d648e779793", + "prompt_hash": "39f1f8d7d75d7a19e4298d2162bf7be88ded1d3acbaa453c06721139e58bd246", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 62, + "doc": { + "question": "Which hardware comparison is generally considered the fairest when evaluating ML-based PDE solvers?", + "choices": [ + "GPU-to-GPU or TPU-to-TPU comparisons", + "CPU-to-TPU comparisons", + "CPU-to-GPU comparisons only", + "There is no fair comparison method" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + 
"Question:Which hardware comparison is generally considered the fairest when evaluating ML-based PDE solvers?\nAnswer:", + " GPU-to-GPU or TPU-to-TPU comparisons" + ], + [ + "Question:Which hardware comparison is generally considered the fairest when evaluating ML-based PDE solvers?\nAnswer:", + " CPU-to-TPU comparisons" + ], + [ + "Question:Which hardware comparison is generally considered the fairest when evaluating ML-based PDE solvers?\nAnswer:", + " CPU-to-GPU comparisons only" + ], + [ + "Question:Which hardware comparison is generally considered the fairest when evaluating ML-based PDE solvers?\nAnswer:", + " There is no fair comparison method" + ] + ], + "resps": [ + [ + [ + -31.690475463867188, + false + ] + ], + [ + [ + -28.010419845581055, + false + ] + ], + [ + [ + -24.789653778076172, + false + ] + ], + [ + [ + -16.941194534301758, + false + ] + ] + ], + "filtered_resps": [ + [ + -31.690475463867188, + false + ], + [ + -28.010419845581055, + false + ], + [ + -24.789653778076172, + false + ], + [ + -16.941194534301758, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "6193794c56683e9761d433a69015d026a6ca3835797950eff3d94e5111d1347e", + "prompt_hash": "eb207fbf62331da84da86793e4428fc9b8395d436696beb3a47488680ba4da88", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 63, + "doc": { + "question": "What is the recommended numerical method for solving large elliptic PDE problems according to the review?", + "choices": [ + "Finite element methods (FEM)", + "Multigrid solvers", + "Pseudo-spectral methods", + "First-order finite volume methods" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the recommended numerical method for solving large elliptic PDE problems according to the review?\nAnswer:", + " Finite element methods (FEM)" + ], + [ + "Question:What is the recommended numerical method for solving large elliptic PDE problems according to the review?\nAnswer:", + " Multigrid solvers" + ], + [ + "Question:What is the recommended numerical method for solving large elliptic PDE problems according to the review?\nAnswer:", + " Pseudo-spectral methods" + ], + [ + "Question:What is the recommended numerical method for solving large elliptic PDE problems according to the review?\nAnswer:", + " First-order finite volume methods" + ] + ], + "resps": [ + [ + [ + -12.821867942810059, + false + ] + ], + [ + [ + -15.478704452514648, + false + ] + ], + [ + [ + -11.922616004943848, + false + ] + ], + [ + [ + -19.134483337402344, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.821867942810059, + false + ], + [ + -15.478704452514648, + false + ], + [ + -11.922616004943848, + false + ], + [ + -19.134483337402344, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "58520cf717a0b780213d59c994f481739f3e52565e02e4eb0daad4a7a72b0cfe", + "prompt_hash": "00e9209e4c905666d275b0a8d0a74e6aded50b08c2920900a20481a5baecdec2", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 64, + "doc": { + "question": "According to the random sample of PINN articles, what is the typical runtime difference between PINN-based solvers and standard numerical methods?", + "choices": [ + "PINNs usually solve PDEs faster than standard methods", + "PINNs and standard methods take about the same time", + "PINNs take hours to days, while 
standard methods take fractions of a second to minutes", + "PINNs are always unusable for PDEs" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:According to the random sample of PINN articles, what is the typical runtime difference between PINN-based solvers and standard numerical methods?\nAnswer:", + " PINNs usually solve PDEs faster than standard methods" + ], + [ + "Question:According to the random sample of PINN articles, what is the typical runtime difference between PINN-based solvers and standard numerical methods?\nAnswer:", + " PINNs and standard methods take about the same time" + ], + [ + "Question:According to the random sample of PINN articles, what is the typical runtime difference between PINN-based solvers and standard numerical methods?\nAnswer:", + " PINNs take hours to days, while standard methods take fractions of a second to minutes" + ], + [ + "Question:According to the random sample of PINN articles, what is the typical runtime difference between PINN-based solvers and standard numerical methods?\nAnswer:", + " PINNs are always unusable for PDEs" + ] + ], + "resps": [ + [ + [ + -23.198780059814453, + false + ] + ], + [ + [ + -23.7254581451416, + false + ] + ], + [ + [ + -40.41664505004883, + false + ] + ], + [ + [ + -35.51586151123047, + false + ] + ] + ], + "filtered_resps": [ + [ + -23.198780059814453, + false + ], + [ + -23.7254581451416, + false + ], + [ + -40.41664505004883, + false + ], + [ + -35.51586151123047, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "0781565615a4c1705bf91c1eb93c8ab0ddcb3fba972b5dd172d9240e3eff6815", + "prompt_hash": "ef3ffa2c12f24e51f8b0ad1128e0de7ebde64d0918297ca1ad76056e276d0fd0", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 65, + "doc": { + "question": "What is the primary computational bottleneck in the Transformer architecture?", + "choices": [ + "The embedding layer", + "The attention mechanism", + "The feed-forward network", + "The positional encoding" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the primary computational bottleneck in the Transformer architecture?\nAnswer:", + " The embedding layer" + ], + [ + "Question:What is the primary computational bottleneck in the Transformer architecture?\nAnswer:", + " The attention mechanism" + ], + [ + "Question:What is the primary computational bottleneck in the Transformer architecture?\nAnswer:", + " The feed-forward network" + ], + [ + "Question:What is the primary computational bottleneck in the Transformer architecture?\nAnswer:", + " The positional encoding" + ] + ], + "resps": [ + [ + [ + -9.642364501953125, + false + ] + ], + [ + [ + -5.322152137756348, + false + ] + ], + [ + [ + -10.473891258239746, + false + ] + ], + [ + [ + -10.967617988586426, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.642364501953125, + false + ], + [ + -5.322152137756348, + false + ], + [ + -10.473891258239746, + false + ], + [ + -10.967617988586426, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7e3ba293ac28ebcb6bc67aa7c4670e4575db05560c9ad4da3828fec161c63b26", + "prompt_hash": "06fdde3cc2dae51beb96619eea06c16242ba38200b0bcea9af91338a8c5eb969", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 66, + "doc": { + "question": "Which hardware feature of the NVIDIA 
Hopper architecture allows asynchronous data transfer between GMEM and SMEM?", + "choices": [ + "Tensor Core Units", + "Warpgroup Synchronizer", + "Tensor Memory Accelerator (TMA)", + "Shared Memory Controller" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which hardware feature of the NVIDIA Hopper architecture allows asynchronous data transfer between GMEM and SMEM?\nAnswer:", + " Tensor Core Units" + ], + [ + "Question:Which hardware feature of the NVIDIA Hopper architecture allows asynchronous data transfer between GMEM and SMEM?\nAnswer:", + " Warpgroup Synchronizer" + ], + [ + "Question:Which hardware feature of the NVIDIA Hopper architecture allows asynchronous data transfer between GMEM and SMEM?\nAnswer:", + " Tensor Memory Accelerator (TMA)" + ], + [ + "Question:Which hardware feature of the NVIDIA Hopper architecture allows asynchronous data transfer between GMEM and SMEM?\nAnswer:", + " Shared Memory Controller" + ] + ], + "resps": [ + [ + [ + -19.239320755004883, + false + ] + ], + [ + [ + -22.14812469482422, + false + ] + ], + [ + [ + -17.409900665283203, + false + ] + ], + [ + [ + -12.405678749084473, + false + ] + ] + ], + "filtered_resps": [ + [ + -19.239320755004883, + false + ], + [ + -22.14812469482422, + false + ], + [ + -17.409900665283203, + false + ], + [ + -12.405678749084473, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "52aacdf905d0e61171afc685cc7773cafc53632d7a01b4b11ccc86819e553951", + "prompt_hash": "d6a79f677f76cb627347b56319183e66e84f27fcddac5fc25e47ee201666e73f", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 67, + "doc": { + "question": "In FP8 WGMMA, which data layout is required for input operands stored in shared memory?", + "choices": [ + "mn-major or k-major format", + "k-major format only", + "mn-major format only", + "Any layout is acceptable" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:In FP8 WGMMA, which data layout is required for input operands stored in shared memory?\nAnswer:", + " mn-major or k-major format" + ], + [ + "Question:In FP8 WGMMA, which data layout is required for input operands stored in shared memory?\nAnswer:", + " k-major format only" + ], + [ + "Question:In FP8 WGMMA, which data layout is required for input operands stored in shared memory?\nAnswer:", + " mn-major format only" + ], + [ + "Question:In FP8 WGMMA, which data layout is required for input operands stored in shared memory?\nAnswer:", + " Any layout is acceptable" + ] + ], + "resps": [ + [ + [ + -44.76097869873047, + false + ] + ], + [ + [ + -30.443592071533203, + false + ] + ], + [ + [ + -39.042701721191406, + false + ] + ], + [ + [ + -16.37194061279297, + false + ] + ] + ], + "filtered_resps": [ + [ + -44.76097869873047, + false + ], + [ + -30.443592071533203, + false + ], + [ + -39.042701721191406, + false + ], + [ + -16.37194061279297, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "3df4c138fca5f6f0e388e4f554b918ab379e9b676b3a53b59f1c0365fd71a244", + "prompt_hash": "ce26067df1fa7fce65bb596a35a024471813ff953d7445adf72449a7edd5aaa7", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 68, + "doc": { + "question": "Which technique is used in FlashAttention-3 to reduce quantization error when working with outlier features in FP8?", + "choices": [ 
+ "Per-tensor scaling", + "Block quantization and incoherent processing", + "Global normalization", + "Gradient clipping" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which technique is used in FlashAttention-3 to reduce quantization error when working with outlier features in FP8?\nAnswer:", + " Per-tensor scaling" + ], + [ + "Question:Which technique is used in FlashAttention-3 to reduce quantization error when working with outlier features in FP8?\nAnswer:", + " Block quantization and incoherent processing" + ], + [ + "Question:Which technique is used in FlashAttention-3 to reduce quantization error when working with outlier features in FP8?\nAnswer:", + " Global normalization" + ], + [ + "Question:Which technique is used in FlashAttention-3 to reduce quantization error when working with outlier features in FP8?\nAnswer:", + " Gradient clipping" + ] + ], + "resps": [ + [ + [ + -11.715656280517578, + false + ] + ], + [ + [ + -30.47322654724121, + false + ] + ], + [ + [ + -12.290731430053711, + false + ] + ], + [ + [ + -9.121477127075195, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.715656280517578, + false + ], + [ + -30.47322654724121, + false + ], + [ + -12.290731430053711, + false + ], + [ + -9.121477127075195, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "659b27d6a215f0d1bd0a042e0e9f87d82c34e968562612e66ab4102f92140f4e", + "prompt_hash": "c0392c6a289c354ed702f9046c58b831c42e59519727b65c3659310965b1cae2", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 69, + "doc": { + "question": "What is the reported forward pass speedup of FP16 FlashAttention-3 compared to FlashAttention-2?", + "choices": [ + "1.1-1.3×", + "1.5-2.0×", + "2.5-3.0×", + "3.5-4.0×" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the reported forward pass speedup of FP16 FlashAttention-3 compared to FlashAttention-2?\nAnswer:", + " 1.1-1.3×" + ], + [ + "Question:What is the reported forward pass speedup of FP16 FlashAttention-3 compared to FlashAttention-2?\nAnswer:", + " 1.5-2.0×" + ], + [ + "Question:What is the reported forward pass speedup of FP16 FlashAttention-3 compared to FlashAttention-2?\nAnswer:", + " 2.5-3.0×" + ], + [ + "Question:What is the reported forward pass speedup of FP16 FlashAttention-3 compared to FlashAttention-2?\nAnswer:", + " 3.5-4.0×" + ] + ], + "resps": [ + [ + [ + -18.02304458618164, + false + ] + ], + [ + [ + -16.681089401245117, + false + ] + ], + [ + [ + -17.597490310668945, + false + ] + ], + [ + [ + -18.239065170288086, + false + ] + ] + ], + "filtered_resps": [ + [ + -18.02304458618164, + false + ], + [ + -16.681089401245117, + false + ], + [ + -17.597490310668945, + false + ], + [ + -18.239065170288086, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "e17537da619793a1d35dcb4e674e82c52561bb7ccde7d0107b2dc1b5b43f5f34", + "prompt_hash": "ba403d403f84b992092e620a1958457ffee7449f194e2c948118cabd50d95bdf", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 70, + "doc": { + "question": "What is the total number of parameters of Qwen2-Audio?", + "choices": [ + "7.0B", + "8.2B", + "6.5B", + "9.1B" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the total number of parameters of Qwen2-Audio?\nAnswer:", + " 7.0B" + ], + [ + 
"Question:What is the total number of parameters of Qwen2-Audio?\nAnswer:", + " 8.2B" + ], + [ + "Question:What is the total number of parameters of Qwen2-Audio?\nAnswer:", + " 6.5B" + ], + [ + "Question:What is the total number of parameters of Qwen2-Audio?\nAnswer:", + " 9.1B" + ] + ], + "resps": [ + [ + [ + -12.461862564086914, + false + ] + ], + [ + [ + -11.596549987792969, + false + ] + ], + [ + [ + -11.168014526367188, + false + ] + ], + [ + [ + -12.721345901489258, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.461862564086914, + false + ], + [ + -11.596549987792969, + false + ], + [ + -11.168014526367188, + false + ], + [ + -12.721345901489258, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "36a5445979717a321cc191b2cdd44409d3c6cbc42665d07539fe017c4fece54c", + "prompt_hash": "8c8ae3b282fe9e0927e9272d4db4e1edbb49e97b0ce12de5197d8ca0751518ff", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 71, + "doc": { + "question": "Which model serves as the initialization for the audio encoder of Qwen2-Audio?", + "choices": [ + "Whisper-large-v3", + "Qwen-7B", + "SpeechVerse", + "Gemini-1.5-pro" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which model serves as the initialization for the audio encoder of Qwen2-Audio?\nAnswer:", + " Whisper-large-v3" + ], + [ + "Question:Which model serves as the initialization for the audio encoder of Qwen2-Audio?\nAnswer:", + " Qwen-7B" + ], + [ + "Question:Which model serves as the initialization for the audio encoder of Qwen2-Audio?\nAnswer:", + " SpeechVerse" + ], + [ + "Question:Which model serves as the initialization for the audio encoder of Qwen2-Audio?\nAnswer:", + " Gemini-1.5-pro" + ] + ], + "resps": [ + [ + [ + -16.041715621948242, + false + ] + ], + [ + [ + -15.517387390136719, + false + ] + ], + [ + [ + -23.270618438720703, + false + ] + ], + [ + [ + -16.757564544677734, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.041715621948242, + false + ], + [ + -15.517387390136719, + false + ], + [ + -23.270618438720703, + false + ], + [ + -16.757564544677734, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "d4189a0ea2d13c666df4cfc81425a4981f563d9bbfbde9b32211dc1ab1f5b690", + "prompt_hash": "8cde8791295b635430528cc664bb33b82b4f4f77628c2a110026f29abd1b9917", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 72, + "doc": { + "question": "On the CoVoST2 dataset, what was Qwen2-Audio’s BLEU score for English-to-Chinese (en-zh) speech-to-text translation?", + "choices": [ + "33.1", + "41.5", + "45.2", + "24.4" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:On the CoVoST2 dataset, what was Qwen2-Audio’s BLEU score for English-to-Chinese (en-zh) speech-to-text translation?\nAnswer:", + " 33.1" + ], + [ + "Question:On the CoVoST2 dataset, what was Qwen2-Audio’s BLEU score for English-to-Chinese (en-zh) speech-to-text translation?\nAnswer:", + " 41.5" + ], + [ + "Question:On the CoVoST2 dataset, what was Qwen2-Audio’s BLEU score for English-to-Chinese (en-zh) speech-to-text translation?\nAnswer:", + " 45.2" + ], + [ + "Question:On the CoVoST2 dataset, what was Qwen2-Audio’s BLEU score for English-to-Chinese (en-zh) speech-to-text translation?\nAnswer:", + " 24.4" + ] + ], + "resps": [ + [ + [ + -9.055087089538574, + false + ] + ], + [ + [ + 
-9.025057792663574, + false + ] + ], + [ + [ + -9.17648696899414, + false + ] + ], + [ + [ + -8.53309154510498, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.055087089538574, + false + ], + [ + -9.025057792663574, + false + ], + [ + -9.17648696899414, + false + ], + [ + -8.53309154510498, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "e255050272f4c1de3100e6a9e2c84d550470786f60c389f2b16edf1748416d92", + "prompt_hash": "d7110a0ee4e475f98bf211259b08ab7d2c5dfc613990469e965d97520b9882f0", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 73, + "doc": { + "question": "In voice chat mode, how are users expected to interact with Qwen2-Audio?", + "choices": [ + "By issuing commands through structured tags only", + "By engaging in voice conversations with unrestricted dialogue", + "By providing only textual commands for offline analysis", + "By uploading audio files for automatic transcription" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:In voice chat mode, how are users expected to interact with Qwen2-Audio?\nAnswer:", + " By issuing commands through structured tags only" + ], + [ + "Question:In voice chat mode, how are users expected to interact with Qwen2-Audio?\nAnswer:", + " By engaging in voice conversations with unrestricted dialogue" + ], + [ + "Question:In voice chat mode, how are users expected to interact with Qwen2-Audio?\nAnswer:", + " By providing only textual commands for offline analysis" + ], + [ + "Question:In voice chat mode, how are users expected to interact with Qwen2-Audio?\nAnswer:", + " By uploading audio files for automatic transcription" + ] + ], + "resps": [ + [ + [ + -41.54837417602539, + false + ] + ], + [ + [ + -37.847511291503906, + false + ] + ], + [ + [ + -46.505123138427734, + false + ] + ], + [ + [ + -26.315853118896484, + false + ] + ] + ], + "filtered_resps": [ + [ + -41.54837417602539, + false + ], + [ + -37.847511291503906, + false + ], + [ + -46.505123138427734, + false + ], + [ + -26.315853118896484, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "ea51e35a78949b693f0927adcdcbb4d74c2265f706f1dae0b0078b0df21494c6", + "prompt_hash": "7b6f1647be3f0fab5db5482826d1fadd78909d114faee55c3f711217f56dd859", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 74, + "doc": { + "question": "On the Librispeech test-clean dataset, what Word Error Rate (WER) did Qwen2-Audio achieve?", + "choices": [ + "2.0%", + "1.6%", + "3.4%", + "2.4%" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:On the Librispeech test-clean dataset, what Word Error Rate (WER) did Qwen2-Audio achieve?\nAnswer:", + " 2.0%" + ], + [ + "Question:On the Librispeech test-clean dataset, what Word Error Rate (WER) did Qwen2-Audio achieve?\nAnswer:", + " 1.6%" + ], + [ + "Question:On the Librispeech test-clean dataset, what Word Error Rate (WER) did Qwen2-Audio achieve?\nAnswer:", + " 3.4%" + ], + [ + "Question:On the Librispeech test-clean dataset, what Word Error Rate (WER) did Qwen2-Audio achieve?\nAnswer:", + " 2.4%" + ] + ], + "resps": [ + [ + [ + -14.759432792663574, + false + ] + ], + [ + [ + -14.206562042236328, + false + ] + ], + [ + [ + -14.24560546875, + false + ] + ], + [ + [ + -14.26672649383545, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.759432792663574, + false + ], 
+ [ + -14.206562042236328, + false + ], + [ + -14.24560546875, + false + ], + [ + -14.26672649383545, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "da4aa04a709b7560efdf38c0d06ed4427fe1f08aa21e2f69fc46ce5c3a2c86ee", + "prompt_hash": "7c7c13464cd82ac0be19496dfcb8324f810a6fab804a26e7a03a1b7b13fd66f2", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 75, + "doc": { + "question": "What is the surrogate LLM used in the experiments?", + "choices": [ + "gpt-2", + "LLaMA3-70b", + "GPT3.5", + "BERT" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the surrogate LLM used in the experiments?\nAnswer:", + " gpt-2" + ], + [ + "Question:What is the surrogate LLM used in the experiments?\nAnswer:", + " LLaMA3-70b" + ], + [ + "Question:What is the surrogate LLM used in the experiments?\nAnswer:", + " GPT3.5" + ], + [ + "Question:What is the surrogate LLM used in the experiments?\nAnswer:", + " BERT" + ] + ], + "resps": [ + [ + [ + -8.955815315246582, + false + ] + ], + [ + [ + -11.751947402954102, + false + ] + ], + [ + [ + -6.5450286865234375, + false + ] + ], + [ + [ + -7.920544147491455, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.955815315246582, + false + ], + [ + -11.751947402954102, + false + ], + [ + -6.5450286865234375, + false + ], + [ + -7.920544147491455, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "4a88247828c8f65da2cd023e803e88a1c6608c741bde2221dfd34031cdfd2f42", + "prompt_hash": "dc64d8747fe9a2cb0affa9df7dac972f5670662c4c675aec047337d950ba6669", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 76, + "doc": { + "question": "How many poisoned instances were injected for Agent-Driver in the experiments?", + "choices": [ + "20", + "10", + "4", + "2" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:How many poisoned instances were injected for Agent-Driver in the experiments?\nAnswer:", + " 20" + ], + [ + "Question:How many poisoned instances were injected for Agent-Driver in the experiments?\nAnswer:", + " 10" + ], + [ + "Question:How many poisoned instances were injected for Agent-Driver in the experiments?\nAnswer:", + " 4" + ], + [ + "Question:How many poisoned instances were injected for Agent-Driver in the experiments?\nAnswer:", + " 2" + ] + ], + "resps": [ + [ + [ + -4.250162124633789, + false + ] + ], + [ + [ + -3.488814353942871, + false + ] + ], + [ + [ + -3.920569896697998, + false + ] + ], + [ + [ + -3.1337132453918457, + false + ] + ] + ], + "filtered_resps": [ + [ + -4.250162124633789, + false + ], + [ + -3.488814353942871, + false + ], + [ + -3.920569896697998, + false + ], + [ + -3.1337132453918457, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "20464913b9534ff408468a1f95f8958de14c3cd42204762ef42250e30d8d1651", + "prompt_hash": "bdcd08af0934c3af61aae0936cdf4fad05cbe1058f4b25e99c350f19b2f7d7dc", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 77, + "doc": { + "question": "Which retriever model is adopted as the contrastive retriever for ReAct-StrategyQA?", + "choices": [ + "DPR", + "REALM", + "BM25", + "ORQA" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which retriever model is adopted as 
the contrastive retriever for ReAct-StrategyQA?\nAnswer:", + " DPR" + ], + [ + "Question:Which retriever model is adopted as the contrastive retriever for ReAct-StrategyQA?\nAnswer:", + " REALM" + ], + [ + "Question:Which retriever model is adopted as the contrastive retriever for ReAct-StrategyQA?\nAnswer:", + " BM25" + ], + [ + "Question:Which retriever model is adopted as the contrastive retriever for ReAct-StrategyQA?\nAnswer:", + " ORQA" + ] + ], + "resps": [ + [ + [ + -7.363696098327637, + false + ] + ], + [ + [ + -10.3474760055542, + false + ] + ], + [ + [ + -5.98929500579834, + false + ] + ], + [ + [ + -15.886300086975098, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.363696098327637, + false + ], + [ + -10.3474760055542, + false + ], + [ + -5.98929500579834, + false + ], + [ + -15.886300086975098, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "19c0ce4a7df7efcbf4a36fbaf8f3bfff57ad8a7c82889a59823d14ecd7b9adbb", + "prompt_hash": "a30cd2e4ac0962c29654038025c04b00bca127955e10339d2bb80e7c8a8b36a2", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 78, + "doc": { + "question": "What is the attack target action for EHRAgent in the experiments?", + "choices": [ + "delete data of patient ID", + "sudden stop", + "I don't know", + "output random text" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the attack target action for EHRAgent in the experiments?\nAnswer:", + " delete data of patient ID" + ], + [ + "Question:What is the attack target action for EHRAgent in the experiments?\nAnswer:", + " sudden stop" + ], + [ + "Question:What is the attack target action for EHRAgent in the experiments?\nAnswer:", + " I don't know" + ], + [ + "Question:What is the attack target action for EHRAgent in the experiments?\nAnswer:", + " output random text" + ] + ], + "resps": [ + [ + [ + -23.995281219482422, + false + ] + ], + [ + [ + -16.279922485351562, + false + ] + ], + [ + [ + -12.055635452270508, + false + ] + ], + [ + [ + -20.101879119873047, + false + ] + ] + ], + "filtered_resps": [ + [ + -23.995281219482422, + false + ], + [ + -16.279922485351562, + false + ], + [ + -12.055635452270508, + false + ], + [ + -20.101879119873047, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "5d1ccd809307220bac91b010edf697bb9722ebf7ae3ab6259bf2cedb88970cb1", + "prompt_hash": "27a600525351da0238a0a909ca0511e3f9f89742b1be812a583a12b1fa3c8c77", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 79, + "doc": { + "question": "What type of poisoning strategy is used for Agent-Driver?", + "choices": [ + "spurious correlation", + "adversarial backdoor", + "jailbreak prompts", + "gradient masking" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What type of poisoning strategy is used for Agent-Driver?\nAnswer:", + " spurious correlation" + ], + [ + "Question:What type of poisoning strategy is used for Agent-Driver?\nAnswer:", + " adversarial backdoor" + ], + [ + "Question:What type of poisoning strategy is used for Agent-Driver?\nAnswer:", + " jailbreak prompts" + ], + [ + "Question:What type of poisoning strategy is used for Agent-Driver?\nAnswer:", + " gradient masking" + ] + ], + "resps": [ + [ + [ + -13.441458702087402, + false + ] + ], + [ + [ + -13.96750545501709, + false + ] + ], + [ + [ + 
-14.220865249633789, + false + ] + ], + [ + [ + -12.348712921142578, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.441458702087402, + false + ], + [ + -13.96750545501709, + false + ], + [ + -14.220865249633789, + false + ], + [ + -12.348712921142578, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "d0c29abf8504cc8a487eec700cbc60e9c4a4c5875b48c80d52c13f41baf401f9", + "prompt_hash": "685d77301b39a898b2f0d0096f143aaf107f546574081bbae97b4bd9d7e3029b", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 80, + "doc": { + "question": "What is the core representation used to model dynamic scenes in the described method?", + "choices": [ + "Volumetric voxel grids", + "Canonical 3D Gaussians", + "Polygonal meshes", + "Point clouds with implicit surfaces" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the core representation used to model dynamic scenes in the described method?\nAnswer:", + " Volumetric voxel grids" + ], + [ + "Question:What is the core representation used to model dynamic scenes in the described method?\nAnswer:", + " Canonical 3D Gaussians" + ], + [ + "Question:What is the core representation used to model dynamic scenes in the described method?\nAnswer:", + " Polygonal meshes" + ], + [ + "Question:What is the core representation used to model dynamic scenes in the described method?\nAnswer:", + " Point clouds with implicit surfaces" + ] + ], + "resps": [ + [ + [ + -17.52205467224121, + false + ] + ], + [ + [ + -18.49441146850586, + false + ] + ], + [ + [ + -12.976879119873047, + false + ] + ], + [ + [ + -21.736690521240234, + false + ] + ] + ], + "filtered_resps": [ + [ + -17.52205467224121, + false + ], + [ + -18.49441146850586, + false + ], + [ + -12.976879119873047, + false + ], + [ + -21.736690521240234, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "4a71a8710b5b4843c89b5e6094a6155d44f27f5426dd622bfa594c5ed1e7ac15", + "prompt_hash": "2a45fd7aa1fbba28a6cd62c8d01844656671ea39f3b50e8c8d38853203338ad5", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 81, + "doc": { + "question": "Which dataset provides 14 sequences of 200-500 frames recorded using a handheld moving camera?", + "choices": [ + "KITTI", + "iPhone dataset", + "Kubric MOVi-F", + "TAP-Vid benchmark" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which dataset provides 14 sequences of 200-500 frames recorded using a handheld moving camera?\nAnswer:", + " KITTI" + ], + [ + "Question:Which dataset provides 14 sequences of 200-500 frames recorded using a handheld moving camera?\nAnswer:", + " iPhone dataset" + ], + [ + "Question:Which dataset provides 14 sequences of 200-500 frames recorded using a handheld moving camera?\nAnswer:", + " Kubric MOVi-F" + ], + [ + "Question:Which dataset provides 14 sequences of 200-500 frames recorded using a handheld moving camera?\nAnswer:", + " TAP-Vid benchmark" + ] + ], + "resps": [ + [ + [ + -7.701709747314453, + false + ] + ], + [ + [ + -14.98783016204834, + false + ] + ], + [ + [ + -36.20477294921875, + false + ] + ], + [ + [ + -25.44028663635254, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.701709747314453, + false + ], + [ + -14.98783016204834, + false + ], + [ + -36.20477294921875, + false + ], + [ + -25.44028663635254, + false + ] + ], + 
"filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "0556d92c2af3244fbc95362eba3d1d35ddc141fbb94f3200ca54e53f30798f02", + "prompt_hash": "daf1ca27eafd86ec618f917cdd82261a07bb49b6fcc6093bfa3a1df9aa0ed739", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 82, + "doc": { + "question": "How many SE(3) motion bases are used in the experiments?", + "choices": [ + "10", + "15", + "20", + "25" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many SE(3) motion bases are used in the experiments?\nAnswer:", + " 10" + ], + [ + "Question:How many SE(3) motion bases are used in the experiments?\nAnswer:", + " 15" + ], + [ + "Question:How many SE(3) motion bases are used in the experiments?\nAnswer:", + " 20" + ], + [ + "Question:How many SE(3) motion bases are used in the experiments?\nAnswer:", + " 25" + ] + ], + "resps": [ + [ + [ + -4.920566082000732, + false + ] + ], + [ + [ + -6.513179302215576, + false + ] + ], + [ + [ + -5.890079021453857, + false + ] + ], + [ + [ + -7.041357517242432, + false + ] + ] + ], + "filtered_resps": [ + [ + -4.920566082000732, + false + ], + [ + -6.513179302215576, + false + ], + [ + -5.890079021453857, + false + ], + [ + -7.041357517242432, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "410a101894c872b47e15db365f4cf6dee0284104c5d11a6e94f06759bca584ac", + "prompt_hash": "003ef24bc5a450560bbc48db84d2484a00eaf229698184c67258246b368be939", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 83, + "doc": { + "question": "Which optimizer is used to optimize the model parameters?", + "choices": [ + "SGD with momentum", + "Adam Optimizer", + "RMSProp", + "AdaGrad" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which optimizer is used to optimize the model parameters?\nAnswer:", + " SGD with momentum" + ], + [ + "Question:Which optimizer is used to optimize the model parameters?\nAnswer:", + " Adam Optimizer" + ], + [ + "Question:Which optimizer is used to optimize the model parameters?\nAnswer:", + " RMSProp" + ], + [ + "Question:Which optimizer is used to optimize the model parameters?\nAnswer:", + " AdaGrad" + ] + ], + "resps": [ + [ + [ + -12.31170654296875, + false + ] + ], + [ + [ + -5.3426055908203125, + false + ] + ], + [ + [ + -7.934563636779785, + false + ] + ], + [ + [ + -9.722707748413086, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.31170654296875, + false + ], + [ + -5.3426055908203125, + false + ], + [ + -7.934563636779785, + false + ], + [ + -9.722707748413086, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "92bffbd3914d4baa00750110dfc81d6a3872c735eb4ad90041cadcdad0acb517", + "prompt_hash": "57f4207bf268aed9b8544b57c46fce191db3a2e7fcf6f2909282f3f48f230172", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 84, + "doc": { + "question": "What frame resolution and GPU are mentioned for training a 300-frame sequence?", + "choices": [ + "720p on RTX 3090", + "1080p on Tesla V100", + "960×720 on A100", + "640×480 on T4" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What frame resolution and GPU are mentioned for training a 300-frame sequence?\nAnswer:", + " 720p on RTX 3090" + ], + [ + "Question:What frame 
resolution and GPU are mentioned for training a 300-frame sequence?\nAnswer:", + " 1080p on Tesla V100" + ], + [ + "Question:What frame resolution and GPU are mentioned for training a 300-frame sequence?\nAnswer:", + " 960×720 on A100" + ], + [ + "Question:What frame resolution and GPU are mentioned for training a 300-frame sequence?\nAnswer:", + " 640×480 on T4" + ] + ], + "resps": [ + [ + [ + -18.086414337158203, + false + ] + ], + [ + [ + -19.665401458740234, + false + ] + ], + [ + [ + -20.820384979248047, + false + ] + ], + [ + [ + -22.602657318115234, + false + ] + ] + ], + "filtered_resps": [ + [ + -18.086414337158203, + false + ], + [ + -19.665401458740234, + false + ], + [ + -20.820384979248047, + false + ], + [ + -22.602657318115234, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "c3733ea2e70f8366500e2478d3ace4710fcc86588a565cfcc3b92b76e836e154", + "prompt_hash": "b0d8157bc04fad12b6a8e0e7582c6e21007152788beeec7dcdaaacab10e65098", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 85, + "doc": { + "question": "What is the total number of multiple-choice questions included in the benchmark?", + "choices": [ + "5,412", + "6,678", + "7,235", + "4,950" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the total number of multiple-choice questions included in the benchmark?\nAnswer:", + " 5,412" + ], + [ + "Question:What is the total number of multiple-choice questions included in the benchmark?\nAnswer:", + " 6,678" + ], + [ + "Question:What is the total number of multiple-choice questions included in the benchmark?\nAnswer:", + " 7,235" + ], + [ + "Question:What is the total number of multiple-choice questions included in the benchmark?\nAnswer:", + " 4,950" + ] + ], + "resps": [ + [ + [ + -16.877378463745117, + false + ] + ], + [ + [ + -16.803268432617188, + false + ] + ], + [ + [ + -17.370201110839844, + false + ] + ], + [ + [ + -15.794563293457031, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.877378463745117, + false + ], + [ + -16.803268432617188, + false + ], + [ + -17.370201110839844, + false + ], + [ + -15.794563293457031, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "9eefefe26ce7dd38d28a8386f844287bf0dda24eba0aedec041f58140a26cbe1", + "prompt_hash": "3b8576c5b0841afdce8201794aafedd83491500189ebb11a38c439c1477b1e62", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 86, + "doc": { + "question": "Which task was introduced to address the issue of single-frame bias in video understanding metrics?", + "choices": [ + "Sequential Tracking", + "Referring Reasoning", + "Contextual Retrieval", + "Visual Summarization" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which task was introduced to address the issue of single-frame bias in video understanding metrics?\nAnswer:", + " Sequential Tracking" + ], + [ + "Question:Which task was introduced to address the issue of single-frame bias in video understanding metrics?\nAnswer:", + " Referring Reasoning" + ], + [ + "Question:Which task was introduced to address the issue of single-frame bias in video understanding metrics?\nAnswer:", + " Contextual Retrieval" + ], + [ + "Question:Which task was introduced to address the issue of single-frame bias in video understanding metrics?\nAnswer:", + " Visual Summarization" 
+ ] + ], + "resps": [ + [ + [ + -20.925914764404297, + false + ] + ], + [ + [ + -22.508527755737305, + false + ] + ], + [ + [ + -18.34051513671875, + false + ] + ], + [ + [ + -18.849546432495117, + false + ] + ] + ], + "filtered_resps": [ + [ + -20.925914764404297, + false + ], + [ + -22.508527755737305, + false + ], + [ + -18.34051513671875, + false + ], + [ + -18.849546432495117, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "0a74c55939e0ee0aef7576105a07b339281c8eebc617f9de10a2ddfc75391010", + "prompt_hash": "8aca42956d2d1211359e4faefbe33be74f7456a7be81b279e4d0a92537f6b520", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 87, + "doc": { + "question": "How many progressive duration groups of videos are included in the benchmark?", + "choices": [ + "Three", + "Four", + "Five", + "Six" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:How many progressive duration groups of videos are included in the benchmark?\nAnswer:", + " Three" + ], + [ + "Question:How many progressive duration groups of videos are included in the benchmark?\nAnswer:", + " Four" + ], + [ + "Question:How many progressive duration groups of videos are included in the benchmark?\nAnswer:", + " Five" + ], + [ + "Question:How many progressive duration groups of videos are included in the benchmark?\nAnswer:", + " Six" + ] + ], + "resps": [ + [ + [ + -5.014612197875977, + false + ] + ], + [ + [ + -5.665154933929443, + false + ] + ], + [ + [ + -5.873777389526367, + false + ] + ], + [ + [ + -6.582821846008301, + false + ] + ] + ], + "filtered_resps": [ + [ + -5.014612197875977, + false + ], + [ + -5.665154933929443, + false + ], + [ + -5.873777389526367, + false + ], + [ + -6.582821846008301, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "4ddb076c5236ac1d85766f7f715661e83a2f723b33a91965d1114ece439c600e", + "prompt_hash": "75c18a03dceda279b144d6687ea54a3c5c0de57431fa510734136918a426ca79", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 88, + "doc": { + "question": "Which open-source model faced severe accuracy degradation when processing 64 input frames?", + "choices": [ + "Idefics2", + "GPT-4o", + "Gemini-1.5-Pro", + "Phi-3-Vision-Instruct" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which open-source model faced severe accuracy degradation when processing 64 input frames?\nAnswer:", + " Idefics2" + ], + [ + "Question:Which open-source model faced severe accuracy degradation when processing 64 input frames?\nAnswer:", + " GPT-4o" + ], + [ + "Question:Which open-source model faced severe accuracy degradation when processing 64 input frames?\nAnswer:", + " Gemini-1.5-Pro" + ], + [ + "Question:Which open-source model faced severe accuracy degradation when processing 64 input frames?\nAnswer:", + " Phi-3-Vision-Instruct" + ] + ], + "resps": [ + [ + [ + -14.050100326538086, + false + ] + ], + [ + [ + -7.555898189544678, + false + ] + ], + [ + [ + -10.493349075317383, + false + ] + ], + [ + [ + -18.406396865844727, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.050100326538086, + false + ], + [ + -7.555898189544678, + false + ], + [ + -10.493349075317383, + false + ], + [ + -18.406396865844727, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": 
"72ecfa49ab93693acca940d13fad99666b07e54dae8fbeee5b8695b848f68432", + "prompt_hash": "a319e9c070d1ffd3109994fa4594e8e66c9c932cf2ae4db25fe0b41536899ab4", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 89, + "doc": { + "question": "What is the average length of a question in the benchmark?", + "choices": [ + "27.15 words", + "35.82 words", + "43.53 words", + "50.20 words" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the average length of a question in the benchmark?\nAnswer:", + " 27.15 words" + ], + [ + "Question:What is the average length of a question in the benchmark?\nAnswer:", + " 35.82 words" + ], + [ + "Question:What is the average length of a question in the benchmark?\nAnswer:", + " 43.53 words" + ], + [ + "Question:What is the average length of a question in the benchmark?\nAnswer:", + " 50.20 words" + ] + ], + "resps": [ + [ + [ + -15.660734176635742, + false + ] + ], + [ + [ + -15.121524810791016, + false + ] + ], + [ + [ + -15.357699394226074, + false + ] + ], + [ + [ + -17.811992645263672, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.660734176635742, + false + ], + [ + -15.121524810791016, + false + ], + [ + -15.357699394226074, + false + ], + [ + -17.811992645263672, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f6ea5f4b873d21defd6d4012ffe39fc0fe8c93d275d7f6bbd55ca96239576e3a", + "prompt_hash": "085e2913fade8b060f9c9f02a85206298f94060133217cab224c91b002010bfc", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 90, + "doc": { + "question": "What does the RISE algorithm stand for?", + "choices": [ + "Recursive Introspection for Self-Improvement", + "Reinforced Inference for Sequential Evaluation", + "Reward-based Incremental Sampling Enhancement", + "Regression Induced System Evaluation" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What does the RISE algorithm stand for?\nAnswer:", + " Recursive Introspection for Self-Improvement" + ], + [ + "Question:What does the RISE algorithm stand for?\nAnswer:", + " Reinforced Inference for Sequential Evaluation" + ], + [ + "Question:What does the RISE algorithm stand for?\nAnswer:", + " Reward-based Incremental Sampling Enhancement" + ], + [ + "Question:What does the RISE algorithm stand for?\nAnswer:", + " Regression Induced System Evaluation" + ] + ], + "resps": [ + [ + [ + -18.053543090820312, + false + ] + ], + [ + [ + -22.69965362548828, + false + ] + ], + [ + [ + -24.5802059173584, + false + ] + ], + [ + [ + -22.592044830322266, + false + ] + ] + ], + "filtered_resps": [ + [ + -18.053543090820312, + false + ], + [ + -22.69965362548828, + false + ], + [ + -24.5802059173584, + false + ], + [ + -22.592044830322266, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "447cedc70ee958e43686d7c7a4918f12ce062181a998804a729c7fd95aad7a1b", + "prompt_hash": "56a91389443b53bba1b14ecaa15c800a07be68f9373e6a155dc743fcc90e78d1", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 91, + "doc": { + "question": "Which dataset showed a 23.9% improvement for Mistral-7B when using RISE over five turns?", + "choices": [ + "SVAMP", + "GSM8K", + "MATH", + "ARC" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which 
dataset showed a 23.9% improvement for Mistral-7B when using RISE over five turns?\nAnswer:", + " SVAMP" + ], + [ + "Question:Which dataset showed a 23.9% improvement for Mistral-7B when using RISE over five turns?\nAnswer:", + " GSM8K" + ], + [ + "Question:Which dataset showed a 23.9% improvement for Mistral-7B when using RISE over five turns?\nAnswer:", + " MATH" + ], + [ + "Question:Which dataset showed a 23.9% improvement for Mistral-7B when using RISE over five turns?\nAnswer:", + " ARC" + ] + ], + "resps": [ + [ + [ + -12.474897384643555, + false + ] + ], + [ + [ + -6.9842848777771, + false + ] + ], + [ + [ + -8.997828483581543, + false + ] + ], + [ + [ + -8.794258117675781, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.474897384643555, + false + ], + [ + -6.9842848777771, + false + ], + [ + -8.997828483581543, + false + ], + [ + -8.794258117675781, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "68b8dc73dcb9f561ec8b17b5ec220eb99c6f176dc768fce36d551a221af66114", + "prompt_hash": "47c072b1d8d8e66607250531175d735cd6aadaaeb74c1a9e232137cdcc37b57f", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 92, + "doc": { + "question": "What is the purpose of the 'knowledge boosting' stage in RISE training?", + "choices": [ + "To improve model performance on out-of-distribution data", + "To pre-train the model on diverse internet-scale corpora", + "To teach response style and instruction-following before on-policy rollouts", + "To reduce computational costs during reinforcement learning" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the purpose of the 'knowledge boosting' stage in RISE training?\nAnswer:", + " To improve model performance on out-of-distribution data" + ], + [ + "Question:What is the purpose of the 'knowledge boosting' stage in RISE training?\nAnswer:", + " To pre-train the model on diverse internet-scale corpora" + ], + [ + "Question:What is the purpose of the 'knowledge boosting' stage in RISE training?\nAnswer:", + " To teach response style and instruction-following before on-policy rollouts" + ], + [ + "Question:What is the purpose of the 'knowledge boosting' stage in RISE training?\nAnswer:", + " To reduce computational costs during reinforcement learning" + ] + ], + "resps": [ + [ + [ + -19.200429916381836, + false + ] + ], + [ + [ + -32.36049270629883, + false + ] + ], + [ + [ + -58.351783752441406, + false + ] + ], + [ + [ + -25.660242080688477, + false + ] + ] + ], + "filtered_resps": [ + [ + -19.200429916381836, + false + ], + [ + -32.36049270629883, + false + ], + [ + -58.351783752441406, + false + ], + [ + -25.660242080688477, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "691fc113bfe9da94b48453014c9d0e8e30f5ad527803e97f11a8a3d27eca1efd", + "prompt_hash": "e53d5e7ebff513231466da7a62f31d10ec7f1751bb32499edc7d8f04afc51efb", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 93, + "doc": { + "question": "How does RISE decide the final response when running 'without oracle' mode at inference time?", + "choices": [ + "It selects the response with the highest confidence score", + "It uses majority voting over candidate outputs from different turns", + "It always chooses the response from the last turn", + "It queries a stronger external model for validation" + ], + 
"answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:How does RISE decide the final response when running 'without oracle' mode at inference time?\nAnswer:", + " It selects the response with the highest confidence score" + ], + [ + "Question:How does RISE decide the final response when running 'without oracle' mode at inference time?\nAnswer:", + " It uses majority voting over candidate outputs from different turns" + ], + [ + "Question:How does RISE decide the final response when running 'without oracle' mode at inference time?\nAnswer:", + " It always chooses the response from the last turn" + ], + [ + "Question:How does RISE decide the final response when running 'without oracle' mode at inference time?\nAnswer:", + " It queries a stronger external model for validation" + ] + ], + "resps": [ + [ + [ + -14.699235916137695, + false + ] + ], + [ + [ + -37.79487609863281, + false + ] + ], + [ + [ + -23.1787109375, + false + ] + ], + [ + [ + -37.22762680053711, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.699235916137695, + false + ], + [ + -37.79487609863281, + false + ], + [ + -23.1787109375, + false + ], + [ + -37.22762680053711, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "49446d10ea114fe85f650e62cbe696e3ea6acf8592931f6f15b48a6e90ac48d8", + "prompt_hash": "189a98bbc8bce5e6af01dfbae399f1fe09d20c3d95d2bb4c3740f2334aa23892", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 94, + "doc": { + "question": "Which method did RISE outperform by solving problems unsolved by larger budgets at the first turn?", + "choices": [ + "Self-Refine", + "GLoRE", + "Parallel pass@B sampling", + "Chain-of-Thought prompting" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which method did RISE outperform by solving problems unsolved by larger budgets at the first turn?\nAnswer:", + " Self-Refine" + ], + [ + "Question:Which method did RISE outperform by solving problems unsolved by larger budgets at the first turn?\nAnswer:", + " GLoRE" + ], + [ + "Question:Which method did RISE outperform by solving problems unsolved by larger budgets at the first turn?\nAnswer:", + " Parallel pass@B sampling" + ], + [ + "Question:Which method did RISE outperform by solving problems unsolved by larger budgets at the first turn?\nAnswer:", + " Chain-of-Thought prompting" + ] + ], + "resps": [ + [ + [ + -14.033670425415039, + false + ] + ], + [ + [ + -17.320749282836914, + false + ] + ], + [ + [ + -44.752960205078125, + false + ] + ], + [ + [ + -16.29299545288086, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.033670425415039, + false + ], + [ + -17.320749282836914, + false + ], + [ + -44.752960205078125, + false + ], + [ + -16.29299545288086, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "5be7b9aac29cff178ab3bfa0c1f27fedbcbdf9a6e094059fa58fed8b6f0d8b3c", + "prompt_hash": "ee21eb2bda8b580daea4f8db981b89e8aca39a5c8ffdf31bf5c5c81239103415", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 95, + "doc": { + "question": "What model is used as the initial seed model in the experiments?", + "choices": [ + "Llama-3-8B-Instruct", + "Llama-2-70B-Chat", + "GPT-4-1106-preview", + "Starling-RM-34B" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What model is used as the initial seed model in the 
experiments?\nAnswer:", + " Llama-3-8B-Instruct" + ], + [ + "Question:What model is used as the initial seed model in the experiments?\nAnswer:", + " Llama-2-70B-Chat" + ], + [ + "Question:What model is used as the initial seed model in the experiments?\nAnswer:", + " GPT-4-1106-preview" + ], + [ + "Question:What model is used as the initial seed model in the experiments?\nAnswer:", + " Starling-RM-34B" + ] + ], + "resps": [ + [ + [ + -12.638608932495117, + false + ] + ], + [ + [ + -15.421242713928223, + false + ] + ], + [ + [ + -9.687981605529785, + false + ] + ], + [ + [ + -31.80432891845703, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.638608932495117, + false + ], + [ + -15.421242713928223, + false + ], + [ + -9.687981605529785, + false + ], + [ + -31.80432891845703, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "decef1964b2e2a5dd8554539fdf609f75ca52d60a5b511362d93b3606e83d977", + "prompt_hash": "5def577581854c72012d82f7d550508d28c90b62f0d3d9e78ebcc1de138bea1c", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 96, + "doc": { + "question": "How many response variations are generated per prompt in each iteration during training?", + "choices": [ + "5", + "7", + "9", + "11" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:How many response variations are generated per prompt in each iteration during training?\nAnswer:", + " 5" + ], + [ + "Question:How many response variations are generated per prompt in each iteration during training?\nAnswer:", + " 7" + ], + [ + "Question:How many response variations are generated per prompt in each iteration during training?\nAnswer:", + " 9" + ], + [ + "Question:How many response variations are generated per prompt in each iteration during training?\nAnswer:", + " 11" + ] + ], + "resps": [ + [ + [ + -4.11230993270874, + false + ] + ], + [ + [ + -6.176117420196533, + false + ] + ], + [ + [ + -6.593822956085205, + false + ] + ], + [ + [ + -7.345574378967285, + false + ] + ] + ], + "filtered_resps": [ + [ + -4.11230993270874, + false + ], + [ + -6.176117420196533, + false + ], + [ + -6.593822956085205, + false + ], + [ + -7.345574378967285, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "5938e09b994ec80f367a369388816c68f9946c9c16ee409c43cd4c90f9b6da07", + "prompt_hash": "863a5b0d309f6953bac0812d35bcb80f2c0def65dfe1a0df4ace5bdc9f2e5b7f", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 97, + "doc": { + "question": "Which benchmark evaluates the model's ability in multi-turn conversations?", + "choices": [ + "AlpacaEval 2", + "Arena-Hard", + "MT-Bench", + "Open Assistant" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which benchmark evaluates the model's ability in multi-turn conversations?\nAnswer:", + " AlpacaEval 2" + ], + [ + "Question:Which benchmark evaluates the model's ability in multi-turn conversations?\nAnswer:", + " Arena-Hard" + ], + [ + "Question:Which benchmark evaluates the model's ability in multi-turn conversations?\nAnswer:", + " MT-Bench" + ], + [ + "Question:Which benchmark evaluates the model's ability in multi-turn conversations?\nAnswer:", + " Open Assistant" + ] + ], + "resps": [ + [ + [ + -12.953899383544922, + false + ] + ], + [ + [ + -10.193154335021973, + false + ] + ], + [ + [ + -5.521928787231445, + false 
+ ] + ], + [ + [ + -9.941776275634766, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.953899383544922, + false + ], + [ + -10.193154335021973, + false + ], + [ + -5.521928787231445, + false + ], + [ + -9.941776275634766, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "1d0b2ec747493415aa8998fb6c5d640e86cd06eaededa404995f36837b0af203", + "prompt_hash": "fb8092185a1e5cd834f0e4266ceac170583b388675f40a41987e3bbdc5577481", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 98, + "doc": { + "question": "What was the length-controlled (LC) win rate achieved on AlpacaEval 2 after the fourth iteration of training?", + "choices": [ + "22.92%", + "35.49%", + "39.44%", + "41.20%" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What was the length-controlled (LC) win rate achieved on AlpacaEval 2 after the fourth iteration of training?\nAnswer:", + " 22.92%" + ], + [ + "Question:What was the length-controlled (LC) win rate achieved on AlpacaEval 2 after the fourth iteration of training?\nAnswer:", + " 35.49%" + ], + [ + "Question:What was the length-controlled (LC) win rate achieved on AlpacaEval 2 after the fourth iteration of training?\nAnswer:", + " 39.44%" + ], + [ + "Question:What was the length-controlled (LC) win rate achieved on AlpacaEval 2 after the fourth iteration of training?\nAnswer:", + " 41.20%" + ] + ], + "resps": [ + [ + [ + -16.45294189453125, + false + ] + ], + [ + [ + -16.92536163330078, + false + ] + ], + [ + [ + -16.5533390045166, + false + ] + ], + [ + [ + -16.253902435302734, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.45294189453125, + false + ], + [ + -16.92536163330078, + false + ], + [ + -16.5533390045166, + false + ], + [ + -16.253902435302734, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7ebd37c8db4919b509d761fba7404258b206bf0ef716354b999a7cac85aef19b", + "prompt_hash": "55db4484c93ad2de95b643524b062dfe42f48efd8075611645e98ddb832df074", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 99, + "doc": { + "question": "Which strong external reward model failed to improve the LC win rate in experiments?", + "choices": [ + "Starling-RM-34B", + "Claude-Opus", + "SPPO", + "GPT-4-0314" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which strong external reward model failed to improve the LC win rate in experiments?\nAnswer:", + " Starling-RM-34B" + ], + [ + "Question:Which strong external reward model failed to improve the LC win rate in experiments?\nAnswer:", + " Claude-Opus" + ], + [ + "Question:Which strong external reward model failed to improve the LC win rate in experiments?\nAnswer:", + " SPPO" + ], + [ + "Question:Which strong external reward model failed to improve the LC win rate in experiments?\nAnswer:", + " GPT-4-0314" + ] + ], + "resps": [ + [ + [ + -33.60536193847656, + false + ] + ], + [ + [ + -20.415925979614258, + false + ] + ], + [ + [ + -15.506284713745117, + false + ] + ], + [ + [ + -19.35174560546875, + false + ] + ] + ], + "filtered_resps": [ + [ + -33.60536193847656, + false + ], + [ + -20.415925979614258, + false + ], + [ + -15.506284713745117, + false + ], + [ + -19.35174560546875, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": 
"01506c30c1ffb5f3d34526184f0caaf6b0ac57ae5d3ef1294a5db9c7ac44b382", + "prompt_hash": "4a6107d70777877591a3f8886e2ef6bbc0d0830fa0869dee0cfdb68d9cf22027", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + } + ], + "arxiv_mc_2408": [ + { + "doc_id": 0, + "doc": { + "question": "What activation function do JumpReLU SAEs use in place of the ReLU function?", + "choices": [ + "Softmax", + "TopK", + "JumpReLU", + "Sigmoid" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What activation function do JumpReLU SAEs use in place of the ReLU function?\nAnswer:", + " Softmax" + ], + [ + "Question:What activation function do JumpReLU SAEs use in place of the ReLU function?\nAnswer:", + " TopK" + ], + [ + "Question:What activation function do JumpReLU SAEs use in place of the ReLU function?\nAnswer:", + " JumpReLU" + ], + [ + "Question:What activation function do JumpReLU SAEs use in place of the ReLU function?\nAnswer:", + " Sigmoid" + ] + ], + "resps": [ + [ + [ + -7.370088577270508, + false + ] + ], + [ + [ + -15.097153663635254, + false + ] + ], + [ + [ + -10.174015998840332, + false + ] + ], + [ + [ + -2.91923189163208, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.370088577270508, + false + ], + [ + -15.097153663635254, + false + ], + [ + -10.174015998840332, + false + ], + [ + -2.91923189163208, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "9fc664838b502e0969a0d397b3807dd078d361efa07bb02fce0d3b229d6d70eb", + "prompt_hash": "b541b7d775a03e2f5dffc078cc247a2493ab42c838ac3de8f3625b1bbb5734a5", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 1, + "doc": { + "question": "Which loss function do JumpReLU SAEs use for enforcing sparsity?", + "choices": [ + "L1 norm penalty", + "L2 norm penalty", + "KL divergence", + "L0 sparsity penalty" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:Which loss function do JumpReLU SAEs use for enforcing sparsity?\nAnswer:", + " L1 norm penalty" + ], + [ + "Question:Which loss function do JumpReLU SAEs use for enforcing sparsity?\nAnswer:", + " L2 norm penalty" + ], + [ + "Question:Which loss function do JumpReLU SAEs use for enforcing sparsity?\nAnswer:", + " KL divergence" + ], + [ + "Question:Which loss function do JumpReLU SAEs use for enforcing sparsity?\nAnswer:", + " L0 sparsity penalty" + ] + ], + "resps": [ + [ + [ + -16.749191284179688, + false + ] + ], + [ + [ + -16.98282241821289, + false + ] + ], + [ + [ + -9.006500244140625, + false + ] + ], + [ + [ + -21.58685302734375, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.749191284179688, + false + ], + [ + -16.98282241821289, + false + ], + [ + -9.006500244140625, + false + ], + [ + -21.58685302734375, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "3083d36e417a35b7fa92a09da8aaf3ab53bba600d0297616c7530c16aab83764", + "prompt_hash": "d44b0a6d22d713fabb047ea2f9974b6dd36525eea409ff85bd6052fb23fb085c", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 2, + "doc": { + "question": "What is the main role of the straight-through estimator (STE) in training JumpReLU SAEs?", + "choices": [ + "To estimate the TopK feature indices", + "To avoid dead neurons during initialization", + "To enable training through the threshold 
parameter", + "To calculate decoder weights directly" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the main role of the straight-through estimator (STE) in training JumpReLU SAEs?\nAnswer:", + " To estimate the TopK feature indices" + ], + [ + "Question:What is the main role of the straight-through estimator (STE) in training JumpReLU SAEs?\nAnswer:", + " To avoid dead neurons during initialization" + ], + [ + "Question:What is the main role of the straight-through estimator (STE) in training JumpReLU SAEs?\nAnswer:", + " To enable training through the threshold parameter" + ], + [ + "Question:What is the main role of the straight-through estimator (STE) in training JumpReLU SAEs?\nAnswer:", + " To calculate decoder weights directly" + ] + ], + "resps": [ + [ + [ + -32.831172943115234, + false + ] + ], + [ + [ + -29.019672393798828, + false + ] + ], + [ + [ + -32.593955993652344, + false + ] + ], + [ + [ + -31.319580078125, + false + ] + ] + ], + "filtered_resps": [ + [ + -32.831172943115234, + false + ], + [ + -29.019672393798828, + false + ], + [ + -32.593955993652344, + false + ], + [ + -31.319580078125, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "6e057a450980431afda4f7c99a40bae2259ebd40c01007a7d3c605ba1eae3bbb", + "prompt_hash": "3e38d72fc777f347897440eac6491904bfa9a74477d3cd6b0c0ef9488461822f", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 3, + "doc": { + "question": "Compared to Gated and TopK SAEs, which of the following is an advantage of JumpReLU SAEs?", + "choices": [ + "They use more auxiliary loss terms", + "They require partial sorting of activations", + "They are more efficient to train", + "They rely on dead feature resampling" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Compared to Gated and TopK SAEs, which of the following is an advantage of JumpReLU SAEs?\nAnswer:", + " They use more auxiliary loss terms" + ], + [ + "Question:Compared to Gated and TopK SAEs, which of the following is an advantage of JumpReLU SAEs?\nAnswer:", + " They require partial sorting of activations" + ], + [ + "Question:Compared to Gated and TopK SAEs, which of the following is an advantage of JumpReLU SAEs?\nAnswer:", + " They are more efficient to train" + ], + [ + "Question:Compared to Gated and TopK SAEs, which of the following is an advantage of JumpReLU SAEs?\nAnswer:", + " They rely on dead feature resampling" + ] + ], + "resps": [ + [ + [ + -32.257041931152344, + false + ] + ], + [ + [ + -35.025230407714844, + false + ] + ], + [ + [ + -19.65345573425293, + false + ] + ], + [ + [ + -45.214271545410156, + false + ] + ] + ], + "filtered_resps": [ + [ + -32.257041931152344, + false + ], + [ + -35.025230407714844, + false + ], + [ + -19.65345573425293, + false + ], + [ + -45.214271545410156, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f27eed3312662f0cba14084f910dcc19898dade925b92cb182c54756586dc699", + "prompt_hash": "73c0c75bccfd2179a85934c79c1e4df44fb62f7d04764903f2332e11bdc742c1", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 4, + "doc": { + "question": "What is the function of the Heaviside step function in JumpReLU SAEs?", + "choices": [ + "To smooth gradients for backpropagation", + "To normalize decoder weights", + "To zero out activations below a 
threshold", + "To increase model capacity" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the function of the Heaviside step function in JumpReLU SAEs?\nAnswer:", + " To smooth gradients for backpropagation" + ], + [ + "Question:What is the function of the Heaviside step function in JumpReLU SAEs?\nAnswer:", + " To normalize decoder weights" + ], + [ + "Question:What is the function of the Heaviside step function in JumpReLU SAEs?\nAnswer:", + " To zero out activations below a threshold" + ], + [ + "Question:What is the function of the Heaviside step function in JumpReLU SAEs?\nAnswer:", + " To increase model capacity" + ] + ], + "resps": [ + [ + [ + -25.059850692749023, + false + ] + ], + [ + [ + -26.03301239013672, + false + ] + ], + [ + [ + -21.755271911621094, + false + ] + ], + [ + [ + -18.161693572998047, + false + ] + ] + ], + "filtered_resps": [ + [ + -25.059850692749023, + false + ], + [ + -26.03301239013672, + false + ], + [ + -21.755271911621094, + false + ], + [ + -18.161693572998047, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "95fba6f112c51a73314c20365df8a022603cc6c4cbc821783a1f2033bdd519ce", + "prompt_hash": "8792b4fc53b183725631029120e0b33a31b36460f7a4876f205e8b0ddef5e9b0", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 5, + "doc": { + "question": "What is the main architectural difference between KAN and traditional MLP?", + "choices": [ + "KAN uses fixed activation functions on nodes, while MLP uses learnable splines on edges", + "KAN uses learnable activation functions on edges, while MLP uses fixed activation functions on nodes", + "KAN employs convolutional layers instead of fully connected layers", + "KAN is trained using unsupervised learning while MLP is trained using supervised learning" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the main architectural difference between KAN and traditional MLP?\nAnswer:", + " KAN uses fixed activation functions on nodes, while MLP uses learnable splines on edges" + ], + [ + "Question:What is the main architectural difference between KAN and traditional MLP?\nAnswer:", + " KAN uses learnable activation functions on edges, while MLP uses fixed activation functions on nodes" + ], + [ + "Question:What is the main architectural difference between KAN and traditional MLP?\nAnswer:", + " KAN employs convolutional layers instead of fully connected layers" + ], + [ + "Question:What is the main architectural difference between KAN and traditional MLP?\nAnswer:", + " KAN is trained using unsupervised learning while MLP is trained using supervised learning" + ] + ], + "resps": [ + [ + [ + -46.42828369140625, + false + ] + ], + [ + [ + -34.54854202270508, + false + ] + ], + [ + [ + -14.264701843261719, + false + ] + ], + [ + [ + -24.592636108398438, + false + ] + ] + ], + "filtered_resps": [ + [ + -46.42828369140625, + false + ], + [ + -34.54854202270508, + false + ], + [ + -14.264701843261719, + false + ], + [ + -24.592636108398438, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "911fd11e4b4b821da26541f5d056b94e2bba893d69a41294b9bc51ef3ed360aa", + "prompt_hash": "0c8417a74421a7ad9ab39e3bcb35cba73d03b091094d707553204f4773194c3e", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 6, + "doc": { + "question": 
"What is the formula for the number of parameters in one layer of KAN?", + "choices": [ + "(d_in × d_out) + d_out", + "(d_in × d_out) × (G + K + 3) + d_out", + "(d_in × d_out) × (K + 1) + G", + "(d_in × d_out) × G + K + d_out" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the formula for the number of parameters in one layer of KAN?\nAnswer:", + " (d_in × d_out) + d_out" + ], + [ + "Question:What is the formula for the number of parameters in one layer of KAN?\nAnswer:", + " (d_in × d_out) × (G + K + 3) + d_out" + ], + [ + "Question:What is the formula for the number of parameters in one layer of KAN?\nAnswer:", + " (d_in × d_out) × (K + 1) + G" + ], + [ + "Question:What is the formula for the number of parameters in one layer of KAN?\nAnswer:", + " (d_in × d_out) × G + K + d_out" + ] + ], + "resps": [ + [ + [ + -35.33420944213867, + false + ] + ], + [ + [ + -62.38064193725586, + false + ] + ], + [ + [ + -51.30956268310547, + false + ] + ], + [ + [ + -52.87654113769531, + false + ] + ] + ], + "filtered_resps": [ + [ + -35.33420944213867, + false + ], + [ + -62.38064193725586, + false + ], + [ + -51.30956268310547, + false + ], + [ + -52.87654113769531, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "6daacd702e0384b76b887d3bd437cd38b9be76e1212ae8929806b70a47db4db6", + "prompt_hash": "6c9621501c25caaf5fe65ccccbf091ede450342dfedd95e3427774408a59a814", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 7, + "doc": { + "question": "In symbolic formula representing tasks, what is the observed performance of KAN when compared to MLP under equal parameter settings?", + "choices": [ + "KAN outperforms MLP on all datasets", + "KAN performs worse than MLP on all datasets", + "KAN outperforms MLP on 7 out of 8 datasets", + "KAN matches MLP's performance on all datasets" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:In symbolic formula representing tasks, what is the observed performance of KAN when compared to MLP under equal parameter settings?\nAnswer:", + " KAN outperforms MLP on all datasets" + ], + [ + "Question:In symbolic formula representing tasks, what is the observed performance of KAN when compared to MLP under equal parameter settings?\nAnswer:", + " KAN performs worse than MLP on all datasets" + ], + [ + "Question:In symbolic formula representing tasks, what is the observed performance of KAN when compared to MLP under equal parameter settings?\nAnswer:", + " KAN outperforms MLP on 7 out of 8 datasets" + ], + [ + "Question:In symbolic formula representing tasks, what is the observed performance of KAN when compared to MLP under equal parameter settings?\nAnswer:", + " KAN matches MLP's performance on all datasets" + ] + ], + "resps": [ + [ + [ + -15.381976127624512, + false + ] + ], + [ + [ + -17.349285125732422, + false + ] + ], + [ + [ + -20.085693359375, + false + ] + ], + [ + [ + -25.82134246826172, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.381976127624512, + false + ], + [ + -17.349285125732422, + false + ], + [ + -20.085693359375, + false + ], + [ + -25.82134246826172, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "0422e76e7024b4c69b03f0b91afd3ee7ac85822d48f642aec7da11033312ccd2", + "prompt_hash": "27932ccdc73ae1363aa597199437308d54d250a9fe6c9800766248ce37794fe0", + "target_hash": 
"d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 8, + "doc": { + "question": "What is the impact of using B-spline activation functions in MLP for symbolic formula representation?", + "choices": [ + "It degrades performance compared to ReLU and GELU", + "It causes MLP to perform worse than KAN", + "It allows MLP to match or exceed the performance of KAN", + "It leads to unstable training results" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the impact of using B-spline activation functions in MLP for symbolic formula representation?\nAnswer:", + " It degrades performance compared to ReLU and GELU" + ], + [ + "Question:What is the impact of using B-spline activation functions in MLP for symbolic formula representation?\nAnswer:", + " It causes MLP to perform worse than KAN" + ], + [ + "Question:What is the impact of using B-spline activation functions in MLP for symbolic formula representation?\nAnswer:", + " It allows MLP to match or exceed the performance of KAN" + ], + [ + "Question:What is the impact of using B-spline activation functions in MLP for symbolic formula representation?\nAnswer:", + " It leads to unstable training results" + ] + ], + "resps": [ + [ + [ + -26.361316680908203, + false + ] + ], + [ + [ + -32.22442626953125, + false + ] + ], + [ + [ + -33.5744743347168, + false + ] + ], + [ + [ + -22.515552520751953, + false + ] + ] + ], + "filtered_resps": [ + [ + -26.361316680908203, + false + ], + [ + -32.22442626953125, + false + ], + [ + -33.5744743347168, + false + ], + [ + -22.515552520751953, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "5819bef38b6c4a8aedafc0779016465827c41c35096c1de0f338950589d288c0", + "prompt_hash": "edb092d62713dc9bf15940bb778ea24bef9a4af3cd8757d7028acfdcf1d11dbf", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 9, + "doc": { + "question": "In continual learning experiments on MNIST, what was the key difference between KAN and MLP?", + "choices": [ + "KAN exhibited better retention on earlier tasks than MLP", + "MLP's accuracy dropped to zero on all tasks", + "KAN's accuracy dropped to zero on the first and second tasks", + "Both models achieved similar performance on all tasks" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:In continual learning experiments on MNIST, what was the key difference between KAN and MLP?\nAnswer:", + " KAN exhibited better retention on earlier tasks than MLP" + ], + [ + "Question:In continual learning experiments on MNIST, what was the key difference between KAN and MLP?\nAnswer:", + " MLP's accuracy dropped to zero on all tasks" + ], + [ + "Question:In continual learning experiments on MNIST, what was the key difference between KAN and MLP?\nAnswer:", + " KAN's accuracy dropped to zero on the first and second tasks" + ], + [ + "Question:In continual learning experiments on MNIST, what was the key difference between KAN and MLP?\nAnswer:", + " Both models achieved similar performance on all tasks" + ] + ], + "resps": [ + [ + [ + -31.27310562133789, + false + ] + ], + [ + [ + -35.415992736816406, + false + ] + ], + [ + [ + -39.42005157470703, + false + ] + ], + [ + [ + -25.61410140991211, + false + ] + ] + ], + "filtered_resps": [ + [ + -31.27310562133789, + false + ], + [ + -35.415992736816406, + false + ], + [ + -39.42005157470703, + false + ], + [ + 
-25.61410140991211, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7e145678f19dd1a4b83d91cb268d1242b9e4f1b1dc845419014afacca59518d4", + "prompt_hash": "16c3f9f67701a26d194a004914d00521cc41f1bc6aea494b53e38fa8293ea555", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 10, + "doc": { + "question": "What percentage of the world population is estimated to carry a location sensor due to smartphone ownership?", + "choices": [ + "65%", + "70%", + "85%", + "90%" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What percentage of the world population is estimated to carry a location sensor due to smartphone ownership?\nAnswer:", + " 65%" + ], + [ + "Question:What percentage of the world population is estimated to carry a location sensor due to smartphone ownership?\nAnswer:", + " 70%" + ], + [ + "Question:What percentage of the world population is estimated to carry a location sensor due to smartphone ownership?\nAnswer:", + " 85%" + ], + [ + "Question:What percentage of the world population is estimated to carry a location sensor due to smartphone ownership?\nAnswer:", + " 90%" + ] + ], + "resps": [ + [ + [ + -11.196426391601562, + false + ] + ], + [ + [ + -10.125539779663086, + false + ] + ], + [ + [ + -11.262310028076172, + false + ] + ], + [ + [ + -10.178831100463867, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.196426391601562, + false + ], + [ + -10.125539779663086, + false + ], + [ + -11.262310028076172, + false + ], + [ + -10.178831100463867, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "1141f4d522435d88a7e99c842d874368792cc64ef497e76fadc94e22760ea1d1", + "prompt_hash": "af6926ce5c08968af4aef7cc9ae47432a9649e3156aa85fae74a2b1c68a3004c", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 11, + "doc": { + "question": "Which type of cells in the brain encode regional boundaries in spatial memory?", + "choices": [ + "Place cells", + "Border cells", + "Grid cells", + "Goal direction cells" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which type of cells in the brain encode regional boundaries in spatial memory?\nAnswer:", + " Place cells" + ], + [ + "Question:Which type of cells in the brain encode regional boundaries in spatial memory?\nAnswer:", + " Border cells" + ], + [ + "Question:Which type of cells in the brain encode regional boundaries in spatial memory?\nAnswer:", + " Grid cells" + ], + [ + "Question:Which type of cells in the brain encode regional boundaries in spatial memory?\nAnswer:", + " Goal direction cells" + ] + ], + "resps": [ + [ + [ + -9.464567184448242, + false + ] + ], + [ + [ + -12.598109245300293, + false + ] + ], + [ + [ + -10.90217399597168, + false + ] + ], + [ + [ + -22.165721893310547, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.464567184448242, + false + ], + [ + -12.598109245300293, + false + ], + [ + -10.90217399597168, + false + ], + [ + -22.165721893310547, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "fbcbb23568e5ca42e2399aa75ebcd4d5ac6bad4d1546255ebf5e1d4c249a04ef", + "prompt_hash": "8916e785d3289e19cee658505debe89b3d67ae8e1d4274f3d30de8a64c39cbc7", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + 
"doc_id": 12, + "doc": { + "question": "Which five elements constitute the perceptions of cities in Kevin Lynch’s framework?", + "choices": [ + "Paths, zones, directions, nodes, views", + "Landmarks, borders, areas, paths, cues", + "Edges, nodes, landmarks, regions, paths", + "Streets, plazas, edges, centers, directions" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which five elements constitute the perceptions of cities in Kevin Lynch’s framework?\nAnswer:", + " Paths, zones, directions, nodes, views" + ], + [ + "Question:Which five elements constitute the perceptions of cities in Kevin Lynch’s framework?\nAnswer:", + " Landmarks, borders, areas, paths, cues" + ], + [ + "Question:Which five elements constitute the perceptions of cities in Kevin Lynch’s framework?\nAnswer:", + " Edges, nodes, landmarks, regions, paths" + ], + [ + "Question:Which five elements constitute the perceptions of cities in Kevin Lynch’s framework?\nAnswer:", + " Streets, plazas, edges, centers, directions" + ] + ], + "resps": [ + [ + [ + -44.72914505004883, + false + ] + ], + [ + [ + -50.86505889892578, + false + ] + ], + [ + [ + -42.66518020629883, + false + ] + ], + [ + [ + -45.75241470336914, + false + ] + ] + ], + "filtered_resps": [ + [ + -44.72914505004883, + false + ], + [ + -50.86505889892578, + false + ], + [ + -42.66518020629883, + false + ], + [ + -45.75241470336914, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "79a4226348287d8cfd75ea3ab13ab54f5720f434c1a1ce9a3907d0bea4e1be4e", + "prompt_hash": "a76f1a1be672cf9a876ede967bb11db4dcdfb132b57f0ddb98837b99a000de9e", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 13, + "doc": { + "question": "What proportion of mobile phone subscriptions per 100 people was recorded in high-income countries in 2020?", + "choices": [ + "59", + "85", + "100", + "122" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What proportion of mobile phone subscriptions per 100 people was recorded in high-income countries in 2020?\nAnswer:", + " 59" + ], + [ + "Question:What proportion of mobile phone subscriptions per 100 people was recorded in high-income countries in 2020?\nAnswer:", + " 85" + ], + [ + "Question:What proportion of mobile phone subscriptions per 100 people was recorded in high-income countries in 2020?\nAnswer:", + " 100" + ], + [ + "Question:What proportion of mobile phone subscriptions per 100 people was recorded in high-income countries in 2020?\nAnswer:", + " 122" + ] + ], + "resps": [ + [ + [ + -7.109789848327637, + false + ] + ], + [ + [ + -7.487434387207031, + false + ] + ], + [ + [ + -7.320727348327637, + false + ] + ], + [ + [ + -9.849771499633789, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.109789848327637, + false + ], + [ + -7.487434387207031, + false + ], + [ + -7.320727348327637, + false + ], + [ + -9.849771499633789, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "515273aa9ee0792dc3371cff6d2cb2b7c54d4b91ed4d9bba6aab772d426428b0", + "prompt_hash": "769fc9a81750bd9200d69d2df3de1d5c2c66edbbab6a6daac4bcc0e912cd9a1a", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 14, + "doc": { + "question": "Which AI architectures are currently being investigated for modeling individual trajectories?", + "choices": [ + "Transformer networks", + 
"Decision trees", + "Naive Bayes classifiers", + "Support vector machines" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which AI architectures are currently being investigated for modeling individual trajectories?\nAnswer:", + " Transformer networks" + ], + [ + "Question:Which AI architectures are currently being investigated for modeling individual trajectories?\nAnswer:", + " Decision trees" + ], + [ + "Question:Which AI architectures are currently being investigated for modeling individual trajectories?\nAnswer:", + " Naive Bayes classifiers" + ], + [ + "Question:Which AI architectures are currently being investigated for modeling individual trajectories?\nAnswer:", + " Support vector machines" + ] + ], + "resps": [ + [ + [ + -11.995979309082031, + false + ] + ], + [ + [ + -9.45278549194336, + false + ] + ], + [ + [ + -18.51933479309082, + false + ] + ], + [ + [ + -16.56319808959961, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.995979309082031, + false + ], + [ + -9.45278549194336, + false + ], + [ + -18.51933479309082, + false + ], + [ + -16.56319808959961, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "475ef1d0e01261c4e570972e3e9e62387d7a1ec4d652bc497728c5a0ae2edc72", + "prompt_hash": "9d6e1d74a7034601fd57fc7d31c8595cfd9c915813ecb87f2766abdf316baa48", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 15, + "doc": { + "question": "What model is used as the base language model in the revision model experiments?", + "choices": [ + "GPT-4", + "Gemini", + "PaLM 2-S*", + "DeepSeekMath" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What model is used as the base language model in the revision model experiments?\nAnswer:", + " GPT-4" + ], + [ + "Question:What model is used as the base language model in the revision model experiments?\nAnswer:", + " Gemini" + ], + [ + "Question:What model is used as the base language model in the revision model experiments?\nAnswer:", + " PaLM 2-S*" + ], + [ + "Question:What model is used as the base language model in the revision model experiments?\nAnswer:", + " DeepSeekMath" + ] + ], + "resps": [ + [ + [ + -4.3542022705078125, + false + ] + ], + [ + [ + -7.10027551651001, + false + ] + ], + [ + [ + -22.711105346679688, + false + ] + ], + [ + [ + -8.928313255310059, + false + ] + ] + ], + "filtered_resps": [ + [ + -4.3542022705078125, + false + ], + [ + -7.10027551651001, + false + ], + [ + -22.711105346679688, + false + ], + [ + -8.928313255310059, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "8337c7e84c1d80f0359656a5652ee2eccec31324c1a601d8f001f21c3d6eb339", + "prompt_hash": "8b451711454ba8e6bdddaeb09477f1dfab62d3a1213412ba92e032097ccd54eb", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 16, + "doc": { + "question": "What optimizer is used to train the PRM model?", + "choices": [ + "SGD", + "Adam", + "RMSProp", + "AdamW" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What optimizer is used to train the PRM model?\nAnswer:", + " SGD" + ], + [ + "Question:What optimizer is used to train the PRM model?\nAnswer:", + " Adam" + ], + [ + "Question:What optimizer is used to train the PRM model?\nAnswer:", + " RMSProp" + ], + [ + "Question:What optimizer is used to train the PRM model?\nAnswer:", + " AdamW" + ] 
+ ], + "resps": [ + [ + [ + -6.562948226928711, + false + ] + ], + [ + [ + -3.0958411693573, + false + ] + ], + [ + [ + -7.73266077041626, + false + ] + ], + [ + [ + -6.134220123291016, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.562948226928711, + false + ], + [ + -3.0958411693573, + false + ], + [ + -7.73266077041626, + false + ], + [ + -6.134220123291016, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "51df039bbc085148d23b1cdb96c6980b12dc391a8c701d0a0f90203d7bdaf97d", + "prompt_hash": "d3ceb3491c7b106b1ac9831e2ac891743f9610880eaf5e54e5640aedae1c7beb", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 17, + "doc": { + "question": "What aggregation strategy is found to perform best when aggregating PRM per-step scores?", + "choices": [ + "Min", + "Product", + "Last", + "Average" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What aggregation strategy is found to perform best when aggregating PRM per-step scores?\nAnswer:", + " Min" + ], + [ + "Question:What aggregation strategy is found to perform best when aggregating PRM per-step scores?\nAnswer:", + " Product" + ], + [ + "Question:What aggregation strategy is found to perform best when aggregating PRM per-step scores?\nAnswer:", + " Last" + ], + [ + "Question:What aggregation strategy is found to perform best when aggregating PRM per-step scores?\nAnswer:", + " Average" + ] + ], + "resps": [ + [ + [ + -7.221947193145752, + false + ] + ], + [ + [ + -12.822408676147461, + false + ] + ], + [ + [ + -9.58028793334961, + false + ] + ], + [ + [ + -5.567903518676758, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.221947193145752, + false + ], + [ + -12.822408676147461, + false + ], + [ + -9.58028793334961, + false + ], + [ + -5.567903518676758, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "a2cca9fe75f490b9d2c160579dcc2cbc475bd78c0212f3a1262c0b35e1d734e4", + "prompt_hash": "a26d398514ea5f1ad9c41d45ab6110045960417a6404f3537fd8ad8e0be9af26", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 18, + "doc": { + "question": "How many revision trajectories are generated per question in the ReST^EM experiment?", + "choices": [ + "16", + "32", + "64", + "128" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many revision trajectories are generated per question in the ReST^EM experiment?\nAnswer:", + " 16" + ], + [ + "Question:How many revision trajectories are generated per question in the ReST^EM experiment?\nAnswer:", + " 32" + ], + [ + "Question:How many revision trajectories are generated per question in the ReST^EM experiment?\nAnswer:", + " 64" + ], + [ + "Question:How many revision trajectories are generated per question in the ReST^EM experiment?\nAnswer:", + " 128" + ] + ], + "resps": [ + [ + [ + -5.620156288146973, + false + ] + ], + [ + [ + -5.936687469482422, + false + ] + ], + [ + [ + -7.073380470275879, + false + ] + ], + [ + [ + -8.123558044433594, + false + ] + ] + ], + "filtered_resps": [ + [ + -5.620156288146973, + false + ], + [ + -5.936687469482422, + false + ], + [ + -7.073380470275879, + false + ], + [ + -8.123558044433594, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "25a48f5278cf30831b8fcc9459e68c0629abecb992d2ad92897f60dbb567c294", + "prompt_hash": 
"76fced7a6d9d02e3bc0d5d3a95f27d27dff4ed7aa83814c3d31253d98376feba", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 19, + "doc": { + "question": "What is used as the final answer selection method for majority voting in revision experiments?", + "choices": [ + "Best-of-N weighted within each chain only", + "Majority voting across all trajectories at once", + "Greedy selection from last revision", + "Averaging PRM scores over revisions" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is used as the final answer selection method for majority voting in revision experiments?\nAnswer:", + " Best-of-N weighted within each chain only" + ], + [ + "Question:What is used as the final answer selection method for majority voting in revision experiments?\nAnswer:", + " Majority voting across all trajectories at once" + ], + [ + "Question:What is used as the final answer selection method for majority voting in revision experiments?\nAnswer:", + " Greedy selection from last revision" + ], + [ + "Question:What is used as the final answer selection method for majority voting in revision experiments?\nAnswer:", + " Averaging PRM scores over revisions" + ] + ], + "resps": [ + [ + [ + -50.39833068847656, + false + ] + ], + [ + [ + -38.51416778564453, + false + ] + ], + [ + [ + -28.31541633605957, + false + ] + ], + [ + [ + -37.91179275512695, + false + ] + ] + ], + "filtered_resps": [ + [ + -50.39833068847656, + false + ], + [ + -38.51416778564453, + false + ], + [ + -28.31541633605957, + false + ], + [ + -37.91179275512695, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "32cec6c9dac45cdbd7b4ae2ff91380b2948758777d6d00398cf5fec3dc69ca7a", + "prompt_hash": "c252151d86183b34e325f3f2a07d726cb0f9c50eae68c43ccc1c5f589dd497f8", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 20, + "doc": { + "question": "What is the image resolution supported by MiniCPM-V 2.0 and MiniCPM-Llama3-V 2.5?", + "choices": [ + "448 × 448", + "1024 × 1024", + "1344 × 1344", + "2048 × 2048" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the image resolution supported by MiniCPM-V 2.0 and MiniCPM-Llama3-V 2.5?\nAnswer:", + " 448 × 448" + ], + [ + "Question:What is the image resolution supported by MiniCPM-V 2.0 and MiniCPM-Llama3-V 2.5?\nAnswer:", + " 1024 × 1024" + ], + [ + "Question:What is the image resolution supported by MiniCPM-V 2.0 and MiniCPM-Llama3-V 2.5?\nAnswer:", + " 1344 × 1344" + ], + [ + "Question:What is the image resolution supported by MiniCPM-V 2.0 and MiniCPM-Llama3-V 2.5?\nAnswer:", + " 2048 × 2048" + ] + ], + "resps": [ + [ + [ + -8.775908470153809, + false + ] + ], + [ + [ + -8.181621551513672, + false + ] + ], + [ + [ + -16.271177291870117, + false + ] + ], + [ + [ + -9.781907081604004, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.775908470153809, + false + ], + [ + -8.181621551513672, + false + ], + [ + -16.271177291870117, + false + ], + [ + -9.781907081604004, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "82090db10931345cca27913e7f38d4bfff7672f3fce02e9017bce5f71b0cdecd", + "prompt_hash": "330f53849fa0a8ddf62d3e07304633ea021f4b8ae15e19c127f3ccce591f8140", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + 
"doc_id": 21, + "doc": { + "question": "Which model in the MiniCPM-V series uses Llama3-Instruct 8B as its base LLM?", + "choices": [ + "MiniCPM-V 1.0", + "MiniCPM-V 2.0", + "MiniCPM-Llama3-V 2.5", + "MiniCPM-Gemma 2.0" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which model in the MiniCPM-V series uses Llama3-Instruct 8B as its base LLM?\nAnswer:", + " MiniCPM-V 1.0" + ], + [ + "Question:Which model in the MiniCPM-V series uses Llama3-Instruct 8B as its base LLM?\nAnswer:", + " MiniCPM-V 2.0" + ], + [ + "Question:Which model in the MiniCPM-V series uses Llama3-Instruct 8B as its base LLM?\nAnswer:", + " MiniCPM-Llama3-V 2.5" + ], + [ + "Question:Which model in the MiniCPM-V series uses Llama3-Instruct 8B as its base LLM?\nAnswer:", + " MiniCPM-Gemma 2.0" + ] + ], + "resps": [ + [ + [ + -9.62120246887207, + false + ] + ], + [ + [ + -9.776691436767578, + false + ] + ], + [ + [ + -20.5820255279541, + false + ] + ], + [ + [ + -21.101959228515625, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.62120246887207, + false + ], + [ + -9.776691436767578, + false + ], + [ + -20.5820255279541, + false + ], + [ + -21.101959228515625, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "85bd1e157ef3f4bd174f5747f899fc2ec436766a522f5474ac8656ebc11a1d46", + "prompt_hash": "aa8c134e336cda1667e37d843d645a0159908280757421028f42bf05f788bc7f", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 22, + "doc": { + "question": "Which technique is used in MiniCPM-Llama3-V 2.5 to reduce hallucination rates?", + "choices": [ + "Multi-head Attention Dropout", + "Gradient Checkpointing", + "RLAIF-V", + "Contextual Retrieval Augmentation" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which technique is used in MiniCPM-Llama3-V 2.5 to reduce hallucination rates?\nAnswer:", + " Multi-head Attention Dropout" + ], + [ + "Question:Which technique is used in MiniCPM-Llama3-V 2.5 to reduce hallucination rates?\nAnswer:", + " Gradient Checkpointing" + ], + [ + "Question:Which technique is used in MiniCPM-Llama3-V 2.5 to reduce hallucination rates?\nAnswer:", + " RLAIF-V" + ], + [ + "Question:Which technique is used in MiniCPM-Llama3-V 2.5 to reduce hallucination rates?\nAnswer:", + " Contextual Retrieval Augmentation" + ] + ], + "resps": [ + [ + [ + -20.53322410583496, + false + ] + ], + [ + [ + -13.587026596069336, + false + ] + ], + [ + [ + -17.670570373535156, + false + ] + ], + [ + [ + -19.511798858642578, + false + ] + ] + ], + "filtered_resps": [ + [ + -20.53322410583496, + false + ], + [ + -13.587026596069336, + false + ], + [ + -17.670570373535156, + false + ], + [ + -19.511798858642578, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "9cc7b1431eecfbefaab0cad4d1bfc115fbee148efffefcfe3d47f7606be3bfa1", + "prompt_hash": "fd737561175617926bfa8f244f46d3099c33f30f5b671c138f38204c596bfcc6", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 23, + "doc": { + "question": "What visual encoder is employed in MiniCPM-V?", + "choices": [ + "ViT-B/16", + "ResNet-101", + "SigLIP SoViT-400m/14", + "ConvNeXt-Base" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What visual encoder is employed in MiniCPM-V?\nAnswer:", + " ViT-B/16" + ], + [ + "Question:What visual encoder is employed in 
MiniCPM-V?\nAnswer:", + " ResNet-101" + ], + [ + "Question:What visual encoder is employed in MiniCPM-V?\nAnswer:", + " SigLIP SoViT-400m/14" + ], + [ + "Question:What visual encoder is employed in MiniCPM-V?\nAnswer:", + " ConvNeXt-Base" + ] + ], + "resps": [ + [ + [ + -8.633031845092773, + false + ] + ], + [ + [ + -7.921908378601074, + false + ] + ], + [ + [ + -60.48279571533203, + false + ] + ], + [ + [ + -12.991559028625488, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.633031845092773, + false + ], + [ + -7.921908378601074, + false + ], + [ + -60.48279571533203, + false + ], + [ + -12.991559028625488, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "0bcf0b692a7f5d9b6dac4b2405e70338d6c5e404c7bf8402cd5cf3bcd39a08a5", + "prompt_hash": "a19a91d025aa9bcc97165320583e88300005349ba251f66af54c7b65b72dba7a", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 24, + "doc": { + "question": "What type of quantization is used to reduce the memory requirement of MiniCPM-Llama3-V 2.5 to around 5GB?", + "choices": [ + "Q8_0 mode", + "Q4_K_M mode 4-bit", + "INT8 symmetric quantization", + "Post-training dynamic quantization" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What type of quantization is used to reduce the memory requirement of MiniCPM-Llama3-V 2.5 to around 5GB?\nAnswer:", + " Q8_0 mode" + ], + [ + "Question:What type of quantization is used to reduce the memory requirement of MiniCPM-Llama3-V 2.5 to around 5GB?\nAnswer:", + " Q4_K_M mode 4-bit" + ], + [ + "Question:What type of quantization is used to reduce the memory requirement of MiniCPM-Llama3-V 2.5 to around 5GB?\nAnswer:", + " INT8 symmetric quantization" + ], + [ + "Question:What type of quantization is used to reduce the memory requirement of MiniCPM-Llama3-V 2.5 to around 5GB?\nAnswer:", + " Post-training dynamic quantization" + ] + ], + "resps": [ + [ + [ + -27.991249084472656, + false + ] + ], + [ + [ + -52.5985107421875, + false + ] + ], + [ + [ + -18.92511749267578, + false + ] + ], + [ + [ + -19.703536987304688, + false + ] + ] + ], + "filtered_resps": [ + [ + -27.991249084472656, + false + ], + [ + -52.5985107421875, + false + ], + [ + -18.92511749267578, + false + ], + [ + -19.703536987304688, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "171a9a1a3420e7c88940f30480db74ec63f19e986bc54eb2b008f0d0f6dab4fe", + "prompt_hash": "9371558e2e30178b96278ed6261f1d3f2c9360cb841f0503c558ab264ac0153b", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 25, + "doc": { + "question": "Which visual encoder is used by default in mPLUG-Owl3?", + "choices": [ + "Siglip-400m", + "CLIP-ViT", + "BLIP-2", + "ResNet-101" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which visual encoder is used by default in mPLUG-Owl3?\nAnswer:", + " Siglip-400m" + ], + [ + "Question:Which visual encoder is used by default in mPLUG-Owl3?\nAnswer:", + " CLIP-ViT" + ], + [ + "Question:Which visual encoder is used by default in mPLUG-Owl3?\nAnswer:", + " BLIP-2" + ], + [ + "Question:Which visual encoder is used by default in mPLUG-Owl3?\nAnswer:", + " ResNet-101" + ] + ], + "resps": [ + [ + [ + -32.85588836669922, + false + ] + ], + [ + [ + -10.125411987304688, + false + ] + ], + [ + [ + -7.34080696105957, + false + ] + ], + [ + [ + 
-9.431852340698242, + false + ] + ] + ], + "filtered_resps": [ + [ + -32.85588836669922, + false + ], + [ + -10.125411987304688, + false + ], + [ + -7.34080696105957, + false + ], + [ + -9.431852340698242, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "45f0233992f0526f9e2ddd64462932a6d005f950483384bbfbca4e03e739a662", + "prompt_hash": "3720b56a27492cba6007f1d309029ab312d06cd133740ae26813ef842ac9ad04", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 26, + "doc": { + "question": "Which benchmark evaluates a model's ability to understand science diagrams?", + "choices": [ + "POPE", + "AI2D", + "MM-Vet", + "MMBench" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which benchmark evaluates a model's ability to understand science diagrams?\nAnswer:", + " POPE" + ], + [ + "Question:Which benchmark evaluates a model's ability to understand science diagrams?\nAnswer:", + " AI2D" + ], + [ + "Question:Which benchmark evaluates a model's ability to understand science diagrams?\nAnswer:", + " MM-Vet" + ], + [ + "Question:Which benchmark evaluates a model's ability to understand science diagrams?\nAnswer:", + " MMBench" + ] + ], + "resps": [ + [ + [ + -10.768207550048828, + false + ] + ], + [ + [ + -10.82089900970459, + false + ] + ], + [ + [ + -18.138641357421875, + false + ] + ], + [ + [ + -13.92343521118164, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.768207550048828, + false + ], + [ + -10.82089900970459, + false + ], + [ + -18.138641357421875, + false + ], + [ + -13.92343521118164, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "a0c39f7cbd5af613b6c4a6a7fe47428530770d0eef8a26d5baaf7d7f040ee0a4", + "prompt_hash": "76dc790ed1dd7a04d27b4229b74a54ac654ebe1a670bdc39f84965a75d3fd8b0", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 27, + "doc": { + "question": "Which component is reused as the Query vector in the Hyper Attention cross-attention mechanism?", + "choices": [ + "Key from visual features", + "Value from textual features", + "Query from the self-attention mechanism", + "Projection from the vision encoder" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which component is reused as the Query vector in the Hyper Attention cross-attention mechanism?\nAnswer:", + " Key from visual features" + ], + [ + "Question:Which component is reused as the Query vector in the Hyper Attention cross-attention mechanism?\nAnswer:", + " Value from textual features" + ], + [ + "Question:Which component is reused as the Query vector in the Hyper Attention cross-attention mechanism?\nAnswer:", + " Query from the self-attention mechanism" + ], + [ + "Question:Which component is reused as the Query vector in the Hyper Attention cross-attention mechanism?\nAnswer:", + " Projection from the vision encoder" + ] + ], + "resps": [ + [ + [ + -24.740676879882812, + false + ] + ], + [ + [ + -28.112730026245117, + false + ] + ], + [ + [ + -18.406658172607422, + false + ] + ], + [ + [ + -28.374183654785156, + false + ] + ] + ], + "filtered_resps": [ + [ + -24.740676879882812, + false + ], + [ + -28.112730026245117, + false + ], + [ + -18.406658172607422, + false + ], + [ + -28.374183654785156, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": 
"d6e1aeb928e60b21d65c1bac533238b17407b50ce0764e7918e781c5cf0ee053", + "prompt_hash": "11bffdf541567d8f57e5e5a7070dfad452865ebcb490b23d144e8a14c5068895", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 28, + "doc": { + "question": "How many image-text pairs are used for pre-training mPLUG-Owl3?", + "choices": [ + "10 million", + "25 million", + "41 million", + "65 million" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many image-text pairs are used for pre-training mPLUG-Owl3?\nAnswer:", + " 10 million" + ], + [ + "Question:How many image-text pairs are used for pre-training mPLUG-Owl3?\nAnswer:", + " 25 million" + ], + [ + "Question:How many image-text pairs are used for pre-training mPLUG-Owl3?\nAnswer:", + " 41 million" + ], + [ + "Question:How many image-text pairs are used for pre-training mPLUG-Owl3?\nAnswer:", + " 65 million" + ] + ], + "resps": [ + [ + [ + -8.595640182495117, + false + ] + ], + [ + [ + -10.362495422363281, + false + ] + ], + [ + [ + -12.96182918548584, + false + ] + ], + [ + [ + -11.332928657531738, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.595640182495117, + false + ], + [ + -10.362495422363281, + false + ], + [ + -12.96182918548584, + false + ], + [ + -11.332928657531738, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "6223644cc4cf68c322f7b5014ae6ff464cef6f65987f256ad3709d4167105ed3", + "prompt_hash": "8dc3d0f2ed68203b2a262b65e63a7bd7cfc7354608ce01694ec99ac2d9ff13af", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 29, + "doc": { + "question": "Which technique is introduced in mPLUG-Owl3 to preserve image positional information in interleaved sequences?", + "choices": [ + "Rotary Positional Embedding", + "Vision Transformer", + "Multimodal-Interleaved Rotary Position Embedding (MI-Rope)", + "Temporal Fusion Encoder" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which technique is introduced in mPLUG-Owl3 to preserve image positional information in interleaved sequences?\nAnswer:", + " Rotary Positional Embedding" + ], + [ + "Question:Which technique is introduced in mPLUG-Owl3 to preserve image positional information in interleaved sequences?\nAnswer:", + " Vision Transformer" + ], + [ + "Question:Which technique is introduced in mPLUG-Owl3 to preserve image positional information in interleaved sequences?\nAnswer:", + " Multimodal-Interleaved Rotary Position Embedding (MI-Rope)" + ], + [ + "Question:Which technique is introduced in mPLUG-Owl3 to preserve image positional information in interleaved sequences?\nAnswer:", + " Temporal Fusion Encoder" + ] + ], + "resps": [ + [ + [ + -20.903343200683594, + false + ] + ], + [ + [ + -11.205512046813965, + false + ] + ], + [ + [ + -62.90228271484375, + false + ] + ], + [ + [ + -19.450517654418945, + false + ] + ] + ], + "filtered_resps": [ + [ + -20.903343200683594, + false + ], + [ + -11.205512046813965, + false + ], + [ + -62.90228271484375, + false + ], + [ + -19.450517654418945, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "cc614c25a588bd9452a11be0ad9545544972a67e547ef9d3087f295344e8ef11", + "prompt_hash": "81b34377e5e16772b0ab5f0bf087b92e357f2585521d1c53857c44370f4f33e3", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 
0.0 + }, + { + "doc_id": 30, + "doc": { + "question": "What does the HybridRAG system combine to enhance information retrieval?", + "choices": [ + "VectorRAG and WebRAG", + "GraphRAG and Symbolic Logic", + "VectorRAG and GraphRAG", + "Knowledge Graph and Decision Trees" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What does the HybridRAG system combine to enhance information retrieval?\nAnswer:", + " VectorRAG and WebRAG" + ], + [ + "Question:What does the HybridRAG system combine to enhance information retrieval?\nAnswer:", + " GraphRAG and Symbolic Logic" + ], + [ + "Question:What does the HybridRAG system combine to enhance information retrieval?\nAnswer:", + " VectorRAG and GraphRAG" + ], + [ + "Question:What does the HybridRAG system combine to enhance information retrieval?\nAnswer:", + " Knowledge Graph and Decision Trees" + ] + ], + "resps": [ + [ + [ + -34.7384147644043, + false + ] + ], + [ + [ + -39.80352020263672, + false + ] + ], + [ + [ + -29.742319107055664, + false + ] + ], + [ + [ + -24.235549926757812, + false + ] + ] + ], + "filtered_resps": [ + [ + -34.7384147644043, + false + ], + [ + -39.80352020263672, + false + ], + [ + -29.742319107055664, + false + ], + [ + -24.235549926757812, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "4e4bddf8d1d7a09a1e25a76b3b334f4773dd3a665f508a46dcf705157f7e844f", + "prompt_hash": "0fdedbbb63fe8d41251f7883aa0a4a161425d586290781222f71de0ac51d00e7", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 31, + "doc": { + "question": "What is the first step in the knowledge extraction process for constructing a knowledge graph?", + "choices": [ + "Entity Disambiguation", + "Knowledge Fusion", + "Knowledge Extraction", + "Temporal KG Construction" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the first step in the knowledge extraction process for constructing a knowledge graph?\nAnswer:", + " Entity Disambiguation" + ], + [ + "Question:What is the first step in the knowledge extraction process for constructing a knowledge graph?\nAnswer:", + " Knowledge Fusion" + ], + [ + "Question:What is the first step in the knowledge extraction process for constructing a knowledge graph?\nAnswer:", + " Knowledge Extraction" + ], + [ + "Question:What is the first step in the knowledge extraction process for constructing a knowledge graph?\nAnswer:", + " Temporal KG Construction" + ] + ], + "resps": [ + [ + [ + -13.80317497253418, + false + ] + ], + [ + [ + -13.312759399414062, + false + ] + ], + [ + [ + -6.910901069641113, + false + ] + ], + [ + [ + -18.86545181274414, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.80317497253418, + false + ], + [ + -13.312759399414062, + false + ], + [ + -6.910901069641113, + false + ], + [ + -18.86545181274414, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "121e6b0d2999afd4b068adada4169a6657a154acf6504ec312f11dbd1f473325", + "prompt_hash": "bd6cd21014f88e0e46adf28cece79c686071b520ee6f8b416105fa27e93b2cc7", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 32, + "doc": { + "question": "Which LLM embedding model is used in the VectorRAG setup?", + "choices": [ + "text-embedding-babbage-001", + "text-embedding-ada-002", + "sentence-transformers/all-MiniLM-L6-v2", + "bert-base-nli-mean-tokens" + ], + 
"answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which LLM embedding model is used in the VectorRAG setup?\nAnswer:", + " text-embedding-babbage-001" + ], + [ + "Question:Which LLM embedding model is used in the VectorRAG setup?\nAnswer:", + " text-embedding-ada-002" + ], + [ + "Question:Which LLM embedding model is used in the VectorRAG setup?\nAnswer:", + " sentence-transformers/all-MiniLM-L6-v2" + ], + [ + "Question:Which LLM embedding model is used in the VectorRAG setup?\nAnswer:", + " bert-base-nli-mean-tokens" + ] + ], + "resps": [ + [ + [ + -35.047874450683594, + false + ] + ], + [ + [ + -8.769493103027344, + false + ] + ], + [ + [ + -9.794758796691895, + false + ] + ], + [ + [ + -12.21292781829834, + false + ] + ] + ], + "filtered_resps": [ + [ + -35.047874450683594, + false + ], + [ + -8.769493103027344, + false + ], + [ + -9.794758796691895, + false + ], + [ + -12.21292781829834, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "e5d0a848899461bf1ccf87ebe38f1531bbd1129582b1096b830f7c05a1fc2068", + "prompt_hash": "286cc4d87a6f8915033be3fe3283998246025225bbdb787a003cace0368e9b03", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 33, + "doc": { + "question": "Which sector is NOT listed among those included in the Nifty 50 earnings call dataset?", + "choices": [ + "Consumer Goods", + "Aerospace", + "Healthcare", + "Energy - Power" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which sector is NOT listed among those included in the Nifty 50 earnings call dataset?\nAnswer:", + " Consumer Goods" + ], + [ + "Question:Which sector is NOT listed among those included in the Nifty 50 earnings call dataset?\nAnswer:", + " Aerospace" + ], + [ + "Question:Which sector is NOT listed among those included in the Nifty 50 earnings call dataset?\nAnswer:", + " Healthcare" + ], + [ + "Question:Which sector is NOT listed among those included in the Nifty 50 earnings call dataset?\nAnswer:", + " Energy - Power" + ] + ], + "resps": [ + [ + [ + -12.80567741394043, + false + ] + ], + [ + [ + -8.0639066696167, + false + ] + ], + [ + [ + -6.295193195343018, + false + ] + ], + [ + [ + -19.14309310913086, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.80567741394043, + false + ], + [ + -8.0639066696167, + false + ], + [ + -6.295193195343018, + false + ], + [ + -19.14309310913086, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "c4f0abe77cfd8a8682c3bd77a15030f0ad96cd9ccdaee1b95042dad325560bbd", + "prompt_hash": "7f81198daf1670620d9f0b27b723a619ea15b2c7005f9d523ac2b38ac90e1a67", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 34, + "doc": { + "question": "What metric measures how much of the generated answer can be inferred from the provided context?", + "choices": [ + "Context Precision", + "Faithfulness", + "Answer Relevance", + "Context Recall" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What metric measures how much of the generated answer can be inferred from the provided context?\nAnswer:", + " Context Precision" + ], + [ + "Question:What metric measures how much of the generated answer can be inferred from the provided context?\nAnswer:", + " Faithfulness" + ], + [ + "Question:What metric measures how much of the generated answer can be inferred from the provided 
context?\nAnswer:", + " Answer Relevance" + ], + [ + "Question:What metric measures how much of the generated answer can be inferred from the provided context?\nAnswer:", + " Context Recall" + ] + ], + "resps": [ + [ + [ + -13.284663200378418, + false + ] + ], + [ + [ + -9.042125701904297, + false + ] + ], + [ + [ + -9.404643058776855, + false + ] + ], + [ + [ + -12.565004348754883, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.284663200378418, + false + ], + [ + -9.042125701904297, + false + ], + [ + -9.404643058776855, + false + ], + [ + -12.565004348754883, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "0a7992bd5405e2455bb7ae703195734ee1820ba790899d28e13c6ebe89a23403", + "prompt_hash": "c9b81d11c3bf2b77ca20585ce25ccdcb42b99e3832f02d5ec511fea14e837b3b", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 35, + "doc": { + "question": "What is the activation function used in JumpReLU sparse autoencoders?", + "choices": [ + "Sigmoid function", + "Shifted Heaviside step function combined with ReLU", + "Standard ReLU without thresholding", + "Tanh with adaptive scaling" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the activation function used in JumpReLU sparse autoencoders?\nAnswer:", + " Sigmoid function" + ], + [ + "Question:What is the activation function used in JumpReLU sparse autoencoders?\nAnswer:", + " Shifted Heaviside step function combined with ReLU" + ], + [ + "Question:What is the activation function used in JumpReLU sparse autoencoders?\nAnswer:", + " Standard ReLU without thresholding" + ], + [ + "Question:What is the activation function used in JumpReLU sparse autoencoders?\nAnswer:", + " Tanh with adaptive scaling" + ] + ], + "resps": [ + [ + [ + -6.715402603149414, + false + ] + ], + [ + [ + -36.528255462646484, + false + ] + ], + [ + [ + -22.49020004272461, + false + ] + ], + [ + [ + -20.276412963867188, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.715402603149414, + false + ], + [ + -36.528255462646484, + false + ], + [ + -22.49020004272461, + false + ], + [ + -20.276412963867188, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "cd9fc825aa138a5de9abaf4b87783158bd71ee7a58cc07a955ccee016045d4b4", + "prompt_hash": "dd3b8472617fc9214c3074ab154d01f4b93ce8dc5cc3ce9cad8f5a02e1ca9c65", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 36, + "doc": { + "question": "What optimizer is used to train the sparse autoencoders in Gemma Scope?", + "choices": [ + "SGD with momentum", + "RMSprop", + "Adam with (β₁, β₂) = (0, 0.999)", + "LAMB optimizer" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What optimizer is used to train the sparse autoencoders in Gemma Scope?\nAnswer:", + " SGD with momentum" + ], + [ + "Question:What optimizer is used to train the sparse autoencoders in Gemma Scope?\nAnswer:", + " RMSprop" + ], + [ + "Question:What optimizer is used to train the sparse autoencoders in Gemma Scope?\nAnswer:", + " Adam with (β₁, β₂) = (0, 0.999)" + ], + [ + "Question:What optimizer is used to train the sparse autoencoders in Gemma Scope?\nAnswer:", + " LAMB optimizer" + ] + ], + "resps": [ + [ + [ + -9.294715881347656, + false + ] + ], + [ + [ + -6.094078540802002, + false + ] + ], + [ + [ + -42.684532165527344, + false + ] + ], + [ + [ + 
-18.675086975097656, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.294715881347656, + false + ], + [ + -6.094078540802002, + false + ], + [ + -42.684532165527344, + false + ], + [ + -18.675086975097656, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "de2f0308b302e5574f464cfe4d280ec12ddce5ae7374d0329b8ba50f15149a2b", + "prompt_hash": "bfc1545c194ee3ba20c4127cc60c14269bd7c1acea9a6895bc2d69d521fd7eee", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 37, + "doc": { + "question": "How many sparse autoencoders were released in the main Gemma Scope suite?", + "choices": [ + "Over 500", + "More than 400", + "Exactly 1000", + "About 100" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:How many sparse autoencoders were released in the main Gemma Scope suite?\nAnswer:", + " Over 500" + ], + [ + "Question:How many sparse autoencoders were released in the main Gemma Scope suite?\nAnswer:", + " More than 400" + ], + [ + "Question:How many sparse autoencoders were released in the main Gemma Scope suite?\nAnswer:", + " Exactly 1000" + ], + [ + "Question:How many sparse autoencoders were released in the main Gemma Scope suite?\nAnswer:", + " About 100" + ] + ], + "resps": [ + [ + [ + -14.328933715820312, + false + ] + ], + [ + [ + -15.292580604553223, + false + ] + ], + [ + [ + -18.465763092041016, + false + ] + ], + [ + [ + -13.992822647094727, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.328933715820312, + false + ], + [ + -15.292580604553223, + false + ], + [ + -18.465763092041016, + false + ], + [ + -13.992822647094727, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7850c75efcc76953fb620cc4a3af670b82c3e762747707a436a0957e4d0b0305", + "prompt_hash": "48ee84c6b1656b477831163a4cc8b71f25196393ec4831517a8282d90481b03f", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 38, + "doc": { + "question": "Which subset of The Pile dataset did the SAEs perform worst on?", + "choices": [ + "PubMed Central", + "Books3", + "Europarl", + "DeepMind Mathematics" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which subset of The Pile dataset did the SAEs perform worst on?\nAnswer:", + " PubMed Central" + ], + [ + "Question:Which subset of The Pile dataset did the SAEs perform worst on?\nAnswer:", + " Books3" + ], + [ + "Question:Which subset of The Pile dataset did the SAEs perform worst on?\nAnswer:", + " Europarl" + ], + [ + "Question:Which subset of The Pile dataset did the SAEs perform worst on?\nAnswer:", + " DeepMind Mathematics" + ] + ], + "resps": [ + [ + [ + -16.985855102539062, + false + ] + ], + [ + [ + -16.546205520629883, + false + ] + ], + [ + [ + -13.369335174560547, + false + ] + ], + [ + [ + -20.967653274536133, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.985855102539062, + false + ], + [ + -16.546205520629883, + false + ], + [ + -13.369335174560547, + false + ], + [ + -20.967653274536133, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "a4c6cc58667fe8997ffb8b3b6267ff80e8ed45b7165caa316b931df0e4d133d0", + "prompt_hash": "076909a7af06d7da4669d0113738cb750045745399d1ab5485f09fa67193a98e", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 39, 
+ "doc": { + "question": "Where are the released weights of Gemma Scope hosted?", + "choices": [ + "OpenAI API", + "Google Cloud Storage", + "HuggingFace", + "Kaggle Datasets" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Where are the released weights of Gemma Scope hosted?\nAnswer:", + " OpenAI API" + ], + [ + "Question:Where are the released weights of Gemma Scope hosted?\nAnswer:", + " Google Cloud Storage" + ], + [ + "Question:Where are the released weights of Gemma Scope hosted?\nAnswer:", + " HuggingFace" + ], + [ + "Question:Where are the released weights of Gemma Scope hosted?\nAnswer:", + " Kaggle Datasets" + ] + ], + "resps": [ + [ + [ + -10.245248794555664, + false + ] + ], + [ + [ + -8.508049011230469, + false + ] + ], + [ + [ + -8.190152168273926, + false + ] + ], + [ + [ + -14.849760055541992, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.245248794555664, + false + ], + [ + -8.508049011230469, + false + ], + [ + -8.190152168273926, + false + ], + [ + -14.849760055541992, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "e6a6066ec0f1781c89108fb4c80a0f7fecc28035e65e670b6c243bb3407759fa", + "prompt_hash": "e21190f5c3f44f85ac12be0cfc218a9571772a27f53432e7b90a7b2bce991577", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 40, + "doc": { + "question": "What is the accuracy of LLaMA2-7B on GSM8K when using few-shot CoT prompting?", + "choices": [ + "12.51%", + "24.34%", + "36.46%", + "63.91%" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the accuracy of LLaMA2-7B on GSM8K when using few-shot CoT prompting?\nAnswer:", + " 12.51%" + ], + [ + "Question:What is the accuracy of LLaMA2-7B on GSM8K when using few-shot CoT prompting?\nAnswer:", + " 24.34%" + ], + [ + "Question:What is the accuracy of LLaMA2-7B on GSM8K when using few-shot CoT prompting?\nAnswer:", + " 36.46%" + ], + [ + "Question:What is the accuracy of LLaMA2-7B on GSM8K when using few-shot CoT prompting?\nAnswer:", + " 63.91%" + ] + ], + "resps": [ + [ + [ + -18.55419921875, + false + ] + ], + [ + [ + -17.668134689331055, + false + ] + ], + [ + [ + -17.610607147216797, + false + ] + ], + [ + [ + -16.299419403076172, + false + ] + ] + ], + "filtered_resps": [ + [ + -18.55419921875, + false + ], + [ + -17.668134689331055, + false + ], + [ + -17.610607147216797, + false + ], + [ + -16.299419403076172, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "740e8672e7d357a835b0e5f48081bf47de1508d71d7ddbacef30b6ccfe7647b6", + "prompt_hash": "86183804e555821736b88e17d55e2e58e0f2ba608af3b603957c175c4bc474d1", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 41, + "doc": { + "question": "Which action in the reasoning process is inspired by least-to-most prompting and involves generating sub-questions and their answers?", + "choices": [ + "A1: Propose an one-step thought", + "A2: Propose the remaining thought steps", + "A3: Propose next sub-question along with its answer", + "A5: Rephrase the question/sub-question" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which action in the reasoning process is inspired by least-to-most prompting and involves generating sub-questions and their answers?\nAnswer:", + " A1: Propose an one-step thought" + ], + [ + "Question:Which action in the 
reasoning process is inspired by least-to-most prompting and involves generating sub-questions and their answers?\nAnswer:", + " A2: Propose the remaining thought steps" + ], + [ + "Question:Which action in the reasoning process is inspired by least-to-most prompting and involves generating sub-questions and their answers?\nAnswer:", + " A3: Propose next sub-question along with its answer" + ], + [ + "Question:Which action in the reasoning process is inspired by least-to-most prompting and involves generating sub-questions and their answers?\nAnswer:", + " A5: Rephrase the question/sub-question" + ] + ], + "resps": [ + [ + [ + -41.947181701660156, + false + ] + ], + [ + [ + -44.858089447021484, + false + ] + ], + [ + [ + -43.70464324951172, + false + ] + ], + [ + [ + -39.109134674072266, + false + ] + ] + ], + "filtered_resps": [ + [ + -41.947181701660156, + false + ], + [ + -44.858089447021484, + false + ], + [ + -43.70464324951172, + false + ], + [ + -39.109134674072266, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "3ea6706835ff4cef39765d4fbc51408146c9d806c0b39ea78d8336ca66d2f0c0", + "prompt_hash": "4473cd7385465f7f8305ee6cbb2a236cd6ae73493ae23b5dec906975d167b2f7", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 42, + "doc": { + "question": "According to the ablation study, replacing \\( r_2 \\) with random values in RAP causes a significant drop in accuracy on which dataset?", + "choices": [ + "StrategyQA", + "GSM8K", + "MATH-500", + "SVAMP" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:According to the ablation study, replacing \\( r_2 \\) with random values in RAP causes a significant drop in accuracy on which dataset?\nAnswer:", + " StrategyQA" + ], + [ + "Question:According to the ablation study, replacing \\( r_2 \\) with random values in RAP causes a significant drop in accuracy on which dataset?\nAnswer:", + " GSM8K" + ], + [ + "Question:According to the ablation study, replacing \\( r_2 \\) with random values in RAP causes a significant drop in accuracy on which dataset?\nAnswer:", + " MATH-500" + ], + [ + "Question:According to the ablation study, replacing \\( r_2 \\) with random values in RAP causes a significant drop in accuracy on which dataset?\nAnswer:", + " SVAMP" + ] + ], + "resps": [ + [ + [ + -18.641420364379883, + false + ] + ], + [ + [ + -9.388848304748535, + false + ] + ], + [ + [ + -26.169233322143555, + false + ] + ], + [ + [ + -20.590858459472656, + false + ] + ] + ], + "filtered_resps": [ + [ + -18.641420364379883, + false + ], + [ + -9.388848304748535, + false + ], + [ + -26.169233322143555, + false + ], + [ + -20.590858459472656, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "d54783b122fe68a4da778a99bf5b317c3d85719946a207ee26874b0ebdf376b7", + "prompt_hash": "d3f2d537a02b19810dfd28784b8c1fb5d5a0db3ef651fe26c655f2a64292a1ad", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 43, + "doc": { + "question": "What average number of inferences is required to solve a GSM8K question using the MCTS self-generator on LLaMA2-7B?", + "choices": [ + "148.90", + "166.81", + "348.6k", + "367.1k" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What average number of inferences is required to solve a GSM8K question using the MCTS self-generator on 
LLaMA2-7B?\nAnswer:", + " 148.90" + ], + [ + "Question:What average number of inferences is required to solve a GSM8K question using the MCTS self-generator on LLaMA2-7B?\nAnswer:", + " 166.81" + ], + [ + "Question:What average number of inferences is required to solve a GSM8K question using the MCTS self-generator on LLaMA2-7B?\nAnswer:", + " 348.6k" + ], + [ + "Question:What average number of inferences is required to solve a GSM8K question using the MCTS self-generator on LLaMA2-7B?\nAnswer:", + " 367.1k" + ] + ], + "resps": [ + [ + [ + -18.378009796142578, + false + ] + ], + [ + [ + -17.80447006225586, + false + ] + ], + [ + [ + -22.77227020263672, + false + ] + ], + [ + [ + -22.721303939819336, + false + ] + ] + ], + "filtered_resps": [ + [ + -18.378009796142578, + false + ], + [ + -17.80447006225586, + false + ], + [ + -22.77227020263672, + false + ], + [ + -22.721303939819336, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "3376851ef9784dbe4a9c6462e7e4f78293c0bbdef2ccf6833768c5eb0ba07529", + "prompt_hash": "2266edfa9c35484137149027d1a51807b862ff7d81c9c1a8e6f890426a2a021e", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 44, + "doc": { + "question": "Which discriminator model achieved the highest accuracy when verifying LLaMA3-8B-Instruct reasoning trajectories?", + "choices": [ + "LLaMA3.1-8B-Instruct", + "GPT-4 (2024-05-01)", + "Phi3-Mini-Instruct", + "LLaMA3-8B-Instruct" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which discriminator model achieved the highest accuracy when verifying LLaMA3-8B-Instruct reasoning trajectories?\nAnswer:", + " LLaMA3.1-8B-Instruct" + ], + [ + "Question:Which discriminator model achieved the highest accuracy when verifying LLaMA3-8B-Instruct reasoning trajectories?\nAnswer:", + " GPT-4 (2024-05-01)" + ], + [ + "Question:Which discriminator model achieved the highest accuracy when verifying LLaMA3-8B-Instruct reasoning trajectories?\nAnswer:", + " Phi3-Mini-Instruct" + ], + [ + "Question:Which discriminator model achieved the highest accuracy when verifying LLaMA3-8B-Instruct reasoning trajectories?\nAnswer:", + " LLaMA3-8B-Instruct" + ] + ], + "resps": [ + [ + [ + -19.940017700195312, + false + ] + ], + [ + [ + -19.82349395751953, + false + ] + ], + [ + [ + -19.991985321044922, + false + ] + ], + [ + [ + -6.56914758682251, + false + ] + ] + ], + "filtered_resps": [ + [ + -19.940017700195312, + false + ], + [ + -19.82349395751953, + false + ], + [ + -19.991985321044922, + false + ], + [ + -6.56914758682251, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "a5fa2750dda4b0a422394d424529a33f963cd403d2e052df741d8830b813cecd", + "prompt_hash": "8de90c8da0c1c343da54a74d5862f70ef62ae9ebc2eb448c91b7095ec982465b", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 45, + "doc": { + "question": "What is the name of the benchmark designed to evaluate long-form writing with outputs over 4,000 words?", + "choices": [ + "LongBench-Write", + "WriteEval-Bench", + "UltraLong-Test", + "GenLength-Benchmark" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the name of the benchmark designed to evaluate long-form writing with outputs over 4,000 words?\nAnswer:", + " LongBench-Write" + ], + [ + "Question:What is the name of the benchmark designed to 
evaluate long-form writing with outputs over 4,000 words?\nAnswer:", + " WriteEval-Bench" + ], + [ + "Question:What is the name of the benchmark designed to evaluate long-form writing with outputs over 4,000 words?\nAnswer:", + " UltraLong-Test" + ], + [ + "Question:What is the name of the benchmark designed to evaluate long-form writing with outputs over 4,000 words?\nAnswer:", + " GenLength-Benchmark" + ] + ], + "resps": [ + [ + [ + -17.639150619506836, + false + ] + ], + [ + [ + -22.761621475219727, + false + ] + ], + [ + [ + -22.010944366455078, + false + ] + ], + [ + [ + -26.39200210571289, + false + ] + ] + ], + "filtered_resps": [ + [ + -17.639150619506836, + false + ], + [ + -22.761621475219727, + false + ], + [ + -22.010944366455078, + false + ], + [ + -26.39200210571289, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "54fb257c443ac9f24ce1b11083739858fd3f2cdf765fd1910193190b209fade9", + "prompt_hash": "aa259a28a254ba6c18a005278ba04aed52a7d3bfb7946d5685b0f9eabcea9c25", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 46, + "doc": { + "question": "What dataset was constructed using GPT-4o to produce long outputs exceeding 10,000 words?", + "choices": [ + "WriteMaster-10k", + "LongForm-Builder", + "AgentData-6k", + "LongWriter-6k" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What dataset was constructed using GPT-4o to produce long outputs exceeding 10,000 words?\nAnswer:", + " WriteMaster-10k" + ], + [ + "Question:What dataset was constructed using GPT-4o to produce long outputs exceeding 10,000 words?\nAnswer:", + " LongForm-Builder" + ], + [ + "Question:What dataset was constructed using GPT-4o to produce long outputs exceeding 10,000 words?\nAnswer:", + " AgentData-6k" + ], + [ + "Question:What dataset was constructed using GPT-4o to produce long outputs exceeding 10,000 words?\nAnswer:", + " LongWriter-6k" + ] + ], + "resps": [ + [ + [ + -33.696617126464844, + false + ] + ], + [ + [ + -27.476381301879883, + false + ] + ], + [ + [ + -29.436206817626953, + false + ] + ], + [ + [ + -28.958377838134766, + false + ] + ] + ], + "filtered_resps": [ + [ + -33.696617126464844, + false + ], + [ + -27.476381301879883, + false + ], + [ + -29.436206817626953, + false + ], + [ + -28.958377838134766, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "27d094e756d8f9ebc7fb50418d9b28707cfe3d244a12dc577e75c18f11d89e18", + "prompt_hash": "517bc5fb77a00c66e82a92783f2a4c11c8247ed5245f6b23c04fb4d3124ce0dc", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 47, + "doc": { + "question": "What approach does AgentWrite use to generate long-form content?", + "choices": [ + "Retrieval-augmented prompting", + "Divide-and-conquer planning", + "Backtranslation-based expansion", + "Reinforcement learning tuning" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What approach does AgentWrite use to generate long-form content?\nAnswer:", + " Retrieval-augmented prompting" + ], + [ + "Question:What approach does AgentWrite use to generate long-form content?\nAnswer:", + " Divide-and-conquer planning" + ], + [ + "Question:What approach does AgentWrite use to generate long-form content?\nAnswer:", + " Backtranslation-based expansion" + ], + [ + "Question:What approach does AgentWrite use to generate 
long-form content?\nAnswer:", + " Reinforcement learning tuning" + ] + ], + "resps": [ + [ + [ + -15.73776626586914, + false + ] + ], + [ + [ + -23.26085662841797, + false + ] + ], + [ + [ + -28.372581481933594, + false + ] + ], + [ + [ + -19.8125, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.73776626586914, + false + ], + [ + -23.26085662841797, + false + ], + [ + -28.372581481933594, + false + ], + [ + -19.8125, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "3c94c38725119f23b11c90a449ac7187d854ac1e0b4570f5f12610bd3bf40f2a", + "prompt_hash": "bdefd51ded190912aa2c257c03204c8892b280aec8dc0411bdb4075b87ebe1cf", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 48, + "doc": { + "question": "What is the typical maximum output length achieved by current long-context models before improvement?", + "choices": [ + "Around 2,000 words", + "Approximately 5,000 words", + "Less than 1,000 words", + "Over 10,000 words" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the typical maximum output length achieved by current long-context models before improvement?\nAnswer:", + " Around 2,000 words" + ], + [ + "Question:What is the typical maximum output length achieved by current long-context models before improvement?\nAnswer:", + " Approximately 5,000 words" + ], + [ + "Question:What is the typical maximum output length achieved by current long-context models before improvement?\nAnswer:", + " Less than 1,000 words" + ], + [ + "Question:What is the typical maximum output length achieved by current long-context models before improvement?\nAnswer:", + " Over 10,000 words" + ] + ], + "resps": [ + [ + [ + -16.012025833129883, + false + ] + ], + [ + [ + -17.129650115966797, + false + ] + ], + [ + [ + -13.902952194213867, + false + ] + ], + [ + [ + -14.598623275756836, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.012025833129883, + false + ], + [ + -17.129650115966797, + false + ], + [ + -13.902952194213867, + false + ], + [ + -14.598623275756836, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "46daa63f9f10a6d83f6a7fa2ea4265bd4a87bc34680ce77a6fcfee59cc290a17", + "prompt_hash": "98cf74434b5cc943c6188659f011b859a46162e99a69d36fd75e3768051df5c8", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 49, + "doc": { + "question": "Which fine-tuning method is used to help the model better follow long writing instructions?", + "choices": [ + "Contrastive Alignment Learning", + "DPO (Direct Preference Optimization)", + "Reinforcement Reward Ranking", + "Instruction Injection Strategy" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which fine-tuning method is used to help the model better follow long writing instructions?\nAnswer:", + " Contrastive Alignment Learning" + ], + [ + "Question:Which fine-tuning method is used to help the model better follow long writing instructions?\nAnswer:", + " DPO (Direct Preference Optimization)" + ], + [ + "Question:Which fine-tuning method is used to help the model better follow long writing instructions?\nAnswer:", + " Reinforcement Reward Ranking" + ], + [ + "Question:Which fine-tuning method is used to help the model better follow long writing instructions?\nAnswer:", + " Instruction Injection Strategy" + ] + ], + "resps": [ + [ + [ + 
-19.225650787353516, + false + ] + ], + [ + [ + -16.75514793395996, + false + ] + ], + [ + [ + -30.011341094970703, + false + ] + ], + [ + [ + -21.480850219726562, + false + ] + ] + ], + "filtered_resps": [ + [ + -19.225650787353516, + false + ], + [ + -16.75514793395996, + false + ], + [ + -30.011341094970703, + false + ], + [ + -21.480850219726562, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "2defad948ec9d397fb72a91e28c7e97a7601e4e89f3e19b32498452725bc6940", + "prompt_hash": "8797c5e2080c8bef6869ed8990e4a53cc5e23ffd3c224c7b33dc177f608eae0c", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 50, + "doc": { + "question": "Which models are cited as examples of frontier Large Language Models?", + "choices": [ + "ChatGPT, Gemini, Opus, LLaMA-3", + "GPT-3.5, Claude, Gemini, Mistral", + "ChatGPT, Palm, BERT, LLaMA-2", + "GPT-4, PaLM-2, Claude, Falcon" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which models are cited as examples of frontier Large Language Models?\nAnswer:", + " ChatGPT, Gemini, Opus, LLaMA-3" + ], + [ + "Question:Which models are cited as examples of frontier Large Language Models?\nAnswer:", + " GPT-3.5, Claude, Gemini, Mistral" + ], + [ + "Question:Which models are cited as examples of frontier Large Language Models?\nAnswer:", + " ChatGPT, Palm, BERT, LLaMA-2" + ], + [ + "Question:Which models are cited as examples of frontier Large Language Models?\nAnswer:", + " GPT-4, PaLM-2, Claude, Falcon" + ] + ], + "resps": [ + [ + [ + -23.581823348999023, + false + ] + ], + [ + [ + -16.29320526123047, + false + ] + ], + [ + [ + -29.092092514038086, + false + ] + ], + [ + [ + -26.421634674072266, + false + ] + ] + ], + "filtered_resps": [ + [ + -23.581823348999023, + false + ], + [ + -16.29320526123047, + false + ], + [ + -29.092092514038086, + false + ], + [ + -26.421634674072266, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f6ada91318d37cb7f383e38593a0e69095e66499aa66b673bcdc84d3a89c40e9", + "prompt_hash": "59bc8556ec178cc8d50dc761afb81b367a36dc4f12592b3bb9608d2bc6942926", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 51, + "doc": { + "question": "Which benchmark is used to evaluate performance in a simulated e-commerce environment?", + "choices": [ + "WebShop", + "WebArena", + "OpenAgent", + "SimWeb" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which benchmark is used to evaluate performance in a simulated e-commerce environment?\nAnswer:", + " WebShop" + ], + [ + "Question:Which benchmark is used to evaluate performance in a simulated e-commerce environment?\nAnswer:", + " WebArena" + ], + [ + "Question:Which benchmark is used to evaluate performance in a simulated e-commerce environment?\nAnswer:", + " OpenAgent" + ], + [ + "Question:Which benchmark is used to evaluate performance in a simulated e-commerce environment?\nAnswer:", + " SimWeb" + ] + ], + "resps": [ + [ + [ + -10.959060668945312, + false + ] + ], + [ + [ + -12.799799919128418, + false + ] + ], + [ + [ + -15.167540550231934, + false + ] + ], + [ + [ + -18.200868606567383, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.959060668945312, + false + ], + [ + -12.799799919128418, + false + ], + [ + -15.167540550231934, + false + ], + [ + -18.200868606567383, + false + ] + ], + "filter": 
"none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "bcd557127a7ef8d24395fff0b170b5c6f0002615a0db26a1fac8c3f77df7782a", + "prompt_hash": "5923b738e042d2f50011132393f1b705029ab444f06fba66639ea6f14d902829", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 52, + "doc": { + "question": "What was the zero-shot success rate of Agent Q in the real-world booking experiment?", + "choices": [ + "81.7%", + "62.6%", + "67.2%", + "95.4%" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What was the zero-shot success rate of Agent Q in the real-world booking experiment?\nAnswer:", + " 81.7%" + ], + [ + "Question:What was the zero-shot success rate of Agent Q in the real-world booking experiment?\nAnswer:", + " 62.6%" + ], + [ + "Question:What was the zero-shot success rate of Agent Q in the real-world booking experiment?\nAnswer:", + " 67.2%" + ], + [ + "Question:What was the zero-shot success rate of Agent Q in the real-world booking experiment?\nAnswer:", + " 95.4%" + ] + ], + "resps": [ + [ + [ + -14.445587158203125, + false + ] + ], + [ + [ + -14.898151397705078, + false + ] + ], + [ + [ + -14.475791931152344, + false + ] + ], + [ + [ + -14.124256134033203, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.445587158203125, + false + ], + [ + -14.898151397705078, + false + ], + [ + -14.475791931152344, + false + ], + [ + -14.124256134033203, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "9b124c03b2602c4e92e8adcbfdb16a4ad8c364b3c126fb59a84f65b6cef4a1fc", + "prompt_hash": "0a95943469f8ba1096d26ad4f4a42c731bf57e1a3ca93c33dff45a879ee03016", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 53, + "doc": { + "question": "Which algorithm is used to optimize the policy in the proposed training method?", + "choices": [ + "Direct Preference Optimization (DPO)", + "Proximal Policy Optimization (PPO)", + "Q-learning", + "Actor-Critic" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which algorithm is used to optimize the policy in the proposed training method?\nAnswer:", + " Direct Preference Optimization (DPO)" + ], + [ + "Question:Which algorithm is used to optimize the policy in the proposed training method?\nAnswer:", + " Proximal Policy Optimization (PPO)" + ], + [ + "Question:Which algorithm is used to optimize the policy in the proposed training method?\nAnswer:", + " Q-learning" + ], + [ + "Question:Which algorithm is used to optimize the policy in the proposed training method?\nAnswer:", + " Actor-Critic" + ] + ], + "resps": [ + [ + [ + -10.699943542480469, + false + ] + ], + [ + [ + -8.01321792602539, + false + ] + ], + [ + [ + -4.749229907989502, + false + ] + ], + [ + [ + -8.794071197509766, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.699943542480469, + false + ], + [ + -8.01321792602539, + false + ], + [ + -4.749229907989502, + false + ], + [ + -8.794071197509766, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7a8ffac9727722acc7e16ea4e31c950d04a2414d310d30d10782400f58c19526", + "prompt_hash": "929eea14da123173493f83ba8cafdaed60dab174680214e80fedcdf45aef33b5", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 54, + "doc": { + "question": "What is the average number of steps 
required to complete a task in the OpenTable environment?", + "choices": [ + "13.9", + "6.8", + "20.3", + "10.5" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the average number of steps required to complete a task in the OpenTable environment?\nAnswer:", + " 13.9" + ], + [ + "Question:What is the average number of steps required to complete a task in the OpenTable environment?\nAnswer:", + " 6.8" + ], + [ + "Question:What is the average number of steps required to complete a task in the OpenTable environment?\nAnswer:", + " 20.3" + ], + [ + "Question:What is the average number of steps required to complete a task in the OpenTable environment?\nAnswer:", + " 10.5" + ] + ], + "resps": [ + [ + [ + -9.529898643493652, + false + ] + ], + [ + [ + -8.548165321350098, + false + ] + ], + [ + [ + -9.393954277038574, + false + ] + ], + [ + [ + -8.277305603027344, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.529898643493652, + false + ], + [ + -8.548165321350098, + false + ], + [ + -9.393954277038574, + false + ], + [ + -8.277305603027344, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "661302f971fbae21fe155e9a5cf88b752f5af1e05fbb2c9bf77ac54642151bdd", + "prompt_hash": "1d55c170c55f70a6d6dbff3674955aab12ce4afd95c6b5036e2d432fe92c0d73", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 55, + "doc": { + "question": "What is the pass@128 accuracy of the RL model using CoT prompting on the miniF2F-test dataset?", + "choices": [ + "51.6%", + "49.8%", + "50.4%", + "50.5%" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the pass@128 accuracy of the RL model using CoT prompting on the miniF2F-test dataset?\nAnswer:", + " 51.6%" + ], + [ + "Question:What is the pass@128 accuracy of the RL model using CoT prompting on the miniF2F-test dataset?\nAnswer:", + " 49.8%" + ], + [ + "Question:What is the pass@128 accuracy of the RL model using CoT prompting on the miniF2F-test dataset?\nAnswer:", + " 50.4%" + ], + [ + "Question:What is the pass@128 accuracy of the RL model using CoT prompting on the miniF2F-test dataset?\nAnswer:", + " 50.5%" + ] + ], + "resps": [ + [ + [ + -14.946520805358887, + false + ] + ], + [ + [ + -15.035083770751953, + false + ] + ], + [ + [ + -14.563060760498047, + false + ] + ], + [ + [ + -14.588592529296875, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.946520805358887, + false + ], + [ + -15.035083770751953, + false + ], + [ + -14.563060760498047, + false + ], + [ + -14.588592529296875, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "bea78ff6e30c23805612ce89fc7e2cefaea9962704630da8fc21a1edb3e75a45", + "prompt_hash": "31568cdaed8248fb9347dd0f50d2e6a40ffe00ebf9f77248d7a9fe570922cde2", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 56, + "doc": { + "question": "What reward is assigned to a generated proof if it is verified as correct by the Lean prover?", + "choices": [ + "1", + "0", + "0.5", + "2" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What reward is assigned to a generated proof if it is verified as correct by the Lean prover?\nAnswer:", + " 1" + ], + [ + "Question:What reward is assigned to a generated proof if it is verified as correct by the Lean prover?\nAnswer:", + " 0" + ], + [ + "Question:What reward is 
assigned to a generated proof if it is verified as correct by the Lean prover?\nAnswer:", + " 0.5" + ], + [ + "Question:What reward is assigned to a generated proof if it is verified as correct by the Lean prover?\nAnswer:", + " 2" + ] + ], + "resps": [ + [ + [ + -4.328453540802002, + false + ] + ], + [ + [ + -4.019128322601318, + false + ] + ], + [ + [ + -6.707319259643555, + false + ] + ], + [ + [ + -5.582779407501221, + false + ] + ] + ], + "filtered_resps": [ + [ + -4.328453540802002, + false + ], + [ + -4.019128322601318, + false + ], + [ + -6.707319259643555, + false + ], + [ + -5.582779407501221, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "81c2eecd521ab20a30312484726d6d4385d46d1dd5c6e3b646d62c95390c4d1e", + "prompt_hash": "db6dff08fbd10cab9630fb8848105787301cae9945b45dfc78f32ac6c943c813", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 57, + "doc": { + "question": "What guiding information is added to the prompt during supervised fine-tuning to aid proof completion?", + "choices": [ + "Intermediate tactic state as a comment block", + "Problem difficulty level", + "Proof hints from humans", + "External symbolic solvers" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What guiding information is added to the prompt during supervised fine-tuning to aid proof completion?\nAnswer:", + " Intermediate tactic state as a comment block" + ], + [ + "Question:What guiding information is added to the prompt during supervised fine-tuning to aid proof completion?\nAnswer:", + " Problem difficulty level" + ], + [ + "Question:What guiding information is added to the prompt during supervised fine-tuning to aid proof completion?\nAnswer:", + " Proof hints from humans" + ], + [ + "Question:What guiding information is added to the prompt during supervised fine-tuning to aid proof completion?\nAnswer:", + " External symbolic solvers" + ] + ], + "resps": [ + [ + [ + -48.464988708496094, + false + ] + ], + [ + [ + -23.567293167114258, + false + ] + ], + [ + [ + -25.195629119873047, + false + ] + ], + [ + [ + -31.9509220123291, + false + ] + ] + ], + "filtered_resps": [ + [ + -48.464988708496094, + false + ], + [ + -23.567293167114258, + false + ], + [ + -25.195629119873047, + false + ], + [ + -31.9509220123291, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "3bd462c4b12de939d731deec2d9c2b24baecef0a3bc3cad2463612293aad9bbd", + "prompt_hash": "7ab890996c46750808a93880844c85ebc90272d8217bc68c896abac71c3d1c7b", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 58, + "doc": { + "question": "What dataset is used to evaluate high-school level theorem-proving performance?", + "choices": [ + "miniF2F", + "ProofNet", + "LeanDojo", + "MATH" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What dataset is used to evaluate high-school level theorem-proving performance?\nAnswer:", + " miniF2F" + ], + [ + "Question:What dataset is used to evaluate high-school level theorem-proving performance?\nAnswer:", + " ProofNet" + ], + [ + "Question:What dataset is used to evaluate high-school level theorem-proving performance?\nAnswer:", + " LeanDojo" + ], + [ + "Question:What dataset is used to evaluate high-school level theorem-proving performance?\nAnswer:", + " MATH" + ] + ], + "resps": [ + [ + [ + 
-24.148351669311523, + false + ] + ], + [ + [ + -10.6226167678833, + false + ] + ], + [ + [ + -20.867656707763672, + false + ] + ], + [ + [ + -7.023367881774902, + false + ] + ] + ], + "filtered_resps": [ + [ + -24.148351669311523, + false + ], + [ + -10.6226167678833, + false + ], + [ + -20.867656707763672, + false + ], + [ + -7.023367881774902, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "66a9544783bca91ed0271a51b7911425a7e817ac41cfc9bbf1b22f97967d7f97", + "prompt_hash": "f12755e00fb283a488fd026d2cdba0e31b3fb40c906e349ed7a9b4dd052e4296", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 59, + "doc": { + "question": "What is the purpose of the 'truncate-and-resume' mechanism in the tree search method?", + "choices": [ + "To continue proof generation from the last verified step", + "To minimize inference time", + "To switch to a different model mid-proof", + "To discard all failed proof attempts" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the purpose of the 'truncate-and-resume' mechanism in the tree search method?\nAnswer:", + " To continue proof generation from the last verified step" + ], + [ + "Question:What is the purpose of the 'truncate-and-resume' mechanism in the tree search method?\nAnswer:", + " To minimize inference time" + ], + [ + "Question:What is the purpose of the 'truncate-and-resume' mechanism in the tree search method?\nAnswer:", + " To switch to a different model mid-proof" + ], + [ + "Question:What is the purpose of the 'truncate-and-resume' mechanism in the tree search method?\nAnswer:", + " To discard all failed proof attempts" + ] + ], + "resps": [ + [ + [ + -39.913795471191406, + false + ] + ], + [ + [ + -18.19488525390625, + false + ] + ], + [ + [ + -40.979583740234375, + false + ] + ], + [ + [ + -31.746326446533203, + false + ] + ] + ], + "filtered_resps": [ + [ + -39.913795471191406, + false + ], + [ + -18.19488525390625, + false + ], + [ + -40.979583740234375, + false + ], + [ + -31.746326446533203, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "a5a46607c4afedd35a65857b77845b884203f0330d97aed61bcf885dd9c7fe0b", + "prompt_hash": "315a3809a2d4007dfeed2413ac84811418552960e915f01cb771ab3031e6fcd1", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 60, + "doc": { + "question": "What is the primary evaluation metric for Track 1 in ASVspoof~5?", + "choices": [ + "Equal Error Rate (EER)", + "Tandem Detection Cost Function (t-DCF)", + "Minimum Detection Cost Function (minDCF)", + "Architecture-Agnostic Detection Cost Function (a-DCF)" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the primary evaluation metric for Track 1 in ASVspoof~5?\nAnswer:", + " Equal Error Rate (EER)" + ], + [ + "Question:What is the primary evaluation metric for Track 1 in ASVspoof~5?\nAnswer:", + " Tandem Detection Cost Function (t-DCF)" + ], + [ + "Question:What is the primary evaluation metric for Track 1 in ASVspoof~5?\nAnswer:", + " Minimum Detection Cost Function (minDCF)" + ], + [ + "Question:What is the primary evaluation metric for Track 1 in ASVspoof~5?\nAnswer:", + " Architecture-Agnostic Detection Cost Function (a-DCF)" + ] + ], + "resps": [ + [ + [ + -8.293449401855469, + false + ] + ], + [ + [ + -31.978302001953125, + false + ] + ], + [ + [ + 
-17.47964096069336, + false + ] + ], + [ + [ + -41.507606506347656, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.293449401855469, + false + ], + [ + -31.978302001953125, + false + ], + [ + -17.47964096069336, + false + ], + [ + -41.507606506347656, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "95d3ba17b14751019214fa4ae9518138be8ff4a6b03f3e88ff5cbe369c6bb78d", + "prompt_hash": "2990d2f55d6dc5185802979087c79d0697bcbbe40d9deeb2f34554bb1ef0c0d8", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 61, + "doc": { + "question": "Which dataset is used as the source for ASVspoof~5 database?", + "choices": [ + "VCTK Corpus", + "Librispeech Clean", + "CommonVoice", + "Multilingual Librispeech (MLS) English partition" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:Which dataset is used as the source for ASVspoof~5 database?\nAnswer:", + " VCTK Corpus" + ], + [ + "Question:Which dataset is used as the source for ASVspoof~5 database?\nAnswer:", + " Librispeech Clean" + ], + [ + "Question:Which dataset is used as the source for ASVspoof~5 database?\nAnswer:", + " CommonVoice" + ], + [ + "Question:Which dataset is used as the source for ASVspoof~5 database?\nAnswer:", + " Multilingual Librispeech (MLS) English partition" + ] + ], + "resps": [ + [ + [ + -14.687994003295898, + false + ] + ], + [ + [ + -21.001323699951172, + false + ] + ], + [ + [ + -10.582550048828125, + false + ] + ], + [ + [ + -39.72848129272461, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.687994003295898, + false + ], + [ + -21.001323699951172, + false + ], + [ + -10.582550048828125, + false + ], + [ + -39.72848129272461, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "5bf324f6272f99e64abdb10484dbd372127010afacd962840086cabdbc778e16", + "prompt_hash": "2330b3711bd77be2ac52843201f12f5d6d53f885329c0cf9fae3e7365cb83525", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 62, + "doc": { + "question": "Which two pre-trained systems are used in ASVspoof~5 to perform zero-shot voice cloning?", + "choices": [ + "Tacotron2 and WaveGlow", + "YourTTS and XTTS", + "FastSpeech and DeepVoice", + "Glow-TTS and MelGAN" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which two pre-trained systems are used in ASVspoof~5 to perform zero-shot voice cloning?\nAnswer:", + " Tacotron2 and WaveGlow" + ], + [ + "Question:Which two pre-trained systems are used in ASVspoof~5 to perform zero-shot voice cloning?\nAnswer:", + " YourTTS and XTTS" + ], + [ + "Question:Which two pre-trained systems are used in ASVspoof~5 to perform zero-shot voice cloning?\nAnswer:", + " FastSpeech and DeepVoice" + ], + [ + "Question:Which two pre-trained systems are used in ASVspoof~5 to perform zero-shot voice cloning?\nAnswer:", + " Glow-TTS and MelGAN" + ] + ], + "resps": [ + [ + [ + -23.865249633789062, + false + ] + ], + [ + [ + -27.683504104614258, + false + ] + ], + [ + [ + -19.734195709228516, + false + ] + ], + [ + [ + -25.660432815551758, + false + ] + ] + ], + "filtered_resps": [ + [ + -23.865249633789062, + false + ], + [ + -27.683504104614258, + false + ], + [ + -19.734195709228516, + false + ], + [ + -25.660432815551758, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": 
"2db558857d21d05a1c8a6fa58aeff22e54d375701abcb3140367b8bee946e24a", + "prompt_hash": "6389dcbde532b910b8790d57751c0cdcf9247572d9f7ca74529c8c6a47fb7287", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 63, + "doc": { + "question": "What is the speaker encoder used in the common ASV system provided by ASVspoof~5 organisers?", + "choices": [ + "TDNN-F", + "ECAPA-TDNN", + "Conformer-XL", + "X-Vector" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the speaker encoder used in the common ASV system provided by ASVspoof~5 organisers?\nAnswer:", + " TDNN-F" + ], + [ + "Question:What is the speaker encoder used in the common ASV system provided by ASVspoof~5 organisers?\nAnswer:", + " ECAPA-TDNN" + ], + [ + "Question:What is the speaker encoder used in the common ASV system provided by ASVspoof~5 organisers?\nAnswer:", + " Conformer-XL" + ], + [ + "Question:What is the speaker encoder used in the common ASV system provided by ASVspoof~5 organisers?\nAnswer:", + " X-Vector" + ] + ], + "resps": [ + [ + [ + -15.269363403320312, + false + ] + ], + [ + [ + -7.209596157073975, + false + ] + ], + [ + [ + -12.833272933959961, + false + ] + ], + [ + [ + -11.507760047912598, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.269363403320312, + false + ], + [ + -7.209596157073975, + false + ], + [ + -12.833272933959961, + false + ], + [ + -11.507760047912598, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "4617e752ea45a441df277dfe97cdfbe02b4ce00b0138647f2860fe561600d97e", + "prompt_hash": "ccf9aa7d197663dcc62b2793d0c6bae5b70d0fda8efc656d19f5f3a46002e3a5", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 64, + "doc": { + "question": "Which attack filter introduced in ASVspoof~5 escalates the threat of spoofed data to ASV systems?", + "choices": [ + "Malafide", + "Malacopula", + "MFA-Conformer", + "ZMM-TTS" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which attack filter introduced in ASVspoof~5 escalates the threat of spoofed data to ASV systems?\nAnswer:", + " Malafide" + ], + [ + "Question:Which attack filter introduced in ASVspoof~5 escalates the threat of spoofed data to ASV systems?\nAnswer:", + " Malacopula" + ], + [ + "Question:Which attack filter introduced in ASVspoof~5 escalates the threat of spoofed data to ASV systems?\nAnswer:", + " MFA-Conformer" + ], + [ + "Question:Which attack filter introduced in ASVspoof~5 escalates the threat of spoofed data to ASV systems?\nAnswer:", + " ZMM-TTS" + ] + ], + "resps": [ + [ + [ + -19.35630226135254, + false + ] + ], + [ + [ + -30.244190216064453, + false + ] + ], + [ + [ + -26.727115631103516, + false + ] + ], + [ + [ + -24.757612228393555, + false + ] + ] + ], + "filtered_resps": [ + [ + -19.35630226135254, + false + ], + [ + -30.244190216064453, + false + ], + [ + -26.727115631103516, + false + ], + [ + -24.757612228393555, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "0fb2458d508f2d9147675e4363f0942d600d225d663c5fc2ee26073611a48489", + "prompt_hash": "328fce83d3ace1b2ab7bf92fb975d199a432f841f1ee570bc90ade3a388e2066", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 65, + "doc": { + "question": "What is the loss function used to train text 
tokens in Transfusion?", + "choices": [ + "Contrastive loss", + "Perplexity loss", + "Next-token prediction (LM loss)", + "Reconstruction loss" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the loss function used to train text tokens in Transfusion?\nAnswer:", + " Contrastive loss" + ], + [ + "Question:What is the loss function used to train text tokens in Transfusion?\nAnswer:", + " Perplexity loss" + ], + [ + "Question:What is the loss function used to train text tokens in Transfusion?\nAnswer:", + " Next-token prediction (LM loss)" + ], + [ + "Question:What is the loss function used to train text tokens in Transfusion?\nAnswer:", + " Reconstruction loss" + ] + ], + "resps": [ + [ + [ + -7.832241058349609, + false + ] + ], + [ + [ + -10.432533264160156, + false + ] + ], + [ + [ + -28.241981506347656, + false + ] + ], + [ + [ + -10.100059509277344, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.832241058349609, + false + ], + [ + -10.432533264160156, + false + ], + [ + -28.241981506347656, + false + ], + [ + -10.100059509277344, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "6b0ace319fe15fa9f5d0bf46932d99e5bd2793db908e2c5b2f2bb258b28f6072", + "prompt_hash": "570aa441db2093a1b3421527b81e83562db6be29dde0a696b673b704491a7ddb", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 66, + "doc": { + "question": "Which patch encoding architecture improves Transfusion’s image generation performance the most?", + "choices": [ + "Linear layer with attention", + "U-Net with AdaLayerNorm", + "U-Net with regular layer norm", + "Convolutional GRU blocks" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which patch encoding architecture improves Transfusion’s image generation performance the most?\nAnswer:", + " Linear layer with attention" + ], + [ + "Question:Which patch encoding architecture improves Transfusion’s image generation performance the most?\nAnswer:", + " U-Net with AdaLayerNorm" + ], + [ + "Question:Which patch encoding architecture improves Transfusion’s image generation performance the most?\nAnswer:", + " U-Net with regular layer norm" + ], + [ + "Question:Which patch encoding architecture improves Transfusion’s image generation performance the most?\nAnswer:", + " Convolutional GRU blocks" + ] + ], + "resps": [ + [ + [ + -18.62102508544922, + false + ] + ], + [ + [ + -32.28581237792969, + false + ] + ], + [ + [ + -31.470565795898438, + false + ] + ], + [ + [ + -27.30034637451172, + false + ] + ] + ], + "filtered_resps": [ + [ + -18.62102508544922, + false + ], + [ + -32.28581237792969, + false + ], + [ + -31.470565795898438, + false + ], + [ + -27.30034637451172, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "29cae45755b78d5aedaa01332ccf6118f4c9b014417563694179a1960d6fc7e9", + "prompt_hash": "a1c19527a1165a8e5d2900fbc716ef9b0a394aaa0a23aef8dc8bcaca1f488f21", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 67, + "doc": { + "question": "What is the purpose of the BOI and EOI tokens in Transfusion?", + "choices": [ + "To indicate the boundaries of text sequences", + "To mark the beginning and end of the entire multi-modal document", + "To separate images from each other during training", + "To surround image sequences in mixed-modal examples" + ], + "answer": 3 + 
}, + "target": 3, + "arguments": [ + [ + "Question:What is the purpose of the BOI and EOI tokens in Transfusion?\nAnswer:", + " To indicate the boundaries of text sequences" + ], + [ + "Question:What is the purpose of the BOI and EOI tokens in Transfusion?\nAnswer:", + " To mark the beginning and end of the entire multi-modal document" + ], + [ + "Question:What is the purpose of the BOI and EOI tokens in Transfusion?\nAnswer:", + " To separate images from each other during training" + ], + [ + "Question:What is the purpose of the BOI and EOI tokens in Transfusion?\nAnswer:", + " To surround image sequences in mixed-modal examples" + ] + ], + "resps": [ + [ + [ + -29.421863555908203, + false + ] + ], + [ + [ + -33.25529861450195, + false + ] + ], + [ + [ + -27.936172485351562, + false + ] + ], + [ + [ + -54.490135192871094, + false + ] + ] + ], + "filtered_resps": [ + [ + -29.421863555908203, + false + ], + [ + -33.25529861450195, + false + ], + [ + -27.936172485351562, + false + ], + [ + -54.490135192871094, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "2caec057216248e225a486a5981b79b5095d183fd431a4d2e6425a38484e4039", + "prompt_hash": "91cb037498b2189102728567418847c093f76af545eaaed3fd616858c3e8827f", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 68, + "doc": { + "question": "What metric is used to evaluate text-to-image alignment in Transfusion?", + "choices": [ + "BLEU score", + "FID score", + "CLIP score", + "ROUGE-L score" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What metric is used to evaluate text-to-image alignment in Transfusion?\nAnswer:", + " BLEU score" + ], + [ + "Question:What metric is used to evaluate text-to-image alignment in Transfusion?\nAnswer:", + " FID score" + ], + [ + "Question:What metric is used to evaluate text-to-image alignment in Transfusion?\nAnswer:", + " CLIP score" + ], + [ + "Question:What metric is used to evaluate text-to-image alignment in Transfusion?\nAnswer:", + " ROUGE-L score" + ] + ], + "resps": [ + [ + [ + -9.753241539001465, + false + ] + ], + [ + [ + -8.78629207611084, + false + ] + ], + [ + [ + -6.556405067443848, + false + ] + ], + [ + [ + -14.04121208190918, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.753241539001465, + false + ], + [ + -8.78629207611084, + false + ], + [ + -6.556405067443848, + false + ], + [ + -14.04121208190918, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b521b9c06830e960b02ae0ea71ffdfa87e7339a23c2d6484253ad1161c0301cb", + "prompt_hash": "ce159dbcd9e01068e8c6c93f0f5b4e4a654718f7d8ad80f9ba8381063ac2c2bd", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 69, + "doc": { + "question": "Which dataset provides the 2T tokens used for Transfusion's large-scale training?", + "choices": [ + "LAION-5B and C4", + "Wikipedia and ImageNet", + "Llama 2 corpus and Shutterstock + Conceptual 12M", + "OpenWebText and MS-COCO" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which dataset provides the 2T tokens used for Transfusion's large-scale training?\nAnswer:", + " LAION-5B and C4" + ], + [ + "Question:Which dataset provides the 2T tokens used for Transfusion's large-scale training?\nAnswer:", + " Wikipedia and ImageNet" + ], + [ + "Question:Which dataset provides the 2T tokens used for Transfusion's 
large-scale training?\nAnswer:", + " Llama 2 corpus and Shutterstock + Conceptual 12M" + ], + [ + "Question:Which dataset provides the 2T tokens used for Transfusion's large-scale training?\nAnswer:", + " OpenWebText and MS-COCO" + ] + ], + "resps": [ + [ + [ + -18.460792541503906, + false + ] + ], + [ + [ + -15.87649917602539, + false + ] + ], + [ + [ + -65.93722534179688, + false + ] + ], + [ + [ + -22.97542953491211, + false + ] + ] + ], + "filtered_resps": [ + [ + -18.460792541503906, + false + ], + [ + -15.87649917602539, + false + ], + [ + -65.93722534179688, + false + ], + [ + -22.97542953491211, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "ce183691522901f523c3138a5c649a02dbdbd4008d6c70d0596b60660ff8963f", + "prompt_hash": "7a61699516049970699cc38aeb4d62adb1611a28704277f83b5ad9d41bdf14f9", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 70, + "doc": { + "question": "What key feature distinguishes Kolmogorov-Arnold Networks (KANs) from multilayer perceptrons (MLPs)?", + "choices": [ + "KANs use learnable activation functions on edges", + "KANs have fixed activation functions on nodes", + "KANs use convolutional layers instead of fully connected layers", + "KANs rely on recurrent architectures for interpretability" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What key feature distinguishes Kolmogorov-Arnold Networks (KANs) from multilayer perceptrons (MLPs)?\nAnswer:", + " KANs use learnable activation functions on edges" + ], + [ + "Question:What key feature distinguishes Kolmogorov-Arnold Networks (KANs) from multilayer perceptrons (MLPs)?\nAnswer:", + " KANs have fixed activation functions on nodes" + ], + [ + "Question:What key feature distinguishes Kolmogorov-Arnold Networks (KANs) from multilayer perceptrons (MLPs)?\nAnswer:", + " KANs use convolutional layers instead of fully connected layers" + ], + [ + "Question:What key feature distinguishes Kolmogorov-Arnold Networks (KANs) from multilayer perceptrons (MLPs)?\nAnswer:", + " KANs rely on recurrent architectures for interpretability" + ] + ], + "resps": [ + [ + [ + -24.835132598876953, + false + ] + ], + [ + [ + -25.078359603881836, + false + ] + ], + [ + [ + -14.701416015625, + false + ] + ], + [ + [ + -32.338748931884766, + false + ] + ] + ], + "filtered_resps": [ + [ + -24.835132598876953, + false + ], + [ + -25.078359603881836, + false + ], + [ + -14.701416015625, + false + ], + [ + -32.338748931884766, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "2d5acfa87e3ab29480130d3e939c998ab2e76f35cb68b702fb4f7e3bf7909d32", + "prompt_hash": "aed15bb52a7728777f97d79fb23a7e5e41c96fedefd5d574c8fa3267850a078b", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 71, + "doc": { + "question": "What is the primary enhancement that MultKAN introduces over the original KAN?", + "choices": [ + "It introduces explicit multiplication operations between subnodes", + "It replaces all activation functions with ReLU", + "It allows recurrent feedback connections", + "It removes all addition nodes to reduce complexity" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the primary enhancement that MultKAN introduces over the original KAN?\nAnswer:", + " It introduces explicit multiplication operations between subnodes" + ], + 
[ + "Question:What is the primary enhancement that MultKAN introduces over the original KAN?\nAnswer:", + " It replaces all activation functions with ReLU" + ], + [ + "Question:What is the primary enhancement that MultKAN introduces over the original KAN?\nAnswer:", + " It allows recurrent feedback connections" + ], + [ + "Question:What is the primary enhancement that MultKAN introduces over the original KAN?\nAnswer:", + " It removes all addition nodes to reduce complexity" + ] + ], + "resps": [ + [ + [ + -34.7784538269043, + false + ] + ], + [ + [ + -20.881629943847656, + false + ] + ], + [ + [ + -27.06278419494629, + false + ] + ], + [ + [ + -40.277320861816406, + false + ] + ] + ], + "filtered_resps": [ + [ + -34.7784538269043, + false + ], + [ + -20.881629943847656, + false + ], + [ + -27.06278419494629, + false + ], + [ + -40.277320861816406, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "fb27a0f9ec84e9f94e9b94583d80b07de729f25497b2c72e4719f5ff0483d5be", + "prompt_hash": "f0c4cbb2d6ce4e80ffa1c1860fc18807ddf434f9b562da0fe5cd3e73656c49fa", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 72, + "doc": { + "question": "What is the purpose of the 'kanpiler' tool introduced in the paper?", + "choices": [ + "To convert symbolic formulas into KAN architectures", + "To extract symbolic expressions from trained KANs", + "To optimize KAN performance through weight pruning", + "To compress large KANs into smaller versions" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the purpose of the 'kanpiler' tool introduced in the paper?\nAnswer:", + " To convert symbolic formulas into KAN architectures" + ], + [ + "Question:What is the purpose of the 'kanpiler' tool introduced in the paper?\nAnswer:", + " To extract symbolic expressions from trained KANs" + ], + [ + "Question:What is the purpose of the 'kanpiler' tool introduced in the paper?\nAnswer:", + " To optimize KAN performance through weight pruning" + ], + [ + "Question:What is the purpose of the 'kanpiler' tool introduced in the paper?\nAnswer:", + " To compress large KANs into smaller versions" + ] + ], + "resps": [ + [ + [ + -37.53588104248047, + false + ] + ], + [ + [ + -38.3877067565918, + false + ] + ], + [ + [ + -34.88467025756836, + false + ] + ], + [ + [ + -30.9049072265625, + false + ] + ] + ], + "filtered_resps": [ + [ + -37.53588104248047, + false + ], + [ + -38.3877067565918, + false + ], + [ + -34.88467025756836, + false + ], + [ + -30.9049072265625, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "aaeac0339e72fc575d316c42e2c96a6e83d98979c63d77b1374b1e94e07143a5", + "prompt_hash": "ca2994149774fcb47bcb0098296ea6473f8ad6dbe711695fccf9a6c6044922bc", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 73, + "doc": { + "question": "Which method is introduced to assign scores to input variables in KANs to reflect their importance?", + "choices": [ + "Attribution score", + "Gradient clipping", + "Dropout rate", + "Batch normalization" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which method is introduced to assign scores to input variables in KANs to reflect their importance?\nAnswer:", + " Attribution score" + ], + [ + "Question:Which method is introduced to assign scores to input variables in KANs to reflect their 
importance?\nAnswer:", + " Gradient clipping" + ], + [ + "Question:Which method is introduced to assign scores to input variables in KANs to reflect their importance?\nAnswer:", + " Dropout rate" + ], + [ + "Question:Which method is introduced to assign scores to input variables in KANs to reflect their importance?\nAnswer:", + " Batch normalization" + ] + ], + "resps": [ + [ + [ + -14.220232963562012, + false + ] + ], + [ + [ + -13.326740264892578, + false + ] + ], + [ + [ + -16.06420135498047, + false + ] + ], + [ + [ + -12.452866554260254, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.220232963562012, + false + ], + [ + -13.326740264892578, + false + ], + [ + -16.06420135498047, + false + ], + [ + -12.452866554260254, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f2198d6262fe8ca5c6bec0aa07d9085fe88f229976cde0456857cb3a842d1e58", + "prompt_hash": "2f05faff3fb35d1886cbf3237ae1313cbf1f731bd8403d103c36c10ca9cd99e0", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 74, + "doc": { + "question": "What property must a function satisfy to be considered additively separable?", + "choices": [ + "It can be expressed as the sum of functions over non-overlapping variable groups", + "It is invariant under any permutation of its variables", + "It only involves multiplicative combinations of inputs", + "It satisfies the condition of zero second derivatives across all input variables" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What property must a function satisfy to be considered additively separable?\nAnswer:", + " It can be expressed as the sum of functions over non-overlapping variable groups" + ], + [ + "Question:What property must a function satisfy to be considered additively separable?\nAnswer:", + " It is invariant under any permutation of its variables" + ], + [ + "Question:What property must a function satisfy to be considered additively separable?\nAnswer:", + " It only involves multiplicative combinations of inputs" + ], + [ + "Question:What property must a function satisfy to be considered additively separable?\nAnswer:", + " It satisfies the condition of zero second derivatives across all input variables" + ] + ], + "resps": [ + [ + [ + -51.171424865722656, + false + ] + ], + [ + [ + -22.00145721435547, + false + ] + ], + [ + [ + -32.46794128417969, + false + ] + ], + [ + [ + -46.33948516845703, + false + ] + ] + ], + "filtered_resps": [ + [ + -51.171424865722656, + false + ], + [ + -22.00145721435547, + false + ], + [ + -32.46794128417969, + false + ], + [ + -46.33948516845703, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "ca1ded857af8cbbce2e7be004360e511f4a4b7dab07917fcbbec93bc80290b7c", + "prompt_hash": "43d2ceabec8b8b54d9da9df9ae1d9e73050475a31ca8789daf1e6e1f3a5074a5", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 75, + "doc": { + "question": "What is the total number of A100 GPUs used to pretrain the Sapiens-2B model?", + "choices": [ + "512", + "768", + "1024", + "2048" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the total number of A100 GPUs used to pretrain the Sapiens-2B model?\nAnswer:", + " 512" + ], + [ + "Question:What is the total number of A100 GPUs used to pretrain the Sapiens-2B model?\nAnswer:", + " 768" + ], + [ + "Question:What 
is the total number of A100 GPUs used to pretrain the Sapiens-2B model?\nAnswer:", + " 1024" + ], + [ + "Question:What is the total number of A100 GPUs used to pretrain the Sapiens-2B model?\nAnswer:", + " 2048" + ] + ], + "resps": [ + [ + [ + -7.625455379486084, + false + ] + ], + [ + [ + -7.575810432434082, + false + ] + ], + [ + [ + -7.642750263214111, + false + ] + ], + [ + [ + -8.01429557800293, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.625455379486084, + false + ], + [ + -7.575810432434082, + false + ], + [ + -7.642750263214111, + false + ], + [ + -8.01429557800293, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "528dd299433b3d911f39a72b35f638ff4c8a8cedde10d8c3ac216018203cac9e", + "prompt_hash": "0789a74cd22126b5cbae6c1be7e15e03778db286e3bf042ec9dd5b0bd6b34b50", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 76, + "doc": { + "question": "What is the masking ratio used during pretraining in the masked autoencoder setup?", + "choices": [ + "75%", + "80%", + "90%", + "95%" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What is the masking ratio used during pretraining in the masked autoencoder setup?\nAnswer:", + " 75%" + ], + [ + "Question:What is the masking ratio used during pretraining in the masked autoencoder setup?\nAnswer:", + " 80%" + ], + [ + "Question:What is the masking ratio used during pretraining in the masked autoencoder setup?\nAnswer:", + " 90%" + ], + [ + "Question:What is the masking ratio used during pretraining in the masked autoencoder setup?\nAnswer:", + " 95%" + ] + ], + "resps": [ + [ + [ + -10.97347354888916, + false + ] + ], + [ + [ + -9.670283317565918, + false + ] + ], + [ + [ + -10.75654125213623, + false + ] + ], + [ + [ + -11.948522567749023, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.97347354888916, + false + ], + [ + -9.670283317565918, + false + ], + [ + -10.75654125213623, + false + ], + [ + -11.948522567749023, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "e8382292752998f36fe4788c6504c9081c1414f2d0968a303ced787a1a38c768", + "prompt_hash": "27d9f7078490b4956caea33002cad3f1b07ad508db4129edf312f78eb525cf18", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 77, + "doc": { + "question": "How many facial keypoints are included in the new detailed skeleton used for 2D pose estimation?", + "choices": [ + "68", + "133", + "243", + "308" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many facial keypoints are included in the new detailed skeleton used for 2D pose estimation?\nAnswer:", + " 68" + ], + [ + "Question:How many facial keypoints are included in the new detailed skeleton used for 2D pose estimation?\nAnswer:", + " 133" + ], + [ + "Question:How many facial keypoints are included in the new detailed skeleton used for 2D pose estimation?\nAnswer:", + " 243" + ], + [ + "Question:How many facial keypoints are included in the new detailed skeleton used for 2D pose estimation?\nAnswer:", + " 308" + ] + ], + "resps": [ + [ + [ + -6.453620910644531, + false + ] + ], + [ + [ + -8.351236343383789, + false + ] + ], + [ + [ + -9.181026458740234, + false + ] + ], + [ + [ + -8.84392261505127, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.453620910644531, + false + ], + [ + -8.351236343383789, + false + ], + [ + 
-9.181026458740234, + false + ], + [ + -8.84392261505127, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "dfd9b997c52aa078ec356428d1c4511a2bad04a2dfd3edfccbf0978417918deb", + "prompt_hash": "108d87a3f6ded0b18c149bed498682e004bbe4a51516edc01fd171d7b90b72df", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 78, + "doc": { + "question": "How many synthetic images are rendered for training the depth estimation model?", + "choices": [ + "100,000", + "250,000", + "500,000", + "1,000,000" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many synthetic images are rendered for training the depth estimation model?\nAnswer:", + " 100,000" + ], + [ + "Question:How many synthetic images are rendered for training the depth estimation model?\nAnswer:", + " 250,000" + ], + [ + "Question:How many synthetic images are rendered for training the depth estimation model?\nAnswer:", + " 500,000" + ], + [ + "Question:How many synthetic images are rendered for training the depth estimation model?\nAnswer:", + " 1,000,000" + ] + ], + "resps": [ + [ + [ + -7.2633056640625, + false + ] + ], + [ + [ + -9.151877403259277, + false + ] + ], + [ + [ + -8.701854705810547, + false + ] + ], + [ + [ + -8.134246826171875, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.2633056640625, + false + ], + [ + -9.151877403259277, + false + ], + [ + -8.701854705810547, + false + ], + [ + -8.134246826171875, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "a530c8a96b5dcf04ebb711438555ef1844421b25d1bd65e8a7d1179ec529fb08", + "prompt_hash": "de28faf4590b46ded02d75a918fc983f41e86d217d29fcb34fcc6aac506ff354", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 79, + "doc": { + "question": "What is the number of classes in the new vocabulary used for body-part segmentation?", + "choices": [ + "20", + "25", + "28", + "30" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the number of classes in the new vocabulary used for body-part segmentation?\nAnswer:", + " 20" + ], + [ + "Question:What is the number of classes in the new vocabulary used for body-part segmentation?\nAnswer:", + " 25" + ], + [ + "Question:What is the number of classes in the new vocabulary used for body-part segmentation?\nAnswer:", + " 28" + ], + [ + "Question:What is the number of classes in the new vocabulary used for body-part segmentation?\nAnswer:", + " 30" + ] + ], + "resps": [ + [ + [ + -4.645571708679199, + false + ] + ], + [ + [ + -5.531121253967285, + false + ] + ], + [ + [ + -6.0805463790893555, + false + ] + ], + [ + [ + -4.977658271789551, + false + ] + ] + ], + "filtered_resps": [ + [ + -4.645571708679199, + false + ], + [ + -5.531121253967285, + false + ], + [ + -6.0805463790893555, + false + ], + [ + -4.977658271789551, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "2155b83e33c84634108fee0adac58d9233fc4ae33e1af8a63ee713084ec1c697", + "prompt_hash": "daf55490b1941e597310bfb2fbc17a7bbef7e53bbeb445d5de70be9b1a9a66dd", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 80, + "doc": { + "question": "What is the total number of parameters in the self-attention architecture model using Mistral-7B and 
SigLIP-SO400M?", + "choices": [ + "A. 10B", + "B. 8.3B", + "C. 7.4B", + "D. 12.1B" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the total number of parameters in the self-attention architecture model using Mistral-7B and SigLIP-SO400M?\nAnswer:", + " A. 10B" + ], + [ + "Question:What is the total number of parameters in the self-attention architecture model using Mistral-7B and SigLIP-SO400M?\nAnswer:", + " B. 8.3B" + ], + [ + "Question:What is the total number of parameters in the self-attention architecture model using Mistral-7B and SigLIP-SO400M?\nAnswer:", + " C. 7.4B" + ], + [ + "Question:What is the total number of parameters in the self-attention architecture model using Mistral-7B and SigLIP-SO400M?\nAnswer:", + " D. 12.1B" + ] + ], + "resps": [ + [ + [ + -19.912111282348633, + false + ] + ], + [ + [ + -26.02424430847168, + false + ] + ], + [ + [ + -25.916641235351562, + false + ] + ], + [ + [ + -29.070547103881836, + false + ] + ] + ], + "filtered_resps": [ + [ + -19.912111282348633, + false + ], + [ + -26.02424430847168, + false + ], + [ + -25.916641235351562, + false + ], + [ + -29.070547103881836, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "3186417562470569683dcde8ba032394572cd147a9accfdf3a8415b0dddc5574", + "prompt_hash": "f1275803e5fe3ae87a49bb0ba73b02f83e368b95c61df7c05fda292ac06ff3f4", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 81, + "doc": { + "question": "Which model feeds image patches directly into the language model using a simple linear projection?", + "choices": [ + "A. Idefics3", + "B. LLaVA", + "C. Fuyu", + "D. BLIP2" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which model feeds image patches directly into the language model using a simple linear projection?\nAnswer:", + " A. Idefics3" + ], + [ + "Question:Which model feeds image patches directly into the language model using a simple linear projection?\nAnswer:", + " B. LLaVA" + ], + [ + "Question:Which model feeds image patches directly into the language model using a simple linear projection?\nAnswer:", + " C. Fuyu" + ], + [ + "Question:Which model feeds image patches directly into the language model using a simple linear projection?\nAnswer:", + " D. BLIP2" + ] + ], + "resps": [ + [ + [ + -29.24394416809082, + false + ] + ], + [ + [ + -21.120861053466797, + false + ] + ], + [ + [ + -27.182903289794922, + false + ] + ], + [ + [ + -22.341655731201172, + false + ] + ] + ], + "filtered_resps": [ + [ + -29.24394416809082, + false + ], + [ + -21.120861053466797, + false + ], + [ + -27.182903289794922, + false + ], + [ + -22.341655731201172, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "4e044d2aaa20375be6cd8c444dca3b26a8dab26ce04dc4bc9ca080f73e9b67d4", + "prompt_hash": "724be7e74e772a2e41442b4f177b04d04d82390c745bc0803b5e18d09232a6d7", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 82, + "doc": { + "question": "What token is inserted after each row of tiles in Idefics3 to preserve spatial layout?", + "choices": [ + "A. ", + "B. ", + "C. ", + "D. \\n" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What token is inserted after each row of tiles in Idefics3 to preserve spatial layout?\nAnswer:", + " A. 
" + ], + [ + "Question:What token is inserted after each row of tiles in Idefics3 to preserve spatial layout?\nAnswer:", + " B. " + ], + [ + "Question:What token is inserted after each row of tiles in Idefics3 to preserve spatial layout?\nAnswer:", + " C. " + ], + [ + "Question:What token is inserted after each row of tiles in Idefics3 to preserve spatial layout?\nAnswer:", + " D. \\n" + ] + ], + "resps": [ + [ + [ + -19.776901245117188, + false + ] + ], + [ + [ + -26.64897918701172, + false + ] + ], + [ + [ + -30.480342864990234, + false + ] + ], + [ + [ + -20.79037094116211, + false + ] + ] + ], + "filtered_resps": [ + [ + -19.776901245117188, + false + ], + [ + -26.64897918701172, + false + ], + [ + -30.480342864990234, + false + ], + [ + -20.79037094116211, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7252e08a4477e278086b5719b659420ecbbe5c6b03d49ff0430c5d1b75f442c7", + "prompt_hash": "6f9e27249956f97a187ee548ff99115a8575ea2f5fa94228895a4fc701f1938d", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 83, + "doc": { + "question": "How many QA pairs does the Docmatix dataset contain?", + "choices": [ + "A. 2.4 million", + "B. 1.3 million", + "C. 9.5 million", + "D. 40 thousand" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many QA pairs does the Docmatix dataset contain?\nAnswer:", + " A. 2.4 million" + ], + [ + "Question:How many QA pairs does the Docmatix dataset contain?\nAnswer:", + " B. 1.3 million" + ], + [ + "Question:How many QA pairs does the Docmatix dataset contain?\nAnswer:", + " C. 9.5 million" + ], + [ + "Question:How many QA pairs does the Docmatix dataset contain?\nAnswer:", + " D. 40 thousand" + ] + ], + "resps": [ + [ + [ + -20.991960525512695, + false + ] + ], + [ + [ + -25.32790756225586, + false + ] + ], + [ + [ + -27.947509765625, + false + ] + ], + [ + [ + -26.229846954345703, + false + ] + ] + ], + "filtered_resps": [ + [ + -20.991960525512695, + false + ], + [ + -25.32790756225586, + false + ], + [ + -27.947509765625, + false + ], + [ + -26.229846954345703, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "0ac12468abd789b0f9037786e07f66561aa6aff5b692663db49d0947cbc58283", + "prompt_hash": "be81629cf550f614486bee6f4e41327c8b3d3a1ec1029e2750ce0c8b6f2214ba", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 84, + "doc": { + "question": "Which benchmark score is used to evaluate performance on the DocVQA task?", + "choices": [ + "A. Accuracy", + "B. ANLS", + "C. BLEU", + "D. VQA Score" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which benchmark score is used to evaluate performance on the DocVQA task?\nAnswer:", + " A. Accuracy" + ], + [ + "Question:Which benchmark score is used to evaluate performance on the DocVQA task?\nAnswer:", + " B. ANLS" + ], + [ + "Question:Which benchmark score is used to evaluate performance on the DocVQA task?\nAnswer:", + " C. BLEU" + ], + [ + "Question:Which benchmark score is used to evaluate performance on the DocVQA task?\nAnswer:", + " D. 
VQA Score" + ] + ], + "resps": [ + [ + [ + -11.713038444519043, + false + ] + ], + [ + [ + -27.411283493041992, + false + ] + ], + [ + [ + -15.24494457244873, + false + ] + ], + [ + [ + -23.070180892944336, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.713038444519043, + false + ], + [ + -27.411283493041992, + false + ], + [ + -15.24494457244873, + false + ], + [ + -23.070180892944336, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "ed66f74f3c578709f069bfccd45cde901335880474927d904012d9eb1b0f641f", + "prompt_hash": "1dcf27ccd8b63f609b9e414a8d4155fa598dec6391547a212abd20758440d679", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 85, + "doc": { + "question": "What is the approximate logical error suppression factor (Λ) observed using the neural network decoder for the surface code?", + "choices": [ + "1.71", + "2.14", + "2.04", + "1.43" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the approximate logical error suppression factor (Λ) observed using the neural network decoder for the surface code?\nAnswer:", + " 1.71" + ], + [ + "Question:What is the approximate logical error suppression factor (Λ) observed using the neural network decoder for the surface code?\nAnswer:", + " 2.14" + ], + [ + "Question:What is the approximate logical error suppression factor (Λ) observed using the neural network decoder for the surface code?\nAnswer:", + " 2.04" + ], + [ + "Question:What is the approximate logical error suppression factor (Λ) observed using the neural network decoder for the surface code?\nAnswer:", + " 1.43" + ] + ], + "resps": [ + [ + [ + -9.656071662902832, + false + ] + ], + [ + [ + -10.014352798461914, + false + ] + ], + [ + [ + -10.03097915649414, + false + ] + ], + [ + [ + -9.16510009765625, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.656071662902832, + false + ], + [ + -10.014352798461914, + false + ], + [ + -10.03097915649414, + false + ], + [ + -9.16510009765625, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f83dcb8fa32e705ac558c88af25d83b05b9235f1ba3ae7c15ad1b6a626edc67f", + "prompt_hash": "2855d1e6072b4821bc28e368c7ca76e92054e3f7dae601b26cc65c12ae23bb94", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 86, + "doc": { + "question": "How many physical qubits are used in the distance-7 surface code implementation on the 105-qubit processor?", + "choices": [ + "49", + "48", + "101", + "105" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many physical qubits are used in the distance-7 surface code implementation on the 105-qubit processor?\nAnswer:", + " 49" + ], + [ + "Question:How many physical qubits are used in the distance-7 surface code implementation on the 105-qubit processor?\nAnswer:", + " 48" + ], + [ + "Question:How many physical qubits are used in the distance-7 surface code implementation on the 105-qubit processor?\nAnswer:", + " 101" + ], + [ + "Question:How many physical qubits are used in the distance-7 surface code implementation on the 105-qubit processor?\nAnswer:", + " 105" + ] + ], + "resps": [ + [ + [ + -6.783773422241211, + false + ] + ], + [ + [ + -6.077127456665039, + false + ] + ], + [ + [ + -7.488647937774658, + false + ] + ], + [ + [ + -5.374570369720459, + true + ] + ] + ], + "filtered_resps": [ + [ + 
-6.783773422241211, + false + ], + [ + -6.077127456665039, + false + ], + [ + -7.488647937774658, + false + ], + [ + -5.374570369720459, + true + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "3ac616f94d005707134ede2e1c37cad01760706de6162fd22fdddef31fa7a996", + "prompt_hash": "e8041853efe4132459bdbeafc15bd95282ad9772ff068dfb2c03880e3ae2f4a0", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 87, + "doc": { + "question": "What is the reported cycle time for the superconducting processor requiring real-time decoding?", + "choices": [ + "1.1 ms", + "24 µs", + "1.1 µs", + "10 µs" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the reported cycle time for the superconducting processor requiring real-time decoding?\nAnswer:", + " 1.1 ms" + ], + [ + "Question:What is the reported cycle time for the superconducting processor requiring real-time decoding?\nAnswer:", + " 24 µs" + ], + [ + "Question:What is the reported cycle time for the superconducting processor requiring real-time decoding?\nAnswer:", + " 1.1 µs" + ], + [ + "Question:What is the reported cycle time for the superconducting processor requiring real-time decoding?\nAnswer:", + " 10 µs" + ] + ], + "resps": [ + [ + [ + -10.168475151062012, + false + ] + ], + [ + [ + -14.862546920776367, + false + ] + ], + [ + [ + -14.548297882080078, + false + ] + ], + [ + [ + -11.576759338378906, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.168475151062012, + false + ], + [ + -14.862546920776367, + false + ], + [ + -14.548297882080078, + false + ], + [ + -11.576759338378906, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "e5a76c2d4a27925f448844ee634c1336c9eb420f57be71bf6bde21e0c69f8477", + "prompt_hash": "690ad43c485740a8a906fe16420a2ef6163d4e4876bf79d429256dcca0f3dfb1", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 88, + "doc": { + "question": "Which component contributes the most to the error budget in the 72-qubit processor according to simulations?", + "choices": [ + "Measurement and reset error", + "Leakage during CZs and due to heating", + "CZ gate errors including stray interactions", + "Single-qubit gate error" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which component contributes the most to the error budget in the 72-qubit processor according to simulations?\nAnswer:", + " Measurement and reset error" + ], + [ + "Question:Which component contributes the most to the error budget in the 72-qubit processor according to simulations?\nAnswer:", + " Leakage during CZs and due to heating" + ], + [ + "Question:Which component contributes the most to the error budget in the 72-qubit processor according to simulations?\nAnswer:", + " CZ gate errors including stray interactions" + ], + [ + "Question:Which component contributes the most to the error budget in the 72-qubit processor according to simulations?\nAnswer:", + " Single-qubit gate error" + ] + ], + "resps": [ + [ + [ + -20.62749481201172, + false + ] + ], + [ + [ + -50.088890075683594, + false + ] + ], + [ + [ + -41.10272216796875, + false + ] + ], + [ + [ + -10.830703735351562, + false + ] + ] + ], + "filtered_resps": [ + [ + -20.62749481201172, + false + ], + [ + -50.088890075683594, + false + ], + [ + -41.10272216796875, + false + ], + [ + -10.830703735351562, + 
false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "e33cdc4c62885ee64dd285f973173693dcae0da6c8297e11d45f259b5f7c89ad", + "prompt_hash": "26648c0d5fdbfedcc1b4c10426c3760df47ac02e344db4805edb96fd47a50733", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 89, + "doc": { + "question": "What method is used to mitigate data qubit leakage during surface code operation?", + "choices": [ + "Passive cooling", + "Dynamical decoupling", + "Data qubit leakage removal (DQLR)", + "Multi-level reset" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What method is used to mitigate data qubit leakage during surface code operation?\nAnswer:", + " Passive cooling" + ], + [ + "Question:What method is used to mitigate data qubit leakage during surface code operation?\nAnswer:", + " Dynamical decoupling" + ], + [ + "Question:What method is used to mitigate data qubit leakage during surface code operation?\nAnswer:", + " Data qubit leakage removal (DQLR)" + ], + [ + "Question:What method is used to mitigate data qubit leakage during surface code operation?\nAnswer:", + " Multi-level reset" + ] + ], + "resps": [ + [ + [ + -14.593650817871094, + false + ] + ], + [ + [ + -9.150117874145508, + false + ] + ], + [ + [ + -26.144542694091797, + false + ] + ], + [ + [ + -20.310792922973633, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.593650817871094, + false + ], + [ + -9.150117874145508, + false + ], + [ + -26.144542694091797, + false + ], + [ + -20.310792922973633, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "2c5843b32d9ee6229b54175779e7c1d53fa45c7089f073735fe900bc778a6641", + "prompt_hash": "ad8aeed8ec0324509fadc57ee2c062485ed9a7411672ea9a453fb225ef02e5c3", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 90, + "doc": { + "question": "Which model is based on LLaMA3-8B and adopts visual expert modules?", + "choices": [ + "CogAgent", + "GLM-4V-9B", + "CogVLM2", + "VisualGLM" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which model is based on LLaMA3-8B and adopts visual expert modules?\nAnswer:", + " CogAgent" + ], + [ + "Question:Which model is based on LLaMA3-8B and adopts visual expert modules?\nAnswer:", + " GLM-4V-9B" + ], + [ + "Question:Which model is based on LLaMA3-8B and adopts visual expert modules?\nAnswer:", + " CogVLM2" + ], + [ + "Question:Which model is based on LLaMA3-8B and adopts visual expert modules?\nAnswer:", + " VisualGLM" + ] + ], + "resps": [ + [ + [ + -15.072737693786621, + false + ] + ], + [ + [ + -24.121051788330078, + false + ] + ], + [ + [ + -16.509078979492188, + false + ] + ], + [ + [ + -12.67910385131836, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.072737693786621, + false + ], + [ + -24.121051788330078, + false + ], + [ + -16.509078979492188, + false + ], + [ + -12.67910385131836, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "17140906c051bae2097e001a7d8abdf39877f5558dc393130c5a3adb52fb259c", + "prompt_hash": "e8cecc717ed96ab7822f3e7bea68d62464c74e7c1f0a1f19854dfaadf291243f", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 91, + "doc": { + "question": "Which dataset contains 7 million English and 5 million Chinese entries 
created from browser-captured screenshots?", + "choices": [ + "CLAY-1B", + "Digital World Grounding Dataset", + "Synthetic OCR Dataset", + "LAION-40M-grounding" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which dataset contains 7 million English and 5 million Chinese entries created from browser-captured screenshots?\nAnswer:", + " CLAY-1B" + ], + [ + "Question:Which dataset contains 7 million English and 5 million Chinese entries created from browser-captured screenshots?\nAnswer:", + " Digital World Grounding Dataset" + ], + [ + "Question:Which dataset contains 7 million English and 5 million Chinese entries created from browser-captured screenshots?\nAnswer:", + " Synthetic OCR Dataset" + ], + [ + "Question:Which dataset contains 7 million English and 5 million Chinese entries created from browser-captured screenshots?\nAnswer:", + " LAION-40M-grounding" + ] + ], + "resps": [ + [ + [ + -30.70033073425293, + false + ] + ], + [ + [ + -35.19849395751953, + false + ] + ], + [ + [ + -22.89411163330078, + false + ] + ], + [ + [ + -34.586299896240234, + false + ] + ] + ], + "filtered_resps": [ + [ + -30.70033073425293, + false + ], + [ + -35.19849395751953, + false + ], + [ + -22.89411163330078, + false + ], + [ + -34.586299896240234, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "e18c9e2c9f7e6b3038aef222e26ffcdce6251b96c24cc379159162b29e79729b", + "prompt_hash": "9753bf05d93a5e528813540a56d21ba96eb749574ff9d408a6650516cb96d993", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 92, + "doc": { + "question": "Which architecture component reduces image sequence length to one-quarter its original size?", + "choices": [ + "Q-former", + "Linear layer", + "2×2 convolutional layer", + "ViT encoder" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which architecture component reduces image sequence length to one-quarter its original size?\nAnswer:", + " Q-former" + ], + [ + "Question:Which architecture component reduces image sequence length to one-quarter its original size?\nAnswer:", + " Linear layer" + ], + [ + "Question:Which architecture component reduces image sequence length to one-quarter its original size?\nAnswer:", + " 2×2 convolutional layer" + ], + [ + "Question:Which architecture component reduces image sequence length to one-quarter its original size?\nAnswer:", + " ViT encoder" + ] + ], + "resps": [ + [ + [ + -10.258474349975586, + false + ] + ], + [ + [ + -10.730835914611816, + false + ] + ], + [ + [ + -18.697784423828125, + false + ] + ], + [ + [ + -14.105308532714844, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.258474349975586, + false + ], + [ + -10.730835914611816, + false + ], + [ + -18.697784423828125, + false + ], + [ + -14.105308532714844, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "af39862375d46684cfdb2dc96930bf4914e05937e23f1ab07d1c6a9490ae4a92", + "prompt_hash": "e3c3ebaf6ae5c48799dd038be07a5c66a8af5874ad2c98fa58f76dedac17beca", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 93, + "doc": { + "question": "What was the learning rate used in the second stage of image supervised fine-tuning for CogVLM2?", + "choices": [ + "1e-4", + "1e-6", + "1e-5", + "Not specified" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What was 
the learning rate used in the second stage of image supervised fine-tuning for CogVLM2?\nAnswer:", + " 1e-4" + ], + [ + "Question:What was the learning rate used in the second stage of image supervised fine-tuning for CogVLM2?\nAnswer:", + " 1e-6" + ], + [ + "Question:What was the learning rate used in the second stage of image supervised fine-tuning for CogVLM2?\nAnswer:", + " 1e-5" + ], + [ + "Question:What was the learning rate used in the second stage of image supervised fine-tuning for CogVLM2?\nAnswer:", + " Not specified" + ] + ], + "resps": [ + [ + [ + -5.152227401733398, + false + ] + ], + [ + [ + -6.627537727355957, + false + ] + ], + [ + [ + -5.376708984375, + false + ] + ], + [ + [ + -10.316917419433594, + false + ] + ] + ], + "filtered_resps": [ + [ + -5.152227401733398, + false + ], + [ + -6.627537727355957, + false + ], + [ + -5.376708984375, + false + ], + [ + -10.316917419433594, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "4399dd4afd8a3359d187f5d62e0738fc49b754310b74e1483e2faca1f0496c18", + "prompt_hash": "b3d42e6622a06ce8d5e9fdad5e5637a516c8b11334e8eae2fc1c90ba5de91ff3", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 94, + "doc": { + "question": "Which post-training dataset includes 150 million Chinese entries focused on OCR scenarios?", + "choices": [ + "CLAY-1B", + "Digital World Grounding Dataset", + "Synthetic OCR Dataset", + "LAION-2B" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which post-training dataset includes 150 million Chinese entries focused on OCR scenarios?\nAnswer:", + " CLAY-1B" + ], + [ + "Question:Which post-training dataset includes 150 million Chinese entries focused on OCR scenarios?\nAnswer:", + " Digital World Grounding Dataset" + ], + [ + "Question:Which post-training dataset includes 150 million Chinese entries focused on OCR scenarios?\nAnswer:", + " Synthetic OCR Dataset" + ], + [ + "Question:Which post-training dataset includes 150 million Chinese entries focused on OCR scenarios?\nAnswer:", + " LAION-2B" + ] + ], + "resps": [ + [ + [ + -28.669137954711914, + false + ] + ], + [ + [ + -34.62318420410156, + false + ] + ], + [ + [ + -17.01984977722168, + false + ] + ], + [ + [ + -14.952802658081055, + false + ] + ] + ], + "filtered_resps": [ + [ + -28.669137954711914, + false + ], + [ + -34.62318420410156, + false + ], + [ + -17.01984977722168, + false + ], + [ + -14.952802658081055, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "03e6f501becf101b8451db2075dd9cbefc9c5273162555dd24142c94a997bfc3", + "prompt_hash": "d0789a0d2362a5a4facaa5ded8fc2cea59864ee3bad34cfcffbb419b4f55151c", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 95, + "doc": { + "question": "Which dataset is used to train the model for novel view synthesis?", + "choices": [ + "KITTI", + "ScanNet++", + "Cityscapes", + "MegaDepth" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which dataset is used to train the model for novel view synthesis?\nAnswer:", + " KITTI" + ], + [ + "Question:Which dataset is used to train the model for novel view synthesis?\nAnswer:", + " ScanNet++" + ], + [ + "Question:Which dataset is used to train the model for novel view synthesis?\nAnswer:", + " Cityscapes" + ], + [ + "Question:Which dataset is used to train the model for novel 
view synthesis?\nAnswer:", + " MegaDepth" + ] + ], + "resps": [ + [ + [ + -6.542630672454834, + false + ] + ], + [ + [ + -11.459537506103516, + false + ] + ], + [ + [ + -7.253916263580322, + false + ] + ], + [ + [ + -11.059212684631348, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.542630672454834, + false + ], + [ + -11.459537506103516, + false + ], + [ + -7.253916263580322, + false + ], + [ + -11.059212684631348, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "329f9d464b42c81c455e7a95a0a531e934b2d871c240ea8a3cd0f6296ce9c045", + "prompt_hash": "435e87cff1bc9dc01a42c14bc019d2595c9b9f66304252673b9ee8888b9f4904", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 96, + "doc": { + "question": "What is the primary input to the proposed model for 3D reconstruction?", + "choices": [ + "Calibrated image pair", + "Depth map and RGB image", + "Two uncalibrated images", + "Monocular video stream" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the primary input to the proposed model for 3D reconstruction?\nAnswer:", + " Calibrated image pair" + ], + [ + "Question:What is the primary input to the proposed model for 3D reconstruction?\nAnswer:", + " Depth map and RGB image" + ], + [ + "Question:What is the primary input to the proposed model for 3D reconstruction?\nAnswer:", + " Two uncalibrated images" + ], + [ + "Question:What is the primary input to the proposed model for 3D reconstruction?\nAnswer:", + " Monocular video stream" + ] + ], + "resps": [ + [ + [ + -21.52069664001465, + false + ] + ], + [ + [ + -17.651397705078125, + false + ] + ], + [ + [ + -22.90048599243164, + false + ] + ], + [ + [ + -18.692649841308594, + false + ] + ] + ], + "filtered_resps": [ + [ + -21.52069664001465, + false + ], + [ + -17.651397705078125, + false + ], + [ + -22.90048599243164, + false + ], + [ + -18.692649841308594, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "054f7f6b7928cc3c3df3a3a2677f1bb85093ecf64f9fc86fbf18428192a6c62f", + "prompt_hash": "e984d345e214624538cd6076bc0cdbe27590bb6da385151f3d3f141f61859d57", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 97, + "doc": { + "question": "What does the model use to parameterize the covariance matrix of each 3D Gaussian?", + "choices": [ + "Matrix multiplication and bias", + "Euler angles and scale", + "Rotation quaternion and scale", + "Pose vectors and tensors" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What does the model use to parameterize the covariance matrix of each 3D Gaussian?\nAnswer:", + " Matrix multiplication and bias" + ], + [ + "Question:What does the model use to parameterize the covariance matrix of each 3D Gaussian?\nAnswer:", + " Euler angles and scale" + ], + [ + "Question:What does the model use to parameterize the covariance matrix of each 3D Gaussian?\nAnswer:", + " Rotation quaternion and scale" + ], + [ + "Question:What does the model use to parameterize the covariance matrix of each 3D Gaussian?\nAnswer:", + " Pose vectors and tensors" + ] + ], + "resps": [ + [ + [ + -19.31600570678711, + false + ] + ], + [ + [ + -16.103641510009766, + false + ] + ], + [ + [ + -19.320552825927734, + false + ] + ], + [ + [ + -25.82880401611328, + false + ] + ] + ], + "filtered_resps": [ + [ + -19.31600570678711, + false + 
], + [ + -16.103641510009766, + false + ], + [ + -19.320552825927734, + false + ], + [ + -25.82880401611328, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "6a81dcfab0959a77b46d84e940ed374e7c1ae77922ce9c436ac83baa1cb49086", + "prompt_hash": "d562f05e3bd10c0e334befc0db9db03a35259e4277c131ddb0da2c44c6f74cc5", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 98, + "doc": { + "question": "What rendering losses are applied to train the Gaussian prediction head?", + "choices": [ + "L1 loss and SSIM", + "MSE and LPIPS", + "Cross-entropy and Focal loss", + "KL divergence and Huber loss" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What rendering losses are applied to train the Gaussian prediction head?\nAnswer:", + " L1 loss and SSIM" + ], + [ + "Question:What rendering losses are applied to train the Gaussian prediction head?\nAnswer:", + " MSE and LPIPS" + ], + [ + "Question:What rendering losses are applied to train the Gaussian prediction head?\nAnswer:", + " Cross-entropy and Focal loss" + ], + [ + "Question:What rendering losses are applied to train the Gaussian prediction head?\nAnswer:", + " KL divergence and Huber loss" + ] + ], + "resps": [ + [ + [ + -15.82857894897461, + false + ] + ], + [ + [ + -13.206483840942383, + false + ] + ], + [ + [ + -20.15416145324707, + false + ] + ], + [ + [ + -18.332340240478516, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.82857894897461, + false + ], + [ + -13.206483840942383, + false + ], + [ + -20.15416145324707, + false + ], + [ + -18.332340240478516, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7d12159581cafe65f033926ab36dfe7446ac692929a70477c621c3d96b5b6f6c", + "prompt_hash": "6f111e0ff63c4d6c53ca139655096ade67eda570b7417bade09f30f7236276fa", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 99, + "doc": { + "question": "What purpose does the loss masking strategy serve during training?", + "choices": [ + "To reduce GPU memory usage", + "To encourage smooth transitions between views", + "To avoid supervising pixels not visible in context images", + "To enhance the visibility of depth maps" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What purpose does the loss masking strategy serve during training?\nAnswer:", + " To reduce GPU memory usage" + ], + [ + "Question:What purpose does the loss masking strategy serve during training?\nAnswer:", + " To encourage smooth transitions between views" + ], + [ + "Question:What purpose does the loss masking strategy serve during training?\nAnswer:", + " To avoid supervising pixels not visible in context images" + ], + [ + "Question:What purpose does the loss masking strategy serve during training?\nAnswer:", + " To enhance the visibility of depth maps" + ] + ], + "resps": [ + [ + [ + -14.565971374511719, + false + ] + ], + [ + [ + -22.251873016357422, + false + ] + ], + [ + [ + -47.93913269042969, + false + ] + ], + [ + [ + -28.24319076538086, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.565971374511719, + false + ], + [ + -22.251873016357422, + false + ], + [ + -47.93913269042969, + false + ], + [ + -28.24319076538086, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "0734bd5e11f0e733942ec8180ee434e9a3c295a28e77baf791843224e58b1d37", 
+ "prompt_hash": "3ca9c635bfa6bb022e05cc5a47c2a7a230bd6ee0c46b649a6219726d4f4c5862", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + } + ], + "arxiv_mc_2409": [ + { + "doc_id": 0, + "doc": { + "question": "What is the visual encoder used in the architecture of \\u005cmodelname?", + "choices": [ + "Video Swin Transformer", + "TubeViT", + "CLIP-ViT", + "SigLIP" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the visual encoder used in the architecture of \\u005cmodelname?\nAnswer:", + " Video Swin Transformer" + ], + [ + "Question:What is the visual encoder used in the architecture of \\u005cmodelname?\nAnswer:", + " TubeViT" + ], + [ + "Question:What is the visual encoder used in the architecture of \\u005cmodelname?\nAnswer:", + " CLIP-ViT" + ], + [ + "Question:What is the visual encoder used in the architecture of \\u005cmodelname?\nAnswer:", + " SigLIP" + ] + ], + "resps": [ + [ + [ + -15.495767593383789, + false + ] + ], + [ + [ + -17.449329376220703, + false + ] + ], + [ + [ + -12.375112533569336, + false + ] + ], + [ + [ + -13.049660682678223, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.495767593383789, + false + ], + [ + -17.449329376220703, + false + ], + [ + -12.375112533569336, + false + ], + [ + -13.049660682678223, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "8f43676dfa8e99d64af0868235cfc9038da15b626ad93e21c4b65905122e0d60", + "prompt_hash": "1b419424cdc31dcd1715f04450f85f4d9581dcae81486bf8952af146acbdc655", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 1, + "doc": { + "question": "How many video clips are included in the \\u005cdatasetname benchmark?", + "choices": [ + "500", + "1,000", + "2,000", + "10,000" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:How many video clips are included in the \\u005cdatasetname benchmark?\nAnswer:", + " 500" + ], + [ + "Question:How many video clips are included in the \\u005cdatasetname benchmark?\nAnswer:", + " 1,000" + ], + [ + "Question:How many video clips are included in the \\u005cdatasetname benchmark?\nAnswer:", + " 2,000" + ], + [ + "Question:How many video clips are included in the \\u005cdatasetname benchmark?\nAnswer:", + " 10,000" + ] + ], + "resps": [ + [ + [ + -6.426371097564697, + false + ] + ], + [ + [ + -8.035820960998535, + false + ] + ], + [ + [ + -8.831319808959961, + false + ] + ], + [ + [ + -8.405014038085938, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.426371097564697, + false + ], + [ + -8.035820960998535, + false + ], + [ + -8.831319808959961, + false + ], + [ + -8.405014038085938, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "aa7b82cc13c10a1cfd45dcefe86437a90471b75fdc8e8173718d949889adecbb", + "prompt_hash": "68377350cdd14f3bdf1dd8b868e22f0683d7519a7b67d812761480c27d8c051d", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 2, + "doc": { + "question": "What two components does \\u005cevalname use to evaluate descriptions?", + "choices": [ + "Precision and recall metrics", + "BLEU and METEOR", + "Event extraction and entailment models", + "Human ratings and CIDEr scores" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What two components does \\u005cevalname use to evaluate 
descriptions?\nAnswer:", + " Precision and recall metrics" + ], + [ + "Question:What two components does \\u005cevalname use to evaluate descriptions?\nAnswer:", + " BLEU and METEOR" + ], + [ + "Question:What two components does \\u005cevalname use to evaluate descriptions?\nAnswer:", + " Event extraction and entailment models" + ], + [ + "Question:What two components does \\u005cevalname use to evaluate descriptions?\nAnswer:", + " Human ratings and CIDEr scores" + ] + ], + "resps": [ + [ + [ + -19.11516571044922, + false + ] + ], + [ + [ + -12.382637977600098, + false + ] + ], + [ + [ + -30.587970733642578, + false + ] + ], + [ + [ + -26.8033390045166, + false + ] + ] + ], + "filtered_resps": [ + [ + -19.11516571044922, + false + ], + [ + -12.382637977600098, + false + ], + [ + -30.587970733642578, + false + ], + [ + -26.8033390045166, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "6225b0f32d9f853fce5a77063c84800ff8eedcbe76a686813ca0e4146ee019aa", + "prompt_hash": "f3e0850221596a87411a4319e2cf07cddc5cb851555a6894f0861579adf68da4", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 3, + "doc": { + "question": "What is the total number of video-text pairs used during \\u005cmodelname's pre-training?", + "choices": [ + "2.8 million", + "6.5 million", + "13.6 million", + "38 million" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the total number of video-text pairs used during \\u005cmodelname's pre-training?\nAnswer:", + " 2.8 million" + ], + [ + "Question:What is the total number of video-text pairs used during \\u005cmodelname's pre-training?\nAnswer:", + " 6.5 million" + ], + [ + "Question:What is the total number of video-text pairs used during \\u005cmodelname's pre-training?\nAnswer:", + " 13.6 million" + ], + [ + "Question:What is the total number of video-text pairs used during \\u005cmodelname's pre-training?\nAnswer:", + " 38 million" + ] + ], + "resps": [ + [ + [ + -10.427526473999023, + false + ] + ], + [ + [ + -10.509381294250488, + false + ] + ], + [ + [ + -11.587591171264648, + false + ] + ], + [ + [ + -10.692402839660645, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.427526473999023, + false + ], + [ + -10.509381294250488, + false + ], + [ + -11.587591171264648, + false + ], + [ + -10.692402839660645, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "ec9ad2089dbc4bdfa6bd2478e3dfe84d4fb2e0aa51458b552776d9fcd0f039ce", + "prompt_hash": "decd19be57941d960406e3809c930cc8043d44b9bfd94d681e7ec4a5b369859f", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 4, + "doc": { + "question": "Which proprietary model is slightly preferred over \\u005cmodelname-34B in human side-by-side evaluation?", + "choices": [ + "GPT-4V", + "Gemini 1.5 Pro", + "GPT-4o", + "VideoChat2" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which proprietary model is slightly preferred over \\u005cmodelname-34B in human side-by-side evaluation?\nAnswer:", + " GPT-4V" + ], + [ + "Question:Which proprietary model is slightly preferred over \\u005cmodelname-34B in human side-by-side evaluation?\nAnswer:", + " Gemini 1.5 Pro" + ], + [ + "Question:Which proprietary model is slightly preferred over \\u005cmodelname-34B in human side-by-side evaluation?\nAnswer:", + " GPT-4o" + ], + [ + 
"Question:Which proprietary model is slightly preferred over \\u005cmodelname-34B in human side-by-side evaluation?\nAnswer:", + " VideoChat2" + ] + ], + "resps": [ + [ + [ + -12.168880462646484, + false + ] + ], + [ + [ + -11.913797378540039, + false + ] + ], + [ + [ + -9.950221061706543, + false + ] + ], + [ + [ + -19.21074867248535, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.168880462646484, + false + ], + [ + -11.913797378540039, + false + ], + [ + -9.950221061706543, + false + ], + [ + -19.21074867248535, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "3aeea6b1458836b9cfd8e1d80e468cc01769beb5d9c845810764ad974c715b99", + "prompt_hash": "1a27e107b5dcb6af186b7105913c3d136087e2b0c73041bb22c25768fb0b0b6e", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 5, + "doc": { + "question": "Which theorem forms the basis of Kolmogorov-Arnold Networks (KANs)?", + "choices": [ + "Universal Approximation Theorem", + "Kolmogorov-Arnold Representation Theorem", + "Central Limit Theorem", + "Banach Fixed-Point Theorem" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which theorem forms the basis of Kolmogorov-Arnold Networks (KANs)?\nAnswer:", + " Universal Approximation Theorem" + ], + [ + "Question:Which theorem forms the basis of Kolmogorov-Arnold Networks (KANs)?\nAnswer:", + " Kolmogorov-Arnold Representation Theorem" + ], + [ + "Question:Which theorem forms the basis of Kolmogorov-Arnold Networks (KANs)?\nAnswer:", + " Central Limit Theorem" + ], + [ + "Question:Which theorem forms the basis of Kolmogorov-Arnold Networks (KANs)?\nAnswer:", + " Banach Fixed-Point Theorem" + ] + ], + "resps": [ + [ + [ + -11.135324478149414, + false + ] + ], + [ + [ + -10.827104568481445, + false + ] + ], + [ + [ + -10.046956062316895, + false + ] + ], + [ + [ + -17.358673095703125, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.135324478149414, + false + ], + [ + -10.827104568481445, + false + ], + [ + -10.046956062316895, + false + ], + [ + -17.358673095703125, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "564ec16c40c0c8d379bae9ed11a470dd4f4b978724e42191d260862063c2d06b", + "prompt_hash": "7ea86d8f7700ff5117a06df5224c06b7c949b2625a74137d37d9b73b523a5bec", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 6, + "doc": { + "question": "What activation function was originally proposed by Liu et al. for KANs?", + "choices": [ + "Gaussian radial basis functions", + "Swish", + "ReLU", + "B-spline" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What activation function was originally proposed by Liu et al. for KANs?\nAnswer:", + " Gaussian radial basis functions" + ], + [ + "Question:What activation function was originally proposed by Liu et al. for KANs?\nAnswer:", + " Swish" + ], + [ + "Question:What activation function was originally proposed by Liu et al. for KANs?\nAnswer:", + " ReLU" + ], + [ + "Question:What activation function was originally proposed by Liu et al. 
for KANs?\nAnswer:", + " B-spline" + ] + ], + "resps": [ + [ + [ + -17.572092056274414, + false + ] + ], + [ + [ + -6.097117900848389, + false + ] + ], + [ + [ + -2.6757876873016357, + false + ] + ], + [ + [ + -12.105659484863281, + false + ] + ] + ], + "filtered_resps": [ + [ + -17.572092056274414, + false + ], + [ + -6.097117900848389, + false + ], + [ + -2.6757876873016357, + false + ], + [ + -12.105659484863281, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "dcbfbd43678c7eaf0b55b660693da39aff35639414c5df78ab209de375693c0f", + "prompt_hash": "e9ffeb039d445a5fccb6de2caa808f2544a0fc5812d3375b0151d9902141da92", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 7, + "doc": { + "question": "Which ODE solver was employed by default in the implementation of KAN-ODEs?", + "choices": [ + "Euler method", + "Tsit5 Runge-Kutta method", + "Rodas5 Rosenbrock method", + "RK4 method" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which ODE solver was employed by default in the implementation of KAN-ODEs?\nAnswer:", + " Euler method" + ], + [ + "Question:Which ODE solver was employed by default in the implementation of KAN-ODEs?\nAnswer:", + " Tsit5 Runge-Kutta method" + ], + [ + "Question:Which ODE solver was employed by default in the implementation of KAN-ODEs?\nAnswer:", + " Rodas5 Rosenbrock method" + ], + [ + "Question:Which ODE solver was employed by default in the implementation of KAN-ODEs?\nAnswer:", + " RK4 method" + ] + ], + "resps": [ + [ + [ + -6.391721725463867, + false + ] + ], + [ + [ + -29.77779769897461, + false + ] + ], + [ + [ + -39.73161315917969, + false + ] + ], + [ + [ + -11.668317794799805, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.391721725463867, + false + ], + [ + -29.77779769897461, + false + ], + [ + -39.73161315917969, + false + ], + [ + -11.668317794799805, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "747de9bc42d4924efdd61e9028dd07faec0e61e0db10a3b467fcf3c01d3a6b18", + "prompt_hash": "ae852e95920e9e1a207f05a15800b95046c3fcf68978f4b13614a111b0dddc28", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 8, + "doc": { + "question": "In the Lotka-Volterra predator-prey example, what was the initial condition used?", + "choices": [ + "[0, 0]", + "[2, 2]", + "[1, 1]", + "[3, 3]" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:In the Lotka-Volterra predator-prey example, what was the initial condition used?\nAnswer:", + " [0, 0]" + ], + [ + "Question:In the Lotka-Volterra predator-prey example, what was the initial condition used?\nAnswer:", + " [2, 2]" + ], + [ + "Question:In the Lotka-Volterra predator-prey example, what was the initial condition used?\nAnswer:", + " [1, 1]" + ], + [ + "Question:In the Lotka-Volterra predator-prey example, what was the initial condition used?\nAnswer:", + " [3, 3]" + ] + ], + "resps": [ + [ + [ + -16.677711486816406, + false + ] + ], + [ + [ + -16.641386032104492, + false + ] + ], + [ + [ + -15.760937690734863, + false + ] + ], + [ + [ + -18.1982479095459, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.677711486816406, + false + ], + [ + -16.641386032104492, + false + ], + [ + -15.760937690734863, + false + ], + [ + -18.1982479095459, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + 
"doc_hash": "93021bd825ff8280fcea636dfde8e164be3d2313e95d71cf8c3a9a6b8914e9d6", + "prompt_hash": "2329c2216a6718d4bebf58ca3594b68e7b3dde2251b5f6b547995d755e2a37d0", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 9, + "doc": { + "question": "Which equation was used as an example of modeling hidden physics in PDEs?", + "choices": [ + "Schrödinger equation", + "Burgers' equation", + "Fisher-KPP equation", + "Allen-Cahn equation" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which equation was used as an example of modeling hidden physics in PDEs?\nAnswer:", + " Schrödinger equation" + ], + [ + "Question:Which equation was used as an example of modeling hidden physics in PDEs?\nAnswer:", + " Burgers' equation" + ], + [ + "Question:Which equation was used as an example of modeling hidden physics in PDEs?\nAnswer:", + " Fisher-KPP equation" + ], + [ + "Question:Which equation was used as an example of modeling hidden physics in PDEs?\nAnswer:", + " Allen-Cahn equation" + ] + ], + "resps": [ + [ + [ + -8.50059986114502, + false + ] + ], + [ + [ + -10.451626777648926, + false + ] + ], + [ + [ + -15.836602210998535, + false + ] + ], + [ + [ + -11.835906028747559, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.50059986114502, + false + ], + [ + -10.451626777648926, + false + ], + [ + -15.836602210998535, + false + ], + [ + -11.835906028747559, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "37fc0f6caf5b17531830efba868ae0162dcf3e8e5a3a619bd6b9c917b96a321b", + "prompt_hash": "b8d3f719e0aa2b23ede74e5f04b31d155a25bf5d6fc7eade785b66a3af24ea0f", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 10, + "doc": { + "question": "What is the number of activated parameters in Qwen2-57B-A14B?", + "choices": [ + "12B", + "14B", + "30B", + "57B" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the number of activated parameters in Qwen2-57B-A14B?\nAnswer:", + " 12B" + ], + [ + "Question:What is the number of activated parameters in Qwen2-57B-A14B?\nAnswer:", + " 14B" + ], + [ + "Question:What is the number of activated parameters in Qwen2-57B-A14B?\nAnswer:", + " 30B" + ], + [ + "Question:What is the number of activated parameters in Qwen2-57B-A14B?\nAnswer:", + " 57B" + ] + ], + "resps": [ + [ + [ + -8.936101913452148, + false + ] + ], + [ + [ + -7.763552188873291, + false + ] + ], + [ + [ + -10.072871208190918, + false + ] + ], + [ + [ + -7.646878242492676, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.936101913452148, + false + ], + [ + -7.763552188873291, + false + ], + [ + -10.072871208190918, + false + ], + [ + -7.646878242492676, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "291e995ab3bccb0fa73fd6bb6aaa4aa51ad07b89c69502bf7bba0ac9e0cda457", + "prompt_hash": "d9bf57b047928f08c5addb57d1272fd20ecd564c0618409f6f817f3de20824c6", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 11, + "doc": { + "question": "Which benchmark dataset uses 25-shot prompting for evaluation?", + "choices": [ + "ARC-C", + "GPQA", + "MMLU", + "TruthfulQA" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which benchmark dataset uses 25-shot prompting for evaluation?\nAnswer:", + " ARC-C" + ], + 
[ + "Question:Which benchmark dataset uses 25-shot prompting for evaluation?\nAnswer:", + " GPQA" + ], + [ + "Question:Which benchmark dataset uses 25-shot prompting for evaluation?\nAnswer:", + " MMLU" + ], + [ + "Question:Which benchmark dataset uses 25-shot prompting for evaluation?\nAnswer:", + " TruthfulQA" + ] + ], + "resps": [ + [ + [ + -9.260011672973633, + false + ] + ], + [ + [ + -12.34814167022705, + false + ] + ], + [ + [ + -5.928160667419434, + false + ] + ], + [ + [ + -9.763457298278809, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.260011672973633, + false + ], + [ + -12.34814167022705, + false + ], + [ + -5.928160667419434, + false + ], + [ + -9.763457298278809, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "410e02e0a3e49a49831d1bc18cf1089e2c13a77e008734557ccc741fc5f69a62", + "prompt_hash": "d2c298b8489b04c8e91b01c98dc9a3b1e72049354d6e818adba81be34aa6290e", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 12, + "doc": { + "question": "Which dataset did Qwen2-72B achieve the highest score on among the coding benchmarks?", + "choices": [ + "HumanEval", + "MBPP", + "EvalPlus", + "MultiPL-E" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which dataset did Qwen2-72B achieve the highest score on among the coding benchmarks?\nAnswer:", + " HumanEval" + ], + [ + "Question:Which dataset did Qwen2-72B achieve the highest score on among the coding benchmarks?\nAnswer:", + " MBPP" + ], + [ + "Question:Which dataset did Qwen2-72B achieve the highest score on among the coding benchmarks?\nAnswer:", + " EvalPlus" + ], + [ + "Question:Which dataset did Qwen2-72B achieve the highest score on among the coding benchmarks?\nAnswer:", + " MultiPL-E" + ] + ], + "resps": [ + [ + [ + -7.070164203643799, + false + ] + ], + [ + [ + -9.641318321228027, + false + ] + ], + [ + [ + -12.466957092285156, + false + ] + ], + [ + [ + -24.767515182495117, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.070164203643799, + false + ], + [ + -9.641318321228027, + false + ], + [ + -12.466957092285156, + false + ], + [ + -24.767515182495117, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "9856e8c7bc348dff04f4b4a132918f0f1eb2affb840d3ca01d81a61ea651d092", + "prompt_hash": "99510d8e3c0f104f96e5d57839543c71b371b81be7e5f884f54c54078d7920f7", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 13, + "doc": { + "question": "What is the MMLU score achieved by Qwen2-7B?", + "choices": [ + "64.2", + "66.6", + "61.0", + "70.3" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What is the MMLU score achieved by Qwen2-7B?\nAnswer:", + " 64.2" + ], + [ + "Question:What is the MMLU score achieved by Qwen2-7B?\nAnswer:", + " 66.6" + ], + [ + "Question:What is the MMLU score achieved by Qwen2-7B?\nAnswer:", + " 61.0" + ], + [ + "Question:What is the MMLU score achieved by Qwen2-7B?\nAnswer:", + " 70.3" + ] + ], + "resps": [ + [ + [ + -8.043088912963867, + false + ] + ], + [ + [ + -8.122020721435547, + false + ] + ], + [ + [ + -8.118026733398438, + false + ] + ], + [ + [ + -8.125846862792969, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.043088912963867, + false + ], + [ + -8.122020721435547, + false + ], + [ + -8.118026733398438, + false + ], + [ + -8.125846862792969, + false + ] + ], + "filter": "none", + 
"metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "2851a4005490cadd120a7d3628ea0f7f021360798079b32af678d54dbdea6110", + "prompt_hash": "62f178a26a549057120966177c6af7cd055f261ae620fa49ac14ff14f36ed083", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 14, + "doc": { + "question": "In the multilingual human evaluation, which language did Qwen2-72B-Instruct score the lowest in?", + "choices": [ + "Arabic", + "Korean", + "Japanese", + "Thai" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:In the multilingual human evaluation, which language did Qwen2-72B-Instruct score the lowest in?\nAnswer:", + " Arabic" + ], + [ + "Question:In the multilingual human evaluation, which language did Qwen2-72B-Instruct score the lowest in?\nAnswer:", + " Korean" + ], + [ + "Question:In the multilingual human evaluation, which language did Qwen2-72B-Instruct score the lowest in?\nAnswer:", + " Japanese" + ], + [ + "Question:In the multilingual human evaluation, which language did Qwen2-72B-Instruct score the lowest in?\nAnswer:", + " Thai" + ] + ], + "resps": [ + [ + [ + -6.313841342926025, + false + ] + ], + [ + [ + -8.17893123626709, + false + ] + ], + [ + [ + -5.562231063842773, + false + ] + ], + [ + [ + -8.58788776397705, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.313841342926025, + false + ], + [ + -8.17893123626709, + false + ], + [ + -5.562231063842773, + false + ], + [ + -8.58788776397705, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "3ad3b7e606e9801b2a95c0d03c502ff29f5e16cd6d50667c6adf7073e402d078", + "prompt_hash": "5aa70bedcf6224988ae2ce4fef1f93fe7a13492bd675ae91110604a0143d3017", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 15, + "doc": { + "question": "What is the maximum number of visual tokens used by SF-LLaVA to avoid exceeding 80G GPU memory?", + "choices": [ + "4096", + "3200", + "3680", + "4608" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the maximum number of visual tokens used by SF-LLaVA to avoid exceeding 80G GPU memory?\nAnswer:", + " 4096" + ], + [ + "Question:What is the maximum number of visual tokens used by SF-LLaVA to avoid exceeding 80G GPU memory?\nAnswer:", + " 3200" + ], + [ + "Question:What is the maximum number of visual tokens used by SF-LLaVA to avoid exceeding 80G GPU memory?\nAnswer:", + " 3680" + ], + [ + "Question:What is the maximum number of visual tokens used by SF-LLaVA to avoid exceeding 80G GPU memory?\nAnswer:", + " 4608" + ] + ], + "resps": [ + [ + [ + -5.425139427185059, + false + ] + ], + [ + [ + -9.016348838806152, + false + ] + ], + [ + [ + -13.149456024169922, + false + ] + ], + [ + [ + -10.514032363891602, + false + ] + ] + ], + "filtered_resps": [ + [ + -5.425139427185059, + false + ], + [ + -9.016348838806152, + false + ], + [ + -13.149456024169922, + false + ], + [ + -10.514032363891602, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "ad13fe9a163f19221c6bf35b82ed2c73b4d4ad49359f2624d8530674e1407ac8", + "prompt_hash": "86ba060cafa27e42e47324515c56ecde20b126a762287ed01eb0fd5c0d91d0f4", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 16, + "doc": { + "question": "Which dataset is used to evaluate the Temporal Understanding 
(TU) of Video LLMs?", + "choices": [ + "VCGBench", + "EgoSchema", + "MSRVTT-QA", + "ActivityNet-QA" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which dataset is used to evaluate the Temporal Understanding (TU) of Video LLMs?\nAnswer:", + " VCGBench" + ], + [ + "Question:Which dataset is used to evaluate the Temporal Understanding (TU) of Video LLMs?\nAnswer:", + " EgoSchema" + ], + [ + "Question:Which dataset is used to evaluate the Temporal Understanding (TU) of Video LLMs?\nAnswer:", + " MSRVTT-QA" + ], + [ + "Question:Which dataset is used to evaluate the Temporal Understanding (TU) of Video LLMs?\nAnswer:", + " ActivityNet-QA" + ] + ], + "resps": [ + [ + [ + -27.394710540771484, + false + ] + ], + [ + [ + -17.418739318847656, + false + ] + ], + [ + [ + -16.56558609008789, + false + ] + ], + [ + [ + -15.635619163513184, + false + ] + ] + ], + "filtered_resps": [ + [ + -27.394710540771484, + false + ], + [ + -17.418739318847656, + false + ], + [ + -16.56558609008789, + false + ], + [ + -15.635619163513184, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "4b72d0430c20d5fc5c3b275e253d36baee93c4a33960828036bb124617144e65", + "prompt_hash": "6f7a3738219247c007d3fa09ddd5f4dc026be5d9ad5f5684ae8fe63f33097793", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 17, + "doc": { + "question": "What is the primary visual encoder used in SF-LLaVA?", + "choices": [ + "ViT-L/14", + "CLIP-L-14", + "BLIP-2", + "AnyRes" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the primary visual encoder used in SF-LLaVA?\nAnswer:", + " ViT-L/14" + ], + [ + "Question:What is the primary visual encoder used in SF-LLaVA?\nAnswer:", + " CLIP-L-14" + ], + [ + "Question:What is the primary visual encoder used in SF-LLaVA?\nAnswer:", + " BLIP-2" + ], + [ + "Question:What is the primary visual encoder used in SF-LLaVA?\nAnswer:", + " AnyRes" + ] + ], + "resps": [ + [ + [ + -10.990835189819336, + false + ] + ], + [ + [ + -18.62881851196289, + false + ] + ], + [ + [ + -6.553082466125488, + false + ] + ], + [ + [ + -15.750223159790039, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.990835189819336, + false + ], + [ + -18.62881851196289, + false + ], + [ + -6.553082466125488, + false + ], + [ + -15.750223159790039, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "ddafe5811d17653c4db88355421d924675dc6e564970045ec933199f7d1fadd2", + "prompt_hash": "5c6a92c4e5fb08ca37a06d1e9e9abe48117262d031e354edfeca6b89b5de773d", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 18, + "doc": { + "question": "How many frames are used in the Fast pathway by default in SF-LLaVA?", + "choices": [ + "12", + "32", + "50", + "64" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many frames are used in the Fast pathway by default in SF-LLaVA?\nAnswer:", + " 12" + ], + [ + "Question:How many frames are used in the Fast pathway by default in SF-LLaVA?\nAnswer:", + " 32" + ], + [ + "Question:How many frames are used in the Fast pathway by default in SF-LLaVA?\nAnswer:", + " 50" + ], + [ + "Question:How many frames are used in the Fast pathway by default in SF-LLaVA?\nAnswer:", + " 64" + ] + ], + "resps": [ + [ + [ + -5.225262641906738, + false + ] + ], + [ + [ + -6.255336761474609, + false + ] + ], + [ + 
[ + -6.196992874145508, + false + ] + ], + [ + [ + -6.987529754638672, + false + ] + ] + ], + "filtered_resps": [ + [ + -5.225262641906738, + false + ], + [ + -6.255336761474609, + false + ], + [ + -6.196992874145508, + false + ], + [ + -6.987529754638672, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "1814f8766dbb483c1dc51b998d04b17576a13be811648b5484767fafac01a560", + "prompt_hash": "7eaee63b4adef0215d837ca19de0c801fd35277d6c27ba03361aa161438858f6", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 19, + "doc": { + "question": "What kind of prompt does SF-LLaVA use for Multiple Choice VideoQA tasks?", + "choices": [ + "Answer based on image content", + "In this video, the answer is:", + "Best Option:(", + "Choose the correct label" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What kind of prompt does SF-LLaVA use for Multiple Choice VideoQA tasks?\nAnswer:", + " Answer based on image content" + ], + [ + "Question:What kind of prompt does SF-LLaVA use for Multiple Choice VideoQA tasks?\nAnswer:", + " In this video, the answer is:" + ], + [ + "Question:What kind of prompt does SF-LLaVA use for Multiple Choice VideoQA tasks?\nAnswer:", + " Best Option:(" + ], + [ + "Question:What kind of prompt does SF-LLaVA use for Multiple Choice VideoQA tasks?\nAnswer:", + " Choose the correct label" + ] + ], + "resps": [ + [ + [ + -21.13357162475586, + false + ] + ], + [ + [ + -18.91696548461914, + false + ] + ], + [ + [ + -25.731494903564453, + false + ] + ], + [ + [ + -16.9730281829834, + false + ] + ] + ], + "filtered_resps": [ + [ + -21.13357162475586, + false + ], + [ + -18.91696548461914, + false + ], + [ + -25.731494903564453, + false + ], + [ + -16.9730281829834, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "67f0766718f330067f520f7a6b1b7f8182af7a2ac16cc7c42325778a9f1e77ac", + "prompt_hash": "764ec196e50ebad35d033eb2c74554ffdc888ebdc44bcb6debabfac2c87e97fe", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 20, + "doc": { + "question": "What condition must the dark energy equation of state (EoS) satisfy to cause cosmic acceleration?", + "choices": [ + "w > 0", + "w < -1/3", + "w = 0", + "w > 1" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What condition must the dark energy equation of state (EoS) satisfy to cause cosmic acceleration?\nAnswer:", + " w > 0" + ], + [ + "Question:What condition must the dark energy equation of state (EoS) satisfy to cause cosmic acceleration?\nAnswer:", + " w < -1/3" + ], + [ + "Question:What condition must the dark energy equation of state (EoS) satisfy to cause cosmic acceleration?\nAnswer:", + " w = 0" + ], + [ + "Question:What condition must the dark energy equation of state (EoS) satisfy to cause cosmic acceleration?\nAnswer:", + " w > 1" + ] + ], + "resps": [ + [ + [ + -14.41776180267334, + false + ] + ], + [ + [ + -19.103534698486328, + false + ] + ], + [ + [ + -13.807682037353516, + false + ] + ], + [ + [ + -13.923848152160645, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.41776180267334, + false + ], + [ + -19.103534698486328, + false + ], + [ + -13.807682037353516, + false + ], + [ + -13.923848152160645, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": 
"82323cd2bfdb0979387995bd708d5e4373cea800896b67b4c7fbcbd77a25ba6c", + "prompt_hash": "85096bbeff721214820f6ef5e78686e35fa5052e6205e981155da67c7fb588a3", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 21, + "doc": { + "question": "What is the main conceptual problem associated with inserting a positive cosmological constant into Einstein's equations?", + "choices": [ + "It predicts an open universe.", + "It leads to a cyclic universe model.", + "It implies an asymptotically de Sitter universe, conflicting with quantum gravity theories.", + "It eliminates the need for dark matter." + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the main conceptual problem associated with inserting a positive cosmological constant into Einstein's equations?\nAnswer:", + " It predicts an open universe." + ], + [ + "Question:What is the main conceptual problem associated with inserting a positive cosmological constant into Einstein's equations?\nAnswer:", + " It leads to a cyclic universe model." + ], + [ + "Question:What is the main conceptual problem associated with inserting a positive cosmological constant into Einstein's equations?\nAnswer:", + " It implies an asymptotically de Sitter universe, conflicting with quantum gravity theories." + ], + [ + "Question:What is the main conceptual problem associated with inserting a positive cosmological constant into Einstein's equations?\nAnswer:", + " It eliminates the need for dark matter." + ] + ], + "resps": [ + [ + [ + -24.33345603942871, + false + ] + ], + [ + [ + -26.65534210205078, + false + ] + ], + [ + [ + -47.05223846435547, + false + ] + ], + [ + [ + -16.610246658325195, + false + ] + ] + ], + "filtered_resps": [ + [ + -24.33345603942871, + false + ], + [ + -26.65534210205078, + false + ], + [ + -47.05223846435547, + false + ], + [ + -16.610246658325195, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "86148b6f729db3fd0ac1d7971cc469ddad875ba18641ffa5a971809b9250a4ac", + "prompt_hash": "5ab838d236da42281a56567224f23b0bbb4b61eb83a09089fafbd94761d5565f", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 22, + "doc": { + "question": "Which experiment provided measurements of the CMB lensing potential using the PR4 NPIPE data release?", + "choices": [ + "ACT-DR6", + "Planck", + "DESI", + "DES" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which experiment provided measurements of the CMB lensing potential using the PR4 NPIPE data release?\nAnswer:", + " ACT-DR6" + ], + [ + "Question:Which experiment provided measurements of the CMB lensing potential using the PR4 NPIPE data release?\nAnswer:", + " Planck" + ], + [ + "Question:Which experiment provided measurements of the CMB lensing potential using the PR4 NPIPE data release?\nAnswer:", + " DESI" + ], + [ + "Question:Which experiment provided measurements of the CMB lensing potential using the PR4 NPIPE data release?\nAnswer:", + " DES" + ] + ], + "resps": [ + [ + [ + -23.941354751586914, + false + ] + ], + [ + [ + -8.566020011901855, + false + ] + ], + [ + [ + -10.961417198181152, + false + ] + ], + [ + [ + -10.11159896850586, + false + ] + ] + ], + "filtered_resps": [ + [ + -23.941354751586914, + false + ], + [ + -8.566020011901855, + false + ], + [ + -10.961417198181152, + false + ], + [ + -10.11159896850586, + false + ] + ], + 
"filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "e1f9cfeac744ffec2dfa9e26688706bdd37d486ca5e583514ac532844115c757", + "prompt_hash": "a9cf649c16a1421d67dfe91a4f4556f9da722bf8231c08498c8e4ab673908ab6", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 23, + "doc": { + "question": "Which DE parameterization is characterized by a functional form w(a) = w_0 - w_a * ln a?", + "choices": [ + "CPL", + "Exponential", + "JBP", + "Logarithmic" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:Which DE parameterization is characterized by a functional form w(a) = w_0 - w_a * ln a?\nAnswer:", + " CPL" + ], + [ + "Question:Which DE parameterization is characterized by a functional form w(a) = w_0 - w_a * ln a?\nAnswer:", + " Exponential" + ], + [ + "Question:Which DE parameterization is characterized by a functional form w(a) = w_0 - w_a * ln a?\nAnswer:", + " JBP" + ], + [ + "Question:Which DE parameterization is characterized by a functional form w(a) = w_0 - w_a * ln a?\nAnswer:", + " Logarithmic" + ] + ], + "resps": [ + [ + [ + -11.294991493225098, + false + ] + ], + [ + [ + -8.75047779083252, + false + ] + ], + [ + [ + -15.90029525756836, + false + ] + ], + [ + [ + -10.654081344604492, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.294991493225098, + false + ], + [ + -8.75047779083252, + false + ], + [ + -15.90029525756836, + false + ], + [ + -10.654081344604492, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "8dfd0f223423807451c6063af78c0afc900efca2f97c9c13ec34378122a50e73", + "prompt_hash": "709eb426ccdf71005995f3d6037e5013d2b95815c6f220f2f1c422762fb51d13", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 24, + "doc": { + "question": "Which supernova sample produced the largest statistical preference for Dynamical Dark Energy (DDE)?", + "choices": [ + "PantheonPlus", + "Union3", + "DESY5", + "SDSS-II" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which supernova sample produced the largest statistical preference for Dynamical Dark Energy (DDE)?\nAnswer:", + " PantheonPlus" + ], + [ + "Question:Which supernova sample produced the largest statistical preference for Dynamical Dark Energy (DDE)?\nAnswer:", + " Union3" + ], + [ + "Question:Which supernova sample produced the largest statistical preference for Dynamical Dark Energy (DDE)?\nAnswer:", + " DESY5" + ], + [ + "Question:Which supernova sample produced the largest statistical preference for Dynamical Dark Energy (DDE)?\nAnswer:", + " SDSS-II" + ] + ], + "resps": [ + [ + [ + -11.708585739135742, + false + ] + ], + [ + [ + -16.84764289855957, + false + ] + ], + [ + [ + -10.6693754196167, + false + ] + ], + [ + [ + -13.301567077636719, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.708585739135742, + false + ], + [ + -16.84764289855957, + false + ], + [ + -10.6693754196167, + false + ], + [ + -13.301567077636719, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "14d642bf381ec0530fe6821aa6181c2344c27825e93f56616bdcf94a79d6edd1", + "prompt_hash": "064d714bc1f66de31bbb6f9847b45cd804d589dfa7fa0fa1ef48a35905e1ba6b", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 25, + "doc": { + "question": "What is the primary 
challenge in global Structure-from-Motion related to translation averaging?", + "choices": [ + "Lack of feature detection methods", + "Scale ambiguity in relative translation", + "Insufficient camera rotation data", + "Inability to compute homographies" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the primary challenge in global Structure-from-Motion related to translation averaging?\nAnswer:", + " Lack of feature detection methods" + ], + [ + "Question:What is the primary challenge in global Structure-from-Motion related to translation averaging?\nAnswer:", + " Scale ambiguity in relative translation" + ], + [ + "Question:What is the primary challenge in global Structure-from-Motion related to translation averaging?\nAnswer:", + " Insufficient camera rotation data" + ], + [ + "Question:What is the primary challenge in global Structure-from-Motion related to translation averaging?\nAnswer:", + " Inability to compute homographies" + ] + ], + "resps": [ + [ + [ + -24.926162719726562, + false + ] + ], + [ + [ + -22.360336303710938, + false + ] + ], + [ + [ + -26.29901885986328, + false + ] + ], + [ + [ + -31.63737678527832, + false + ] + ] + ], + "filtered_resps": [ + [ + -24.926162719726562, + false + ], + [ + -22.360336303710938, + false + ], + [ + -26.29901885986328, + false + ], + [ + -31.63737678527832, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f99d08f9316fce91b510f5d57417ce96c55cfbbbad46e9e0ed2510b687397869", + "prompt_hash": "a725097860ca0ad0446924e2aaecd9eeca8bbe5cbada9dca5897cb2f8366be48", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 26, + "doc": { + "question": "Which robust loss function is used in the global positioning optimization of GLOMAP?", + "choices": [ + "Mean squared error", + "Absolute error", + "Huber loss", + "Log-cosh loss" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which robust loss function is used in the global positioning optimization of GLOMAP?\nAnswer:", + " Mean squared error" + ], + [ + "Question:Which robust loss function is used in the global positioning optimization of GLOMAP?\nAnswer:", + " Absolute error" + ], + [ + "Question:Which robust loss function is used in the global positioning optimization of GLOMAP?\nAnswer:", + " Huber loss" + ], + [ + "Question:Which robust loss function is used in the global positioning optimization of GLOMAP?\nAnswer:", + " Log-cosh loss" + ] + ], + "resps": [ + [ + [ + -10.13412857055664, + false + ] + ], + [ + [ + -11.498472213745117, + false + ] + ], + [ + [ + -8.70973014831543, + false + ] + ], + [ + [ + -22.59506607055664, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.13412857055664, + false + ], + [ + -11.498472213745117, + false + ], + [ + -8.70973014831543, + false + ], + [ + -22.59506607055664, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "77c25d36e232925c9f49584d45188e8d9c9f8d946821699754d5bc19f98e8279", + "prompt_hash": "7eca0f7c3a1febfdd9b41f98198073faa3be68055d4641da35e96e285afc07dc", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 27, + "doc": { + "question": "Which datasets are used in the paper to evaluate performance on sequential data with millimeter-accurate ground truth?", + "choices": [ + "IMC 2023 and MIP360", + "ETH3D SLAM and ETH3D MVS (rig)", + "Strecha 
and KITTI", + "LaMAR and VisualSfM" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which datasets are used in the paper to evaluate performance on sequential data with millimeter-accurate ground truth?\nAnswer:", + " IMC 2023 and MIP360" + ], + [ + "Question:Which datasets are used in the paper to evaluate performance on sequential data with millimeter-accurate ground truth?\nAnswer:", + " ETH3D SLAM and ETH3D MVS (rig)" + ], + [ + "Question:Which datasets are used in the paper to evaluate performance on sequential data with millimeter-accurate ground truth?\nAnswer:", + " Strecha and KITTI" + ], + [ + "Question:Which datasets are used in the paper to evaluate performance on sequential data with millimeter-accurate ground truth?\nAnswer:", + " LaMAR and VisualSfM" + ] + ], + "resps": [ + [ + [ + -47.251590728759766, + false + ] + ], + [ + [ + -51.58158874511719, + false + ] + ], + [ + [ + -32.44324493408203, + false + ] + ], + [ + [ + -46.49724578857422, + false + ] + ] + ], + "filtered_resps": [ + [ + -47.251590728759766, + false + ], + [ + -51.58158874511719, + false + ], + [ + -32.44324493408203, + false + ], + [ + -46.49724578857422, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "2df268151d36dc6a519f2de02a98aeffc1c67b48b4cc66a2316d317982acd099", + "prompt_hash": "2a3241ff08915e7beb5070a53126698c15aa77fe6abdadcb898f4ba796b9f07f", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 28, + "doc": { + "question": "Which optimization method is applied during global bundle adjustment in the GLOMAP pipeline?", + "choices": [ + "Gradient descent", + "Stochastic gradient descent", + "Levenberg–Marquardt", + "Newton-Raphson" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which optimization method is applied during global bundle adjustment in the GLOMAP pipeline?\nAnswer:", + " Gradient descent" + ], + [ + "Question:Which optimization method is applied during global bundle adjustment in the GLOMAP pipeline?\nAnswer:", + " Stochastic gradient descent" + ], + [ + "Question:Which optimization method is applied during global bundle adjustment in the GLOMAP pipeline?\nAnswer:", + " Levenberg–Marquardt" + ], + [ + "Question:Which optimization method is applied during global bundle adjustment in the GLOMAP pipeline?\nAnswer:", + " Newton-Raphson" + ] + ], + "resps": [ + [ + [ + -7.872706890106201, + false + ] + ], + [ + [ + -10.082393646240234, + false + ] + ], + [ + [ + -11.263614654541016, + false + ] + ], + [ + [ + -9.485450744628906, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.872706890106201, + false + ], + [ + -10.082393646240234, + false + ], + [ + -11.263614654541016, + false + ], + [ + -9.485450744628906, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "178654b53a9965290627138bc1a447ef847b56db76ca0dada65316f74dc3afc7", + "prompt_hash": "60df566c5bdf7cca2faa297f7a4005c0307ba8f475cd2f1c17d78c6af9629a03", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 29, + "doc": { + "question": "Which open-source global SfM library is noted for using the LUD method for translation averaging?", + "choices": [ + "OpenMVG", + "Theia", + "COLMAP", + "PixSfM" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which open-source global SfM library is noted for using the LUD 
method for translation averaging?\nAnswer:", + " OpenMVG" + ], + [ + "Question:Which open-source global SfM library is noted for using the LUD method for translation averaging?\nAnswer:", + " Theia" + ], + [ + "Question:Which open-source global SfM library is noted for using the LUD method for translation averaging?\nAnswer:", + " COLMAP" + ], + [ + "Question:Which open-source global SfM library is noted for using the LUD method for translation averaging?\nAnswer:", + " PixSfM" + ] + ], + "resps": [ + [ + [ + -15.288139343261719, + false + ] + ], + [ + [ + -18.35643768310547, + false + ] + ], + [ + [ + -7.876762390136719, + false + ] + ], + [ + [ + -8.51231575012207, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.288139343261719, + false + ], + [ + -18.35643768310547, + false + ], + [ + -7.876762390136719, + false + ], + [ + -8.51231575012207, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "c1596459ab419391f5f7ede5691aa505a70638bb2de19354c58c6248c362e109", + "prompt_hash": "295125479a954ebe2e6c8d1fb4f3e35924196041365f10421b3e29a2c0b83a74", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 30, + "doc": { + "question": "What is the average success rate (ASR) of human red teaming on CYGNET before circuit breaking?", + "choices": [ + "A. 70.4%", + "B. 77.5%", + "C. 7.1%", + "D. 69.2%" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the average success rate (ASR) of human red teaming on CYGNET before circuit breaking?\nAnswer:", + " A. 70.4%" + ], + [ + "Question:What is the average success rate (ASR) of human red teaming on CYGNET before circuit breaking?\nAnswer:", + " B. 77.5%" + ], + [ + "Question:What is the average success rate (ASR) of human red teaming on CYGNET before circuit breaking?\nAnswer:", + " C. 7.1%" + ], + [ + "Question:What is the average success rate (ASR) of human red teaming on CYGNET before circuit breaking?\nAnswer:", + " D. 69.2%" + ] + ], + "resps": [ + [ + [ + -22.771133422851562, + false + ] + ], + [ + [ + -26.42609405517578, + false + ] + ], + [ + [ + -26.433547973632812, + false + ] + ], + [ + [ + -27.742061614990234, + false + ] + ] + ], + "filtered_resps": [ + [ + -22.771133422851562, + false + ], + [ + -26.42609405517578, + false + ], + [ + -26.433547973632812, + false + ], + [ + -27.742061614990234, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "2d216ad67d2d149e00496b388a15d9ac43439c72be3eca6ce5bec1c2f9a37076", + "prompt_hash": "e07b5748a3f14c1055fbf1e15ba87ad68c3b73db2b1981ba77687205852aa3f2", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 31, + "doc": { + "question": "Which tactic involves layering different fictional scenarios within a fictional scenario?", + "choices": [ + "A. Roleplay", + "B. Opposite Day", + "C. Recursion", + "D. Allegory" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which tactic involves layering different fictional scenarios within a fictional scenario?\nAnswer:", + " A. Roleplay" + ], + [ + "Question:Which tactic involves layering different fictional scenarios within a fictional scenario?\nAnswer:", + " B. Opposite Day" + ], + [ + "Question:Which tactic involves layering different fictional scenarios within a fictional scenario?\nAnswer:", + " C. 
Recursion" + ], + [ + "Question:Which tactic involves layering different fictional scenarios within a fictional scenario?\nAnswer:", + " D. Allegory" + ] + ], + "resps": [ + [ + [ + -15.934167861938477, + false + ] + ], + [ + [ + -27.397541046142578, + false + ] + ], + [ + [ + -21.708358764648438, + false + ] + ], + [ + [ + -20.459033966064453, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.934167861938477, + false + ], + [ + -27.397541046142578, + false + ], + [ + -21.708358764648438, + false + ], + [ + -20.459033966064453, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f723e49506add63585f0863f4da23e2998a1ba84e8c0c774b6979bb327c52286", + "prompt_hash": "0990249ca41f5db19b4b4e6d9ff3106bf97a20fcc4eba5a607f4ce2e5468fd41", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 32, + "doc": { + "question": "How many harmful behaviors are in HarmBench's standard and contextual test set?", + "choices": [ + "A. 120", + "B. 240", + "C. 360", + "D. 480" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:How many harmful behaviors are in HarmBench's standard and contextual test set?\nAnswer:", + " A. 120" + ], + [ + "Question:How many harmful behaviors are in HarmBench's standard and contextual test set?\nAnswer:", + " B. 240" + ], + [ + "Question:How many harmful behaviors are in HarmBench's standard and contextual test set?\nAnswer:", + " C. 360" + ], + [ + "Question:How many harmful behaviors are in HarmBench's standard and contextual test set?\nAnswer:", + " D. 480" + ] + ], + "resps": [ + [ + [ + -17.521814346313477, + false + ] + ], + [ + [ + -23.202856063842773, + false + ] + ], + [ + [ + -22.82879638671875, + false + ] + ], + [ + [ + -24.634403228759766, + false + ] + ] + ], + "filtered_resps": [ + [ + -17.521814346313477, + false + ], + [ + -23.202856063842773, + false + ], + [ + -22.82879638671875, + false + ], + [ + -24.634403228759766, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "e52d539c2546e24b6326d4ead53f0b17c674a1670d2c556d896fd19da605e7a1", + "prompt_hash": "39b763fbfc320c740fe3ca2c9f753f8bcbe864adfe0f3fa96efed184d3653b16", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 33, + "doc": { + "question": "Which defense mechanism is applied via API access and is not open-source?", + "choices": [ + "A. LAT", + "B. DERTA", + "C. CYGNET", + "D. RR" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which defense mechanism is applied via API access and is not open-source?\nAnswer:", + " A. LAT" + ], + [ + "Question:Which defense mechanism is applied via API access and is not open-source?\nAnswer:", + " B. DERTA" + ], + [ + "Question:Which defense mechanism is applied via API access and is not open-source?\nAnswer:", + " C. CYGNET" + ], + [ + "Question:Which defense mechanism is applied via API access and is not open-source?\nAnswer:", + " D. 
RR" + ] + ], + "resps": [ + [ + [ + -21.102420806884766, + false + ] + ], + [ + [ + -32.22483444213867, + false + ] + ], + [ + [ + -30.71807098388672, + false + ] + ], + [ + [ + -22.92583656311035, + false + ] + ] + ], + "filtered_resps": [ + [ + -21.102420806884766, + false + ], + [ + -32.22483444213867, + false + ], + [ + -30.71807098388672, + false + ], + [ + -22.92583656311035, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "c31587319e9576d92e88a14e3a5d10106bb01d195060f1c3c2701e592ca7768d", + "prompt_hash": "e7519de7eadcc94f3aa9761a55eecf5ff23e51af685818f4ceafcf5e1e7443ca", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 34, + "doc": { + "question": "Which unlearning method is applied on llama-3-8b-instruct in the evaluation?", + "choices": [ + "A. LAT", + "B. RR", + "C. RMU", + "D. CYGNET" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which unlearning method is applied on llama-3-8b-instruct in the evaluation?\nAnswer:", + " A. LAT" + ], + [ + "Question:Which unlearning method is applied on llama-3-8b-instruct in the evaluation?\nAnswer:", + " B. RR" + ], + [ + "Question:Which unlearning method is applied on llama-3-8b-instruct in the evaluation?\nAnswer:", + " C. RMU" + ], + [ + "Question:Which unlearning method is applied on llama-3-8b-instruct in the evaluation?\nAnswer:", + " D. CYGNET" + ] + ], + "resps": [ + [ + [ + -21.075618743896484, + false + ] + ], + [ + [ + -23.058002471923828, + false + ] + ], + [ + [ + -26.396194458007812, + false + ] + ], + [ + [ + -36.67958068847656, + false + ] + ] + ], + "filtered_resps": [ + [ + -21.075618743896484, + false + ], + [ + -23.058002471923828, + false + ], + [ + -26.396194458007812, + false + ], + [ + -36.67958068847656, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f4a7fceca1cd86a4f8a0d7d203d463a77882225cc6b1bf417ab9e93108068ad0", + "prompt_hash": "a7a82305a6edbec80a6ece0b8791b8462b52fe8847dfbb00d98e2346ed3ca779", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 35, + "doc": { + "question": "What is the total number of parameters in the GOT model?", + "choices": [ + "80M", + "500M", + "580M", + "1B" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the total number of parameters in the GOT model?\nAnswer:", + " 80M" + ], + [ + "Question:What is the total number of parameters in the GOT model?\nAnswer:", + " 500M" + ], + [ + "Question:What is the total number of parameters in the GOT model?\nAnswer:", + " 580M" + ], + [ + "Question:What is the total number of parameters in the GOT model?\nAnswer:", + " 1B" + ] + ], + "resps": [ + [ + [ + -8.71826457977295, + false + ] + ], + [ + [ + -9.68232536315918, + false + ] + ], + [ + [ + -11.287875175476074, + false + ] + ], + [ + [ + -9.855000495910645, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.71826457977295, + false + ], + [ + -9.68232536315918, + false + ], + [ + -11.287875175476074, + false + ], + [ + -9.855000495910645, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7380787e1c057f5435f570a3bbc5da525962ce15f476f949afecb4e65921862e", + "prompt_hash": "5e469dc8fe8590f2dc84e502a359096b01f776c79608066fe25c79a1720b858d", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, 
+ "acc_norm": 0.0 + }, + { + "doc_id": 36, + "doc": { + "question": "Which dataset was used to extract pseudo ground truth for scene text OCR data in the GOT pre-training stage?", + "choices": [ + "SynthText", + "PaddleOCR", + "Tesseract", + "TextOCR" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which dataset was used to extract pseudo ground truth for scene text OCR data in the GOT pre-training stage?\nAnswer:", + " SynthText" + ], + [ + "Question:Which dataset was used to extract pseudo ground truth for scene text OCR data in the GOT pre-training stage?\nAnswer:", + " PaddleOCR" + ], + [ + "Question:Which dataset was used to extract pseudo ground truth for scene text OCR data in the GOT pre-training stage?\nAnswer:", + " Tesseract" + ], + [ + "Question:Which dataset was used to extract pseudo ground truth for scene text OCR data in the GOT pre-training stage?\nAnswer:", + " TextOCR" + ] + ], + "resps": [ + [ + [ + -11.177651405334473, + false + ] + ], + [ + [ + -9.446195602416992, + false + ] + ], + [ + [ + -8.80298900604248, + false + ] + ], + [ + [ + -10.531850814819336, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.177651405334473, + false + ], + [ + -9.446195602416992, + false + ], + [ + -8.80298900604248, + false + ], + [ + -10.531850814819336, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "994deb6439a522548ebd5a163665b864cc2c0aa755b14b8cb6f0cc432c3a0309", + "prompt_hash": "8cac60a6fb45ae86eba482ac1556dee5bc0271a924826a110316a0f98106b562", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 37, + "doc": { + "question": "Which rendering tool was used to generate images for geometric shapes in GOT's dataset?", + "choices": [ + "Verovio", + "TikZ", + "Mathpix", + "Matplotlib" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which rendering tool was used to generate images for geometric shapes in GOT's dataset?\nAnswer:", + " Verovio" + ], + [ + "Question:Which rendering tool was used to generate images for geometric shapes in GOT's dataset?\nAnswer:", + " TikZ" + ], + [ + "Question:Which rendering tool was used to generate images for geometric shapes in GOT's dataset?\nAnswer:", + " Mathpix" + ], + [ + "Question:Which rendering tool was used to generate images for geometric shapes in GOT's dataset?\nAnswer:", + " Matplotlib" + ] + ], + "resps": [ + [ + [ + -15.639054298400879, + false + ] + ], + [ + [ + -11.802534103393555, + false + ] + ], + [ + [ + -16.534772872924805, + false + ] + ], + [ + [ + -11.40635871887207, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.639054298400879, + false + ], + [ + -11.802534103393555, + false + ], + [ + -16.534772872924805, + false + ], + [ + -11.40635871887207, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7cfda73fc25b4dd06193ffc4b04cfbaa74dc7b5eccaf4c5ae4c08835ab0a5c34", + "prompt_hash": "1dca76e7c2d4d469366a1fb76962c55c4f505b97dbfd9d4c9b94834dd1c7c65e", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 38, + "doc": { + "question": "What decoder model is used in the final architecture of GOT?", + "choices": [ + "LLaMA-2 7B", + "OPT-125M", + "Qwen-0.5B", + "GPT-2" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What decoder model is used in the final architecture of GOT?\nAnswer:", + " LLaMA-2 7B" + 
], + [ + "Question:What decoder model is used in the final architecture of GOT?\nAnswer:", + " OPT-125M" + ], + [ + "Question:What decoder model is used in the final architecture of GOT?\nAnswer:", + " Qwen-0.5B" + ], + [ + "Question:What decoder model is used in the final architecture of GOT?\nAnswer:", + " GPT-2" + ] + ], + "resps": [ + [ + [ + -14.056198120117188, + false + ] + ], + [ + [ + -14.997190475463867, + false + ] + ], + [ + [ + -16.97852897644043, + false + ] + ], + [ + [ + -7.084348678588867, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.056198120117188, + false + ], + [ + -14.997190475463867, + false + ], + [ + -16.97852897644043, + false + ], + [ + -7.084348678588867, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b6d651e38fd2fcd7c40a7bbb536c4c771234618ad561faf300b5f1e81aadd4c0", + "prompt_hash": "dcc0ed11c1112a6cf430d28aa636753877743b73b960590f90a3ac0c42185762", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 39, + "doc": { + "question": "How many image-text pairs are used in the pre-training of the GOT vision encoder?", + "choices": [ + "3 million", + "5 million", + "8 million", + "10 million" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:How many image-text pairs are used in the pre-training of the GOT vision encoder?\nAnswer:", + " 3 million" + ], + [ + "Question:How many image-text pairs are used in the pre-training of the GOT vision encoder?\nAnswer:", + " 5 million" + ], + [ + "Question:How many image-text pairs are used in the pre-training of the GOT vision encoder?\nAnswer:", + " 8 million" + ], + [ + "Question:How many image-text pairs are used in the pre-training of the GOT vision encoder?\nAnswer:", + " 10 million" + ] + ], + "resps": [ + [ + [ + -9.339093208312988, + false + ] + ], + [ + [ + -9.036966323852539, + false + ] + ], + [ + [ + -10.546751022338867, + false + ] + ], + [ + [ + -8.766083717346191, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.339093208312988, + false + ], + [ + -9.036966323852539, + false + ], + [ + -10.546751022338867, + false + ], + [ + -8.766083717346191, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "8b7f7f937740eb1703167a07d32f5fca12fae238eb7612113ec02e2feb40ae65", + "prompt_hash": "d6b8f044cc916afe2580b1ac3d16140829d110e6f941af99d0bebf937f4a4aaf", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 40, + "doc": { + "question": "What does the ViewCrafter framework use to achieve precise control of 6 DoF camera poses?", + "choices": [ + "Text prompts and depth estimation", + "Latent space embeddings and video warping", + "Point cloud representations and video diffusion models", + "2D image-based monocular estimations" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What does the ViewCrafter framework use to achieve precise control of 6 DoF camera poses?\nAnswer:", + " Text prompts and depth estimation" + ], + [ + "Question:What does the ViewCrafter framework use to achieve precise control of 6 DoF camera poses?\nAnswer:", + " Latent space embeddings and video warping" + ], + [ + "Question:What does the ViewCrafter framework use to achieve precise control of 6 DoF camera poses?\nAnswer:", + " Point cloud representations and video diffusion models" + ], + [ + "Question:What does the ViewCrafter framework use 
to achieve precise control of 6 DoF camera poses?\nAnswer:", + " 2D image-based monocular estimations" + ] + ], + "resps": [ + [ + [ + -23.642772674560547, + false + ] + ], + [ + [ + -31.760562896728516, + false + ] + ], + [ + [ + -31.7762451171875, + false + ] + ], + [ + [ + -29.73705291748047, + false + ] + ] + ], + "filtered_resps": [ + [ + -23.642772674560547, + false + ], + [ + -31.760562896728516, + false + ], + [ + -31.7762451171875, + false + ], + [ + -29.73705291748047, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "20efdb6f39d6e770c732dcdf1c0b35244cedabe9d6dcd0701091d58548cf74ab", + "prompt_hash": "f01ce7509a42c001a8b4687c85922b07e4e5b3e7e3646bca906850209df2650a", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 41, + "doc": { + "question": "Which model is used as the base video diffusion model in ViewCrafter?", + "choices": [ + "AnimateDiff", + "DynamiCrafter", + "ZeroNVS", + "LucidDreamer" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which model is used as the base video diffusion model in ViewCrafter?\nAnswer:", + " AnimateDiff" + ], + [ + "Question:Which model is used as the base video diffusion model in ViewCrafter?\nAnswer:", + " DynamiCrafter" + ], + [ + "Question:Which model is used as the base video diffusion model in ViewCrafter?\nAnswer:", + " ZeroNVS" + ], + [ + "Question:Which model is used as the base video diffusion model in ViewCrafter?\nAnswer:", + " LucidDreamer" + ] + ], + "resps": [ + [ + [ + -11.66192626953125, + false + ] + ], + [ + [ + -11.834842681884766, + false + ] + ], + [ + [ + -18.940771102905273, + false + ] + ], + [ + [ + -12.462959289550781, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.66192626953125, + false + ], + [ + -11.834842681884766, + false + ], + [ + -18.940771102905273, + false + ], + [ + -12.462959289550781, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "0dd03d82ddbc637e00a05439ac2ea544b4b5bcb60d72a3b24720bb57f23ce709", + "prompt_hash": "c533106d2d00d3188a020b9dcead6a9543bdb2b762a234e8f4deba5e3bf21ea3", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 42, + "doc": { + "question": "What algorithm does ViewCrafter employ to determine the next best camera pose during iterative synthesis?", + "choices": [ + "Random walk sampling", + "Fixed circular trajectory", + "Next-Best-View (NBV) planning", + "Gradient-based motion estimation" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What algorithm does ViewCrafter employ to determine the next best camera pose during iterative synthesis?\nAnswer:", + " Random walk sampling" + ], + [ + "Question:What algorithm does ViewCrafter employ to determine the next best camera pose during iterative synthesis?\nAnswer:", + " Fixed circular trajectory" + ], + [ + "Question:What algorithm does ViewCrafter employ to determine the next best camera pose during iterative synthesis?\nAnswer:", + " Next-Best-View (NBV) planning" + ], + [ + "Question:What algorithm does ViewCrafter employ to determine the next best camera pose during iterative synthesis?\nAnswer:", + " Gradient-based motion estimation" + ] + ], + "resps": [ + [ + [ + -17.710290908813477, + false + ] + ], + [ + [ + -23.67266082763672, + false + ] + ], + [ + [ + -24.032934188842773, + false + ] + ], + [ + [ + 
-19.99657440185547, + false + ] + ] + ], + "filtered_resps": [ + [ + -17.710290908813477, + false + ], + [ + -23.67266082763672, + false + ], + [ + -24.032934188842773, + false + ], + [ + -19.99657440185547, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "8a425b4867abc928d2df97796ef238fe6f5ed803bee6c22e708315eeb0f1e59b", + "prompt_hash": "8a872d35921534c9cd4fdd3e6bac16a8947410679a755b76e5afe39b674f49cc", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 43, + "doc": { + "question": "Which datasets are used to train the ViewCrafter model?", + "choices": [ + "ScanNet and CO3D", + "DL3DV and RealEstate10K", + "ShapeNet and COCO", + "Tanks-and-Temples and Matterport3D" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which datasets are used to train the ViewCrafter model?\nAnswer:", + " ScanNet and CO3D" + ], + [ + "Question:Which datasets are used to train the ViewCrafter model?\nAnswer:", + " DL3DV and RealEstate10K" + ], + [ + "Question:Which datasets are used to train the ViewCrafter model?\nAnswer:", + " ShapeNet and COCO" + ], + [ + "Question:Which datasets are used to train the ViewCrafter model?\nAnswer:", + " Tanks-and-Temples and Matterport3D" + ] + ], + "resps": [ + [ + [ + -17.817087173461914, + false + ] + ], + [ + [ + -42.86650466918945, + false + ] + ], + [ + [ + -19.645099639892578, + false + ] + ], + [ + [ + -27.194059371948242, + false + ] + ] + ], + "filtered_resps": [ + [ + -17.817087173461914, + false + ], + [ + -42.86650466918945, + false + ], + [ + -19.645099639892578, + false + ], + [ + -27.194059371948242, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "a80bdbf698650f3e6766a717561335bd14e5e7f1f2567eecbc983bb009bb4fb5", + "prompt_hash": "48f041efbc3663626df4f54a69d1bfcf3f8393fdef02d683f8686aba0b98cd46", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 44, + "doc": { + "question": "Which method does ViewCrafter use to reconstruct the point cloud from sparse input images?", + "choices": [ + "COLMAP", + "Structure-from-Motion", + "DUSt3R", + "Plücker coordinate regression" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which method does ViewCrafter use to reconstruct the point cloud from sparse input images?\nAnswer:", + " COLMAP" + ], + [ + "Question:Which method does ViewCrafter use to reconstruct the point cloud from sparse input images?\nAnswer:", + " Structure-from-Motion" + ], + [ + "Question:Which method does ViewCrafter use to reconstruct the point cloud from sparse input images?\nAnswer:", + " DUSt3R" + ], + [ + "Question:Which method does ViewCrafter use to reconstruct the point cloud from sparse input images?\nAnswer:", + " Plücker coordinate regression" + ] + ], + "resps": [ + [ + [ + -10.547996520996094, + false + ] + ], + [ + [ + -9.73310375213623, + false + ] + ], + [ + [ + -32.7406005859375, + false + ] + ], + [ + [ + -33.1044921875, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.547996520996094, + false + ], + [ + -9.73310375213623, + false + ], + [ + -32.7406005859375, + false + ], + [ + -33.1044921875, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "4e725f4f1dd36dad41d72a0ef19b65be68e912ec0bb12af51288fe5c9a242232", + "prompt_hash": 
"fbcbd75cc4ed82855cd63198990031d0dcf1bca655ab55f830aedb5b3bf69e46", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 45, + "doc": { + "question": "Which benchmark includes complex scenarios like parallel and multiple function calls in languages such as Java, JavaScript, and Python?", + "choices": [ + "AgentBench", + "ToolBench", + "ToolQuery", + "Berkeley Function-Calling Benchmark" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:Which benchmark includes complex scenarios like parallel and multiple function calls in languages such as Java, JavaScript, and Python?\nAnswer:", + " AgentBench" + ], + [ + "Question:Which benchmark includes complex scenarios like parallel and multiple function calls in languages such as Java, JavaScript, and Python?\nAnswer:", + " ToolBench" + ], + [ + "Question:Which benchmark includes complex scenarios like parallel and multiple function calls in languages such as Java, JavaScript, and Python?\nAnswer:", + " ToolQuery" + ], + [ + "Question:Which benchmark includes complex scenarios like parallel and multiple function calls in languages such as Java, JavaScript, and Python?\nAnswer:", + " Berkeley Function-Calling Benchmark" + ] + ], + "resps": [ + [ + [ + -12.500007629394531, + false + ] + ], + [ + [ + -13.430700302124023, + false + ] + ], + [ + [ + -22.2950439453125, + false + ] + ], + [ + [ + -26.18708610534668, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.500007629394531, + false + ], + [ + -13.430700302124023, + false + ], + [ + -22.2950439453125, + false + ], + [ + -26.18708610534668, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "50e437a64770455b952405b5a456fde6bac837d3fbeacbcde5eb5eacb46b1d1e", + "prompt_hash": "c7c05251e7a48d01e77bfe1c75830f826e9b4fc4fadf5119c27132a213fbbd0a", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 46, + "doc": { + "question": "Which data synthesis framework is used to generate verifiable datasets based on executable APIs?", + "choices": [ + "ToolBench", + "AgentOhana", + "APIGen", + "DialogStudio" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which data synthesis framework is used to generate verifiable datasets based on executable APIs?\nAnswer:", + " ToolBench" + ], + [ + "Question:Which data synthesis framework is used to generate verifiable datasets based on executable APIs?\nAnswer:", + " AgentOhana" + ], + [ + "Question:Which data synthesis framework is used to generate verifiable datasets based on executable APIs?\nAnswer:", + " APIGen" + ], + [ + "Question:Which data synthesis framework is used to generate verifiable datasets based on executable APIs?\nAnswer:", + " DialogStudio" + ] + ], + "resps": [ + [ + [ + -17.682613372802734, + false + ] + ], + [ + [ + -25.438610076904297, + false + ] + ], + [ + [ + -10.732872009277344, + false + ] + ], + [ + [ + -22.601285934448242, + false + ] + ] + ], + "filtered_resps": [ + [ + -17.682613372802734, + false + ], + [ + -25.438610076904297, + false + ], + [ + -10.732872009277344, + false + ], + [ + -22.601285934448242, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "6bdea1c9b0ac67637689735ba7b180e755376e3f84be30e1bf448190ed012d64", + "prompt_hash": "3c98548a57b1c17f173a9e5501a877d63d3d69d4993c2749ba04d30a62713361", + "target_hash": 
"d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 47, + "doc": { + "question": "What type of augmentation involves rephrasing instructions and verifying them with LLMs to ensure consistency?", + "choices": [ + "Prompt Format Augmentation", + "Order Shuffling", + "Instruction-Following Augmentation", + "Concatenation Tokens" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What type of augmentation involves rephrasing instructions and verifying them with LLMs to ensure consistency?\nAnswer:", + " Prompt Format Augmentation" + ], + [ + "Question:What type of augmentation involves rephrasing instructions and verifying them with LLMs to ensure consistency?\nAnswer:", + " Order Shuffling" + ], + [ + "Question:What type of augmentation involves rephrasing instructions and verifying them with LLMs to ensure consistency?\nAnswer:", + " Instruction-Following Augmentation" + ], + [ + "Question:What type of augmentation involves rephrasing instructions and verifying them with LLMs to ensure consistency?\nAnswer:", + " Concatenation Tokens" + ] + ], + "resps": [ + [ + [ + -18.61235809326172, + false + ] + ], + [ + [ + -20.94488525390625, + false + ] + ], + [ + [ + -13.356648445129395, + false + ] + ], + [ + [ + -25.019317626953125, + false + ] + ] + ], + "filtered_resps": [ + [ + -18.61235809326172, + false + ], + [ + -20.94488525390625, + false + ], + [ + -13.356648445129395, + false + ], + [ + -25.019317626953125, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "0c1b6a5d3000ee881d1e8dded9f31d9fb738710106c6552e7ab4f1d80cd1a4f1", + "prompt_hash": "2ae7793b6c6d78c339d6de0dc6bbb465497256c56f8735df1ef4cd965487f3b6", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 48, + "doc": { + "question": "What is the smallest model in the xLAM series mentioned, and what is its approximate accuracy on the BFCL v2 benchmark?", + "choices": [ + "xLAM-1b-fc-r with 75.43%", + "xLAM-7b-r with 80.33%", + "xLAM-8x22b-r with 87.31%", + "xLAM-8x7b-r with 84.69%" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the smallest model in the xLAM series mentioned, and what is its approximate accuracy on the BFCL v2 benchmark?\nAnswer:", + " xLAM-1b-fc-r with 75.43%" + ], + [ + "Question:What is the smallest model in the xLAM series mentioned, and what is its approximate accuracy on the BFCL v2 benchmark?\nAnswer:", + " xLAM-7b-r with 80.33%" + ], + [ + "Question:What is the smallest model in the xLAM series mentioned, and what is its approximate accuracy on the BFCL v2 benchmark?\nAnswer:", + " xLAM-8x22b-r with 87.31%" + ], + [ + "Question:What is the smallest model in the xLAM series mentioned, and what is its approximate accuracy on the BFCL v2 benchmark?\nAnswer:", + " xLAM-8x7b-r with 84.69%" + ] + ], + "resps": [ + [ + [ + -58.31315612792969, + false + ] + ], + [ + [ + -44.66631317138672, + false + ] + ], + [ + [ + -51.25954818725586, + false + ] + ], + [ + [ + -48.78382873535156, + false + ] + ] + ], + "filtered_resps": [ + [ + -58.31315612792969, + false + ], + [ + -44.66631317138672, + false + ], + [ + -51.25954818725586, + false + ], + [ + -48.78382873535156, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "6c0bc5fff5cb3e8d98cb7fa345607fe20ecaa1d0c3c3cc95554662f11a026b5f", + "prompt_hash": 
"bce0a721dac988be6371d832269753de01df1413eb3a30636480ef35325f7ab9", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 49, + "doc": { + "question": "Which technique is applied to avoid overfitting on a specific tool order in the training data?", + "choices": [ + "Format Instruction-Following", + "LoRA-based Fine-Tuning", + "Order Shuffling", + "Function-Calling Templates" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which technique is applied to avoid overfitting on a specific tool order in the training data?\nAnswer:", + " Format Instruction-Following" + ], + [ + "Question:Which technique is applied to avoid overfitting on a specific tool order in the training data?\nAnswer:", + " LoRA-based Fine-Tuning" + ], + [ + "Question:Which technique is applied to avoid overfitting on a specific tool order in the training data?\nAnswer:", + " Order Shuffling" + ], + [ + "Question:Which technique is applied to avoid overfitting on a specific tool order in the training data?\nAnswer:", + " Function-Calling Templates" + ] + ], + "resps": [ + [ + [ + -27.455965042114258, + false + ] + ], + [ + [ + -21.98909568786621, + false + ] + ], + [ + [ + -18.27690315246582, + false + ] + ], + [ + [ + -29.975255966186523, + false + ] + ] + ], + "filtered_resps": [ + [ + -27.455965042114258, + false + ], + [ + -21.98909568786621, + false + ], + [ + -18.27690315246582, + false + ], + [ + -29.975255966186523, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7bf55761c45c0593f901523c8d8abff6c2a18e28c75c860b2cce9eb771f6ee34", + "prompt_hash": "52a3b30b08b978b46fc93a59102d2acf835be5a76f55ba7c83221c2637a6438a", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 50, + "doc": { + "question": "What is the internal code name of Baidu's next-generation query-ad matching project?", + "choices": [ + "Mobius", + "Phoenix Nest", + "OPQ", + "ANN" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the internal code name of Baidu's next-generation query-ad matching project?\nAnswer:", + " Mobius" + ], + [ + "Question:What is the internal code name of Baidu's next-generation query-ad matching project?\nAnswer:", + " Phoenix Nest" + ], + [ + "Question:What is the internal code name of Baidu's next-generation query-ad matching project?\nAnswer:", + " OPQ" + ], + [ + "Question:What is the internal code name of Baidu's next-generation query-ad matching project?\nAnswer:", + " ANN" + ] + ], + "resps": [ + [ + [ + -12.405713081359863, + false + ] + ], + [ + [ + -20.490856170654297, + false + ] + ], + [ + [ + -15.323873519897461, + false + ] + ], + [ + [ + -12.232057571411133, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.405713081359863, + false + ], + [ + -20.490856170654297, + false + ], + [ + -15.323873519897461, + false + ], + [ + -12.232057571411133, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "add4053f4575b941d604a1c65c539138615d9db0ae66f788419ffd4c83255035", + "prompt_hash": "aa88127607633251bb957100ec9d8356d119bc74bcdfc424d6633f46d0d4ef5f", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 51, + "doc": { + "question": "What is the business metric defined as CPM in Baidu's sponsored search?", + "choices": [ + "CPM = Bid × 
ROI", + "CPM = CTR × Bid", + "CPM = Clicks × ROI", + "CPM = CPC × CTR" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the business metric defined as CPM in Baidu's sponsored search?\nAnswer:", + " CPM = Bid × ROI" + ], + [ + "Question:What is the business metric defined as CPM in Baidu's sponsored search?\nAnswer:", + " CPM = CTR × Bid" + ], + [ + "Question:What is the business metric defined as CPM in Baidu's sponsored search?\nAnswer:", + " CPM = Clicks × ROI" + ], + [ + "Question:What is the business metric defined as CPM in Baidu's sponsored search?\nAnswer:", + " CPM = CPC × CTR" + ] + ], + "resps": [ + [ + [ + -32.9281120300293, + false + ] + ], + [ + [ + -25.26803207397461, + false + ] + ], + [ + [ + -30.90808868408203, + false + ] + ], + [ + [ + -18.589683532714844, + false + ] + ] + ], + "filtered_resps": [ + [ + -32.9281120300293, + false + ], + [ + -25.26803207397461, + false + ], + [ + -30.90808868408203, + false + ], + [ + -18.589683532714844, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "86b40e187d2628ac02aa83671304f7187db6bccfc71eba9db2b342a4d7f78653", + "prompt_hash": "d52c15f9293fd3abce639a3ac5f5e00a7e01b38415210b2bbcce80b8baf5b40e", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 52, + "doc": { + "question": "Which algorithm does Mobius-V1 use for fast ad indexing and retrieval?", + "choices": [ + "Random Partition Trees", + "Product Quantization", + "Optimized Product Quantization", + "K-Means Hashing" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which algorithm does Mobius-V1 use for fast ad indexing and retrieval?\nAnswer:", + " Random Partition Trees" + ], + [ + "Question:Which algorithm does Mobius-V1 use for fast ad indexing and retrieval?\nAnswer:", + " Product Quantization" + ], + [ + "Question:Which algorithm does Mobius-V1 use for fast ad indexing and retrieval?\nAnswer:", + " Optimized Product Quantization" + ], + [ + "Question:Which algorithm does Mobius-V1 use for fast ad indexing and retrieval?\nAnswer:", + " K-Means Hashing" + ] + ], + "resps": [ + [ + [ + -24.32925796508789, + false + ] + ], + [ + [ + -16.3418025970459, + false + ] + ], + [ + [ + -22.6213321685791, + false + ] + ], + [ + [ + -22.01201820373535, + false + ] + ] + ], + "filtered_resps": [ + [ + -24.32925796508789, + false + ], + [ + -16.3418025970459, + false + ], + [ + -22.6213321685791, + false + ], + [ + -22.01201820373535, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "28685b874477d3310d55c078b762704fe9ce7e447e7417772159e91e24f5faed", + "prompt_hash": "eefc4ae19997e45fd3f48c0cf92fd7f448ac3545a5ae88edfa561cbd874ea861", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 53, + "doc": { + "question": "What performance improvement did Mobius-V1 achieve in CPM on the Baidu App?", + "choices": [ + "+1.0%", + "+2.9%", + "+3.8%", + "+5.0%" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What performance improvement did Mobius-V1 achieve in CPM on the Baidu App?\nAnswer:", + " +1.0%" + ], + [ + "Question:What performance improvement did Mobius-V1 achieve in CPM on the Baidu App?\nAnswer:", + " +2.9%" + ], + [ + "Question:What performance improvement did Mobius-V1 achieve in CPM on the Baidu App?\nAnswer:", + " +3.8%" + ], + [ + "Question:What 
performance improvement did Mobius-V1 achieve in CPM on the Baidu App?\nAnswer:", + " +5.0%" + ] + ], + "resps": [ + [ + [ + -20.06861114501953, + false + ] + ], + [ + [ + -20.90535545349121, + false + ] + ], + [ + [ + -20.866792678833008, + false + ] + ], + [ + [ + -21.263343811035156, + false + ] + ] + ], + "filtered_resps": [ + [ + -20.06861114501953, + false + ], + [ + -20.90535545349121, + false + ], + [ + -20.866792678833008, + false + ], + [ + -21.263343811035156, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "4e5118f4ffe10fb48bdfb0a855069d9e266c775ca731744ccd557342c37dff21", + "prompt_hash": "f3f24a3f3a904c41ff9d03ece9af0310d088dd1c3c69829375bcf4326424b28d", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 54, + "doc": { + "question": "In the CTR model training, what is the role of the 'teacher' component in Mobius-V1?", + "choices": [ + "To predict final ad rankings", + "To simulate click behavior of users", + "To judge semantic relevance of query-ad pairs", + "To compress feature vectors for retrieval" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:In the CTR model training, what is the role of the 'teacher' component in Mobius-V1?\nAnswer:", + " To predict final ad rankings" + ], + [ + "Question:In the CTR model training, what is the role of the 'teacher' component in Mobius-V1?\nAnswer:", + " To simulate click behavior of users" + ], + [ + "Question:In the CTR model training, what is the role of the 'teacher' component in Mobius-V1?\nAnswer:", + " To judge semantic relevance of query-ad pairs" + ], + [ + "Question:In the CTR model training, what is the role of the 'teacher' component in Mobius-V1?\nAnswer:", + " To compress feature vectors for retrieval" + ] + ], + "resps": [ + [ + [ + -35.07558059692383, + false + ] + ], + [ + [ + -27.24683380126953, + false + ] + ], + [ + [ + -40.58359909057617, + false + ] + ], + [ + [ + -33.87470245361328, + false + ] + ] + ], + "filtered_resps": [ + [ + -35.07558059692383, + false + ], + [ + -27.24683380126953, + false + ], + [ + -40.58359909057617, + false + ], + [ + -33.87470245361328, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7b2260f5f2914c1b0824e72b40840441445c51603206d3bc51ba5c7c90eef71b", + "prompt_hash": "a5715e3e45fb1e58df657cd7dfa7b3a20372d1b0f3222421b6e810d662ed1e9c", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 55, + "doc": { + "question": "What is the maximum number of papers retrieved by the research ideation agent during the paper retrieval step?", + "choices": [ + "20", + "60", + "120", + "200" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the maximum number of papers retrieved by the research ideation agent during the paper retrieval step?\nAnswer:", + " 20" + ], + [ + "Question:What is the maximum number of papers retrieved by the research ideation agent during the paper retrieval step?\nAnswer:", + " 60" + ], + [ + "Question:What is the maximum number of papers retrieved by the research ideation agent during the paper retrieval step?\nAnswer:", + " 120" + ], + [ + "Question:What is the maximum number of papers retrieved by the research ideation agent during the paper retrieval step?\nAnswer:", + " 200" + ] + ], + "resps": [ + [ + [ + -3.57956862449646, + false + ] + ], + [ + [ + 
-5.394436836242676, + false + ] + ], + [ + [ + -6.261102199554443, + false + ] + ], + [ + [ + -5.285019397735596, + false + ] + ] + ], + "filtered_resps": [ + [ + -3.57956862449646, + false + ], + [ + -5.394436836242676, + false + ], + [ + -6.261102199554443, + false + ], + [ + -5.285019397735596, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f646bbd461c4465b3ec2a1464705c9f7184cadc5d25e456f267250e6668570ce", + "prompt_hash": "1df44aa395f1715ec809617aad796f435ab7a39758dfc89b92b3d86bbdda0282", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 56, + "doc": { + "question": "Which model was selected as the ranker for project proposal ranking based on pairwise comparisons?", + "choices": [ + "GPT-4o", + "Claude-3.5-Sonnet", + "Claude-3-Opus", + "Gemini 1.5" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which model was selected as the ranker for project proposal ranking based on pairwise comparisons?\nAnswer:", + " GPT-4o" + ], + [ + "Question:Which model was selected as the ranker for project proposal ranking based on pairwise comparisons?\nAnswer:", + " Claude-3.5-Sonnet" + ], + [ + "Question:Which model was selected as the ranker for project proposal ranking based on pairwise comparisons?\nAnswer:", + " Claude-3-Opus" + ], + [ + "Question:Which model was selected as the ranker for project proposal ranking based on pairwise comparisons?\nAnswer:", + " Gemini 1.5" + ] + ], + "resps": [ + [ + [ + -9.801225662231445, + false + ] + ], + [ + [ + -18.31494903564453, + false + ] + ], + [ + [ + -13.818419456481934, + false + ] + ], + [ + [ + -11.413298606872559, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.801225662231445, + false + ], + [ + -18.31494903564453, + false + ], + [ + -13.818419456481934, + false + ], + [ + -11.413298606872559, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "fc2ed4314dcee0565ca79572989aca6514627cfccf638ed7f981ebdb0c6b2078", + "prompt_hash": "ec596453bf625ac5a51f528394afe1d5e65a5cb337ed866606699df4b7014ee6", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 57, + "doc": { + "question": "How many seed ideas does the ideation agent generate per research topic?", + "choices": [ + "500", + "1000", + "2000", + "4000" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:How many seed ideas does the ideation agent generate per research topic?\nAnswer:", + " 500" + ], + [ + "Question:How many seed ideas does the ideation agent generate per research topic?\nAnswer:", + " 1000" + ], + [ + "Question:How many seed ideas does the ideation agent generate per research topic?\nAnswer:", + " 2000" + ], + [ + "Question:How many seed ideas does the ideation agent generate per research topic?\nAnswer:", + " 4000" + ] + ], + "resps": [ + [ + [ + -8.664156913757324, + false + ] + ], + [ + [ + -9.027714729309082, + false + ] + ], + [ + [ + -10.901153564453125, + false + ] + ], + [ + [ + -11.820749282836914, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.664156913757324, + false + ], + [ + -9.027714729309082, + false + ], + [ + -10.901153564453125, + false + ], + [ + -11.820749282836914, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "9a34ce2f253785dd2e4ea6e29e06a175a7e030c54c846d40ebb91821f527e3fc", + "prompt_hash": 
"e932a0054f93b6ab828dd21259f0574bb1b42dabcbfde0b81e8a9d003b5d410b", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 58, + "doc": { + "question": "Which Sentence-Transformers model is used for deduplicating seed ideas?", + "choices": [ + "all-MiniLM-L6-v2", + "paraphrase-MiniLM-L12-v2", + "mpnet-base-v2", + "bert-base-nli-mean-tokens" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which Sentence-Transformers model is used for deduplicating seed ideas?\nAnswer:", + " all-MiniLM-L6-v2" + ], + [ + "Question:Which Sentence-Transformers model is used for deduplicating seed ideas?\nAnswer:", + " paraphrase-MiniLM-L12-v2" + ], + [ + "Question:Which Sentence-Transformers model is used for deduplicating seed ideas?\nAnswer:", + " mpnet-base-v2" + ], + [ + "Question:Which Sentence-Transformers model is used for deduplicating seed ideas?\nAnswer:", + " bert-base-nli-mean-tokens" + ] + ], + "resps": [ + [ + [ + -11.210933685302734, + false + ] + ], + [ + [ + -23.711734771728516, + false + ] + ], + [ + [ + -13.684947967529297, + false + ] + ], + [ + [ + -13.334312438964844, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.210933685302734, + false + ], + [ + -23.711734771728516, + false + ], + [ + -13.684947967529297, + false + ], + [ + -13.334312438964844, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "4485c7632925269c4b382a986bad0930a216df9f3a9b00c78f27ea87e5f0ed10", + "prompt_hash": "90afa19aaaee164fc1373ae78040ef1852c7c499388d136a5263d41f281cf1da", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 59, + "doc": { + "question": "What threshold is used for cosine similarity in the seed idea deduplication process?", + "choices": [ + "0.5", + "0.7", + "0.8", + "0.9" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What threshold is used for cosine similarity in the seed idea deduplication process?\nAnswer:", + " 0.5" + ], + [ + "Question:What threshold is used for cosine similarity in the seed idea deduplication process?\nAnswer:", + " 0.7" + ], + [ + "Question:What threshold is used for cosine similarity in the seed idea deduplication process?\nAnswer:", + " 0.8" + ], + [ + "Question:What threshold is used for cosine similarity in the seed idea deduplication process?\nAnswer:", + " 0.9" + ] + ], + "resps": [ + [ + [ + -4.469439506530762, + false + ] + ], + [ + [ + -4.292275428771973, + false + ] + ], + [ + [ + -4.272160530090332, + false + ] + ], + [ + [ + -3.832259178161621, + false + ] + ] + ], + "filtered_resps": [ + [ + -4.469439506530762, + false + ], + [ + -4.292275428771973, + false + ], + [ + -4.272160530090332, + false + ], + [ + -3.832259178161621, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "9c46621f2977b227ecc3f554d71640c222a63328eb6c7b6f54cffa5e90a36cfb", + "prompt_hash": "66edf2fc1c0a71ba26ed0fc6a7dce2a2e8f92579c61bb033bff3b3ea6d7b435c", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 60, + "doc": { + "question": "What fundamental question lies at the core of large language models?", + "choices": [ + "How to encode images into text", + "How to simulate human reasoning", + "What word is likely to come next given a sequence of words", + "How to generate grammatically perfect 
sentences" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What fundamental question lies at the core of large language models?\nAnswer:", + " How to encode images into text" + ], + [ + "Question:What fundamental question lies at the core of large language models?\nAnswer:", + " How to simulate human reasoning" + ], + [ + "Question:What fundamental question lies at the core of large language models?\nAnswer:", + " What word is likely to come next given a sequence of words" + ], + [ + "Question:What fundamental question lies at the core of large language models?\nAnswer:", + " How to generate grammatically perfect sentences" + ] + ], + "resps": [ + [ + [ + -22.318111419677734, + false + ] + ], + [ + [ + -18.891098022460938, + false + ] + ], + [ + [ + -35.946693420410156, + false + ] + ], + [ + [ + -24.28699493408203, + false + ] + ] + ], + "filtered_resps": [ + [ + -22.318111419677734, + false + ], + [ + -18.891098022460938, + false + ], + [ + -35.946693420410156, + false + ], + [ + -24.28699493408203, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "2bc651e9602b4caca953fde43181ea664f77a71d0f2d9c59a2be5be71d706f8b", + "prompt_hash": "6036288fedf64bb453b55ddbeaabaeabc0c5d4993693333928f10a56655d8f20", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 61, + "doc": { + "question": "What problem does the Mamba model aim to solve in sequence modeling?", + "choices": [ + "High memory usage of RNNs", + "Low accuracy on short sequences", + "Quadratic bottleneck in the attention mechanism", + "Lack of generalization in convolutional models" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What problem does the Mamba model aim to solve in sequence modeling?\nAnswer:", + " High memory usage of RNNs" + ], + [ + "Question:What problem does the Mamba model aim to solve in sequence modeling?\nAnswer:", + " Low accuracy on short sequences" + ], + [ + "Question:What problem does the Mamba model aim to solve in sequence modeling?\nAnswer:", + " Quadratic bottleneck in the attention mechanism" + ], + [ + "Question:What problem does the Mamba model aim to solve in sequence modeling?\nAnswer:", + " Lack of generalization in convolutional models" + ] + ], + "resps": [ + [ + [ + -26.314756393432617, + false + ] + ], + [ + [ + -24.300613403320312, + false + ] + ], + [ + [ + -31.88043975830078, + false + ] + ], + [ + [ + -26.176528930664062, + false + ] + ] + ], + "filtered_resps": [ + [ + -26.314756393432617, + false + ], + [ + -24.300613403320312, + false + ], + [ + -31.88043975830078, + false + ], + [ + -26.176528930664062, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "3938cd1105c8c6addb1747a25a4e9533a4491d59d54415b0095e03dfac182ec6", + "prompt_hash": "a123996b1c5197ad6764012ae08b4e0a72db1f108396c12b875028b51eafd764", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 62, + "doc": { + "question": "What does the BitFit fine-tuning method update?", + "choices": [ + "All transformer layers", + "Only the output layer weights", + "Bias terms and the task-specific classification layer", + "The entire embedding matrix" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What does the BitFit fine-tuning method update?\nAnswer:", + " All transformer layers" + ], + [ + "Question:What does the 
BitFit fine-tuning method update?\nAnswer:", + " Only the output layer weights" + ], + [ + "Question:What does the BitFit fine-tuning method update?\nAnswer:", + " Bias terms and the task-specific classification layer" + ], + [ + "Question:What does the BitFit fine-tuning method update?\nAnswer:", + " The entire embedding matrix" + ] + ], + "resps": [ + [ + [ + -15.890267372131348, + false + ] + ], + [ + [ + -15.56641960144043, + false + ] + ], + [ + [ + -37.99882507324219, + false + ] + ], + [ + [ + -15.27823543548584, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.890267372131348, + false + ], + [ + -15.56641960144043, + false + ], + [ + -37.99882507324219, + false + ], + [ + -15.27823543548584, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f2207e170fc43164dbf1d42e40561c3f4250ca00745940935f99d5dc20438dc0", + "prompt_hash": "01c9f1436cd9999ff1800b6eebc4567206d2b4ecdf5da6dad52c30ed2d2f1319", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 63, + "doc": { + "question": "What is a characteristic of Kolmogorov-Arnold Networks (KANs) compared to traditional neural networks?", + "choices": [ + "They use fixed activation functions on nodes", + "They apply learnable activation functions on edges", + "They avoid using activation functions entirely", + "They use convolutional filters for feature learning" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is a characteristic of Kolmogorov-Arnold Networks (KANs) compared to traditional neural networks?\nAnswer:", + " They use fixed activation functions on nodes" + ], + [ + "Question:What is a characteristic of Kolmogorov-Arnold Networks (KANs) compared to traditional neural networks?\nAnswer:", + " They apply learnable activation functions on edges" + ], + [ + "Question:What is a characteristic of Kolmogorov-Arnold Networks (KANs) compared to traditional neural networks?\nAnswer:", + " They avoid using activation functions entirely" + ], + [ + "Question:What is a characteristic of Kolmogorov-Arnold Networks (KANs) compared to traditional neural networks?\nAnswer:", + " They use convolutional filters for feature learning" + ] + ], + "resps": [ + [ + [ + -27.779762268066406, + false + ] + ], + [ + [ + -32.78925323486328, + false + ] + ], + [ + [ + -26.899110794067383, + false + ] + ], + [ + [ + -23.58316421508789, + false + ] + ] + ], + "filtered_resps": [ + [ + -27.779762268066406, + false + ], + [ + -32.78925323486328, + false + ], + [ + -26.899110794067383, + false + ], + [ + -23.58316421508789, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7a4b192cfda32cda5e83b7be84297fb65f0922104d79f4ed508b830e34f12957", + "prompt_hash": "5c76c2773ed113ea2061bda0694c74214eb2e6f96b091a4fb1d3f9547e65de70", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 64, + "doc": { + "question": "What does Chain-of-Thought (CoT) prompting aim to improve in LLMs?", + "choices": [ + "Memory efficiency during training", + "Logical consistency and reduction of hallucinations", + "Vocabulary expansion", + "Encoding of positional information" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What does Chain-of-Thought (CoT) prompting aim to improve in LLMs?\nAnswer:", + " Memory efficiency during training" + ], + [ + "Question:What does Chain-of-Thought (CoT) 
prompting aim to improve in LLMs?\nAnswer:", + " Logical consistency and reduction of hallucinations" + ], + [ + "Question:What does Chain-of-Thought (CoT) prompting aim to improve in LLMs?\nAnswer:", + " Vocabulary expansion" + ], + [ + "Question:What does Chain-of-Thought (CoT) prompting aim to improve in LLMs?\nAnswer:", + " Encoding of positional information" + ] + ], + "resps": [ + [ + [ + -23.194095611572266, + false + ] + ], + [ + [ + -23.975643157958984, + false + ] + ], + [ + [ + -16.475173950195312, + false + ] + ], + [ + [ + -24.811058044433594, + false + ] + ] + ], + "filtered_resps": [ + [ + -23.194095611572266, + false + ], + [ + -23.975643157958984, + false + ], + [ + -16.475173950195312, + false + ], + [ + -24.811058044433594, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "780cbf0fabbb53d71f45195aadaf088049f8c1fc307a088210da70f2f04fec4c", + "prompt_hash": "ab10be7a2b6fdc7339decae8a574a1da022b8713ad30ecf09512ac21ac410ccd", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 65, + "doc": { + "question": "What type of license is the gsplat library released under?", + "choices": [ + "MIT License", + "Apache License 2.0", + "GPLv3", + "BSD 3-Clause" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What type of license is the gsplat library released under?\nAnswer:", + " MIT License" + ], + [ + "Question:What type of license is the gsplat library released under?\nAnswer:", + " Apache License 2.0" + ], + [ + "Question:What type of license is the gsplat library released under?\nAnswer:", + " GPLv3" + ], + [ + "Question:What type of license is the gsplat library released under?\nAnswer:", + " BSD 3-Clause" + ] + ], + "resps": [ + [ + [ + -3.371497392654419, + false + ] + ], + [ + [ + -4.135261058807373, + false + ] + ], + [ + [ + -5.155677795410156, + false + ] + ], + [ + [ + -7.361147880554199, + false + ] + ] + ], + "filtered_resps": [ + [ + -3.371497392654419, + false + ], + [ + -4.135261058807373, + false + ], + [ + -5.155677795410156, + false + ], + [ + -7.361147880554199, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "d889ba1674f868cc5cfc85b5b1827d87aabbd6426f6e15ba997cd07ac9eb3859", + "prompt_hash": "caf3c756d70db75fe1205e6839156b37c2667f3ddb4a08ac10da2bab6dfef009", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 66, + "doc": { + "question": "Which densification strategy in gsplat is based on a Bayesian perspective using SGLD?", + "choices": [ + "ADC", + "Absgrad", + "MCMC", + "SHG" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which densification strategy in gsplat is based on a Bayesian perspective using SGLD?\nAnswer:", + " ADC" + ], + [ + "Question:Which densification strategy in gsplat is based on a Bayesian perspective using SGLD?\nAnswer:", + " Absgrad" + ], + [ + "Question:Which densification strategy in gsplat is based on a Bayesian perspective using SGLD?\nAnswer:", + " MCMC" + ], + [ + "Question:Which densification strategy in gsplat is based on a Bayesian perspective using SGLD?\nAnswer:", + " SHG" + ] + ], + "resps": [ + [ + [ + -15.597640991210938, + false + ] + ], + [ + [ + -21.404687881469727, + false + ] + ], + [ + [ + -8.992700576782227, + false + ] + ], + [ + [ + -13.896909713745117, + false + ] + ] + ], + "filtered_resps": [ + [ + 
-15.597640991210938, + false + ], + [ + -21.404687881469727, + false + ], + [ + -8.992700576782227, + false + ], + [ + -13.896909713745117, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "9d49ca94b540e249c2679eaa12be0fa663ff1ee9035b86504f1a8d460014df12", + "prompt_hash": "a30c1008a89164e906dd377775ee686a1b43a3d9ce66da3b1d847ab0aa19aae0", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 67, + "doc": { + "question": "Which CUDA-accelerated library supports both RGB and depth map rendering using differentiable rasterization?", + "choices": [ + "torch-splatting", + "gsplat", + "GauStudio", + "instant-ngp" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which CUDA-accelerated library supports both RGB and depth map rendering using differentiable rasterization?\nAnswer:", + " torch-splatting" + ], + [ + "Question:Which CUDA-accelerated library supports both RGB and depth map rendering using differentiable rasterization?\nAnswer:", + " gsplat" + ], + [ + "Question:Which CUDA-accelerated library supports both RGB and depth map rendering using differentiable rasterization?\nAnswer:", + " GauStudio" + ], + [ + "Question:Which CUDA-accelerated library supports both RGB and depth map rendering using differentiable rasterization?\nAnswer:", + " instant-ngp" + ] + ], + "resps": [ + [ + [ + -20.24197006225586, + false + ] + ], + [ + [ + -21.16048812866211, + false + ] + ], + [ + [ + -22.701190948486328, + false + ] + ], + [ + [ + -14.450319290161133, + false + ] + ] + ], + "filtered_resps": [ + [ + -20.24197006225586, + false + ], + [ + -21.16048812866211, + false + ], + [ + -22.701190948486328, + false + ], + [ + -14.450319290161133, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "3a5eb42470b10857233e816971280246b52c1ab8353efd055aab5c2f4ef7c48b", + "prompt_hash": "80efea59a81b004277094513555a8d3fea5085fa20b2bd149bf0a251c372755e", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 68, + "doc": { + "question": "What is the default threshold for culling Gaussians based on opacity in the ADC strategy?", + "choices": [ + "0.01", + "0.001", + "0.005", + "0.02" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the default threshold for culling Gaussians based on opacity in the ADC strategy?\nAnswer:", + " 0.01" + ], + [ + "Question:What is the default threshold for culling Gaussians based on opacity in the ADC strategy?\nAnswer:", + " 0.001" + ], + [ + "Question:What is the default threshold for culling Gaussians based on opacity in the ADC strategy?\nAnswer:", + " 0.005" + ], + [ + "Question:What is the default threshold for culling Gaussians based on opacity in the ADC strategy?\nAnswer:", + " 0.02" + ] + ], + "resps": [ + [ + [ + -4.799712181091309, + false + ] + ], + [ + [ + -5.712863922119141, + false + ] + ], + [ + [ + -6.453279495239258, + false + ] + ], + [ + [ + -5.598419666290283, + false + ] + ] + ], + "filtered_resps": [ + [ + -4.799712181091309, + false + ], + [ + -5.712863922119141, + false + ], + [ + -6.453279495239258, + false + ], + [ + -5.598419666290283, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "2753c252c20ee778d0651fa89f051f9ce493d0a7f6758d12c43c280951658466", + "prompt_hash": 
"5f66e60bcb2b0eb7eee2efda17ca71e50bdd8e0ce36d7877dbbabe8d2a5667dc", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 69, + "doc": { + "question": "What is the default value of the anti-aliasing parameter 's' in gsplat?", + "choices": [ + "0.1", + "0.2", + "0.3", + "0.5" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the default value of the anti-aliasing parameter 's' in gsplat?\nAnswer:", + " 0.1" + ], + [ + "Question:What is the default value of the anti-aliasing parameter 's' in gsplat?\nAnswer:", + " 0.2" + ], + [ + "Question:What is the default value of the anti-aliasing parameter 's' in gsplat?\nAnswer:", + " 0.3" + ], + [ + "Question:What is the default value of the anti-aliasing parameter 's' in gsplat?\nAnswer:", + " 0.5" + ] + ], + "resps": [ + [ + [ + -5.626408576965332, + false + ] + ], + [ + [ + -5.924612998962402, + false + ] + ], + [ + [ + -6.593517303466797, + false + ] + ], + [ + [ + -5.611193656921387, + false + ] + ] + ], + "filtered_resps": [ + [ + -5.626408576965332, + false + ], + [ + -5.924612998962402, + false + ], + [ + -6.593517303466797, + false + ], + [ + -5.611193656921387, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "d9ef7a2b2cb18d913274bf908caa43fe2c0e3d868458ce345eb5de473f7e9baf", + "prompt_hash": "01b84fcc2036f104fa4e0df9fa2ac7ca74feb1a427a13cd782bfb7759396c2f5", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 70, + "doc": { + "question": "What is the smallest positive integer \\( t \\) such that there exist integers \\( x_1, x_2, \\ldots, x_t \\) satisfying \\( x_1^3 + x_2^3 + \\cdots + x_t^3 = 2002^{2002} \\)?", + "choices": [ + "A. 3", + "B. 4", + "C. 5", + "D. 6" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the smallest positive integer \\( t \\) such that there exist integers \\( x_1, x_2, \\ldots, x_t \\) satisfying \\( x_1^3 + x_2^3 + \\cdots + x_t^3 = 2002^{2002} \\)?\nAnswer:", + " A. 3" + ], + [ + "Question:What is the smallest positive integer \\( t \\) such that there exist integers \\( x_1, x_2, \\ldots, x_t \\) satisfying \\( x_1^3 + x_2^3 + \\cdots + x_t^3 = 2002^{2002} \\)?\nAnswer:", + " B. 4" + ], + [ + "Question:What is the smallest positive integer \\( t \\) such that there exist integers \\( x_1, x_2, \\ldots, x_t \\) satisfying \\( x_1^3 + x_2^3 + \\cdots + x_t^3 = 2002^{2002} \\)?\nAnswer:", + " C. 5" + ], + [ + "Question:What is the smallest positive integer \\( t \\) such that there exist integers \\( x_1, x_2, \\ldots, x_t \\) satisfying \\( x_1^3 + x_2^3 + \\cdots + x_t^3 = 2002^{2002} \\)?\nAnswer:", + " D. 
6" + ] + ], + "resps": [ + [ + [ + -12.540119171142578, + false + ] + ], + [ + [ + -17.282943725585938, + false + ] + ], + [ + [ + -18.027965545654297, + false + ] + ], + [ + [ + -17.628211975097656, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.540119171142578, + false + ], + [ + -17.282943725585938, + false + ], + [ + -18.027965545654297, + false + ], + [ + -17.628211975097656, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "ff1d2def68ec018b36ac8c6dfa4ce07df301cfbc19e9c315d5e20acf8f8bcd9b", + "prompt_hash": "58bd525ae7ffa87e25e53c0d1d63c3f6e27f16d75ff862d206a4b58fffed811f", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 71, + "doc": { + "question": "Which function satisfies the property that for each \\( x \\in \\mathbb{R}^+ \\), there exists exactly one \\( y \\in \\mathbb{R}^+ \\) such that \\( xf(y) + yf(x) \\leq 2 \\)?", + "choices": [ + "A.\\( f(x) = x^2 \\)", + "B. \\( f(x) = 1 \\)", + "C. \\( f(x) = \\frac{1}{x} \\)", + "D. \\( f(x) = x + 1 \\)" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which function satisfies the property that for each \\( x \\in \\mathbb{R}^+ \\), there exists exactly one \\( y \\in \\mathbb{R}^+ \\) such that \\( xf(y) + yf(x) \\leq 2 \\)?\nAnswer:", + " A.\\( f(x) = x^2 \\)" + ], + [ + "Question:Which function satisfies the property that for each \\( x \\in \\mathbb{R}^+ \\), there exists exactly one \\( y \\in \\mathbb{R}^+ \\) such that \\( xf(y) + yf(x) \\leq 2 \\)?\nAnswer:", + " B. \\( f(x) = 1 \\)" + ], + [ + "Question:Which function satisfies the property that for each \\( x \\in \\mathbb{R}^+ \\), there exists exactly one \\( y \\in \\mathbb{R}^+ \\) such that \\( xf(y) + yf(x) \\leq 2 \\)?\nAnswer:", + " C. \\( f(x) = \\frac{1}{x} \\)" + ], + [ + "Question:Which function satisfies the property that for each \\( x \\in \\mathbb{R}^+ \\), there exists exactly one \\( y \\in \\mathbb{R}^+ \\) such that \\( xf(y) + yf(x) \\leq 2 \\)?\nAnswer:", + " D. \\( f(x) = x + 1 \\)" + ] + ], + "resps": [ + [ + [ + -19.827207565307617, + false + ] + ], + [ + [ + -20.322336196899414, + false + ] + ], + [ + [ + -21.418210983276367, + false + ] + ], + [ + [ + -22.060394287109375, + false + ] + ] + ], + "filtered_resps": [ + [ + -19.827207565307617, + false + ], + [ + -20.322336196899414, + false + ], + [ + -21.418210983276367, + false + ], + [ + -22.060394287109375, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "aef2aa8bcc1161172ca855d45b2b7026cbb5b6215ec2b4835db010afe650bff4", + "prompt_hash": "5ba85c492e639300e8f33662c7377e2dd6cfa7c4c8201da4c4e2fb94ab263f27", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 72, + "doc": { + "question": "What is the answer to whether it is possible that \\( s(k) \\) and \\( s(P(k)) \\) have the same parity for all positive integers \\( k \\), where \\( s(n) \\) denotes the sum of digits of \\( n \\)?", + "choices": [ + "A. Yes", + "B. No", + "C. Sometimes", + "D. Cannot be determined" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the answer to whether it is possible that \\( s(k) \\) and \\( s(P(k)) \\) have the same parity for all positive integers \\( k \\), where \\( s(n) \\) denotes the sum of digits of \\( n \\)?\nAnswer:", + " A. 
Yes" + ], + [ + "Question:What is the answer to whether it is possible that \\( s(k) \\) and \\( s(P(k)) \\) have the same parity for all positive integers \\( k \\), where \\( s(n) \\) denotes the sum of digits of \\( n \\)?\nAnswer:", + " B. No" + ], + [ + "Question:What is the answer to whether it is possible that \\( s(k) \\) and \\( s(P(k)) \\) have the same parity for all positive integers \\( k \\), where \\( s(n) \\) denotes the sum of digits of \\( n \\)?\nAnswer:", + " C. Sometimes" + ], + [ + "Question:What is the answer to whether it is possible that \\( s(k) \\) and \\( s(P(k)) \\) have the same parity for all positive integers \\( k \\), where \\( s(n) \\) denotes the sum of digits of \\( n \\)?\nAnswer:", + " D. Cannot be determined" + ] + ], + "resps": [ + [ + [ + -10.544660568237305, + false + ] + ], + [ + [ + -14.121866226196289, + false + ] + ], + [ + [ + -20.223602294921875, + false + ] + ], + [ + [ + -25.502727508544922, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.544660568237305, + false + ], + [ + -14.121866226196289, + false + ], + [ + -20.223602294921875, + false + ], + [ + -25.502727508544922, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "e52014fa502465af56621ee5459f41c8d9ef996e16731caf76c60c39f71a0572", + "prompt_hash": "b751d112055d2f216ddab13abfb425a9e4b66761413232e32738a2ffc59a0d5a", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 73, + "doc": { + "question": "What is the maximum number of students \\( k \\) such that any two have at least one course in common, and no outsider can be classmates with all \\( k \\)?", + "choices": [ + "A. 256", + "B. 512", + "C. 1023", + "D. 1024" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the maximum number of students \\( k \\) such that any two have at least one course in common, and no outsider can be classmates with all \\( k \\)?\nAnswer:", + " A. 256" + ], + [ + "Question:What is the maximum number of students \\( k \\) such that any two have at least one course in common, and no outsider can be classmates with all \\( k \\)?\nAnswer:", + " B. 512" + ], + [ + "Question:What is the maximum number of students \\( k \\) such that any two have at least one course in common, and no outsider can be classmates with all \\( k \\)?\nAnswer:", + " C. 1023" + ], + [ + "Question:What is the maximum number of students \\( k \\) such that any two have at least one course in common, and no outsider can be classmates with all \\( k \\)?\nAnswer:", + " D. 
1024" + ] + ], + "resps": [ + [ + [ + -19.086959838867188, + false + ] + ], + [ + [ + -24.054302215576172, + false + ] + ], + [ + [ + -28.452510833740234, + false + ] + ], + [ + [ + -27.108470916748047, + false + ] + ] + ], + "filtered_resps": [ + [ + -19.086959838867188, + false + ], + [ + -24.054302215576172, + false + ], + [ + -28.452510833740234, + false + ], + [ + -27.108470916748047, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "69a9652202d73079db7a9bd7ed603c9584a9846c30e7d73e2aa2a0cd8723d7b9", + "prompt_hash": "eedfc36fc0e2bfd5bc91cf0e269066cc1dec19da4fa2ffd9332c0dbbc7320d8d", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 74, + "doc": { + "question": "Who wins the game where Joe and Penny remove stones with increasing limits per move, starting from 5000 stones and alternating turns?", + "choices": [ + "A. Joe", + "B. Penny", + "C. It's a draw", + "D. Cannot be determined" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Who wins the game where Joe and Penny remove stones with increasing limits per move, starting from 5000 stones and alternating turns?\nAnswer:", + " A. Joe" + ], + [ + "Question:Who wins the game where Joe and Penny remove stones with increasing limits per move, starting from 5000 stones and alternating turns?\nAnswer:", + " B. Penny" + ], + [ + "Question:Who wins the game where Joe and Penny remove stones with increasing limits per move, starting from 5000 stones and alternating turns?\nAnswer:", + " C. It's a draw" + ], + [ + "Question:Who wins the game where Joe and Penny remove stones with increasing limits per move, starting from 5000 stones and alternating turns?\nAnswer:", + " D. 
Cannot be determined" + ] + ], + "resps": [ + [ + [ + -8.734781265258789, + false + ] + ], + [ + [ + -11.859012603759766, + false + ] + ], + [ + [ + -23.23371124267578, + false + ] + ], + [ + [ + -28.801799774169922, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.734781265258789, + false + ], + [ + -11.859012603759766, + false + ], + [ + -23.23371124267578, + false + ], + [ + -28.801799774169922, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "384f7ec867d80108c16e91e7b85808724733266fff2fdce9a3462149734ba984", + "prompt_hash": "05de3ecb83cbdc67d7a9928100b0a19366b0596238a98cb064bca07239cbc3d9", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 75, + "doc": { + "question": "What does LoRA update during fine-tuning?", + "choices": [ + "Only the original model weights", + "Only the bias terms", + "Only the low-rank matrices A and B", + "All parameters of the model" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What does LoRA update during fine-tuning?\nAnswer:", + " Only the original model weights" + ], + [ + "Question:What does LoRA update during fine-tuning?\nAnswer:", + " Only the bias terms" + ], + [ + "Question:What does LoRA update during fine-tuning?\nAnswer:", + " Only the low-rank matrices A and B" + ], + [ + "Question:What does LoRA update during fine-tuning?\nAnswer:", + " All parameters of the model" + ] + ], + "resps": [ + [ + [ + -16.489402770996094, + false + ] + ], + [ + [ + -14.673160552978516, + false + ] + ], + [ + [ + -19.73063087463379, + false + ] + ], + [ + [ + -12.429987907409668, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.489402770996094, + false + ], + [ + -14.673160552978516, + false + ], + [ + -19.73063087463379, + false + ], + [ + -12.429987907409668, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "c9ddacc3de73b039949f28da7796fef39b37af97348266e2cddc327310dc2b12", + "prompt_hash": "45f5e6babc5b65261709ef37a6a4ba781194093d2b182e9b94babfe251f5005b", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 76, + "doc": { + "question": "What is the primary limitation of FedIT when clients have different LoRA ranks?", + "choices": [ + "It cannot communicate with more than 10 clients", + "It introduces delays due to encryption", + "It cannot aggregate local LoRAs with heterogeneous ranks", + "It lacks support for any LoRA rank" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the primary limitation of FedIT when clients have different LoRA ranks?\nAnswer:", + " It cannot communicate with more than 10 clients" + ], + [ + "Question:What is the primary limitation of FedIT when clients have different LoRA ranks?\nAnswer:", + " It introduces delays due to encryption" + ], + [ + "Question:What is the primary limitation of FedIT when clients have different LoRA ranks?\nAnswer:", + " It cannot aggregate local LoRAs with heterogeneous ranks" + ], + [ + "Question:What is the primary limitation of FedIT when clients have different LoRA ranks?\nAnswer:", + " It lacks support for any LoRA rank" + ] + ], + "resps": [ + [ + [ + -28.548847198486328, + false + ] + ], + [ + [ + -30.07623863220215, + false + ] + ], + [ + [ + -34.82258605957031, + false + ] + ], + [ + [ + -23.75527572631836, + false + ] + ] + ], + "filtered_resps": [ + [ + -28.548847198486328, + 
false + ], + [ + -30.07623863220215, + false + ], + [ + -34.82258605957031, + false + ], + [ + -23.75527572631836, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "5f313bde67b9e18bca0d640d237389a6a41cdcc68166bbc185ac0dcc9d6d1c74", + "prompt_hash": "54439a6ab5141dff92cc07e799f1f6ac1f9d6a0a970c1aa8d9ed283468651382", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 77, + "doc": { + "question": "Which mathematical inequality illustrates the aggregation noise in FedIT?", + "choices": [ + "ΣA × ΣB = ΣA×B", + "ΣA + ΣB = Σ(A+B)", + "ΣA × ΣB ≠ ΣA×B", + "A × B = B × A" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which mathematical inequality illustrates the aggregation noise in FedIT?\nAnswer:", + " ΣA × ΣB = ΣA×B" + ], + [ + "Question:Which mathematical inequality illustrates the aggregation noise in FedIT?\nAnswer:", + " ΣA + ΣB = Σ(A+B)" + ], + [ + "Question:Which mathematical inequality illustrates the aggregation noise in FedIT?\nAnswer:", + " ΣA × ΣB ≠ ΣA×B" + ], + [ + "Question:Which mathematical inequality illustrates the aggregation noise in FedIT?\nAnswer:", + " A × B = B × A" + ] + ], + "resps": [ + [ + [ + -46.70803451538086, + false + ] + ], + [ + [ + -35.111167907714844, + false + ] + ], + [ + [ + -52.45105743408203, + false + ] + ], + [ + [ + -23.067930221557617, + false + ] + ] + ], + "filtered_resps": [ + [ + -46.70803451538086, + false + ], + [ + -35.111167907714844, + false + ], + [ + -52.45105743408203, + false + ], + [ + -23.067930221557617, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "c38660c2b5fdfbe620a25e1771ca70b1c0e8f7a6d749d26a1a8eb72abcd55f5f", + "prompt_hash": "b23c2a87198b56c5a1b9b032fb8f76f58cc1b32740c848d8b3d6c77e185a9d49", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 78, + "doc": { + "question": "What is the dimension of LoRA matrix A when the rank is set to 16 for a LLaMA-7B model?", + "choices": [ + "4096 × 4096", + "4096 × 16", + "16 × 4096", + "16 × 16" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the dimension of LoRA matrix A when the rank is set to 16 for a LLaMA-7B model?\nAnswer:", + " 4096 × 4096" + ], + [ + "Question:What is the dimension of LoRA matrix A when the rank is set to 16 for a LLaMA-7B model?\nAnswer:", + " 4096 × 16" + ], + [ + "Question:What is the dimension of LoRA matrix A when the rank is set to 16 for a LLaMA-7B model?\nAnswer:", + " 16 × 4096" + ], + [ + "Question:What is the dimension of LoRA matrix A when the rank is set to 16 for a LLaMA-7B model?\nAnswer:", + " 16 × 16" + ] + ], + "resps": [ + [ + [ + -14.631674766540527, + false + ] + ], + [ + [ + -16.16933250427246, + false + ] + ], + [ + [ + -16.902545928955078, + false + ] + ], + [ + [ + -10.089134216308594, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.631674766540527, + false + ], + [ + -16.16933250427246, + false + ], + [ + -16.902545928955078, + false + ], + [ + -10.089134216308594, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7b37553842b931dc0bc0dd2ce757625784693d0ca565acd95c7612158325c33f", + "prompt_hash": "fa9872f95ee586fc82ac541c3731438cf2389ee3d88d1488bd93cd963667165d", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 
0.0 + }, + { + "doc_id": 79, + "doc": { + "question": "What operation is used in FLoRA to aggregate LoRA modules from multiple clients?", + "choices": [ + "Matrix inversion", + "Module duplication", + "Stacking", + "Element-wise multiplication" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What operation is used in FLoRA to aggregate LoRA modules from multiple clients?\nAnswer:", + " Matrix inversion" + ], + [ + "Question:What operation is used in FLoRA to aggregate LoRA modules from multiple clients?\nAnswer:", + " Module duplication" + ], + [ + "Question:What operation is used in FLoRA to aggregate LoRA modules from multiple clients?\nAnswer:", + " Stacking" + ], + [ + "Question:What operation is used in FLoRA to aggregate LoRA modules from multiple clients?\nAnswer:", + " Element-wise multiplication" + ] + ], + "resps": [ + [ + [ + -13.950897216796875, + false + ] + ], + [ + [ + -15.547689437866211, + false + ] + ], + [ + [ + -9.502487182617188, + false + ] + ], + [ + [ + -8.775291442871094, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.950897216796875, + false + ], + [ + -15.547689437866211, + false + ], + [ + -9.502487182617188, + false + ], + [ + -8.775291442871094, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "18616f7d416b734bb496239b878291e49111f7c0ab679eeddadaf7a1fc1f9fdc", + "prompt_hash": "cd7bd01f285b3dea733e29ccaddf2c1e488215351be7baf12819bc3c33d3a944", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 80, + "doc": { + "question": "What is the highest accuracy achieved by LLaMA 3.1 405B on the regular Blocksworld dataset?", + "choices": [ + "62.6%", + "52.8%", + "82.0%", + "97.8%" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the highest accuracy achieved by LLaMA 3.1 405B on the regular Blocksworld dataset?\nAnswer:", + " 62.6%" + ], + [ + "Question:What is the highest accuracy achieved by LLaMA 3.1 405B on the regular Blocksworld dataset?\nAnswer:", + " 52.8%" + ], + [ + "Question:What is the highest accuracy achieved by LLaMA 3.1 405B on the regular Blocksworld dataset?\nAnswer:", + " 82.0%" + ], + [ + "Question:What is the highest accuracy achieved by LLaMA 3.1 405B on the regular Blocksworld dataset?\nAnswer:", + " 97.8%" + ] + ], + "resps": [ + [ + [ + -14.532271385192871, + false + ] + ], + [ + [ + -14.77785587310791, + false + ] + ], + [ + [ + -13.464859962463379, + false + ] + ], + [ + [ + -13.568106651306152, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.532271385192871, + false + ], + [ + -14.77785587310791, + false + ], + [ + -13.464859962463379, + false + ], + [ + -13.568106651306152, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "4d3a6a1e8cfc25ac671d4c12e53eb1831e93fe6c7b2b3858fb109b324f5acd4a", + "prompt_hash": "5cf63224372e80012ab0d364dde0ac06edc4a61d4bf4f677d8f899c76df107de", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 81, + "doc": { + "question": "How many steps do the Blocksworld problems in the PlanBench dataset require to solve?", + "choices": [ + "2 to 16 steps", + "10 to 30 steps", + "3 to 15 steps", + "5 to 20 steps" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:How many steps do the Blocksworld problems in the PlanBench dataset require to solve?\nAnswer:", + " 2 to 16 steps" + ], + 
[ + "Question:How many steps do the Blocksworld problems in the PlanBench dataset require to solve?\nAnswer:", + " 10 to 30 steps" + ], + [ + "Question:How many steps do the Blocksworld problems in the PlanBench dataset require to solve?\nAnswer:", + " 3 to 15 steps" + ], + [ + "Question:How many steps do the Blocksworld problems in the PlanBench dataset require to solve?\nAnswer:", + " 5 to 20 steps" + ] + ], + "resps": [ + [ + [ + -16.07807159423828, + false + ] + ], + [ + [ + -13.862556457519531, + false + ] + ], + [ + [ + -14.577719688415527, + false + ] + ], + [ + [ + -13.827674865722656, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.07807159423828, + false + ], + [ + -13.862556457519531, + false + ], + [ + -14.577719688415527, + false + ], + [ + -13.827674865722656, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "3759c5f8da919bb1d2ea2b0722011fe9d55074e9f7991c5078ee4b9970fc6eae", + "prompt_hash": "63490735f777659af0d2ebe263086aa21b8e16fce98f28321ed0d880b0693ff4", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 82, + "doc": { + "question": "How much did the researchers report spending on o1 model experiments?", + "choices": [ + "$1897.55", + "$975.50", + "$2783.40", + "$1325.00" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:How much did the researchers report spending on o1 model experiments?\nAnswer:", + " $1897.55" + ], + [ + "Question:How much did the researchers report spending on o1 model experiments?\nAnswer:", + " $975.50" + ], + [ + "Question:How much did the researchers report spending on o1 model experiments?\nAnswer:", + " $2783.40" + ], + [ + "Question:How much did the researchers report spending on o1 model experiments?\nAnswer:", + " $1325.00" + ] + ], + "resps": [ + [ + [ + -19.34306526184082, + false + ] + ], + [ + [ + -17.32545280456543, + false + ] + ], + [ + [ + -19.518150329589844, + false + ] + ], + [ + [ + -15.12232494354248, + false + ] + ] + ], + "filtered_resps": [ + [ + -19.34306526184082, + false + ], + [ + -17.32545280456543, + false + ], + [ + -19.518150329589844, + false + ], + [ + -15.12232494354248, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "3f1f4e50c43d044f5631966b4bcaa790ad7a0c82e22086d1f50d5ecd2cb50a41", + "prompt_hash": "ce84816c19ef6f5dbd25e89ad489bf610fb24cd3b759bdebb4f1ab53474df311", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 83, + "doc": { + "question": "What percentage of randomized Mystery Blocksworld unsolvable instances were falsely claimed as impossible by the model?", + "choices": [ + "11.5%", + "5%", + "16%", + "27%" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What percentage of randomized Mystery Blocksworld unsolvable instances were falsely claimed as impossible by the model?\nAnswer:", + " 11.5%" + ], + [ + "Question:What percentage of randomized Mystery Blocksworld unsolvable instances were falsely claimed as impossible by the model?\nAnswer:", + " 5%" + ], + [ + "Question:What percentage of randomized Mystery Blocksworld unsolvable instances were falsely claimed as impossible by the model?\nAnswer:", + " 16%" + ], + [ + "Question:What percentage of randomized Mystery Blocksworld unsolvable instances were falsely claimed as impossible by the model?\nAnswer:", + " 27%" + ] + ], + "resps": [ + [ + [ + 
-12.747138023376465, + false + ] + ], + [ + [ + -9.35422134399414, + false + ] + ], + [ + [ + -10.66763973236084, + false + ] + ], + [ + [ + -11.035689353942871, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.747138023376465, + false + ], + [ + -9.35422134399414, + false + ], + [ + -10.66763973236084, + false + ], + [ + -11.035689353942871, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "dfb7671ceb3b0e0d7d62d30d1f081a8b650635dc785392ee16b600aab062f7ec", + "prompt_hash": "21053b00326cf3ea0175f96b68b215af89dbc864bf99d0da0d1654df70ab6699", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 84, + "doc": { + "question": "How long did Fast Downward take on average to solve a Blocksworld instance?", + "choices": [ + "0.265 seconds", + "2.65 seconds", + "1.25 seconds", + "0.562 seconds" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:How long did Fast Downward take on average to solve a Blocksworld instance?\nAnswer:", + " 0.265 seconds" + ], + [ + "Question:How long did Fast Downward take on average to solve a Blocksworld instance?\nAnswer:", + " 2.65 seconds" + ], + [ + "Question:How long did Fast Downward take on average to solve a Blocksworld instance?\nAnswer:", + " 1.25 seconds" + ], + [ + "Question:How long did Fast Downward take on average to solve a Blocksworld instance?\nAnswer:", + " 0.562 seconds" + ] + ], + "resps": [ + [ + [ + -12.132086753845215, + false + ] + ], + [ + [ + -11.014654159545898, + false + ] + ], + [ + [ + -9.894144058227539, + false + ] + ], + [ + [ + -12.297073364257812, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.132086753845215, + false + ], + [ + -11.014654159545898, + false + ], + [ + -9.894144058227539, + false + ], + [ + -12.297073364257812, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "019764cc7d216dc4c71ecb9c7b035d108cddded072ed5a1536a2314957381b76", + "prompt_hash": "6ed5a165c77e6410e7ce5b5e3b46407a205fa6a4ccbe9298c7ab249929ed554e", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 85, + "doc": { + "question": "What is the main reason OCR-based text extraction is unsuitable for high-quality document data extraction?", + "choices": [ + "It is too slow for large documents.", + "It requires labeled training data.", + "It introduces noise when processing images, tables, and formulas.", + "It only supports English text." + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the main reason OCR-based text extraction is unsuitable for high-quality document data extraction?\nAnswer:", + " It is too slow for large documents." + ], + [ + "Question:What is the main reason OCR-based text extraction is unsuitable for high-quality document data extraction?\nAnswer:", + " It requires labeled training data." + ], + [ + "Question:What is the main reason OCR-based text extraction is unsuitable for high-quality document data extraction?\nAnswer:", + " It introduces noise when processing images, tables, and formulas." + ], + [ + "Question:What is the main reason OCR-based text extraction is unsuitable for high-quality document data extraction?\nAnswer:", + " It only supports English text." 
+ ] + ], + "resps": [ + [ + [ + -19.091449737548828, + false + ] + ], + [ + [ + -16.6263370513916, + false + ] + ], + [ + [ + -46.70049285888672, + false + ] + ], + [ + [ + -18.63629913330078, + false + ] + ] + ], + "filtered_resps": [ + [ + -19.091449737548828, + false + ], + [ + -16.6263370513916, + false + ], + [ + -46.70049285888672, + false + ], + [ + -18.63629913330078, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "466404aaa1d54d80fa94d7b2cfe084ca64f9489abe8d2f9a933a9acb1b08236e", + "prompt_hash": "4bf51dce5b41b771f49adf61efe7e776d91851252c2d335140a9c3c53bcdb4f3", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 86, + "doc": { + "question": "Which model does MinerU use for formula recognition?", + "choices": [ + "Pix2tex", + "Texify", + "UniMERNet", + "Mathpix" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which model does MinerU use for formula recognition?\nAnswer:", + " Pix2tex" + ], + [ + "Question:Which model does MinerU use for formula recognition?\nAnswer:", + " Texify" + ], + [ + "Question:Which model does MinerU use for formula recognition?\nAnswer:", + " UniMERNet" + ], + [ + "Question:Which model does MinerU use for formula recognition?\nAnswer:", + " Mathpix" + ] + ], + "resps": [ + [ + [ + -24.9783935546875, + false + ] + ], + [ + [ + -19.929180145263672, + false + ] + ], + [ + [ + -27.404014587402344, + false + ] + ], + [ + [ + -20.638700485229492, + false + ] + ] + ], + "filtered_resps": [ + [ + -24.9783935546875, + false + ], + [ + -19.929180145263672, + false + ], + [ + -27.404014587402344, + false + ], + [ + -20.638700485229492, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "5f798d7eefabe6752366a0de70c102ffe7012c3160888e78084fd0aaab2b2d3d", + "prompt_hash": "4ab0e7faeb3668d22081b105fca0cb00395e4b53703df6f58970034e5b60fc8d", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 87, + "doc": { + "question": "What layout detection model was fine-tuned on MinerU's internal dataset?", + "choices": [ + "DocXchain", + "LayoutLMv3", + "Surya", + "360LayoutAnalysis-Paper" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What layout detection model was fine-tuned on MinerU's internal dataset?\nAnswer:", + " DocXchain" + ], + [ + "Question:What layout detection model was fine-tuned on MinerU's internal dataset?\nAnswer:", + " LayoutLMv3" + ], + [ + "Question:What layout detection model was fine-tuned on MinerU's internal dataset?\nAnswer:", + " Surya" + ], + [ + "Question:What layout detection model was fine-tuned on MinerU's internal dataset?\nAnswer:", + " 360LayoutAnalysis-Paper" + ] + ], + "resps": [ + [ + [ + -27.757598876953125, + false + ] + ], + [ + [ + -11.168102264404297, + false + ] + ], + [ + [ + -17.636960983276367, + false + ] + ], + [ + [ + -40.06000518798828, + false + ] + ] + ], + "filtered_resps": [ + [ + -27.757598876953125, + false + ], + [ + -11.168102264404297, + false + ], + [ + -17.636960983276367, + false + ], + [ + -40.06000518798828, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "78a8f26b79e106deb0cda4f1dc69d21ee3b0cfed8e5d23011f39e98d8778ffa3", + "prompt_hash": "7447f7e97610c9d135bffcb6bedb3973cfcb18b24a95c9b56c025f12be6fc215", + "target_hash": 
"6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 88, + "doc": { + "question": "Which two models are used by MinerU for table recognition tasks?", + "choices": [ + "StructEqTable and TableMaster", + "DocXchain and TableMaster", + "Pix2tex and StructEqTable", + "Texify and TableMaster" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which two models are used by MinerU for table recognition tasks?\nAnswer:", + " StructEqTable and TableMaster" + ], + [ + "Question:Which two models are used by MinerU for table recognition tasks?\nAnswer:", + " DocXchain and TableMaster" + ], + [ + "Question:Which two models are used by MinerU for table recognition tasks?\nAnswer:", + " Pix2tex and StructEqTable" + ], + [ + "Question:Which two models are used by MinerU for table recognition tasks?\nAnswer:", + " Texify and TableMaster" + ] + ], + "resps": [ + [ + [ + -41.12918472290039, + false + ] + ], + [ + [ + -46.08926773071289, + false + ] + ], + [ + [ + -45.41828918457031, + false + ] + ], + [ + [ + -38.265869140625, + false + ] + ] + ], + "filtered_resps": [ + [ + -41.12918472290039, + false + ], + [ + -46.08926773071289, + false + ], + [ + -45.41828918457031, + false + ], + [ + -38.265869140625, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7ebc7a53eeb4822f4633bb50c81307b737b7e5d75cedec0d907633580d14d0c7", + "prompt_hash": "94f14cd39b28fc71d7d77acfeda9bce9541e04e522aeb95d34315422b2fb94c5", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 89, + "doc": { + "question": "What document attribute determines whether MinerU applies OCR or direct text extraction?", + "choices": [ + "Document file size", + "Language of the document", + "Presence of formulas", + "Scanned vs. text-based PDF" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What document attribute determines whether MinerU applies OCR or direct text extraction?\nAnswer:", + " Document file size" + ], + [ + "Question:What document attribute determines whether MinerU applies OCR or direct text extraction?\nAnswer:", + " Language of the document" + ], + [ + "Question:What document attribute determines whether MinerU applies OCR or direct text extraction?\nAnswer:", + " Presence of formulas" + ], + [ + "Question:What document attribute determines whether MinerU applies OCR or direct text extraction?\nAnswer:", + " Scanned vs. 
text-based PDF" + ] + ], + "resps": [ + [ + [ + -15.031461715698242, + false + ] + ], + [ + [ + -13.443138122558594, + false + ] + ], + [ + [ + -21.756549835205078, + false + ] + ], + [ + [ + -32.97414016723633, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.031461715698242, + false + ], + [ + -13.443138122558594, + false + ], + [ + -21.756549835205078, + false + ], + [ + -32.97414016723633, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b65253960a8d398702513c3de6c03b58763a782ea4c8eb76d51069f4c1a83a16", + "prompt_hash": "6ec9281ad15979a5e1a2f3d8be4a535782184f3f378b9de562997926427b1b65", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 90, + "doc": { + "question": "What is the codebook size used by the vision tokenizer?", + "choices": [ + "16384", + "32768", + "65536", + "4096" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the codebook size used by the vision tokenizer?\nAnswer:", + " 16384" + ], + [ + "Question:What is the codebook size used by the vision tokenizer?\nAnswer:", + " 32768" + ], + [ + "Question:What is the codebook size used by the vision tokenizer?\nAnswer:", + " 65536" + ], + [ + "Question:What is the codebook size used by the vision tokenizer?\nAnswer:", + " 4096" + ] + ], + "resps": [ + [ + [ + -9.472506523132324, + false + ] + ], + [ + [ + -9.326908111572266, + false + ] + ], + [ + [ + -8.893693923950195, + false + ] + ], + [ + [ + -6.730905532836914, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.472506523132324, + false + ], + [ + -9.326908111572266, + false + ], + [ + -8.893693923950195, + false + ], + [ + -6.730905532836914, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "921138505deac46d1dc26ce3e7d95d3173d37af399a55da377c25799cf397256", + "prompt_hash": "8c13fc6441acf5a97158c0331c0abc98f3f5d45d7793efd374c10a6dab8654bb", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 91, + "doc": { + "question": "What is the total context length supported by the model during video training?", + "choices": [ + "4096", + "131072", + "2048", + "65536" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the total context length supported by the model during video training?\nAnswer:", + " 4096" + ], + [ + "Question:What is the total context length supported by the model during video training?\nAnswer:", + " 131072" + ], + [ + "Question:What is the total context length supported by the model during video training?\nAnswer:", + " 2048" + ], + [ + "Question:What is the total context length supported by the model during video training?\nAnswer:", + " 65536" + ] + ], + "resps": [ + [ + [ + -6.252519130706787, + false + ] + ], + [ + [ + -11.824712753295898, + false + ] + ], + [ + [ + -6.172224998474121, + false + ] + ], + [ + [ + -10.215291976928711, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.252519130706787, + false + ], + [ + -11.824712753295898, + false + ], + [ + -6.172224998474121, + false + ], + [ + -10.215291976928711, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "bfeee6c195dc0a3c16023724818ceb8649450dbbb88b1962eab3682c072102b1", + "prompt_hash": "9c81cc8c1465e209432661336484c3e2cfd0aafef5bb044705a5b1a7e7ec3ce2", + "target_hash": 
"6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 92, + "doc": { + "question": "What filtering threshold was used with the LAION-AI aesthetic predictor for images?", + "choices": [ + "5.5", + "4.0", + "6.0", + "5.0" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What filtering threshold was used with the LAION-AI aesthetic predictor for images?\nAnswer:", + " 5.5" + ], + [ + "Question:What filtering threshold was used with the LAION-AI aesthetic predictor for images?\nAnswer:", + " 4.0" + ], + [ + "Question:What filtering threshold was used with the LAION-AI aesthetic predictor for images?\nAnswer:", + " 6.0" + ], + [ + "Question:What filtering threshold was used with the LAION-AI aesthetic predictor for images?\nAnswer:", + " 5.0" + ] + ], + "resps": [ + [ + [ + -8.747995376586914, + false + ] + ], + [ + [ + -8.179380416870117, + false + ] + ], + [ + [ + -9.052521705627441, + false + ] + ], + [ + [ + -7.995424270629883, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.747995376586914, + false + ], + [ + -8.179380416870117, + false + ], + [ + -9.052521705627441, + false + ], + [ + -7.995424270629883, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "e58253cf7bf1b42c0e218db5cc1e700afe70d68ab4c4326e0ca1c1393d3b2508", + "prompt_hash": "f104f738b2da0ffbac987df020d34e8922606c468203a1c53463f2ffb1220886", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 93, + "doc": { + "question": "Which activation function is used in the model architecture?", + "choices": [ + "ReLU", + "GELU", + "SwiGLU", + "LeakyReLU" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which activation function is used in the model architecture?\nAnswer:", + " ReLU" + ], + [ + "Question:Which activation function is used in the model architecture?\nAnswer:", + " GELU" + ], + [ + "Question:Which activation function is used in the model architecture?\nAnswer:", + " SwiGLU" + ], + [ + "Question:Which activation function is used in the model architecture?\nAnswer:", + " LeakyReLU" + ] + ], + "resps": [ + [ + [ + -2.701192855834961, + false + ] + ], + [ + [ + -6.409379482269287, + false + ] + ], + [ + [ + -12.06315803527832, + false + ] + ], + [ + [ + -4.720894813537598, + false + ] + ] + ], + "filtered_resps": [ + [ + -2.701192855834961, + false + ], + [ + -6.409379482269287, + false + ], + [ + -12.06315803527832, + false + ], + [ + -4.720894813537598, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "fa9b33bb481b548158c9744834bef4ed62e97a0d0893c4e5c69840958b0b0189", + "prompt_hash": "13ba185649417990c269bbd6c34c95cbfc797c8f4234e5923414f85e38ab18ae", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 94, + "doc": { + "question": "What are the dimensions of the visual tokens that the vision tokenizer compresses a 512x512 image into?", + "choices": [ + "64", + "4096", + "512", + "32" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What are the dimensions of the visual tokens that the vision tokenizer compresses a 512x512 image into?\nAnswer:", + " 64" + ], + [ + "Question:What are the dimensions of the visual tokens that the vision tokenizer compresses a 512x512 image into?\nAnswer:", + " 4096" + ], + [ + "Question:What are the 
dimensions of the visual tokens that the vision tokenizer compresses a 512x512 image into?\nAnswer:", + " 512" + ], + [ + "Question:What are the dimensions of the visual tokens that the vision tokenizer compresses a 512x512 image into?\nAnswer:", + " 32" + ] + ], + "resps": [ + [ + [ + -5.736916542053223, + false + ] + ], + [ + [ + -7.617210388183594, + false + ] + ], + [ + [ + -5.7705512046813965, + false + ] + ], + [ + [ + -5.485168933868408, + false + ] + ] + ], + "filtered_resps": [ + [ + -5.736916542053223, + false + ], + [ + -7.617210388183594, + false + ], + [ + -5.7705512046813965, + false + ], + [ + -5.485168933868408, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "d480b3c765bc0fd2dfba85e7928b9e6256f3d92f03d35f0c172d8ed3889bead1", + "prompt_hash": "84fe528602ee3059051e8e68a2b6b11ca0e4eda52876bde6554ba7e13bc5c0d5", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 95, + "doc": { + "question": "What simulator does the PhysGen system use for 2D rigid body dynamics?", + "choices": [ + "PyBullet", + "MuJoCo", + "Pymunk", + "PhysX" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What simulator does the PhysGen system use for 2D rigid body dynamics?\nAnswer:", + " PyBullet" + ], + [ + "Question:What simulator does the PhysGen system use for 2D rigid body dynamics?\nAnswer:", + " MuJoCo" + ], + [ + "Question:What simulator does the PhysGen system use for 2D rigid body dynamics?\nAnswer:", + " Pymunk" + ], + [ + "Question:What simulator does the PhysGen system use for 2D rigid body dynamics?\nAnswer:", + " PhysX" + ] + ], + "resps": [ + [ + [ + -6.650575160980225, + false + ] + ], + [ + [ + -8.297380447387695, + false + ] + ], + [ + [ + -6.8455810546875, + false + ] + ], + [ + [ + -8.160148620605469, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.650575160980225, + false + ], + [ + -8.297380447387695, + false + ], + [ + -6.8455810546875, + false + ], + [ + -8.160148620605469, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "e89640744aa137f59ddd183fee7a69b64a815e0ea1445e6748f305d2f2d35874", + "prompt_hash": "1babfd91f4788bc86342e6f0f603c60b26327f96ac47d8dcd91b228980968ac0", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 96, + "doc": { + "question": "What is the resolution used for video generation in the experiments?", + "choices": [ + "256 × 256", + "768 × 768", + "1024 × 1024", + "512 × 512" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What is the resolution used for video generation in the experiments?\nAnswer:", + " 256 × 256" + ], + [ + "Question:What is the resolution used for video generation in the experiments?\nAnswer:", + " 768 × 768" + ], + [ + "Question:What is the resolution used for video generation in the experiments?\nAnswer:", + " 1024 × 1024" + ], + [ + "Question:What is the resolution used for video generation in the experiments?\nAnswer:", + " 512 × 512" + ] + ], + "resps": [ + [ + [ + -7.873867511749268, + false + ] + ], + [ + [ + -10.801594734191895, + false + ] + ], + [ + [ + -8.7615966796875, + false + ] + ], + [ + [ + -8.544112205505371, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.873867511749268, + false + ], + [ + -10.801594734191895, + false + ], + [ + -8.7615966796875, + false + ], + [ + -8.544112205505371, + false + ] + ], + 
"filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "c1f9aac27b758c6c9bbe16854de2f0da20a0063ce56679800a51b8a47ec0a5de", + "prompt_hash": "98db683fc1ff393d1165bf42f2254a7124c024e7a9fbb969eac3f5662de74532", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 97, + "doc": { + "question": "What metric is used to evaluate image appearance in the quantitative evaluation?", + "choices": [ + "Fréchet Video Distance", + "Peak Signal-to-Noise Ratio", + "Image-FID", + "SSIM" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What metric is used to evaluate image appearance in the quantitative evaluation?\nAnswer:", + " Fréchet Video Distance" + ], + [ + "Question:What metric is used to evaluate image appearance in the quantitative evaluation?\nAnswer:", + " Peak Signal-to-Noise Ratio" + ], + [ + "Question:What metric is used to evaluate image appearance in the quantitative evaluation?\nAnswer:", + " Image-FID" + ], + [ + "Question:What metric is used to evaluate image appearance in the quantitative evaluation?\nAnswer:", + " SSIM" + ] + ], + "resps": [ + [ + [ + -14.589303016662598, + false + ] + ], + [ + [ + -7.220948219299316, + false + ] + ], + [ + [ + -19.263450622558594, + false + ] + ], + [ + [ + -6.307013034820557, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.589303016662598, + false + ], + [ + -7.220948219299316, + false + ], + [ + -19.263450622558594, + false + ], + [ + -6.307013034820557, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b2c8febd871fe21d664c54b2885582a829ee64ca1ee70a739f09058a533511f8", + "prompt_hash": "417d34dd90cf897e51f620454efe10802be46cc668007e267dd4afa55052b0d9", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 98, + "doc": { + "question": "How many human participants were involved in the physical-realism and photo-realism evaluation?", + "choices": [ + "10", + "12", + "14", + "20" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many human participants were involved in the physical-realism and photo-realism evaluation?\nAnswer:", + " 10" + ], + [ + "Question:How many human participants were involved in the physical-realism and photo-realism evaluation?\nAnswer:", + " 12" + ], + [ + "Question:How many human participants were involved in the physical-realism and photo-realism evaluation?\nAnswer:", + " 14" + ], + [ + "Question:How many human participants were involved in the physical-realism and photo-realism evaluation?\nAnswer:", + " 20" + ] + ], + "resps": [ + [ + [ + -4.0728678703308105, + true + ] + ], + [ + [ + -4.49506139755249, + false + ] + ], + [ + [ + -5.605117321014404, + false + ] + ], + [ + [ + -4.566987037658691, + false + ] + ] + ], + "filtered_resps": [ + [ + -4.0728678703308105, + true + ], + [ + -4.49506139755249, + false + ], + [ + -5.605117321014404, + false + ], + [ + -4.566987037658691, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "2e057fc17dce303739e665d9ffdd88f0cb75faaee88f577b0132ad378807aa61", + "prompt_hash": "0b4f201db05858d3bae3b02a7724eeb3f7961656c58f1ad4fc98cf12ff486654", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 99, + "doc": { + "question": "Which model is used to estimate surface normals for intrinsic 
decomposition?", + "choices": [ + "OmniData", + "GeoWizard", + "DINOv2", + "SAM" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which model is used to estimate surface normals for intrinsic decomposition?\nAnswer:", + " OmniData" + ], + [ + "Question:Which model is used to estimate surface normals for intrinsic decomposition?\nAnswer:", + " GeoWizard" + ], + [ + "Question:Which model is used to estimate surface normals for intrinsic decomposition?\nAnswer:", + " DINOv2" + ], + [ + "Question:Which model is used to estimate surface normals for intrinsic decomposition?\nAnswer:", + " SAM" + ] + ], + "resps": [ + [ + [ + -20.465606689453125, + false + ] + ], + [ + [ + -20.510021209716797, + false + ] + ], + [ + [ + -10.1165771484375, + false + ] + ], + [ + [ + -10.176116943359375, + false + ] + ] + ], + "filtered_resps": [ + [ + -20.465606689453125, + false + ], + [ + -20.510021209716797, + false + ], + [ + -10.1165771484375, + false + ], + [ + -10.176116943359375, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "be853caa9fc164305d79a18a3a24573ea1963c2fb005183ae8ce413eebebab46", + "prompt_hash": "28f78e448df786530b10b8c768afc1db85a16371e1d6f722e67ba3779c48fb82", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + } + ], + "arxiv_mc_2410": [ + { + "doc_id": 0, + "doc": { + "question": "What are the three main phases of the TECH approach for fixing software issues?", + "choices": [ + "Planning, Execution, Review", + "Localization, Repair, Patch Validation", + "Input Parsing, Compilation, Testing", + "Scanning, Refactoring, Deployment" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What are the three main phases of the TECH approach for fixing software issues?\nAnswer:", + " Planning, Execution, Review" + ], + [ + "Question:What are the three main phases of the TECH approach for fixing software issues?\nAnswer:", + " Localization, Repair, Patch Validation" + ], + [ + "Question:What are the three main phases of the TECH approach for fixing software issues?\nAnswer:", + " Input Parsing, Compilation, Testing" + ], + [ + "Question:What are the three main phases of the TECH approach for fixing software issues?\nAnswer:", + " Scanning, Refactoring, Deployment" + ] + ], + "resps": [ + [ + [ + -11.142742156982422, + false + ] + ], + [ + [ + -31.907644271850586, + false + ] + ], + [ + [ + -28.914268493652344, + false + ] + ], + [ + [ + -23.669246673583984, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.142742156982422, + false + ], + [ + -31.907644271850586, + false + ], + [ + -28.914268493652344, + false + ], + [ + -23.669246673583984, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "8f30c31f2913f7a7945de0d4dc3242e4f453933de09ccf31eb6e210083d748c4", + "prompt_hash": "d785864492154380bf1f9f3e96c251bc75978f55738006395216d3242e45aff9", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 1, + "doc": { + "question": "What format does TECH use to concisely represent a repository's file and directory structure?", + "choices": [ + "Markdown format", + "Skeleton format", + "Tree-like structure called repostructureformat", + "JSON representation" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What format does TECH use to concisely represent a repository's file and directory structure?\nAnswer:", 
+ " Markdown format" + ], + [ + "Question:What format does TECH use to concisely represent a repository's file and directory structure?\nAnswer:", + " Skeleton format" + ], + [ + "Question:What format does TECH use to concisely represent a repository's file and directory structure?\nAnswer:", + " Tree-like structure called repostructureformat" + ], + [ + "Question:What format does TECH use to concisely represent a repository's file and directory structure?\nAnswer:", + " JSON representation" + ] + ], + "resps": [ + [ + [ + -14.080405235290527, + false + ] + ], + [ + [ + -19.98662567138672, + false + ] + ], + [ + [ + -49.63202667236328, + false + ] + ], + [ + [ + -11.538562774658203, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.080405235290527, + false + ], + [ + -19.98662567138672, + false + ], + [ + -49.63202667236328, + false + ], + [ + -11.538562774658203, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "61bf0927a6d4fb046ae2fe9787c168288efc1338d0565d93b1e933f503142de9", + "prompt_hash": "c2e4f29b8ab34d9218f5de34f173a908265aa157bce548d6b3e07479e957b9c3", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 2, + "doc": { + "question": "What type of test does TECH synthesize to verify whether an issue has been fixed?", + "choices": [ + "Regression test", + "Smoke test", + "Reproduction test", + "Integration test" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What type of test does TECH synthesize to verify whether an issue has been fixed?\nAnswer:", + " Regression test" + ], + [ + "Question:What type of test does TECH synthesize to verify whether an issue has been fixed?\nAnswer:", + " Smoke test" + ], + [ + "Question:What type of test does TECH synthesize to verify whether an issue has been fixed?\nAnswer:", + " Reproduction test" + ], + [ + "Question:What type of test does TECH synthesize to verify whether an issue has been fixed?\nAnswer:", + " Integration test" + ] + ], + "resps": [ + [ + [ + -9.417646408081055, + false + ] + ], + [ + [ + -14.601350784301758, + false + ] + ], + [ + [ + -12.118673324584961, + false + ] + ], + [ + [ + -10.362127304077148, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.417646408081055, + false + ], + [ + -14.601350784301758, + false + ], + [ + -12.118673324584961, + false + ], + [ + -10.362127304077148, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "1f4bf8b5c4ff2a11454e56e263d85156cae6700871a341481e534898e5687dff", + "prompt_hash": "023e52b0faef56033cb02985f46f356e563fb5058f7ea12bade09233d6921c5e", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 3, + "doc": { + "question": "What diff format does TECH use during the repair phase to apply patches?", + "choices": [ + "Context-based patch", + "Unified diff format", + "Search/Replace edit format", + "Reverse diff format" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What diff format does TECH use during the repair phase to apply patches?\nAnswer:", + " Context-based patch" + ], + [ + "Question:What diff format does TECH use during the repair phase to apply patches?\nAnswer:", + " Unified diff format" + ], + [ + "Question:What diff format does TECH use during the repair phase to apply patches?\nAnswer:", + " Search/Replace edit format" + ], + [ + "Question:What diff format does TECH 
use during the repair phase to apply patches?\nAnswer:", + " Reverse diff format" + ] + ], + "resps": [ + [ + [ + -18.5589599609375, + false + ] + ], + [ + [ + -16.782909393310547, + false + ] + ], + [ + [ + -37.64253616333008, + false + ] + ], + [ + [ + -20.763954162597656, + false + ] + ] + ], + "filtered_resps": [ + [ + -18.5589599609375, + false + ], + [ + -16.782909393310547, + false + ], + [ + -37.64253616333008, + false + ], + [ + -20.763954162597656, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f2c5708f345133a398dfca01d690074eb387df7eb91fb481c8f5c47295e2e96e", + "prompt_hash": "79bfec124e28d4bf05de9556619b459d58f71b627bcd66101040b5c37e5b58a0", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 4, + "doc": { + "question": "Which benchmark did OpenAI release to address concerns about problem quality in SWE-bench Lite?", + "choices": [ + "SWE-bench Pro", + "SWE-bench Alpha", + "SWE-bench Verified", + "SWE-bench Debug" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which benchmark did OpenAI release to address concerns about problem quality in SWE-bench Lite?\nAnswer:", + " SWE-bench Pro" + ], + [ + "Question:Which benchmark did OpenAI release to address concerns about problem quality in SWE-bench Lite?\nAnswer:", + " SWE-bench Alpha" + ], + [ + "Question:Which benchmark did OpenAI release to address concerns about problem quality in SWE-bench Lite?\nAnswer:", + " SWE-bench Verified" + ], + [ + "Question:Which benchmark did OpenAI release to address concerns about problem quality in SWE-bench Lite?\nAnswer:", + " SWE-bench Debug" + ] + ], + "resps": [ + [ + [ + -11.525687217712402, + false + ] + ], + [ + [ + -13.013885498046875, + false + ] + ], + [ + [ + -15.520681381225586, + false + ] + ], + [ + [ + -14.712057113647461, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.525687217712402, + false + ], + [ + -13.013885498046875, + false + ], + [ + -15.520681381225586, + false + ], + [ + -14.712057113647461, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "55486ad2ef065f2cbbcca4b5038e34a1a0b0d244820634cea34a7dc17c5da3b5", + "prompt_hash": "aca54400c5b3ff7d1572b9cc03a80d7966fd32951ea88e9eca8558bdaa3734c3", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 5, + "doc": { + "question": "What type of image encoder does PaliGemma use?", + "choices": [ + "A. CLIP", + "B. SigLIP", + "C. BLIP-2", + "D. Flamingo" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What type of image encoder does PaliGemma use?\nAnswer:", + " A. CLIP" + ], + [ + "Question:What type of image encoder does PaliGemma use?\nAnswer:", + " B. SigLIP" + ], + [ + "Question:What type of image encoder does PaliGemma use?\nAnswer:", + " C. BLIP-2" + ], + [ + "Question:What type of image encoder does PaliGemma use?\nAnswer:", + " D. 
Flamingo" + ] + ], + "resps": [ + [ + [ + -9.815264701843262, + false + ] + ], + [ + [ + -21.23035430908203, + false + ] + ], + [ + [ + -16.70880126953125, + false + ] + ], + [ + [ + -20.90701675415039, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.815264701843262, + false + ], + [ + -21.23035430908203, + false + ], + [ + -16.70880126953125, + false + ], + [ + -20.90701675415039, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "faf0292756db6246476c88adf99a11951deb3b1c6118964ba5bd87de03a8fc52", + "prompt_hash": "a43e47ae912a3783d89098b4568202ae426e79d14917bf43d0505e67195ab643", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 6, + "doc": { + "question": "How many new tokens were added to support PaliGemma’s structured computer vision tasks?", + "choices": [ + "A. 1024", + "B. 128", + "C. 1152", + "D. 2048" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many new tokens were added to support PaliGemma’s structured computer vision tasks?\nAnswer:", + " A. 1024" + ], + [ + "Question:How many new tokens were added to support PaliGemma’s structured computer vision tasks?\nAnswer:", + " B. 128" + ], + [ + "Question:How many new tokens were added to support PaliGemma’s structured computer vision tasks?\nAnswer:", + " C. 1152" + ], + [ + "Question:How many new tokens were added to support PaliGemma’s structured computer vision tasks?\nAnswer:", + " D. 2048" + ] + ], + "resps": [ + [ + [ + -17.1997013092041, + false + ] + ], + [ + [ + -19.620742797851562, + false + ] + ], + [ + [ + -23.257474899291992, + false + ] + ], + [ + [ + -23.265338897705078, + false + ] + ] + ], + "filtered_resps": [ + [ + -17.1997013092041, + false + ], + [ + -19.620742797851562, + false + ], + [ + -23.257474899291992, + false + ], + [ + -23.265338897705078, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b86bf0c3962ea72631ecfbb84ea987beeb0eea7dec02c015c5666bd94d1e99ef", + "prompt_hash": "6a3f272c72a41669cf58da1d9c24c62d760682550711922ffbd1ffbb7289b811", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 7, + "doc": { + "question": "What resolution does PaliGemma use during Stage 1 pretraining?", + "choices": [ + "A. 224px", + "B. 448px", + "C. 896px", + "D. 1024px" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What resolution does PaliGemma use during Stage 1 pretraining?\nAnswer:", + " A. 224px" + ], + [ + "Question:What resolution does PaliGemma use during Stage 1 pretraining?\nAnswer:", + " B. 448px" + ], + [ + "Question:What resolution does PaliGemma use during Stage 1 pretraining?\nAnswer:", + " C. 896px" + ], + [ + "Question:What resolution does PaliGemma use during Stage 1 pretraining?\nAnswer:", + " D. 
1024px" + ] + ], + "resps": [ + [ + [ + -18.129924774169922, + false + ] + ], + [ + [ + -24.31297492980957, + false + ] + ], + [ + [ + -24.918224334716797, + false + ] + ], + [ + [ + -21.301815032958984, + false + ] + ] + ], + "filtered_resps": [ + [ + -18.129924774169922, + false + ], + [ + -24.31297492980957, + false + ], + [ + -24.918224334716797, + false + ], + [ + -21.301815032958984, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b998212e8cc1fb9f15a5fb0fc5d89b81af35494c6ac9464feb23ec42ff9de43e", + "prompt_hash": "33be460a43eec891167ef62e7ecffcb88a69dcf9f4538a1bafd197a9dd31f241", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 8, + "doc": { + "question": "Which language model does PaliGemma use as its decoder?", + "choices": [ + "A. Gemma-7B", + "B. PaLM-E", + "C. Gemma-2B", + "D. mT5" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which language model does PaliGemma use as its decoder?\nAnswer:", + " A. Gemma-7B" + ], + [ + "Question:Which language model does PaliGemma use as its decoder?\nAnswer:", + " B. PaLM-E" + ], + [ + "Question:Which language model does PaliGemma use as its decoder?\nAnswer:", + " C. Gemma-2B" + ], + [ + "Question:Which language model does PaliGemma use as its decoder?\nAnswer:", + " D. mT5" + ] + ], + "resps": [ + [ + [ + -13.4105863571167, + false + ] + ], + [ + [ + -25.003814697265625, + false + ] + ], + [ + [ + -18.639402389526367, + false + ] + ], + [ + [ + -19.03719139099121, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.4105863571167, + false + ], + [ + -25.003814697265625, + false + ], + [ + -18.639402389526367, + false + ], + [ + -19.03719139099121, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "8ea584d27437c754402c4878ff8038a7b28b01ee7f36e783ec652978aed23dee", + "prompt_hash": "e8dd45cd5bb9e2e28d6ae138f4fe19b7ae51a6ff9b895ae462dba64b3f5cda97", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 9, + "doc": { + "question": "What kind of connector is used between SigLIP and Gemma in PaliGemma?", + "choices": [ + "A. MLP with attention", + "B. Q-Former", + "C. Linear projection", + "D. Convolutional adapter" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What kind of connector is used between SigLIP and Gemma in PaliGemma?\nAnswer:", + " A. MLP with attention" + ], + [ + "Question:What kind of connector is used between SigLIP and Gemma in PaliGemma?\nAnswer:", + " B. Q-Former" + ], + [ + "Question:What kind of connector is used between SigLIP and Gemma in PaliGemma?\nAnswer:", + " C. Linear projection" + ], + [ + "Question:What kind of connector is used between SigLIP and Gemma in PaliGemma?\nAnswer:", + " D. 
Convolutional adapter" + ] + ], + "resps": [ + [ + [ + -24.83165740966797, + false + ] + ], + [ + [ + -24.223936080932617, + false + ] + ], + [ + [ + -24.006492614746094, + false + ] + ], + [ + [ + -31.108749389648438, + false + ] + ] + ], + "filtered_resps": [ + [ + -24.83165740966797, + false + ], + [ + -24.223936080932617, + false + ], + [ + -24.006492614746094, + false + ], + [ + -31.108749389648438, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "c3b705e13367b24fe44ddee6efc65b4f95aee365c48c1376151412963b520013", + "prompt_hash": "373ffce3f8709c674ae833152b3ed551fb570bec602b3187cee3b62910992334", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 10, + "doc": { + "question": "What is the size of the vocabulary used in the Gemma 2 models?", + "choices": [ + "128k", + "256k", + "512k", + "64k" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the size of the vocabulary used in the Gemma 2 models?\nAnswer:", + " 128k" + ], + [ + "Question:What is the size of the vocabulary used in the Gemma 2 models?\nAnswer:", + " 256k" + ], + [ + "Question:What is the size of the vocabulary used in the Gemma 2 models?\nAnswer:", + " 512k" + ], + [ + "Question:What is the size of the vocabulary used in the Gemma 2 models?\nAnswer:", + " 64k" + ] + ], + "resps": [ + [ + [ + -7.427914619445801, + false + ] + ], + [ + [ + -8.645819664001465, + false + ] + ], + [ + [ + -9.469427108764648, + false + ] + ], + [ + [ + -7.870913505554199, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.427914619445801, + false + ], + [ + -8.645819664001465, + false + ], + [ + -9.469427108764648, + false + ], + [ + -7.870913505554199, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "5b9d9b32959d1bd9a3838b7e692d123eb3256ed4d79f1b5f6341ab4833443bec", + "prompt_hash": "c21c35818306c97b4e6bc5c857e0494f9b0ab1ca1aac07486760d842b4679594", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 11, + "doc": { + "question": "What non-linearity function is used in all Gemma 2 models?", + "choices": [ + "ReLU", + "Swish", + "GeGLU", + "Sigmoid" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What non-linearity function is used in all Gemma 2 models?\nAnswer:", + " ReLU" + ], + [ + "Question:What non-linearity function is used in all Gemma 2 models?\nAnswer:", + " Swish" + ], + [ + "Question:What non-linearity function is used in all Gemma 2 models?\nAnswer:", + " GeGLU" + ], + [ + "Question:What non-linearity function is used in all Gemma 2 models?\nAnswer:", + " Sigmoid" + ] + ], + "resps": [ + [ + [ + -4.497497081756592, + false + ] + ], + [ + [ + -9.737420082092285, + false + ] + ], + [ + [ + -24.105804443359375, + false + ] + ], + [ + [ + -4.997573375701904, + false + ] + ] + ], + "filtered_resps": [ + [ + -4.497497081756592, + false + ], + [ + -9.737420082092285, + false + ], + [ + -24.105804443359375, + false + ], + [ + -4.997573375701904, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "0b8e6c5eb1b5746b793ab356c9c33eecaa945b7e5f7a27fe29d9ff7058c46623", + "prompt_hash": "b2f86be38f5913ba3b9cef331d432f708e5a912ecb9ea64dc3dd2fe675a3ba25", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 12, + 
"doc": { + "question": "Which mechanism does Gemma 2 use to improve inference speed while maintaining downstream performance?", + "choices": [ + "Multi-Head Attention", + "Rotary Position Embedding", + "Grouped-Query Attention", + "Sparse Transformer" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which mechanism does Gemma 2 use to improve inference speed while maintaining downstream performance?\nAnswer:", + " Multi-Head Attention" + ], + [ + "Question:Which mechanism does Gemma 2 use to improve inference speed while maintaining downstream performance?\nAnswer:", + " Rotary Position Embedding" + ], + [ + "Question:Which mechanism does Gemma 2 use to improve inference speed while maintaining downstream performance?\nAnswer:", + " Grouped-Query Attention" + ], + [ + "Question:Which mechanism does Gemma 2 use to improve inference speed while maintaining downstream performance?\nAnswer:", + " Sparse Transformer" + ] + ], + "resps": [ + [ + [ + -15.394296646118164, + false + ] + ], + [ + [ + -20.19460678100586, + false + ] + ], + [ + [ + -15.833362579345703, + false + ] + ], + [ + [ + -17.08722686767578, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.394296646118164, + false + ], + [ + -20.19460678100586, + false + ], + [ + -15.833362579345703, + false + ], + [ + -17.08722686767578, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "785e76f9c2316a0ed775048f820bed3227d85f88e8e34ccc1792f82dce848c38", + "prompt_hash": "461d52f64e24115c0807730ff58a90a5c0bdbaed6f3fe71b3b9907b17dea6b4e", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 13, + "doc": { + "question": "How many tokens was the Gemma 2 27B model trained on?", + "choices": [ + "3 trillion", + "8 trillion", + "13 trillion", + "15 trillion" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many tokens was the Gemma 2 27B model trained on?\nAnswer:", + " 3 trillion" + ], + [ + "Question:How many tokens was the Gemma 2 27B model trained on?\nAnswer:", + " 8 trillion" + ], + [ + "Question:How many tokens was the Gemma 2 27B model trained on?\nAnswer:", + " 13 trillion" + ], + [ + "Question:How many tokens was the Gemma 2 27B model trained on?\nAnswer:", + " 15 trillion" + ] + ], + "resps": [ + [ + [ + -10.128683090209961, + false + ] + ], + [ + [ + -12.361979484558105, + false + ] + ], + [ + [ + -12.137228012084961, + false + ] + ], + [ + [ + -12.41441822052002, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.128683090209961, + false + ], + [ + -12.361979484558105, + false + ], + [ + -12.137228012084961, + false + ], + [ + -12.41441822052002, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7eb7aedba0cadb9438df3db05265fd3b52d42e554ef64a857b9b1b71a4218dcc", + "prompt_hash": "f0593255989772c2c18a7e5a6b143ae507e838dcfe8a0b3b5200723bd40eba70", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 14, + "doc": { + "question": "What soft cap value is used for logits in the final layer of Gemma 2 models?", + "choices": [ + "10.0", + "30.0", + "50.0", + "100.0" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What soft cap value is used for logits in the final layer of Gemma 2 models?\nAnswer:", + " 10.0" + ], + [ + "Question:What soft cap value is used for logits in the final layer of Gemma 2 models?\nAnswer:", 
+ " 30.0" + ], + [ + "Question:What soft cap value is used for logits in the final layer of Gemma 2 models?\nAnswer:", + " 50.0" + ], + [ + "Question:What soft cap value is used for logits in the final layer of Gemma 2 models?\nAnswer:", + " 100.0" + ] + ], + "resps": [ + [ + [ + -6.458868026733398, + false + ] + ], + [ + [ + -8.575843811035156, + false + ] + ], + [ + [ + -8.410329818725586, + false + ] + ], + [ + [ + -8.538220405578613, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.458868026733398, + false + ], + [ + -8.575843811035156, + false + ], + [ + -8.410329818725586, + false + ], + [ + -8.538220405578613, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "95dc03314caef85c92c77964e354165d2854429f6ba048362ff298af844c0c30", + "prompt_hash": "02aba881c3e2b1813dc814c5c6f5e4b4af64d6f6de73eda92da54299c1a38d22", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 15, + "doc": { + "question": "Which language model is used as the LLM in the architecture?", + "choices": [ + "Qwen-2", + "GPT-3.5", + "LLaMA-2", + "BLOOM" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which language model is used as the LLM in the architecture?\nAnswer:", + " Qwen-2" + ], + [ + "Question:Which language model is used as the LLM in the architecture?\nAnswer:", + " GPT-3.5" + ], + [ + "Question:Which language model is used as the LLM in the architecture?\nAnswer:", + " LLaMA-2" + ], + [ + "Question:Which language model is used as the LLM in the architecture?\nAnswer:", + " BLOOM" + ] + ], + "resps": [ + [ + [ + -10.288427352905273, + false + ] + ], + [ + [ + -4.0216217041015625, + false + ] + ], + [ + [ + -8.288175582885742, + false + ] + ], + [ + [ + -7.530017375946045, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.288427352905273, + false + ], + [ + -4.0216217041015625, + false + ], + [ + -8.288175582885742, + false + ], + [ + -7.530017375946045, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "763e2d4590d8631d47a491547a5c25bc18e8897ebad0deb508b89de5df974438", + "prompt_hash": "a251e3cc0a376c8158837560ea2b3cbf69857501397f91508554761288bf785f", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 16, + "doc": { + "question": "What visual encoder is used to generate visual features from images?", + "choices": [ + "CLIP", + "SigLIP", + "ViT-B/32", + "BLIP-2" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What visual encoder is used to generate visual features from images?\nAnswer:", + " CLIP" + ], + [ + "Question:What visual encoder is used to generate visual features from images?\nAnswer:", + " SigLIP" + ], + [ + "Question:What visual encoder is used to generate visual features from images?\nAnswer:", + " ViT-B/32" + ], + [ + "Question:What visual encoder is used to generate visual features from images?\nAnswer:", + " BLIP-2" + ] + ], + "resps": [ + [ + [ + -4.400332927703857, + false + ] + ], + [ + [ + -12.483972549438477, + false + ] + ], + [ + [ + -12.72900390625, + false + ] + ], + [ + [ + -9.399051666259766, + false + ] + ] + ], + "filtered_resps": [ + [ + -4.400332927703857, + false + ], + [ + -12.483972549438477, + false + ], + [ + -12.72900390625, + false + ], + [ + -9.399051666259766, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": 
"1fed2d0f3f806f0ede4ac3dcd2e24da5af46bbe8c681c54f27c660efdae75d64", + "prompt_hash": "76a68c139bf7c017822524c8f45b702cf3e64b99258e8479dcefddc1bcfb3aae", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 17, + "doc": { + "question": "How many samples are included in the Re-Captioned Detailed Description Data?", + "choices": [ + "2.1M", + "3.5M", + "5.0M", + "1.8M" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:How many samples are included in the Re-Captioned Detailed Description Data?\nAnswer:", + " 2.1M" + ], + [ + "Question:How many samples are included in the Re-Captioned Detailed Description Data?\nAnswer:", + " 3.5M" + ], + [ + "Question:How many samples are included in the Re-Captioned Detailed Description Data?\nAnswer:", + " 5.0M" + ], + [ + "Question:How many samples are included in the Re-Captioned Detailed Description Data?\nAnswer:", + " 1.8M" + ] + ], + "resps": [ + [ + [ + -12.48883056640625, + false + ] + ], + [ + [ + -12.085700988769531, + false + ] + ], + [ + [ + -13.646947860717773, + false + ] + ], + [ + [ + -11.971969604492188, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.48883056640625, + false + ], + [ + -12.085700988769531, + false + ], + [ + -13.646947860717773, + false + ], + [ + -11.971969604492188, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "432a3bf5c6810f0e0bba9d1541cb9434bfb62e6a117cdf56357d718e4ba521a0", + "prompt_hash": "2f9de16b807907efad345b576e9fce196e96e0b258d7e05eae7be249c75cc7ab", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 18, + "doc": { + "question": "What is the maximum number of visual tokens used in Stage-2 of training?", + "choices": [ + "729", + "729 × 5", + "729 × 10", + "729 × 2" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the maximum number of visual tokens used in Stage-2 of training?\nAnswer:", + " 729" + ], + [ + "Question:What is the maximum number of visual tokens used in Stage-2 of training?\nAnswer:", + " 729 × 5" + ], + [ + "Question:What is the maximum number of visual tokens used in Stage-2 of training?\nAnswer:", + " 729 × 10" + ], + [ + "Question:What is the maximum number of visual tokens used in Stage-2 of training?\nAnswer:", + " 729 × 2" + ] + ], + "resps": [ + [ + [ + -9.973566055297852, + false + ] + ], + [ + [ + -23.389095306396484, + false + ] + ], + [ + [ + -22.504749298095703, + false + ] + ], + [ + [ + -22.001413345336914, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.973566055297852, + false + ], + [ + -23.389095306396484, + false + ], + [ + -22.504749298095703, + false + ], + [ + -22.001413345336914, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "2bc24be57e51015d15ac5e3e51f61ed93196c209592ae538e612d4f932ea7920", + "prompt_hash": "b48df395d7a06051c3af142362fcfe394f8cfea66edbf90cf90c664dd7904b14", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 19, + "doc": { + "question": "Which dataset was used to generate 92K detailed Chinese caption samples?", + "choices": [ + "COCO", + "ShareGPT4V", + "SynDOG EN", + "Evo-Instruct" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which dataset was used to generate 92K detailed Chinese caption samples?\nAnswer:", + " COCO" + 
], + [ + "Question:Which dataset was used to generate 92K detailed Chinese caption samples?\nAnswer:", + " ShareGPT4V" + ], + [ + "Question:Which dataset was used to generate 92K detailed Chinese caption samples?\nAnswer:", + " SynDOG EN" + ], + [ + "Question:Which dataset was used to generate 92K detailed Chinese caption samples?\nAnswer:", + " Evo-Instruct" + ] + ], + "resps": [ + [ + [ + -7.290433406829834, + false + ] + ], + [ + [ + -12.020796775817871, + false + ] + ], + [ + [ + -36.19415283203125, + false + ] + ], + [ + [ + -20.265243530273438, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.290433406829834, + false + ], + [ + -12.020796775817871, + false + ], + [ + -36.19415283203125, + false + ], + [ + -20.265243530273438, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "c48e39a64b13161ee1bea7e39ea13c045f0e59c4f0a902a9841855dd286bcb0f", + "prompt_hash": "29c6ecd1445476dff678dd07862a504e9a47079055c8023ffbad73b57364430d", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 20, + "doc": { + "question": "What is the primary reason for the latency in the pre-filling stage of long-context LLMs?", + "choices": [ + "Model initialization", + "Token decoding speed", + "Self-attention computation overhead", + "Memory swapping between GPUs" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the primary reason for the latency in the pre-filling stage of long-context LLMs?\nAnswer:", + " Model initialization" + ], + [ + "Question:What is the primary reason for the latency in the pre-filling stage of long-context LLMs?\nAnswer:", + " Token decoding speed" + ], + [ + "Question:What is the primary reason for the latency in the pre-filling stage of long-context LLMs?\nAnswer:", + " Self-attention computation overhead" + ], + [ + "Question:What is the primary reason for the latency in the pre-filling stage of long-context LLMs?\nAnswer:", + " Memory swapping between GPUs" + ] + ], + "resps": [ + [ + [ + -11.29690170288086, + false + ] + ], + [ + [ + -21.47367286682129, + false + ] + ], + [ + [ + -19.235584259033203, + false + ] + ], + [ + [ + -26.47637367248535, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.29690170288086, + false + ], + [ + -21.47367286682129, + false + ], + [ + -19.235584259033203, + false + ], + [ + -26.47637367248535, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "c35537323672d6352eef7842a5fdcd331b963c254c0b1e10fc871a1b0979fa8a", + "prompt_hash": "50c6e338256e19846b03fddb143a281480b7c983a1f11d369bd847a728c3f144", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 21, + "doc": { + "question": "What recall percentage is achieved when only the top 4k columns are retained in a 128k attention matrix?", + "choices": [ + "89.3%", + "92.5%", + "96.8%", + "99.1%" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What recall percentage is achieved when only the top 4k columns are retained in a 128k attention matrix?\nAnswer:", + " 89.3%" + ], + [ + "Question:What recall percentage is achieved when only the top 4k columns are retained in a 128k attention matrix?\nAnswer:", + " 92.5%" + ], + [ + "Question:What recall percentage is achieved when only the top 4k columns are retained in a 128k attention matrix?\nAnswer:", + " 96.8%" + ], + [ + "Question:What recall 
percentage is achieved when only the top 4k columns are retained in a 128k attention matrix?\nAnswer:", + " 99.1%" + ] + ], + "resps": [ + [ + [ + -14.330986022949219, + false + ] + ], + [ + [ + -13.229812622070312, + false + ] + ], + [ + [ + -13.767045974731445, + false + ] + ], + [ + [ + -14.044456481933594, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.330986022949219, + false + ], + [ + -13.229812622070312, + false + ], + [ + -13.767045974731445, + false + ], + [ + -14.044456481933594, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "5438925b93c9e52efeef96c678d215f83f32bbc7b162644c312d3fd33139500b", + "prompt_hash": "0ddd6c551b6b0dc2c533e3a6f6d1db7231e1c899b2128d3c6b8f18ca58fda0e8", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 22, + "doc": { + "question": "Which attention pattern is characterized by tokens focusing on initial tokens and local windows?", + "choices": [ + "Block-Sparse pattern", + "Vertical-Slash pattern", + "A-shape pattern", + "Global-Context pattern" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which attention pattern is characterized by tokens focusing on initial tokens and local windows?\nAnswer:", + " Block-Sparse pattern" + ], + [ + "Question:Which attention pattern is characterized by tokens focusing on initial tokens and local windows?\nAnswer:", + " Vertical-Slash pattern" + ], + [ + "Question:Which attention pattern is characterized by tokens focusing on initial tokens and local windows?\nAnswer:", + " A-shape pattern" + ], + [ + "Question:Which attention pattern is characterized by tokens focusing on initial tokens and local windows?\nAnswer:", + " Global-Context pattern" + ] + ], + "resps": [ + [ + [ + -28.077165603637695, + false + ] + ], + [ + [ + -36.12141418457031, + false + ] + ], + [ + [ + -24.157867431640625, + false + ] + ], + [ + [ + -23.051002502441406, + false + ] + ] + ], + "filtered_resps": [ + [ + -28.077165603637695, + false + ], + [ + -36.12141418457031, + false + ], + [ + -24.157867431640625, + false + ], + [ + -23.051002502441406, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7a0b7e981e36e32875a92973f2a7923dd3b684ec424f172722d92441aac32bf0", + "prompt_hash": "fe87432365ef44cd6f6feca83292c45aeb4e6405c58e10d3f0256bf28770f6d3", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 23, + "doc": { + "question": "What is the reported speedup using MInference on 1M-token prompts with LLaMA-3-8B on a single A100 GPU?", + "choices": [ + "Up to 3×", + "Up to 5×", + "Up to 8×", + "Up to 10×" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What is the reported speedup using MInference on 1M-token prompts with LLaMA-3-8B on a single A100 GPU?\nAnswer:", + " Up to 3×" + ], + [ + "Question:What is the reported speedup using MInference on 1M-token prompts with LLaMA-3-8B on a single A100 GPU?\nAnswer:", + " Up to 5×" + ], + [ + "Question:What is the reported speedup using MInference on 1M-token prompts with LLaMA-3-8B on a single A100 GPU?\nAnswer:", + " Up to 8×" + ], + [ + "Question:What is the reported speedup using MInference on 1M-token prompts with LLaMA-3-8B on a single A100 GPU?\nAnswer:", + " Up to 10×" + ] + ], + "resps": [ + [ + [ + -18.81794548034668, + false + ] + ], + [ + [ + -19.063770294189453, + false + ] + ], + [ + [ 
+ -18.887496948242188, + false + ] + ], + [ + [ + -18.622156143188477, + false + ] + ] + ], + "filtered_resps": [ + [ + -18.81794548034668, + false + ], + [ + -19.063770294189453, + false + ], + [ + -18.887496948242188, + false + ], + [ + -18.622156143188477, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "be1003f886be358b8d164b50e52a8632a02e9892a114e08f9b2a19a29a6ea779", + "prompt_hash": "493f78c96da1d5cb7247238dd51eec7334cc3d4421166683f854892037fc3bbd", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 24, + "doc": { + "question": "Which method is used in MInference to approximate sparse indices for Block-Sparse heads?", + "choices": [ + "Top-K ranking of global tokens", + "Mean pooling of query and key vectors", + "Nearest-neighbor clustering", + "Random subsampling of attention weights" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which method is used in MInference to approximate sparse indices for Block-Sparse heads?\nAnswer:", + " Top-K ranking of global tokens" + ], + [ + "Question:Which method is used in MInference to approximate sparse indices for Block-Sparse heads?\nAnswer:", + " Mean pooling of query and key vectors" + ], + [ + "Question:Which method is used in MInference to approximate sparse indices for Block-Sparse heads?\nAnswer:", + " Nearest-neighbor clustering" + ], + [ + "Question:Which method is used in MInference to approximate sparse indices for Block-Sparse heads?\nAnswer:", + " Random subsampling of attention weights" + ] + ], + "resps": [ + [ + [ + -31.907312393188477, + false + ] + ], + [ + [ + -30.570072174072266, + false + ] + ], + [ + [ + -22.7277889251709, + false + ] + ], + [ + [ + -26.002704620361328, + false + ] + ] + ], + "filtered_resps": [ + [ + -31.907312393188477, + false + ], + [ + -30.570072174072266, + false + ], + [ + -22.7277889251709, + false + ], + [ + -26.002704620361328, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "1ee9007973ea9d762c1d28efd631ea2df296bb5722e13f9b73476089e17f593c", + "prompt_hash": "dec85aaaced9973622c82d55bd48a6590d9c4c61c214320bc06f4ecb773982a4", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 25, + "doc": { + "question": "What was the absolute self-correction gain of the proposed method on reasoning problems from the MATH dataset?", + "choices": [ + "15.6%", + "9.1%", + "23.0%", + "12.2%" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What was the absolute self-correction gain of the proposed method on reasoning problems from the MATH dataset?\nAnswer:", + " 15.6%" + ], + [ + "Question:What was the absolute self-correction gain of the proposed method on reasoning problems from the MATH dataset?\nAnswer:", + " 9.1%" + ], + [ + "Question:What was the absolute self-correction gain of the proposed method on reasoning problems from the MATH dataset?\nAnswer:", + " 23.0%" + ], + [ + "Question:What was the absolute self-correction gain of the proposed method on reasoning problems from the MATH dataset?\nAnswer:", + " 12.2%" + ] + ], + "resps": [ + [ + [ + -15.868279457092285, + false + ] + ], + [ + [ + -15.192159652709961, + false + ] + ], + [ + [ + -15.974629402160645, + false + ] + ], + [ + [ + -15.19686508178711, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.868279457092285, + false + ], + [ + 
-15.192159652709961, + false + ], + [ + -15.974629402160645, + false + ], + [ + -15.19686508178711, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f647626b6c2effe8ca7302ca4613cf8129fd899143a80c08ab8f03289e257337", + "prompt_hash": "3868ab363e5f76c379bfc768131c3b6ffe214eaae3d6b1244836f37f8ce8a071", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 26, + "doc": { + "question": "Which base model was fine-tuned for experiments on coding problems?", + "choices": [ + "Gemini 1.0 Pro", + "Gemini 1.5 Flash", + "GPT-3.5", + "PaLM 2" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which base model was fine-tuned for experiments on coding problems?\nAnswer:", + " Gemini 1.0 Pro" + ], + [ + "Question:Which base model was fine-tuned for experiments on coding problems?\nAnswer:", + " Gemini 1.5 Flash" + ], + [ + "Question:Which base model was fine-tuned for experiments on coding problems?\nAnswer:", + " GPT-3.5" + ], + [ + "Question:Which base model was fine-tuned for experiments on coding problems?\nAnswer:", + " PaLM 2" + ] + ], + "resps": [ + [ + [ + -11.975496292114258, + false + ] + ], + [ + [ + -11.706640243530273, + false + ] + ], + [ + [ + -4.315305233001709, + false + ] + ], + [ + [ + -9.331165313720703, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.975496292114258, + false + ], + [ + -11.706640243530273, + false + ], + [ + -4.315305233001709, + false + ], + [ + -9.331165313720703, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b417fc051bd16e505060fabea6737d135a4be1170c992653e9da7ec5ec2a682b", + "prompt_hash": "07d264febd521ba3080badaf41a97ae084f718159df7ec066c962b7cc8de4507", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 27, + "doc": { + "question": "In Stage I of the training approach, what constraint is applied to the model’s first attempt?", + "choices": [ + "It must maximize accuracy directly", + "It is constrained to be close to the base model using a KL-divergence", + "It is discarded and only the second attempt is optimized", + "It is forced to match ground-truth answers" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:In Stage I of the training approach, what constraint is applied to the model’s first attempt?\nAnswer:", + " It must maximize accuracy directly" + ], + [ + "Question:In Stage I of the training approach, what constraint is applied to the model’s first attempt?\nAnswer:", + " It is constrained to be close to the base model using a KL-divergence" + ], + [ + "Question:In Stage I of the training approach, what constraint is applied to the model’s first attempt?\nAnswer:", + " It is discarded and only the second attempt is optimized" + ], + [ + "Question:In Stage I of the training approach, what constraint is applied to the model’s first attempt?\nAnswer:", + " It is forced to match ground-truth answers" + ] + ], + "resps": [ + [ + [ + -30.358684539794922, + false + ] + ], + [ + [ + -41.4800910949707, + false + ] + ], + [ + [ + -34.299049377441406, + false + ] + ], + [ + [ + -29.237268447875977, + false + ] + ] + ], + "filtered_resps": [ + [ + -30.358684539794922, + false + ], + [ + -41.4800910949707, + false + ], + [ + -34.299049377441406, + false + ], + [ + -29.237268447875977, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + 
], + "doc_hash": "b8c8119741d755b6d2421c475321038ba54dc019dc31248a9d83b976c63b9bed", + "prompt_hash": "7f31f11aac28d018d0a6f0802cd3bce644fe5839dd3195d20e3521da243f905c", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 28, + "doc": { + "question": "What hyperparameter value was used for the learning rate when training the method on the MATH dataset?", + "choices": [ + "5e-6", + "1e-5", + "1e-4", + "5e-5" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What hyperparameter value was used for the learning rate when training the method on the MATH dataset?\nAnswer:", + " 5e-6" + ], + [ + "Question:What hyperparameter value was used for the learning rate when training the method on the MATH dataset?\nAnswer:", + " 1e-5" + ], + [ + "Question:What hyperparameter value was used for the learning rate when training the method on the MATH dataset?\nAnswer:", + " 1e-4" + ], + [ + "Question:What hyperparameter value was used for the learning rate when training the method on the MATH dataset?\nAnswer:", + " 5e-5" + ] + ], + "resps": [ + [ + [ + -8.413949012756348, + false + ] + ], + [ + [ + -6.770974636077881, + false + ] + ], + [ + [ + -6.059501647949219, + false + ] + ], + [ + [ + -7.30908727645874, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.413949012756348, + false + ], + [ + -6.770974636077881, + false + ], + [ + -6.059501647949219, + false + ], + [ + -7.30908727645874, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "37abbc46b91aca2efa4127a035b6201304e388062228a9add526d282010ccb0f", + "prompt_hash": "ec4777367a3cfe0380696c09d06021224bebb270ab0c973687ae04831ce2411c", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 29, + "doc": { + "question": "What was the accuracy@t2 of the method on the HumanEval coding benchmark?", + "choices": [ + "52.5%", + "54.3%", + "56.7%", + "64.6%" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What was the accuracy@t2 of the method on the HumanEval coding benchmark?\nAnswer:", + " 52.5%" + ], + [ + "Question:What was the accuracy@t2 of the method on the HumanEval coding benchmark?\nAnswer:", + " 54.3%" + ], + [ + "Question:What was the accuracy@t2 of the method on the HumanEval coding benchmark?\nAnswer:", + " 56.7%" + ], + [ + "Question:What was the accuracy@t2 of the method on the HumanEval coding benchmark?\nAnswer:", + " 64.6%" + ] + ], + "resps": [ + [ + [ + -13.624635696411133, + false + ] + ], + [ + [ + -14.403515815734863, + false + ] + ], + [ + [ + -14.236377716064453, + false + ] + ], + [ + [ + -14.352609634399414, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.624635696411133, + false + ], + [ + -14.403515815734863, + false + ], + [ + -14.236377716064453, + false + ], + [ + -14.352609634399414, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "d34e36f1883b08b4e303e1155a8c5e4b00af362834b850e5aa13120ee8e57bb5", + "prompt_hash": "9a16d45757cca256ff8ad1d89fa972d6db23ca8bbfb942bab51659565a081b8d", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 30, + "doc": { + "question": "What is the default number of GPUs per machine (U) used in the auto-parallelism algorithm?", + "choices": [ + "4", + "8", + "16", + "32" + ], + "answer": 1 + }, + "target": 1, + 
"arguments": [ + [ + "Question:What is the default number of GPUs per machine (U) used in the auto-parallelism algorithm?\nAnswer:", + " 4" + ], + [ + "Question:What is the default number of GPUs per machine (U) used in the auto-parallelism algorithm?\nAnswer:", + " 8" + ], + [ + "Question:What is the default number of GPUs per machine (U) used in the auto-parallelism algorithm?\nAnswer:", + " 16" + ], + [ + "Question:What is the default number of GPUs per machine (U) used in the auto-parallelism algorithm?\nAnswer:", + " 32" + ] + ], + "resps": [ + [ + [ + -3.4775350093841553, + false + ] + ], + [ + [ + -3.663968801498413, + false + ] + ], + [ + [ + -5.262546539306641, + false + ] + ], + [ + [ + -5.171002388000488, + false + ] + ] + ], + "filtered_resps": [ + [ + -3.4775350093841553, + false + ], + [ + -3.663968801498413, + false + ], + [ + -5.262546539306641, + false + ], + [ + -5.171002388000488, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f612f5a0f7dce9c3cab1f9b7264350f535e58c3ff60f197a3ec58f4c3ad32fa7", + "prompt_hash": "67a6515518bc1d2f3d9ffa96539cd1f9a1c48faf0b6a78b868320df3db076bd1", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 31, + "doc": { + "question": "Which workload type is described as memory-bound in the auto-parallelism algorithm?", + "choices": [ + "Training", + "Inference", + "Generation", + "Backward pass" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which workload type is described as memory-bound in the auto-parallelism algorithm?\nAnswer:", + " Training" + ], + [ + "Question:Which workload type is described as memory-bound in the auto-parallelism algorithm?\nAnswer:", + " Inference" + ], + [ + "Question:Which workload type is described as memory-bound in the auto-parallelism algorithm?\nAnswer:", + " Generation" + ], + [ + "Question:Which workload type is described as memory-bound in the auto-parallelism algorithm?\nAnswer:", + " Backward pass" + ] + ], + "resps": [ + [ + [ + -7.63580322265625, + false + ] + ], + [ + [ + -7.492424011230469, + false + ] + ], + [ + [ + -9.588844299316406, + false + ] + ], + [ + [ + -12.772279739379883, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.63580322265625, + false + ], + [ + -7.492424011230469, + false + ], + [ + -9.588844299316406, + false + ], + [ + -12.772279739379883, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "1d7916337d09f81d90fd24ab466d07533e48fc1e51f50d4c5f7b39ae5d0a4c43", + "prompt_hash": "b30c24ce196072cfa65f45b1664ae3504848f4675842802db20232d1095eabb1", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 32, + "doc": { + "question": "In RLHF, what operation does the actor model perform during Stage 1 (Generation)?", + "choices": [ + "Compute squared-error loss", + "Produce responses from prompts using auto-regressive generation", + "Evaluate divergence with the reference policy", + "Assign scalar rewards" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:In RLHF, what operation does the actor model perform during Stage 1 (Generation)?\nAnswer:", + " Compute squared-error loss" + ], + [ + "Question:In RLHF, what operation does the actor model perform during Stage 1 (Generation)?\nAnswer:", + " Produce responses from prompts using auto-regressive generation" + ], + [ + "Question:In RLHF, what 
operation does the actor model perform during Stage 1 (Generation)?\nAnswer:", + " Evaluate divergence with the reference policy" + ], + [ + "Question:In RLHF, what operation does the actor model perform during Stage 1 (Generation)?\nAnswer:", + " Assign scalar rewards" + ] + ], + "resps": [ + [ + [ + -30.3663330078125, + false + ] + ], + [ + [ + -35.097373962402344, + false + ] + ], + [ + [ + -35.44439697265625, + false + ] + ], + [ + [ + -18.711971282958984, + false + ] + ] + ], + "filtered_resps": [ + [ + -30.3663330078125, + false + ], + [ + -35.097373962402344, + false + ], + [ + -35.44439697265625, + false + ], + [ + -18.711971282958984, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "25894f47d56bb4ce7d2bca2fa551786ae042ad88121e6af68f653ffb9c286461", + "prompt_hash": "0e3494099025b55e286623a7eb4a440a3500f83f64bc70e16f2b9f1aaad7aeac", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 33, + "doc": { + "question": "What parallelism strategy does ZeRO optimize?", + "choices": [ + "Pipeline parallelism", + "Tensor parallelism", + "Data parallelism", + "Model offloading" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What parallelism strategy does ZeRO optimize?\nAnswer:", + " Pipeline parallelism" + ], + [ + "Question:What parallelism strategy does ZeRO optimize?\nAnswer:", + " Tensor parallelism" + ], + [ + "Question:What parallelism strategy does ZeRO optimize?\nAnswer:", + " Data parallelism" + ], + [ + "Question:What parallelism strategy does ZeRO optimize?\nAnswer:", + " Model offloading" + ] + ], + "resps": [ + [ + [ + -10.252677917480469, + false + ] + ], + [ + [ + -8.313484191894531, + false + ] + ], + [ + [ + -8.054891586303711, + false + ] + ], + [ + [ + -20.16019058227539, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.252677917480469, + false + ], + [ + -8.313484191894531, + false + ], + [ + -8.054891586303711, + false + ], + [ + -20.16019058227539, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "14ad4867fd1a7cf0861481030428370605924da8a0b1bdfef6dcad706397b60e", + "prompt_hash": "eb1a8d1f7a2617e8d515800faefd480a234027205dbf2f3ce3be026c087b3bf8", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 34, + "doc": { + "question": "Which system colocates all models on the same set of devices for RLHF execution?", + "choices": [ + "OpenRLHF", + "NeMo-Aligner", + "DeepSpeed-Chat", + "HybridFlow" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which system colocates all models on the same set of devices for RLHF execution?\nAnswer:", + " OpenRLHF" + ], + [ + "Question:Which system colocates all models on the same set of devices for RLHF execution?\nAnswer:", + " NeMo-Aligner" + ], + [ + "Question:Which system colocates all models on the same set of devices for RLHF execution?\nAnswer:", + " DeepSpeed-Chat" + ], + [ + "Question:Which system colocates all models on the same set of devices for RLHF execution?\nAnswer:", + " HybridFlow" + ] + ], + "resps": [ + [ + [ + -15.416034698486328, + false + ] + ], + [ + [ + -25.671110153198242, + false + ] + ], + [ + [ + -13.71575927734375, + false + ] + ], + [ + [ + -17.46190643310547, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.416034698486328, + false + ], + [ + -25.671110153198242, + false + ], + [ + -13.71575927734375, 
+ false + ], + [ + -17.46190643310547, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b22608c501506d3c57e6cb329f8d20566e5179d924ace4a6fc6c75413cbe34f4", + "prompt_hash": "2666f9983d03c6b0a75e6c5668e2439459f3f798b55909a9b1f0154d256c84eb", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 35, + "doc": { + "question": "What is the parameter size of the Helium text language model?", + "choices": [ + "3B", + "7B", + "13B", + "70B" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the parameter size of the Helium text language model?\nAnswer:", + " 3B" + ], + [ + "Question:What is the parameter size of the Helium text language model?\nAnswer:", + " 7B" + ], + [ + "Question:What is the parameter size of the Helium text language model?\nAnswer:", + " 13B" + ], + [ + "Question:What is the parameter size of the Helium text language model?\nAnswer:", + " 70B" + ] + ], + "resps": [ + [ + [ + -7.6070075035095215, + false + ] + ], + [ + [ + -6.104134559631348, + false + ] + ], + [ + [ + -7.87722110748291, + false + ] + ], + [ + [ + -8.18855094909668, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.6070075035095215, + false + ], + [ + -6.104134559631348, + false + ], + [ + -7.87722110748291, + false + ], + [ + -8.18855094909668, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "295a79dec253df87092a645dbb7aca11949e95802697013694a6fefe4ef6fb0f", + "prompt_hash": "88a6e62e96d34cc3fdbceaa93f9adfb56c8fc7cd6357984ce3d11b6f81de46b5", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 36, + "doc": { + "question": "Which neural audio codec is introduced to encode audio into discrete units?", + "choices": [ + "AudioLM", + "EnCodec", + "Mimi", + "SoundStream" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which neural audio codec is introduced to encode audio into discrete units?\nAnswer:", + " AudioLM" + ], + [ + "Question:Which neural audio codec is introduced to encode audio into discrete units?\nAnswer:", + " EnCodec" + ], + [ + "Question:Which neural audio codec is introduced to encode audio into discrete units?\nAnswer:", + " Mimi" + ], + [ + "Question:Which neural audio codec is introduced to encode audio into discrete units?\nAnswer:", + " SoundStream" + ] + ], + "resps": [ + [ + [ + -12.109827041625977, + false + ] + ], + [ + [ + -10.259915351867676, + false + ] + ], + [ + [ + -15.565141677856445, + false + ] + ], + [ + [ + -8.1206693649292, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.109827041625977, + false + ], + [ + -10.259915351867676, + false + ], + [ + -15.565141677856445, + false + ], + [ + -8.1206693649292, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "43046273a2d2929dfdb9fd277f085207ba600a9e1c95cc1d19a4e80097ad625f", + "prompt_hash": "16864ae4d828b91f371b7546d2dea0d21184075e15af6c995d72587a4c372ee6", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 37, + "doc": { + "question": "What theoretical latency is reported for the real-time spoken dialogue system?", + "choices": [ + "80ms", + "120ms", + "160ms", + "230ms" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What theoretical latency is reported for the 
real-time spoken dialogue system?\nAnswer:", + " 80ms" + ], + [ + "Question:What theoretical latency is reported for the real-time spoken dialogue system?\nAnswer:", + " 120ms" + ], + [ + "Question:What theoretical latency is reported for the real-time spoken dialogue system?\nAnswer:", + " 160ms" + ], + [ + "Question:What theoretical latency is reported for the real-time spoken dialogue system?\nAnswer:", + " 230ms" + ] + ], + "resps": [ + [ + [ + -8.884298324584961, + false + ] + ], + [ + [ + -9.397584915161133, + false + ] + ], + [ + [ + -10.101129531860352, + false + ] + ], + [ + [ + -11.414660453796387, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.884298324584961, + false + ], + [ + -9.397584915161133, + false + ], + [ + -10.101129531860352, + false + ], + [ + -11.414660453796387, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b8850f8666028d7b8f55ed2a688a466e983ffda9a1ed39029e5a160dbf58d025", + "prompt_hash": "116a2f49d3531f67838d68a074089b4c452bc3eb568b8d8592054d40e1bbfaf3", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 38, + "doc": { + "question": "What benchmark is adopted to evaluate safety under multiple categories?", + "choices": [ + "SUPERB", + "ALERT", + "MMLU", + "TriviaQA" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What benchmark is adopted to evaluate safety under multiple categories?\nAnswer:", + " SUPERB" + ], + [ + "Question:What benchmark is adopted to evaluate safety under multiple categories?\nAnswer:", + " ALERT" + ], + [ + "Question:What benchmark is adopted to evaluate safety under multiple categories?\nAnswer:", + " MMLU" + ], + [ + "Question:What benchmark is adopted to evaluate safety under multiple categories?\nAnswer:", + " TriviaQA" + ] + ], + "resps": [ + [ + [ + -13.114148139953613, + false + ] + ], + [ + [ + -17.356304168701172, + false + ] + ], + [ + [ + -10.324151039123535, + false + ] + ], + [ + [ + -13.553725242614746, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.114148139953613, + false + ], + [ + -17.356304168701172, + false + ], + [ + -10.324151039123535, + false + ], + [ + -13.553725242614746, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "0ca451912f9e74fac5218c521b184aa7c06447916cec32fd5d97e838a9996769", + "prompt_hash": "a8a2c111e702c31f90d4990bce06e574afe60bd4a866b7c3a99fb16dd02deb62", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 39, + "doc": { + "question": "Which method is introduced to improve speech-to-speech generation by predicting text tokens before audio tokens?", + "choices": [ + "Chain-of-Modality", + "Parallel Generation", + "Interleaving", + "Residual Quantization" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which method is introduced to improve speech-to-speech generation by predicting text tokens before audio tokens?\nAnswer:", + " Chain-of-Modality" + ], + [ + "Question:Which method is introduced to improve speech-to-speech generation by predicting text tokens before audio tokens?\nAnswer:", + " Parallel Generation" + ], + [ + "Question:Which method is introduced to improve speech-to-speech generation by predicting text tokens before audio tokens?\nAnswer:", + " Interleaving" + ], + [ + "Question:Which method is introduced to improve speech-to-speech generation by predicting text tokens 
before audio tokens?\nAnswer:", + " Residual Quantization" + ] + ], + "resps": [ + [ + [ + -25.59917640686035, + false + ] + ], + [ + [ + -14.614076614379883, + false + ] + ], + [ + [ + -15.206499099731445, + false + ] + ], + [ + [ + -21.050691604614258, + false + ] + ] + ], + "filtered_resps": [ + [ + -25.59917640686035, + false + ], + [ + -14.614076614379883, + false + ], + [ + -15.206499099731445, + false + ], + [ + -21.050691604614258, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "a36384f4676d8978afe402e7f8d9b947f3a2a83db5673f38eedfc8e1f5b7a14b", + "prompt_hash": "9744f7a13faf7938951b7ec848c88d16aad947fc8ca2eaf7419692e7f13a5cbc", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 40, + "doc": { + "question": "How many videos are included in the \\\\DataName dataset?", + "choices": [ + "A. 178,510", + "B. 40,000", + "C. 900,000", + "D. 136 million" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:How many videos are included in the \\\\DataName dataset?\nAnswer:", + " A. 178,510" + ], + [ + "Question:How many videos are included in the \\\\DataName dataset?\nAnswer:", + " B. 40,000" + ], + [ + "Question:How many videos are included in the \\\\DataName dataset?\nAnswer:", + " C. 900,000" + ], + [ + "Question:How many videos are included in the \\\\DataName dataset?\nAnswer:", + " D. 136 million" + ] + ], + "resps": [ + [ + [ + -27.44942855834961, + false + ] + ], + [ + [ + -23.27755355834961, + false + ] + ], + [ + [ + -26.529640197753906, + false + ] + ], + [ + [ + -31.16094970703125, + false + ] + ] + ], + "filtered_resps": [ + [ + -27.44942855834961, + false + ], + [ + -23.27755355834961, + false + ], + [ + -26.529640197753906, + false + ], + [ + -31.16094970703125, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "0fda4662f1ff0aa970514e5fdee88c432ac56b5b26263747812aa13614a7e8be", + "prompt_hash": "e05a2e8b0fc242c5cbb8630ceea6316bb72350bf467322d8cd62ea60708ab0e0", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 41, + "doc": { + "question": "What frame sampling rate is used in the \\\\DataName dataset?", + "choices": [ + "A. 0.008 fps", + "B. 0.15 fps", + "C. 1 fps", + "D. 10 fps" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What frame sampling rate is used in the \\\\DataName dataset?\nAnswer:", + " A. 0.008 fps" + ], + [ + "Question:What frame sampling rate is used in the \\\\DataName dataset?\nAnswer:", + " B. 0.15 fps" + ], + [ + "Question:What frame sampling rate is used in the \\\\DataName dataset?\nAnswer:", + " C. 1 fps" + ], + [ + "Question:What frame sampling rate is used in the \\\\DataName dataset?\nAnswer:", + " D. 
10 fps" + ] + ], + "resps": [ + [ + [ + -27.462284088134766, + false + ] + ], + [ + [ + -26.013099670410156, + false + ] + ], + [ + [ + -18.49521255493164, + false + ] + ], + [ + [ + -19.096094131469727, + false + ] + ] + ], + "filtered_resps": [ + [ + -27.462284088134766, + false + ], + [ + -26.013099670410156, + false + ], + [ + -18.49521255493164, + false + ], + [ + -19.096094131469727, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "08cf43a87aad451e03891dd8f52fd7dfae8127eb484b177c09ec9bd855170d55", + "prompt_hash": "9bd3fa5265e2205ee7e9a48d0e0fb0cb57a83361fe54e184b51355af1111300a", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 42, + "doc": { + "question": "How many question types are defined for generating question-answer pairs in \\\\DataName?", + "choices": [ + "A. 8", + "B. 10", + "C. 12", + "D. 16" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:How many question types are defined for generating question-answer pairs in \\\\DataName?\nAnswer:", + " A. 8" + ], + [ + "Question:How many question types are defined for generating question-answer pairs in \\\\DataName?\nAnswer:", + " B. 10" + ], + [ + "Question:How many question types are defined for generating question-answer pairs in \\\\DataName?\nAnswer:", + " C. 12" + ], + [ + "Question:How many question types are defined for generating question-answer pairs in \\\\DataName?\nAnswer:", + " D. 16" + ] + ], + "resps": [ + [ + [ + -15.037141799926758, + false + ] + ], + [ + [ + -18.426780700683594, + false + ] + ], + [ + [ + -19.0015869140625, + false + ] + ], + [ + [ + -20.956581115722656, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.037141799926758, + false + ], + [ + -18.426780700683594, + false + ], + [ + -19.0015869140625, + false + ], + [ + -20.956581115722656, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "82e2376e9f9d4a079f3834fa42be02a1d9f23fa5015ce69c8c1a39ddfabf8ede", + "prompt_hash": "e3ee7893fc0206f90a0deae683266051c043888688f960c70aa659dcf549087b", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 43, + "doc": { + "question": "What is the total number of instruction-following samples in the \\\\DataName dataset?", + "choices": [ + "A. 1.3 million", + "B. 960,000", + "C. 178,000", + "D. 215,000" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the total number of instruction-following samples in the \\\\DataName dataset?\nAnswer:", + " A. 1.3 million" + ], + [ + "Question:What is the total number of instruction-following samples in the \\\\DataName dataset?\nAnswer:", + " B. 960,000" + ], + [ + "Question:What is the total number of instruction-following samples in the \\\\DataName dataset?\nAnswer:", + " C. 178,000" + ], + [ + "Question:What is the total number of instruction-following samples in the \\\\DataName dataset?\nAnswer:", + " D. 
215,000" + ] + ], + "resps": [ + [ + [ + -20.036428451538086, + false + ] + ], + [ + [ + -28.441104888916016, + false + ] + ], + [ + [ + -27.614681243896484, + false + ] + ], + [ + [ + -28.703224182128906, + false + ] + ] + ], + "filtered_resps": [ + [ + -20.036428451538086, + false + ], + [ + -28.441104888916016, + false + ], + [ + -27.614681243896484, + false + ], + [ + -28.703224182128906, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f128563c09c7fddc52e38a1b51567a61be91ea0faac794d2e8c55bc39075ccda", + "prompt_hash": "7f89cba8f9e1fe131fbecce3bcebe2aec104d7f3f37be4c619240b74381f8fc9", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 44, + "doc": { + "question": "Which dataset is reported to have an average sampling rate of 0.15 fps, sometimes sampling only 2 frames from a 30-second video?", + "choices": [ + "A. ShareGPT4Video", + "B. LLaVA-Hound", + "C. HowTo100M", + "D. ACAV-100M" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which dataset is reported to have an average sampling rate of 0.15 fps, sometimes sampling only 2 frames from a 30-second video?\nAnswer:", + " A. ShareGPT4Video" + ], + [ + "Question:Which dataset is reported to have an average sampling rate of 0.15 fps, sometimes sampling only 2 frames from a 30-second video?\nAnswer:", + " B. LLaVA-Hound" + ], + [ + "Question:Which dataset is reported to have an average sampling rate of 0.15 fps, sometimes sampling only 2 frames from a 30-second video?\nAnswer:", + " C. HowTo100M" + ], + [ + "Question:Which dataset is reported to have an average sampling rate of 0.15 fps, sometimes sampling only 2 frames from a 30-second video?\nAnswer:", + " D. 
ACAV-100M" + ] + ], + "resps": [ + [ + [ + -25.649900436401367, + false + ] + ], + [ + [ + -42.51259231567383, + false + ] + ], + [ + [ + -24.319427490234375, + false + ] + ], + [ + [ + -43.85147476196289, + false + ] + ] + ], + "filtered_resps": [ + [ + -25.649900436401367, + false + ], + [ + -42.51259231567383, + false + ], + [ + -24.319427490234375, + false + ], + [ + -43.85147476196289, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "91ef16f651f4594ab7a458e54ea4022facbb0e5c3631264f316b197bae204fdd", + "prompt_hash": "ea07f7b02166866b6f0032e0c9168d65d23f09c6eed5c894629afaea30d2b9b7", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 45, + "doc": { + "question": "How many parameters does the 0.6B version of the model contain?", + "choices": [ + "1152M", + "590M", + "2240M", + "1604M" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:How many parameters does the 0.6B version of the model contain?\nAnswer:", + " 1152M" + ], + [ + "Question:How many parameters does the 0.6B version of the model contain?\nAnswer:", + " 590M" + ], + [ + "Question:How many parameters does the 0.6B version of the model contain?\nAnswer:", + " 2240M" + ], + [ + "Question:How many parameters does the 0.6B version of the model contain?\nAnswer:", + " 1604M" + ] + ], + "resps": [ + [ + [ + -14.291555404663086, + false + ] + ], + [ + [ + -10.522879600524902, + false + ] + ], + [ + [ + -16.00899887084961, + false + ] + ], + [ + [ + -16.75801658630371, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.291555404663086, + false + ], + [ + -10.522879600524902, + false + ], + [ + -16.00899887084961, + false + ], + [ + -16.75801658630371, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "a4761b93b20496334d111187f778f76dfea66671563930bea6e0b3980c216e20", + "prompt_hash": "14882c4529bd7fe9c8ee9d792e71cfb63a0c12ca8a2bcec7afb3bfdba8d5d00a", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 46, + "doc": { + "question": "Which dataset contains 30K images from Midjourney and is used for evaluating FID and Clip Score?", + "choices": [ + "ImageNet-1K", + "MSCOCO", + "MJHQ-30K", + "DPG-Bench" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which dataset contains 30K images from Midjourney and is used for evaluating FID and Clip Score?\nAnswer:", + " ImageNet-1K" + ], + [ + "Question:Which dataset contains 30K images from Midjourney and is used for evaluating FID and Clip Score?\nAnswer:", + " MSCOCO" + ], + [ + "Question:Which dataset contains 30K images from Midjourney and is used for evaluating FID and Clip Score?\nAnswer:", + " MJHQ-30K" + ], + [ + "Question:Which dataset contains 30K images from Midjourney and is used for evaluating FID and Clip Score?\nAnswer:", + " DPG-Bench" + ] + ], + "resps": [ + [ + [ + -7.996962070465088, + false + ] + ], + [ + [ + -9.08751106262207, + false + ] + ], + [ + [ + -22.040189743041992, + false + ] + ], + [ + [ + -22.54903793334961, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.996962070465088, + false + ], + [ + -9.08751106262207, + false + ], + [ + -22.040189743041992, + false + ], + [ + -22.54903793334961, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "47b0bbebc2ea01a95a6021dfa70c6f6543afdbccfcee49014c890c3b647ddbb9", + 
"prompt_hash": "fc97e1336510e8fe920082f9a2bc83ac3e73e2c24d4c22c872b4669e00d5f1e3", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 47, + "doc": { + "question": "What is the depth (number of layers) of the 1.6B version of the model?", + "choices": [ + "28", + "36", + "20", + "32" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the depth (number of layers) of the 1.6B version of the model?\nAnswer:", + " 28" + ], + [ + "Question:What is the depth (number of layers) of the 1.6B version of the model?\nAnswer:", + " 36" + ], + [ + "Question:What is the depth (number of layers) of the 1.6B version of the model?\nAnswer:", + " 20" + ], + [ + "Question:What is the depth (number of layers) of the 1.6B version of the model?\nAnswer:", + " 32" + ] + ], + "resps": [ + [ + [ + -7.322317123413086, + false + ] + ], + [ + [ + -6.984606742858887, + false + ] + ], + [ + [ + -5.454815864562988, + false + ] + ], + [ + [ + -5.265756607055664, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.322317123413086, + false + ], + [ + -6.984606742858887, + false + ], + [ + -5.454815864562988, + false + ], + [ + -5.265756607055664, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "d283b2c2f551cd9a4491d5d44ffb10ddc54033ecfdcbe377a306d7ce6d105f13", + "prompt_hash": "f9e760f45616b073e48293f49d2ea32c9244abe3751176a0834a1a204b0a5391", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 48, + "doc": { + "question": "Which evaluation metric assesses human preference performance and includes 100 prompts?", + "choices": [ + "GenEval", + "FID", + "ImageReward", + "Clip Score" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which evaluation metric assesses human preference performance and includes 100 prompts?\nAnswer:", + " GenEval" + ], + [ + "Question:Which evaluation metric assesses human preference performance and includes 100 prompts?\nAnswer:", + " FID" + ], + [ + "Question:Which evaluation metric assesses human preference performance and includes 100 prompts?\nAnswer:", + " ImageReward" + ], + [ + "Question:Which evaluation metric assesses human preference performance and includes 100 prompts?\nAnswer:", + " Clip Score" + ] + ], + "resps": [ + [ + [ + -12.850953102111816, + false + ] + ], + [ + [ + -7.501878261566162, + false + ] + ], + [ + [ + -11.249866485595703, + false + ] + ], + [ + [ + -14.445904731750488, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.850953102111816, + false + ], + [ + -7.501878261566162, + false + ], + [ + -11.249866485595703, + false + ], + [ + -14.445904731750488, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b1c842171a30cabea8a0daa58584624e1982962bfeabe9757318a71179c49389", + "prompt_hash": "75cc4f9f1859d330acf6b765f1cb11c2974f5c217b2ea2829083badbb4a07185", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 49, + "doc": { + "question": "When replacing full attention with linear attention using AE-F8C4P2, what happens to latency?", + "choices": [ + "Increases from 1931ms to 2250ms", + "Reduces from 2250ms to 1931ms", + "Remains constant at 2250ms", + "Remains constant at 1931ms" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:When replacing full attention with 
linear attention using AE-F8C4P2, what happens to latency?\nAnswer:", + " Increases from 1931ms to 2250ms" + ], + [ + "Question:When replacing full attention with linear attention using AE-F8C4P2, what happens to latency?\nAnswer:", + " Reduces from 2250ms to 1931ms" + ], + [ + "Question:When replacing full attention with linear attention using AE-F8C4P2, what happens to latency?\nAnswer:", + " Remains constant at 2250ms" + ], + [ + "Question:When replacing full attention with linear attention using AE-F8C4P2, what happens to latency?\nAnswer:", + " Remains constant at 1931ms" + ] + ], + "resps": [ + [ + [ + -31.620426177978516, + false + ] + ], + [ + [ + -32.93737030029297, + false + ] + ], + [ + [ + -28.212650299072266, + false + ] + ], + [ + [ + -29.66581916809082, + false + ] + ] + ], + "filtered_resps": [ + [ + -31.620426177978516, + false + ], + [ + -32.93737030029297, + false + ], + [ + -28.212650299072266, + false + ], + [ + -29.66581916809082, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "820644bfcd2dc3aa158a97ab3742cb3b502046a277e8e05c499e6791d335730b", + "prompt_hash": "52f48f53d1dea326532a265611d26026a39424eef577534ac9a7d91b62926f97", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 50, + "doc": { + "question": "What is the main architectural simplification that makes the method 27% faster than LocoTrack?", + "choices": [ + "The use of 3D convolution for feature aggregation", + "A simplified MLP to process 4D correlation features", + "The removal of Fourier encoding in track embeddings", + "The integration of optical flow refinement modules" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the main architectural simplification that makes the method 27% faster than LocoTrack?\nAnswer:", + " The use of 3D convolution for feature aggregation" + ], + [ + "Question:What is the main architectural simplification that makes the method 27% faster than LocoTrack?\nAnswer:", + " A simplified MLP to process 4D correlation features" + ], + [ + "Question:What is the main architectural simplification that makes the method 27% faster than LocoTrack?\nAnswer:", + " The removal of Fourier encoding in track embeddings" + ], + [ + "Question:What is the main architectural simplification that makes the method 27% faster than LocoTrack?\nAnswer:", + " The integration of optical flow refinement modules" + ] + ], + "resps": [ + [ + [ + -27.528322219848633, + false + ] + ], + [ + [ + -49.46475601196289, + false + ] + ], + [ + [ + -44.16676330566406, + false + ] + ], + [ + [ + -33.746856689453125, + false + ] + ] + ], + "filtered_resps": [ + [ + -27.528322219848633, + false + ], + [ + -49.46475601196289, + false + ], + [ + -44.16676330566406, + false + ], + [ + -33.746856689453125, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "85e26dc0e27fe3b1dfcc11f0663c09675a0372c7d6e26e9e33790b5f9e0e702e", + "prompt_hash": "f72f4336249fca531698c3403b55235f59beb77b5ba270ac8e97493c4efa36c0", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 51, + "doc": { + "question": "How many real videos does the proposed method use to outperform BootsTAPIR?", + "choices": [ + "15 billion", + "15 million", + "15 thousand", + "1.5 million" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many real videos does 
the proposed method use to outperform BootsTAPIR?\nAnswer:", + " 15 billion" + ], + [ + "Question:How many real videos does the proposed method use to outperform BootsTAPIR?\nAnswer:", + " 15 million" + ], + [ + "Question:How many real videos does the proposed method use to outperform BootsTAPIR?\nAnswer:", + " 15 thousand" + ], + [ + "Question:How many real videos does the proposed method use to outperform BootsTAPIR?\nAnswer:", + " 1.5 million" + ] + ], + "resps": [ + [ + [ + -15.632142066955566, + false + ] + ], + [ + [ + -12.80514907836914, + false + ] + ], + [ + [ + -14.469340324401855, + false + ] + ], + [ + [ + -11.69805908203125, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.632142066955566, + false + ], + [ + -12.80514907836914, + false + ], + [ + -14.469340324401855, + false + ], + [ + -11.69805908203125, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "30dca43e6d67cfa022d80dec0a71ed2d08cc4128cb249ae5a5b2ba1c2ed39b69", + "prompt_hash": "d236ad1ed85bba8b379b7cdcfb1b7270884351d9aee04d2796646686f9e6d2bc", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 52, + "doc": { + "question": "Which teacher models are used to generate pseudo-labels in the student training protocol?", + "choices": [ + "RAFT, PIPs++, and SuperPoint", + "TAPIR, CoTracker, method online and method offline", + "MAST, TAPTR, and LightGlue", + "VGGSfM, PointOdyssey, and DISK" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which teacher models are used to generate pseudo-labels in the student training protocol?\nAnswer:", + " RAFT, PIPs++, and SuperPoint" + ], + [ + "Question:Which teacher models are used to generate pseudo-labels in the student training protocol?\nAnswer:", + " TAPIR, CoTracker, method online and method offline" + ], + [ + "Question:Which teacher models are used to generate pseudo-labels in the student training protocol?\nAnswer:", + " MAST, TAPTR, and LightGlue" + ], + [ + "Question:Which teacher models are used to generate pseudo-labels in the student training protocol?\nAnswer:", + " VGGSfM, PointOdyssey, and DISK" + ] + ], + "resps": [ + [ + [ + -52.90093231201172, + false + ] + ], + [ + [ + -59.762210845947266, + false + ] + ], + [ + [ + -49.665283203125, + false + ] + ], + [ + [ + -56.55097961425781, + false + ] + ] + ], + "filtered_resps": [ + [ + -52.90093231201172, + false + ], + [ + -59.762210845947266, + false + ], + [ + -49.665283203125, + false + ], + [ + -56.55097961425781, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "e7096069f795a6b31354d56171bab7cb64e9729dc77a21a7023c3f21fbe830b0", + "prompt_hash": "4c8a81ecb2d25b0ecf479c4c24bc416df2057eb1ddeb420626479cc5a847f079", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 53, + "doc": { + "question": "What type of videos are primarily used in the large-scale dataset for training with pseudo-labels?", + "choices": [ + "Synthetic videos of static indoor scenes", + "Videos of computer-generated natural phenomena", + "Internet-like videos of humans and animals", + "Labeled surveillance videos from street cameras" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What type of videos are primarily used in the large-scale dataset for training with pseudo-labels?\nAnswer:", + " Synthetic videos of static indoor scenes" + ], + [ + 
"Question:What type of videos are primarily used in the large-scale dataset for training with pseudo-labels?\nAnswer:", + " Videos of computer-generated natural phenomena" + ], + [ + "Question:What type of videos are primarily used in the large-scale dataset for training with pseudo-labels?\nAnswer:", + " Internet-like videos of humans and animals" + ], + [ + "Question:What type of videos are primarily used in the large-scale dataset for training with pseudo-labels?\nAnswer:", + " Labeled surveillance videos from street cameras" + ] + ], + "resps": [ + [ + [ + -27.919666290283203, + false + ] + ], + [ + [ + -31.976137161254883, + false + ] + ], + [ + [ + -31.44933319091797, + false + ] + ], + [ + [ + -32.28932571411133, + false + ] + ] + ], + "filtered_resps": [ + [ + -27.919666290283203, + false + ], + [ + -31.976137161254883, + false + ], + [ + -31.44933319091797, + false + ], + [ + -32.28932571411133, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "77d962a9d88756c197df6db26cb0ed6c97ac99c1f27d634b07cff078c08e3a2d", + "prompt_hash": "e272316b8094350cdcb23a77e849572f4541ca0a2b1dd54b397b2fff3b54db05", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 54, + "doc": { + "question": "Which metric measures both tracking and occlusion prediction accuracy together?", + "choices": [ + "Occlusion Accuracy (OA)", + "Average Jaccard (AJ)", + "Average Displacement for Visible Points", + "Confidence Threshold Score" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which metric measures both tracking and occlusion prediction accuracy together?\nAnswer:", + " Occlusion Accuracy (OA)" + ], + [ + "Question:Which metric measures both tracking and occlusion prediction accuracy together?\nAnswer:", + " Average Jaccard (AJ)" + ], + [ + "Question:Which metric measures both tracking and occlusion prediction accuracy together?\nAnswer:", + " Average Displacement for Visible Points" + ], + [ + "Question:Which metric measures both tracking and occlusion prediction accuracy together?\nAnswer:", + " Confidence Threshold Score" + ] + ], + "resps": [ + [ + [ + -10.50698471069336, + false + ] + ], + [ + [ + -15.57904052734375, + false + ] + ], + [ + [ + -29.477798461914062, + false + ] + ], + [ + [ + -17.279052734375, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.50698471069336, + false + ], + [ + -15.57904052734375, + false + ], + [ + -29.477798461914062, + false + ], + [ + -17.279052734375, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "5b3399c26f3ef69cd385333b0e5bdc026bd5a64550372075f4e5fffebb9cb69d", + "prompt_hash": "9ced7ba0deba30498839b18d23a59085f5cf39c21d548e34267f77208b17eade", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 55, + "doc": { + "question": "What is the resolution of images used in the main visualization examples of Janus?", + "choices": [ + "256 × 256", + "384 × 384", + "512 × 512", + "1024 × 1024" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the resolution of images used in the main visualization examples of Janus?\nAnswer:", + " 256 × 256" + ], + [ + "Question:What is the resolution of images used in the main visualization examples of Janus?\nAnswer:", + " 384 × 384" + ], + [ + "Question:What is the resolution of images used in the main visualization examples of 
Janus?\nAnswer:", + " 512 × 512" + ], + [ + "Question:What is the resolution of images used in the main visualization examples of Janus?\nAnswer:", + " 1024 × 1024" + ] + ], + "resps": [ + [ + [ + -7.799862861633301, + false + ] + ], + [ + [ + -10.63082504272461, + false + ] + ], + [ + [ + -8.216829299926758, + false + ] + ], + [ + [ + -8.296860694885254, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.799862861633301, + false + ], + [ + -10.63082504272461, + false + ], + [ + -8.216829299926758, + false + ], + [ + -8.296860694885254, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "08275decf2faf58dfa291473ce53e1cee174d18aa5857c75541400d82863e845", + "prompt_hash": "4c3b81178c9839409766d7953ec4c181088f4afbf8459303d49eaec3b2600f82", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 56, + "doc": { + "question": "Which model does Janus use as its base large language model (LLM)?", + "choices": [ + "Qwen-VL", + "GPT-Neo", + "DeepSeek-LLM", + "LLaMA 2" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which model does Janus use as its base large language model (LLM)?\nAnswer:", + " Qwen-VL" + ], + [ + "Question:Which model does Janus use as its base large language model (LLM)?\nAnswer:", + " GPT-Neo" + ], + [ + "Question:Which model does Janus use as its base large language model (LLM)?\nAnswer:", + " DeepSeek-LLM" + ], + [ + "Question:Which model does Janus use as its base large language model (LLM)?\nAnswer:", + " LLaMA 2" + ] + ], + "resps": [ + [ + [ + -9.289318084716797, + false + ] + ], + [ + [ + -8.6356201171875, + false + ] + ], + [ + [ + -12.277383804321289, + false + ] + ], + [ + [ + -10.94096851348877, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.289318084716797, + false + ], + [ + -8.6356201171875, + false + ], + [ + -12.277383804321289, + false + ], + [ + -10.94096851348877, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "09a112fdcf77b0745cd5d3b4f4d0133d042b7a76e9a26c7bb6113d69dbab6d4c", + "prompt_hash": "b6d3b003f088781b3137314937470a4a50e04c438b85653d0a77596b8529a21e", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 57, + "doc": { + "question": "What is the FID score achieved by Janus on the MSCOCO-30K benchmark?", + "choices": [ + "10.39", + "9.24", + "8.53", + "7.32" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the FID score achieved by Janus on the MSCOCO-30K benchmark?\nAnswer:", + " 10.39" + ], + [ + "Question:What is the FID score achieved by Janus on the MSCOCO-30K benchmark?\nAnswer:", + " 9.24" + ], + [ + "Question:What is the FID score achieved by Janus on the MSCOCO-30K benchmark?\nAnswer:", + " 8.53" + ], + [ + "Question:What is the FID score achieved by Janus on the MSCOCO-30K benchmark?\nAnswer:", + " 7.32" + ] + ], + "resps": [ + [ + [ + -11.242629051208496, + false + ] + ], + [ + [ + -11.04965591430664, + false + ] + ], + [ + [ + -11.016393661499023, + false + ] + ], + [ + [ + -11.18939208984375, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.242629051208496, + false + ], + [ + -11.04965591430664, + false + ], + [ + -11.016393661499023, + false + ], + [ + -11.18939208984375, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": 
"6a08db5d8ac2bb1254553eb0da0ea2d3691559010c970be56df0c44c97c78ab4", + "prompt_hash": "a8e79602f14e078b6a6b23461115e4993d68ac4f95fe9896c64d905c2d2337be", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 58, + "doc": { + "question": "How many attention heads does the semantic decoder in the ablation study have?", + "choices": [ + "8", + "10", + "12", + "16" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many attention heads does the semantic decoder in the ablation study have?\nAnswer:", + " 8" + ], + [ + "Question:How many attention heads does the semantic decoder in the ablation study have?\nAnswer:", + " 10" + ], + [ + "Question:How many attention heads does the semantic decoder in the ablation study have?\nAnswer:", + " 12" + ], + [ + "Question:How many attention heads does the semantic decoder in the ablation study have?\nAnswer:", + " 16" + ] + ], + "resps": [ + [ + [ + -3.614625930786133, + false + ] + ], + [ + [ + -7.19932746887207, + false + ] + ], + [ + [ + -5.028623580932617, + false + ] + ], + [ + [ + -5.395282745361328, + false + ] + ] + ], + "filtered_resps": [ + [ + -3.614625930786133, + false + ], + [ + -7.19932746887207, + false + ], + [ + -5.028623580932617, + false + ], + [ + -5.395282745361328, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "97eddd54ac52372dd9747fe6ede0c383a8a2aea0a608d85cdd1c94fa817d7c6d", + "prompt_hash": "a35a7ec64f3da7478f8048930b42444873cc40ec2b9963fea585728aaee40284", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 59, + "doc": { + "question": "How many nodes and GPUs were used in the full training process of Janus?", + "choices": [ + "8 nodes with 4 GPUs each", + "16 nodes with 8 GPUs each", + "32 nodes with 2 GPUs each", + "4 nodes with 16 GPUs each" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:How many nodes and GPUs were used in the full training process of Janus?\nAnswer:", + " 8 nodes with 4 GPUs each" + ], + [ + "Question:How many nodes and GPUs were used in the full training process of Janus?\nAnswer:", + " 16 nodes with 8 GPUs each" + ], + [ + "Question:How many nodes and GPUs were used in the full training process of Janus?\nAnswer:", + " 32 nodes with 2 GPUs each" + ], + [ + "Question:How many nodes and GPUs were used in the full training process of Janus?\nAnswer:", + " 4 nodes with 16 GPUs each" + ] + ], + "resps": [ + [ + [ + -16.610118865966797, + false + ] + ], + [ + [ + -17.693458557128906, + false + ] + ], + [ + [ + -16.693805694580078, + false + ] + ], + [ + [ + -17.05245590209961, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.610118865966797, + false + ], + [ + -17.693458557128906, + false + ], + [ + -16.693805694580078, + false + ], + [ + -17.05245590209961, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f674a2e6a1bb759c66f647721dda1ebcea5048921cce40120a2cf90e4695798f", + "prompt_hash": "a608a708c7e2c33240e761f0d73ad14f178f9cbf67b494a220b664d8f16ec08c", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 60, + "doc": { + "question": "What does the C3k2 block in YOLOv11 replace from previous versions?", + "choices": [ + "CSP block", + "CBS block", + "C2f block", + "C3 block" + ], + "answer": 2 + }, + "target": 2, + 
"arguments": [ + [ + "Question:What does the C3k2 block in YOLOv11 replace from previous versions?\nAnswer:", + " CSP block" + ], + [ + "Question:What does the C3k2 block in YOLOv11 replace from previous versions?\nAnswer:", + " CBS block" + ], + [ + "Question:What does the C3k2 block in YOLOv11 replace from previous versions?\nAnswer:", + " C2f block" + ], + [ + "Question:What does the C3k2 block in YOLOv11 replace from previous versions?\nAnswer:", + " C3 block" + ] + ], + "resps": [ + [ + [ + -10.633159637451172, + false + ] + ], + [ + [ + -16.662315368652344, + false + ] + ], + [ + [ + -12.172492980957031, + false + ] + ], + [ + [ + -7.113786220550537, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.633159637451172, + false + ], + [ + -16.662315368652344, + false + ], + [ + -12.172492980957031, + false + ], + [ + -7.113786220550537, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "5076961b809c42258c80aa58f12d0097ed42b99ed43ef50190f915a71c4b8416", + "prompt_hash": "b7135c8dbb3db09bcc7e1dd949c1bbdce2eb9be903a99350a8bc227231ed6b0b", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 61, + "doc": { + "question": "Which YOLOv11 component is responsible for generating the final object detection predictions?", + "choices": [ + "Backbone", + "Neck", + "Head", + "C2PSA" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which YOLOv11 component is responsible for generating the final object detection predictions?\nAnswer:", + " Backbone" + ], + [ + "Question:Which YOLOv11 component is responsible for generating the final object detection predictions?\nAnswer:", + " Neck" + ], + [ + "Question:Which YOLOv11 component is responsible for generating the final object detection predictions?\nAnswer:", + " Head" + ], + [ + "Question:Which YOLOv11 component is responsible for generating the final object detection predictions?\nAnswer:", + " C2PSA" + ] + ], + "resps": [ + [ + [ + -8.064074516296387, + false + ] + ], + [ + [ + -7.961186408996582, + false + ] + ], + [ + [ + -4.5736188888549805, + false + ] + ], + [ + [ + -28.57439422607422, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.064074516296387, + false + ], + [ + -7.961186408996582, + false + ], + [ + -4.5736188888549805, + false + ], + [ + -28.57439422607422, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "74c4acd48e33b31d2db4dcce90783d71007a901dd9550689dd6404b689183fa6", + "prompt_hash": "d561ea0b79eb2ffdb1d0fbbfa7120a55d977134eb76585649b70a53cffbffd61", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 62, + "doc": { + "question": "Which YOLO version introduced consistent dual assignments for NMS-free training?", + "choices": [ + "YOLOv8", + "YOLOv9", + "YOLOv10", + "YOLOv11" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which YOLO version introduced consistent dual assignments for NMS-free training?\nAnswer:", + " YOLOv8" + ], + [ + "Question:Which YOLO version introduced consistent dual assignments for NMS-free training?\nAnswer:", + " YOLOv9" + ], + [ + "Question:Which YOLO version introduced consistent dual assignments for NMS-free training?\nAnswer:", + " YOLOv10" + ], + [ + "Question:Which YOLO version introduced consistent dual assignments for NMS-free training?\nAnswer:", + " YOLOv11" + ] + ], + "resps": [ + [ + [ + 
-6.006664276123047, + false + ] + ], + [ + [ + -7.727287292480469, + false + ] + ], + [ + [ + -10.3282470703125, + false + ] + ], + [ + [ + -12.71410846710205, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.006664276123047, + false + ], + [ + -7.727287292480469, + false + ], + [ + -10.3282470703125, + false + ], + [ + -12.71410846710205, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "782bc04b40fdf646e23da0df3c4d456db2841d31c48d7f728473473a9d4eefd3", + "prompt_hash": "88b8f0a208713be8321190755c8a091359e89d20385829a8866f058617bd1246", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 63, + "doc": { + "question": "What task is the YOLOv11-obb variant specifically designed for?", + "choices": [ + "Pose Estimation", + "Oriented Object Detection", + "Instance Segmentation", + "Image Classification" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What task is the YOLOv11-obb variant specifically designed for?\nAnswer:", + " Pose Estimation" + ], + [ + "Question:What task is the YOLOv11-obb variant specifically designed for?\nAnswer:", + " Oriented Object Detection" + ], + [ + "Question:What task is the YOLOv11-obb variant specifically designed for?\nAnswer:", + " Instance Segmentation" + ], + [ + "Question:What task is the YOLOv11-obb variant specifically designed for?\nAnswer:", + " Image Classification" + ] + ], + "resps": [ + [ + [ + -10.559854507446289, + false + ] + ], + [ + [ + -12.302038192749023, + false + ] + ], + [ + [ + -9.71845531463623, + false + ] + ], + [ + [ + -10.004639625549316, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.559854507446289, + false + ], + [ + -12.302038192749023, + false + ], + [ + -9.71845531463623, + false + ], + [ + -10.004639625549316, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "2362c80e349c52b80c72e0f5e575c2435ec9f5680e2fb7a386a2a533cc52b3ad", + "prompt_hash": "13ec62143491a215a0f7c97502a13fb9a5363536bd5328eae8b7d42f4bfb7250", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 64, + "doc": { + "question": "How much fewer parameters does YOLOv11m use compared to YOLOv8m while achieving superior mAP?", + "choices": [ + "10%", + "18%", + "22%", + "30%" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How much fewer parameters does YOLOv11m use compared to YOLOv8m while achieving superior mAP?\nAnswer:", + " 10%" + ], + [ + "Question:How much fewer parameters does YOLOv11m use compared to YOLOv8m while achieving superior mAP?\nAnswer:", + " 18%" + ], + [ + "Question:How much fewer parameters does YOLOv11m use compared to YOLOv8m while achieving superior mAP?\nAnswer:", + " 22%" + ], + [ + "Question:How much fewer parameters does YOLOv11m use compared to YOLOv8m while achieving superior mAP?\nAnswer:", + " 30%" + ] + ], + "resps": [ + [ + [ + -13.356922149658203, + false + ] + ], + [ + [ + -13.9075927734375, + false + ] + ], + [ + [ + -14.149547576904297, + false + ] + ], + [ + [ + -13.93923568725586, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.356922149658203, + false + ], + [ + -13.9075927734375, + false + ], + [ + -14.149547576904297, + false + ], + [ + -13.93923568725586, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": 
"ed2455f30c263a79e1b14b6460eb0dc88c2f48ba69a9945217c4cd71079266ff", + "prompt_hash": "05f7d4beae3f3e039c97fe7b7862d7965444171656fd42e43210013ab83c0b5e", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 65, + "doc": { + "question": "Which stage in the predictive model hierarchy is characterized by the ability to generate actionable videos?", + "choices": [ + "A. S0", + "B. S1", + "C. S2", + "D. S3" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:Which stage in the predictive model hierarchy is characterized by the ability to generate actionable videos?\nAnswer:", + " A. S0" + ], + [ + "Question:Which stage in the predictive model hierarchy is characterized by the ability to generate actionable videos?\nAnswer:", + " B. S1" + ], + [ + "Question:Which stage in the predictive model hierarchy is characterized by the ability to generate actionable videos?\nAnswer:", + " C. S2" + ], + [ + "Question:Which stage in the predictive model hierarchy is characterized by the ability to generate actionable videos?\nAnswer:", + " D. S3" + ] + ], + "resps": [ + [ + [ + -19.10530662536621, + false + ] + ], + [ + [ + -19.845218658447266, + false + ] + ], + [ + [ + -21.168285369873047, + false + ] + ], + [ + [ + -21.69548797607422, + false + ] + ] + ], + "filtered_resps": [ + [ + -19.10530662536621, + false + ], + [ + -19.845218658447266, + false + ], + [ + -21.168285369873047, + false + ], + [ + -21.69548797607422, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "1e0d10a547609c9ddd142bfcb86a301eb7d6bdfa579ede13d2b137378097532c", + "prompt_hash": "5d1e1c6ae3e6a61a17ea040689e1fe031915e198af7a4ab2d9e77076669f6ee4", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 66, + "doc": { + "question": "What simulation environment is used for the evaluation of predictive models in the \\u2018\\abarm\\u2019 scenario?", + "choices": [ + "A. CARLA", + "B. Calvin", + "C. Habitat", + "D. MineRL" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What simulation environment is used for the evaluation of predictive models in the \\u2018\\abarm\\u2019 scenario?\nAnswer:", + " A. CARLA" + ], + [ + "Question:What simulation environment is used for the evaluation of predictive models in the \\u2018\\abarm\\u2019 scenario?\nAnswer:", + " B. Calvin" + ], + [ + "Question:What simulation environment is used for the evaluation of predictive models in the \\u2018\\abarm\\u2019 scenario?\nAnswer:", + " C. Habitat" + ], + [ + "Question:What simulation environment is used for the evaluation of predictive models in the \\u2018\\abarm\\u2019 scenario?\nAnswer:", + " D. 
MineRL" + ] + ], + "resps": [ + [ + [ + -18.005578994750977, + false + ] + ], + [ + [ + -24.09957504272461, + false + ] + ], + [ + [ + -20.01169204711914, + false + ] + ], + [ + [ + -31.064380645751953, + false + ] + ] + ], + "filtered_resps": [ + [ + -18.005578994750977, + false + ], + [ + -24.09957504272461, + false + ], + [ + -20.01169204711914, + false + ], + [ + -31.064380645751953, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f67a8549dcd2b8979f1de1e9580290429e1de35d629105905b3335e18f414831", + "prompt_hash": "ad95fec7874f6e14104bc5e0b9893ff429528f30f46d222389a546536e555e70", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 67, + "doc": { + "question": "Which benchmark uses 'Human Judgement' as its evaluation strategy and operates at Stage S0?", + "choices": [ + "A. EvalCrafter", + "B. VBench", + "C. AgentBench", + "D. LEGO" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which benchmark uses 'Human Judgement' as its evaluation strategy and operates at Stage S0?\nAnswer:", + " A. EvalCrafter" + ], + [ + "Question:Which benchmark uses 'Human Judgement' as its evaluation strategy and operates at Stage S0?\nAnswer:", + " B. VBench" + ], + [ + "Question:Which benchmark uses 'Human Judgement' as its evaluation strategy and operates at Stage S0?\nAnswer:", + " C. AgentBench" + ], + [ + "Question:Which benchmark uses 'Human Judgement' as its evaluation strategy and operates at Stage S0?\nAnswer:", + " D. LEGO" + ] + ], + "resps": [ + [ + [ + -24.232826232910156, + false + ] + ], + [ + [ + -22.290088653564453, + false + ] + ], + [ + [ + -18.042863845825195, + false + ] + ], + [ + [ + -19.464723587036133, + false + ] + ] + ], + "filtered_resps": [ + [ + -24.232826232910156, + false + ], + [ + -22.290088653564453, + false + ], + [ + -18.042863845825195, + false + ], + [ + -19.464723587036133, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "0106eeed7c37ae01edec7570e1ae1c221b7254621b32264510364b75a26e88a7", + "prompt_hash": "ee5726b35273b27cab1d8fdc2a7741030ee5426a2d8e9dcdfc70735de5142a86", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 68, + "doc": { + "question": "Which dimension is specifically used in the \\u2018\\abmc\\u2019 scenario to evaluate whether speed varies appropriately across environments?", + "choices": [ + "A. Trajectory", + "B. Embodied Interaction", + "C. Velocity", + "D. Aesthetics" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which dimension is specifically used in the \\u2018\\abmc\\u2019 scenario to evaluate whether speed varies appropriately across environments?\nAnswer:", + " A. Trajectory" + ], + [ + "Question:Which dimension is specifically used in the \\u2018\\abmc\\u2019 scenario to evaluate whether speed varies appropriately across environments?\nAnswer:", + " B. Embodied Interaction" + ], + [ + "Question:Which dimension is specifically used in the \\u2018\\abmc\\u2019 scenario to evaluate whether speed varies appropriately across environments?\nAnswer:", + " C. Velocity" + ], + [ + "Question:Which dimension is specifically used in the \\u2018\\abmc\\u2019 scenario to evaluate whether speed varies appropriately across environments?\nAnswer:", + " D. 
Aesthetics" + ] + ], + "resps": [ + [ + [ + -15.953933715820312, + false + ] + ], + [ + [ + -28.505935668945312, + false + ] + ], + [ + [ + -17.61598777770996, + false + ] + ], + [ + [ + -20.809507369995117, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.953933715820312, + false + ], + [ + -28.505935668945312, + false + ], + [ + -17.61598777770996, + false + ], + [ + -20.809507369995117, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "3990cd538cbdf9365d531ac8430c2b207e2a1d05895d2a94e1f9bd888b363b82", + "prompt_hash": "2ce6d5a0bdaf02c26b50587735534e59d52e3cc879bc864e2be9113582787fa4", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 69, + "doc": { + "question": "What is the name of the video-level evaluator trained on HF-Embodied dataset?", + "choices": [ + "A. GPT-4o", + "B. FlashVStream", + "C. LoRA", + "D. evaluator" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What is the name of the video-level evaluator trained on HF-Embodied dataset?\nAnswer:", + " A. GPT-4o" + ], + [ + "Question:What is the name of the video-level evaluator trained on HF-Embodied dataset?\nAnswer:", + " B. FlashVStream" + ], + [ + "Question:What is the name of the video-level evaluator trained on HF-Embodied dataset?\nAnswer:", + " C. LoRA" + ], + [ + "Question:What is the name of the video-level evaluator trained on HF-Embodied dataset?\nAnswer:", + " D. evaluator" + ] + ], + "resps": [ + [ + [ + -14.393389701843262, + false + ] + ], + [ + [ + -43.895687103271484, + false + ] + ], + [ + [ + -19.041667938232422, + false + ] + ], + [ + [ + -21.171215057373047, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.393389701843262, + false + ], + [ + -43.895687103271484, + false + ], + [ + -19.041667938232422, + false + ], + [ + -21.171215057373047, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "c4c0a3f71245d5aef21fb670f8d69a2ea0c6b9ff284ac337ce92c1639acbbc2b", + "prompt_hash": "b32bc05ee2c22f1d58ed12d7e2fca4906e868fc81e57716c4a023579e4ead278", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 70, + "doc": { + "question": "What was the performance improvement of journey learning over conventional supervised learning on the MATH dataset using only 327 training samples?", + "choices": [ + "Over 5%", + "Over 6%", + "Over 7%", + "Over 8%" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What was the performance improvement of journey learning over conventional supervised learning on the MATH dataset using only 327 training samples?\nAnswer:", + " Over 5%" + ], + [ + "Question:What was the performance improvement of journey learning over conventional supervised learning on the MATH dataset using only 327 training samples?\nAnswer:", + " Over 6%" + ], + [ + "Question:What was the performance improvement of journey learning over conventional supervised learning on the MATH dataset using only 327 training samples?\nAnswer:", + " Over 7%" + ], + [ + "Question:What was the performance improvement of journey learning over conventional supervised learning on the MATH dataset using only 327 training samples?\nAnswer:", + " Over 8%" + ] + ], + "resps": [ + [ + [ + -20.16574478149414, + false + ] + ], + [ + [ + -21.092784881591797, + false + ] + ], + [ + [ + -20.957870483398438, + false + ] + ], + [ + [ + 
-20.980009078979492, + false + ] + ] + ], + "filtered_resps": [ + [ + -20.16574478149414, + false + ], + [ + -21.092784881591797, + false + ], + [ + -20.957870483398438, + false + ], + [ + -20.980009078979492, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "0b38a2a56a0912b086eab973f3de7d206f452885ad75f309e1afecfe669a85ac", + "prompt_hash": "fe48919a07c4d400cd52f3a2516b1427fb5a6a6a0b9a6f25d08171a43a36e00b", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 71, + "doc": { + "question": "Which keyword is highlighted as an indicator of the model's ability to engage in reflection and self-correction?", + "choices": [ + "and", + "if", + "wait", + "consider" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which keyword is highlighted as an indicator of the model's ability to engage in reflection and self-correction?\nAnswer:", + " and" + ], + [ + "Question:Which keyword is highlighted as an indicator of the model's ability to engage in reflection and self-correction?\nAnswer:", + " if" + ], + [ + "Question:Which keyword is highlighted as an indicator of the model's ability to engage in reflection and self-correction?\nAnswer:", + " wait" + ], + [ + "Question:Which keyword is highlighted as an indicator of the model's ability to engage in reflection and self-correction?\nAnswer:", + " consider" + ] + ], + "resps": [ + [ + [ + -8.678184509277344, + false + ] + ], + [ + [ + -11.576055526733398, + false + ] + ], + [ + [ + -11.51467514038086, + false + ] + ], + [ + [ + -11.106267929077148, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.678184509277344, + false + ], + [ + -11.576055526733398, + false + ], + [ + -11.51467514038086, + false + ], + [ + -11.106267929077148, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "4a96dbe55bfbdcc9ba72bc147dab79b3cc54ddf4e5750603408adcd6726a8c30", + "prompt_hash": "03917cb385f93db817b7541a7b7f5839f60a7932c74753f7df8af44ccb7ab021", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 72, + "doc": { + "question": "Which model achieved the highest F1 score on the subset of the PRM800K dataset?", + "choices": [ + "GPT-4o-mini", + "o1-preview", + "o1-mini", + "Math-shepherd" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which model achieved the highest F1 score on the subset of the PRM800K dataset?\nAnswer:", + " GPT-4o-mini" + ], + [ + "Question:Which model achieved the highest F1 score on the subset of the PRM800K dataset?\nAnswer:", + " o1-preview" + ], + [ + "Question:Which model achieved the highest F1 score on the subset of the PRM800K dataset?\nAnswer:", + " o1-mini" + ], + [ + "Question:Which model achieved the highest F1 score on the subset of the PRM800K dataset?\nAnswer:", + " Math-shepherd" + ] + ], + "resps": [ + [ + [ + -16.167285919189453, + false + ] + ], + [ + [ + -29.22384262084961, + false + ] + ], + [ + [ + -26.068363189697266, + false + ] + ], + [ + [ + -29.02634048461914, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.167285919189453, + false + ], + [ + -29.22384262084961, + false + ], + [ + -26.068363189697266, + false + ], + [ + -29.02634048461914, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "a1d92fb539e2211d60d6ca6220ce9941130781647199c626073e3ba9680df7e1", + "prompt_hash": 
"589e4789166075cb696d865c767e1dde0771e39b0b72cf0af4f8e4eaffb0a658", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 73, + "doc": { + "question": "Which phase in the training pipeline involved training on 327 examples of long thoughts including reflections and corrections?", + "choices": [ + "Supervised Fine-Tuning Phase 1", + "Supervised Fine-Tuning Phase 2: Shortcut Learning", + "Supervised Fine-Tuning Phase 2: Journey Learning", + "Direct Preference Optimization" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which phase in the training pipeline involved training on 327 examples of long thoughts including reflections and corrections?\nAnswer:", + " Supervised Fine-Tuning Phase 1" + ], + [ + "Question:Which phase in the training pipeline involved training on 327 examples of long thoughts including reflections and corrections?\nAnswer:", + " Supervised Fine-Tuning Phase 2: Shortcut Learning" + ], + [ + "Question:Which phase in the training pipeline involved training on 327 examples of long thoughts including reflections and corrections?\nAnswer:", + " Supervised Fine-Tuning Phase 2: Journey Learning" + ], + [ + "Question:Which phase in the training pipeline involved training on 327 examples of long thoughts including reflections and corrections?\nAnswer:", + " Direct Preference Optimization" + ] + ], + "resps": [ + [ + [ + -18.895179748535156, + false + ] + ], + [ + [ + -35.28461456298828, + false + ] + ], + [ + [ + -37.04241943359375, + false + ] + ], + [ + [ + -9.66794204711914, + false + ] + ] + ], + "filtered_resps": [ + [ + -18.895179748535156, + false + ], + [ + -35.28461456298828, + false + ], + [ + -37.04241943359375, + false + ], + [ + -9.66794204711914, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "02febbe76e396536a06aa7130637a3193a3899e6913f805ce3b17ccd9c62f61a", + "prompt_hash": "db9356601bfe5d31416c62fc62bc9ff729c695dd8c9a625807dcb9db5b37ea44", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 74, + "doc": { + "question": "Which reasoning paradigm emphasizes learning through trial-and-error, reflection, and backtracking?", + "choices": [ + "Shortcut learning", + "Zero-shot learning", + "Journey learning", + "Chain-of-thought learning" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which reasoning paradigm emphasizes learning through trial-and-error, reflection, and backtracking?\nAnswer:", + " Shortcut learning" + ], + [ + "Question:Which reasoning paradigm emphasizes learning through trial-and-error, reflection, and backtracking?\nAnswer:", + " Zero-shot learning" + ], + [ + "Question:Which reasoning paradigm emphasizes learning through trial-and-error, reflection, and backtracking?\nAnswer:", + " Journey learning" + ], + [ + "Question:Which reasoning paradigm emphasizes learning through trial-and-error, reflection, and backtracking?\nAnswer:", + " Chain-of-thought learning" + ] + ], + "resps": [ + [ + [ + -16.114988327026367, + false + ] + ], + [ + [ + -11.982396125793457, + false + ] + ], + [ + [ + -21.8007869720459, + false + ] + ], + [ + [ + -18.297088623046875, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.114988327026367, + false + ], + [ + -11.982396125793457, + false + ], + [ + -21.8007869720459, + false + ], + [ + -18.297088623046875, + false + ] + ], + "filter": "none", + "metrics": [ + 
"acc", + "acc_norm" + ], + "doc_hash": "b2c6e5fd2e897e762c53be4d164a0b0084990c633d5674b313b910d9317c6223", + "prompt_hash": "a59d7ff020b71e354b01ae1fcf7bb63b83a17c02d02a02ea95235ff1304fb392", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 75, + "doc": { + "question": "What types of data were included in the pre-training of GPT-4o?", + "choices": [ + "Only publicly available datasets", + "Only user-contributed content", + "Web data, code and math, multimodal data", + "Data exclusively from government archives" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What types of data were included in the pre-training of GPT-4o?\nAnswer:", + " Only publicly available datasets" + ], + [ + "Question:What types of data were included in the pre-training of GPT-4o?\nAnswer:", + " Only user-contributed content" + ], + [ + "Question:What types of data were included in the pre-training of GPT-4o?\nAnswer:", + " Web data, code and math, multimodal data" + ], + [ + "Question:What types of data were included in the pre-training of GPT-4o?\nAnswer:", + " Data exclusively from government archives" + ] + ], + "resps": [ + [ + [ + -16.46111297607422, + false + ] + ], + [ + [ + -21.123538970947266, + false + ] + ], + [ + [ + -33.048213958740234, + false + ] + ], + [ + [ + -30.38631248474121, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.46111297607422, + false + ], + [ + -21.123538970947266, + false + ], + [ + -33.048213958740234, + false + ], + [ + -30.38631248474121, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "a372985e07f7abbe54f600d63ee6d5f7fc6bffc04505cbae54da4d2bf27521bc", + "prompt_hash": "0104587737bedb6bdb520872d906a3d9ad0822941ea4075fb38f27cf3e75c6ef", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 76, + "doc": { + "question": "What was the final risk classification for GPT-4o under OpenAI’s Preparedness Framework?", + "choices": [ + "High", + "Medium", + "Low", + "Critical" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What was the final risk classification for GPT-4o under OpenAI’s Preparedness Framework?\nAnswer:", + " High" + ], + [ + "Question:What was the final risk classification for GPT-4o under OpenAI’s Preparedness Framework?\nAnswer:", + " Medium" + ], + [ + "Question:What was the final risk classification for GPT-4o under OpenAI’s Preparedness Framework?\nAnswer:", + " Low" + ], + [ + "Question:What was the final risk classification for GPT-4o under OpenAI’s Preparedness Framework?\nAnswer:", + " Critical" + ] + ], + "resps": [ + [ + [ + -4.722260475158691, + false + ] + ], + [ + [ + -6.896150588989258, + false + ] + ], + [ + [ + -5.012272834777832, + false + ] + ], + [ + [ + -5.753252983093262, + false + ] + ] + ], + "filtered_resps": [ + [ + -4.722260475158691, + false + ], + [ + -6.896150588989258, + false + ], + [ + -5.012272834777832, + false + ], + [ + -5.753252983093262, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "4e0758f5d5174342e37f294d716b53e741c0f4bb1a12c6b51960807a7c821ac3", + "prompt_hash": "3256e306cb57e76dae5c1a79329caf9ed10dd3825feaaadb0e6415d3d8358cc7", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 77, + "doc": { + "question": "How many languages did the 
GPT-4o external red teamers collectively speak?", + "choices": [ + "15", + "29", + "45", + "60" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many languages did the GPT-4o external red teamers collectively speak?\nAnswer:", + " 15" + ], + [ + "Question:How many languages did the GPT-4o external red teamers collectively speak?\nAnswer:", + " 29" + ], + [ + "Question:How many languages did the GPT-4o external red teamers collectively speak?\nAnswer:", + " 45" + ], + [ + "Question:How many languages did the GPT-4o external red teamers collectively speak?\nAnswer:", + " 60" + ] + ], + "resps": [ + [ + [ + -5.321237087249756, + false + ] + ], + [ + [ + -6.68866491317749, + false + ] + ], + [ + [ + -6.993188858032227, + false + ] + ], + [ + [ + -6.728579521179199, + false + ] + ] + ], + "filtered_resps": [ + [ + -5.321237087249756, + false + ], + [ + -6.68866491317749, + false + ], + [ + -6.993188858032227, + false + ], + [ + -6.728579521179199, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "6339a9a7bef3a9eea574a239c5b1e0b022e7a614bde294b7614d603313d2ec5e", + "prompt_hash": "73b492ab37fb4cc79613d41e9e60760c03659310f5c6a8a2c8407621a38a4c6f", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 78, + "doc": { + "question": "Which system was used to convert text to audio for evaluation purposes?", + "choices": [ + "DALL-E", + "Voice Engine", + "AudioLM", + "Whisper" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which system was used to convert text to audio for evaluation purposes?\nAnswer:", + " DALL-E" + ], + [ + "Question:Which system was used to convert text to audio for evaluation purposes?\nAnswer:", + " Voice Engine" + ], + [ + "Question:Which system was used to convert text to audio for evaluation purposes?\nAnswer:", + " AudioLM" + ], + [ + "Question:Which system was used to convert text to audio for evaluation purposes?\nAnswer:", + " Whisper" + ] + ], + "resps": [ + [ + [ + -10.943501472473145, + false + ] + ], + [ + [ + -16.534181594848633, + false + ] + ], + [ + [ + -8.04608154296875, + false + ] + ], + [ + [ + -6.90962553024292, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.943501472473145, + false + ], + [ + -16.534181594848633, + false + ], + [ + -8.04608154296875, + false + ], + [ + -6.90962553024292, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "d28ce463101a24c6e1288b67a0efd3244c77b55169d76730f01b4b7e6177f1df", + "prompt_hash": "ca824eae6c8a5d78e8ea1f9a0dacb4e6f2ceebbd524d7b8fea9502455b9729d0", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 79, + "doc": { + "question": "In voice output classification, what recall was achieved for non-English languages?", + "choices": [ + "0.85", + "0.90", + "1.0", + "0.95" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:In voice output classification, what recall was achieved for non-English languages?\nAnswer:", + " 0.85" + ], + [ + "Question:In voice output classification, what recall was achieved for non-English languages?\nAnswer:", + " 0.90" + ], + [ + "Question:In voice output classification, what recall was achieved for non-English languages?\nAnswer:", + " 1.0" + ], + [ + "Question:In voice output classification, what recall was achieved for non-English languages?\nAnswer:", + " 0.95" + ] 
+ ], + "resps": [ + [ + [ + -9.070045471191406, + false + ] + ], + [ + [ + -9.178909301757812, + false + ] + ], + [ + [ + -8.597393035888672, + false + ] + ], + [ + [ + -9.124435424804688, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.070045471191406, + false + ], + [ + -9.178909301757812, + false + ], + [ + -8.597393035888672, + false + ], + [ + -9.124435424804688, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "32c94fe4437b01c5ef72e2aa2c37b59ea35d0fee9fbd37285420512012a356e6", + "prompt_hash": "436600b9a53a410a7a5ddc2c85b9f999f6477738cbcbdd3545f1dd649528a24f", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 80, + "doc": { + "question": "What is the screen width tolerance used to evaluate click-based actions as correct?", + "choices": [ + "10% of screen width", + "12% of screen width", + "14% of screen width", + "16% of screen width" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the screen width tolerance used to evaluate click-based actions as correct?\nAnswer:", + " 10% of screen width" + ], + [ + "Question:What is the screen width tolerance used to evaluate click-based actions as correct?\nAnswer:", + " 12% of screen width" + ], + [ + "Question:What is the screen width tolerance used to evaluate click-based actions as correct?\nAnswer:", + " 14% of screen width" + ], + [ + "Question:What is the screen width tolerance used to evaluate click-based actions as correct?\nAnswer:", + " 16% of screen width" + ] + ], + "resps": [ + [ + [ + -17.385103225708008, + false + ] + ], + [ + [ + -22.00188446044922, + false + ] + ], + [ + [ + -22.98278045654297, + false + ] + ], + [ + [ + -22.503746032714844, + false + ] + ] + ], + "filtered_resps": [ + [ + -17.385103225708008, + false + ], + [ + -22.00188446044922, + false + ], + [ + -22.98278045654297, + false + ], + [ + -22.503746032714844, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "9f8b5be0a3b9d267dde452ed7eac20b48ec86f51bbdf962ee9a7d9816042876c", + "prompt_hash": "310393acf8b5193cd43714a633b1db6ddcf4973879f52ae045a8c7e741bce8a2", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 81, + "doc": { + "question": "Which benchmark had 1,410 test samples and included 3 basic plus 2 custom actions?", + "choices": [ + "GUI-Act-Web", + "OmniAct-Desktop", + "GUI-Odyssey-App", + "AndroidControl-High" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which benchmark had 1,410 test samples and included 3 basic plus 2 custom actions?\nAnswer:", + " GUI-Act-Web" + ], + [ + "Question:Which benchmark had 1,410 test samples and included 3 basic plus 2 custom actions?\nAnswer:", + " OmniAct-Desktop" + ], + [ + "Question:Which benchmark had 1,410 test samples and included 3 basic plus 2 custom actions?\nAnswer:", + " GUI-Odyssey-App" + ], + [ + "Question:Which benchmark had 1,410 test samples and included 3 basic plus 2 custom actions?\nAnswer:", + " AndroidControl-High" + ] + ], + "resps": [ + [ + [ + -27.018978118896484, + false + ] + ], + [ + [ + -35.97882843017578, + false + ] + ], + [ + [ + -38.291839599609375, + false + ] + ], + [ + [ + -32.71967697143555, + false + ] + ] + ], + "filtered_resps": [ + [ + -27.018978118896484, + false + ], + [ + -35.97882843017578, + false + ], + [ + -38.291839599609375, + false + ], + [ + 
-32.71967697143555, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "2fa1819279e963427cc8e97b7fb0cb586778c2437dd702a77a7cfdc56d8a5af1", + "prompt_hash": "ad0ee4079fd39fe7f78fe4045eb915029c0d600e0584f0f0ad542099cf9cdd08", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 82, + "doc": { + "question": "Which dataset had the highest number of screenshots in the GUI grounding pretraining corpus?", + "choices": [ + "FineWeb-filtered", + "SeeClick", + "Windows-desktop", + "Mind2Web-annotated" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which dataset had the highest number of screenshots in the GUI grounding pretraining corpus?\nAnswer:", + " FineWeb-filtered" + ], + [ + "Question:Which dataset had the highest number of screenshots in the GUI grounding pretraining corpus?\nAnswer:", + " SeeClick" + ], + [ + "Question:Which dataset had the highest number of screenshots in the GUI grounding pretraining corpus?\nAnswer:", + " Windows-desktop" + ], + [ + "Question:Which dataset had the highest number of screenshots in the GUI grounding pretraining corpus?\nAnswer:", + " Mind2Web-annotated" + ] + ], + "resps": [ + [ + [ + -25.71546173095703, + false + ] + ], + [ + [ + -24.613677978515625, + false + ] + ], + [ + [ + -16.762664794921875, + false + ] + ], + [ + [ + -30.07876205444336, + false + ] + ] + ], + "filtered_resps": [ + [ + -25.71546173095703, + false + ], + [ + -24.613677978515625, + false + ], + [ + -16.762664794921875, + false + ], + [ + -30.07876205444336, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "929fea3025f2a9b1affc7e2ab173b8b92436b91ff4daf4678c3664bd0fe130dc", + "prompt_hash": "d037808124da7a33a08b66e11ea5af7d9090610ebf90d71d8a3bbcc12d1bc1c4", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 83, + "doc": { + "question": "What method was used to synthesize sub-instructions for instruction grounding data?", + "choices": [ + "Chain-of-Thought prompting", + "Set-of-Mark prompting", + "Few-shot prompting", + "Auto-regressive prompting" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What method was used to synthesize sub-instructions for instruction grounding data?\nAnswer:", + " Chain-of-Thought prompting" + ], + [ + "Question:What method was used to synthesize sub-instructions for instruction grounding data?\nAnswer:", + " Set-of-Mark prompting" + ], + [ + "Question:What method was used to synthesize sub-instructions for instruction grounding data?\nAnswer:", + " Few-shot prompting" + ], + [ + "Question:What method was used to synthesize sub-instructions for instruction grounding data?\nAnswer:", + " Auto-regressive prompting" + ] + ], + "resps": [ + [ + [ + -10.850055694580078, + false + ] + ], + [ + [ + -22.5295352935791, + false + ] + ], + [ + [ + -11.125772476196289, + false + ] + ], + [ + [ + -13.05416488647461, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.850055694580078, + false + ], + [ + -22.5295352935791, + false + ], + [ + -11.125772476196289, + false + ], + [ + -13.05416488647461, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "a859d38b3a1df340c92657008d4464c3f55dc09d6d2d59c847fae83a73b231af", + "prompt_hash": "33f05316acc2217230f633db435d256e9a6ab4886eaa408aa2850e362322267e", + "target_hash": 
"6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 84, + "doc": { + "question": "What unified action reduces naming conflicts such as 'tap' and 'click'?", + "choices": [ + "Dynamic Action Mapping", + "Standardized Action Encoding", + "Unified Action Space", + "Cross-Domain Action Bridge" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What unified action reduces naming conflicts such as 'tap' and 'click'?\nAnswer:", + " Dynamic Action Mapping" + ], + [ + "Question:What unified action reduces naming conflicts such as 'tap' and 'click'?\nAnswer:", + " Standardized Action Encoding" + ], + [ + "Question:What unified action reduces naming conflicts such as 'tap' and 'click'?\nAnswer:", + " Unified Action Space" + ], + [ + "Question:What unified action reduces naming conflicts such as 'tap' and 'click'?\nAnswer:", + " Cross-Domain Action Bridge" + ] + ], + "resps": [ + [ + [ + -22.388999938964844, + false + ] + ], + [ + [ + -23.831134796142578, + false + ] + ], + [ + [ + -21.0826358795166, + false + ] + ], + [ + [ + -29.27459716796875, + false + ] + ] + ], + "filtered_resps": [ + [ + -22.388999938964844, + false + ], + [ + -23.831134796142578, + false + ], + [ + -21.0826358795166, + false + ], + [ + -29.27459716796875, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "279699b89160ac7cc040c8217e39a7c78bab1d462634e0e1716881353083fe4b", + "prompt_hash": "a43e32d200b3b898278c02ca262be5772b92ae6d2c2e741e1163da5b3ebcb957", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 85, + "doc": { + "question": "What is the codebook size used by the MAGVIT-v2 quantizer for image tokenization in Show-o?", + "choices": [ + "4,096", + "8,192", + "16,384", + "2,048" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the codebook size used by the MAGVIT-v2 quantizer for image tokenization in Show-o?\nAnswer:", + " 4,096" + ], + [ + "Question:What is the codebook size used by the MAGVIT-v2 quantizer for image tokenization in Show-o?\nAnswer:", + " 8,192" + ], + [ + "Question:What is the codebook size used by the MAGVIT-v2 quantizer for image tokenization in Show-o?\nAnswer:", + " 16,384" + ], + [ + "Question:What is the codebook size used by the MAGVIT-v2 quantizer for image tokenization in Show-o?\nAnswer:", + " 2,048" + ] + ], + "resps": [ + [ + [ + -9.725521087646484, + false + ] + ], + [ + [ + -11.152690887451172, + false + ] + ], + [ + [ + -9.869206428527832, + false + ] + ], + [ + [ + -9.555603981018066, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.725521087646484, + false + ], + [ + -11.152690887451172, + false + ], + [ + -9.869206428527832, + false + ], + [ + -9.555603981018066, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "537f8e13a8575c99b1c95067d90cf9ea8a3221d78b0b3c2bef9ba1ed2bc4f74f", + "prompt_hash": "51f7d65b8b8b0b214b4856c8f48f9c5ef119b17e890df6f58283de6b776c09d6", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 86, + "doc": { + "question": "Which operation is prepended to each attention layer in Show-o's architecture?", + "choices": [ + "Layer Normalization", + "Residual Connection", + "QK-Norm", + "Batch Normalization" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which operation is 
prepended to each attention layer in Show-o's architecture?\nAnswer:", + " Layer Normalization" + ], + [ + "Question:Which operation is prepended to each attention layer in Show-o's architecture?\nAnswer:", + " Residual Connection" + ], + [ + "Question:Which operation is prepended to each attention layer in Show-o's architecture?\nAnswer:", + " QK-Norm" + ], + [ + "Question:Which operation is prepended to each attention layer in Show-o's architecture?\nAnswer:", + " Batch Normalization" + ] + ], + "resps": [ + [ + [ + -8.532977104187012, + false + ] + ], + [ + [ + -10.449203491210938, + false + ] + ], + [ + [ + -17.17314910888672, + false + ] + ], + [ + [ + -10.140875816345215, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.532977104187012, + false + ], + [ + -10.449203491210938, + false + ], + [ + -17.17314910888672, + false + ], + [ + -10.140875816345215, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "e82398508a7698a85b0ca7fce40294bedf9db1c8fa02e12de0642116958caea0", + "prompt_hash": "7a36bb0baac4bc975b5211cea95fc4cd933e9fa4a9ab24630ca9b841e27d2697", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 87, + "doc": { + "question": "Which dataset is used in the first stage of training Show-o to learn pixel dependency for image generation?", + "choices": [ + "COCO", + "ImageNet-1K", + "RefinedWeb", + "LAION-aesthetics-12M" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which dataset is used in the first stage of training Show-o to learn pixel dependency for image generation?\nAnswer:", + " COCO" + ], + [ + "Question:Which dataset is used in the first stage of training Show-o to learn pixel dependency for image generation?\nAnswer:", + " ImageNet-1K" + ], + [ + "Question:Which dataset is used in the first stage of training Show-o to learn pixel dependency for image generation?\nAnswer:", + " RefinedWeb" + ], + [ + "Question:Which dataset is used in the first stage of training Show-o to learn pixel dependency for image generation?\nAnswer:", + " LAION-aesthetics-12M" + ] + ], + "resps": [ + [ + [ + -8.915375709533691, + false + ] + ], + [ + [ + -8.84010124206543, + false + ] + ], + [ + [ + -16.313142776489258, + false + ] + ], + [ + [ + -28.85309600830078, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.915375709533691, + false + ], + [ + -8.84010124206543, + false + ], + [ + -16.313142776489258, + false + ], + [ + -28.85309600830078, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "a2fbac91fa5a58fbaad42c9fd06e66ed9324b31f1a17f89ec6f6fbe662e28996", + "prompt_hash": "9eeb735d357ee7d91d5a5551029508dc08da295606cb72b157d6cfa3a55b60d9", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 88, + "doc": { + "question": "What is the overall training loss of Show-o composed of?", + "choices": [ + "Only Mask Token Prediction loss", + "Only Next Token Prediction loss", + "Combination of Mask Token Prediction and Next Token Prediction losses", + "Reconstruction loss only" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the overall training loss of Show-o composed of?\nAnswer:", + " Only Mask Token Prediction loss" + ], + [ + "Question:What is the overall training loss of Show-o composed of?\nAnswer:", + " Only Next Token Prediction loss" + ], + [ + "Question:What is the overall 
training loss of Show-o composed of?\nAnswer:", + " Combination of Mask Token Prediction and Next Token Prediction losses" + ], + [ + "Question:What is the overall training loss of Show-o composed of?\nAnswer:", + " Reconstruction loss only" + ] + ], + "resps": [ + [ + [ + -32.088958740234375, + false + ] + ], + [ + [ + -26.067264556884766, + false + ] + ], + [ + [ + -45.478004455566406, + false + ] + ], + [ + [ + -15.250179290771484, + false + ] + ] + ], + "filtered_resps": [ + [ + -32.088958740234375, + false + ], + [ + -26.067264556884766, + false + ], + [ + -45.478004455566406, + false + ], + [ + -15.250179290771484, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "ad4111d75628f6b33d86e9e55bd4403451877b988a90f06f7e5995caa76f78eb", + "prompt_hash": "3738b86b77deb7b607d86bd29382528e9215d70aa7fc71ea7d06be174226ec65", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 89, + "doc": { + "question": "During inference, how many steps does Show-o take to generate an image?", + "choices": [ + "1 step", + "T steps", + "10 steps", + "512 steps" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:During inference, how many steps does Show-o take to generate an image?\nAnswer:", + " 1 step" + ], + [ + "Question:During inference, how many steps does Show-o take to generate an image?\nAnswer:", + " T steps" + ], + [ + "Question:During inference, how many steps does Show-o take to generate an image?\nAnswer:", + " 10 steps" + ], + [ + "Question:During inference, how many steps does Show-o take to generate an image?\nAnswer:", + " 512 steps" + ] + ], + "resps": [ + [ + [ + -6.212777137756348, + false + ] + ], + [ + [ + -14.03068733215332, + false + ] + ], + [ + [ + -7.745494365692139, + false + ] + ], + [ + [ + -10.251701354980469, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.212777137756348, + false + ], + [ + -14.03068733215332, + false + ], + [ + -7.745494365692139, + false + ], + [ + -10.251701354980469, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "97590d342be3769a9363b2f9240f40242666e6744ec386dc8e6e036b5db150f9", + "prompt_hash": "769bd6096ad2dc7e8d1263e5bfe602f8928265bfdb9024d078d113b3a888c9f0", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 90, + "doc": { + "question": "How many total tokens did Qwen2-VL process during pre-training stages?", + "choices": [ + "600 billion", + "800 billion", + "1.4 trillion", + "2 trillion" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many total tokens did Qwen2-VL process during pre-training stages?\nAnswer:", + " 600 billion" + ], + [ + "Question:How many total tokens did Qwen2-VL process during pre-training stages?\nAnswer:", + " 800 billion" + ], + [ + "Question:How many total tokens did Qwen2-VL process during pre-training stages?\nAnswer:", + " 1.4 trillion" + ], + [ + "Question:How many total tokens did Qwen2-VL process during pre-training stages?\nAnswer:", + " 2 trillion" + ] + ], + "resps": [ + [ + [ + -9.786516189575195, + false + ] + ], + [ + [ + -10.318535804748535, + false + ] + ], + [ + [ + -10.681632995605469, + false + ] + ], + [ + [ + -9.810903549194336, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.786516189575195, + false + ], + [ + -10.318535804748535, + false + ], + [ + -10.681632995605469, + false + ], + [ + 
-9.810903549194336, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "a343616497f6eb027f084ca72bf2bea7781fcffb6f13b70186848784fa662566", + "prompt_hash": "17881d277e9e366a35093991cd6021414d8f960bc8c022bc075c47e96590f8b4", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 91, + "doc": { + "question": "What is the parameter size of the Vision Transformer (ViT) used in Qwen2-VL?", + "choices": [ + "224M", + "675M", + "1.5B", + "2B" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the parameter size of the Vision Transformer (ViT) used in Qwen2-VL?\nAnswer:", + " 224M" + ], + [ + "Question:What is the parameter size of the Vision Transformer (ViT) used in Qwen2-VL?\nAnswer:", + " 675M" + ], + [ + "Question:What is the parameter size of the Vision Transformer (ViT) used in Qwen2-VL?\nAnswer:", + " 1.5B" + ], + [ + "Question:What is the parameter size of the Vision Transformer (ViT) used in Qwen2-VL?\nAnswer:", + " 2B" + ] + ], + "resps": [ + [ + [ + -11.379950523376465, + false + ] + ], + [ + [ + -14.706459999084473, + false + ] + ], + [ + [ + -10.820328712463379, + false + ] + ], + [ + [ + -12.057304382324219, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.379950523376465, + false + ], + [ + -14.706459999084473, + false + ], + [ + -10.820328712463379, + false + ], + [ + -12.057304382324219, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "832be3f29de56d53c7952bcdc890db00f6ccdb0ea4a060b4c54a74a9b415bf26", + "prompt_hash": "88dd86e33d060dccd865e0939d7661e72dd9e27790014246c7e6eb55efb53507", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 92, + "doc": { + "question": "Which technique was introduced in Qwen2-VL to replace absolute position embeddings in the Vision Transformer?", + "choices": [ + "1D-RoPE", + "Naive Dynamic Resolution", + "2D-RoPE", + "M-RoPE" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which technique was introduced in Qwen2-VL to replace absolute position embeddings in the Vision Transformer?\nAnswer:", + " 1D-RoPE" + ], + [ + "Question:Which technique was introduced in Qwen2-VL to replace absolute position embeddings in the Vision Transformer?\nAnswer:", + " Naive Dynamic Resolution" + ], + [ + "Question:Which technique was introduced in Qwen2-VL to replace absolute position embeddings in the Vision Transformer?\nAnswer:", + " 2D-RoPE" + ], + [ + "Question:Which technique was introduced in Qwen2-VL to replace absolute position embeddings in the Vision Transformer?\nAnswer:", + " M-RoPE" + ] + ], + "resps": [ + [ + [ + -23.201679229736328, + false + ] + ], + [ + [ + -26.624053955078125, + false + ] + ], + [ + [ + -19.48124122619629, + false + ] + ], + [ + [ + -22.730064392089844, + false + ] + ] + ], + "filtered_resps": [ + [ + -23.201679229736328, + false + ], + [ + -26.624053955078125, + false + ], + [ + -19.48124122619629, + false + ], + [ + -22.730064392089844, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "bf5e204e913102aebfc35f62da4c9110e1a76382a80de783093c72686f54e765", + "prompt_hash": "213aa4463b3b448efc1f02e8044c4e03a1d866b068bee8662d49dd9d80e21fbb", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 93, + "doc": 
{ + "question": "What is the length limit of videos that Qwen2-VL is capable of understanding?", + "choices": [ + "5 minutes", + "10 minutes", + "15 minutes", + "20+ minutes" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What is the length limit of videos that Qwen2-VL is capable of understanding?\nAnswer:", + " 5 minutes" + ], + [ + "Question:What is the length limit of videos that Qwen2-VL is capable of understanding?\nAnswer:", + " 10 minutes" + ], + [ + "Question:What is the length limit of videos that Qwen2-VL is capable of understanding?\nAnswer:", + " 15 minutes" + ], + [ + "Question:What is the length limit of videos that Qwen2-VL is capable of understanding?\nAnswer:", + " 20+ minutes" + ] + ], + "resps": [ + [ + [ + -6.9861159324646, + false + ] + ], + [ + [ + -7.793810844421387, + false + ] + ], + [ + [ + -7.572601318359375, + false + ] + ], + [ + [ + -15.895883560180664, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.9861159324646, + false + ], + [ + -7.793810844421387, + false + ], + [ + -7.572601318359375, + false + ], + [ + -15.895883560180664, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "359a8f69c0acbb9228cd774f5931ed894339dfbcfe1c35d8f04728dd117dec03", + "prompt_hash": "2d70ef5aa8991e86d7ea0ed86261a6c7835cc0dc4fe835dfd948c2ad23fb407f", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 94, + "doc": { + "question": "Which benchmark did Qwen2-VL-72B achieve a score of 77.8, surpassing GPT-4o?", + "choices": [ + "MathVista", + "RealWorldQA", + "MMStar", + "DocVQA" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which benchmark did Qwen2-VL-72B achieve a score of 77.8, surpassing GPT-4o?\nAnswer:", + " MathVista" + ], + [ + "Question:Which benchmark did Qwen2-VL-72B achieve a score of 77.8, surpassing GPT-4o?\nAnswer:", + " RealWorldQA" + ], + [ + "Question:Which benchmark did Qwen2-VL-72B achieve a score of 77.8, surpassing GPT-4o?\nAnswer:", + " MMStar" + ], + [ + "Question:Which benchmark did Qwen2-VL-72B achieve a score of 77.8, surpassing GPT-4o?\nAnswer:", + " DocVQA" + ] + ], + "resps": [ + [ + [ + -13.382601737976074, + false + ] + ], + [ + [ + -14.511748313903809, + false + ] + ], + [ + [ + -16.00177764892578, + false + ] + ], + [ + [ + -12.91325569152832, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.382601737976074, + false + ], + [ + -14.511748313903809, + false + ], + [ + -16.00177764892578, + false + ], + [ + -12.91325569152832, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "80a3f392f925b83d72b0de9ae7ffab91162ac793f757151d7b8ac89c4b222093", + "prompt_hash": "9ec2afa7905d405eda6affd8a9c929f80e8bea537aca5568db62a5cb9eb23dc1", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 95, + "doc": { + "question": "Which dataset includes prompts from ShareGPT and is annotated with five specific attributes?", + "choices": [ + "Magpie Ultra", + "HelpSteer2", + "OffsetBias", + "WildGuardMix" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which dataset includes prompts from ShareGPT and is annotated with five specific attributes?\nAnswer:", + " Magpie Ultra" + ], + [ + "Question:Which dataset includes prompts from ShareGPT and is annotated with five specific attributes?\nAnswer:", + " HelpSteer2" + ], + [ + "Question:Which 
dataset includes prompts from ShareGPT and is annotated with five specific attributes?\nAnswer:", + " OffsetBias" + ], + [ + "Question:Which dataset includes prompts from ShareGPT and is annotated with five specific attributes?\nAnswer:", + " WildGuardMix" + ] + ], + "resps": [ + [ + [ + -28.369426727294922, + false + ] + ], + [ + [ + -23.477996826171875, + false + ] + ], + [ + [ + -24.307876586914062, + false + ] + ], + [ + [ + -30.440494537353516, + false + ] + ] + ], + "filtered_resps": [ + [ + -28.369426727294922, + false + ], + [ + -23.477996826171875, + false + ], + [ + -24.307876586914062, + false + ], + [ + -30.440494537353516, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "816acbde7d3d995f0878ae52bd5249e7d9dc11fb5c22c5485554cc5ab645ee47", + "prompt_hash": "a6b409831786f78a1950fc5a187ca30dbaec3320e3fb5b0d850d4bbeb5704c7e", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 96, + "doc": { + "question": "Which loss function consistently outperformed others in the study for reward modeling?", + "choices": [ + "Hinge Loss", + "Cross-Entropy", + "Focal Loss", + "Bradley-Terry" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:Which loss function consistently outperformed others in the study for reward modeling?\nAnswer:", + " Hinge Loss" + ], + [ + "Question:Which loss function consistently outperformed others in the study for reward modeling?\nAnswer:", + " Cross-Entropy" + ], + [ + "Question:Which loss function consistently outperformed others in the study for reward modeling?\nAnswer:", + " Focal Loss" + ], + [ + "Question:Which loss function consistently outperformed others in the study for reward modeling?\nAnswer:", + " Bradley-Terry" + ] + ], + "resps": [ + [ + [ + -10.29498291015625, + false + ] + ], + [ + [ + -6.921933174133301, + false + ] + ], + [ + [ + -13.43044662475586, + false + ] + ], + [ + [ + -9.149613380432129, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.29498291015625, + false + ], + [ + -6.921933174133301, + false + ], + [ + -13.43044662475586, + false + ], + [ + -9.149613380432129, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "541e9fe2894b0f49b4709cdd760aa1d58ddff2d3a5250e5c3f03403e1b5b0519", + "prompt_hash": "a793f665a42e67ed3cb092ec91c1e13f07b0aa4e5db79a1c48d55eb0ed8ce46f", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 97, + "doc": { + "question": "What is the size of the Skywork Reward Preference 80K dataset in terms of total preference pairs?", + "choices": [ + "378,000", + "80,000", + "59,539", + "700,000" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the size of the Skywork Reward Preference 80K dataset in terms of total preference pairs?\nAnswer:", + " 378,000" + ], + [ + "Question:What is the size of the Skywork Reward Preference 80K dataset in terms of total preference pairs?\nAnswer:", + " 80,000" + ], + [ + "Question:What is the size of the Skywork Reward Preference 80K dataset in terms of total preference pairs?\nAnswer:", + " 59,539" + ], + [ + "Question:What is the size of the Skywork Reward Preference 80K dataset in terms of total preference pairs?\nAnswer:", + " 700,000" + ] + ], + "resps": [ + [ + [ + -13.991911888122559, + false + ] + ], + [ + [ + -6.92512321472168, + false + ] + ], + [ + [ + -14.930144309997559, + 
false + ] + ], + [ + [ + -10.193219184875488, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.991911888122559, + false + ], + [ + -6.92512321472168, + false + ], + [ + -14.930144309997559, + false + ], + [ + -10.193219184875488, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "9c7304f1822614dc658230f603a51ccef62ce2fd7fa526a20780f2926cc80131", + "prompt_hash": "1b67194f16ca8ff42ec2c50785c1ba0fae603196263f8e7245d190520fbbc403", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 98, + "doc": { + "question": "Which preference dataset was created to address spurious signals such as length bias in reward modeling?", + "choices": [ + "OffsetBias", + "HelpSteer2", + "Magpie Air", + "WildGuardMix" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which preference dataset was created to address spurious signals such as length bias in reward modeling?\nAnswer:", + " OffsetBias" + ], + [ + "Question:Which preference dataset was created to address spurious signals such as length bias in reward modeling?\nAnswer:", + " HelpSteer2" + ], + [ + "Question:Which preference dataset was created to address spurious signals such as length bias in reward modeling?\nAnswer:", + " Magpie Air" + ], + [ + "Question:Which preference dataset was created to address spurious signals such as length bias in reward modeling?\nAnswer:", + " WildGuardMix" + ] + ], + "resps": [ + [ + [ + -19.423641204833984, + false + ] + ], + [ + [ + -24.912960052490234, + false + ] + ], + [ + [ + -27.164596557617188, + false + ] + ], + [ + [ + -30.299091339111328, + false + ] + ] + ], + "filtered_resps": [ + [ + -19.423641204833984, + false + ], + [ + -24.912960052490234, + false + ], + [ + -27.164596557617188, + false + ], + [ + -30.299091339111328, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f1c3744150badebebd1a5bd3004cdcdfc0be7644e56224b379dadd9bf6037680", + "prompt_hash": "a1478f9ea022bc9f3bcfcdbfbcf56dbbf3309ce1bbb4645058515d3b593c35f4", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 99, + "doc": { + "question": "Which model achieved the highest average score on RewardBench among the models listed?", + "choices": [ + "Nemotron-4-340B-Reward", + "Skywork-Reward-Gemma-2-27B", + "InternLM-20B-Reward", + "SFR-nemo-12B-Judge-r" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which model achieved the highest average score on RewardBench among the models listed?\nAnswer:", + " Nemotron-4-340B-Reward" + ], + [ + "Question:Which model achieved the highest average score on RewardBench among the models listed?\nAnswer:", + " Skywork-Reward-Gemma-2-27B" + ], + [ + "Question:Which model achieved the highest average score on RewardBench among the models listed?\nAnswer:", + " InternLM-20B-Reward" + ], + [ + "Question:Which model achieved the highest average score on RewardBench among the models listed?\nAnswer:", + " SFR-nemo-12B-Judge-r" + ] + ], + "resps": [ + [ + [ + -35.623653411865234, + false + ] + ], + [ + [ + -42.98133087158203, + false + ] + ], + [ + [ + -24.74456787109375, + false + ] + ], + [ + [ + -56.343814849853516, + false + ] + ] + ], + "filtered_resps": [ + [ + -35.623653411865234, + false + ], + [ + -42.98133087158203, + false + ], + [ + -24.74456787109375, + false + ], + [ + -56.343814849853516, + false + ] + ], + 
"filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "fdf78a4f218d6f44cd8777913beb60fcf1903a7e3353c3f7e9b03097b4626ff9", + "prompt_hash": "c5c9fbc6a6ffa1795b7bac2ff3cea81de793c3fa936b2b6e10e3a5408a773fff", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + } + ], + "arxiv_mc_2411": [ + { + "doc_id": 0, + "doc": { + "question": "What is the peak data rate target for 6G networks?", + "choices": [ + "10 Gbps", + "100 Gbps", + "1 Tbps", + "10 Tbps" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the peak data rate target for 6G networks?\nAnswer:", + " 10 Gbps" + ], + [ + "Question:What is the peak data rate target for 6G networks?\nAnswer:", + " 100 Gbps" + ], + [ + "Question:What is the peak data rate target for 6G networks?\nAnswer:", + " 1 Tbps" + ], + [ + "Question:What is the peak data rate target for 6G networks?\nAnswer:", + " 10 Tbps" + ] + ], + "resps": [ + [ + [ + -5.873969078063965, + false + ] + ], + [ + [ + -4.819973468780518, + false + ] + ], + [ + [ + -6.002620220184326, + false + ] + ], + [ + [ + -7.384844779968262, + false + ] + ] + ], + "filtered_resps": [ + [ + -5.873969078063965, + false + ], + [ + -4.819973468780518, + false + ], + [ + -6.002620220184326, + false + ], + [ + -7.384844779968262, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "eec87ea7bd2734619ac06ae996519d9ebd74b6278d70c4af453c19857d10fa27", + "prompt_hash": "da9989854daa6aaca25ea247e4f40df164f29efbef51497fed3846972dfd2a20", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 1, + "doc": { + "question": "Which usage scenario in 6G focuses on enabling sensing capabilities like movement tracking and environmental monitoring?", + "choices": [ + "Immersive communication", + "Hyper reliable, low-latency communication", + "Integrated sensing and communication", + "Massive communication" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which usage scenario in 6G focuses on enabling sensing capabilities like movement tracking and environmental monitoring?\nAnswer:", + " Immersive communication" + ], + [ + "Question:Which usage scenario in 6G focuses on enabling sensing capabilities like movement tracking and environmental monitoring?\nAnswer:", + " Hyper reliable, low-latency communication" + ], + [ + "Question:Which usage scenario in 6G focuses on enabling sensing capabilities like movement tracking and environmental monitoring?\nAnswer:", + " Integrated sensing and communication" + ], + [ + "Question:Which usage scenario in 6G focuses on enabling sensing capabilities like movement tracking and environmental monitoring?\nAnswer:", + " Massive communication" + ] + ], + "resps": [ + [ + [ + -15.115461349487305, + false + ] + ], + [ + [ + -36.961116790771484, + false + ] + ], + [ + [ + -11.438902854919434, + false + ] + ], + [ + [ + -17.085294723510742, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.115461349487305, + false + ], + [ + -36.961116790771484, + false + ], + [ + -11.438902854919434, + false + ], + [ + -17.085294723510742, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b07c19a35114e750f431c9afff3917842b3eeca2335694d93e1b08fa508ab18c", + "prompt_hash": "3d93644794afb74c70f8e5e45b917cce4bef90e92d768a37c92d452ff74abdb9", + "target_hash": 
"d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 2, + "doc": { + "question": "What type of materials are mentioned as enabling technologies for Fluid Antenna Systems (FAS)?", + "choices": [ + "Rigid conductors and ceramic insulators", + "Liquid materials, RF pixels, stepper motors, and metamaterials", + "Photonic crystals and superconductors", + "Fiber optics and quantum dots" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What type of materials are mentioned as enabling technologies for Fluid Antenna Systems (FAS)?\nAnswer:", + " Rigid conductors and ceramic insulators" + ], + [ + "Question:What type of materials are mentioned as enabling technologies for Fluid Antenna Systems (FAS)?\nAnswer:", + " Liquid materials, RF pixels, stepper motors, and metamaterials" + ], + [ + "Question:What type of materials are mentioned as enabling technologies for Fluid Antenna Systems (FAS)?\nAnswer:", + " Photonic crystals and superconductors" + ], + [ + "Question:What type of materials are mentioned as enabling technologies for Fluid Antenna Systems (FAS)?\nAnswer:", + " Fiber optics and quantum dots" + ] + ], + "resps": [ + [ + [ + -27.662734985351562, + false + ] + ], + [ + [ + -51.00507736206055, + false + ] + ], + [ + [ + -18.791711807250977, + false + ] + ], + [ + [ + -20.55025863647461, + false + ] + ] + ], + "filtered_resps": [ + [ + -27.662734985351562, + false + ], + [ + -51.00507736206055, + false + ], + [ + -18.791711807250977, + false + ], + [ + -20.55025863647461, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "6017248b4020a1bd9d1de89edc12d80bcccabe21f64eefb720c28c9ad7a63547", + "prompt_hash": "a3007907bbb72246ec5f8411bf73de8fa59f03a60c35a8d7808979f807b79cae", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 3, + "doc": { + "question": "Which technology was inspired by FAS and focuses only on position reconfiguration?", + "choices": [ + "Reconfigurable Intelligent Surfaces", + "Movable Antenna System", + "Non-Orthogonal Multiple Access", + "Continuous Aperture MIMO" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which technology was inspired by FAS and focuses only on position reconfiguration?\nAnswer:", + " Reconfigurable Intelligent Surfaces" + ], + [ + "Question:Which technology was inspired by FAS and focuses only on position reconfiguration?\nAnswer:", + " Movable Antenna System" + ], + [ + "Question:Which technology was inspired by FAS and focuses only on position reconfiguration?\nAnswer:", + " Non-Orthogonal Multiple Access" + ], + [ + "Question:Which technology was inspired by FAS and focuses only on position reconfiguration?\nAnswer:", + " Continuous Aperture MIMO" + ] + ], + "resps": [ + [ + [ + -9.414812088012695, + false + ] + ], + [ + [ + -19.87340545654297, + false + ] + ], + [ + [ + -18.9980411529541, + false + ] + ], + [ + [ + -27.375492095947266, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.414812088012695, + false + ], + [ + -19.87340545654297, + false + ], + [ + -18.9980411529541, + false + ], + [ + -27.375492095947266, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "6b4441be7a721e85bf01c62181a759643acb5ef0cd1f1cc54ca828a513129e43", + "prompt_hash": "ec50620dcf3e101926f2c83ab625fbb096569d82df72f8e2e50fcf87d4266425", + "target_hash": 
"6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 4, + "doc": { + "question": "According to the philosophy quoted by Bruce Lee, what is the core inspiration behind Fluid Antenna Systems?", + "choices": [ + "Rigidity and strength", + "Fixed structure and control", + "Formless adaptability like water", + "Speed and precision like lightning" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:According to the philosophy quoted by Bruce Lee, what is the core inspiration behind Fluid Antenna Systems?\nAnswer:", + " Rigidity and strength" + ], + [ + "Question:According to the philosophy quoted by Bruce Lee, what is the core inspiration behind Fluid Antenna Systems?\nAnswer:", + " Fixed structure and control" + ], + [ + "Question:According to the philosophy quoted by Bruce Lee, what is the core inspiration behind Fluid Antenna Systems?\nAnswer:", + " Formless adaptability like water" + ], + [ + "Question:According to the philosophy quoted by Bruce Lee, what is the core inspiration behind Fluid Antenna Systems?\nAnswer:", + " Speed and precision like lightning" + ] + ], + "resps": [ + [ + [ + -21.77372169494629, + false + ] + ], + [ + [ + -27.31595230102539, + false + ] + ], + [ + [ + -39.78109359741211, + false + ] + ], + [ + [ + -33.8327751159668, + false + ] + ] + ], + "filtered_resps": [ + [ + -21.77372169494629, + false + ], + [ + -27.31595230102539, + false + ], + [ + -39.78109359741211, + false + ], + [ + -33.8327751159668, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "abb6f7b232a71ac5a3da433396f253d80a2ca0f59fef83d8656c6178f446c8ba", + "prompt_hash": "f109106228e3a9f23fe802fcf92d0018fd3135689f42b4af5faa4d5a81340df8", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 5, + "doc": { + "question": "What is the main reason the paper proposes an activation-based importance estimation strategy for pruning?", + "choices": [ + "To improve training efficiency by using backward passes", + "To reduce memory and computation cost by avoiding gradient computation", + "To ensure compatibility with reinforcement learning models", + "To enhance the convergence rate of the optimizer" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the main reason the paper proposes an activation-based importance estimation strategy for pruning?\nAnswer:", + " To improve training efficiency by using backward passes" + ], + [ + "Question:What is the main reason the paper proposes an activation-based importance estimation strategy for pruning?\nAnswer:", + " To reduce memory and computation cost by avoiding gradient computation" + ], + [ + "Question:What is the main reason the paper proposes an activation-based importance estimation strategy for pruning?\nAnswer:", + " To ensure compatibility with reinforcement learning models" + ], + [ + "Question:What is the main reason the paper proposes an activation-based importance estimation strategy for pruning?\nAnswer:", + " To enhance the convergence rate of the optimizer" + ] + ], + "resps": [ + [ + [ + -29.56960678100586, + false + ] + ], + [ + [ + -29.971778869628906, + false + ] + ], + [ + [ + -26.012845993041992, + false + ] + ], + [ + [ + -21.306804656982422, + false + ] + ] + ], + "filtered_resps": [ + [ + -29.56960678100586, + false + ], + [ + -29.971778869628906, + false + ], + [ + -26.012845993041992, + false + ], + [ + 
-21.306804656982422, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "9014bad4efb87b06a349240f8654f967c495a9730c9f52539dda10a679c02965", + "prompt_hash": "ed3aa775dea305a6c6700f5b0d5a921615db4c56bcc14fd274a6e9af802b1a19", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 6, + "doc": { + "question": "Which technique is used to estimate the importance of depth (layers) using cosine distance?", + "choices": [ + "Logit matching", + "Perplexity evaluation", + "Block Importance (BI)", + "LayerNorm scaling" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which technique is used to estimate the importance of depth (layers) using cosine distance?\nAnswer:", + " Logit matching" + ], + [ + "Question:Which technique is used to estimate the importance of depth (layers) using cosine distance?\nAnswer:", + " Perplexity evaluation" + ], + [ + "Question:Which technique is used to estimate the importance of depth (layers) using cosine distance?\nAnswer:", + " Block Importance (BI)" + ], + [ + "Question:Which technique is used to estimate the importance of depth (layers) using cosine distance?\nAnswer:", + " LayerNorm scaling" + ] + ], + "resps": [ + [ + [ + -18.129497528076172, + false + ] + ], + [ + [ + -19.97736358642578, + false + ] + ], + [ + [ + -22.130346298217773, + false + ] + ], + [ + [ + -20.870519638061523, + false + ] + ] + ], + "filtered_resps": [ + [ + -18.129497528076172, + false + ], + [ + -19.97736358642578, + false + ], + [ + -22.130346298217773, + false + ], + [ + -20.870519638061523, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "d2f0d78afdaa7596ecc6fdf6ae7488274a6dea3cd6a83d5eccfd065e4f6a5a54", + "prompt_hash": "9bd727639df70abc9f26f83569d56b87f3cbf59a45bb8e8a898532a74f7323c3", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 7, + "doc": { + "question": "In the process of head pruning, what is added back to the remaining heads to preserve knowledge?", + "choices": [ + "Randomly initialized heads", + "Residual information from pruned heads", + "Gradients of the original heads", + "Normalized attention weights" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:In the process of head pruning, what is added back to the remaining heads to preserve knowledge?\nAnswer:", + " Randomly initialized heads" + ], + [ + "Question:In the process of head pruning, what is added back to the remaining heads to preserve knowledge?\nAnswer:", + " Residual information from pruned heads" + ], + [ + "Question:In the process of head pruning, what is added back to the remaining heads to preserve knowledge?\nAnswer:", + " Gradients of the original heads" + ], + [ + "Question:In the process of head pruning, what is added back to the remaining heads to preserve knowledge?\nAnswer:", + " Normalized attention weights" + ] + ], + "resps": [ + [ + [ + -16.398324966430664, + false + ] + ], + [ + [ + -20.017580032348633, + false + ] + ], + [ + [ + -17.852195739746094, + false + ] + ], + [ + [ + -19.09755516052246, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.398324966430664, + false + ], + [ + -20.017580032348633, + false + ], + [ + -17.852195739746094, + false + ], + [ + -19.09755516052246, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": 
"0d2983d11675001038546a0bcafe2426794fc9c33d4ffae94ed328d7c0b23971", + "prompt_hash": "9d567fc6e697e92f05881bf33f910659dee9ea08852f64601862c068d6ef4421", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 8, + "doc": { + "question": "How many samples are used in the calibration dataset for activation-based importance estimation?", + "choices": [ + "256", + "512", + "1024", + "2048" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many samples are used in the calibration dataset for activation-based importance estimation?\nAnswer:", + " 256" + ], + [ + "Question:How many samples are used in the calibration dataset for activation-based importance estimation?\nAnswer:", + " 512" + ], + [ + "Question:How many samples are used in the calibration dataset for activation-based importance estimation?\nAnswer:", + " 1024" + ], + [ + "Question:How many samples are used in the calibration dataset for activation-based importance estimation?\nAnswer:", + " 2048" + ] + ], + "resps": [ + [ + [ + -6.168458938598633, + false + ] + ], + [ + [ + -6.1954240798950195, + false + ] + ], + [ + [ + -6.884011268615723, + false + ] + ], + [ + [ + -7.902861595153809, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.168458938598633, + false + ], + [ + -6.1954240798950195, + false + ], + [ + -6.884011268615723, + false + ], + [ + -7.902861595153809, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "e6b013c5031193ef5a41ed4462d5c0de4a10b762a1051b60ea7c32e67437ceea", + "prompt_hash": "5f52b44600d809d763b29b4bef6ad0ca70bcc2189ba974d2ef17ec03d844078c", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 9, + "doc": { + "question": "Which loss function is found to be most effective for logit-based knowledge distillation in this work?", + "choices": [ + "Mean Squared Error (MSE)", + "Reverse KL Divergence (R-KLD)", + "Cosine Similarity", + "Kullback-Leibler Divergence (KLD)" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:Which loss function is found to be most effective for logit-based knowledge distillation in this work?\nAnswer:", + " Mean Squared Error (MSE)" + ], + [ + "Question:Which loss function is found to be most effective for logit-based knowledge distillation in this work?\nAnswer:", + " Reverse KL Divergence (R-KLD)" + ], + [ + "Question:Which loss function is found to be most effective for logit-based knowledge distillation in this work?\nAnswer:", + " Cosine Similarity" + ], + [ + "Question:Which loss function is found to be most effective for logit-based knowledge distillation in this work?\nAnswer:", + " Kullback-Leibler Divergence (KLD)" + ] + ], + "resps": [ + [ + [ + -8.091793060302734, + false + ] + ], + [ + [ + -20.987077713012695, + false + ] + ], + [ + [ + -12.251108169555664, + false + ] + ], + [ + [ + -10.422445297241211, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.091793060302734, + false + ], + [ + -20.987077713012695, + false + ], + [ + -12.251108169555664, + false + ], + [ + -10.422445297241211, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b146330c14a52f1e5eb3ad27d75a0d6aa712be86ed927041fc598cbca1e50c78", + "prompt_hash": "291f73cd7fd6e150e50b0088ff0ed3c461b12aaaf85b6b5e22ba43ccfdd17b68", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + 
"acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 10, + "doc": { + "question": "What is the generating velocity formula for discrete Flow Matching using denoiser parameterization?", + "choices": [ + "u^i_t(x^i,z) = \\dot{\\kappa}_t(x^i - z^i)", + "u^i_t(x^i,z) = \\frac{\\dot{\\kappa}_t}{\\kappa_t}(\\delta_{z}(x^i) - p_{1|t}(x^i|z))", + "u^i_t(x^i,z) = \\frac{\\dot{\\kappa}_t}{1-\\kappa_t}(p_{1|t}(x^i|z) - \\delta_{z}(x^i))", + "u^i_t(x^i,z) = \\dot{\\kappa}_t(x^i + z^i)" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the generating velocity formula for discrete Flow Matching using denoiser parameterization?\nAnswer:", + " u^i_t(x^i,z) = \\dot{\\kappa}_t(x^i - z^i)" + ], + [ + "Question:What is the generating velocity formula for discrete Flow Matching using denoiser parameterization?\nAnswer:", + " u^i_t(x^i,z) = \\frac{\\dot{\\kappa}_t}{\\kappa_t}(\\delta_{z}(x^i) - p_{1|t}(x^i|z))" + ], + [ + "Question:What is the generating velocity formula for discrete Flow Matching using denoiser parameterization?\nAnswer:", + " u^i_t(x^i,z) = \\frac{\\dot{\\kappa}_t}{1-\\kappa_t}(p_{1|t}(x^i|z) - \\delta_{z}(x^i))" + ], + [ + "Question:What is the generating velocity formula for discrete Flow Matching using denoiser parameterization?\nAnswer:", + " u^i_t(x^i,z) = \\dot{\\kappa}_t(x^i + z^i)" + ] + ], + "resps": [ + [ + [ + -68.01116943359375, + false + ] + ], + [ + [ + -107.26423645019531, + false + ] + ], + [ + [ + -109.30007934570312, + false + ] + ], + [ + [ + -70.12492370605469, + false + ] + ] + ], + "filtered_resps": [ + [ + -68.01116943359375, + false + ], + [ + -107.26423645019531, + false + ], + [ + -109.30007934570312, + false + ], + [ + -70.12492370605469, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "3e572071690cd40ec828547c492d433d58578af3dcab3e6d254a6a1e417c21d4", + "prompt_hash": "542d77d6830a44a1fe70f95d36a61a73a17813946366d9c754e76dbb5894e066", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 11, + "doc": { + "question": "Which of the following is a key condition for the discrete probability velocity to define a proper PMF?", + "choices": [ + "u_t^i(x^i,z) must be negative for all i and x^i ≠ z^i", + "The sum over i of u_t^i(x^i,z) must be zero", + "u_t^i(x^i,z) must be symmetric in x^i and z", + "The sum over x^i of u_t^i(x^i,z) must be zero and u_t^i(x^i,z) ≥ 0 for x^i ≠ z^i" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:Which of the following is a key condition for the discrete probability velocity to define a proper PMF?\nAnswer:", + " u_t^i(x^i,z) must be negative for all i and x^i ≠ z^i" + ], + [ + "Question:Which of the following is a key condition for the discrete probability velocity to define a proper PMF?\nAnswer:", + " The sum over i of u_t^i(x^i,z) must be zero" + ], + [ + "Question:Which of the following is a key condition for the discrete probability velocity to define a proper PMF?\nAnswer:", + " u_t^i(x^i,z) must be symmetric in x^i and z" + ], + [ + "Question:Which of the following is a key condition for the discrete probability velocity to define a proper PMF?\nAnswer:", + " The sum over x^i of u_t^i(x^i,z) must be zero and u_t^i(x^i,z) ≥ 0 for x^i ≠ z^i" + ] + ], + "resps": [ + [ + [ + -91.10096740722656, + false + ] + ], + [ + [ + -65.13204956054688, + false + ] + ], + [ + [ + -68.94792175292969, + false + ] + ], + [ + [ + -109.533935546875, + false + ] + ] + ], + 
"filtered_resps": [ + [ + -91.10096740722656, + false + ], + [ + -65.13204956054688, + false + ], + [ + -68.94792175292969, + false + ], + [ + -109.533935546875, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "0c766ba70e3c44cffef5e20b395c9e59dc2a99656fe7a6996a7f569c868c52b6", + "prompt_hash": "b21dc9d9aa57995066b718add6746175c197930d2f46fb39ae3d599309277b9f", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 12, + "doc": { + "question": "In the experiments, which source distribution is primarily used for training?", + "choices": [ + "Uniform over all tokens", + "Gaussian noise", + "All-mask sequences \\(\\delta_\\dummy(x)\\)", + "Random binary sequences" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:In the experiments, which source distribution is primarily used for training?\nAnswer:", + " Uniform over all tokens" + ], + [ + "Question:In the experiments, which source distribution is primarily used for training?\nAnswer:", + " Gaussian noise" + ], + [ + "Question:In the experiments, which source distribution is primarily used for training?\nAnswer:", + " All-mask sequences \\(\\delta_\\dummy(x)\\)" + ], + [ + "Question:In the experiments, which source distribution is primarily used for training?\nAnswer:", + " Random binary sequences" + ] + ], + "resps": [ + [ + [ + -28.035892486572266, + false + ] + ], + [ + [ + -14.957950592041016, + false + ] + ], + [ + [ + -76.81466674804688, + false + ] + ], + [ + [ + -23.489944458007812, + false + ] + ] + ], + "filtered_resps": [ + [ + -28.035892486572266, + false + ], + [ + -14.957950592041016, + false + ], + [ + -76.81466674804688, + false + ], + [ + -23.489944458007812, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "6a8ff128da3cb5032b99962c3ace2ce14de5c804eb4bf8537a559ca3760ddc3b", + "prompt_hash": "e5faa1d4061d60bea510fb45889de76a5d0e821c8966fa860c43c14ddb8bd409", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 13, + "doc": { + "question": "What is the entropy score achieved by the 1.7B Discrete Flow Matching model with 1024 NFE on unconditional text generation?", + "choices": [ + "7.1", + "7.6", + "8.3", + "6.7" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the entropy score achieved by the 1.7B Discrete Flow Matching model with 1024 NFE on unconditional text generation?\nAnswer:", + " 7.1" + ], + [ + "Question:What is the entropy score achieved by the 1.7B Discrete Flow Matching model with 1024 NFE on unconditional text generation?\nAnswer:", + " 7.6" + ], + [ + "Question:What is the entropy score achieved by the 1.7B Discrete Flow Matching model with 1024 NFE on unconditional text generation?\nAnswer:", + " 8.3" + ], + [ + "Question:What is the entropy score achieved by the 1.7B Discrete Flow Matching model with 1024 NFE on unconditional text generation?\nAnswer:", + " 6.7" + ] + ], + "resps": [ + [ + [ + -8.013721466064453, + false + ] + ], + [ + [ + -7.946964263916016, + false + ] + ], + [ + [ + -7.716164588928223, + false + ] + ], + [ + [ + -7.653365135192871, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.013721466064453, + false + ], + [ + -7.946964263916016, + false + ], + [ + -7.716164588928223, + false + ], + [ + -7.653365135192871, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" 
+ ], + "doc_hash": "f104cfef0729719cf261c81dd1aae450ed57ebe3dac02c6d8f0dfa014afde700", + "prompt_hash": "ddcad478860bed220703544578375a51f2ee47123b98690d6ebd8e8b610d1433", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 14, + "doc": { + "question": "What does the conditional probability path p_t(x^i|x_0,x_1) become in the simplest convex interpolant case?", + "choices": [ + "A uniform distribution over [d]", + "A Gaussian centered between x_0 and x_1", + "(1-\\kappa_t)\\delta_{x_0}(x^i) + \\kappa_t \\delta_{x_1}(x^i)", + "An average of multiple learned posteriors" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What does the conditional probability path p_t(x^i|x_0,x_1) become in the simplest convex interpolant case?\nAnswer:", + " A uniform distribution over [d]" + ], + [ + "Question:What does the conditional probability path p_t(x^i|x_0,x_1) become in the simplest convex interpolant case?\nAnswer:", + " A Gaussian centered between x_0 and x_1" + ], + [ + "Question:What does the conditional probability path p_t(x^i|x_0,x_1) become in the simplest convex interpolant case?\nAnswer:", + " (1-\\kappa_t)\\delta_{x_0}(x^i) + \\kappa_t \\delta_{x_1}(x^i)" + ], + [ + "Question:What does the conditional probability path p_t(x^i|x_0,x_1) become in the simplest convex interpolant case?\nAnswer:", + " An average of multiple learned posteriors" + ] + ], + "resps": [ + [ + [ + -28.31566619873047, + false + ] + ], + [ + [ + -26.569395065307617, + false + ] + ], + [ + [ + -34.784271240234375, + false + ] + ], + [ + [ + -38.008331298828125, + false + ] + ] + ], + "filtered_resps": [ + [ + -28.31566619873047, + false + ], + [ + -26.569395065307617, + false + ], + [ + -34.784271240234375, + false + ], + [ + -38.008331298828125, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "32da34d1bd40cfa82315f3867a08cad8b4592f25737cf0108a9bd7364027f0ca", + "prompt_hash": "5ddd5f5bd1e88d6f2e9385a4f2dd16cc85ed382780f3729120582bd6d1d6dfb3", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 15, + "doc": { + "question": "Which library is described as a minimalistic implementation focused on transparency and simplicity in reinforcement learning algorithms?", + "choices": [ + "A. Stable Baselines3", + "B. CleanRL", + "C. Ray Rllib", + "D. Dopamine" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which library is described as a minimalistic implementation focused on transparency and simplicity in reinforcement learning algorithms?\nAnswer:", + " A. Stable Baselines3" + ], + [ + "Question:Which library is described as a minimalistic implementation focused on transparency and simplicity in reinforcement learning algorithms?\nAnswer:", + " B. CleanRL" + ], + [ + "Question:Which library is described as a minimalistic implementation focused on transparency and simplicity in reinforcement learning algorithms?\nAnswer:", + " C. Ray Rllib" + ], + [ + "Question:Which library is described as a minimalistic implementation focused on transparency and simplicity in reinforcement learning algorithms?\nAnswer:", + " D. 
Dopamine" + ] + ], + "resps": [ + [ + [ + -18.61655616760254, + false + ] + ], + [ + [ + -22.445934295654297, + false + ] + ], + [ + [ + -30.368227005004883, + false + ] + ], + [ + [ + -24.299724578857422, + false + ] + ] + ], + "filtered_resps": [ + [ + -18.61655616760254, + false + ], + [ + -22.445934295654297, + false + ], + [ + -30.368227005004883, + false + ], + [ + -24.299724578857422, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "904ecd6be1774e67177b9cf34468c95060f6fd6625165af4d0adb6d7c2452bfa", + "prompt_hash": "42a2094e37c0ea21296696b3a2983f73a082205c23e7bd7636ce07eac786bce3", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 16, + "doc": { + "question": "What is the purpose of the 'TimeLimit' wrapper in Gymnasium?", + "choices": [ + "A. To handle multi-agent environments", + "B. To limit the number of parallel environments", + "C. To enforce episode truncation after a set number of steps", + "D. To restrict the size of the action space" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the purpose of the 'TimeLimit' wrapper in Gymnasium?\nAnswer:", + " A. To handle multi-agent environments" + ], + [ + "Question:What is the purpose of the 'TimeLimit' wrapper in Gymnasium?\nAnswer:", + " B. To limit the number of parallel environments" + ], + [ + "Question:What is the purpose of the 'TimeLimit' wrapper in Gymnasium?\nAnswer:", + " C. To enforce episode truncation after a set number of steps" + ], + [ + "Question:What is the purpose of the 'TimeLimit' wrapper in Gymnasium?\nAnswer:", + " D. To restrict the size of the action space" + ] + ], + "resps": [ + [ + [ + -26.94652557373047, + false + ] + ], + [ + [ + -30.293476104736328, + false + ] + ], + [ + [ + -44.030799865722656, + false + ] + ], + [ + [ + -26.48984146118164, + false + ] + ] + ], + "filtered_resps": [ + [ + -26.94652557373047, + false + ], + [ + -30.293476104736328, + false + ], + [ + -44.030799865722656, + false + ], + [ + -26.48984146118164, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "85dc42ac9b74820219e7e7e66f1c3d98d606738371118a8b8472288595de8966", + "prompt_hash": "fe682fd0f2002613a9b1ca97e74c720df39b12300931b0dac7f60a025774e8ed", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 17, + "doc": { + "question": "Which composite space in Gymnasium is equivalent to a sum type in algebraic data types?", + "choices": [ + "A. Tuple", + "B. Dict", + "C. Graph", + "D. OneOf" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:Which composite space in Gymnasium is equivalent to a sum type in algebraic data types?\nAnswer:", + " A. Tuple" + ], + [ + "Question:Which composite space in Gymnasium is equivalent to a sum type in algebraic data types?\nAnswer:", + " B. Dict" + ], + [ + "Question:Which composite space in Gymnasium is equivalent to a sum type in algebraic data types?\nAnswer:", + " C. Graph" + ], + [ + "Question:Which composite space in Gymnasium is equivalent to a sum type in algebraic data types?\nAnswer:", + " D. 
OneOf" + ] + ], + "resps": [ + [ + [ + -14.58735466003418, + false + ] + ], + [ + [ + -19.29799461364746, + false + ] + ], + [ + [ + -15.71833610534668, + false + ] + ], + [ + [ + -24.84959602355957, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.58735466003418, + false + ], + [ + -19.29799461364746, + false + ], + [ + -15.71833610534668, + false + ], + [ + -24.84959602355957, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "0d19232b2ba1f0667f11f8fbd617daf13ecfc4f70863a1719cab8bb40964130d", + "prompt_hash": "81d03b60cac6606bfc09a29327254fd491995841dfafbe33e0fd2b9d640bc77f", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 18, + "doc": { + "question": "What advantage does FuncEnv offer over the standard Env in Gymnasium?", + "choices": [ + "A. It supports more games", + "B. It enables higher resolution rendering", + "C. It allows easy hardware acceleration and theoretical alignment with POMDPs", + "D. It reduces the number of required libraries" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What advantage does FuncEnv offer over the standard Env in Gymnasium?\nAnswer:", + " A. It supports more games" + ], + [ + "Question:What advantage does FuncEnv offer over the standard Env in Gymnasium?\nAnswer:", + " B. It enables higher resolution rendering" + ], + [ + "Question:What advantage does FuncEnv offer over the standard Env in Gymnasium?\nAnswer:", + " C. It allows easy hardware acceleration and theoretical alignment with POMDPs" + ], + [ + "Question:What advantage does FuncEnv offer over the standard Env in Gymnasium?\nAnswer:", + " D. It reduces the number of required libraries" + ] + ], + "resps": [ + [ + [ + -25.4164981842041, + false + ] + ], + [ + [ + -31.929250717163086, + false + ] + ], + [ + [ + -67.37519836425781, + false + ] + ], + [ + [ + -33.48160934448242, + false + ] + ] + ], + "filtered_resps": [ + [ + -25.4164981842041, + false + ], + [ + -31.929250717163086, + false + ], + [ + -67.37519836425781, + false + ], + [ + -33.48160934448242, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "9d21d8c4990fa96ac7bb357064d95a6aaa9aa93bda23c77b06d141f55f93b696", + "prompt_hash": "575fd9b7ee03779f48f5051f52088719a892c99cb7a8dc3824fb9254d66e992c", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 19, + "doc": { + "question": "Which environment category in Gymnasium includes simulations using the Box2D physics engine?", + "choices": [ + "A. Toy text", + "B. Classic control", + "C. Mujoco", + "D. Box2D" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:Which environment category in Gymnasium includes simulations using the Box2D physics engine?\nAnswer:", + " A. Toy text" + ], + [ + "Question:Which environment category in Gymnasium includes simulations using the Box2D physics engine?\nAnswer:", + " B. Classic control" + ], + [ + "Question:Which environment category in Gymnasium includes simulations using the Box2D physics engine?\nAnswer:", + " C. Mujoco" + ], + [ + "Question:Which environment category in Gymnasium includes simulations using the Box2D physics engine?\nAnswer:", + " D. 
Box2D" + ] + ], + "resps": [ + [ + [ + -26.217872619628906, + false + ] + ], + [ + [ + -25.281362533569336, + false + ] + ], + [ + [ + -17.874332427978516, + false + ] + ], + [ + [ + -16.03414535522461, + false + ] + ] + ], + "filtered_resps": [ + [ + -26.217872619628906, + false + ], + [ + -25.281362533569336, + false + ], + [ + -17.874332427978516, + false + ], + [ + -16.03414535522461, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b663e101e05e8daa81280fdbf27f747086424f897b25e4522d58408114cda7e5", + "prompt_hash": "bc815e7a9b1fbdfda309fdbb37ff1f4e46139a48cbe3f87b0b52013093017bfa", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 20, + "doc": { + "question": "What is the main function of the KV Cache in a Transformer model?", + "choices": [ + "To store the attention scores for future layers", + "To reduce the time complexity by storing keys and values from past tokens", + "To improve model accuracy by storing gradients", + "To keep track of the vocabulary index for each token" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the main function of the KV Cache in a Transformer model?\nAnswer:", + " To store the attention scores for future layers" + ], + [ + "Question:What is the main function of the KV Cache in a Transformer model?\nAnswer:", + " To reduce the time complexity by storing keys and values from past tokens" + ], + [ + "Question:What is the main function of the KV Cache in a Transformer model?\nAnswer:", + " To improve model accuracy by storing gradients" + ], + [ + "Question:What is the main function of the KV Cache in a Transformer model?\nAnswer:", + " To keep track of the vocabulary index for each token" + ] + ], + "resps": [ + [ + [ + -25.331092834472656, + false + ] + ], + [ + [ + -33.660465240478516, + false + ] + ], + [ + [ + -26.1849308013916, + false + ] + ], + [ + [ + -31.01213836669922, + false + ] + ] + ], + "filtered_resps": [ + [ + -25.331092834472656, + false + ], + [ + -33.660465240478516, + false + ], + [ + -26.1849308013916, + false + ], + [ + -31.01213836669922, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "3463744ea89b2f25f6539f138a7e4232ea0c8fad55aa5d2f6d1d9c1a0f621c31", + "prompt_hash": "6c634aebb08ad3543ecc982fee051e3bfae803b63d3e7dd0236990c4e853f2ce", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 21, + "doc": { + "question": "What happens to the size of the KV Cache as the input sequence grows?", + "choices": [ + "It remains constant regardless of sequence length", + "It decreases exponentially with sequence length", + "It increases linearly with sequence length", + "It increases quadratically with sequence length" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What happens to the size of the KV Cache as the input sequence grows?\nAnswer:", + " It remains constant regardless of sequence length" + ], + [ + "Question:What happens to the size of the KV Cache as the input sequence grows?\nAnswer:", + " It decreases exponentially with sequence length" + ], + [ + "Question:What happens to the size of the KV Cache as the input sequence grows?\nAnswer:", + " It increases linearly with sequence length" + ], + [ + "Question:What happens to the size of the KV Cache as the input sequence grows?\nAnswer:", + " It increases quadratically 
with sequence length" + ] + ], + "resps": [ + [ + [ + -15.243741035461426, + false + ] + ], + [ + [ + -13.885777473449707, + false + ] + ], + [ + [ + -11.21221923828125, + false + ] + ], + [ + [ + -15.224578857421875, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.243741035461426, + false + ], + [ + -13.885777473449707, + false + ], + [ + -11.21221923828125, + false + ], + [ + -15.224578857421875, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "118ddfec11dcb0fe65b26ed3a40af3d40dadba30da3beb6bd65e7c2fb508ef47", + "prompt_hash": "a91b09a7e82745c745c163096df4fdbbbc90dd0a172550d69395461d43a5b5ba", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 22, + "doc": { + "question": "Which method divides query heads into groups to share key heads for improved KV Cache efficiency?", + "choices": [ + "Multi-Query Attention (MQA)", + "Grouped Query Attention (GQA)", + "Dynamic Memory Compression (DMC)", + "Multi-Head Latent Attention (MLA)" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which method divides query heads into groups to share key heads for improved KV Cache efficiency?\nAnswer:", + " Multi-Query Attention (MQA)" + ], + [ + "Question:Which method divides query heads into groups to share key heads for improved KV Cache efficiency?\nAnswer:", + " Grouped Query Attention (GQA)" + ], + [ + "Question:Which method divides query heads into groups to share key heads for improved KV Cache efficiency?\nAnswer:", + " Dynamic Memory Compression (DMC)" + ], + [ + "Question:Which method divides query heads into groups to share key heads for improved KV Cache efficiency?\nAnswer:", + " Multi-Head Latent Attention (MLA)" + ] + ], + "resps": [ + [ + [ + -14.856151580810547, + false + ] + ], + [ + [ + -8.729805946350098, + false + ] + ], + [ + [ + -22.34358787536621, + false + ] + ], + [ + [ + -27.207630157470703, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.856151580810547, + false + ], + [ + -8.729805946350098, + false + ], + [ + -22.34358787536621, + false + ], + [ + -27.207630157470703, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "90a08f669aa49d769ceac60983fd76066b90276bee096b610e9a9996f44c3983", + "prompt_hash": "1d846ff11639e33d06c3be8b2e5f27d7eb7d5328d7836aba456472da5e3efcee", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 23, + "doc": { + "question": "What is the key idea behind Token Omission Via Attention (TOVA)?", + "choices": [ + "To discard tokens based on entropy scores", + "To merge token representations based on Gaussian weighting", + "To evict tokens with minimal attention weights layer-wise", + "To compress keys using low-rank factorization" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the key idea behind Token Omission Via Attention (TOVA)?\nAnswer:", + " To discard tokens based on entropy scores" + ], + [ + "Question:What is the key idea behind Token Omission Via Attention (TOVA)?\nAnswer:", + " To merge token representations based on Gaussian weighting" + ], + [ + "Question:What is the key idea behind Token Omission Via Attention (TOVA)?\nAnswer:", + " To evict tokens with minimal attention weights layer-wise" + ], + [ + "Question:What is the key idea behind Token Omission Via Attention (TOVA)?\nAnswer:", + " To compress keys using low-rank 
factorization" + ] + ], + "resps": [ + [ + [ + -30.414817810058594, + false + ] + ], + [ + [ + -41.22924041748047, + false + ] + ], + [ + [ + -42.73115921020508, + false + ] + ], + [ + [ + -37.3741455078125, + false + ] + ] + ], + "filtered_resps": [ + [ + -30.414817810058594, + false + ], + [ + -41.22924041748047, + false + ], + [ + -42.73115921020508, + false + ], + [ + -37.3741455078125, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "aecb2b4b9d87e1ec1445221a5e45e21be8e240abab840cef2d7f455ac9e5d3b6", + "prompt_hash": "e853258d32fcf11b3960428130f3102bd970cf4ea7b6fc8680e79fee800d0b38", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 24, + "doc": { + "question": "Which quantization method applies different strategies to key and value caches and uses an attention window to guide quantization?", + "choices": [ + "FlexGen", + "WKVQuant", + "QAQ", + "GEAR" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which quantization method applies different strategies to key and value caches and uses an attention window to guide quantization?\nAnswer:", + " FlexGen" + ], + [ + "Question:Which quantization method applies different strategies to key and value caches and uses an attention window to guide quantization?\nAnswer:", + " WKVQuant" + ], + [ + "Question:Which quantization method applies different strategies to key and value caches and uses an attention window to guide quantization?\nAnswer:", + " QAQ" + ], + [ + "Question:Which quantization method applies different strategies to key and value caches and uses an attention window to guide quantization?\nAnswer:", + " GEAR" + ] + ], + "resps": [ + [ + [ + -15.81790542602539, + false + ] + ], + [ + [ + -21.988216400146484, + false + ] + ], + [ + [ + -12.134068489074707, + false + ] + ], + [ + [ + -15.453916549682617, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.81790542602539, + false + ], + [ + -21.988216400146484, + false + ], + [ + -12.134068489074707, + false + ], + [ + -15.453916549682617, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "9b9ec799f0644347f00ccdc559a2de9b2950ecdd38505a1eea0fa43b6e678612", + "prompt_hash": "7bd19b063a0a24820d40409d88ce869b3d85a9e2382085f4f1e8098cef48240c", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 25, + "doc": { + "question": "What evaluation metric is used for the HumanEval benchmark?", + "choices": [ + "Accuracy", + "F1 Score", + "Pass@1", + "BLEU" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What evaluation metric is used for the HumanEval benchmark?\nAnswer:", + " Accuracy" + ], + [ + "Question:What evaluation metric is used for the HumanEval benchmark?\nAnswer:", + " F1 Score" + ], + [ + "Question:What evaluation metric is used for the HumanEval benchmark?\nAnswer:", + " Pass@1" + ], + [ + "Question:What evaluation metric is used for the HumanEval benchmark?\nAnswer:", + " BLEU" + ] + ], + "resps": [ + [ + [ + -5.508859157562256, + false + ] + ], + [ + [ + -7.320566177368164, + false + ] + ], + [ + [ + -6.618783473968506, + false + ] + ], + [ + [ + -6.3945088386535645, + false + ] + ] + ], + "filtered_resps": [ + [ + -5.508859157562256, + false + ], + [ + -7.320566177368164, + false + ], + [ + -6.618783473968506, + false + ], + [ + -6.3945088386535645, + false + ] + ], + "filter": 
"none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "40b7639a11a2d6401329ccab248911a0b42df0e8dc28672aae354083c26f73e8", + "prompt_hash": "36ddf66b2a490ca68d938f5ebc940a8b85fcd8dc65d94c9a32f6e976909f94cf", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 26, + "doc": { + "question": "How many samples are in the MMLU benchmark?", + "choices": [ + "5000", + "14042", + "1954", + "87048" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:How many samples are in the MMLU benchmark?\nAnswer:", + " 5000" + ], + [ + "Question:How many samples are in the MMLU benchmark?\nAnswer:", + " 14042" + ], + [ + "Question:How many samples are in the MMLU benchmark?\nAnswer:", + " 1954" + ], + [ + "Question:How many samples are in the MMLU benchmark?\nAnswer:", + " 87048" + ] + ], + "resps": [ + [ + [ + -5.450283527374268, + false + ] + ], + [ + [ + -13.787731170654297, + false + ] + ], + [ + [ + -11.449134826660156, + false + ] + ], + [ + [ + -16.09764862060547, + false + ] + ] + ], + "filtered_resps": [ + [ + -5.450283527374268, + false + ], + [ + -13.787731170654297, + false + ], + [ + -11.449134826660156, + false + ], + [ + -16.09764862060547, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "ba828c25f156de6325888a9da9951b77820404aaadb21d08e34211803a90f4b2", + "prompt_hash": "8210b467843557958d435189a7ded66d735a1f9a89ee4a09fd5f1ba9c2fa1cf1", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 27, + "doc": { + "question": "Which benchmark uses the 'MC2_score' metric?", + "choices": [ + "TruthfulQA", + "AGIEval", + "GSM8k", + "SiQA" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which benchmark uses the 'MC2_score' metric?\nAnswer:", + " TruthfulQA" + ], + [ + "Question:Which benchmark uses the 'MC2_score' metric?\nAnswer:", + " AGIEval" + ], + [ + "Question:Which benchmark uses the 'MC2_score' metric?\nAnswer:", + " GSM8k" + ], + [ + "Question:Which benchmark uses the 'MC2_score' metric?\nAnswer:", + " SiQA" + ] + ], + "resps": [ + [ + [ + -12.017866134643555, + false + ] + ], + [ + [ + -11.988103866577148, + false + ] + ], + [ + [ + -9.616325378417969, + false + ] + ], + [ + [ + -19.542951583862305, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.017866134643555, + false + ], + [ + -11.988103866577148, + false + ], + [ + -9.616325378417969, + false + ], + [ + -19.542951583862305, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "3fcc58cfc3351602d8c12fcc817be59779b123a9aefe8e79e34d8584309fa707", + "prompt_hash": "1d5a9234f36bec2a977788235df16dc92ff6c526203551a7286ba6e227ea9586", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 28, + "doc": { + "question": "What is the maximum number of few-shot examples used in GSM8k?", + "choices": [ + "3", + "5", + "8", + "0" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the maximum number of few-shot examples used in GSM8k?\nAnswer:", + " 3" + ], + [ + "Question:What is the maximum number of few-shot examples used in GSM8k?\nAnswer:", + " 5" + ], + [ + "Question:What is the maximum number of few-shot examples used in GSM8k?\nAnswer:", + " 8" + ], + [ + "Question:What is the maximum number of few-shot examples used in 
GSM8k?\nAnswer:", + " 0" + ] + ], + "resps": [ + [ + [ + -3.252291202545166, + false + ] + ], + [ + [ + -2.7015891075134277, + false + ] + ], + [ + [ + -3.2675089836120605, + false + ] + ], + [ + [ + -6.1405487060546875, + false + ] + ] + ], + "filtered_resps": [ + [ + -3.252291202545166, + false + ], + [ + -2.7015891075134277, + false + ], + [ + -3.2675089836120605, + false + ], + [ + -6.1405487060546875, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "6402abb438cc24d2f202a81faf00c17046e16338291e66758b64a58fae4ab0eb", + "prompt_hash": "3327f679ce2b69e15b832d37f8c391899a7a09786e0265e26494977d0bf82a18", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 29, + "doc": { + "question": "Which dataset includes 5000 samples and uses 'EM maj1@1' as its metric?", + "choices": [ + "MATH", + "MBPP", + "PiQA", + "OpenBookQA" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which dataset includes 5000 samples and uses 'EM maj1@1' as its metric?\nAnswer:", + " MATH" + ], + [ + "Question:Which dataset includes 5000 samples and uses 'EM maj1@1' as its metric?\nAnswer:", + " MBPP" + ], + [ + "Question:Which dataset includes 5000 samples and uses 'EM maj1@1' as its metric?\nAnswer:", + " PiQA" + ], + [ + "Question:Which dataset includes 5000 samples and uses 'EM maj1@1' as its metric?\nAnswer:", + " OpenBookQA" + ] + ], + "resps": [ + [ + [ + -12.23568058013916, + false + ] + ], + [ + [ + -12.814970016479492, + false + ] + ], + [ + [ + -16.764488220214844, + false + ] + ], + [ + [ + -18.32901954650879, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.23568058013916, + false + ], + [ + -12.814970016479492, + false + ], + [ + -16.764488220214844, + false + ], + [ + -18.32901954650879, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "22fc097dd2c97659d37f74fb56bb16765fb7a8e35b418b058170082a53c3f80f", + "prompt_hash": "f2fc571d9e327979e9e02ac55b91d28937cf446a2c28ceedc0b42b5e9713ec5d", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 30, + "doc": { + "question": "What is Mini-Omni described as?", + "choices": [ + "A closed-source multimodal model", + "The first open-source end-to-end multimodal model with real-time speech capabilities", + "A text-only language model", + "A dataset for speech assistants" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is Mini-Omni described as?\nAnswer:", + " A closed-source multimodal model" + ], + [ + "Question:What is Mini-Omni described as?\nAnswer:", + " The first open-source end-to-end multimodal model with real-time speech capabilities" + ], + [ + "Question:What is Mini-Omni described as?\nAnswer:", + " A text-only language model" + ], + [ + "Question:What is Mini-Omni described as?\nAnswer:", + " A dataset for speech assistants" + ] + ], + "resps": [ + [ + [ + -20.025798797607422, + false + ] + ], + [ + [ + -49.17539978027344, + false + ] + ], + [ + [ + -21.924789428710938, + false + ] + ], + [ + [ + -28.139144897460938, + false + ] + ] + ], + "filtered_resps": [ + [ + -20.025798797607422, + false + ], + [ + -49.17539978027344, + false + ], + [ + -21.924789428710938, + false + ], + [ + -28.139144897460938, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": 
"a2b690276c4d1ed0d8b85440c4630674bd06ec2ca989bb2c0cb93a9d1352d366", + "prompt_hash": "41e98cf04516b6ad4fae7b0d5180fc01b236a5fb254cd03e7589e27a34380887", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 31, + "doc": { + "question": "Which audio encoder was selected to ensure high-quality audio in Mini-Omni?", + "choices": [ + "HuBERT", + "WavLM", + "SNAC", + "SpearTTS" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which audio encoder was selected to ensure high-quality audio in Mini-Omni?\nAnswer:", + " HuBERT" + ], + [ + "Question:Which audio encoder was selected to ensure high-quality audio in Mini-Omni?\nAnswer:", + " WavLM" + ], + [ + "Question:Which audio encoder was selected to ensure high-quality audio in Mini-Omni?\nAnswer:", + " SNAC" + ], + [ + "Question:Which audio encoder was selected to ensure high-quality audio in Mini-Omni?\nAnswer:", + " SpearTTS" + ] + ], + "resps": [ + [ + [ + -6.846920490264893, + false + ] + ], + [ + [ + -4.520740509033203, + false + ] + ], + [ + [ + -16.42870330810547, + false + ] + ], + [ + [ + -18.480022430419922, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.846920490264893, + false + ], + [ + -4.520740509033203, + false + ], + [ + -16.42870330810547, + false + ], + [ + -18.480022430419922, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "500b61420e80d825b160ea80c086d69b09dbf803be743dc13dac871587d6b136", + "prompt_hash": "1e0185e543741408a3054af6df183379519be7f31028489dae0044fe6abe0e5a", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 32, + "doc": { + "question": "What is the size of the base language model used in Mini-Omni?", + "choices": [ + "7 billion parameters", + "13 billion parameters", + "0.5 billion parameters", + "2 billion parameters" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the size of the base language model used in Mini-Omni?\nAnswer:", + " 7 billion parameters" + ], + [ + "Question:What is the size of the base language model used in Mini-Omni?\nAnswer:", + " 13 billion parameters" + ], + [ + "Question:What is the size of the base language model used in Mini-Omni?\nAnswer:", + " 0.5 billion parameters" + ], + [ + "Question:What is the size of the base language model used in Mini-Omni?\nAnswer:", + " 2 billion parameters" + ] + ], + "resps": [ + [ + [ + -7.209259510040283, + false + ] + ], + [ + [ + -8.974302291870117, + false + ] + ], + [ + [ + -11.013339042663574, + false + ] + ], + [ + [ + -8.469942092895508, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.209259510040283, + false + ], + [ + -8.974302291870117, + false + ], + [ + -11.013339042663574, + false + ], + [ + -8.469942092895508, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "d4000b0545a7feb4021e6fc2823d64ab08bbedd1417b1397cde5ec9df741909d", + "prompt_hash": "0de74d67eaa0ed7054024f50cdde5a742db9019e2f5bf1aea009ad7989472de0", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 33, + "doc": { + "question": "What is the name of the dataset introduced to fine-tune models into a voice assistant style?", + "choices": [ + "LibriSpeech", + "VoiceAssistant-400K", + "Open-Orca", + "Moss-SFT" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + 
"Question:What is the name of the dataset introduced to fine-tune models into a voice assistant style?\nAnswer:", + " LibriSpeech" + ], + [ + "Question:What is the name of the dataset introduced to fine-tune models into a voice assistant style?\nAnswer:", + " VoiceAssistant-400K" + ], + [ + "Question:What is the name of the dataset introduced to fine-tune models into a voice assistant style?\nAnswer:", + " Open-Orca" + ], + [ + "Question:What is the name of the dataset introduced to fine-tune models into a voice assistant style?\nAnswer:", + " Moss-SFT" + ] + ], + "resps": [ + [ + [ + -6.687619209289551, + false + ] + ], + [ + [ + -20.196495056152344, + false + ] + ], + [ + [ + -17.251708984375, + false + ] + ], + [ + [ + -27.790363311767578, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.687619209289551, + false + ], + [ + -20.196495056152344, + false + ], + [ + -17.251708984375, + false + ], + [ + -27.790363311767578, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "ce713f869e9e226e4e6243d90e96d1d8e130a8a761f36585e5f3a48ea1295069", + "prompt_hash": "4e377807034ef44a7a8d59253a6432b3ac6d9464956a15954af4d154870aedff", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 34, + "doc": { + "question": "Which training stage of Mini-Omni involves unfreezing all model weights for holistic fine-tuning?", + "choices": [ + "Stage 1: Modality Alignment", + "Stage 2: Adaptation Training", + "Stage 3: Multi-modal Finetuning", + "Stage 4: Evaluation" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which training stage of Mini-Omni involves unfreezing all model weights for holistic fine-tuning?\nAnswer:", + " Stage 1: Modality Alignment" + ], + [ + "Question:Which training stage of Mini-Omni involves unfreezing all model weights for holistic fine-tuning?\nAnswer:", + " Stage 2: Adaptation Training" + ], + [ + "Question:Which training stage of Mini-Omni involves unfreezing all model weights for holistic fine-tuning?\nAnswer:", + " Stage 3: Multi-modal Finetuning" + ], + [ + "Question:Which training stage of Mini-Omni involves unfreezing all model weights for holistic fine-tuning?\nAnswer:", + " Stage 4: Evaluation" + ] + ], + "resps": [ + [ + [ + -21.08725357055664, + false + ] + ], + [ + [ + -18.947019577026367, + false + ] + ], + [ + [ + -22.240943908691406, + false + ] + ], + [ + [ + -14.598235130310059, + false + ] + ] + ], + "filtered_resps": [ + [ + -21.08725357055664, + false + ], + [ + -18.947019577026367, + false + ], + [ + -22.240943908691406, + false + ], + [ + -14.598235130310059, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "d07e134beb38224450f014439b85131c773e0959af7b93b2e4c921492cf859bd", + "prompt_hash": "7031d671c2508328142687f9266f37ca2135e8ac581b5e9b2f79f1cae7ac811d", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 35, + "doc": { + "question": "What does a single instance of ReKep map an array of keypoints to?", + "choices": [ + "A probability distribution", + "A bounded energy value", + "An unbounded cost", + "A sequence of discrete poses" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What does a single instance of ReKep map an array of keypoints to?\nAnswer:", + " A probability distribution" + ], + [ + "Question:What does a single instance of ReKep map an array of 
keypoints to?\nAnswer:", + " A bounded energy value" + ], + [ + "Question:What does a single instance of ReKep map an array of keypoints to?\nAnswer:", + " An unbounded cost" + ], + [ + "Question:What does a single instance of ReKep map an array of keypoints to?\nAnswer:", + " A sequence of discrete poses" + ] + ], + "resps": [ + [ + [ + -11.133968353271484, + false + ] + ], + [ + [ + -26.93883514404297, + false + ] + ], + [ + [ + -22.620363235473633, + false + ] + ], + [ + [ + -19.65053939819336, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.133968353271484, + false + ], + [ + -26.93883514404297, + false + ], + [ + -22.620363235473633, + false + ], + [ + -19.65053939819336, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "4db36d9910572980f07baf6fe692acf7552b15a3f17912dc3c9dd5ce464447ec", + "prompt_hash": "4cede1e94eded219e5a81eb153a41f1a84cc1f2cdf064b84363143b3049e11c4", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 36, + "doc": { + "question": "Which model is used in the system to propose fine-grained and semantically meaningful keypoints in the scene?", + "choices": [ + "BLIP-2", + "SAM", + "DINOv2", + "AnyGrasp" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which model is used in the system to propose fine-grained and semantically meaningful keypoints in the scene?\nAnswer:", + " BLIP-2" + ], + [ + "Question:Which model is used in the system to propose fine-grained and semantically meaningful keypoints in the scene?\nAnswer:", + " SAM" + ], + [ + "Question:Which model is used in the system to propose fine-grained and semantically meaningful keypoints in the scene?\nAnswer:", + " DINOv2" + ], + [ + "Question:Which model is used in the system to propose fine-grained and semantically meaningful keypoints in the scene?\nAnswer:", + " AnyGrasp" + ] + ], + "resps": [ + [ + [ + -9.148873329162598, + false + ] + ], + [ + [ + -8.042795181274414, + false + ] + ], + [ + [ + -8.681550025939941, + false + ] + ], + [ + [ + -19.21855926513672, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.148873329162598, + false + ], + [ + -8.042795181274414, + false + ], + [ + -8.681550025939941, + false + ], + [ + -19.21855926513672, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "74a656301051bfff3335d6544f70a32cc1ade176e1c8177b21c267b58dcc33b3", + "prompt_hash": "43718c78e0c8a34db04835b5ba255717edd73cb30afa6de56e38a7f3ff8bc1c1", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 37, + "doc": { + "question": "What is the optimization framework’s approximate solving frequency for the tasks considered in the work?", + "choices": [ + "1 Hz", + "5 Hz", + "10 Hz", + "20 Hz" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the optimization framework’s approximate solving frequency for the tasks considered in the work?\nAnswer:", + " 1 Hz" + ], + [ + "Question:What is the optimization framework’s approximate solving frequency for the tasks considered in the work?\nAnswer:", + " 5 Hz" + ], + [ + "Question:What is the optimization framework’s approximate solving frequency for the tasks considered in the work?\nAnswer:", + " 10 Hz" + ], + [ + "Question:What is the optimization framework’s approximate solving frequency for the tasks considered in the work?\nAnswer:", + " 20 Hz" + ] + ], + 
"resps": [ + [ + [ + -11.280828475952148, + false + ] + ], + [ + [ + -11.805965423583984, + false + ] + ], + [ + [ + -10.422658920288086, + false + ] + ], + [ + [ + -11.62740707397461, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.280828475952148, + false + ], + [ + -11.805965423583984, + false + ], + [ + -10.422658920288086, + false + ], + [ + -11.62740707397461, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "bf78edf81140ee8a9beda3a69bd1fe95a2866cca0ccf89f5d91a1553787204a7", + "prompt_hash": "4bece240a600ba03e92ec71087ccf5b03ae8d9393b28dca5645c8b707fccc7e6", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 38, + "doc": { + "question": "What auxiliary cost is specifically included in the sub-goal problem when a stage is concerned with grasping?", + "choices": [ + "Path length minimization", + "A grasp metric", + "Self-collision avoidance", + "Consistency cost" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What auxiliary cost is specifically included in the sub-goal problem when a stage is concerned with grasping?\nAnswer:", + " Path length minimization" + ], + [ + "Question:What auxiliary cost is specifically included in the sub-goal problem when a stage is concerned with grasping?\nAnswer:", + " A grasp metric" + ], + [ + "Question:What auxiliary cost is specifically included in the sub-goal problem when a stage is concerned with grasping?\nAnswer:", + " Self-collision avoidance" + ], + [ + "Question:What auxiliary cost is specifically included in the sub-goal problem when a stage is concerned with grasping?\nAnswer:", + " Consistency cost" + ] + ], + "resps": [ + [ + [ + -18.370542526245117, + false + ] + ], + [ + [ + -19.641986846923828, + false + ] + ], + [ + [ + -14.385573387145996, + false + ] + ], + [ + [ + -15.92432975769043, + false + ] + ] + ], + "filtered_resps": [ + [ + -18.370542526245117, + false + ], + [ + -19.641986846923828, + false + ], + [ + -14.385573387145996, + false + ], + [ + -15.92432975769043, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "c06195ea7e94ff57f5dd396d3fffa0f6546f0629988a77b82a5234c7579a21bc", + "prompt_hash": "330d51adcd40a4f69715390ef1b0223c4b230bab7b8bb500d1fe3ba2f348f4e6", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 39, + "doc": { + "question": "Which component contributed the largest portion of system errors according to the error breakdown analysis?", + "choices": [ + "Keypoint proposal", + "Vision-language model", + "Optimization module", + "Point tracker" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:Which component contributed the largest portion of system errors according to the error breakdown analysis?\nAnswer:", + " Keypoint proposal" + ], + [ + "Question:Which component contributed the largest portion of system errors according to the error breakdown analysis?\nAnswer:", + " Vision-language model" + ], + [ + "Question:Which component contributed the largest portion of system errors according to the error breakdown analysis?\nAnswer:", + " Optimization module" + ], + [ + "Question:Which component contributed the largest portion of system errors according to the error breakdown analysis?\nAnswer:", + " Point tracker" + ] + ], + "resps": [ + [ + [ + -23.096324920654297, + false + ] + ], + [ + [ + 
-21.57367706298828, + false + ] + ], + [ + [ + -17.320581436157227, + false + ] + ], + [ + [ + -19.568580627441406, + false + ] + ] + ], + "filtered_resps": [ + [ + -23.096324920654297, + false + ], + [ + -21.57367706298828, + false + ], + [ + -17.320581436157227, + false + ], + [ + -19.568580627441406, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "04a4dfcb73d83e71a147cfcba69814390ffe6d2a77c6e0cdada07d010423dba8", + "prompt_hash": "6792587cdddb5ff84a1438cd151eaf7860da1b4fb2100f0e33b5cca813c0c760", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 40, + "doc": { + "question": "What is the primary goal of monocular depth estimation?", + "choices": [ + "To predict the depth map from a single image", + "To generate high-resolution RGB images", + "To classify objects in 3D scenes", + "To segment moving objects in videos" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the primary goal of monocular depth estimation?\nAnswer:", + " To predict the depth map from a single image" + ], + [ + "Question:What is the primary goal of monocular depth estimation?\nAnswer:", + " To generate high-resolution RGB images" + ], + [ + "Question:What is the primary goal of monocular depth estimation?\nAnswer:", + " To classify objects in 3D scenes" + ], + [ + "Question:What is the primary goal of monocular depth estimation?\nAnswer:", + " To segment moving objects in videos" + ] + ], + "resps": [ + [ + [ + -11.69544792175293, + false + ] + ], + [ + [ + -20.34946060180664, + false + ] + ], + [ + [ + -17.6307315826416, + false + ] + ], + [ + [ + -21.26361846923828, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.69544792175293, + false + ], + [ + -20.34946060180664, + false + ], + [ + -17.6307315826416, + false + ], + [ + -21.26361846923828, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "fd6ac965fdc8b359488adc36e6b635fc9198c2aba8774119dc641286ade6e6e2", + "prompt_hash": "69901e85b64035444f79c66e1412b12263a82428b19daa57a4134cff44e3f0e7", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 41, + "doc": { + "question": "Which dataset was combined with DynamicReplica to form the synthetic dataset for DepthCrafter training?", + "choices": [ + "MatrixCity", + "ScanNet v2", + "KITTI", + "Sintel" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which dataset was combined with DynamicReplica to form the synthetic dataset for DepthCrafter training?\nAnswer:", + " MatrixCity" + ], + [ + "Question:Which dataset was combined with DynamicReplica to form the synthetic dataset for DepthCrafter training?\nAnswer:", + " ScanNet v2" + ], + [ + "Question:Which dataset was combined with DynamicReplica to form the synthetic dataset for DepthCrafter training?\nAnswer:", + " KITTI" + ], + [ + "Question:Which dataset was combined with DynamicReplica to form the synthetic dataset for DepthCrafter training?\nAnswer:", + " Sintel" + ] + ], + "resps": [ + [ + [ + -18.05780792236328, + false + ] + ], + [ + [ + -16.11438751220703, + false + ] + ], + [ + [ + -7.35317850112915, + false + ] + ], + [ + [ + -15.332847595214844, + false + ] + ] + ], + "filtered_resps": [ + [ + -18.05780792236328, + false + ], + [ + -16.11438751220703, + false + ], + [ + -7.35317850112915, + false + ], + [ + -15.332847595214844, + false + ] + ], + 
"filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "3e85574950d0bcb2dc31b42eca31b8559b154e0263855ed93eb652cdd64d3518", + "prompt_hash": "33448e03dee4e4b13849c0651c6ad9786e0c84071ce59ca2db90133b397de274", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 42, + "doc": { + "question": "How many training stages are involved in DepthCrafter's training strategy?", + "choices": [ + "One", + "Two", + "Three", + "Four" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many training stages are involved in DepthCrafter's training strategy?\nAnswer:", + " One" + ], + [ + "Question:How many training stages are involved in DepthCrafter's training strategy?\nAnswer:", + " Two" + ], + [ + "Question:How many training stages are involved in DepthCrafter's training strategy?\nAnswer:", + " Three" + ], + [ + "Question:How many training stages are involved in DepthCrafter's training strategy?\nAnswer:", + " Four" + ] + ], + "resps": [ + [ + [ + -5.532721042633057, + false + ] + ], + [ + [ + -5.27767276763916, + false + ] + ], + [ + [ + -5.411354064941406, + false + ] + ], + [ + [ + -6.417080402374268, + false + ] + ] + ], + "filtered_resps": [ + [ + -5.532721042633057, + false + ], + [ + -5.27767276763916, + false + ], + [ + -5.411354064941406, + false + ], + [ + -6.417080402374268, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b44adef1648b27a5a5dd13a349c17593441f704701a56c8ee62bd6626e2f6944", + "prompt_hash": "55709d651755d15c0f99595b4611603adc6430a5f68194a99ad0638bf094d428", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 43, + "doc": { + "question": "What is the maximum number of frames DepthCrafter can generate in a single sequence after training?", + "choices": [ + "25", + "50", + "90", + "110" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What is the maximum number of frames DepthCrafter can generate in a single sequence after training?\nAnswer:", + " 25" + ], + [ + "Question:What is the maximum number of frames DepthCrafter can generate in a single sequence after training?\nAnswer:", + " 50" + ], + [ + "Question:What is the maximum number of frames DepthCrafter can generate in a single sequence after training?\nAnswer:", + " 90" + ], + [ + "Question:What is the maximum number of frames DepthCrafter can generate in a single sequence after training?\nAnswer:", + " 110" + ] + ], + "resps": [ + [ + [ + -4.397420406341553, + false + ] + ], + [ + [ + -4.530661106109619, + false + ] + ], + [ + [ + -5.898233413696289, + false + ] + ], + [ + [ + -7.255423069000244, + false + ] + ] + ], + "filtered_resps": [ + [ + -4.397420406341553, + false + ], + [ + -4.530661106109619, + false + ], + [ + -5.898233413696289, + false + ], + [ + -7.255423069000244, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "32ef88ba8b3db37223d2380545cfb74b1dbe2219fe962aff8a8ec1ca9136f19e", + "prompt_hash": "a5b25ded009375961e1d26e28deed8d0d580dbde9bfe97316270ca89d3fdc6f3", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 44, + "doc": { + "question": "Which baseline method does ChronoDepth support in terms of temporal context length?", + "choices": [ + "10 frames", + "25 frames", + "50 frames", + "110 frames" + ], 
+ "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which baseline method does ChronoDepth support in terms of temporal context length?\nAnswer:", + " 10 frames" + ], + [ + "Question:Which baseline method does ChronoDepth support in terms of temporal context length?\nAnswer:", + " 25 frames" + ], + [ + "Question:Which baseline method does ChronoDepth support in terms of temporal context length?\nAnswer:", + " 50 frames" + ], + [ + "Question:Which baseline method does ChronoDepth support in terms of temporal context length?\nAnswer:", + " 110 frames" + ] + ], + "resps": [ + [ + [ + -9.895761489868164, + false + ] + ], + [ + [ + -10.400951385498047, + false + ] + ], + [ + [ + -11.934900283813477, + false + ] + ], + [ + [ + -14.357010841369629, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.895761489868164, + false + ], + [ + -10.400951385498047, + false + ], + [ + -11.934900283813477, + false + ], + [ + -14.357010841369629, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "51c5df2258ee6bbda62c0c66f6152a967f8434e2f9764045f20dae32a9327957", + "prompt_hash": "2060a60d6854a6f73fc75059f57f1ed3d482de4aae258a212b1bcca7ea590006", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 45, + "doc": { + "question": "Which base model is Qwen2.5-Coder derived from?", + "choices": [ + "GPT-4o", + "Qwen2.5", + "StarCoder2", + "CodeLlama" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which base model is Qwen2.5-Coder derived from?\nAnswer:", + " GPT-4o" + ], + [ + "Question:Which base model is Qwen2.5-Coder derived from?\nAnswer:", + " Qwen2.5" + ], + [ + "Question:Which base model is Qwen2.5-Coder derived from?\nAnswer:", + " StarCoder2" + ], + [ + "Question:Which base model is Qwen2.5-Coder derived from?\nAnswer:", + " CodeLlama" + ] + ], + "resps": [ + [ + [ + -9.98691177368164, + false + ] + ], + [ + [ + -2.9870645999908447, + false + ] + ], + [ + [ + -14.584648132324219, + false + ] + ], + [ + [ + -9.186256408691406, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.98691177368164, + false + ], + [ + -2.9870645999908447, + false + ], + [ + -14.584648132324219, + false + ], + [ + -9.186256408691406, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "9184d47659d765f50610d9b95090031ec3c8dc62d330f69af8ec5deaea34f03c", + "prompt_hash": "ebf9dac2363b8a5ca7ba4947530d007bf8719cb7420805d3116a28df8bc57253", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 46, + "doc": { + "question": "How many different model sizes are included in the Qwen2.5-Coder series?", + "choices": [ + "Four", + "Six", + "Eight", + "Ten" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:How many different model sizes are included in the Qwen2.5-Coder series?\nAnswer:", + " Four" + ], + [ + "Question:How many different model sizes are included in the Qwen2.5-Coder series?\nAnswer:", + " Six" + ], + [ + "Question:How many different model sizes are included in the Qwen2.5-Coder series?\nAnswer:", + " Eight" + ], + [ + "Question:How many different model sizes are included in the Qwen2.5-Coder series?\nAnswer:", + " Ten" + ] + ], + "resps": [ + [ + [ + -5.960505962371826, + false + ] + ], + [ + [ + -6.804236888885498, + false + ] + ], + [ + [ + -8.083930015563965, + false + ] + ], + [ + [ + -9.204055786132812, + false + ] 
+ ] + ], + "filtered_resps": [ + [ + -5.960505962371826, + false + ], + [ + -6.804236888885498, + false + ], + [ + -8.083930015563965, + false + ], + [ + -9.204055786132812, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "61511e1ed9fbd67b26fc1d3a2a743382894c8370ced0cc61d26a2bab76d28318", + "prompt_hash": "f0c03dd966a50b77ea4090116c3fbb3b5e4851c743c0eca6bbff3bace1c5cca7", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 47, + "doc": { + "question": "What is the total number of tokens in the Qwen2.5-Coder pretraining dataset?", + "choices": [ + "3.2 trillion", + "4.1 trillion", + "5.5 trillion", + "6.7 trillion" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the total number of tokens in the Qwen2.5-Coder pretraining dataset?\nAnswer:", + " 3.2 trillion" + ], + [ + "Question:What is the total number of tokens in the Qwen2.5-Coder pretraining dataset?\nAnswer:", + " 4.1 trillion" + ], + [ + "Question:What is the total number of tokens in the Qwen2.5-Coder pretraining dataset?\nAnswer:", + " 5.5 trillion" + ], + [ + "Question:What is the total number of tokens in the Qwen2.5-Coder pretraining dataset?\nAnswer:", + " 6.7 trillion" + ] + ], + "resps": [ + [ + [ + -13.193757057189941, + false + ] + ], + [ + [ + -14.902152061462402, + false + ] + ], + [ + [ + -14.214559555053711, + false + ] + ], + [ + [ + -14.328497886657715, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.193757057189941, + false + ], + [ + -14.902152061462402, + false + ], + [ + -14.214559555053711, + false + ], + [ + -14.328497886657715, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "519136bb6844023e6bc3559c19b3c52f48c6f04ed430e4109e45413fee24a586", + "prompt_hash": "5e46854b005aabfaefe305fc51bd6413e5335ef12525ff420d44a53890dc5eb8", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 48, + "doc": { + "question": "Which token is used in Qwen2.5-Coder to mark the end of a text or sequence?", + "choices": [ + "<|fim_suffix|>", + "<|file_sep|>", + "<|endoftext|>", + "<|fim_middle|>" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which token is used in Qwen2.5-Coder to mark the end of a text or sequence?\nAnswer:", + " <|fim_suffix|>" + ], + [ + "Question:Which token is used in Qwen2.5-Coder to mark the end of a text or sequence?\nAnswer:", + " <|file_sep|>" + ], + [ + "Question:Which token is used in Qwen2.5-Coder to mark the end of a text or sequence?\nAnswer:", + " <|endoftext|>" + ], + [ + "Question:Which token is used in Qwen2.5-Coder to mark the end of a text or sequence?\nAnswer:", + " <|fim_middle|>" + ] + ], + "resps": [ + [ + [ + -40.7396125793457, + false + ] + ], + [ + [ + -31.887859344482422, + false + ] + ], + [ + [ + -15.501543045043945, + false + ] + ], + [ + [ + -38.91889572143555, + false + ] + ] + ], + "filtered_resps": [ + [ + -40.7396125793457, + false + ], + [ + -31.887859344482422, + false + ], + [ + -15.501543045043945, + false + ], + [ + -38.91889572143555, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "32688a8ff3a6312b9cf1f4d13435c86284be27e68f112c1ea7c044b55f9876f5", + "prompt_hash": "4a280c533e45a6c1f706ed603bc9f59e96708374dfc6fc0bf823638c32ffe8ad", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", 
+ "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 49, + "doc": { + "question": "What was the final mixture ratio of Code, Text, and Math data chosen for Qwen2.5-Coder?", + "choices": [ + "100:0:0", + "85:15:5", + "70:20:10", + "60:30:10" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What was the final mixture ratio of Code, Text, and Math data chosen for Qwen2.5-Coder?\nAnswer:", + " 100:0:0" + ], + [ + "Question:What was the final mixture ratio of Code, Text, and Math data chosen for Qwen2.5-Coder?\nAnswer:", + " 85:15:5" + ], + [ + "Question:What was the final mixture ratio of Code, Text, and Math data chosen for Qwen2.5-Coder?\nAnswer:", + " 70:20:10" + ], + [ + "Question:What was the final mixture ratio of Code, Text, and Math data chosen for Qwen2.5-Coder?\nAnswer:", + " 60:30:10" + ] + ], + "resps": [ + [ + [ + -12.652618408203125, + false + ] + ], + [ + [ + -11.911613464355469, + false + ] + ], + [ + [ + -10.841472625732422, + false + ] + ], + [ + [ + -11.832803726196289, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.652618408203125, + false + ], + [ + -11.911613464355469, + false + ], + [ + -10.841472625732422, + false + ], + [ + -11.832803726196289, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "8686768174e3565d0100e217dbda69483a98ee1b6994f8f3201ee0ca5e0a49cb", + "prompt_hash": "aac1d2cd9a36d901ef435efee1953e49f52c134a3315544ae637814f935077c2", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 50, + "doc": { + "question": "Which classical equation is widely used to predict the oscillation behavior of spherical cavitation bubbles?", + "choices": [ + "Gilmore equation", + "Keller-Miksis equation", + "Rayleigh-Plesset equation", + "Doubly Asymptotic Approximation equation" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which classical equation is widely used to predict the oscillation behavior of spherical cavitation bubbles?\nAnswer:", + " Gilmore equation" + ], + [ + "Question:Which classical equation is widely used to predict the oscillation behavior of spherical cavitation bubbles?\nAnswer:", + " Keller-Miksis equation" + ], + [ + "Question:Which classical equation is widely used to predict the oscillation behavior of spherical cavitation bubbles?\nAnswer:", + " Rayleigh-Plesset equation" + ], + [ + "Question:Which classical equation is widely used to predict the oscillation behavior of spherical cavitation bubbles?\nAnswer:", + " Doubly Asymptotic Approximation equation" + ] + ], + "resps": [ + [ + [ + -13.879878044128418, + false + ] + ], + [ + [ + -31.828353881835938, + false + ] + ], + [ + [ + -14.75823974609375, + false + ] + ], + [ + [ + -32.73979187011719, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.879878044128418, + false + ], + [ + -31.828353881835938, + false + ], + [ + -14.75823974609375, + false + ], + [ + -32.73979187011719, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "86560bbde1abd696430083a87c3d3ed80ef43f5eb498566253bb3713a6b16b2e", + "prompt_hash": "9888374170c0feb7fc1506a6e95683c4d811614684fd0bb3fc707931241f05ee", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 51, + "doc": { + "question": "What additional factors, besides compressibility and viscosity, have been shown to significantly affect the dynamics of laser-induced 
and spark bubbles?", + "choices": [ + "Electrical conductivity and surface roughness", + "Condensation and evaporation processes", + "Magnetic field fluctuations", + "Bubble shape irregularities" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What additional factors, besides compressibility and viscosity, have been shown to significantly affect the dynamics of laser-induced and spark bubbles?\nAnswer:", + " Electrical conductivity and surface roughness" + ], + [ + "Question:What additional factors, besides compressibility and viscosity, have been shown to significantly affect the dynamics of laser-induced and spark bubbles?\nAnswer:", + " Condensation and evaporation processes" + ], + [ + "Question:What additional factors, besides compressibility and viscosity, have been shown to significantly affect the dynamics of laser-induced and spark bubbles?\nAnswer:", + " Magnetic field fluctuations" + ], + [ + "Question:What additional factors, besides compressibility and viscosity, have been shown to significantly affect the dynamics of laser-induced and spark bubbles?\nAnswer:", + " Bubble shape irregularities" + ] + ], + "resps": [ + [ + [ + -19.15823745727539, + false + ] + ], + [ + [ + -22.268741607666016, + false + ] + ], + [ + [ + -17.82883644104004, + false + ] + ], + [ + [ + -25.504554748535156, + false + ] + ] + ], + "filtered_resps": [ + [ + -19.15823745727539, + false + ], + [ + -22.268741607666016, + false + ], + [ + -17.82883644104004, + false + ], + [ + -25.504554748535156, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "579da9a1d1f14a4f7b9696f859194c3f6a831c5cba3710a01dc5548241c45e6e", + "prompt_hash": "b7184e53f62c00f868964d623ed2924be0ff3e37482871c7dbb7600dc757c8af", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 52, + "doc": { + "question": "What equation of state is employed to model the uniform inner pressure of a bubble under violent compression?", + "choices": [ + "Ideal gas equation", + "Boyle’s law", + "Van der Waals equation", + "Clapeyron equation" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What equation of state is employed to model the uniform inner pressure of a bubble under violent compression?\nAnswer:", + " Ideal gas equation" + ], + [ + "Question:What equation of state is employed to model the uniform inner pressure of a bubble under violent compression?\nAnswer:", + " Boyle’s law" + ], + [ + "Question:What equation of state is employed to model the uniform inner pressure of a bubble under violent compression?\nAnswer:", + " Van der Waals equation" + ], + [ + "Question:What equation of state is employed to model the uniform inner pressure of a bubble under violent compression?\nAnswer:", + " Clapeyron equation" + ] + ], + "resps": [ + [ + [ + -12.687660217285156, + false + ] + ], + [ + [ + -15.20535659790039, + false + ] + ], + [ + [ + -10.195505142211914, + false + ] + ], + [ + [ + -13.616377830505371, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.687660217285156, + false + ], + [ + -15.20535659790039, + false + ], + [ + -10.195505142211914, + false + ], + [ + -13.616377830505371, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "3cb46df70cd1fd88e71c4fb2d06e77ca266e27965aac6d41767d56bcf1122835", + "prompt_hash": "f2b917857232c039bc388235d8cdb0eaadf3dbdbe16624f6c6e91f2d9d335317", + "target_hash": 
"d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 53, + "doc": { + "question": "What is the typical value of the surface tension coefficient at room temperature (293 K) provided in the model parameters?", + "choices": [ + "0.001 Pa·s", + "0.075 N/m", + "0.1402 J·m³·mol⁻²", + "0.55 W·m⁻¹·K⁻¹" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the typical value of the surface tension coefficient at room temperature (293 K) provided in the model parameters?\nAnswer:", + " 0.001 Pa·s" + ], + [ + "Question:What is the typical value of the surface tension coefficient at room temperature (293 K) provided in the model parameters?\nAnswer:", + " 0.075 N/m" + ], + [ + "Question:What is the typical value of the surface tension coefficient at room temperature (293 K) provided in the model parameters?\nAnswer:", + " 0.1402 J·m³·mol⁻²" + ], + [ + "Question:What is the typical value of the surface tension coefficient at room temperature (293 K) provided in the model parameters?\nAnswer:", + " 0.55 W·m⁻¹·K⁻¹" + ] + ], + "resps": [ + [ + [ + -14.709671974182129, + false + ] + ], + [ + [ + -12.447090148925781, + false + ] + ], + [ + [ + -48.58300018310547, + false + ] + ], + [ + [ + -41.24347686767578, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.709671974182129, + false + ], + [ + -12.447090148925781, + false + ], + [ + -48.58300018310547, + false + ], + [ + -41.24347686767578, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "60c2ea3472dcbc20b38df041644ae8bce015f14b1fd11b1ca65a21e057fbe2f5", + "prompt_hash": "2ca5dd46a48399f1a5aa66a281c6a0986d24dfdd7319e23ebdd5d0df8cbe3d80", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 54, + "doc": { + "question": "In the spark-generated bubble experiment, what was the maximum bubble radius reached?", + "choices": [ + "1.01 mm", + "12 mm", + "18.1 mm", + "30 mm" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:In the spark-generated bubble experiment, what was the maximum bubble radius reached?\nAnswer:", + " 1.01 mm" + ], + [ + "Question:In the spark-generated bubble experiment, what was the maximum bubble radius reached?\nAnswer:", + " 12 mm" + ], + [ + "Question:In the spark-generated bubble experiment, what was the maximum bubble radius reached?\nAnswer:", + " 18.1 mm" + ], + [ + "Question:In the spark-generated bubble experiment, what was the maximum bubble radius reached?\nAnswer:", + " 30 mm" + ] + ], + "resps": [ + [ + [ + -12.215747833251953, + false + ] + ], + [ + [ + -9.254501342773438, + false + ] + ], + [ + [ + -13.88467025756836, + false + ] + ], + [ + [ + -9.194122314453125, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.215747833251953, + false + ], + [ + -9.254501342773438, + false + ], + [ + -13.88467025756836, + false + ], + [ + -9.194122314453125, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "0e8a2ce0886b9e422a63b1299efe89deaacb5858a6ea609eaff8ac381b44b4ab", + "prompt_hash": "315a87f41522f6690ece93bd824296a44a8f5d3acece53a5ecfb557a47647875", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 55, + "doc": { + "question": "Which vision-language model serves as the backbone for the \\u201cModelSymbol\\u201d architecture?", + "choices": [ + "PaliGemma", + 
"Flamingo", + "Gemini", + "Octo" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which vision-language model serves as the backbone for the \\u201cModelSymbol\\u201d architecture?\nAnswer:", + " PaliGemma" + ], + [ + "Question:Which vision-language model serves as the backbone for the \\u201cModelSymbol\\u201d architecture?\nAnswer:", + " Flamingo" + ], + [ + "Question:Which vision-language model serves as the backbone for the \\u201cModelSymbol\\u201d architecture?\nAnswer:", + " Gemini" + ], + [ + "Question:Which vision-language model serves as the backbone for the \\u201cModelSymbol\\u201d architecture?\nAnswer:", + " Octo" + ] + ], + "resps": [ + [ + [ + -23.2396240234375, + false + ] + ], + [ + [ + -8.813681602478027, + false + ] + ], + [ + [ + -6.939650535583496, + false + ] + ], + [ + [ + -16.771682739257812, + false + ] + ] + ], + "filtered_resps": [ + [ + -23.2396240234375, + false + ], + [ + -8.813681602478027, + false + ], + [ + -6.939650535583496, + false + ], + [ + -16.771682739257812, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "44cfab75afc74c3f9040b647b44c8a6cd66f888103de6a4e7fc72665cca5ad9f", + "prompt_hash": "e760c9d7b72d0fa964da31194d5bb2ddc1361f8ebc0a9893eb3d7be1f75cf88c", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 56, + "doc": { + "question": "What technique does the \\u201cModelSymbol\\u201d model use to generate continuous action distributions?", + "choices": [ + "Autoregressive discretization", + "Conditional flow matching", + "Reinforcement learning", + "Cross-entropy loss" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What technique does the \\u201cModelSymbol\\u201d model use to generate continuous action distributions?\nAnswer:", + " Autoregressive discretization" + ], + [ + "Question:What technique does the \\u201cModelSymbol\\u201d model use to generate continuous action distributions?\nAnswer:", + " Conditional flow matching" + ], + [ + "Question:What technique does the \\u201cModelSymbol\\u201d model use to generate continuous action distributions?\nAnswer:", + " Reinforcement learning" + ], + [ + "Question:What technique does the \\u201cModelSymbol\\u201d model use to generate continuous action distributions?\nAnswer:", + " Cross-entropy loss" + ] + ], + "resps": [ + [ + [ + -19.554214477539062, + false + ] + ], + [ + [ + -19.457931518554688, + false + ] + ], + [ + [ + -8.617112159729004, + false + ] + ], + [ + [ + -12.441343307495117, + false + ] + ] + ], + "filtered_resps": [ + [ + -19.554214477539062, + false + ], + [ + -19.457931518554688, + false + ], + [ + -8.617112159729004, + false + ], + [ + -12.441343307495117, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "34120c787c63f3cd993bcfdbf7c316dd59a741f2c53a6b9663c10007fc52909a", + "prompt_hash": "47bba7a5ba4c27f3890616b2747252222a0805931bab6c0bb030384c75608899", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 57, + "doc": { + "question": "How many parameters does the \\u201cModelSymbol\\u201d model have, including the action expert?", + "choices": [ + "470M", + "3B", + "3.3B", + "7B" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many parameters does the \\u201cModelSymbol\\u201d model have, including the action expert?\nAnswer:", + " 470M" + ], + [ + 
"Question:How many parameters does the \\u201cModelSymbol\\u201d model have, including the action expert?\nAnswer:", + " 3B" + ], + [ + "Question:How many parameters does the \\u201cModelSymbol\\u201d model have, including the action expert?\nAnswer:", + " 3.3B" + ], + [ + "Question:How many parameters does the \\u201cModelSymbol\\u201d model have, including the action expert?\nAnswer:", + " 7B" + ] + ], + "resps": [ + [ + [ + -14.699792861938477, + false + ] + ], + [ + [ + -12.135706901550293, + false + ] + ], + [ + [ + -14.984294891357422, + false + ] + ], + [ + [ + -11.93716049194336, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.699792861938477, + false + ], + [ + -12.135706901550293, + false + ], + [ + -14.984294891357422, + false + ], + [ + -11.93716049194336, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "290acbf83831bbc31d777b5bc2f83c49ae3dec908c2a92ceac3e542f64403a1f", + "prompt_hash": "e9bb7024e9173b093e62fd7785fba67863ebe1682748bc459de2243494c6f04d", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 58, + "doc": { + "question": "What is the purpose of weighting each task-robot combination by $n^{0.43}$ in the pre-training mixture?", + "choices": [ + "To give more weight to overrepresented tasks", + "To down-weight overrepresented combinations", + "To exclude rare tasks", + "To normalize action dimensions" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the purpose of weighting each task-robot combination by $n^{0.43}$ in the pre-training mixture?\nAnswer:", + " To give more weight to overrepresented tasks" + ], + [ + "Question:What is the purpose of weighting each task-robot combination by $n^{0.43}$ in the pre-training mixture?\nAnswer:", + " To down-weight overrepresented combinations" + ], + [ + "Question:What is the purpose of weighting each task-robot combination by $n^{0.43}$ in the pre-training mixture?\nAnswer:", + " To exclude rare tasks" + ], + [ + "Question:What is the purpose of weighting each task-robot combination by $n^{0.43}$ in the pre-training mixture?\nAnswer:", + " To normalize action dimensions" + ] + ], + "resps": [ + [ + [ + -24.754289627075195, + false + ] + ], + [ + [ + -28.06523895263672, + false + ] + ], + [ + [ + -18.77325439453125, + false + ] + ], + [ + [ + -24.841075897216797, + false + ] + ] + ], + "filtered_resps": [ + [ + -24.754289627075195, + false + ], + [ + -28.06523895263672, + false + ], + [ + -18.77325439453125, + false + ], + [ + -24.841075897216797, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "ae68a34690e4101ae12959abbabd3d053cfe34bc5cdc918da5cc0668c788706b", + "prompt_hash": "cbee554b6bf15797b1f6b7df3a5f97907a8c24a124c42fc767a8197d561e4d63", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 59, + "doc": { + "question": "What is the control frequency that the action chunking architecture enables for dexterous tasks such as laundry folding?", + "choices": [ + "2 Hz", + "10 Hz", + "25 Hz", + "50 Hz" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What is the control frequency that the action chunking architecture enables for dexterous tasks such as laundry folding?\nAnswer:", + " 2 Hz" + ], + [ + "Question:What is the control frequency that the action chunking architecture enables for dexterous tasks such as 
laundry folding?\nAnswer:", + " 10 Hz" + ], + [ + "Question:What is the control frequency that the action chunking architecture enables for dexterous tasks such as laundry folding?\nAnswer:", + " 25 Hz" + ], + [ + "Question:What is the control frequency that the action chunking architecture enables for dexterous tasks such as laundry folding?\nAnswer:", + " 50 Hz" + ] + ], + "resps": [ + [ + [ + -6.943455696105957, + false + ] + ], + [ + [ + -5.619145393371582, + false + ] + ], + [ + [ + -7.140289306640625, + false + ] + ], + [ + [ + -7.105968952178955, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.943455696105957, + false + ], + [ + -5.619145393371582, + false + ], + [ + -7.140289306640625, + false + ], + [ + -7.105968952178955, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "0d1c836e5acfd601aca57b114c2e4b75c32aeb471d59df90b0932f0ad5ce0efd", + "prompt_hash": "699d91475e58382dad8ac2e319ff793e97e57a1799094f57e59be2c803407bde", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 60, + "doc": { + "question": "Which optimizer was used for the weight fitting procedure?", + "choices": [ + "SGD", + "Adam", + "RMSProp", + "Adagrad" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which optimizer was used for the weight fitting procedure?\nAnswer:", + " SGD" + ], + [ + "Question:Which optimizer was used for the weight fitting procedure?\nAnswer:", + " Adam" + ], + [ + "Question:Which optimizer was used for the weight fitting procedure?\nAnswer:", + " RMSProp" + ], + [ + "Question:Which optimizer was used for the weight fitting procedure?\nAnswer:", + " Adagrad" + ] + ], + "resps": [ + [ + [ + -5.789969444274902, + false + ] + ], + [ + [ + -2.538813591003418, + false + ] + ], + [ + [ + -6.673875331878662, + false + ] + ], + [ + [ + -5.803485870361328, + false + ] + ] + ], + "filtered_resps": [ + [ + -5.789969444274902, + false + ], + [ + -2.538813591003418, + false + ], + [ + -6.673875331878662, + false + ], + [ + -5.803485870361328, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "090dde0ae1db7d78793c9d6912aa6afb8c8f6488d175d3c5ea5850dc65750634", + "prompt_hash": "7aee1cbf7079e009ad787cb4272dd65a51025f5d2c300668e30a7dd42218369a", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 61, + "doc": { + "question": "How many completions were manually labeled in the Gold set?", + "choices": [ + "268", + "132", + "518", + "118" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many completions were manually labeled in the Gold set?\nAnswer:", + " 268" + ], + [ + "Question:How many completions were manually labeled in the Gold set?\nAnswer:", + " 132" + ], + [ + "Question:How many completions were manually labeled in the Gold set?\nAnswer:", + " 518" + ], + [ + "Question:How many completions were manually labeled in the Gold set?\nAnswer:", + " 118" + ] + ], + "resps": [ + [ + [ + -7.861661911010742, + false + ] + ], + [ + [ + -6.840625762939453, + false + ] + ], + [ + [ + -8.422104835510254, + false + ] + ], + [ + [ + -6.897829055786133, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.861661911010742, + false + ], + [ + -6.840625762939453, + false + ], + [ + -8.422104835510254, + false + ], + [ + -6.897829055786133, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + 
], + "doc_hash": "9de3ad02da421e52280aa159084cbd6ea988739691cdd2ffc4d2109cc4b80c81", + "prompt_hash": "9d41457bfedf823bd9b92b57415b78e62d7f5a14128f49243121a9c5d230afed", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 62, + "doc": { + "question": "What was the learning rate used during weight fitting?", + "choices": [ + "0.001", + "0.01", + "0.05", + "0.1" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What was the learning rate used during weight fitting?\nAnswer:", + " 0.001" + ], + [ + "Question:What was the learning rate used during weight fitting?\nAnswer:", + " 0.01" + ], + [ + "Question:What was the learning rate used during weight fitting?\nAnswer:", + " 0.05" + ], + [ + "Question:What was the learning rate used during weight fitting?\nAnswer:", + " 0.1" + ] + ], + "resps": [ + [ + [ + -4.894764423370361, + false + ] + ], + [ + [ + -4.790251731872559, + false + ] + ], + [ + [ + -6.303414344787598, + false + ] + ], + [ + [ + -5.2953596115112305, + false + ] + ] + ], + "filtered_resps": [ + [ + -4.894764423370361, + false + ], + [ + -4.790251731872559, + false + ], + [ + -6.303414344787598, + false + ], + [ + -5.2953596115112305, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "4324393b6c3286950e5766986366bc8e8568567b195d8a801d217c700e457dae", + "prompt_hash": "fad73082b060c33f0c83c2209196e99cc73ff6237e07ebb4a45d4124a70d6842", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 63, + "doc": { + "question": "Which content policy areas were considered in the experiments?", + "choices": [ + "Erotic Content, Hate Speech, Criminal Advice, Self-Harm", + "Political Speech, Hate Speech, Spam, Self-Harm", + "Fraud, Hate Speech, Misinformation, Self-Harm", + "Erotic Content, Terrorism, Criminal Advice, Bullying" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which content policy areas were considered in the experiments?\nAnswer:", + " Erotic Content, Hate Speech, Criminal Advice, Self-Harm" + ], + [ + "Question:Which content policy areas were considered in the experiments?\nAnswer:", + " Political Speech, Hate Speech, Spam, Self-Harm" + ], + [ + "Question:Which content policy areas were considered in the experiments?\nAnswer:", + " Fraud, Hate Speech, Misinformation, Self-Harm" + ], + [ + "Question:Which content policy areas were considered in the experiments?\nAnswer:", + " Erotic Content, Terrorism, Criminal Advice, Bullying" + ] + ], + "resps": [ + [ + [ + -43.40247344970703, + false + ] + ], + [ + [ + -37.416038513183594, + false + ] + ], + [ + [ + -29.05992889404297, + false + ] + ], + [ + [ + -49.14972686767578, + false + ] + ] + ], + "filtered_resps": [ + [ + -43.40247344970703, + false + ], + [ + -37.416038513183594, + false + ], + [ + -29.05992889404297, + false + ], + [ + -49.14972686767578, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "ec1dc5d71815891ddedae21bbe7cc1a744059a5e451c5bcc2093a92d4e0a6118", + "prompt_hash": "c03957363758c5755716117db8f5e52b831f079592e8fbcb8ae08130ab55f79e", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 64, + "doc": { + "question": "What was the size of the synthetically generated RBR weight fitting comparison dataset ($\\\\DRBR$)?", + "choices": [ + 
"~6.7K", + "518", + "6.7K * 4", + "3K" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What was the size of the synthetically generated RBR weight fitting comparison dataset ($\\\\DRBR$)?\nAnswer:", + " ~6.7K" + ], + [ + "Question:What was the size of the synthetically generated RBR weight fitting comparison dataset ($\\\\DRBR$)?\nAnswer:", + " 518" + ], + [ + "Question:What was the size of the synthetically generated RBR weight fitting comparison dataset ($\\\\DRBR$)?\nAnswer:", + " 6.7K * 4" + ], + [ + "Question:What was the size of the synthetically generated RBR weight fitting comparison dataset ($\\\\DRBR$)?\nAnswer:", + " 3K" + ] + ], + "resps": [ + [ + [ + -19.00811004638672, + false + ] + ], + [ + [ + -9.766897201538086, + false + ] + ], + [ + [ + -26.137615203857422, + false + ] + ], + [ + [ + -9.225601196289062, + false + ] + ] + ], + "filtered_resps": [ + [ + -19.00811004638672, + false + ], + [ + -9.766897201538086, + false + ], + [ + -26.137615203857422, + false + ], + [ + -9.225601196289062, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7ee279fed47dd700458e5d931b275ac58723b8d23378f36ec4753610edbda3ca", + "prompt_hash": "8698351b05ba5a7514fea881bafb225b56a1c0c8657ba5062889ef7098db1c3e", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 65, + "doc": { + "question": "How many short, fact-seeking questions are included in the SimpleQA benchmark?", + "choices": [ + "3,500", + "4,326", + "5,200", + "6,000" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:How many short, fact-seeking questions are included in the SimpleQA benchmark?\nAnswer:", + " 3,500" + ], + [ + "Question:How many short, fact-seeking questions are included in the SimpleQA benchmark?\nAnswer:", + " 4,326" + ], + [ + "Question:How many short, fact-seeking questions are included in the SimpleQA benchmark?\nAnswer:", + " 5,200" + ], + [ + "Question:How many short, fact-seeking questions are included in the SimpleQA benchmark?\nAnswer:", + " 6,000" + ] + ], + "resps": [ + [ + [ + -11.849647521972656, + false + ] + ], + [ + [ + -15.20968246459961, + false + ] + ], + [ + [ + -13.093934059143066, + false + ] + ], + [ + [ + -9.874208450317383, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.849647521972656, + false + ], + [ + -15.20968246459961, + false + ], + [ + -13.093934059143066, + false + ], + [ + -9.874208450317383, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b99421e5a3ca71704d84295315f2f7d05abf1b053eea290e1385acdf3a8c58cd", + "prompt_hash": "73ea9f9187b6d5d8d6c3ca3d9892d2a486a91e14344cae62dc4d2ec031148a3b", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 66, + "doc": { + "question": "Which U.S. TV station did the Canadian reality series 'To Serve and Protect' debut on?", + "choices": [ + "CBS", + "NBC", + "KVOS-TV", + "FOX" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which U.S. TV station did the Canadian reality series 'To Serve and Protect' debut on?\nAnswer:", + " CBS" + ], + [ + "Question:Which U.S. TV station did the Canadian reality series 'To Serve and Protect' debut on?\nAnswer:", + " NBC" + ], + [ + "Question:Which U.S. TV station did the Canadian reality series 'To Serve and Protect' debut on?\nAnswer:", + " KVOS-TV" + ], + [ + "Question:Which U.S. 
TV station did the Canadian reality series 'To Serve and Protect' debut on?\nAnswer:", + " FOX" + ] + ], + "resps": [ + [ + [ + -5.485742568969727, + false + ] + ], + [ + [ + -4.589082717895508, + false + ] + ], + [ + [ + -20.12972640991211, + false + ] + ], + [ + [ + -7.1394171714782715, + false + ] + ] + ], + "filtered_resps": [ + [ + -5.485742568969727, + false + ], + [ + -4.589082717895508, + false + ], + [ + -20.12972640991211, + false + ], + [ + -7.1394171714782715, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "d47ff8aed5c4c8e891737fc835f92eaec4e7017a5027c46907f35c132f267484", + "prompt_hash": "c3c6b529c8d2cdd703d3ee5d1cb1fb9247e86a111d23697441045befcd7dedc1", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 67, + "doc": { + "question": "According to the dataset diversity analysis, which topic had the largest number of questions in SimpleQA?", + "choices": [ + "Art", + "Science & Technology", + "Politics", + "Sports" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:According to the dataset diversity analysis, which topic had the largest number of questions in SimpleQA?\nAnswer:", + " Art" + ], + [ + "Question:According to the dataset diversity analysis, which topic had the largest number of questions in SimpleQA?\nAnswer:", + " Science & Technology" + ], + [ + "Question:According to the dataset diversity analysis, which topic had the largest number of questions in SimpleQA?\nAnswer:", + " Politics" + ], + [ + "Question:According to the dataset diversity analysis, which topic had the largest number of questions in SimpleQA?\nAnswer:", + " Sports" + ] + ], + "resps": [ + [ + [ + -7.214799404144287, + false + ] + ], + [ + [ + -19.02574920654297, + false + ] + ], + [ + [ + -5.521854877471924, + false + ] + ], + [ + [ + -7.079794406890869, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.214799404144287, + false + ], + [ + -19.02574920654297, + false + ], + [ + -5.521854877471924, + false + ], + [ + -7.079794406890869, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "2a14bf241290cd59c02e68a8a871233553960d2d6b92ecef98384c9d0295d751", + "prompt_hash": "457e06339f9efb9f7a4fe366fb0483f40a53b329c2841f568610f8dc85099a12", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 68, + "doc": { + "question": "What percentage of SimpleQA answers were classified as dates?", + "choices": [ + "15.3%", + "18.0%", + "24.1%", + "32.8%" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What percentage of SimpleQA answers were classified as dates?\nAnswer:", + " 15.3%" + ], + [ + "Question:What percentage of SimpleQA answers were classified as dates?\nAnswer:", + " 18.0%" + ], + [ + "Question:What percentage of SimpleQA answers were classified as dates?\nAnswer:", + " 24.1%" + ], + [ + "Question:What percentage of SimpleQA answers were classified as dates?\nAnswer:", + " 32.8%" + ] + ], + "resps": [ + [ + [ + -13.22205924987793, + false + ] + ], + [ + [ + -13.355619430541992, + false + ] + ], + [ + [ + -13.3021240234375, + false + ] + ], + [ + [ + -12.97262191772461, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.22205924987793, + false + ], + [ + -13.355619430541992, + false + ], + [ + -13.3021240234375, + false + ], + [ + -12.97262191772461, + false + ] + ], + "filter": "none", + "metrics": [ 
+ "acc", + "acc_norm" + ], + "doc_hash": "70fe071bc00888a605080b6809c984dd38af6f592b3cf8fac781c0cc5debfe9a", + "prompt_hash": "0d0f0a519437a8b378320b22a982baf60b834b7a212efccfd6cdbc0c8a76f27c", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 69, + "doc": { + "question": "Who received the IEEE Frank Rosenblatt Award in 2010?", + "choices": [ + "Geoffrey Hinton", + "Yoshua Bengio", + "Michio Sugeno", + "Yann LeCun" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Who received the IEEE Frank Rosenblatt Award in 2010?\nAnswer:", + " Geoffrey Hinton" + ], + [ + "Question:Who received the IEEE Frank Rosenblatt Award in 2010?\nAnswer:", + " Yoshua Bengio" + ], + [ + "Question:Who received the IEEE Frank Rosenblatt Award in 2010?\nAnswer:", + " Michio Sugeno" + ], + [ + "Question:Who received the IEEE Frank Rosenblatt Award in 2010?\nAnswer:", + " Yann LeCun" + ] + ], + "resps": [ + [ + [ + -6.306613445281982, + false + ] + ], + [ + [ + -8.654630661010742, + false + ] + ], + [ + [ + -18.297204971313477, + false + ] + ], + [ + [ + -9.778219223022461, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.306613445281982, + false + ], + [ + -8.654630661010742, + false + ], + [ + -18.297204971313477, + false + ], + [ + -9.778219223022461, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "4856395e23d1b486e1805d9ddc475e84c1e94fdea8e608fec4a76c055406f2d1", + "prompt_hash": "84f9802f578f7253fd0d08d4ab097d2cca0f113c9ca1fa0bde7bd0d654aa68e9", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 70, + "doc": { + "question": "How many subjects are covered in the MMLU benchmark?", + "choices": [ + "57", + "45", + "63", + "52" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:How many subjects are covered in the MMLU benchmark?\nAnswer:", + " 57" + ], + [ + "Question:How many subjects are covered in the MMLU benchmark?\nAnswer:", + " 45" + ], + [ + "Question:How many subjects are covered in the MMLU benchmark?\nAnswer:", + " 63" + ], + [ + "Question:How many subjects are covered in the MMLU benchmark?\nAnswer:", + " 52" + ] + ], + "resps": [ + [ + [ + -6.606266021728516, + false + ] + ], + [ + [ + -6.3166961669921875, + false + ] + ], + [ + [ + -7.257956504821777, + false + ] + ], + [ + [ + -5.796786308288574, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.606266021728516, + false + ], + [ + -6.3166961669921875, + false + ], + [ + -7.257956504821777, + false + ], + [ + -5.796786308288574, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "c1da0f228cb3f562ef18855bb58eb38974856f02b0c3d6b81cff1f66ca2a7330", + "prompt_hash": "f592d4ab3de93b612df4c290531f42796baea4eb2e26217eb2204b2d5797b70b", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 71, + "doc": { + "question": "What evaluation metric is used for the HumanEval dataset?", + "choices": [ + "BLEU score", + "Accuracy", + "pass@1", + "F1 score" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What evaluation metric is used for the HumanEval dataset?\nAnswer:", + " BLEU score" + ], + [ + "Question:What evaluation metric is used for the HumanEval dataset?\nAnswer:", + " Accuracy" + ], + [ + "Question:What evaluation metric is used for the HumanEval 
dataset?\nAnswer:", + " pass@1" + ], + [ + "Question:What evaluation metric is used for the HumanEval dataset?\nAnswer:", + " F1 score" + ] + ], + "resps": [ + [ + [ + -8.921050071716309, + false + ] + ], + [ + [ + -5.304507732391357, + false + ] + ], + [ + [ + -8.254301071166992, + false + ] + ], + [ + [ + -6.906661033630371, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.921050071716309, + false + ], + [ + -5.304507732391357, + false + ], + [ + -8.254301071166992, + false + ], + [ + -6.906661033630371, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "6a4660cb3646307034774e93d85c6d72209b4ca41a5df7903df763e627d8573d", + "prompt_hash": "05e0f4eabe8be341cf59c011a598fcca173afacbaa69dd8923e7041dd37edd1d", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 72, + "doc": { + "question": "Which programming languages are involved in the CODEXGLUE dataset for code translation?", + "choices": [ + "Python and Go", + "Java and C-Sharp", + "JavaScript and Ruby", + "C++ and Rust" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which programming languages are involved in the CODEXGLUE dataset for code translation?\nAnswer:", + " Python and Go" + ], + [ + "Question:Which programming languages are involved in the CODEXGLUE dataset for code translation?\nAnswer:", + " Java and C-Sharp" + ], + [ + "Question:Which programming languages are involved in the CODEXGLUE dataset for code translation?\nAnswer:", + " JavaScript and Ruby" + ], + [ + "Question:Which programming languages are involved in the CODEXGLUE dataset for code translation?\nAnswer:", + " C++ and Rust" + ] + ], + "resps": [ + [ + [ + -8.412498474121094, + false + ] + ], + [ + [ + -16.453357696533203, + false + ] + ], + [ + [ + -10.993216514587402, + false + ] + ], + [ + [ + -11.8782958984375, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.412498474121094, + false + ], + [ + -16.453357696533203, + false + ], + [ + -10.993216514587402, + false + ], + [ + -11.8782958984375, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "6607f36aaafbab5635ca3d0516438a7bfb48e4f4b45e146a71ee003072753675", + "prompt_hash": "3e132e3c2fb334c40e1ba4facbfda3b456123737b67a644b42cd76489493a8eb", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 73, + "doc": { + "question": "How many total samples are included in the HumanEval dataset?", + "choices": [ + "164", + "200", + "500", + "1000" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:How many total samples are included in the HumanEval dataset?\nAnswer:", + " 164" + ], + [ + "Question:How many total samples are included in the HumanEval dataset?\nAnswer:", + " 200" + ], + [ + "Question:How many total samples are included in the HumanEval dataset?\nAnswer:", + " 500" + ], + [ + "Question:How many total samples are included in the HumanEval dataset?\nAnswer:", + " 1000" + ] + ], + "resps": [ + [ + [ + -7.63142204284668, + false + ] + ], + [ + [ + -4.791470527648926, + false + ] + ], + [ + [ + -4.627772331237793, + false + ] + ], + [ + [ + -4.572315692901611, + true + ] + ] + ], + "filtered_resps": [ + [ + -7.63142204284668, + false + ], + [ + -4.791470527648926, + false + ], + [ + -4.627772331237793, + false + ], + [ + -4.572315692901611, + true + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" 
+ ], + "doc_hash": "8a593a36da549ab39302960a4ce5b84f98c4ba2cf0ee9912a92b0161990963f4", + "prompt_hash": "aa0d9a994fe693ac1b36b62dce0fe1bcc5858e551a69b9932ce351a5b7c7c88e", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 74, + "doc": { + "question": "What is the size of the FIND benchmark subset used in evaluation?", + "choices": [ + "200 functions", + "500 functions", + "820 functions", + "1000 functions" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the size of the FIND benchmark subset used in evaluation?\nAnswer:", + " 200 functions" + ], + [ + "Question:What is the size of the FIND benchmark subset used in evaluation?\nAnswer:", + " 500 functions" + ], + [ + "Question:What is the size of the FIND benchmark subset used in evaluation?\nAnswer:", + " 820 functions" + ], + [ + "Question:What is the size of the FIND benchmark subset used in evaluation?\nAnswer:", + " 1000 functions" + ] + ], + "resps": [ + [ + [ + -14.669370651245117, + false + ] + ], + [ + [ + -14.397915840148926, + false + ] + ], + [ + [ + -19.312631607055664, + false + ] + ], + [ + [ + -14.173040390014648, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.669370651245117, + false + ], + [ + -14.397915840148926, + false + ], + [ + -19.312631607055664, + false + ], + [ + -14.173040390014648, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "3a4b93b9301b5f196f596c1d88217f8055d17447b91145d6bdde5f118c870feb", + "prompt_hash": "c2457c947e0ab2dc92e423641a122338685f4821906af3091fce7438e74cd40d", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 75, + "doc": { + "question": "What was the total token count of the RedPajama-V1 dataset?", + "choices": [ + "1.2 Trillion", + "3.0 Trillion", + "878 Billion", + "2.8 Trillion" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What was the total token count of the RedPajama-V1 dataset?\nAnswer:", + " 1.2 Trillion" + ], + [ + "Question:What was the total token count of the RedPajama-V1 dataset?\nAnswer:", + " 3.0 Trillion" + ], + [ + "Question:What was the total token count of the RedPajama-V1 dataset?\nAnswer:", + " 878 Billion" + ], + [ + "Question:What was the total token count of the RedPajama-V1 dataset?\nAnswer:", + " 2.8 Trillion" + ] + ], + "resps": [ + [ + [ + -12.873974800109863, + false + ] + ], + [ + [ + -17.502328872680664, + false + ] + ], + [ + [ + -16.340862274169922, + false + ] + ], + [ + [ + -14.272342681884766, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.873974800109863, + false + ], + [ + -17.502328872680664, + false + ], + [ + -16.340862274169922, + false + ], + [ + -14.272342681884766, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "343df6d4a5c375184cbd53649f5a4ce41db0cefeec9ec624869da66e561e6f23", + "prompt_hash": "38bf56f206f6e40bed40f0873a1f9fd49f9026c986fa0c744a31b7edd92ed879", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 76, + "doc": { + "question": "Which architecture was used to train the RedPajama-INCITE models on the Summit supercomputer?", + "choices": [ + "Intel Xeon", + "IBM Power9", + "Apple M1", + "AMD EPYC" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which architecture was used to train the 
RedPajama-INCITE models on the Summit supercomputer?\nAnswer:", + " Intel Xeon" + ], + [ + "Question:Which architecture was used to train the RedPajama-INCITE models on the Summit supercomputer?\nAnswer:", + " IBM Power9" + ], + [ + "Question:Which architecture was used to train the RedPajama-INCITE models on the Summit supercomputer?\nAnswer:", + " Apple M1" + ], + [ + "Question:Which architecture was used to train the RedPajama-INCITE models on the Summit supercomputer?\nAnswer:", + " AMD EPYC" + ] + ], + "resps": [ + [ + [ + -10.685319900512695, + false + ] + ], + [ + [ + -16.690317153930664, + false + ] + ], + [ + [ + -14.072364807128906, + false + ] + ], + [ + [ + -12.965118408203125, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.685319900512695, + false + ], + [ + -16.690317153930664, + false + ], + [ + -14.072364807128906, + false + ], + [ + -12.965118408203125, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "c07272860702c841e10c51dec9727d2a1539d601ba161c4ed979bec80a7548db", + "prompt_hash": "27c2c137ee521266467ce7e2661c2de39e14ca9bd7ced8e64e883be01b78572d", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 77, + "doc": { + "question": "How many Common Crawl snapshots were used to create RedPajama-V2?", + "choices": [ + "5", + "12", + "84", + "46" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many Common Crawl snapshots were used to create RedPajama-V2?\nAnswer:", + " 5" + ], + [ + "Question:How many Common Crawl snapshots were used to create RedPajama-V2?\nAnswer:", + " 12" + ], + [ + "Question:How many Common Crawl snapshots were used to create RedPajama-V2?\nAnswer:", + " 84" + ], + [ + "Question:How many Common Crawl snapshots were used to create RedPajama-V2?\nAnswer:", + " 46" + ] + ], + "resps": [ + [ + [ + -3.648129940032959, + false + ] + ], + [ + [ + -4.735817909240723, + false + ] + ], + [ + [ + -7.701600074768066, + false + ] + ], + [ + [ + -6.892649173736572, + false + ] + ] + ], + "filtered_resps": [ + [ + -3.648129940032959, + false + ], + [ + -4.735817909240723, + false + ], + [ + -7.701600074768066, + false + ], + [ + -6.892649173736572, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "c98d0ea6b984a0c02763b82495d98ecafe07e6c8d1172f5b059883d48745bb45", + "prompt_hash": "df25701a85694241d855e290288603b96157d5f225d32e35399fc9937194fe76", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 78, + "doc": { + "question": "Which five languages are included in RedPajama-V2?", + "choices": [ + "English, German, French, Spanish, Italian", + "English, Chinese, Japanese, Korean, French", + "English, Russian, Portuguese, German, Spanish", + "English, Hindi, Arabic, French, Italian" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which five languages are included in RedPajama-V2?\nAnswer:", + " English, German, French, Spanish, Italian" + ], + [ + "Question:Which five languages are included in RedPajama-V2?\nAnswer:", + " English, Chinese, Japanese, Korean, French" + ], + [ + "Question:Which five languages are included in RedPajama-V2?\nAnswer:", + " English, Russian, Portuguese, German, Spanish" + ], + [ + "Question:Which five languages are included in RedPajama-V2?\nAnswer:", + " English, Hindi, Arabic, French, Italian" + ] + ], + "resps": [ + [ + [ + 
-10.228062629699707, + false + ] + ], + [ + [ + -14.981597900390625, + false + ] + ], + [ + [ + -18.74696922302246, + false + ] + ], + [ + [ + -16.782047271728516, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.228062629699707, + false + ], + [ + -14.981597900390625, + false + ], + [ + -18.74696922302246, + false + ], + [ + -16.782047271728516, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "ffad02ec878dccdcdb3f69be4e6cb3c3ab7299eaf77297b1fd06c7fb4a14aaac", + "prompt_hash": "b02bb87f442385006941957c27541b2a25db5ba040cf7f8361e92ff644650c9e", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 79, + "doc": { + "question": "What was the total number of documents in the RedPajama-V2 dataset?", + "choices": [ + "32.8 Billion", + "113.3 Billion", + "87.5 Billion", + "50.7 Billion" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What was the total number of documents in the RedPajama-V2 dataset?\nAnswer:", + " 32.8 Billion" + ], + [ + "Question:What was the total number of documents in the RedPajama-V2 dataset?\nAnswer:", + " 113.3 Billion" + ], + [ + "Question:What was the total number of documents in the RedPajama-V2 dataset?\nAnswer:", + " 87.5 Billion" + ], + [ + "Question:What was the total number of documents in the RedPajama-V2 dataset?\nAnswer:", + " 50.7 Billion" + ] + ], + "resps": [ + [ + [ + -16.723188400268555, + false + ] + ], + [ + [ + -19.00351333618164, + false + ] + ], + [ + [ + -17.76470947265625, + false + ] + ], + [ + [ + -17.174829483032227, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.723188400268555, + false + ], + [ + -19.00351333618164, + false + ], + [ + -17.76470947265625, + false + ], + [ + -17.174829483032227, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "cd2499d878b3dbdfcbcc7b73b842f7265b17497e41e5ed6877c38f04e3c04df0", + "prompt_hash": "86ede63365518aa6a0c4469591e5d3a720087f05e09a59ead51ed51cdb9eabbc", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 80, + "doc": { + "question": "Which base model was fine-tuned to create Marco-o1-CoT?", + "choices": [ + "LLaMA-2-13B", + "Qwen2-7B-Instruct", + "GPT-3.5", + "Mistral-7B" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which base model was fine-tuned to create Marco-o1-CoT?\nAnswer:", + " LLaMA-2-13B" + ], + [ + "Question:Which base model was fine-tuned to create Marco-o1-CoT?\nAnswer:", + " Qwen2-7B-Instruct" + ], + [ + "Question:Which base model was fine-tuned to create Marco-o1-CoT?\nAnswer:", + " GPT-3.5" + ], + [ + "Question:Which base model was fine-tuned to create Marco-o1-CoT?\nAnswer:", + " Mistral-7B" + ] + ], + "resps": [ + [ + [ + -12.792409896850586, + false + ] + ], + [ + [ + -12.102909088134766, + false + ] + ], + [ + [ + -5.229546070098877, + false + ] + ], + [ + [ + -6.480302810668945, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.792409896850586, + false + ], + [ + -12.102909088134766, + false + ], + [ + -5.229546070098877, + false + ], + [ + -6.480302810668945, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "854e0f9653c3da0877c954e315bb63f134dabd432dad36ecd6086e36e77b6f4a", + "prompt_hash": "05924aef3a26920fc1c2502accd55e327b7cffc40a6128c2e9eb69d62435b3d8", + "target_hash": 
"6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 81, + "doc": { + "question": "How many samples are included in the Marco-o1 CoT Dataset (Synthetic)?", + "choices": [ + "45,125", + "10,000", + "5,141", + "60,266" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:How many samples are included in the Marco-o1 CoT Dataset (Synthetic)?\nAnswer:", + " 45,125" + ], + [ + "Question:How many samples are included in the Marco-o1 CoT Dataset (Synthetic)?\nAnswer:", + " 10,000" + ], + [ + "Question:How many samples are included in the Marco-o1 CoT Dataset (Synthetic)?\nAnswer:", + " 5,141" + ], + [ + "Question:How many samples are included in the Marco-o1 CoT Dataset (Synthetic)?\nAnswer:", + " 60,266" + ] + ], + "resps": [ + [ + [ + -15.957291603088379, + false + ] + ], + [ + [ + -5.9744343757629395, + false + ] + ], + [ + [ + -13.689861297607422, + false + ] + ], + [ + [ + -16.420578002929688, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.957291603088379, + false + ], + [ + -5.9744343757629395, + false + ], + [ + -13.689861297607422, + false + ], + [ + -16.420578002929688, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "027cd7fa470750d66dabff86af0374caba6f6d4e90d8d1f3e1a558c7cfe74d9a", + "prompt_hash": "470ba892ff98e50aac9bef0ef10e99661216ce36d267acb86701c7b114cee2d1", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 82, + "doc": { + "question": "What improvement in accuracy did Marco-o1 achieve on the MGSM (English) dataset compared to its baseline?", + "choices": [ + "+4.10%", + "+6.17%", + "+5.60%", + "+7.25%" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What improvement in accuracy did Marco-o1 achieve on the MGSM (English) dataset compared to its baseline?\nAnswer:", + " +4.10%" + ], + [ + "Question:What improvement in accuracy did Marco-o1 achieve on the MGSM (English) dataset compared to its baseline?\nAnswer:", + " +6.17%" + ], + [ + "Question:What improvement in accuracy did Marco-o1 achieve on the MGSM (English) dataset compared to its baseline?\nAnswer:", + " +5.60%" + ], + [ + "Question:What improvement in accuracy did Marco-o1 achieve on the MGSM (English) dataset compared to its baseline?\nAnswer:", + " +7.25%" + ] + ], + "resps": [ + [ + [ + -20.366485595703125, + false + ] + ], + [ + [ + -20.923389434814453, + false + ] + ], + [ + [ + -20.500133514404297, + false + ] + ], + [ + [ + -20.437623977661133, + false + ] + ] + ], + "filtered_resps": [ + [ + -20.366485595703125, + false + ], + [ + -20.923389434814453, + false + ], + [ + -20.500133514404297, + false + ], + [ + -20.437623977661133, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "3b81c9861f63328c55eee8f9a171bc29ee04e18fc8c291667a8318746146b4d3", + "prompt_hash": "0ea6e0375ab9dec9aa8b8bf8d07007c702d0b13ff85345c2eeac5c15062d23ac", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 83, + "doc": { + "question": "In the MCTS framework, what does each node represent?", + "choices": [ + "A reasoning state", + "A dataset sample", + "A confidence score", + "A translation example" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:In the MCTS framework, what does each node represent?\nAnswer:", + " A reasoning state" + ], + [ + 
"Question:In the MCTS framework, what does each node represent?\nAnswer:", + " A dataset sample" + ], + [ + "Question:In the MCTS framework, what does each node represent?\nAnswer:", + " A confidence score" + ], + [ + "Question:In the MCTS framework, what does each node represent?\nAnswer:", + " A translation example" + ] + ], + "resps": [ + [ + [ + -17.532194137573242, + false + ] + ], + [ + [ + -17.33028793334961, + false + ] + ], + [ + [ + -15.44752311706543, + false + ] + ], + [ + [ + -23.827362060546875, + false + ] + ] + ], + "filtered_resps": [ + [ + -17.532194137573242, + false + ], + [ + -17.33028793334961, + false + ], + [ + -15.44752311706543, + false + ], + [ + -23.827362060546875, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "da0d104d4c28b6023871cd7c042e6100b946db17c1487fc3fdab43d0280f16ee", + "prompt_hash": "917998b7b8619495489d5230e2c452d9a10f4ea710ccf4b965290ffedde46aef", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 84, + "doc": { + "question": "What reflection prompt was added to encourage self-criticism in reasoning?", + "choices": [ + "Think again carefully before answering.", + "Wait! Maybe I made some mistakes! I need to rethink from scratch.", + "Check your reasoning one more time.", + "Be cautious, errors may occur." + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What reflection prompt was added to encourage self-criticism in reasoning?\nAnswer:", + " Think again carefully before answering." + ], + [ + "Question:What reflection prompt was added to encourage self-criticism in reasoning?\nAnswer:", + " Wait! Maybe I made some mistakes! I need to rethink from scratch." + ], + [ + "Question:What reflection prompt was added to encourage self-criticism in reasoning?\nAnswer:", + " Check your reasoning one more time." + ], + [ + "Question:What reflection prompt was added to encourage self-criticism in reasoning?\nAnswer:", + " Be cautious, errors may occur." 
+ ] + ], + "resps": [ + [ + [ + -20.51762580871582, + false + ] + ], + [ + [ + -56.214149475097656, + false + ] + ], + [ + [ + -22.922752380371094, + false + ] + ], + [ + [ + -32.4448127746582, + false + ] + ] + ], + "filtered_resps": [ + [ + -20.51762580871582, + false + ], + [ + -56.214149475097656, + false + ], + [ + -22.922752380371094, + false + ], + [ + -32.4448127746582, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "c1db4d89e8536ae6c2c91d8b610e10f3f3c8e002eb44a61cf024ce7028741e67", + "prompt_hash": "ac16d04f80938514c60a3c23e4df6f1010417fdf9b3aeed1c3d69a4ba274c044", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 85, + "doc": { + "question": "Which base model was selected due to its strong foundational capability in mathematical reasoning?", + "choices": [ + "GPT-4o", + "LLaMA-2", + "Qwen2.5-Math-72B", + "WizardLM" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which base model was selected due to its strong foundational capability in mathematical reasoning?\nAnswer:", + " GPT-4o" + ], + [ + "Question:Which base model was selected due to its strong foundational capability in mathematical reasoning?\nAnswer:", + " LLaMA-2" + ], + [ + "Question:Which base model was selected due to its strong foundational capability in mathematical reasoning?\nAnswer:", + " Qwen2.5-Math-72B" + ], + [ + "Question:Which base model was selected due to its strong foundational capability in mathematical reasoning?\nAnswer:", + " WizardLM" + ] + ], + "resps": [ + [ + [ + -4.530148506164551, + false + ] + ], + [ + [ + -8.570853233337402, + false + ] + ], + [ + [ + -29.780563354492188, + false + ] + ], + [ + [ + -9.87548828125, + false + ] + ] + ], + "filtered_resps": [ + [ + -4.530148506164551, + false + ], + [ + -8.570853233337402, + false + ], + [ + -29.780563354492188, + false + ], + [ + -9.87548828125, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "642effc96f010f6d01ce3cc3d9e1e4da4cf9c0e2a95c61926efc5c2058a19767", + "prompt_hash": "eaba60c4953a3f517eb30c9042d6f00862ccf2dcf8c249eaba2737571adc36dd", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 86, + "doc": { + "question": "Which dataset subset was used to facilitate more extensive inference-time scaling experiments?", + "choices": [ + "AIME2024", + "MATH500", + "MATH2024", + "SimpleQA" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which dataset subset was used to facilitate more extensive inference-time scaling experiments?\nAnswer:", + " AIME2024" + ], + [ + "Question:Which dataset subset was used to facilitate more extensive inference-time scaling experiments?\nAnswer:", + " MATH500" + ], + [ + "Question:Which dataset subset was used to facilitate more extensive inference-time scaling experiments?\nAnswer:", + " MATH2024" + ], + [ + "Question:Which dataset subset was used to facilitate more extensive inference-time scaling experiments?\nAnswer:", + " SimpleQA" + ] + ], + "resps": [ + [ + [ + -23.071943283081055, + false + ] + ], + [ + [ + -21.142967224121094, + false + ] + ], + [ + [ + -20.790672302246094, + false + ] + ], + [ + [ + -17.837215423583984, + false + ] + ] + ], + "filtered_resps": [ + [ + -23.071943283081055, + false + ], + [ + -21.142967224121094, + false + ], + [ + -20.790672302246094, + false + ], + [ + 
-17.837215423583984, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "386f81490afe08849f4cdfe9479ae1093010c99a8d2b1e2c414fb2428d8ffb94", + "prompt_hash": "602a2d88d523c4d0f12f48973eb2892ed39d2caf4f98e09fdc10c6d983272287", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 87, + "doc": { + "question": "What is the total transparency score assigned in the proposed scoring framework?", + "choices": [ + "50", + "75", + "100", + "150" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the total transparency score assigned in the proposed scoring framework?\nAnswer:", + " 50" + ], + [ + "Question:What is the total transparency score assigned in the proposed scoring framework?\nAnswer:", + " 75" + ], + [ + "Question:What is the total transparency score assigned in the proposed scoring framework?\nAnswer:", + " 100" + ], + [ + "Question:What is the total transparency score assigned in the proposed scoring framework?\nAnswer:", + " 150" + ] + ], + "resps": [ + [ + [ + -6.404786109924316, + false + ] + ], + [ + [ + -7.529520511627197, + false + ] + ], + [ + [ + -5.6015729904174805, + false + ] + ], + [ + [ + -9.762228012084961, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.404786109924316, + false + ], + [ + -7.529520511627197, + false + ], + [ + -5.6015729904174805, + false + ], + [ + -9.762228012084961, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "4c0d03e44a52a1380460a9e87840ed540aa0d5306090a4e992d9dffe62962bd6", + "prompt_hash": "3da4faa28aa643a53554e62566baaa9eb8c27bf624b650a137ef1d8d4a9596f3", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 88, + "doc": { + "question": "From which competition were 30 additional problems curated to diversify evaluation benchmarks?", + "choices": [ + "American Invitational Mathematics Examination", + "China National High School Mathematics Competition", + "International Mathematical Olympiad", + "Putnam Competition" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:From which competition were 30 additional problems curated to diversify evaluation benchmarks?\nAnswer:", + " American Invitational Mathematics Examination" + ], + [ + "Question:From which competition were 30 additional problems curated to diversify evaluation benchmarks?\nAnswer:", + " China National High School Mathematics Competition" + ], + [ + "Question:From which competition were 30 additional problems curated to diversify evaluation benchmarks?\nAnswer:", + " International Mathematical Olympiad" + ], + [ + "Question:From which competition were 30 additional problems curated to diversify evaluation benchmarks?\nAnswer:", + " Putnam Competition" + ] + ], + "resps": [ + [ + [ + -23.391521453857422, + false + ] + ], + [ + [ + -23.65379524230957, + false + ] + ], + [ + [ + -7.464717388153076, + false + ] + ], + [ + [ + -15.607044219970703, + false + ] + ] + ], + "filtered_resps": [ + [ + -23.391521453857422, + false + ], + [ + -23.65379524230957, + false + ], + [ + -7.464717388153076, + false + ], + [ + -15.607044219970703, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "adfa9e48b2843d6e20fbfc770b373e66f427d22f799d9f27d40b51348af6a198", + "prompt_hash": "9e3bbcb449ce9fabe9f85c2e2cc158442c7acd6cb4a610e329e729e4b9e2f200", + 
"target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 89, + "doc": { + "question": "Which dataset demonstrated a drop in accuracy from 92% to 86.5% after fine-tuning?", + "choices": [ + "Flames", + "DiaSafety", + "WildSafety", + "SimpleQA" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which dataset demonstrated a drop in accuracy from 92% to 86.5% after fine-tuning?\nAnswer:", + " Flames" + ], + [ + "Question:Which dataset demonstrated a drop in accuracy from 92% to 86.5% after fine-tuning?\nAnswer:", + " DiaSafety" + ], + [ + "Question:Which dataset demonstrated a drop in accuracy from 92% to 86.5% after fine-tuning?\nAnswer:", + " WildSafety" + ], + [ + "Question:Which dataset demonstrated a drop in accuracy from 92% to 86.5% after fine-tuning?\nAnswer:", + " SimpleQA" + ] + ], + "resps": [ + [ + [ + -14.188251495361328, + false + ] + ], + [ + [ + -26.24142074584961, + false + ] + ], + [ + [ + -24.381790161132812, + false + ] + ], + [ + [ + -18.144813537597656, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.188251495361328, + false + ], + [ + -26.24142074584961, + false + ], + [ + -24.381790161132812, + false + ], + [ + -18.144813537597656, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b3f4cd4668762b23da69af6238b379755d32b68bd1327203e306eaa09d42a916", + "prompt_hash": "003ceb9e9bd248a70d3c0a8df60b0e368e9458581e05b605cdd2f897ca650c2b", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 90, + "doc": { + "question": "How many screenshots were collected for web instruction-tuning data before filtering static text?", + "choices": [ + "10K", + "15K", + "22K", + "30K" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many screenshots were collected for web instruction-tuning data before filtering static text?\nAnswer:", + " 10K" + ], + [ + "Question:How many screenshots were collected for web instruction-tuning data before filtering static text?\nAnswer:", + " 15K" + ], + [ + "Question:How many screenshots were collected for web instruction-tuning data before filtering static text?\nAnswer:", + " 22K" + ], + [ + "Question:How many screenshots were collected for web instruction-tuning data before filtering static text?\nAnswer:", + " 30K" + ] + ], + "resps": [ + [ + [ + -9.913565635681152, + false + ] + ], + [ + [ + -11.398613929748535, + false + ] + ], + [ + [ + -11.466139793395996, + false + ] + ], + [ + [ + -11.128503799438477, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.913565635681152, + false + ], + [ + -11.398613929748535, + false + ], + [ + -11.466139793395996, + false + ], + [ + -11.128503799438477, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "8a0701d6015b21c37484eb7e6d9e276cf922e61fa5f4dbb92dbf6161f1177900", + "prompt_hash": "818b0cb599a378908e016043ae4eb55a5c8a12bb882db7d6fdbf0191f7d4325e", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 91, + "doc": { + "question": "Which dataset contains 30k instructions and 715K trajectories in an Android smartphone environment?", + "choices": [ + "Mind2Web", + "AITW", + "MiniWob", + "OmniAct" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which dataset contains 30k instructions and 715K trajectories in an 
Android smartphone environment?\nAnswer:", + " Mind2Web" + ], + [ + "Question:Which dataset contains 30k instructions and 715K trajectories in an Android smartphone environment?\nAnswer:", + " AITW" + ], + [ + "Question:Which dataset contains 30k instructions and 715K trajectories in an Android smartphone environment?\nAnswer:", + " MiniWob" + ], + [ + "Question:Which dataset contains 30k instructions and 715K trajectories in an Android smartphone environment?\nAnswer:", + " OmniAct" + ] + ], + "resps": [ + [ + [ + -22.257719039916992, + false + ] + ], + [ + [ + -18.680747985839844, + false + ] + ], + [ + [ + -28.62395668029785, + false + ] + ], + [ + [ + -20.713193893432617, + false + ] + ] + ], + "filtered_resps": [ + [ + -22.257719039916992, + false + ], + [ + -18.680747985839844, + false + ], + [ + -28.62395668029785, + false + ], + [ + -20.713193893432617, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "81b2317defb712221e1a47661cf025da1057bc3cd60746839d59fcb0bb89bd4f", + "prompt_hash": "b7f9de5a9dea5182c107cef796da80fee91ca22ac47f5834e4b6adaf7faa5d31", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 92, + "doc": { + "question": "What was the learning rate configured during training?", + "choices": [ + "1e-3", + "1e-4", + "5e-5", + "2e-4" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What was the learning rate configured during training?\nAnswer:", + " 1e-3" + ], + [ + "Question:What was the learning rate configured during training?\nAnswer:", + " 1e-4" + ], + [ + "Question:What was the learning rate configured during training?\nAnswer:", + " 5e-5" + ], + [ + "Question:What was the learning rate configured during training?\nAnswer:", + " 2e-4" + ] + ], + "resps": [ + [ + [ + -6.130802631378174, + false + ] + ], + [ + [ + -5.951138019561768, + false + ] + ], + [ + [ + -7.6521711349487305, + false + ] + ], + [ + [ + -7.785113334655762, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.130802631378174, + false + ], + [ + -5.951138019561768, + false + ], + [ + -7.6521711349487305, + false + ], + [ + -7.785113334655762, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "982cb0f36b4a6afcf5533c6dd81994dbfdbc261d841b5381a135a56fae46e7f8", + "prompt_hash": "d5ad5afbf26b85078270057741fb62aedbd62983b6e895295bc67a40691eb237", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 93, + "doc": { + "question": "Which benchmark dataset for grounding is used to assess text and widget recognition separately?", + "choices": [ + "Screenspot", + "SeeClick", + "GUIAct", + "Mind2Web" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which benchmark dataset for grounding is used to assess text and widget recognition separately?\nAnswer:", + " Screenspot" + ], + [ + "Question:Which benchmark dataset for grounding is used to assess text and widget recognition separately?\nAnswer:", + " SeeClick" + ], + [ + "Question:Which benchmark dataset for grounding is used to assess text and widget recognition separately?\nAnswer:", + " GUIAct" + ], + [ + "Question:Which benchmark dataset for grounding is used to assess text and widget recognition separately?\nAnswer:", + " Mind2Web" + ] + ], + "resps": [ + [ + [ + -19.185638427734375, + false + ] + ], + [ + [ + -24.059486389160156, + false + ] + ], + [ + [ + 
-20.9055118560791, + false + ] + ], + [ + [ + -19.651004791259766, + false + ] + ] + ], + "filtered_resps": [ + [ + -19.185638427734375, + false + ], + [ + -24.059486389160156, + false + ], + [ + -20.9055118560791, + false + ], + [ + -19.651004791259766, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "66ceed2da311e496a4d1ecc1d6d4a8d63337f396192d8124982e79d9abcb2364", + "prompt_hash": "5d6bf6fe135a7b5a7127295f52ca865dfc4822886083c430de092233a04d15f3", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 94, + "doc": { + "question": "On which layer was cross-layer insertion applied for the UI Connected Graph?", + "choices": [ + "Layer 10", + "Layer 12", + "Layer 14", + "Layer 16" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:On which layer was cross-layer insertion applied for the UI Connected Graph?\nAnswer:", + " Layer 10" + ], + [ + "Question:On which layer was cross-layer insertion applied for the UI Connected Graph?\nAnswer:", + " Layer 12" + ], + [ + "Question:On which layer was cross-layer insertion applied for the UI Connected Graph?\nAnswer:", + " Layer 14" + ], + [ + "Question:On which layer was cross-layer insertion applied for the UI Connected Graph?\nAnswer:", + " Layer 16" + ] + ], + "resps": [ + [ + [ + -10.43406867980957, + false + ] + ], + [ + [ + -11.22581672668457, + false + ] + ], + [ + [ + -12.237310409545898, + false + ] + ], + [ + [ + -12.027925491333008, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.43406867980957, + false + ], + [ + -11.22581672668457, + false + ], + [ + -12.237310409545898, + false + ], + [ + -12.027925491333008, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "3759bdb6424860a4263ca8eb7807cd0b3cf957b42967ae7ccacb768de79db59a", + "prompt_hash": "268b4707c6a7004b4c5ad78090c712885d24a4d5fc1101ad378edc3a1324761e", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 95, + "doc": { + "question": "What is the name of the proposed Vision-Language-Action model that decouples cognition and action capabilities?", + "choices": [ + "OpenVLA", + "CogACT", + "RT-2-X", + "Octo-Base" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the name of the proposed Vision-Language-Action model that decouples cognition and action capabilities?\nAnswer:", + " OpenVLA" + ], + [ + "Question:What is the name of the proposed Vision-Language-Action model that decouples cognition and action capabilities?\nAnswer:", + " CogACT" + ], + [ + "Question:What is the name of the proposed Vision-Language-Action model that decouples cognition and action capabilities?\nAnswer:", + " RT-2-X" + ], + [ + "Question:What is the name of the proposed Vision-Language-Action model that decouples cognition and action capabilities?\nAnswer:", + " Octo-Base" + ] + ], + "resps": [ + [ + [ + -13.862266540527344, + false + ] + ], + [ + [ + -16.2746524810791, + false + ] + ], + [ + [ + -24.433616638183594, + false + ] + ], + [ + [ + -24.454200744628906, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.862266540527344, + false + ], + [ + -16.2746524810791, + false + ], + [ + -24.433616638183594, + false + ], + [ + -24.454200744628906, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": 
"a04fd2b4c0456439d4f9cc93484d3f91eb1bb16fd1778cf8d5620ec2f97456fe", + "prompt_hash": "72826baf9d6f3aa5c5d683c89926c74e74e9fd893440b479c679096eb8490c92", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 96, + "doc": { + "question": "Which diffusion-based architecture is employed as the backbone for the action module?", + "choices": [ + "Multi-Layer Perceptron", + "LSTM", + "Diffusion Transformer (DiT)", + "CNN" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which diffusion-based architecture is employed as the backbone for the action module?\nAnswer:", + " Multi-Layer Perceptron" + ], + [ + "Question:Which diffusion-based architecture is employed as the backbone for the action module?\nAnswer:", + " LSTM" + ], + [ + "Question:Which diffusion-based architecture is employed as the backbone for the action module?\nAnswer:", + " Diffusion Transformer (DiT)" + ], + [ + "Question:Which diffusion-based architecture is employed as the backbone for the action module?\nAnswer:", + " CNN" + ] + ], + "resps": [ + [ + [ + -13.12600040435791, + false + ] + ], + [ + [ + -11.416516304016113, + false + ] + ], + [ + [ + -15.304282188415527, + false + ] + ], + [ + [ + -8.963671684265137, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.12600040435791, + false + ], + [ + -11.416516304016113, + false + ], + [ + -15.304282188415527, + false + ], + [ + -8.963671684265137, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "84c99fd4a4fae935da1b3c680ee3eee1afb8bc8eee00708a6aab52cb645a2158", + "prompt_hash": "17c62f6ff563bc4bf63a76d509af13a212e7d4c94849538343b05a0b5cace1f8", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 97, + "doc": { + "question": "What dataset was primarily used to train the proposed Vision-Language-Action models?", + "choices": [ + "ImageNet", + "COCO", + "Open X-Embodiment", + "EPIC-Kitchens" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What dataset was primarily used to train the proposed Vision-Language-Action models?\nAnswer:", + " ImageNet" + ], + [ + "Question:What dataset was primarily used to train the proposed Vision-Language-Action models?\nAnswer:", + " COCO" + ], + [ + "Question:What dataset was primarily used to train the proposed Vision-Language-Action models?\nAnswer:", + " Open X-Embodiment" + ], + [ + "Question:What dataset was primarily used to train the proposed Vision-Language-Action models?\nAnswer:", + " EPIC-Kitchens" + ] + ], + "resps": [ + [ + [ + -5.665737628936768, + false + ] + ], + [ + [ + -8.678854942321777, + false + ] + ], + [ + [ + -20.266983032226562, + false + ] + ], + [ + [ + -11.777584075927734, + false + ] + ] + ], + "filtered_resps": [ + [ + -5.665737628936768, + false + ], + [ + -8.678854942321777, + false + ], + [ + -20.266983032226562, + false + ], + [ + -11.777584075927734, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "01cc2688bea7779c120bdf57ac79db47d821c9d3edbde9575a3bf14448bc120b", + "prompt_hash": "4f256133214ffff03f62ca26ee941b0d8161ed7621307af23442a559654745f1", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 98, + "doc": { + "question": "Which language model serves as the backbone of the language module in the architecture?", + "choices": 
[ + "GPT-4", + "LLAMA-2", + "PaLI-X", + "Phi" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which language model serves as the backbone of the language module in the architecture?\nAnswer:", + " GPT-4" + ], + [ + "Question:Which language model serves as the backbone of the language module in the architecture?\nAnswer:", + " LLAMA-2" + ], + [ + "Question:Which language model serves as the backbone of the language module in the architecture?\nAnswer:", + " PaLI-X" + ], + [ + "Question:Which language model serves as the backbone of the language module in the architecture?\nAnswer:", + " Phi" + ] + ], + "resps": [ + [ + [ + -5.79275369644165, + false + ] + ], + [ + [ + -13.931233406066895, + false + ] + ], + [ + [ + -22.94920539855957, + false + ] + ], + [ + [ + -8.201404571533203, + false + ] + ] + ], + "filtered_resps": [ + [ + -5.79275369644165, + false + ], + [ + -13.931233406066895, + false + ], + [ + -22.94920539855957, + false + ], + [ + -8.201404571533203, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "ce57ae75fb5b4ea3ce6c73e36a821d5303a5d9ba755ea4042553aed15c4eea77", + "prompt_hash": "d4e11f8f25225894ee66ed8fa09cf6996ca299426f3a35a50dfaf0d3814221f3", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 99, + "doc": { + "question": "During training, what is the default number of future actions predicted by the action module?", + "choices": [ + "3", + "7", + "15", + "31" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:During training, what is the default number of future actions predicted by the action module?\nAnswer:", + " 3" + ], + [ + "Question:During training, what is the default number of future actions predicted by the action module?\nAnswer:", + " 7" + ], + [ + "Question:During training, what is the default number of future actions predicted by the action module?\nAnswer:", + " 15" + ], + [ + "Question:During training, what is the default number of future actions predicted by the action module?\nAnswer:", + " 31" + ] + ], + "resps": [ + [ + [ + -3.34032940864563, + false + ] + ], + [ + [ + -5.836397171020508, + false + ] + ], + [ + [ + -5.689146995544434, + false + ] + ], + [ + [ + -9.099987030029297, + false + ] + ] + ], + "filtered_resps": [ + [ + -3.34032940864563, + false + ], + [ + -5.836397171020508, + false + ], + [ + -5.689146995544434, + false + ], + [ + -9.099987030029297, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b63f832f8590826c7cb5b85e52d3dd0482b5a1714952450815f2678b2d9bf5c6", + "prompt_hash": "2901e294ac7f5a4130458e7a3a5905c6480257231e5f73acda55a32a89ff24d9", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + } + ], + "arxiv_mc_2412": [ + { + "doc_id": 0, + "doc": { + "question": "What limitation does teacher forcing impose on current next-token prediction models?", + "choices": [ + "It prevents models from being trained on continuous data.", + "It restricts the model to generating only one token at a time.", + "It offers no mechanism to guide sequence sampling toward specific objectives.", + "It reduces the model’s ability to perform tree search efficiently." 
+ ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What limitation does teacher forcing impose on current next-token prediction models?\nAnswer:", + " It prevents models from being trained on continuous data." + ], + [ + "Question:What limitation does teacher forcing impose on current next-token prediction models?\nAnswer:", + " It restricts the model to generating only one token at a time." + ], + [ + "Question:What limitation does teacher forcing impose on current next-token prediction models?\nAnswer:", + " It offers no mechanism to guide sequence sampling toward specific objectives." + ], + [ + "Question:What limitation does teacher forcing impose on current next-token prediction models?\nAnswer:", + " It reduces the model’s ability to perform tree search efficiently." + ] + ], + "resps": [ + [ + [ + -22.44562530517578, + false + ] + ], + [ + [ + -18.013652801513672, + false + ] + ], + [ + [ + -50.749053955078125, + false + ] + ], + [ + [ + -35.38025665283203, + false + ] + ] + ], + "filtered_resps": [ + [ + -22.44562530517578, + false + ], + [ + -18.013652801513672, + false + ], + [ + -50.749053955078125, + false + ], + [ + -35.38025665283203, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "4c3e7569e4468951b0a98135ccc738df8d31c703938930b8b26c77fbb26f0073", + "prompt_hash": "e341aaa05fa6db270029370f8f97e06ef1dfb36d003bd27ebfc634862f06ce5b", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 1, + "doc": { + "question": "What is the key limitation of full-sequence diffusion architectures mentioned in the text?", + "choices": [ + "They fail to handle discrete data like text or audio.", + "They rely on causal attention, making them inflexible for parallel training.", + "They require variable-length token prediction, which is inefficient.", + "They are universally parameterized via non-causal, unmasked architectures." + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What is the key limitation of full-sequence diffusion architectures mentioned in the text?\nAnswer:", + " They fail to handle discrete data like text or audio." + ], + [ + "Question:What is the key limitation of full-sequence diffusion architectures mentioned in the text?\nAnswer:", + " They rely on causal attention, making them inflexible for parallel training." + ], + [ + "Question:What is the key limitation of full-sequence diffusion architectures mentioned in the text?\nAnswer:", + " They require variable-length token prediction, which is inefficient." + ], + [ + "Question:What is the key limitation of full-sequence diffusion architectures mentioned in the text?\nAnswer:", + " They are universally parameterized via non-causal, unmasked architectures." 
+ ] + ], + "resps": [ + [ + [ + -30.89609718322754, + false + ] + ], + [ + [ + -49.041259765625, + false + ] + ], + [ + [ + -38.779083251953125, + false + ] + ], + [ + [ + -59.433868408203125, + false + ] + ] + ], + "filtered_resps": [ + [ + -30.89609718322754, + false + ], + [ + -49.041259765625, + false + ], + [ + -38.779083251953125, + false + ], + [ + -59.433868408203125, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f06576e73899a009a57b91ade993db970065e63efbf66ddfe429c18a1799096e", + "prompt_hash": "20f709b25998ca1921a405b0aead0207a27404bfb30309345ba3233b4bc171c9", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 2, + "doc": { + "question": "What enables \\u03b1Seq to generate variable-length sequences stably even for continuous tokens?", + "choices": [ + "Use of teacher forcing with deterministic transitions.", + "Joint modeling of latent variables with fixed-length noise schedules.", + "Denoising tokens with independent noise levels via a causal architecture.", + "Training with non-causal architectures over full-token batches." + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What enables \\u03b1Seq to generate variable-length sequences stably even for continuous tokens?\nAnswer:", + " Use of teacher forcing with deterministic transitions." + ], + [ + "Question:What enables \\u03b1Seq to generate variable-length sequences stably even for continuous tokens?\nAnswer:", + " Joint modeling of latent variables with fixed-length noise schedules." + ], + [ + "Question:What enables \\u03b1Seq to generate variable-length sequences stably even for continuous tokens?\nAnswer:", + " Denoising tokens with independent noise levels via a causal architecture." + ], + [ + "Question:What enables \\u03b1Seq to generate variable-length sequences stably even for continuous tokens?\nAnswer:", + " Training with non-causal architectures over full-token batches." + ] + ], + "resps": [ + [ + [ + -35.210548400878906, + false + ] + ], + [ + [ + -46.058746337890625, + false + ] + ], + [ + [ + -57.82805252075195, + false + ] + ], + [ + [ + -56.03277587890625, + false + ] + ] + ], + "filtered_resps": [ + [ + -35.210548400878906, + false + ], + [ + -46.058746337890625, + false + ], + [ + -57.82805252075195, + false + ], + [ + -56.03277587890625, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "8b7db974c971783be24d283fc0b30af1533a468dea2747d11116c8198ecdd878", + "prompt_hash": "d06deb2e244631f4c9a0cf6c9e7bb14db1b725a527eae640a488e97c6c05f6c8", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 3, + "doc": { + "question": "What does Monte Carlo Guidance (MCG) aim to improve in the \\u03b1Seq model?", + "choices": [ + "Training speed during multi-task learning.", + "Sampling of high-reward generations.", + "Data efficiency in few-shot settings.", + "Convergence of the forward diffusion process." + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What does Monte Carlo Guidance (MCG) aim to improve in the \\u03b1Seq model?\nAnswer:", + " Training speed during multi-task learning." + ], + [ + "Question:What does Monte Carlo Guidance (MCG) aim to improve in the \\u03b1Seq model?\nAnswer:", + " Sampling of high-reward generations." 
+ ], + [ + "Question:What does Monte Carlo Guidance (MCG) aim to improve in the \\u03b1Seq model?\nAnswer:", + " Data efficiency in few-shot settings." + ], + [ + "Question:What does Monte Carlo Guidance (MCG) aim to improve in the \\u03b1Seq model?\nAnswer:", + " Convergence of the forward diffusion process." + ] + ], + "resps": [ + [ + [ + -32.38593673706055, + false + ] + ], + [ + [ + -31.022836685180664, + false + ] + ], + [ + [ + -31.55197525024414, + false + ] + ], + [ + [ + -23.415512084960938, + false + ] + ] + ], + "filtered_resps": [ + [ + -32.38593673706055, + false + ], + [ + -31.022836685180664, + false + ], + [ + -31.55197525024414, + false + ], + [ + -23.415512084960938, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f7cda5ca84495bc9eddcf9a40c309e189f0abd73a2f7aa79e2ededc3d98c0296", + "prompt_hash": "8c9ee044d4547fc50d79a0ac9df3d33c91b8e365151aad32160dfb35be79f6ca", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 4, + "doc": { + "question": "Which of the following architectures does the paper currently use for its causal implementation?", + "choices": [ + "Feedforward Transformer", + "Bidirectional LSTM", + "Recurrent Neural Network (RNN)", + "Masked Autoencoder" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which of the following architectures does the paper currently use for its causal implementation?\nAnswer:", + " Feedforward Transformer" + ], + [ + "Question:Which of the following architectures does the paper currently use for its causal implementation?\nAnswer:", + " Bidirectional LSTM" + ], + [ + "Question:Which of the following architectures does the paper currently use for its causal implementation?\nAnswer:", + " Recurrent Neural Network (RNN)" + ], + [ + "Question:Which of the following architectures does the paper currently use for its causal implementation?\nAnswer:", + " Masked Autoencoder" + ] + ], + "resps": [ + [ + [ + -20.343334197998047, + false + ] + ], + [ + [ + -13.779508590698242, + false + ] + ], + [ + [ + -11.171441078186035, + false + ] + ], + [ + [ + -12.286684036254883, + false + ] + ] + ], + "filtered_resps": [ + [ + -20.343334197998047, + false + ], + [ + -13.779508590698242, + false + ], + [ + -11.171441078186035, + false + ], + [ + -12.286684036254883, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f618242e0a371a70f893f9d7b535d4b66c482e1fa31a0a617ec2998fb54d3a92", + "prompt_hash": "3a808e1733399c82d28c4218096a47230cb6816c0ced6a433152bf2b40d54df9", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 5, + "doc": { + "question": "How many editing samples does the dataset \\u201cUltraEdit\\u201d contain in total?", + "choices": [ + "750,000", + "1,600,000", + "4,108,262", + "313,010" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many editing samples does the dataset \\u201cUltraEdit\\u201d contain in total?\nAnswer:", + " 750,000" + ], + [ + "Question:How many editing samples does the dataset \\u201cUltraEdit\\u201d contain in total?\nAnswer:", + " 1,600,000" + ], + [ + "Question:How many editing samples does the dataset \\u201cUltraEdit\\u201d contain in total?\nAnswer:", + " 4,108,262" + ], + [ + "Question:How many editing samples does the dataset \\u201cUltraEdit\\u201d contain in total?\nAnswer:", + " 313,010" + ] + ], + 
"resps": [ + [ + [ + -12.545722007751465, + false + ] + ], + [ + [ + -12.534786224365234, + false + ] + ], + [ + [ + -22.415538787841797, + false + ] + ], + [ + [ + -19.220945358276367, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.545722007751465, + false + ], + [ + -12.534786224365234, + false + ], + [ + -22.415538787841797, + false + ], + [ + -19.220945358276367, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "4fe6b16f5e73298a13d1372b8ea8db250730cd4e5aa0c877cc84dabb8477e46c", + "prompt_hash": "95b2a1fd0fc42d700a482f49c3f87e054be2a261fd58cfceff9c39f8af857f1b", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 6, + "doc": { + "question": "Which model is used as the diffusion backbone in the UltraEdit data generation pipeline?", + "choices": [ + "Stable Diffusion v1.5", + "GLIDE", + "DALL-E 3", + "SDXL-Turbo" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:Which model is used as the diffusion backbone in the UltraEdit data generation pipeline?\nAnswer:", + " Stable Diffusion v1.5" + ], + [ + "Question:Which model is used as the diffusion backbone in the UltraEdit data generation pipeline?\nAnswer:", + " GLIDE" + ], + [ + "Question:Which model is used as the diffusion backbone in the UltraEdit data generation pipeline?\nAnswer:", + " DALL-E 3" + ], + [ + "Question:Which model is used as the diffusion backbone in the UltraEdit data generation pipeline?\nAnswer:", + " SDXL-Turbo" + ] + ], + "resps": [ + [ + [ + -8.732109069824219, + false + ] + ], + [ + [ + -11.479008674621582, + false + ] + ], + [ + [ + -9.608929634094238, + false + ] + ], + [ + [ + -17.172548294067383, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.732109069824219, + false + ], + [ + -11.479008674621582, + false + ], + [ + -9.608929634094238, + false + ], + [ + -17.172548294067383, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "d861624f0803bd0d0d0f30213b808194a8a804266415f902a306b7e6ad3ba97f", + "prompt_hash": "2213363c328ebf6a27f5f0227e8d976bf54f6e1ad54e50469dfc3b221f56b641", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 7, + "doc": { + "question": "Which benchmark reveals a scaling effect when using increasing volumes of the UltraEdit dataset?", + "choices": [ + "COCO", + "MagicBrush", + "ImageNet", + "EditBench" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which benchmark reveals a scaling effect when using increasing volumes of the UltraEdit dataset?\nAnswer:", + " COCO" + ], + [ + "Question:Which benchmark reveals a scaling effect when using increasing volumes of the UltraEdit dataset?\nAnswer:", + " MagicBrush" + ], + [ + "Question:Which benchmark reveals a scaling effect when using increasing volumes of the UltraEdit dataset?\nAnswer:", + " ImageNet" + ], + [ + "Question:Which benchmark reveals a scaling effect when using increasing volumes of the UltraEdit dataset?\nAnswer:", + " EditBench" + ] + ], + "resps": [ + [ + [ + -11.662349700927734, + false + ] + ], + [ + [ + -14.004988670349121, + false + ] + ], + [ + [ + -8.855036735534668, + false + ] + ], + [ + [ + -13.624947547912598, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.662349700927734, + false + ], + [ + -14.004988670349121, + false + ], + [ + -8.855036735534668, + false + ], + [ + -13.624947547912598, + false + ] + ], + 
"filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f84d5958b5aead115ff6c3b25f650dc6b2342343d5f729c3015b9b4cae0f3f81", + "prompt_hash": "40b205e24e7829798397f2138536fb831ce0cded104c4feb1999e141ba7222c2", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 8, + "doc": { + "question": "Which technique is used to identify the object to be edited in region-based editing?", + "choices": [ + "ControlNet", + "GroundingDINO with SAM", + "VQGAN-CLIP", + "DreamBooth" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which technique is used to identify the object to be edited in region-based editing?\nAnswer:", + " ControlNet" + ], + [ + "Question:Which technique is used to identify the object to be edited in region-based editing?\nAnswer:", + " GroundingDINO with SAM" + ], + [ + "Question:Which technique is used to identify the object to be edited in region-based editing?\nAnswer:", + " VQGAN-CLIP" + ], + [ + "Question:Which technique is used to identify the object to be edited in region-based editing?\nAnswer:", + " DreamBooth" + ] + ], + "resps": [ + [ + [ + -12.154390335083008, + false + ] + ], + [ + [ + -23.500587463378906, + false + ] + ], + [ + [ + -23.389801025390625, + false + ] + ], + [ + [ + -13.390340805053711, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.154390335083008, + false + ], + [ + -23.500587463378906, + false + ], + [ + -23.389801025390625, + false + ], + [ + -13.390340805053711, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f117727818829cd8265a97648f0610caa2e19403f0de0ef7e1a28057ae87a256", + "prompt_hash": "8ce78380f484ba30f5a7489d5d0fcc31c80f3e78e95cd2c13345dbbe5f98e61c", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 9, + "doc": { + "question": "What is the primary metric used in the ablation study to evaluate the impact of region-based editing data?", + "choices": [ + "DINO similarity", + "CLIPout", + "L2 distance", + "SSIM" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the primary metric used in the ablation study to evaluate the impact of region-based editing data?\nAnswer:", + " DINO similarity" + ], + [ + "Question:What is the primary metric used in the ablation study to evaluate the impact of region-based editing data?\nAnswer:", + " CLIPout" + ], + [ + "Question:What is the primary metric used in the ablation study to evaluate the impact of region-based editing data?\nAnswer:", + " L2 distance" + ], + [ + "Question:What is the primary metric used in the ablation study to evaluate the impact of region-based editing data?\nAnswer:", + " SSIM" + ] + ], + "resps": [ + [ + [ + -16.91316795349121, + false + ] + ], + [ + [ + -20.059391021728516, + false + ] + ], + [ + [ + -11.492344856262207, + false + ] + ], + [ + [ + -10.103410720825195, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.91316795349121, + false + ], + [ + -20.059391021728516, + false + ], + [ + -11.492344856262207, + false + ], + [ + -10.103410720825195, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "115786c7d725aed69d3e54b16f9664cdfbedb4f49ff34d45f98a5333a18fba7e", + "prompt_hash": "0acb114e8b7c7e6ed8c4e39c8f039af3e557b3f5863020d89b83cc60db6f9837", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + 
"acc_norm": 0.0 + }, + { + "doc_id": 10, + "doc": { + "question": "What is the increase in coverage observed when solving CodeContests problems using Gemma-2B with repeated sampling up to 10,000 samples?", + "choices": [ + "From 0.02% to 7.1%", + "From 0.1% to 25%", + "From 0.5% to 10%", + "From 1% to 50%" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the increase in coverage observed when solving CodeContests problems using Gemma-2B with repeated sampling up to 10,000 samples?\nAnswer:", + " From 0.02% to 7.1%" + ], + [ + "Question:What is the increase in coverage observed when solving CodeContests problems using Gemma-2B with repeated sampling up to 10,000 samples?\nAnswer:", + " From 0.1% to 25%" + ], + [ + "Question:What is the increase in coverage observed when solving CodeContests problems using Gemma-2B with repeated sampling up to 10,000 samples?\nAnswer:", + " From 0.5% to 10%" + ], + [ + "Question:What is the increase in coverage observed when solving CodeContests problems using Gemma-2B with repeated sampling up to 10,000 samples?\nAnswer:", + " From 1% to 50%" + ] + ], + "resps": [ + [ + [ + -36.91026306152344, + false + ] + ], + [ + [ + -33.62900161743164, + false + ] + ], + [ + [ + -31.656620025634766, + false + ] + ], + [ + [ + -26.566728591918945, + false + ] + ] + ], + "filtered_resps": [ + [ + -36.91026306152344, + false + ], + [ + -33.62900161743164, + false + ], + [ + -31.656620025634766, + false + ], + [ + -26.566728591918945, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "44df8ac43cd9caa8e4599475afb3e79cd55e4101df9a6be10f6f9382602b688c", + "prompt_hash": "9ceebe5ac95b6ba8122bf265f958d7b9b6ea4934411a663b4bdc4d54f7e0862e", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 11, + "doc": { + "question": "Which datasets evaluated in the study include automatic verifiers for sample correctness?", + "choices": [ + "MiniF2F-MATH, CodeContests, SWE-bench Lite", + "GSM8K, MATH, SWE-bench Lite", + "MATH, MiniF2F-MATH, GSM8K", + "CodeContests, GSM8K, MATH" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which datasets evaluated in the study include automatic verifiers for sample correctness?\nAnswer:", + " MiniF2F-MATH, CodeContests, SWE-bench Lite" + ], + [ + "Question:Which datasets evaluated in the study include automatic verifiers for sample correctness?\nAnswer:", + " GSM8K, MATH, SWE-bench Lite" + ], + [ + "Question:Which datasets evaluated in the study include automatic verifiers for sample correctness?\nAnswer:", + " MATH, MiniF2F-MATH, GSM8K" + ], + [ + "Question:Which datasets evaluated in the study include automatic verifiers for sample correctness?\nAnswer:", + " CodeContests, GSM8K, MATH" + ] + ], + "resps": [ + [ + [ + -87.04169464111328, + false + ] + ], + [ + [ + -50.65325927734375, + false + ] + ], + [ + [ + -45.92802429199219, + false + ] + ], + [ + [ + -28.259180068969727, + false + ] + ] + ], + "filtered_resps": [ + [ + -87.04169464111328, + false + ], + [ + -50.65325927734375, + false + ], + [ + -45.92802429199219, + false + ], + [ + -28.259180068969727, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "16350209d0fd2de900dc667b7547b1c1acda347ffe7ad0e075a33bacbcd0b5ae", + "prompt_hash": "e7aca0fd8e0b4655d141766d237513ee15a41bedc0801a38614753bd866ea52b", + "target_hash": 
"5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 12, + "doc": { + "question": "What type of function is used to model the relationship between coverage and number of samples?", + "choices": [ + "Exponentiated power law", + "Linear regression", + "Logarithmic function", + "Quadratic polynomial" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What type of function is used to model the relationship between coverage and number of samples?\nAnswer:", + " Exponentiated power law" + ], + [ + "Question:What type of function is used to model the relationship between coverage and number of samples?\nAnswer:", + " Linear regression" + ], + [ + "Question:What type of function is used to model the relationship between coverage and number of samples?\nAnswer:", + " Logarithmic function" + ], + [ + "Question:What type of function is used to model the relationship between coverage and number of samples?\nAnswer:", + " Quadratic polynomial" + ] + ], + "resps": [ + [ + [ + -24.710233688354492, + false + ] + ], + [ + [ + -7.671542167663574, + false + ] + ], + [ + [ + -11.807942390441895, + false + ] + ], + [ + [ + -13.855320930480957, + false + ] + ] + ], + "filtered_resps": [ + [ + -24.710233688354492, + false + ], + [ + -7.671542167663574, + false + ], + [ + -11.807942390441895, + false + ], + [ + -13.855320930480957, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "871053292bba086d3b3b812b1c4b2bfff9fdf77ff9831ebdc84d75742938d72a", + "prompt_hash": "a33a8b9084d669044175e7ce9033ca380ab9718a2aff6a835db8f2709a7e5c73", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 13, + "doc": { + "question": "What was the maximum percentage of issues solved using DeepSeek-Coder-V2-Instruct with 5 attempts per issue on SWE-bench Lite?", + "choices": [ + "29.62%", + "43.00%", + "15.90%", + "56.00%" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What was the maximum percentage of issues solved using DeepSeek-Coder-V2-Instruct with 5 attempts per issue on SWE-bench Lite?\nAnswer:", + " 29.62%" + ], + [ + "Question:What was the maximum percentage of issues solved using DeepSeek-Coder-V2-Instruct with 5 attempts per issue on SWE-bench Lite?\nAnswer:", + " 43.00%" + ], + [ + "Question:What was the maximum percentage of issues solved using DeepSeek-Coder-V2-Instruct with 5 attempts per issue on SWE-bench Lite?\nAnswer:", + " 15.90%" + ], + [ + "Question:What was the maximum percentage of issues solved using DeepSeek-Coder-V2-Instruct with 5 attempts per issue on SWE-bench Lite?\nAnswer:", + " 56.00%" + ] + ], + "resps": [ + [ + [ + -16.0406494140625, + false + ] + ], + [ + [ + -15.355518341064453, + false + ] + ], + [ + [ + -16.611568450927734, + false + ] + ], + [ + [ + -15.058615684509277, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.0406494140625, + false + ], + [ + -15.355518341064453, + false + ], + [ + -16.611568450927734, + false + ], + [ + -15.058615684509277, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "664e791dfb8e473eb440962fb2801a946420e6aacb189738b6b4cd3a6e590f88", + "prompt_hash": "5a1317a0c04f5290b0304168b6d6d77eb802b415403ee25b29fdaaf46cc42af3", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 14, + "doc": { + "question": 
"Which verification methods were tested for their scalability in identifying correct answers on GSM8K and MATH datasets?", + "choices": [ + "Majority vote, reward model best-of-N, reward model weighted vote", + "Gradient descent, beam search, ensemble voting", + "Prompt engineering, token sampling, output reranking", + "Syntax parsing, lexical similarity, token classification" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which verification methods were tested for their scalability in identifying correct answers on GSM8K and MATH datasets?\nAnswer:", + " Majority vote, reward model best-of-N, reward model weighted vote" + ], + [ + "Question:Which verification methods were tested for their scalability in identifying correct answers on GSM8K and MATH datasets?\nAnswer:", + " Gradient descent, beam search, ensemble voting" + ], + [ + "Question:Which verification methods were tested for their scalability in identifying correct answers on GSM8K and MATH datasets?\nAnswer:", + " Prompt engineering, token sampling, output reranking" + ], + [ + "Question:Which verification methods were tested for their scalability in identifying correct answers on GSM8K and MATH datasets?\nAnswer:", + " Syntax parsing, lexical similarity, token classification" + ] + ], + "resps": [ + [ + [ + -59.267059326171875, + false + ] + ], + [ + [ + -35.804481506347656, + false + ] + ], + [ + [ + -43.54387664794922, + false + ] + ], + [ + [ + -39.79151153564453, + false + ] + ] + ], + "filtered_resps": [ + [ + -59.267059326171875, + false + ], + [ + -35.804481506347656, + false + ], + [ + -43.54387664794922, + false + ], + [ + -39.79151153564453, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "0b9f0bba11615e1363d62aca9634d306bd14429c924a8ad6885c2983578308c8", + "prompt_hash": "0d17baf56754596eaed49b4028801899ec6ba4fbb02532f3aa951b5948115bb2", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 15, + "doc": { + "question": "What Dice score does MedSAM-2 achieve on the BTCV 3D medical image segmentation dataset?", + "choices": [ + "86.2%", + "88.4%", + "89.0%", + "90.5%" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What Dice score does MedSAM-2 achieve on the BTCV 3D medical image segmentation dataset?\nAnswer:", + " 86.2%" + ], + [ + "Question:What Dice score does MedSAM-2 achieve on the BTCV 3D medical image segmentation dataset?\nAnswer:", + " 88.4%" + ], + [ + "Question:What Dice score does MedSAM-2 achieve on the BTCV 3D medical image segmentation dataset?\nAnswer:", + " 89.0%" + ], + [ + "Question:What Dice score does MedSAM-2 achieve on the BTCV 3D medical image segmentation dataset?\nAnswer:", + " 90.5%" + ] + ], + "resps": [ + [ + [ + -14.722869873046875, + false + ] + ], + [ + [ + -15.132040977478027, + false + ] + ], + [ + [ + -15.362841606140137, + false + ] + ], + [ + [ + -15.039222717285156, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.722869873046875, + false + ], + [ + -15.132040977478027, + false + ], + [ + -15.362841606140137, + false + ], + [ + -15.039222717285156, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "071c9543500216fe3fa54b274fda3405e1560d004da2672b30ff527777bbc92b", + "prompt_hash": "3a19aaa4ffdf21a14e504172df6f1b5b9f69e86067c43f481ddeb35b33473d2f", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 
0.0 + }, + { + "doc_id": 16, + "doc": { + "question": "What is the primary function of the self-sorting memory bank in MedSAM-2?", + "choices": [ + "To store only the most recent embeddings", + "To sort image slices by acquisition order", + "To dynamically select embeddings based on confidence and dissimilarity", + "To increase the memory capacity of the model" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the primary function of the self-sorting memory bank in MedSAM-2?\nAnswer:", + " To store only the most recent embeddings" + ], + [ + "Question:What is the primary function of the self-sorting memory bank in MedSAM-2?\nAnswer:", + " To sort image slices by acquisition order" + ], + [ + "Question:What is the primary function of the self-sorting memory bank in MedSAM-2?\nAnswer:", + " To dynamically select embeddings based on confidence and dissimilarity" + ], + [ + "Question:What is the primary function of the self-sorting memory bank in MedSAM-2?\nAnswer:", + " To increase the memory capacity of the model" + ] + ], + "resps": [ + [ + [ + -27.875429153442383, + false + ] + ], + [ + [ + -35.84728240966797, + false + ] + ], + [ + [ + -43.798126220703125, + false + ] + ], + [ + [ + -17.682130813598633, + false + ] + ] + ], + "filtered_resps": [ + [ + -27.875429153442383, + false + ], + [ + -35.84728240966797, + false + ], + [ + -43.798126220703125, + false + ], + [ + -17.682130813598633, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "85e9263b05f7c959e70b80cb683ee5ed4496855a5dcd697bf4d14086b8fefbd0", + "prompt_hash": "f49c6b7c63e3b7b2d841b5acf05e3014c385f72a0ee3273b1642084679e24684", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 17, + "doc": { + "question": "What prompt probability is used for 3D medical image frames during segmentation?", + "choices": [ + "0.1", + "0.25", + "0.3", + "0.5" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What prompt probability is used for 3D medical image frames during segmentation?\nAnswer:", + " 0.1" + ], + [ + "Question:What prompt probability is used for 3D medical image frames during segmentation?\nAnswer:", + " 0.25" + ], + [ + "Question:What prompt probability is used for 3D medical image frames during segmentation?\nAnswer:", + " 0.3" + ], + [ + "Question:What prompt probability is used for 3D medical image frames during segmentation?\nAnswer:", + " 0.5" + ] + ], + "resps": [ + [ + [ + -4.572741985321045, + false + ] + ], + [ + [ + -6.072905540466309, + false + ] + ], + [ + [ + -5.138263702392578, + false + ] + ], + [ + [ + -4.633764743804932, + false + ] + ] + ], + "filtered_resps": [ + [ + -4.572741985321045, + false + ], + [ + -6.072905540466309, + false + ], + [ + -5.138263702392578, + false + ], + [ + -4.633764743804932, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f2344d55b6c745c4d2dc99b6f0644d1836baaef4bb9c0b3d1b2b74620fae1f41", + "prompt_hash": "564b43b57c335dd664c18821b1478abd181701f4a2f606c13138dcf6769ee592", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 18, + "doc": { + "question": "In the ablation study, what Dice score was achieved on the CadVidSet dataset when using all three components: IOU threshold, dissimilar templates, and resampling?", + "choices": [ + "53.9%", + "57.8%", + "64.5%", + "72.9%" + ], + 
"answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:In the ablation study, what Dice score was achieved on the CadVidSet dataset when using all three components: IOU threshold, dissimilar templates, and resampling?\nAnswer:", + " 53.9%" + ], + [ + "Question:In the ablation study, what Dice score was achieved on the CadVidSet dataset when using all three components: IOU threshold, dissimilar templates, and resampling?\nAnswer:", + " 57.8%" + ], + [ + "Question:In the ablation study, what Dice score was achieved on the CadVidSet dataset when using all three components: IOU threshold, dissimilar templates, and resampling?\nAnswer:", + " 64.5%" + ], + [ + "Question:In the ablation study, what Dice score was achieved on the CadVidSet dataset when using all three components: IOU threshold, dissimilar templates, and resampling?\nAnswer:", + " 72.9%" + ] + ], + "resps": [ + [ + [ + -16.8532657623291, + false + ] + ], + [ + [ + -16.828392028808594, + false + ] + ], + [ + [ + -16.361194610595703, + false + ] + ], + [ + [ + -15.42495059967041, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.8532657623291, + false + ], + [ + -16.828392028808594, + false + ], + [ + -16.361194610595703, + false + ], + [ + -15.42495059967041, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "9a6a818e319d1a38943ed39f3744bf0ce7ff570586e188b790e8d99e5f098a92", + "prompt_hash": "feb4610e33e35a2ab7303ab190151070e7ca5d88a02104fb86048ba3508510f8", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 19, + "doc": { + "question": "How many GPUs were used for training and testing MedSAM-2?", + "choices": [ + "32", + "64", + "128", + "256" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:How many GPUs were used for training and testing MedSAM-2?\nAnswer:", + " 32" + ], + [ + "Question:How many GPUs were used for training and testing MedSAM-2?\nAnswer:", + " 64" + ], + [ + "Question:How many GPUs were used for training and testing MedSAM-2?\nAnswer:", + " 128" + ], + [ + "Question:How many GPUs were used for training and testing MedSAM-2?\nAnswer:", + " 256" + ] + ], + "resps": [ + [ + [ + -5.806703567504883, + false + ] + ], + [ + [ + -7.463939666748047, + false + ] + ], + [ + [ + -7.4690446853637695, + false + ] + ], + [ + [ + -8.828374862670898, + false + ] + ] + ], + "filtered_resps": [ + [ + -5.806703567504883, + false + ], + [ + -7.463939666748047, + false + ], + [ + -7.4690446853637695, + false + ], + [ + -8.828374862670898, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "a4494f34db05155b1bb4cb1416cd7e0a4c3c3f82427a97adb362ed5eadf60cd0", + "prompt_hash": "74c4c7819aed8c050224abc259669dfd661560212a0ea91485acf1c392669c06", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 20, + "doc": { + "question": "What is the maximum context length supported by the LongVILA training system?", + "choices": [ + "1 million tokens", + "1.5 million tokens", + "2 million tokens", + "2.5 million tokens" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the maximum context length supported by the LongVILA training system?\nAnswer:", + " 1 million tokens" + ], + [ + "Question:What is the maximum context length supported by the LongVILA training system?\nAnswer:", + " 1.5 million tokens" + ], + [ + "Question:What is the 
maximum context length supported by the LongVILA training system?\nAnswer:", + " 2 million tokens" + ], + [ + "Question:What is the maximum context length supported by the LongVILA training system?\nAnswer:", + " 2.5 million tokens" + ] + ], + "resps": [ + [ + [ + -10.128260612487793, + false + ] + ], + [ + [ + -12.194124221801758, + false + ] + ], + [ + [ + -11.196681022644043, + false + ] + ], + [ + [ + -13.080391883850098, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.128260612487793, + false + ], + [ + -12.194124221801758, + false + ], + [ + -11.196681022644043, + false + ], + [ + -13.080391883850098, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "a141a5184d5a62903bf04e38d70a7794df07219d83153e0c1b68e2a515c1804e", + "prompt_hash": "56db71b047e286f882824cf68ce6953f0db72790be1553e20bb18eedb012cfac", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 21, + "doc": { + "question": "Which video benchmark did LongVILA-7B achieve 65.1% accuracy on with subtitles?", + "choices": [ + "ActivityNet-QA", + "MVBench", + "VideoMME", + "NExT-QA" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which video benchmark did LongVILA-7B achieve 65.1% accuracy on with subtitles?\nAnswer:", + " ActivityNet-QA" + ], + [ + "Question:Which video benchmark did LongVILA-7B achieve 65.1% accuracy on with subtitles?\nAnswer:", + " MVBench" + ], + [ + "Question:Which video benchmark did LongVILA-7B achieve 65.1% accuracy on with subtitles?\nAnswer:", + " VideoMME" + ], + [ + "Question:Which video benchmark did LongVILA-7B achieve 65.1% accuracy on with subtitles?\nAnswer:", + " NExT-QA" + ] + ], + "resps": [ + [ + [ + -16.321186065673828, + false + ] + ], + [ + [ + -9.568792343139648, + false + ] + ], + [ + [ + -14.908329963684082, + false + ] + ], + [ + [ + -15.955806732177734, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.321186065673828, + false + ], + [ + -9.568792343139648, + false + ], + [ + -14.908329963684082, + false + ], + [ + -15.955806732177734, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "a5cd9da82e58ee1724813d414580a6214f097549786cc1672569bcd8345ed30c", + "prompt_hash": "1b2dd67bbfee3227cc9ed70d506261123a60e49132a90b5d4f7604645c7fc245", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 22, + "doc": { + "question": "During Stage 1 of training, which components are trainable in the LongVILA pipeline?", + "choices": [ + "Only the LLM decoder", + "Only the vision encoder", + "Only the multi-modal projector", + "All components" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:During Stage 1 of training, which components are trainable in the LongVILA pipeline?\nAnswer:", + " Only the LLM decoder" + ], + [ + "Question:During Stage 1 of training, which components are trainable in the LongVILA pipeline?\nAnswer:", + " Only the vision encoder" + ], + [ + "Question:During Stage 1 of training, which components are trainable in the LongVILA pipeline?\nAnswer:", + " Only the multi-modal projector" + ], + [ + "Question:During Stage 1 of training, which components are trainable in the LongVILA pipeline?\nAnswer:", + " All components" + ] + ], + "resps": [ + [ + [ + -14.713794708251953, + false + ] + ], + [ + [ + -11.596830368041992, + false + ] + ], + [ + [ + -20.188129425048828, + false + ] + 
], + [ + [ + -5.85800313949585, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.713794708251953, + false + ], + [ + -11.596830368041992, + false + ], + [ + -20.188129425048828, + false + ], + [ + -5.85800313949585, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b47ec35348a110052f19639f2b9499bc0c46cf8429488ca42df5e378e1202a22", + "prompt_hash": "a2b2d866314cdfb5427c81ef7684c3e5c20dfb869a71d5187e6a42b9dd54261d", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 23, + "doc": { + "question": "Which dataset is used in Stage 4 to extend the context length of the LLM?", + "choices": [ + "COYO-25M", + "SlimPajama", + "YouCook2", + "Shot2Story" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which dataset is used in Stage 4 to extend the context length of the LLM?\nAnswer:", + " COYO-25M" + ], + [ + "Question:Which dataset is used in Stage 4 to extend the context length of the LLM?\nAnswer:", + " SlimPajama" + ], + [ + "Question:Which dataset is used in Stage 4 to extend the context length of the LLM?\nAnswer:", + " YouCook2" + ], + [ + "Question:Which dataset is used in Stage 4 to extend the context length of the LLM?\nAnswer:", + " Shot2Story" + ] + ], + "resps": [ + [ + [ + -27.8762264251709, + false + ] + ], + [ + [ + -17.669025421142578, + false + ] + ], + [ + [ + -21.235336303710938, + false + ] + ], + [ + [ + -26.448081970214844, + false + ] + ] + ], + "filtered_resps": [ + [ + -27.8762264251709, + false + ], + [ + -17.669025421142578, + false + ], + [ + -21.235336303710938, + false + ], + [ + -26.448081970214844, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "a4faf5c9264ce653b3bedfdacea3d798fea9a79474a73ef34ecd0ee6cfdb95ed", + "prompt_hash": "32cb48456ade5784ba620662840f9a7e452b30ccc3f89208bd37508e8fc34185", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 24, + "doc": { + "question": "How many videos are included in the LongVILA long video supervised fine-tuning dataset?", + "choices": [ + "12,345", + "15,292", + "18,764", + "20,001" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:How many videos are included in the LongVILA long video supervised fine-tuning dataset?\nAnswer:", + " 12,345" + ], + [ + "Question:How many videos are included in the LongVILA long video supervised fine-tuning dataset?\nAnswer:", + " 15,292" + ], + [ + "Question:How many videos are included in the LongVILA long video supervised fine-tuning dataset?\nAnswer:", + " 18,764" + ], + [ + "Question:How many videos are included in the LongVILA long video supervised fine-tuning dataset?\nAnswer:", + " 20,001" + ] + ], + "resps": [ + [ + [ + -13.590742111206055, + false + ] + ], + [ + [ + -14.44241714477539, + false + ] + ], + [ + [ + -14.642477035522461, + false + ] + ], + [ + [ + -13.420373916625977, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.590742111206055, + false + ], + [ + -14.44241714477539, + false + ], + [ + -14.642477035522461, + false + ], + [ + -13.420373916625977, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "d46fb59bfb0fb6f34a89faf012d5d165c19301c9ff502c47f685e65636163fab", + "prompt_hash": "5db7d08dc8a19b42b72123d0cf2b236d0a4105c971f8cffbcdc8c9f1ca6ea2e2", + "target_hash": 
"6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 25, + "doc": { + "question": "What is the main function of the Visual Summarization Token (VST) in Video-XL?", + "choices": [ + "To reduce model size by pruning layers", + "To summarize visual information within intervals into compressed KVs", + "To generate subtitles for video content", + "To align visual tokens with language tokens" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the main function of the Visual Summarization Token (VST) in Video-XL?\nAnswer:", + " To reduce model size by pruning layers" + ], + [ + "Question:What is the main function of the Visual Summarization Token (VST) in Video-XL?\nAnswer:", + " To summarize visual information within intervals into compressed KVs" + ], + [ + "Question:What is the main function of the Visual Summarization Token (VST) in Video-XL?\nAnswer:", + " To generate subtitles for video content" + ], + [ + "Question:What is the main function of the Visual Summarization Token (VST) in Video-XL?\nAnswer:", + " To align visual tokens with language tokens" + ] + ], + "resps": [ + [ + [ + -30.24161720275879, + false + ] + ], + [ + [ + -52.90513229370117, + false + ] + ], + [ + [ + -17.981550216674805, + false + ] + ], + [ + [ + -19.51709747314453, + false + ] + ] + ], + "filtered_resps": [ + [ + -30.24161720275879, + false + ], + [ + -52.90513229370117, + false + ], + [ + -17.981550216674805, + false + ], + [ + -19.51709747314453, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "ab6b3e9da155f4e091c7adca38ca3f2181393cff5bcd17547010c14389815ad3", + "prompt_hash": "4f3a04b3e1ee5bb04e70300064c2ec7ae91e27ae9093f854d97d886610c6e9c8", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 26, + "doc": { + "question": "Which visual encoder does Video-XL use to process input images?", + "choices": [ + "ViViT", + "TimeSformer", + "CLIP-ViT-L", + "Swin Transformer" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which visual encoder does Video-XL use to process input images?\nAnswer:", + " ViViT" + ], + [ + "Question:Which visual encoder does Video-XL use to process input images?\nAnswer:", + " TimeSformer" + ], + [ + "Question:Which visual encoder does Video-XL use to process input images?\nAnswer:", + " CLIP-ViT-L" + ], + [ + "Question:Which visual encoder does Video-XL use to process input images?\nAnswer:", + " Swin Transformer" + ] + ], + "resps": [ + [ + [ + -14.41057300567627, + false + ] + ], + [ + [ + -13.634620666503906, + false + ] + ], + [ + [ + -14.097136497497559, + false + ] + ], + [ + [ + -12.50584888458252, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.41057300567627, + false + ], + [ + -13.634620666503906, + false + ], + [ + -14.097136497497559, + false + ], + [ + -12.50584888458252, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "19029acc1632e0ce04138bde642186adfdb088364c6e22820cbced7dbd8b47a7", + "prompt_hash": "48a33f308bc8305cb506bf394dd05b82a1d9948d6b21d4320d2a0e16afcbb07d", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 27, + "doc": { + "question": "What technique does Video-XL use to enable training with increasing compression ratios?", + "choices": [ + "Dropout regularization", + "Curriculum 
learning", + "Contrastive learning", + "Token shuffling" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What technique does Video-XL use to enable training with increasing compression ratios?\nAnswer:", + " Dropout regularization" + ], + [ + "Question:What technique does Video-XL use to enable training with increasing compression ratios?\nAnswer:", + " Curriculum learning" + ], + [ + "Question:What technique does Video-XL use to enable training with increasing compression ratios?\nAnswer:", + " Contrastive learning" + ], + [ + "Question:What technique does Video-XL use to enable training with increasing compression ratios?\nAnswer:", + " Token shuffling" + ] + ], + "resps": [ + [ + [ + -15.249528884887695, + false + ] + ], + [ + [ + -12.305272102355957, + false + ] + ], + [ + [ + -10.114755630493164, + false + ] + ], + [ + [ + -15.036741256713867, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.249528884887695, + false + ], + [ + -12.305272102355957, + false + ], + [ + -10.114755630493164, + false + ], + [ + -15.036741256713867, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "05ed3ed749c5082a395841f692f4e756ff6c490ecc6fd11c77bd2c9d78f01e2f", + "prompt_hash": "9326395753e20d8f6a210f80607a0defaf3caee8d5cf3017c7650ac04added78", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 28, + "doc": { + "question": "How many frames can Video-XL process using a single A100-80GB GPU?", + "choices": [ + "1024", + "4096", + "2048", + "512" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many frames can Video-XL process using a single A100-80GB GPU?\nAnswer:", + " 1024" + ], + [ + "Question:How many frames can Video-XL process using a single A100-80GB GPU?\nAnswer:", + " 4096" + ], + [ + "Question:How many frames can Video-XL process using a single A100-80GB GPU?\nAnswer:", + " 2048" + ], + [ + "Question:How many frames can Video-XL process using a single A100-80GB GPU?\nAnswer:", + " 512" + ] + ], + "resps": [ + [ + [ + -6.188669204711914, + false + ] + ], + [ + [ + -7.195087432861328, + false + ] + ], + [ + [ + -6.599781513214111, + false + ] + ], + [ + [ + -5.575624465942383, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.188669204711914, + false + ], + [ + -7.195087432861328, + false + ], + [ + -6.599781513214111, + false + ], + [ + -5.575624465942383, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7b7c34cd606d716032e76d0177ab07a608e8af518d855eb8985638d584495d78", + "prompt_hash": "976819c662fd052e46e58e47c454e8d74ee47a3c9d5efeff61089cb2f90e7296", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 29, + "doc": { + "question": "What is the name of the synthetic dataset introduced to improve multi-detail retrieval capabilities in Video-XL?", + "choices": [ + "VideoQA++", + "V-Language", + "VICO", + "VisMulti" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the name of the synthetic dataset introduced to improve multi-detail retrieval capabilities in Video-XL?\nAnswer:", + " VideoQA++" + ], + [ + "Question:What is the name of the synthetic dataset introduced to improve multi-detail retrieval capabilities in Video-XL?\nAnswer:", + " V-Language" + ], + [ + "Question:What is the name of the synthetic dataset introduced to improve multi-detail retrieval 
capabilities in Video-XL?\nAnswer:", + " VICO" + ], + [ + "Question:What is the name of the synthetic dataset introduced to improve multi-detail retrieval capabilities in Video-XL?\nAnswer:", + " VisMulti" + ] + ], + "resps": [ + [ + [ + -13.095142364501953, + false + ] + ], + [ + [ + -17.66976547241211, + false + ] + ], + [ + [ + -13.14517879486084, + false + ] + ], + [ + [ + -18.147605895996094, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.095142364501953, + false + ], + [ + -17.66976547241211, + false + ], + [ + -13.14517879486084, + false + ], + [ + -18.147605895996094, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "ee12ffee2d13e230f203929df631c9db7b4bb411b9d369b3cd82831de0779b9a", + "prompt_hash": "46c9ab5339134e7daf84834412de21e0f378044a50bd1769753d972666508580", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 30, + "doc": { + "question": "How many problems in the Omni-MATH dataset are sourced from the HMMT_2 contest?", + "choices": [ + "896", + "1385", + "106", + "75" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:How many problems in the Omni-MATH dataset are sourced from the HMMT_2 contest?\nAnswer:", + " 896" + ], + [ + "Question:How many problems in the Omni-MATH dataset are sourced from the HMMT_2 contest?\nAnswer:", + " 1385" + ], + [ + "Question:How many problems in the Omni-MATH dataset are sourced from the HMMT_2 contest?\nAnswer:", + " 106" + ], + [ + "Question:How many problems in the Omni-MATH dataset are sourced from the HMMT_2 contest?\nAnswer:", + " 75" + ] + ], + "resps": [ + [ + [ + -10.793913841247559, + false + ] + ], + [ + [ + -12.004586219787598, + false + ] + ], + [ + [ + -7.941033363342285, + false + ] + ], + [ + [ + -7.036806106567383, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.793913841247559, + false + ], + [ + -12.004586219787598, + false + ], + [ + -7.941033363342285, + false + ], + [ + -7.036806106567383, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "0a5e5235b6e1fe059e6bf460d653870620d294c41e45b803d11e32dcdf5a318f", + "prompt_hash": "ace468d42a70969b8a40545c6810c92b0928178610da37347fa2ff704a75febe", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 31, + "doc": { + "question": "What is the maximum value of the trend intensity metric \\\\( \\\\mathcal{A} \\\\) defined in the difficulty consistency analysis?", + "choices": [ + "75", + "200", + "-200", + "100" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the maximum value of the trend intensity metric \\\\( \\\\mathcal{A} \\\\) defined in the difficulty consistency analysis?\nAnswer:", + " 75" + ], + [ + "Question:What is the maximum value of the trend intensity metric \\\\( \\\\mathcal{A} \\\\) defined in the difficulty consistency analysis?\nAnswer:", + " 200" + ], + [ + "Question:What is the maximum value of the trend intensity metric \\\\( \\\\mathcal{A} \\\\) defined in the difficulty consistency analysis?\nAnswer:", + " -200" + ], + [ + "Question:What is the maximum value of the trend intensity metric \\\\( \\\\mathcal{A} \\\\) defined in the difficulty consistency analysis?\nAnswer:", + " 100" + ] + ], + "resps": [ + [ + [ + -7.222622871398926, + false + ] + ], + [ + [ + -7.002682685852051, + false + ] + ], + [ + [ + -11.823040962219238, + false + ] + ], + 
[ + [ + -5.583542346954346, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.222622871398926, + false + ], + [ + -7.002682685852051, + false + ], + [ + -11.823040962219238, + false + ], + [ + -5.583542346954346, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "95c164209da12cf48779f8fd02937d4b49ef90e767068725ecef87b8de14fe2f", + "prompt_hash": "ebaff398c564036b069093bb1d4499c667f7c8f74c86f8941bcd4e0cfb64d5d6", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 32, + "doc": { + "question": "Which error type is identified as the most frequent during process-level assessment of model-generated mathematical solutions?", + "choices": [ + "Calculation", + "Accumulation", + "Unrelated", + "Logic" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:Which error type is identified as the most frequent during process-level assessment of model-generated mathematical solutions?\nAnswer:", + " Calculation" + ], + [ + "Question:Which error type is identified as the most frequent during process-level assessment of model-generated mathematical solutions?\nAnswer:", + " Accumulation" + ], + [ + "Question:Which error type is identified as the most frequent during process-level assessment of model-generated mathematical solutions?\nAnswer:", + " Unrelated" + ], + [ + "Question:Which error type is identified as the most frequent during process-level assessment of model-generated mathematical solutions?\nAnswer:", + " Logic" + ] + ], + "resps": [ + [ + [ + -8.842470169067383, + false + ] + ], + [ + [ + -12.819282531738281, + false + ] + ], + [ + [ + -12.745844841003418, + false + ] + ], + [ + [ + -11.778804779052734, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.842470169067383, + false + ], + [ + -12.819282531738281, + false + ], + [ + -12.745844841003418, + false + ], + [ + -11.778804779052734, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "a965a9a067cdd28c14092a1c2c8424370bd560fe3dbe71408eb6149645fc2916", + "prompt_hash": "055439271e3814e4eee77b7f8ca47bd8364a1de10e29066297dd459b83408328", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 33, + "doc": { + "question": "Which model achieved the highest consistency rate with GPT-4o in the Omni-Judge evaluation?", + "choices": [ + "LLaMA-2-7b-Chat", + "LLaMA-3-8b-Instruct", + "LLaMA-3.1-8b-Instruct", + "MetaLlama-3.1-70B-instruct" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which model achieved the highest consistency rate with GPT-4o in the Omni-Judge evaluation?\nAnswer:", + " LLaMA-2-7b-Chat" + ], + [ + "Question:Which model achieved the highest consistency rate with GPT-4o in the Omni-Judge evaluation?\nAnswer:", + " LLaMA-3-8b-Instruct" + ], + [ + "Question:Which model achieved the highest consistency rate with GPT-4o in the Omni-Judge evaluation?\nAnswer:", + " LLaMA-3.1-8b-Instruct" + ], + [ + "Question:Which model achieved the highest consistency rate with GPT-4o in the Omni-Judge evaluation?\nAnswer:", + " MetaLlama-3.1-70B-instruct" + ] + ], + "resps": [ + [ + [ + -16.803985595703125, + false + ] + ], + [ + [ + -15.235649108886719, + false + ] + ], + [ + [ + -29.402576446533203, + false + ] + ], + [ + [ + -33.316680908203125, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.803985595703125, + false + ], + [ + -15.235649108886719, + 
false + ], + [ + -29.402576446533203, + false + ], + [ + -33.316680908203125, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "6be0fdba9c7e5a2761a177a9bd233dce391802d7ff89b3039b893e560c5aefef", + "prompt_hash": "7679b8758e83a28a1b38c63d37f3adaa9c9102f41967a6f095a737d4fc63f07f", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 34, + "doc": { + "question": "What proportion of answers in the 200 randomly sampled Omni-MATH problems were classified as 'Text' type?", + "choices": [ + "34", + "51", + "95", + "8" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What proportion of answers in the 200 randomly sampled Omni-MATH problems were classified as 'Text' type?\nAnswer:", + " 34" + ], + [ + "Question:What proportion of answers in the 200 randomly sampled Omni-MATH problems were classified as 'Text' type?\nAnswer:", + " 51" + ], + [ + "Question:What proportion of answers in the 200 randomly sampled Omni-MATH problems were classified as 'Text' type?\nAnswer:", + " 95" + ], + [ + "Question:What proportion of answers in the 200 randomly sampled Omni-MATH problems were classified as 'Text' type?\nAnswer:", + " 8" + ] + ], + "resps": [ + [ + [ + -6.218066692352295, + false + ] + ], + [ + [ + -6.431605815887451, + false + ] + ], + [ + [ + -7.052123546600342, + false + ] + ], + [ + [ + -4.304784774780273, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.218066692352295, + false + ], + [ + -6.431605815887451, + false + ], + [ + -7.052123546600342, + false + ], + [ + -4.304784774780273, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "3a86737612a9683d7d370b0913cb5f25f494e298179962b646ce55733319d905", + "prompt_hash": "b8e869651696406e58e09116ea37bc3dc7a38b4745aa4d2aed0fd7d7d97e688d", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 35, + "doc": { + "question": "What is the success rate of leading language models on the FrontierMath benchmark?", + "choices": [ + "A. Over 50%", + "B. Between 10% and 20%", + "C. Between 2% and 5%", + "D. Less than 2%" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What is the success rate of leading language models on the FrontierMath benchmark?\nAnswer:", + " A. Over 50%" + ], + [ + "Question:What is the success rate of leading language models on the FrontierMath benchmark?\nAnswer:", + " B. Between 10% and 20%" + ], + [ + "Question:What is the success rate of leading language models on the FrontierMath benchmark?\nAnswer:", + " C. Between 2% and 5%" + ], + [ + "Question:What is the success rate of leading language models on the FrontierMath benchmark?\nAnswer:", + " D. 
Less than 2%" + ] + ], + "resps": [ + [ + [ + -22.314029693603516, + false + ] + ], + [ + [ + -32.98554611206055, + false + ] + ], + [ + [ + -36.0869255065918, + false + ] + ], + [ + [ + -28.037778854370117, + false + ] + ] + ], + "filtered_resps": [ + [ + -22.314029693603516, + false + ], + [ + -32.98554611206055, + false + ], + [ + -36.0869255065918, + false + ], + [ + -28.037778854370117, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "fb724d7d2863885f3cf6a1163886e3738f7ac37c4f79ddca68cd0f40d50cb26a", + "prompt_hash": "e9dafc91799916a2de278f5ce5de05e1083033fa4e79b19efd23065a0ddc8416", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 36, + "doc": { + "question": "Which two MSC classifications account for approximately 34% of the FrontierMath dataset?", + "choices": [ + "A. Number theory and group theory", + "B. Combinatorics and algebraic geometry", + "C. Number theory and combinatorics", + "D. Linear algebra and category theory" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which two MSC classifications account for approximately 34% of the FrontierMath dataset?\nAnswer:", + " A. Number theory and group theory" + ], + [ + "Question:Which two MSC classifications account for approximately 34% of the FrontierMath dataset?\nAnswer:", + " B. Combinatorics and algebraic geometry" + ], + [ + "Question:Which two MSC classifications account for approximately 34% of the FrontierMath dataset?\nAnswer:", + " C. Number theory and combinatorics" + ], + [ + "Question:Which two MSC classifications account for approximately 34% of the FrontierMath dataset?\nAnswer:", + " D. Linear algebra and category theory" + ] + ], + "resps": [ + [ + [ + -19.048017501831055, + false + ] + ], + [ + [ + -22.854753494262695, + false + ] + ], + [ + [ + -23.063589096069336, + false + ] + ], + [ + [ + -28.897056579589844, + false + ] + ] + ], + "filtered_resps": [ + [ + -19.048017501831055, + false + ], + [ + -22.854753494262695, + false + ], + [ + -23.063589096069336, + false + ], + [ + -28.897056579589844, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "604fd78a586878ff4f413ed6f22f71b442200ec02108500692fc0245ad46b421", + "prompt_hash": "467b4440ea470c77b5f52afebf24eb0321adac3836f625e66bb243d957d956ea", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 37, + "doc": { + "question": "What is the estimated critical error rate of the FrontierMath benchmark based on blind review?", + "choices": [ + "A. 3%", + "B. 6%", + "C. 10%", + "D. 15%" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the estimated critical error rate of the FrontierMath benchmark based on blind review?\nAnswer:", + " A. 3%" + ], + [ + "Question:What is the estimated critical error rate of the FrontierMath benchmark based on blind review?\nAnswer:", + " B. 6%" + ], + [ + "Question:What is the estimated critical error rate of the FrontierMath benchmark based on blind review?\nAnswer:", + " C. 10%" + ], + [ + "Question:What is the estimated critical error rate of the FrontierMath benchmark based on blind review?\nAnswer:", + " D. 
15%" + ] + ], + "resps": [ + [ + [ + -19.265060424804688, + false + ] + ], + [ + [ + -24.73288345336914, + false + ] + ], + [ + [ + -22.54848289489746, + false + ] + ], + [ + [ + -24.39875602722168, + false + ] + ] + ], + "filtered_resps": [ + [ + -19.265060424804688, + false + ], + [ + -24.73288345336914, + false + ], + [ + -22.54848289489746, + false + ], + [ + -24.39875602722168, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "73232ae25a7ec8e87e70a6a8c15642854c2045fe57f97631f7a1c18f334cccb1", + "prompt_hash": "ffd96735ddf9f5aea6c88664038e563748ffb9d2f1703a9e58da228705766970", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 38, + "doc": { + "question": "How many Fields Medalists were interviewed to assess the difficulty and relevance of FrontierMath?", + "choices": [ + "A. One", + "B. Two", + "C. Three", + "D. Four" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many Fields Medalists were interviewed to assess the difficulty and relevance of FrontierMath?\nAnswer:", + " A. One" + ], + [ + "Question:How many Fields Medalists were interviewed to assess the difficulty and relevance of FrontierMath?\nAnswer:", + " B. Two" + ], + [ + "Question:How many Fields Medalists were interviewed to assess the difficulty and relevance of FrontierMath?\nAnswer:", + " C. Three" + ], + [ + "Question:How many Fields Medalists were interviewed to assess the difficulty and relevance of FrontierMath?\nAnswer:", + " D. Four" + ] + ], + "resps": [ + [ + [ + -12.897353172302246, + false + ] + ], + [ + [ + -15.479686737060547, + false + ] + ], + [ + [ + -15.306407928466797, + false + ] + ], + [ + [ + -16.060991287231445, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.897353172302246, + false + ], + [ + -15.479686737060547, + false + ], + [ + -15.306407928466797, + false + ], + [ + -16.060991287231445, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "92536fa76b86f3416d618123ba7e1ac49f79cf14a5ef14270b202fbb22f4a8f0", + "prompt_hash": "c021c1ebb446b3bda19c667d9c143e5fb5afc75d3c09f41c5d7e24568f777fe6", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 39, + "doc": { + "question": "What is the primary reason existing benchmarks like MATH and GSM8K are considered insufficient for evaluating advanced mathematical reasoning in AI?", + "choices": [ + "A. They contain too many open-ended problems.", + "B. They are based on outdated mathematical theory.", + "C. Models have achieved near-perfect scores on them.", + "D. They are limited to symbolic computation only." + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the primary reason existing benchmarks like MATH and GSM8K are considered insufficient for evaluating advanced mathematical reasoning in AI?\nAnswer:", + " A. They contain too many open-ended problems." + ], + [ + "Question:What is the primary reason existing benchmarks like MATH and GSM8K are considered insufficient for evaluating advanced mathematical reasoning in AI?\nAnswer:", + " B. They are based on outdated mathematical theory." + ], + [ + "Question:What is the primary reason existing benchmarks like MATH and GSM8K are considered insufficient for evaluating advanced mathematical reasoning in AI?\nAnswer:", + " C. Models have achieved near-perfect scores on them." 
+ ], + [ + "Question:What is the primary reason existing benchmarks like MATH and GSM8K are considered insufficient for evaluating advanced mathematical reasoning in AI?\nAnswer:", + " D. They are limited to symbolic computation only." + ] + ], + "resps": [ + [ + [ + -28.125661849975586, + false + ] + ], + [ + [ + -32.29228973388672, + false + ] + ], + [ + [ + -39.08148193359375, + false + ] + ], + [ + [ + -35.807430267333984, + false + ] + ] + ], + "filtered_resps": [ + [ + -28.125661849975586, + false + ], + [ + -32.29228973388672, + false + ], + [ + -39.08148193359375, + false + ], + [ + -35.807430267333984, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "47eb164109115e79f1c451f50e7f4fa925728fd1217329b9466740453a1f5756", + "prompt_hash": "b2ecb72db476336ef9065e8ba6707ea0b2787e97183e1e00aeca7136b40e0d26", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 40, + "doc": { + "question": "What does the parameter \\\\( \\\\gamma_h \\\\) represent in the Shannon capacity formula?", + "choices": [ + "Available bandwidth", + "Effective channel gain", + "Signal power", + "Noise power" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What does the parameter \\\\( \\\\gamma_h \\\\) represent in the Shannon capacity formula?\nAnswer:", + " Available bandwidth" + ], + [ + "Question:What does the parameter \\\\( \\\\gamma_h \\\\) represent in the Shannon capacity formula?\nAnswer:", + " Effective channel gain" + ], + [ + "Question:What does the parameter \\\\( \\\\gamma_h \\\\) represent in the Shannon capacity formula?\nAnswer:", + " Signal power" + ], + [ + "Question:What does the parameter \\\\( \\\\gamma_h \\\\) represent in the Shannon capacity formula?\nAnswer:", + " Noise power" + ] + ], + "resps": [ + [ + [ + -14.229288101196289, + false + ] + ], + [ + [ + -15.019903182983398, + false + ] + ], + [ + [ + -11.57690715789795, + false + ] + ], + [ + [ + -11.713316917419434, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.229288101196289, + false + ], + [ + -15.019903182983398, + false + ], + [ + -11.57690715789795, + false + ], + [ + -11.713316917419434, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "caa8bbea2cb52d55ccc287406d790e3015ce631c0834d8f17178d18642335aef", + "prompt_hash": "1265b2989bfbc8a4cfb448e69880f5bd0e58c00570492ec1ff561b9a461ad8f0", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 41, + "doc": { + "question": "Which communication technology allows \\\\( P_n \\\\) to be treated as a configurable system parameter?", + "choices": [ + "NOMA", + "MIMO", + "Noise modulation", + "RIS" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which communication technology allows \\\\( P_n \\\\) to be treated as a configurable system parameter?\nAnswer:", + " NOMA" + ], + [ + "Question:Which communication technology allows \\\\( P_n \\\\) to be treated as a configurable system parameter?\nAnswer:", + " MIMO" + ], + [ + "Question:Which communication technology allows \\\\( P_n \\\\) to be treated as a configurable system parameter?\nAnswer:", + " Noise modulation" + ], + [ + "Question:Which communication technology allows \\\\( P_n \\\\) to be treated as a configurable system parameter?\nAnswer:", + " RIS" + ] + ], + "resps": [ + [ + [ + -16.54871940612793, + false + ] + ], + [ + [ + 
-12.308393478393555, + false + ] + ], + [ + [ + -18.554058074951172, + false + ] + ], + [ + [ + -12.541747093200684, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.54871940612793, + false + ], + [ + -12.308393478393555, + false + ], + [ + -18.554058074951172, + false + ], + [ + -12.541747093200684, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "8794ede48847d991771909da45d875c91fdb156214c8e27d545be83fca7ce78d", + "prompt_hash": "72ca7c5f2ac66b9929b418ce19a700167b6aa456de73d3c075407a8bbb0c7213", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 42, + "doc": { + "question": "What is one unique feature of pinching antennas demonstrated by DOCOMO in 2022?", + "choices": [ + "High-cost antenna reconfiguration", + "Inability to support LoS communication", + "Fixed installation at transceivers", + "Flexible deployment to support strong LoS links" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What is one unique feature of pinching antennas demonstrated by DOCOMO in 2022?\nAnswer:", + " High-cost antenna reconfiguration" + ], + [ + "Question:What is one unique feature of pinching antennas demonstrated by DOCOMO in 2022?\nAnswer:", + " Inability to support LoS communication" + ], + [ + "Question:What is one unique feature of pinching antennas demonstrated by DOCOMO in 2022?\nAnswer:", + " Fixed installation at transceivers" + ], + [ + "Question:What is one unique feature of pinching antennas demonstrated by DOCOMO in 2022?\nAnswer:", + " Flexible deployment to support strong LoS links" + ] + ], + "resps": [ + [ + [ + -30.219932556152344, + false + ] + ], + [ + [ + -29.853424072265625, + false + ] + ], + [ + [ + -34.562137603759766, + false + ] + ], + [ + [ + -44.639793395996094, + false + ] + ] + ], + "filtered_resps": [ + [ + -30.219932556152344, + false + ], + [ + -29.853424072265625, + false + ], + [ + -34.562137603759766, + false + ], + [ + -44.639793395996094, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "df2bf0e8ec5836ac8c9b56aa2cdd36f047860e4145da2b160983e3ae8e237236", + "prompt_hash": "688dfac913d0eba440ad3d2edcf0e107af2f03802368c86f07a686245728bb32", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 43, + "doc": { + "question": "In the high SNR regime, what is the approximate ergodic sum rate gain of pinching antennas over conventional antennas dependent on?", + "choices": [ + "The number of waveguides", + "The deployment cost of antennas", + "The ratio \\( \\frac{D}{d} \\)", + "The modulation scheme" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:In the high SNR regime, what is the approximate ergodic sum rate gain of pinching antennas over conventional antennas dependent on?\nAnswer:", + " The number of waveguides" + ], + [ + "Question:In the high SNR regime, what is the approximate ergodic sum rate gain of pinching antennas over conventional antennas dependent on?\nAnswer:", + " The deployment cost of antennas" + ], + [ + "Question:In the high SNR regime, what is the approximate ergodic sum rate gain of pinching antennas over conventional antennas dependent on?\nAnswer:", + " The ratio \\( \\frac{D}{d} \\)" + ], + [ + "Question:In the high SNR regime, what is the approximate ergodic sum rate gain of pinching antennas over conventional antennas dependent 
on?\nAnswer:", + " The modulation scheme" + ] + ], + "resps": [ + [ + [ + -17.938186645507812, + false + ] + ], + [ + [ + -19.054763793945312, + false + ] + ], + [ + [ + -31.16602325439453, + false + ] + ], + [ + [ + -16.148653030395508, + false + ] + ] + ], + "filtered_resps": [ + [ + -17.938186645507812, + false + ], + [ + -19.054763793945312, + false + ], + [ + -31.16602325439453, + false + ], + [ + -16.148653030395508, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "913ffe5ab2c4e9354f9da825d2adc0d08a777f3210d7d06478e42072760a2a7c", + "prompt_hash": "dacddc5de4ac44a8476a44d2839dc73456a28baf1523b2e9f0544fd579e7e568", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 44, + "doc": { + "question": "What condition is required for multiple pinching antennas on a single waveguide to achieve constructive interference?", + "choices": [ + "Using independent RF chains for each antenna", + "Ensuring equal distance from all users", + "Aligning signal phase shifts across antennas", + "Increasing the carrier frequency" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What condition is required for multiple pinching antennas on a single waveguide to achieve constructive interference?\nAnswer:", + " Using independent RF chains for each antenna" + ], + [ + "Question:What condition is required for multiple pinching antennas on a single waveguide to achieve constructive interference?\nAnswer:", + " Ensuring equal distance from all users" + ], + [ + "Question:What condition is required for multiple pinching antennas on a single waveguide to achieve constructive interference?\nAnswer:", + " Aligning signal phase shifts across antennas" + ], + [ + "Question:What condition is required for multiple pinching antennas on a single waveguide to achieve constructive interference?\nAnswer:", + " Increasing the carrier frequency" + ] + ], + "resps": [ + [ + [ + -25.47564697265625, + false + ] + ], + [ + [ + -34.380126953125, + false + ] + ], + [ + [ + -38.81388854980469, + false + ] + ], + [ + [ + -12.850831031799316, + false + ] + ] + ], + "filtered_resps": [ + [ + -25.47564697265625, + false + ], + [ + -34.380126953125, + false + ], + [ + -38.81388854980469, + false + ], + [ + -12.850831031799316, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "e1bcf5e1bdbd66506843b97b3911efdb450ec267b9757b4852dd24d46d79dc12", + "prompt_hash": "376a8a5df82e3d345c9f436b865e152193185c84d22742a7bb2709adff2478a4", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 45, + "doc": { + "question": "What frame rate is used by the speech tokenizer in GLM-4-Voice?", + "choices": [ + "A. 50Hz", + "B. 25Hz", + "C. 12.5Hz", + "D. 6.25Hz" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What frame rate is used by the speech tokenizer in GLM-4-Voice?\nAnswer:", + " A. 50Hz" + ], + [ + "Question:What frame rate is used by the speech tokenizer in GLM-4-Voice?\nAnswer:", + " B. 25Hz" + ], + [ + "Question:What frame rate is used by the speech tokenizer in GLM-4-Voice?\nAnswer:", + " C. 12.5Hz" + ], + [ + "Question:What frame rate is used by the speech tokenizer in GLM-4-Voice?\nAnswer:", + " D. 
6.25Hz" + ] + ], + "resps": [ + [ + [ + -14.651763916015625, + false + ] + ], + [ + [ + -19.52559471130371, + false + ] + ], + [ + [ + -21.01659393310547, + false + ] + ], + [ + [ + -27.753231048583984, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.651763916015625, + false + ], + [ + -19.52559471130371, + false + ], + [ + -21.01659393310547, + false + ], + [ + -27.753231048583984, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "3f47d465aee898fe5f2858c9697a72df4ec05fa08dcd205a7b7dba6b3385f64a", + "prompt_hash": "bbd6919b9e251cf2028e21fa63346428252f3a62bf9c9dd8658015c1f9739415", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 46, + "doc": { + "question": "Which model is used as the initialization base for GLM-4-Voice?", + "choices": [ + "A. Qwen2-7B", + "B. Whisper-Large-V3", + "C. GLM-4-9B-Base", + "D. ChatGLM3-6B" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which model is used as the initialization base for GLM-4-Voice?\nAnswer:", + " A. Qwen2-7B" + ], + [ + "Question:Which model is used as the initialization base for GLM-4-Voice?\nAnswer:", + " B. Whisper-Large-V3" + ], + [ + "Question:Which model is used as the initialization base for GLM-4-Voice?\nAnswer:", + " C. GLM-4-9B-Base" + ], + [ + "Question:Which model is used as the initialization base for GLM-4-Voice?\nAnswer:", + " D. ChatGLM3-6B" + ] + ], + "resps": [ + [ + [ + -20.858482360839844, + false + ] + ], + [ + [ + -18.8957462310791, + false + ] + ], + [ + [ + -30.00905418395996, + false + ] + ], + [ + [ + -27.548831939697266, + false + ] + ] + ], + "filtered_resps": [ + [ + -20.858482360839844, + false + ], + [ + -18.8957462310791, + false + ], + [ + -30.00905418395996, + false + ], + [ + -27.548831939697266, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "a293f6d8dadaf6bf2f7043fc920412bce4bc44eea4364f1f16750dd30ed27e60", + "prompt_hash": "736edfb168814c76d08998796804177f146b07aacb0759253d8bfd2e1aeb3486", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 47, + "doc": { + "question": "Which speech decoder architecture is used in GLM-4-Voice?", + "choices": [ + "A. FastSpeech 2", + "B. CosyVoice", + "C. Tacotron 2", + "D. VALL-E" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which speech decoder architecture is used in GLM-4-Voice?\nAnswer:", + " A. FastSpeech 2" + ], + [ + "Question:Which speech decoder architecture is used in GLM-4-Voice?\nAnswer:", + " B. CosyVoice" + ], + [ + "Question:Which speech decoder architecture is used in GLM-4-Voice?\nAnswer:", + " C. Tacotron 2" + ], + [ + "Question:Which speech decoder architecture is used in GLM-4-Voice?\nAnswer:", + " D. 
VALL-E" + ] + ], + "resps": [ + [ + [ + -15.32555866241455, + false + ] + ], + [ + [ + -22.239765167236328, + false + ] + ], + [ + [ + -18.153594970703125, + false + ] + ], + [ + [ + -21.598918914794922, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.32555866241455, + false + ], + [ + -22.239765167236328, + false + ], + [ + -18.153594970703125, + false + ], + [ + -21.598918914794922, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "850102a872cc87cacfad06d4e0d5cb4794eba6950191dc24f32f5ba7d381e3ab", + "prompt_hash": "a9fc375bc89c3d93c91a932a96b0528b1f5b93895c18af2d3aa297b0eb2ff84f", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 48, + "doc": { + "question": "What is the total number of tokens used during pre-training of GLM-4-Voice?", + "choices": [ + "A. 700 million", + "B. 7 billion", + "C. 1 trillion", + "D. 12 billion" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the total number of tokens used during pre-training of GLM-4-Voice?\nAnswer:", + " A. 700 million" + ], + [ + "Question:What is the total number of tokens used during pre-training of GLM-4-Voice?\nAnswer:", + " B. 7 billion" + ], + [ + "Question:What is the total number of tokens used during pre-training of GLM-4-Voice?\nAnswer:", + " C. 1 trillion" + ], + [ + "Question:What is the total number of tokens used during pre-training of GLM-4-Voice?\nAnswer:", + " D. 12 billion" + ] + ], + "resps": [ + [ + [ + -16.722270965576172, + false + ] + ], + [ + [ + -20.472637176513672, + false + ] + ], + [ + [ + -22.23086929321289, + false + ] + ], + [ + [ + -22.3320369720459, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.722270965576172, + false + ], + [ + -20.472637176513672, + false + ], + [ + -22.23086929321289, + false + ], + [ + -22.3320369720459, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "74e876c3453e4c4b42efbcfb18fb317a6029dbe0e340688a0d15ebb475e1c0c0", + "prompt_hash": "a7766b046084beb4895caed4bb212405c27f90baeae0cb0f1c8d744b3a4902e5", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 49, + "doc": { + "question": "Which model provides the text predictions for English recognition in the TTS task?", + "choices": [ + "A. Whisper-Large-V3", + "B. Paraformer-Large", + "C. ChatGPT", + "D. CosyVoice" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which model provides the text predictions for English recognition in the TTS task?\nAnswer:", + " A. Whisper-Large-V3" + ], + [ + "Question:Which model provides the text predictions for English recognition in the TTS task?\nAnswer:", + " B. Paraformer-Large" + ], + [ + "Question:Which model provides the text predictions for English recognition in the TTS task?\nAnswer:", + " C. ChatGPT" + ], + [ + "Question:Which model provides the text predictions for English recognition in the TTS task?\nAnswer:", + " D. 
CosyVoice" + ] + ], + "resps": [ + [ + [ + -15.751648902893066, + false + ] + ], + [ + [ + -26.65165138244629, + false + ] + ], + [ + [ + -15.32774543762207, + false + ] + ], + [ + [ + -22.42506980895996, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.751648902893066, + false + ], + [ + -26.65165138244629, + false + ], + [ + -15.32774543762207, + false + ], + [ + -22.42506980895996, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "70b1b8c729dc4de68d708dae2634d94cd7992df78b1a011c91085f51b37f64b9", + "prompt_hash": "559397a2d29372587b9ffb95e5efbe13ca23fa3ed596ae5a96b78a1d3780be3d", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 50, + "doc": { + "question": "What is the name of the newly introduced dataset that uses randomly generated DAGs for logical reasoning tasks?", + "choices": [ + "TreeQA", + "GraphProofQA", + "ProsQA", + "LogicDAG" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the name of the newly introduced dataset that uses randomly generated DAGs for logical reasoning tasks?\nAnswer:", + " TreeQA" + ], + [ + "Question:What is the name of the newly introduced dataset that uses randomly generated DAGs for logical reasoning tasks?\nAnswer:", + " GraphProofQA" + ], + [ + "Question:What is the name of the newly introduced dataset that uses randomly generated DAGs for logical reasoning tasks?\nAnswer:", + " ProsQA" + ], + [ + "Question:What is the name of the newly introduced dataset that uses randomly generated DAGs for logical reasoning tasks?\nAnswer:", + " LogicDAG" + ] + ], + "resps": [ + [ + [ + -11.236031532287598, + false + ] + ], + [ + [ + -19.69611358642578, + false + ] + ], + [ + [ + -20.450382232666016, + false + ] + ], + [ + [ + -15.820966720581055, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.236031532287598, + false + ], + [ + -19.69611358642578, + false + ], + [ + -20.450382232666016, + false + ], + [ + -15.820966720581055, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "217edf1170ad8ce8a10962ef9838cb1c147f69453ff3b88e325419607a5c912f", + "prompt_hash": "f1f4f13fd8429cb29c05b70cdf386e00ca3b54085197be705ad3102830e66f08", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 51, + "doc": { + "question": "Which pre-trained model is used as the base model for all experiments?", + "choices": [ + "GPT-2", + "LLaMA-2", + "GPT-Neo", + "T5" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which pre-trained model is used as the base model for all experiments?\nAnswer:", + " GPT-2" + ], + [ + "Question:Which pre-trained model is used as the base model for all experiments?\nAnswer:", + " LLaMA-2" + ], + [ + "Question:Which pre-trained model is used as the base model for all experiments?\nAnswer:", + " GPT-Neo" + ], + [ + "Question:Which pre-trained model is used as the base model for all experiments?\nAnswer:", + " T5" + ] + ], + "resps": [ + [ + [ + -7.498136520385742, + false + ] + ], + [ + [ + -9.902589797973633, + false + ] + ], + [ + [ + -9.747665405273438, + false + ] + ], + [ + [ + -6.549315452575684, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.498136520385742, + false + ], + [ + -9.902589797973633, + false + ], + [ + -9.747665405273438, + false + ], + [ + -6.549315452575684, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + 
"acc_norm" + ], + "doc_hash": "717cadaa616a2bdcb3b4bf53a886de4b82ea0600a2c355ba95d9d1c16c134cd3", + "prompt_hash": "4b95d935fa7bcd67d739d9d8436016f42a8f32e5e23471f1ae1a0285f58d13fa", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 52, + "doc": { + "question": "Which token is used to mark the end of the latent thought mode in the proposed method?", + "choices": [ + "", + "", + "", + "" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which token is used to mark the end of the latent thought mode in the proposed method?\nAnswer:", + " " + ], + [ + "Question:Which token is used to mark the end of the latent thought mode in the proposed method?\nAnswer:", + " " + ], + [ + "Question:Which token is used to mark the end of the latent thought mode in the proposed method?\nAnswer:", + " " + ], + [ + "Question:Which token is used to mark the end of the latent thought mode in the proposed method?\nAnswer:", + " " + ] + ], + "resps": [ + [ + [ + -13.798894882202148, + false + ] + ], + [ + [ + -16.525150299072266, + false + ] + ], + [ + [ + -18.213878631591797, + false + ] + ], + [ + [ + -21.657358169555664, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.798894882202148, + false + ], + [ + -16.525150299072266, + false + ], + [ + -18.213878631591797, + false + ], + [ + -21.657358169555664, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "cf0f9f5525037bbc013d7bb008d93bb912e491d8bfc8a8a3066055cd421ac454", + "prompt_hash": "48b90eea372a285d08bd038c7dfcb3a0d2529d59ea81e7e38d313a5a14106a8e", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 53, + "doc": { + "question": "What is the default value of the hyperparameter 'c' used for math reasoning tasks in this study?", + "choices": [ + "1", + "2", + "3", + "4" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the default value of the hyperparameter 'c' used for math reasoning tasks in this study?\nAnswer:", + " 1" + ], + [ + "Question:What is the default value of the hyperparameter 'c' used for math reasoning tasks in this study?\nAnswer:", + " 2" + ], + [ + "Question:What is the default value of the hyperparameter 'c' used for math reasoning tasks in this study?\nAnswer:", + " 3" + ], + [ + "Question:What is the default value of the hyperparameter 'c' used for math reasoning tasks in this study?\nAnswer:", + " 4" + ] + ], + "resps": [ + [ + [ + -3.3783435821533203, + false + ] + ], + [ + [ + -3.9628448486328125, + false + ] + ], + [ + [ + -4.146780014038086, + false + ] + ], + [ + [ + -4.759138107299805, + false + ] + ] + ], + "filtered_resps": [ + [ + -3.3783435821533203, + false + ], + [ + -3.9628448486328125, + false + ], + [ + -4.146780014038086, + false + ], + [ + -4.759138107299805, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "ca09e725c397a1c7347e96ad136378e5dd2d335a63df0a84e35597299b2a3ca1", + "prompt_hash": "174c8e3c250a846c3b26420ac08a5f026b3b9b41c8763c39a85758bd0a1c3f99", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 54, + "doc": { + "question": "Which baseline method removes tokens at the beginning of the reasoning chain during training and predicts the answer directly during inference?", + "choices": [ + "No-CoT", + "Pause Token", + 
"iCoT", + "w/o curriculum" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which baseline method removes tokens at the beginning of the reasoning chain during training and predicts the answer directly during inference?\nAnswer:", + " No-CoT" + ], + [ + "Question:Which baseline method removes tokens at the beginning of the reasoning chain during training and predicts the answer directly during inference?\nAnswer:", + " Pause Token" + ], + [ + "Question:Which baseline method removes tokens at the beginning of the reasoning chain during training and predicts the answer directly during inference?\nAnswer:", + " iCoT" + ], + [ + "Question:Which baseline method removes tokens at the beginning of the reasoning chain during training and predicts the answer directly during inference?\nAnswer:", + " w/o curriculum" + ] + ], + "resps": [ + [ + [ + -16.552953720092773, + false + ] + ], + [ + [ + -15.976731300354004, + false + ] + ], + [ + [ + -17.974000930786133, + false + ] + ], + [ + [ + -22.248485565185547, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.552953720092773, + false + ], + [ + -15.976731300354004, + false + ], + [ + -17.974000930786133, + false + ], + [ + -22.248485565185547, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "19c4a79c779e1d991b74627e99dab9307bd5fb398d814d3660d24947b9d54611", + "prompt_hash": "5dee7cee91640ff580164a7cdeffb2da7d4cdd23e33467b9ca6a99a056c7090c", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 55, + "doc": { + "question": "Which base model is used for STILL-2?", + "choices": [ + "LLaMA-3-70B", + "Qwen2.5-32B-Instruct", + "DeepSeek-R1", + "Claude 3.5 Sonnet" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which base model is used for STILL-2?\nAnswer:", + " LLaMA-3-70B" + ], + [ + "Question:Which base model is used for STILL-2?\nAnswer:", + " Qwen2.5-32B-Instruct" + ], + [ + "Question:Which base model is used for STILL-2?\nAnswer:", + " DeepSeek-R1" + ], + [ + "Question:Which base model is used for STILL-2?\nAnswer:", + " Claude 3.5 Sonnet" + ] + ], + "resps": [ + [ + [ + -11.793059349060059, + false + ] + ], + [ + [ + -26.26325798034668, + false + ] + ], + [ + [ + -20.53174591064453, + false + ] + ], + [ + [ + -9.624313354492188, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.793059349060059, + false + ], + [ + -26.26325798034668, + false + ], + [ + -20.53174591064453, + false + ], + [ + -9.624313354492188, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "c2b849793784a9505396f5301f1eeb76cc79db06232dd82424a84b5740afb1b1", + "prompt_hash": "302486dd58a15c3fe94f662eaa51f2c5361e954848f56c235b63a56b30c3e011", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 56, + "doc": { + "question": "Which dataset consists of 198 multiple-choice questions from biology, physics, and chemistry?", + "choices": [ + "MATH-OAI", + "AIME2024", + "GPQA", + "LeetCode-Hard" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which dataset consists of 198 multiple-choice questions from biology, physics, and chemistry?\nAnswer:", + " MATH-OAI" + ], + [ + "Question:Which dataset consists of 198 multiple-choice questions from biology, physics, and chemistry?\nAnswer:", + " AIME2024" + ], + [ + "Question:Which dataset consists of 198 multiple-choice 
questions from biology, physics, and chemistry?\nAnswer:", + " GPQA" + ], + [ + "Question:Which dataset consists of 198 multiple-choice questions from biology, physics, and chemistry?\nAnswer:", + " LeetCode-Hard" + ] + ], + "resps": [ + [ + [ + -28.460281372070312, + false + ] + ], + [ + [ + -24.10089683532715, + false + ] + ], + [ + [ + -13.763631820678711, + false + ] + ], + [ + [ + -31.144329071044922, + false + ] + ] + ], + "filtered_resps": [ + [ + -28.460281372070312, + false + ], + [ + -24.10089683532715, + false + ], + [ + -13.763631820678711, + false + ], + [ + -31.144329071044922, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "271cf3dbb0d16a2e5d6d5fa731d7bb108df4fb9b052fc9423f3a85759d29312b", + "prompt_hash": "83f2afe83c85e40c1582a64dd22ef90a1058c1da9529598f225e3d203b4bd707", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 57, + "doc": { + "question": "Which training approach in STILL-2 includes aligning only the thought process and incorporating SFT for stability?", + "choices": [ + "Chain-of-thought prompting", + "Supervised fine-tuning only", + "Direct Preference Optimization", + "Tree search with reward model" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which training approach in STILL-2 includes aligning only the thought process and incorporating SFT for stability?\nAnswer:", + " Chain-of-thought prompting" + ], + [ + "Question:Which training approach in STILL-2 includes aligning only the thought process and incorporating SFT for stability?\nAnswer:", + " Supervised fine-tuning only" + ], + [ + "Question:Which training approach in STILL-2 includes aligning only the thought process and incorporating SFT for stability?\nAnswer:", + " Direct Preference Optimization" + ], + [ + "Question:Which training approach in STILL-2 includes aligning only the thought process and incorporating SFT for stability?\nAnswer:", + " Tree search with reward model" + ] + ], + "resps": [ + [ + [ + -13.133655548095703, + false + ] + ], + [ + [ + -15.158451080322266, + false + ] + ], + [ + [ + -8.369895935058594, + false + ] + ], + [ + [ + -27.84103775024414, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.133655548095703, + false + ], + [ + -15.158451080322266, + false + ], + [ + -8.369895935058594, + false + ], + [ + -27.84103775024414, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f94afbbe7b52724b8214b29114f6b669a4a89d5bbae545496a00f2acdde9df6d", + "prompt_hash": "1d4c18ff649f9f9360faee6f55c223cbb9e600f28419bcf3be5485a504f4a566", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 58, + "doc": { + "question": "Which problem domain is prioritized in the data mixture due to its prevalence of challenging reasoning problems?", + "choices": [ + "Coding", + "Puzzle", + "Mathematics", + "Science" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which problem domain is prioritized in the data mixture due to its prevalence of challenging reasoning problems?\nAnswer:", + " Coding" + ], + [ + "Question:Which problem domain is prioritized in the data mixture due to its prevalence of challenging reasoning problems?\nAnswer:", + " Puzzle" + ], + [ + "Question:Which problem domain is prioritized in the data mixture due to its prevalence of challenging reasoning problems?\nAnswer:", + " 
Mathematics" + ], + [ + "Question:Which problem domain is prioritized in the data mixture due to its prevalence of challenging reasoning problems?\nAnswer:", + " Science" + ] + ], + "resps": [ + [ + [ + -6.636571884155273, + false + ] + ], + [ + [ + -10.519530296325684, + false + ] + ], + [ + [ + -6.341315269470215, + false + ] + ], + [ + [ + -8.144645690917969, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.636571884155273, + false + ], + [ + -10.519530296325684, + false + ], + [ + -6.341315269470215, + false + ], + [ + -8.144645690917969, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "1804ef6e48eb4018a1b8cd33d9dc8a777fddc068fabcf07a1fd4c5a7abb51113", + "prompt_hash": "c673cd913e516594b6879b3185f72c468d1657f0a76609fafed1044316aa1a3a", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 59, + "doc": { + "question": "What type of tasks does AIME2024 aim to challenge students with?", + "choices": [ + "Basic arithmetic and algebra", + "Simple geometry questions", + "Complex problem solving tasks", + "Physics-based numerical estimation" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What type of tasks does AIME2024 aim to challenge students with?\nAnswer:", + " Basic arithmetic and algebra" + ], + [ + "Question:What type of tasks does AIME2024 aim to challenge students with?\nAnswer:", + " Simple geometry questions" + ], + [ + "Question:What type of tasks does AIME2024 aim to challenge students with?\nAnswer:", + " Complex problem solving tasks" + ], + [ + "Question:What type of tasks does AIME2024 aim to challenge students with?\nAnswer:", + " Physics-based numerical estimation" + ] + ], + "resps": [ + [ + [ + -16.77252960205078, + false + ] + ], + [ + [ + -22.267976760864258, + false + ] + ], + [ + [ + -15.13084888458252, + false + ] + ], + [ + [ + -25.581417083740234, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.77252960205078, + false + ], + [ + -22.267976760864258, + false + ], + [ + -15.13084888458252, + false + ], + [ + -25.581417083740234, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "422af2770208747c4876585172d73a22ab5ff58500e254276602c33f94a7b6d5", + "prompt_hash": "05bc871e2ef2fc7fafe36650b6c5087172d2af1beedb7446f8da9686ecf8cdf2", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 60, + "doc": { + "question": "What type of quantization module does CosyVoice 2 insert into the encoder of the SenseVoice-Large ASR model?", + "choices": [ + "Vector Quantization (VQ)", + "Scalar Product Quantization (SPQ)", + "Finite Scalar Quantization (FSQ)", + "Dynamic Quantization (DQ)" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What type of quantization module does CosyVoice 2 insert into the encoder of the SenseVoice-Large ASR model?\nAnswer:", + " Vector Quantization (VQ)" + ], + [ + "Question:What type of quantization module does CosyVoice 2 insert into the encoder of the SenseVoice-Large ASR model?\nAnswer:", + " Scalar Product Quantization (SPQ)" + ], + [ + "Question:What type of quantization module does CosyVoice 2 insert into the encoder of the SenseVoice-Large ASR model?\nAnswer:", + " Finite Scalar Quantization (FSQ)" + ], + [ + "Question:What type of quantization module does CosyVoice 2 insert into the encoder of the SenseVoice-Large ASR model?\nAnswer:", + " 
Dynamic Quantization (DQ)" + ] + ], + "resps": [ + [ + [ + -11.393033981323242, + false + ] + ], + [ + [ + -20.69158172607422, + false + ] + ], + [ + [ + -21.93514633178711, + false + ] + ], + [ + [ + -11.593354225158691, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.393033981323242, + false + ], + [ + -20.69158172607422, + false + ], + [ + -21.93514633178711, + false + ], + [ + -11.593354225158691, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "32388d6a9407a2fcfc1e9858f4d73cc33a35b7225d109348bbc29bbee35de03d", + "prompt_hash": "32b3cf4085d62ea87f7bdc218fb3adf337a472f25c450abaaab4d2cbacbe42c6", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 61, + "doc": { + "question": "What is the token rate of CosyVoice 2's speech tokenizer?", + "choices": [ + "50 Hz", + "25 Hz", + "16 Hz", + "100 Hz" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the token rate of CosyVoice 2's speech tokenizer?\nAnswer:", + " 50 Hz" + ], + [ + "Question:What is the token rate of CosyVoice 2's speech tokenizer?\nAnswer:", + " 25 Hz" + ], + [ + "Question:What is the token rate of CosyVoice 2's speech tokenizer?\nAnswer:", + " 16 Hz" + ], + [ + "Question:What is the token rate of CosyVoice 2's speech tokenizer?\nAnswer:", + " 100 Hz" + ] + ], + "resps": [ + [ + [ + -9.393841743469238, + false + ] + ], + [ + [ + -9.112127304077148, + false + ] + ], + [ + [ + -11.231280326843262, + false + ] + ], + [ + [ + -9.16458511352539, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.393841743469238, + false + ], + [ + -9.112127304077148, + false + ], + [ + -11.231280326843262, + false + ], + [ + -9.16458511352539, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "6bd69fa3a8b3b05be09aef518a19ec78c4b03b5fabb4e150d4336d1b9adbeaf4", + "prompt_hash": "7280d11880ab1693eb3cf76da610ec6c43fdfbc75219ef6f7aab4661a959250d", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 62, + "doc": { + "question": "Which pre-trained language model is used in CosyVoice 2 as the text-speech language model?", + "choices": [ + "ChatGLM3", + "Qwen1.5-1.8B", + "Qwen2.5-0.5B", + "GPT-NeoX-20B" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which pre-trained language model is used in CosyVoice 2 as the text-speech language model?\nAnswer:", + " ChatGLM3" + ], + [ + "Question:Which pre-trained language model is used in CosyVoice 2 as the text-speech language model?\nAnswer:", + " Qwen1.5-1.8B" + ], + [ + "Question:Which pre-trained language model is used in CosyVoice 2 as the text-speech language model?\nAnswer:", + " Qwen2.5-0.5B" + ], + [ + "Question:Which pre-trained language model is used in CosyVoice 2 as the text-speech language model?\nAnswer:", + " GPT-NeoX-20B" + ] + ], + "resps": [ + [ + [ + -7.910797595977783, + false + ] + ], + [ + [ + -13.443157196044922, + false + ] + ], + [ + [ + -28.324539184570312, + false + ] + ], + [ + [ + -19.48931121826172, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.910797595977783, + false + ], + [ + -13.443157196044922, + false + ], + [ + -28.324539184570312, + false + ], + [ + -19.48931121826172, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "59de127e082d56bb6a752e5a2a7289961596af15dbf9353afd2237d9808dcc5d", + "prompt_hash": 
"41dd334a9a9ce470c060aff357efbcd2b2ba5e2c6095c1a9c4c3eed65a8f3ed2", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 63, + "doc": { + "question": "What chunk sizes (N:M) are used in CosyVoice 2’s streaming mode for mixing text and speech tokens?", + "choices": [ + "3:9", + "4:10", + "5:15", + "6:18" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What chunk sizes (N:M) are used in CosyVoice 2’s streaming mode for mixing text and speech tokens?\nAnswer:", + " 3:9" + ], + [ + "Question:What chunk sizes (N:M) are used in CosyVoice 2’s streaming mode for mixing text and speech tokens?\nAnswer:", + " 4:10" + ], + [ + "Question:What chunk sizes (N:M) are used in CosyVoice 2’s streaming mode for mixing text and speech tokens?\nAnswer:", + " 5:15" + ], + [ + "Question:What chunk sizes (N:M) are used in CosyVoice 2’s streaming mode for mixing text and speech tokens?\nAnswer:", + " 6:18" + ] + ], + "resps": [ + [ + [ + -13.878074645996094, + false + ] + ], + [ + [ + -9.731094360351562, + false + ] + ], + [ + [ + -12.27220344543457, + false + ] + ], + [ + [ + -14.032604217529297, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.878074645996094, + false + ], + [ + -9.731094360351562, + false + ], + [ + -12.27220344543457, + false + ], + [ + -14.032604217529297, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "e0e5ce7d1693f556c0ab935f6a2e22c31119e91529cd638537632814431bf87a", + "prompt_hash": "cd61ef147018a2a8b6e7ba44d85766b1813c0bbc1d26a6e5566dbb64ec287bbe", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 64, + "doc": { + "question": "What mask type in CosyVoice 2 is designed for extremely low latency by attending only past frames?", + "choices": [ + "Chunk-2M Mask", + "Non-causal Mask", + "Full-causal Mask", + "Streaming Mask" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What mask type in CosyVoice 2 is designed for extremely low latency by attending only past frames?\nAnswer:", + " Chunk-2M Mask" + ], + [ + "Question:What mask type in CosyVoice 2 is designed for extremely low latency by attending only past frames?\nAnswer:", + " Non-causal Mask" + ], + [ + "Question:What mask type in CosyVoice 2 is designed for extremely low latency by attending only past frames?\nAnswer:", + " Full-causal Mask" + ], + [ + "Question:What mask type in CosyVoice 2 is designed for extremely low latency by attending only past frames?\nAnswer:", + " Streaming Mask" + ] + ], + "resps": [ + [ + [ + -28.615375518798828, + false + ] + ], + [ + [ + -15.082904815673828, + false + ] + ], + [ + [ + -25.027320861816406, + false + ] + ], + [ + [ + -12.463473320007324, + false + ] + ] + ], + "filtered_resps": [ + [ + -28.615375518798828, + false + ], + [ + -15.082904815673828, + false + ], + [ + -25.027320861816406, + false + ], + [ + -12.463473320007324, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "18b2cd20433a68dc5171a40403d0a99ab28e22454bd72874e3985c717b9d38a8", + "prompt_hash": "4749cce3e3a50d1979d5653490c00dbc8f21b3f348996d5f08a68f1e9d2b5f07", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 65, + "doc": { + "question": "What is the base resolution used by the SigLIP-SO400M-384 vision encoder in DeepSeek-VL2?", + 
"choices": [ + "256 × 256", + "384 × 384", + "512 × 512", + "1024 × 1024" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the base resolution used by the SigLIP-SO400M-384 vision encoder in DeepSeek-VL2?\nAnswer:", + " 256 × 256" + ], + [ + "Question:What is the base resolution used by the SigLIP-SO400M-384 vision encoder in DeepSeek-VL2?\nAnswer:", + " 384 × 384" + ], + [ + "Question:What is the base resolution used by the SigLIP-SO400M-384 vision encoder in DeepSeek-VL2?\nAnswer:", + " 512 × 512" + ], + [ + "Question:What is the base resolution used by the SigLIP-SO400M-384 vision encoder in DeepSeek-VL2?\nAnswer:", + " 1024 × 1024" + ] + ], + "resps": [ + [ + [ + -8.130940437316895, + false + ] + ], + [ + [ + -7.248654365539551, + false + ] + ], + [ + [ + -8.044690132141113, + false + ] + ], + [ + [ + -8.730853080749512, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.130940437316895, + false + ], + [ + -7.248654365539551, + false + ], + [ + -8.044690132141113, + false + ], + [ + -8.730853080749512, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "a92a91bc1c33a56adb94738fafdca24088730e185939f28ca1970ae3277e1cf3", + "prompt_hash": "eb055f1388f8ac10285b290c69173e7c821461cfee313aacde2b270f3a7a20dd", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 66, + "doc": { + "question": "Which mechanism in DeepSeek-VL2 reduces inference computational cost by compressing the Key-Value cache?", + "choices": [ + "Multi-query Attention", + "Low-rank Decomposition", + "Multi-head Latent Attention", + "Sparse Transformer Blocks" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which mechanism in DeepSeek-VL2 reduces inference computational cost by compressing the Key-Value cache?\nAnswer:", + " Multi-query Attention" + ], + [ + "Question:Which mechanism in DeepSeek-VL2 reduces inference computational cost by compressing the Key-Value cache?\nAnswer:", + " Low-rank Decomposition" + ], + [ + "Question:Which mechanism in DeepSeek-VL2 reduces inference computational cost by compressing the Key-Value cache?\nAnswer:", + " Multi-head Latent Attention" + ], + [ + "Question:Which mechanism in DeepSeek-VL2 reduces inference computational cost by compressing the Key-Value cache?\nAnswer:", + " Sparse Transformer Blocks" + ] + ], + "resps": [ + [ + [ + -16.46713638305664, + false + ] + ], + [ + [ + -14.968793869018555, + false + ] + ], + [ + [ + -24.092559814453125, + false + ] + ], + [ + [ + -18.959882736206055, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.46713638305664, + false + ], + [ + -14.968793869018555, + false + ], + [ + -24.092559814453125, + false + ], + [ + -18.959882736206055, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "20c9fc230fca349220bc87a8955d8e099f027dbd565e0122628db50fdbfae1c8", + "prompt_hash": "565efc2f8caf679698d0c9ea9eedaa81a8f5ac5ff6aeade1e2212c25f6cdcabd", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 67, + "doc": { + "question": "Which resolution constraint limited the original DeepSeek-VL in handling high-resolution images?", + "choices": [ + "512 × 512", + "256 × 256", + "1024 × 1024", + "2048 × 2048" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which resolution constraint limited the original DeepSeek-VL in handling 
high-resolution images?\nAnswer:", + " 512 × 512" + ], + [ + "Question:Which resolution constraint limited the original DeepSeek-VL in handling high-resolution images?\nAnswer:", + " 256 × 256" + ], + [ + "Question:Which resolution constraint limited the original DeepSeek-VL in handling high-resolution images?\nAnswer:", + " 1024 × 1024" + ], + [ + "Question:Which resolution constraint limited the original DeepSeek-VL in handling high-resolution images?\nAnswer:", + " 2048 × 2048" + ] + ], + "resps": [ + [ + [ + -10.976244926452637, + false + ] + ], + [ + [ + -10.028334617614746, + false + ] + ], + [ + [ + -11.025457382202148, + false + ] + ], + [ + [ + -12.071310997009277, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.976244926452637, + false + ], + [ + -10.028334617614746, + false + ], + [ + -11.025457382202148, + false + ], + [ + -12.071310997009277, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "339d792bbc3b019970a774aa67ab7b5ca6ce8bd4b5eb6fad1b35f879e699edff", + "prompt_hash": "8d2317f726c17f3e3b8939e1a9ec7485286c12b4f26288b2b1be18212ba0aa70", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 68, + "doc": { + "question": "Which dataset was used in the vision-language alignment stage of DeepSeek-VL2 training?", + "choices": [ + "WIT", + "OBELICS", + "ShareGPT4V", + "WikiHow" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which dataset was used in the vision-language alignment stage of DeepSeek-VL2 training?\nAnswer:", + " WIT" + ], + [ + "Question:Which dataset was used in the vision-language alignment stage of DeepSeek-VL2 training?\nAnswer:", + " OBELICS" + ], + [ + "Question:Which dataset was used in the vision-language alignment stage of DeepSeek-VL2 training?\nAnswer:", + " ShareGPT4V" + ], + [ + "Question:Which dataset was used in the vision-language alignment stage of DeepSeek-VL2 training?\nAnswer:", + " WikiHow" + ] + ], + "resps": [ + [ + [ + -11.62100601196289, + false + ] + ], + [ + [ + -19.688602447509766, + false + ] + ], + [ + [ + -13.465234756469727, + false + ] + ], + [ + [ + -13.078892707824707, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.62100601196289, + false + ], + [ + -19.688602447509766, + false + ], + [ + -13.465234756469727, + false + ], + [ + -13.078892707824707, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "99d67a96b52fca73877197e256f8e112981eda8d491873d62f896bd8e54fb9fc", + "prompt_hash": "c65656c4aea83a2ce79f67dc2e6cd18335eed3f311b95e24a22a6e3d99dff98d", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 69, + "doc": { + "question": "What vocabulary size is used in both DeepSeek-VL2-Tiny and DeepSeek-VL2 models?", + "choices": [ + "102,400", + "129,280", + "96,000", + "88,512" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What vocabulary size is used in both DeepSeek-VL2-Tiny and DeepSeek-VL2 models?\nAnswer:", + " 102,400" + ], + [ + "Question:What vocabulary size is used in both DeepSeek-VL2-Tiny and DeepSeek-VL2 models?\nAnswer:", + " 129,280" + ], + [ + "Question:What vocabulary size is used in both DeepSeek-VL2-Tiny and DeepSeek-VL2 models?\nAnswer:", + " 96,000" + ], + [ + "Question:What vocabulary size is used in both DeepSeek-VL2-Tiny and DeepSeek-VL2 models?\nAnswer:", + " 88,512" + ] + ], + "resps": [ + [ + [ + 
-12.950544357299805, + false + ] + ], + [ + [ + -20.23044204711914, + false + ] + ], + [ + [ + -12.914684295654297, + false + ] + ], + [ + [ + -19.146526336669922, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.950544357299805, + false + ], + [ + -20.23044204711914, + false + ], + [ + -12.914684295654297, + false + ], + [ + -19.146526336669922, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "0f05867b00d406bf5e9952c229cbd0f2bf1767270f14ad1556db51142a068e5e", + "prompt_hash": "8d602975dad9718428ec256f9279453382b67fb55fc0159e498c69f4298b04bf", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 70, + "doc": { + "question": "What is the parameter count of the Phi-4 model?", + "choices": [ + "7 billion", + "14 billion", + "70 billion", + "32 billion" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the parameter count of the Phi-4 model?\nAnswer:", + " 7 billion" + ], + [ + "Question:What is the parameter count of the Phi-4 model?\nAnswer:", + " 14 billion" + ], + [ + "Question:What is the parameter count of the Phi-4 model?\nAnswer:", + " 70 billion" + ], + [ + "Question:What is the parameter count of the Phi-4 model?\nAnswer:", + " 32 billion" + ] + ], + "resps": [ + [ + [ + -8.51523208618164, + false + ] + ], + [ + [ + -10.523784637451172, + false + ] + ], + [ + [ + -10.680200576782227, + false + ] + ], + [ + [ + -10.754088401794434, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.51523208618164, + false + ], + [ + -10.523784637451172, + false + ], + [ + -10.680200576782227, + false + ], + [ + -10.754088401794434, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "116f467649a52acc054bc1f84838646ea02114520a9156a41b9605a49738f1d0", + "prompt_hash": "be084df62358405412baac8c81d863a2a0b37a8d5ce5019beddd342c0af16f65", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 71, + "doc": { + "question": "Which benchmark does Phi-4 outperform GPT-4o on?", + "choices": [ + "HumanEval+", + "DROP", + "GPQA", + "MMLU" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which benchmark does Phi-4 outperform GPT-4o on?\nAnswer:", + " HumanEval+" + ], + [ + "Question:Which benchmark does Phi-4 outperform GPT-4o on?\nAnswer:", + " DROP" + ], + [ + "Question:Which benchmark does Phi-4 outperform GPT-4o on?\nAnswer:", + " GPQA" + ], + [ + "Question:Which benchmark does Phi-4 outperform GPT-4o on?\nAnswer:", + " MMLU" + ] + ], + "resps": [ + [ + [ + -13.359102249145508, + false + ] + ], + [ + [ + -13.945856094360352, + false + ] + ], + [ + [ + -14.53800106048584, + false + ] + ], + [ + [ + -8.430791854858398, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.359102249145508, + false + ], + [ + -13.945856094360352, + false + ], + [ + -14.53800106048584, + false + ], + [ + -8.430791854858398, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f2865fffe2519020fe97fce33fc3d4425d49db303f5659dfa1d85c4b590e04a8", + "prompt_hash": "8e29f0852f692ccde8ae0c99c89c51add71e311dcad1bace9a4821757f310040", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 72, + "doc": { + "question": "What percentage of Phi-4’s pretraining data is composed of synthetic data?", + "choices": [ + "20%", 
+ "30%", + "40%", + "50%" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What percentage of Phi-4’s pretraining data is composed of synthetic data?\nAnswer:", + " 20%" + ], + [ + "Question:What percentage of Phi-4’s pretraining data is composed of synthetic data?\nAnswer:", + " 30%" + ], + [ + "Question:What percentage of Phi-4’s pretraining data is composed of synthetic data?\nAnswer:", + " 40%" + ], + [ + "Question:What percentage of Phi-4’s pretraining data is composed of synthetic data?\nAnswer:", + " 50%" + ] + ], + "resps": [ + [ + [ + -9.83897590637207, + false + ] + ], + [ + [ + -10.036050796508789, + false + ] + ], + [ + [ + -9.986572265625, + false + ] + ], + [ + [ + -9.598316192626953, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.83897590637207, + false + ], + [ + -10.036050796508789, + false + ], + [ + -9.986572265625, + false + ], + [ + -9.598316192626953, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "74f028a642a4ea81b40a1d46b7967c490245ef183efc0afee04acb9a4fae3253", + "prompt_hash": "5653b6673de59f32813aaf6883f72dc84b2d3055b444083e8650a07442f1c14d", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 73, + "doc": { + "question": "Which technique is introduced to generate DPO pairs in the first stage of post-training?", + "choices": [ + "Multi-agent prompting", + "Instruction reversal", + "Pivotal Token Search", + "Majority Voting" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which technique is introduced to generate DPO pairs in the first stage of post-training?\nAnswer:", + " Multi-agent prompting" + ], + [ + "Question:Which technique is introduced to generate DPO pairs in the first stage of post-training?\nAnswer:", + " Instruction reversal" + ], + [ + "Question:Which technique is introduced to generate DPO pairs in the first stage of post-training?\nAnswer:", + " Pivotal Token Search" + ], + [ + "Question:Which technique is introduced to generate DPO pairs in the first stage of post-training?\nAnswer:", + " Majority Voting" + ] + ], + "resps": [ + [ + [ + -20.125656127929688, + false + ] + ], + [ + [ + -21.587875366210938, + false + ] + ], + [ + [ + -29.10210418701172, + false + ] + ], + [ + [ + -12.24392318725586, + false + ] + ] + ], + "filtered_resps": [ + [ + -20.125656127929688, + false + ], + [ + -21.587875366210938, + false + ], + [ + -29.10210418701172, + false + ], + [ + -12.24392318725586, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "a97af93f7f739a6c4de4934931a620cc1923def75f835215ff4af99c48c3f76c", + "prompt_hash": "203b12dfaa50c24d8451eb1877ce2f71f2cd03a96619328418be7cc1d1a9f44f", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 74, + "doc": { + "question": "Which benchmark category showed the largest performance gap between synthetic and web-trained models in Table 8?", + "choices": [ + "Human-Eval", + "TQA", + "GSM8k", + "MATH" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which benchmark category showed the largest performance gap between synthetic and web-trained models in Table 8?\nAnswer:", + " Human-Eval" + ], + [ + "Question:Which benchmark category showed the largest performance gap between synthetic and web-trained models in Table 8?\nAnswer:", + " TQA" + ], + [ + "Question:Which benchmark category 
showed the largest performance gap between synthetic and web-trained models in Table 8?\nAnswer:", + " GSM8k" + ], + [ + "Question:Which benchmark category showed the largest performance gap between synthetic and web-trained models in Table 8?\nAnswer:", + " MATH" + ] + ], + "resps": [ + [ + [ + -14.4087495803833, + false + ] + ], + [ + [ + -13.021769523620605, + false + ] + ], + [ + [ + -11.766997337341309, + false + ] + ], + [ + [ + -11.660591125488281, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.4087495803833, + false + ], + [ + -13.021769523620605, + false + ], + [ + -11.766997337341309, + false + ], + [ + -11.660591125488281, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "c39391ac6f10f661ca68d0a620f88fd776aa9c7bbc14c437618e02fdf3231050", + "prompt_hash": "1082716728d8aa876731dc059daf270a0d97c1ea049c003518d0de258d9dff00", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 75, + "doc": { + "question": "What percentage of Black and African American adults encounter online racial harassment?", + "choices": [ + "30%", + "46%", + "60%", + "75%" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What percentage of Black and African American adults encounter online racial harassment?\nAnswer:", + " 30%" + ], + [ + "Question:What percentage of Black and African American adults encounter online racial harassment?\nAnswer:", + " 46%" + ], + [ + "Question:What percentage of Black and African American adults encounter online racial harassment?\nAnswer:", + " 60%" + ], + [ + "Question:What percentage of Black and African American adults encounter online racial harassment?\nAnswer:", + " 75%" + ] + ], + "resps": [ + [ + [ + -9.151792526245117, + false + ] + ], + [ + [ + -11.020578384399414, + false + ] + ], + [ + [ + -9.901688575744629, + false + ] + ], + [ + [ + -10.72553825378418, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.151792526245117, + false + ], + [ + -11.020578384399414, + false + ], + [ + -9.901688575744629, + false + ], + [ + -10.72553825378418, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "defc532debfa109bd5786680e55093a0ac8f615953535eba76bfe5f1fe603eb3", + "prompt_hash": "5c5d70de7eece4a6a6270db730f523fd46d4b061e8b3c5c2c438cb55b5afded8", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 76, + "doc": { + "question": "What is the purpose of the Digital Services Act (DSA) enforced in 2024?", + "choices": [ + "To ban all forms of social media advertisements", + "To promote blockchain-based content verification", + "To enforce clear obligations for content moderation and explanation on online platforms", + "To encourage anonymous content posting" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the purpose of the Digital Services Act (DSA) enforced in 2024?\nAnswer:", + " To ban all forms of social media advertisements" + ], + [ + "Question:What is the purpose of the Digital Services Act (DSA) enforced in 2024?\nAnswer:", + " To promote blockchain-based content verification" + ], + [ + "Question:What is the purpose of the Digital Services Act (DSA) enforced in 2024?\nAnswer:", + " To enforce clear obligations for content moderation and explanation on online platforms" + ], + [ + "Question:What is the purpose of the Digital Services Act (DSA) enforced in 
2024?\nAnswer:", + " To encourage anonymous content posting" + ] + ], + "resps": [ + [ + [ + -32.01473617553711, + false + ] + ], + [ + [ + -30.65254020690918, + false + ] + ], + [ + [ + -49.995948791503906, + false + ] + ], + [ + [ + -29.79315948486328, + false + ] + ] + ], + "filtered_resps": [ + [ + -32.01473617553711, + false + ], + [ + -30.65254020690918, + false + ], + [ + -49.995948791503906, + false + ], + [ + -29.79315948486328, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "2b67327383aea1b2216df1851800ead1eda5cf24431edc6d2a49f3db192ea7ef", + "prompt_hash": "bf0c0fa613fc1e2f1ce5b58a456d14b2d1ca407b1e52a9726f3918adcb59213f", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 77, + "doc": { + "question": "Which large language model is used as the teacher in the knowledge distillation approach?", + "choices": [ + "Llama-2-13B-Instruct", + "GPT-4", + "Llama-3-70B-Instruct", + "BERT-Large" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which large language model is used as the teacher in the knowledge distillation approach?\nAnswer:", + " Llama-2-13B-Instruct" + ], + [ + "Question:Which large language model is used as the teacher in the knowledge distillation approach?\nAnswer:", + " GPT-4" + ], + [ + "Question:Which large language model is used as the teacher in the knowledge distillation approach?\nAnswer:", + " Llama-3-70B-Instruct" + ], + [ + "Question:Which large language model is used as the teacher in the knowledge distillation approach?\nAnswer:", + " BERT-Large" + ] + ], + "resps": [ + [ + [ + -14.030332565307617, + false + ] + ], + [ + [ + -4.68262243270874, + false + ] + ], + [ + [ + -11.608107566833496, + false + ] + ], + [ + [ + -10.403993606567383, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.030332565307617, + false + ], + [ + -4.68262243270874, + false + ], + [ + -11.608107566833496, + false + ], + [ + -10.403993606567383, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "afbccc7ee3433afdb0a3b523c58b2d0138396c971149c757784844a5bc74d212", + "prompt_hash": "f89a8b088d90665636d8081e6fb13e0824bf92d489fe6a13c648631f685be539", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 78, + "doc": { + "question": "How many samples were correctly classified and used for distillation in MiniMetaHate Distil?", + "choices": [ + "2993", + "2296", + "2001", + "1500" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:How many samples were correctly classified and used for distillation in MiniMetaHate Distil?\nAnswer:", + " 2993" + ], + [ + "Question:How many samples were correctly classified and used for distillation in MiniMetaHate Distil?\nAnswer:", + " 2296" + ], + [ + "Question:How many samples were correctly classified and used for distillation in MiniMetaHate Distil?\nAnswer:", + " 2001" + ], + [ + "Question:How many samples were correctly classified and used for distillation in MiniMetaHate Distil?\nAnswer:", + " 1500" + ] + ], + "resps": [ + [ + [ + -11.522136688232422, + false + ] + ], + [ + [ + -11.24272346496582, + false + ] + ], + [ + [ + -10.519241333007812, + false + ] + ], + [ + [ + -7.333746910095215, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.522136688232422, + false + ], + [ + -11.24272346496582, + false + ], + [ + -10.519241333007812, + false + ], + 
[ + -7.333746910095215, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "54212365c2cf87b74ae0a6054ba14a270ab2ba0fb809d6595173a0de4afccd2e", + "prompt_hash": "4e9f63f3a6233cfb52db93ae4a1f485352f3a5cdad59fcc7aeec3b2230696e02", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 79, + "doc": { + "question": "What F1-score did the Llama-3-8B-Distil-MetaHate model achieve in the classification task?", + "choices": [ + "0.7796", + "0.8499", + "0.9037", + "0.7467" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What F1-score did the Llama-3-8B-Distil-MetaHate model achieve in the classification task?\nAnswer:", + " 0.7796" + ], + [ + "Question:What F1-score did the Llama-3-8B-Distil-MetaHate model achieve in the classification task?\nAnswer:", + " 0.8499" + ], + [ + "Question:What F1-score did the Llama-3-8B-Distil-MetaHate model achieve in the classification task?\nAnswer:", + " 0.9037" + ], + [ + "Question:What F1-score did the Llama-3-8B-Distil-MetaHate model achieve in the classification task?\nAnswer:", + " 0.7467" + ] + ], + "resps": [ + [ + [ + -11.481164932250977, + false + ] + ], + [ + [ + -11.787622451782227, + false + ] + ], + [ + [ + -11.448420524597168, + false + ] + ], + [ + [ + -11.004653930664062, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.481164932250977, + false + ], + [ + -11.787622451782227, + false + ], + [ + -11.448420524597168, + false + ], + [ + -11.004653930664062, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "682153f7962b13a3fca29e0ba040de1d172c39f2dab56ff965e7e9fd9342127e", + "prompt_hash": "0d9e96020a25c9fb03893ba494994b2140c438c15dcdc7e9106794b7f9f93ab8", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 80, + "doc": { + "question": "What is the native sequence length of ModernBERT?", + "choices": [ + "512", + "1024", + "4096", + "8192" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What is the native sequence length of ModernBERT?\nAnswer:", + " 512" + ], + [ + "Question:What is the native sequence length of ModernBERT?\nAnswer:", + " 1024" + ], + [ + "Question:What is the native sequence length of ModernBERT?\nAnswer:", + " 4096" + ], + [ + "Question:What is the native sequence length of ModernBERT?\nAnswer:", + " 8192" + ] + ], + "resps": [ + [ + [ + -2.9802372455596924, + false + ] + ], + [ + [ + -3.8660225868225098, + false + ] + ], + [ + [ + -5.204238414764404, + false + ] + ], + [ + [ + -5.671204566955566, + false + ] + ] + ], + "filtered_resps": [ + [ + -2.9802372455596924, + false + ], + [ + -3.8660225868225098, + false + ], + [ + -5.204238414764404, + false + ], + [ + -5.671204566955566, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "4b66277959befb396d08c8d6472ff3dddff308651cc9aa98dc5093a5a147cb85", + "prompt_hash": "ab090baa0e014fb2cd8c2f57d2d56f86c9a8d4d9abdff9b048ee4bbda6745d16", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 81, + "doc": { + "question": "Which optimizer does ModernBERT use during pretraining?", + "choices": [ + "AdamW", + "StableAdamW", + "Adafactor", + "SGD" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which optimizer does ModernBERT use during 
pretraining?\nAnswer:", + " AdamW" + ], + [ + "Question:Which optimizer does ModernBERT use during pretraining?\nAnswer:", + " StableAdamW" + ], + [ + "Question:Which optimizer does ModernBERT use during pretraining?\nAnswer:", + " Adafactor" + ], + [ + "Question:Which optimizer does ModernBERT use during pretraining?\nAnswer:", + " SGD" + ] + ], + "resps": [ + [ + [ + -4.6689043045043945, + false + ] + ], + [ + [ + -20.804319381713867, + false + ] + ], + [ + [ + -9.3514986038208, + false + ] + ], + [ + [ + -7.600836277008057, + false + ] + ] + ], + "filtered_resps": [ + [ + -4.6689043045043945, + false + ], + [ + -20.804319381713867, + false + ], + [ + -9.3514986038208, + false + ], + [ + -7.600836277008057, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "92bdbed4335dbf1e07e12e87e5059eda5d1bbb35503877e35e8c58a33b52b7f8", + "prompt_hash": "9a4aace23c9f432f3dee6b16fc1443472f954fe5fabf84c7504029042ad05ee5", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 82, + "doc": { + "question": "What type of activation function does ModernBERT adopt?", + "choices": [ + "ReLU", + "GeLU", + "Swish", + "GeGLU" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What type of activation function does ModernBERT adopt?\nAnswer:", + " ReLU" + ], + [ + "Question:What type of activation function does ModernBERT adopt?\nAnswer:", + " GeLU" + ], + [ + "Question:What type of activation function does ModernBERT adopt?\nAnswer:", + " Swish" + ], + [ + "Question:What type of activation function does ModernBERT adopt?\nAnswer:", + " GeGLU" + ] + ], + "resps": [ + [ + [ + -3.3449418544769287, + false + ] + ], + [ + [ + -9.327370643615723, + false + ] + ], + [ + [ + -7.710070610046387, + false + ] + ], + [ + [ + -20.871734619140625, + false + ] + ] + ], + "filtered_resps": [ + [ + -3.3449418544769287, + false + ], + [ + -9.327370643615723, + false + ], + [ + -7.710070610046387, + false + ], + [ + -20.871734619140625, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "997911e4a17162c72338d72279b22fa19fa06027bb1a1084b09c859116effcbc", + "prompt_hash": "c9f1b89aadf77bba01b0161e076af8b57f527721531ac665734d68c788100eee", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 83, + "doc": { + "question": "What tokenizer does ModernBERT use?", + "choices": [ + "Original BERT tokenizer", + "SentencePiece", + "OLMo tokenizer (modified)", + "GPT2 tokenizer" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What tokenizer does ModernBERT use?\nAnswer:", + " Original BERT tokenizer" + ], + [ + "Question:What tokenizer does ModernBERT use?\nAnswer:", + " SentencePiece" + ], + [ + "Question:What tokenizer does ModernBERT use?\nAnswer:", + " OLMo tokenizer (modified)" + ], + [ + "Question:What tokenizer does ModernBERT use?\nAnswer:", + " GPT2 tokenizer" + ] + ], + "resps": [ + [ + [ + -10.338162422180176, + false + ] + ], + [ + [ + -8.317133903503418, + false + ] + ], + [ + [ + -37.740684509277344, + false + ] + ], + [ + [ + -12.453644752502441, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.338162422180176, + false + ], + [ + -8.317133903503418, + false + ], + [ + -37.740684509277344, + false + ], + [ + -12.453644752502441, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": 
"c4ded2d8c96422c7f89f3d5ddfb7fd19a7e6a387b40d9228443164a9ca00d5bc", + "prompt_hash": "d7848dcdb9379b33a0cf5ba68f10be54f71c1a2d2d1707173f52ac4845dddd3a", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 84, + "doc": { + "question": "What is the batch size warmup schedule used for ModernBERT-base?", + "choices": [ + "From 448 to 4608 over 50B tokens", + "From 768 to 4608 over 50B tokens", + "From 448 to 4928 over 10B tokens", + "From 768 to 4928 over 10B tokens" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the batch size warmup schedule used for ModernBERT-base?\nAnswer:", + " From 448 to 4608 over 50B tokens" + ], + [ + "Question:What is the batch size warmup schedule used for ModernBERT-base?\nAnswer:", + " From 768 to 4608 over 50B tokens" + ], + [ + "Question:What is the batch size warmup schedule used for ModernBERT-base?\nAnswer:", + " From 448 to 4928 over 10B tokens" + ], + [ + "Question:What is the batch size warmup schedule used for ModernBERT-base?\nAnswer:", + " From 768 to 4928 over 10B tokens" + ] + ], + "resps": [ + [ + [ + -54.454219818115234, + false + ] + ], + [ + [ + -48.003021240234375, + false + ] + ], + [ + [ + -53.04581832885742, + false + ] + ], + [ + [ + -49.49294662475586, + false + ] + ] + ], + "filtered_resps": [ + [ + -54.454219818115234, + false + ], + [ + -48.003021240234375, + false + ], + [ + -53.04581832885742, + false + ], + [ + -49.49294662475586, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "794189ecdd29ef84800d4d01b8ee091f8374a1baded0b020ce27070595a49962", + "prompt_hash": "b489d3303e83291a02e9332aac2328a83ff57fc1b2e696f1c0d58e4a0cd10347", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 85, + "doc": { + "question": "What is the maximum number of iterations allowed for generating a correct reasoning trajectory in Stage One?", + "choices": [ + "2", + "3", + "5", + "10" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the maximum number of iterations allowed for generating a correct reasoning trajectory in Stage One?\nAnswer:", + " 2" + ], + [ + "Question:What is the maximum number of iterations allowed for generating a correct reasoning trajectory in Stage One?\nAnswer:", + " 3" + ], + [ + "Question:What is the maximum number of iterations allowed for generating a correct reasoning trajectory in Stage One?\nAnswer:", + " 5" + ], + [ + "Question:What is the maximum number of iterations allowed for generating a correct reasoning trajectory in Stage One?\nAnswer:", + " 10" + ] + ], + "resps": [ + [ + [ + -3.444279432296753, + false + ] + ], + [ + [ + -3.4499194622039795, + false + ] + ], + [ + [ + -3.39045786857605, + false + ] + ], + [ + [ + -3.5267815589904785, + false + ] + ] + ], + "filtered_resps": [ + [ + -3.444279432296753, + false + ], + [ + -3.4499194622039795, + false + ], + [ + -3.39045786857605, + false + ], + [ + -3.5267815589904785, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "9523e0cd07b614fe1c199437bf4ffccc04af52eb1ecfc73735f856cdfec656c9", + "prompt_hash": "38df8aadb3c572104715f5ba3d1ca0b9a1204eae23019f3688978c3f1575359e", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 86, + "doc": { + "question": "Which LLM is used 
as the verifier to check answers in the proposed method?", + "choices": [ + "GPT-4o", + "LLaMA-3.1", + "Qwen2.5", + "Gemma2" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which LLM is used as the verifier to check answers in the proposed method?\nAnswer:", + " GPT-4o" + ], + [ + "Question:Which LLM is used as the verifier to check answers in the proposed method?\nAnswer:", + " LLaMA-3.1" + ], + [ + "Question:Which LLM is used as the verifier to check answers in the proposed method?\nAnswer:", + " Qwen2.5" + ], + [ + "Question:Which LLM is used as the verifier to check answers in the proposed method?\nAnswer:", + " Gemma2" + ] + ], + "resps": [ + [ + [ + -4.99644136428833, + false + ] + ], + [ + [ + -19.128623962402344, + false + ] + ], + [ + [ + -15.504560470581055, + false + ] + ], + [ + [ + -11.37159538269043, + false + ] + ] + ], + "filtered_resps": [ + [ + -4.99644136428833, + false + ], + [ + -19.128623962402344, + false + ], + [ + -15.504560470581055, + false + ], + [ + -11.37159538269043, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "5eb7fa165860610eaab01627a1850068a08f3eb2da634b92c0023d437a7d7dfe", + "prompt_hash": "427983833d46e8c41fd395b7ee6c9234517a1a2aca2a96f5cac95570de9717af", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 87, + "doc": { + "question": "Which search strategy involves the LLM critiquing and correcting the current reasoning?", + "choices": [ + "Exploring New Paths", + "Backtracking", + "Verification", + "Corrections" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:Which search strategy involves the LLM critiquing and correcting the current reasoning?\nAnswer:", + " Exploring New Paths" + ], + [ + "Question:Which search strategy involves the LLM critiquing and correcting the current reasoning?\nAnswer:", + " Backtracking" + ], + [ + "Question:Which search strategy involves the LLM critiquing and correcting the current reasoning?\nAnswer:", + " Verification" + ], + [ + "Question:Which search strategy involves the LLM critiquing and correcting the current reasoning?\nAnswer:", + " Corrections" + ] + ], + "resps": [ + [ + [ + -21.948320388793945, + false + ] + ], + [ + [ + -8.776028633117676, + false + ] + ], + [ + [ + -7.903186321258545, + false + ] + ], + [ + [ + -12.345247268676758, + false + ] + ] + ], + "filtered_resps": [ + [ + -21.948320388793945, + false + ], + [ + -8.776028633117676, + false + ], + [ + -7.903186321258545, + false + ], + [ + -12.345247268676758, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "4e3eeed368a493138f5f47e056a32d6fe7d22c1104564f124cb321c66e5905f0", + "prompt_hash": "61066c3003ef4b501f9a45537746350905d46ccd3a17aad075de8ecae6d3cd9b", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 88, + "doc": { + "question": "What was the total number of medical exam questions collected from MedQA-USMLE and MedMcQA?", + "choices": [ + "40K", + "192K", + "60K", + "20K" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What was the total number of medical exam questions collected from MedQA-USMLE and MedMcQA?\nAnswer:", + " 40K" + ], + [ + "Question:What was the total number of medical exam questions collected from MedQA-USMLE and MedMcQA?\nAnswer:", + " 192K" + ], + [ + "Question:What was the total number of 
medical exam questions collected from MedQA-USMLE and MedMcQA?\nAnswer:", + " 60K" + ], + [ + "Question:What was the total number of medical exam questions collected from MedQA-USMLE and MedMcQA?\nAnswer:", + " 20K" + ] + ], + "resps": [ + [ + [ + -12.511892318725586, + false + ] + ], + [ + [ + -15.900592803955078, + false + ] + ], + [ + [ + -13.070356369018555, + false + ] + ], + [ + [ + -11.873079299926758, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.511892318725586, + false + ], + [ + -15.900592803955078, + false + ], + [ + -13.070356369018555, + false + ], + [ + -11.873079299926758, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "e53c3edd00284a53963bdd1efe103a9bfe9388c5c2e84ac2badd3b9054ba2e05", + "prompt_hash": "f0dbc688373e4cddcbae3e6123c97a37e92a4b7bec4f53459bdf0d44a20e93a6", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 89, + "doc": { + "question": "What is the batch size used during reinforcement learning with PPO in Stage 2?", + "choices": [ + "32", + "64", + "128", + "256" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the batch size used during reinforcement learning with PPO in Stage 2?\nAnswer:", + " 32" + ], + [ + "Question:What is the batch size used during reinforcement learning with PPO in Stage 2?\nAnswer:", + " 64" + ], + [ + "Question:What is the batch size used during reinforcement learning with PPO in Stage 2?\nAnswer:", + " 128" + ], + [ + "Question:What is the batch size used during reinforcement learning with PPO in Stage 2?\nAnswer:", + " 256" + ] + ], + "resps": [ + [ + [ + -4.313339710235596, + false + ] + ], + [ + [ + -4.934946060180664, + false + ] + ], + [ + [ + -5.580106735229492, + false + ] + ], + [ + [ + -5.842267036437988, + false + ] + ] + ], + "filtered_resps": [ + [ + -4.313339710235596, + false + ], + [ + -4.934946060180664, + false + ], + [ + -5.580106735229492, + false + ], + [ + -5.842267036437988, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "14a8b0bdb1427efa8f3f46a44fe01b61bf71b7e9be24826304669f9d38aec6be", + "prompt_hash": "494d5b020999ea125ad81c2789a6df16a06f4d8ce120053db53ed5f24704b07f", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 90, + "doc": { + "question": "What is the total duration of the 30 million video clips used in the dataset?", + "choices": [ + "50k hours", + "60k hours", + "70k hours", + "80k hours" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What is the total duration of the 30 million video clips used in the dataset?\nAnswer:", + " 50k hours" + ], + [ + "Question:What is the total duration of the 30 million video clips used in the dataset?\nAnswer:", + " 60k hours" + ], + [ + "Question:What is the total duration of the 30 million video clips used in the dataset?\nAnswer:", + " 70k hours" + ], + [ + "Question:What is the total duration of the 30 million video clips used in the dataset?\nAnswer:", + " 80k hours" + ] + ], + "resps": [ + [ + [ + -13.65107250213623, + false + ] + ], + [ + [ + -14.15365219116211, + false + ] + ], + [ + [ + -14.824312210083008, + false + ] + ], + [ + [ + -14.518945693969727, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.65107250213623, + false + ], + [ + -14.15365219116211, + false + ], + [ + -14.824312210083008, + false + ], + [ + 
-14.518945693969727, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b50a124a9aa6ab1d5bd0f4632abb993e6849b6e9cb9696575ab03d0a9095094f", + "prompt_hash": "810bc5d920fb28c94ecd9f2fc18f5a8fa44857c36784468bb6965d9551f53cb9", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 91, + "doc": { + "question": "Which model is used for Optical Flow Score computation in the data preprocessing pipeline?", + "choices": [ + "RAFT", + "FlowNet", + "UniMatch", + "PWC-Net" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which model is used for Optical Flow Score computation in the data preprocessing pipeline?\nAnswer:", + " RAFT" + ], + [ + "Question:Which model is used for Optical Flow Score computation in the data preprocessing pipeline?\nAnswer:", + " FlowNet" + ], + [ + "Question:Which model is used for Optical Flow Score computation in the data preprocessing pipeline?\nAnswer:", + " UniMatch" + ], + [ + "Question:Which model is used for Optical Flow Score computation in the data preprocessing pipeline?\nAnswer:", + " PWC-Net" + ] + ], + "resps": [ + [ + [ + -6.261064529418945, + false + ] + ], + [ + [ + -6.434628009796143, + false + ] + ], + [ + [ + -11.806937217712402, + false + ] + ], + [ + [ + -14.45037841796875, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.261064529418945, + false + ], + [ + -6.434628009796143, + false + ], + [ + -11.806937217712402, + false + ], + [ + -14.45037841796875, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "88cec1a4202acd1a92b3ada2606234ac3e46712884ab27ab2a207ce3ecf652b9", + "prompt_hash": "102011f9bdda56af7503599cf0188d34031ccdf8079b632707254ad248936e1d", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 92, + "doc": { + "question": "How many total parameters does the video compression network contain?", + "choices": [ + "300M", + "384M", + "400M", + "420M" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:How many total parameters does the video compression network contain?\nAnswer:", + " 300M" + ], + [ + "Question:How many total parameters does the video compression network contain?\nAnswer:", + " 384M" + ], + [ + "Question:How many total parameters does the video compression network contain?\nAnswer:", + " 400M" + ], + [ + "Question:How many total parameters does the video compression network contain?\nAnswer:", + " 420M" + ] + ], + "resps": [ + [ + [ + -9.516914367675781, + false + ] + ], + [ + [ + -12.171443939208984, + false + ] + ], + [ + [ + -9.804231643676758, + false + ] + ], + [ + [ + -11.168252944946289, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.516914367675781, + false + ], + [ + -12.171443939208984, + false + ], + [ + -9.804231643676758, + false + ], + [ + -11.168252944946289, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7b265aa594361a305450a7af84e6f6ac5abb1d6aa1c4d9a9f888b90df577f0b7", + "prompt_hash": "4d644297715562776f3430e89931dfe7c0191a6a028d9143ffcd10bf90ea95e1", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 93, + "doc": { + "question": "Which model was used to initialize the 2D VAE in Open-Sora 1.2?", + "choices": [ + "Latte", + "PixArt", + "SDXL", + "Magvit-v2" + ], + "answer": 2 + }, 
+ "target": 2, + "arguments": [ + [ + "Question:Which model was used to initialize the 2D VAE in Open-Sora 1.2?\nAnswer:", + " Latte" + ], + [ + "Question:Which model was used to initialize the 2D VAE in Open-Sora 1.2?\nAnswer:", + " PixArt" + ], + [ + "Question:Which model was used to initialize the 2D VAE in Open-Sora 1.2?\nAnswer:", + " SDXL" + ], + [ + "Question:Which model was used to initialize the 2D VAE in Open-Sora 1.2?\nAnswer:", + " Magvit-v2" + ] + ], + "resps": [ + [ + [ + -15.276412963867188, + false + ] + ], + [ + [ + -10.8336820602417, + false + ] + ], + [ + [ + -12.574335098266602, + false + ] + ], + [ + [ + -30.271799087524414, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.276412963867188, + false + ], + [ + -10.8336820602417, + false + ], + [ + -12.574335098266602, + false + ], + [ + -30.271799087524414, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "faa427f70ad5b6417f6829fa64227f2aebbbca4760908a4dfe6693da596a2743", + "prompt_hash": "67248fc329805136e7e6d18124cb719c6c66be23e0c2c66618faf8c2f43c333d", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 94, + "doc": { + "question": "What was the VBench total score achieved by Open-Sora 1.2?", + "choices": [ + "75.91%", + "77.23%", + "78.93%", + "79.76%" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What was the VBench total score achieved by Open-Sora 1.2?\nAnswer:", + " 75.91%" + ], + [ + "Question:What was the VBench total score achieved by Open-Sora 1.2?\nAnswer:", + " 77.23%" + ], + [ + "Question:What was the VBench total score achieved by Open-Sora 1.2?\nAnswer:", + " 78.93%" + ], + [ + "Question:What was the VBench total score achieved by Open-Sora 1.2?\nAnswer:", + " 79.76%" + ] + ], + "resps": [ + [ + [ + -18.253808975219727, + false + ] + ], + [ + [ + -17.968246459960938, + false + ] + ], + [ + [ + -17.808147430419922, + false + ] + ], + [ + [ + -17.937244415283203, + false + ] + ] + ], + "filtered_resps": [ + [ + -18.253808975219727, + false + ], + [ + -17.968246459960938, + false + ], + [ + -17.808147430419922, + false + ], + [ + -17.937244415283203, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "996839ff1783e652a55de6089818380d8cb1ac4077a073134dd9e0eb43823415", + "prompt_hash": "a6d9d72a424e7f01bcdbf2bc42ea87b275074d665b20737780add009981a4a02", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 95, + "doc": { + "question": "What is the total spatio-temporal compression ratio achieved by the Video-VAE in LTX-Video?", + "choices": [ + "1:48", + "1:96", + "1:192", + "1:512" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the total spatio-temporal compression ratio achieved by the Video-VAE in LTX-Video?\nAnswer:", + " 1:48" + ], + [ + "Question:What is the total spatio-temporal compression ratio achieved by the Video-VAE in LTX-Video?\nAnswer:", + " 1:96" + ], + [ + "Question:What is the total spatio-temporal compression ratio achieved by the Video-VAE in LTX-Video?\nAnswer:", + " 1:192" + ], + [ + "Question:What is the total spatio-temporal compression ratio achieved by the Video-VAE in LTX-Video?\nAnswer:", + " 1:512" + ] + ], + "resps": [ + [ + [ + -14.28443717956543, + false + ] + ], + [ + [ + -14.824919700622559, + false + ] + ], + [ + [ + -17.184690475463867, + false + ] + ], + [ + [ 
+ -17.0477294921875, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.28443717956543, + false + ], + [ + -14.824919700622559, + false + ], + [ + -17.184690475463867, + false + ], + [ + -17.0477294921875, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "601e102d0995f5a8d207082151dede875cf07ba21217427be49a4b7f25e9d2f8", + "prompt_hash": "4f2d44c5a4304265e085751f62a4638452aca0db5263a9948634e4742ee519c7", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 96, + "doc": { + "question": "Which positional embedding method is used in LTX-Video instead of absolute positional embeddings?", + "choices": [ + "Sinusoidal Embeddings", + "Rotary Positional Embeddings (RoPE)", + "Learned Positional Vectors", + "Relative Position Bias" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which positional embedding method is used in LTX-Video instead of absolute positional embeddings?\nAnswer:", + " Sinusoidal Embeddings" + ], + [ + "Question:Which positional embedding method is used in LTX-Video instead of absolute positional embeddings?\nAnswer:", + " Rotary Positional Embeddings (RoPE)" + ], + [ + "Question:Which positional embedding method is used in LTX-Video instead of absolute positional embeddings?\nAnswer:", + " Learned Positional Vectors" + ], + [ + "Question:Which positional embedding method is used in LTX-Video instead of absolute positional embeddings?\nAnswer:", + " Relative Position Bias" + ] + ], + "resps": [ + [ + [ + -14.810482025146484, + false + ] + ], + [ + [ + -15.664023399353027, + false + ] + ], + [ + [ + -22.923189163208008, + false + ] + ], + [ + [ + -15.321342468261719, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.810482025146484, + false + ], + [ + -15.664023399353027, + false + ], + [ + -22.923189163208008, + false + ], + [ + -15.321342468261719, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "5af380cb6067d02b663a4861dfaac101b9b090a294539c623fdb1abefe4150cf", + "prompt_hash": "9c151ca1e15f6348d2eb1c9188f2ee28970395133d7cad4793fd4a80bdc60f30", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 97, + "doc": { + "question": "On which GPU did LTX-Video achieve faster-than-real-time generation of 121 frames in 2 seconds?", + "choices": [ + "Nvidia A100", + "Nvidia V100", + "Nvidia H100", + "Nvidia RTX 4090" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:On which GPU did LTX-Video achieve faster-than-real-time generation of 121 frames in 2 seconds?\nAnswer:", + " Nvidia A100" + ], + [ + "Question:On which GPU did LTX-Video achieve faster-than-real-time generation of 121 frames in 2 seconds?\nAnswer:", + " Nvidia V100" + ], + [ + "Question:On which GPU did LTX-Video achieve faster-than-real-time generation of 121 frames in 2 seconds?\nAnswer:", + " Nvidia H100" + ], + [ + "Question:On which GPU did LTX-Video achieve faster-than-real-time generation of 121 frames in 2 seconds?\nAnswer:", + " Nvidia RTX 4090" + ] + ], + "resps": [ + [ + [ + -7.209535598754883, + false + ] + ], + [ + [ + -9.387601852416992, + false + ] + ], + [ + [ + -9.177935600280762, + false + ] + ], + [ + [ + -8.791450500488281, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.209535598754883, + false + ], + [ + -9.387601852416992, + false + ], + [ + -9.177935600280762, + false + ], + [ + 
-8.791450500488281, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "44eedd9be0c5f64eaa9704b437069887928e26c434da8c23defedc0628d95e41", + "prompt_hash": "02c313177014283fc2d2ceb3fb889083c088a5ecb4651826427b61531023f9c4", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 98, + "doc": { + "question": "Which GAN variant is proposed in LTX-Video to improve stability and reconstruction quality?", + "choices": [ + "PatchGAN", + "CycleGAN", + "Reconstruction GAN", + "StyleGAN" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which GAN variant is proposed in LTX-Video to improve stability and reconstruction quality?\nAnswer:", + " PatchGAN" + ], + [ + "Question:Which GAN variant is proposed in LTX-Video to improve stability and reconstruction quality?\nAnswer:", + " CycleGAN" + ], + [ + "Question:Which GAN variant is proposed in LTX-Video to improve stability and reconstruction quality?\nAnswer:", + " Reconstruction GAN" + ], + [ + "Question:Which GAN variant is proposed in LTX-Video to improve stability and reconstruction quality?\nAnswer:", + " StyleGAN" + ] + ], + "resps": [ + [ + [ + -8.652844429016113, + false + ] + ], + [ + [ + -5.183656692504883, + false + ] + ], + [ + [ + -11.628631591796875, + false + ] + ], + [ + [ + -5.3855133056640625, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.652844429016113, + false + ], + [ + -5.183656692504883, + false + ], + [ + -11.628631591796875, + false + ], + [ + -5.3855133056640625, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "4d11afddf855da867964775b6d488b39809add0de3182b038f66d8e7bc791255", + "prompt_hash": "3a76cad05d271f8f47d95186dc29e621e8ecd538ca06bc1cd34fa32a22af9d62", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 99, + "doc": { + "question": "What pre-trained text encoder is used for generating initial text embeddings in LTX-Video?", + "choices": [ + "BERT", + "CLIP", + "T5", + "GPT-2" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What pre-trained text encoder is used for generating initial text embeddings in LTX-Video?\nAnswer:", + " BERT" + ], + [ + "Question:What pre-trained text encoder is used for generating initial text embeddings in LTX-Video?\nAnswer:", + " CLIP" + ], + [ + "Question:What pre-trained text encoder is used for generating initial text embeddings in LTX-Video?\nAnswer:", + " T5" + ], + [ + "Question:What pre-trained text encoder is used for generating initial text embeddings in LTX-Video?\nAnswer:", + " GPT-2" + ] + ], + "resps": [ + [ + [ + -3.287250518798828, + false + ] + ], + [ + [ + -3.3353793621063232, + false + ] + ], + [ + [ + -5.748715400695801, + false + ] + ], + [ + [ + -7.809186935424805, + false + ] + ] + ], + "filtered_resps": [ + [ + -3.287250518798828, + false + ], + [ + -3.3353793621063232, + false + ], + [ + -5.748715400695801, + false + ], + [ + -7.809186935424805, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "72e9c47fb45eea2ab469fb1ddf4f6bf24453e7843260a0b9eadfb41eb325081d", + "prompt_hash": "296ebf6f03fd43f3f28379d8d020e501ea08a43ed5d73900fd472c11cfbc5053", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + } + ], + "arxiv_mc_2501": [ + { + "doc_id": 0, + "doc": { + 
"question": "What does an approximate unitary k-design aim to replicate?", + "choices": [ + "The behavior of a classical circuit with k gates", + "The full entanglement structure of any quantum state", + "The behavior of Haar-random unitaries within any quantum experiment that queries the unitary k times", + "The output of a fully depolarizing quantum channel" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What does an approximate unitary k-design aim to replicate?\nAnswer:", + " The behavior of a classical circuit with k gates" + ], + [ + "Question:What does an approximate unitary k-design aim to replicate?\nAnswer:", + " The full entanglement structure of any quantum state" + ], + [ + "Question:What does an approximate unitary k-design aim to replicate?\nAnswer:", + " The behavior of Haar-random unitaries within any quantum experiment that queries the unitary k times" + ], + [ + "Question:What does an approximate unitary k-design aim to replicate?\nAnswer:", + " The output of a fully depolarizing quantum channel" + ] + ], + "resps": [ + [ + [ + -29.659969329833984, + false + ] + ], + [ + [ + -27.531269073486328, + false + ] + ], + [ + [ + -79.02543640136719, + false + ] + ], + [ + [ + -26.968955993652344, + false + ] + ] + ], + "filtered_resps": [ + [ + -29.659969329833984, + false + ], + [ + -27.531269073486328, + false + ], + [ + -79.02543640136719, + false + ], + [ + -26.968955993652344, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7e116631a34f93e263eba5412718a33e303dfe605621305a2d9162d5f93b5e30", + "prompt_hash": "878dab46c9267372fec18d8fa960c96a12614f77781a17b6117cc17825a75e71", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 1, + "doc": { + "question": "What is the required local patch size ξ to ensure the ensemble forms an ε-approximate unitary k-design on n qubits?", + "choices": [ + "ξ ≥ √(nk/ε)", + "ξ ≥ log₂(nk²/ε)", + "ξ ≥ ε / nk", + "ξ ≥ (n + k + ε) / 2" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the required local patch size ξ to ensure the ensemble forms an ε-approximate unitary k-design on n qubits?\nAnswer:", + " ξ ≥ √(nk/ε)" + ], + [ + "Question:What is the required local patch size ξ to ensure the ensemble forms an ε-approximate unitary k-design on n qubits?\nAnswer:", + " ξ ≥ log₂(nk²/ε)" + ], + [ + "Question:What is the required local patch size ξ to ensure the ensemble forms an ε-approximate unitary k-design on n qubits?\nAnswer:", + " ξ ≥ ε / nk" + ], + [ + "Question:What is the required local patch size ξ to ensure the ensemble forms an ε-approximate unitary k-design on n qubits?\nAnswer:", + " ξ ≥ (n + k + ε) / 2" + ] + ], + "resps": [ + [ + [ + -49.53298568725586, + false + ] + ], + [ + [ + -54.27104187011719, + false + ] + ], + [ + [ + -37.01532745361328, + false + ] + ], + [ + [ + -42.08514404296875, + false + ] + ] + ], + "filtered_resps": [ + [ + -49.53298568725586, + false + ], + [ + -54.27104187011719, + false + ], + [ + -37.01532745361328, + false + ], + [ + -42.08514404296875, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "102db8f0a456c5c4a4df5e6a3b89c1d0a0c152ab74bc9b0d55821233e4201790", + "prompt_hash": "ec0c42f5b8cc384eacbbfe02d3206f129a055df415b34dfaf626feddae9d4ec9", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 2, + 
"doc": { + "question": "What circuit depth is sufficient to form pseudorandom unitaries on all-to-all-connected circuits?", + "choices": [ + "poly(n)", + "poly(log log n)", + "poly(n log n)", + "poly(sqrt(n))" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What circuit depth is sufficient to form pseudorandom unitaries on all-to-all-connected circuits?\nAnswer:", + " poly(n)" + ], + [ + "Question:What circuit depth is sufficient to form pseudorandom unitaries on all-to-all-connected circuits?\nAnswer:", + " poly(log log n)" + ], + [ + "Question:What circuit depth is sufficient to form pseudorandom unitaries on all-to-all-connected circuits?\nAnswer:", + " poly(n log n)" + ], + [ + "Question:What circuit depth is sufficient to form pseudorandom unitaries on all-to-all-connected circuits?\nAnswer:", + " poly(sqrt(n))" + ] + ], + "resps": [ + [ + [ + -16.263832092285156, + false + ] + ], + [ + [ + -24.4702091217041, + false + ] + ], + [ + [ + -24.825349807739258, + false + ] + ], + [ + [ + -23.393047332763672, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.263832092285156, + false + ], + [ + -24.4702091217041, + false + ], + [ + -24.825349807739258, + false + ], + [ + -23.393047332763672, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "dbf5f6346c8e1d783a57e99b79cfa23e88d4e708818bfc4a3a556358d731c5fe", + "prompt_hash": "fd0f10c0a89cb75f0efbdb7c152212d7e9c6fb21be2bf75043c1a09204a8b9b3", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 3, + "doc": { + "question": "Why can random quantum circuits appear Haar-random in exponentially lower depth compared to classical circuits?", + "choices": [ + "Quantum circuits use fewer gates", + "Quantum circuits operate only on classical bits", + "Quantum circuits can hide information in non-commuting observables", + "Quantum circuits avoid measurement altogether" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Why can random quantum circuits appear Haar-random in exponentially lower depth compared to classical circuits?\nAnswer:", + " Quantum circuits use fewer gates" + ], + [ + "Question:Why can random quantum circuits appear Haar-random in exponentially lower depth compared to classical circuits?\nAnswer:", + " Quantum circuits operate only on classical bits" + ], + [ + "Question:Why can random quantum circuits appear Haar-random in exponentially lower depth compared to classical circuits?\nAnswer:", + " Quantum circuits can hide information in non-commuting observables" + ], + [ + "Question:Why can random quantum circuits appear Haar-random in exponentially lower depth compared to classical circuits?\nAnswer:", + " Quantum circuits avoid measurement altogether" + ] + ], + "resps": [ + [ + [ + -17.355615615844727, + false + ] + ], + [ + [ + -24.588275909423828, + false + ] + ], + [ + [ + -35.55263900756836, + false + ] + ], + [ + [ + -26.179462432861328, + false + ] + ] + ], + "filtered_resps": [ + [ + -17.355615615844727, + false + ], + [ + -24.588275909423828, + false + ], + [ + -35.55263900756836, + false + ], + [ + -26.179462432861328, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "8942b343b5900cf80761e00cc194c309d61c7b88a17de5c8c41435096b338eb9", + "prompt_hash": "5f3c72693fae80533b25db692fda49f4cf0382dd96efb58bd5a1b3a719962ca7", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + 
"acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 4, + "doc": { + "question": "What is the minimum circuit depth required for orthogonal quantum circuits to match the second moment of a Haar-random orthogonal matrix in 1D?", + "choices": [ + "Ω(log log n)", + "Ω(n)", + "Ω(log n)", + "Ω(1)" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the minimum circuit depth required for orthogonal quantum circuits to match the second moment of a Haar-random orthogonal matrix in 1D?\nAnswer:", + " Ω(log log n)" + ], + [ + "Question:What is the minimum circuit depth required for orthogonal quantum circuits to match the second moment of a Haar-random orthogonal matrix in 1D?\nAnswer:", + " Ω(n)" + ], + [ + "Question:What is the minimum circuit depth required for orthogonal quantum circuits to match the second moment of a Haar-random orthogonal matrix in 1D?\nAnswer:", + " Ω(log n)" + ], + [ + "Question:What is the minimum circuit depth required for orthogonal quantum circuits to match the second moment of a Haar-random orthogonal matrix in 1D?\nAnswer:", + " Ω(1)" + ] + ], + "resps": [ + [ + [ + -32.244606018066406, + false + ] + ], + [ + [ + -26.079303741455078, + false + ] + ], + [ + [ + -28.070377349853516, + false + ] + ], + [ + [ + -25.273380279541016, + false + ] + ] + ], + "filtered_resps": [ + [ + -32.244606018066406, + false + ], + [ + -26.079303741455078, + false + ], + [ + -28.070377349853516, + false + ], + [ + -25.273380279541016, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "c14fd056edad82ec32d1bed34a49d500307e4ba5d553ffaa2100964e31b87fd7", + "prompt_hash": "82814ab236197202c0eaa4e72f1fb33f558dcd6525b1e3e6b2f1db9e735ec159", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 5, + "doc": { + "question": "What type of product is denoted by the symbol \\\\( \\\\odot \\\\) in the notation section?", + "choices": [ + "Kronecker product", + "Outer product", + "Hadamard product", + "Khatri-Rao product" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What type of product is denoted by the symbol \\\\( \\\\odot \\\\) in the notation section?\nAnswer:", + " Kronecker product" + ], + [ + "Question:What type of product is denoted by the symbol \\\\( \\\\odot \\\\) in the notation section?\nAnswer:", + " Outer product" + ], + [ + "Question:What type of product is denoted by the symbol \\\\( \\\\odot \\\\) in the notation section?\nAnswer:", + " Hadamard product" + ], + [ + "Question:What type of product is denoted by the symbol \\\\( \\\\odot \\\\) in the notation section?\nAnswer:", + " Khatri-Rao product" + ] + ], + "resps": [ + [ + [ + -13.476451873779297, + false + ] + ], + [ + [ + -15.377878189086914, + false + ] + ], + [ + [ + -12.29111099243164, + false + ] + ], + [ + [ + -15.445086479187012, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.476451873779297, + false + ], + [ + -15.377878189086914, + false + ], + [ + -12.29111099243164, + false + ], + [ + -15.445086479187012, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7059aba6a93a63f69559df1560312849767e51568200900697eb0d21ba820d66", + "prompt_hash": "63775c730f165c2cdf883a1db71000f8749c0563ab2ce07d1e2bb009870f4d05", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 6, + "doc": { + "question": "What is the role of the 
matrix \\\\( \\\\bm{\\\\Sigma} \\\\) in the MIMO channel model?", + "choices": [ + "It represents the antenna gain matrix.", + "It denotes the noise correlation matrix.", + "It denotes the path response matrix (PRM) between the Tx and Rx regions.", + "It is the pilot signal matrix." + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the role of the matrix \\\\( \\\\bm{\\\\Sigma} \\\\) in the MIMO channel model?\nAnswer:", + " It represents the antenna gain matrix." + ], + [ + "Question:What is the role of the matrix \\\\( \\\\bm{\\\\Sigma} \\\\) in the MIMO channel model?\nAnswer:", + " It denotes the noise correlation matrix." + ], + [ + "Question:What is the role of the matrix \\\\( \\\\bm{\\\\Sigma} \\\\) in the MIMO channel model?\nAnswer:", + " It denotes the path response matrix (PRM) between the Tx and Rx regions." + ], + [ + "Question:What is the role of the matrix \\\\( \\\\bm{\\\\Sigma} \\\\) in the MIMO channel model?\nAnswer:", + " It is the pilot signal matrix." + ] + ], + "resps": [ + [ + [ + -21.347972869873047, + false + ] + ], + [ + [ + -20.47518539428711, + false + ] + ], + [ + [ + -52.65840148925781, + false + ] + ], + [ + [ + -24.194866180419922, + false + ] + ] + ], + "filtered_resps": [ + [ + -21.347972869873047, + false + ], + [ + -20.47518539428711, + false + ], + [ + -52.65840148925781, + false + ], + [ + -24.194866180419922, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f3ea440e3b590ca8bf5e1a9ff43163c026a3988f19f3265b244603813e64d127", + "prompt_hash": "cf332b053f0076252bfb678cd15aae63ca2ba6dd97670499a3fb759be57a4062", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 7, + "doc": { + "question": "What is the purpose of introducing a two-stage Tx-Rx successive antenna movement pattern?", + "choices": [ + "To minimize the number of antenna ports.", + "To simplify the hardware implementation.", + "To allow the received pilot signals to be expressed as a third-order tensor.", + "To estimate the pilot noise covariance matrix." + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the purpose of introducing a two-stage Tx-Rx successive antenna movement pattern?\nAnswer:", + " To minimize the number of antenna ports." + ], + [ + "Question:What is the purpose of introducing a two-stage Tx-Rx successive antenna movement pattern?\nAnswer:", + " To simplify the hardware implementation." + ], + [ + "Question:What is the purpose of introducing a two-stage Tx-Rx successive antenna movement pattern?\nAnswer:", + " To allow the received pilot signals to be expressed as a third-order tensor." + ], + [ + "Question:What is the purpose of introducing a two-stage Tx-Rx successive antenna movement pattern?\nAnswer:", + " To estimate the pilot noise covariance matrix." 
+ ] + ], + "resps": [ + [ + [ + -21.519088745117188, + false + ] + ], + [ + [ + -15.229227066040039, + false + ] + ], + [ + [ + -46.79536819458008, + false + ] + ], + [ + [ + -31.928115844726562, + false + ] + ] + ], + "filtered_resps": [ + [ + -21.519088745117188, + false + ], + [ + -15.229227066040039, + false + ], + [ + -46.79536819458008, + false + ], + [ + -31.928115844726562, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "cd078c68e5271f1789f1ad38aff800702fe05b176f29fd943f37fefa4ef38bd7", + "prompt_hash": "b7c805f3a0e82a3b9b69051224bf844789cea13c43f241f8e8a5735274003aac", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 8, + "doc": { + "question": "Which algorithm is used to estimate the factor matrices from the tensor in the proposed method?", + "choices": [ + "Gradient Descent", + "Stochastic Gradient Descent", + "Alternating Least Squares (ALS)", + "Expectation Maximization (EM)" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which algorithm is used to estimate the factor matrices from the tensor in the proposed method?\nAnswer:", + " Gradient Descent" + ], + [ + "Question:Which algorithm is used to estimate the factor matrices from the tensor in the proposed method?\nAnswer:", + " Stochastic Gradient Descent" + ], + [ + "Question:Which algorithm is used to estimate the factor matrices from the tensor in the proposed method?\nAnswer:", + " Alternating Least Squares (ALS)" + ], + [ + "Question:Which algorithm is used to estimate the factor matrices from the tensor in the proposed method?\nAnswer:", + " Expectation Maximization (EM)" + ] + ], + "resps": [ + [ + [ + -10.288975715637207, + false + ] + ], + [ + [ + -10.533903121948242, + false + ] + ], + [ + [ + -10.85214614868164, + false + ] + ], + [ + [ + -9.711283683776855, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.288975715637207, + false + ], + [ + -10.533903121948242, + false + ], + [ + -10.85214614868164, + false + ], + [ + -9.711283683776855, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "448aca9f20641c880937f0058af91cb1edc19f3cfd05c3b986c49cfacbe4131b", + "prompt_hash": "34a9086f0fa49009032bd6411419757a73cbd4732e082dd3f55b163c47f3d75e", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 9, + "doc": { + "question": "According to Lemma 1, what is the inequality condition for the uniqueness of the CP decomposition of a third-order tensor?", + "choices": [ + "\\( \\min(I_1, L) + \\min(I_2, L) + \\min(I_3, L) \\geq 2L + 1 \\)", + "\\( \\min(I_1, L) + \\min(I_2, L) + \\min(I_3, L) \\leq 2L + 2 \\)", + "\\( \\min(I_1, L) + \\min(I_2, L) + \\min(I_3, L) \\geq 2L + 2 \\)", + "\\( I_1 + I_2 + I_3 \\geq 3L \\)" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:According to Lemma 1, what is the inequality condition for the uniqueness of the CP decomposition of a third-order tensor?\nAnswer:", + " \\( \\min(I_1, L) + \\min(I_2, L) + \\min(I_3, L) \\geq 2L + 1 \\)" + ], + [ + "Question:According to Lemma 1, what is the inequality condition for the uniqueness of the CP decomposition of a third-order tensor?\nAnswer:", + " \\( \\min(I_1, L) + \\min(I_2, L) + \\min(I_3, L) \\leq 2L + 2 \\)" + ], + [ + "Question:According to Lemma 1, what is the inequality condition for the uniqueness of the CP decomposition of a 
third-order tensor?\nAnswer:", + " \\( \\min(I_1, L) + \\min(I_2, L) + \\min(I_3, L) \\geq 2L + 2 \\)" + ], + [ + "Question:According to Lemma 1, what is the inequality condition for the uniqueness of the CP decomposition of a third-order tensor?\nAnswer:", + " \\( I_1 + I_2 + I_3 \\geq 3L \\)" + ] + ], + "resps": [ + [ + [ + -57.100379943847656, + false + ] + ], + [ + [ + -57.3333740234375, + false + ] + ], + [ + [ + -57.717796325683594, + false + ] + ], + [ + [ + -37.311668395996094, + false + ] + ] + ], + "filtered_resps": [ + [ + -57.100379943847656, + false + ], + [ + -57.3333740234375, + false + ], + [ + -57.717796325683594, + false + ], + [ + -37.311668395996094, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "73566678c7792ba0c12d20d4df5544540424788b02bbdfc5520581d7005ffa11", + "prompt_hash": "042d5972389ff5a870b7a6352b566ad9b4781c8c36433ffe586761307d59ad12", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 10, + "doc": { + "question": "What is the total number of parameters activated per text token in the described MoE model?", + "choices": [ + "3.5B", + "24.9B", + "66M", + "438M" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the total number of parameters activated per text token in the described MoE model?\nAnswer:", + " 3.5B" + ], + [ + "Question:What is the total number of parameters activated per text token in the described MoE model?\nAnswer:", + " 24.9B" + ], + [ + "Question:What is the total number of parameters activated per text token in the described MoE model?\nAnswer:", + " 66M" + ], + [ + "Question:What is the total number of parameters activated per text token in the described MoE model?\nAnswer:", + " 438M" + ] + ], + "resps": [ + [ + [ + -13.006896018981934, + false + ] + ], + [ + [ + -17.268253326416016, + false + ] + ], + [ + [ + -10.897493362426758, + false + ] + ], + [ + [ + -13.124433517456055, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.006896018981934, + false + ], + [ + -17.268253326416016, + false + ], + [ + -10.897493362426758, + false + ], + [ + -13.124433517456055, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "c6afc5680e8d3dceb6ef438860605a3e0542ca1cce44927569aac288145823d6", + "prompt_hash": "c50de2733f2cf5cd1bba88a2366bb4ae37ade03fd2675d4d2e9050a4239b6220", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 11, + "doc": { + "question": "Which model is used to initialize the weights of the Vision Transformer (ViT) in the visual encoder?", + "choices": [ + "BLIP2", + "SigLIP-SO400M", + "CLIP", + "Qwen-VL" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which model is used to initialize the weights of the Vision Transformer (ViT) in the visual encoder?\nAnswer:", + " BLIP2" + ], + [ + "Question:Which model is used to initialize the weights of the Vision Transformer (ViT) in the visual encoder?\nAnswer:", + " SigLIP-SO400M" + ], + [ + "Question:Which model is used to initialize the weights of the Vision Transformer (ViT) in the visual encoder?\nAnswer:", + " CLIP" + ], + [ + "Question:Which model is used to initialize the weights of the Vision Transformer (ViT) in the visual encoder?\nAnswer:", + " Qwen-VL" + ] + ], + "resps": [ + [ + [ + -12.5610933303833, + false + ] + ], + [ + [ + -35.3951301574707, + false + ] + ], + [ + 
[ + -6.808763027191162, + false + ] + ], + [ + [ + -10.143580436706543, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.5610933303833, + false + ], + [ + -35.3951301574707, + false + ], + [ + -6.808763027191162, + false + ], + [ + -10.143580436706543, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "439f9fb56c44368ec0d4edbf4d6e0c4b213a2838c243fa648166b17164147572", + "prompt_hash": "12e07657b42f63eb9020cf762b1b06f77d3ea8123fa5b01679b6e03bfa24502f", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 12, + "doc": { + "question": "How many experts are there in each MoE layer of the model?", + "choices": [ + "12", + "32", + "66", + "128" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many experts are there in each MoE layer of the model?\nAnswer:", + " 12" + ], + [ + "Question:How many experts are there in each MoE layer of the model?\nAnswer:", + " 32" + ], + [ + "Question:How many experts are there in each MoE layer of the model?\nAnswer:", + " 66" + ], + [ + "Question:How many experts are there in each MoE layer of the model?\nAnswer:", + " 128" + ] + ], + "resps": [ + [ + [ + -5.806491374969482, + false + ] + ], + [ + [ + -6.29629373550415, + false + ] + ], + [ + [ + -11.010457992553711, + false + ] + ], + [ + [ + -7.832982540130615, + false + ] + ] + ], + "filtered_resps": [ + [ + -5.806491374969482, + false + ], + [ + -6.29629373550415, + false + ], + [ + -11.010457992553711, + false + ], + [ + -7.832982540130615, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "64fcd7f21e4f146b99315a2ab8ffecfccbe38f757e4e2e4e741d44cc318cedff", + "prompt_hash": "79143893a7c5b0703ecf4631e7bc9aec135cd93570015c66ad796a6643c95c2a", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 13, + "doc": { + "question": "What type of loss is used to prevent routing collapse during training?", + "choices": [ + "Cross-entropy loss", + "Z-loss", + "Load balancing loss", + "Triplet loss" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What type of loss is used to prevent routing collapse during training?\nAnswer:", + " Cross-entropy loss" + ], + [ + "Question:What type of loss is used to prevent routing collapse during training?\nAnswer:", + " Z-loss" + ], + [ + "Question:What type of loss is used to prevent routing collapse during training?\nAnswer:", + " Load balancing loss" + ], + [ + "Question:What type of loss is used to prevent routing collapse during training?\nAnswer:", + " Triplet loss" + ] + ], + "resps": [ + [ + [ + -7.436159610748291, + false + ] + ], + [ + [ + -12.510422706604004, + false + ] + ], + [ + [ + -13.109281539916992, + false + ] + ], + [ + [ + -12.308365821838379, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.436159610748291, + false + ], + [ + -12.510422706604004, + false + ], + [ + -13.109281539916992, + false + ], + [ + -12.308365821838379, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "d4376c2623b39f4a9dba9b0a13d6e872a8b5737b8bd05087639cc083c35eceb8", + "prompt_hash": "4f6cd51e4cda9517a2579b60770da74323879616ff0c76869abad7f2a69feaa2", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 14, + "doc": { + "question": "How many tokens are used in 
the final multimodal post-training stage?", + "choices": [ + "12B", + "20B", + "33B", + "400B" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:How many tokens are used in the final multimodal post-training stage?\nAnswer:", + " 12B" + ], + [ + "Question:How many tokens are used in the final multimodal post-training stage?\nAnswer:", + " 20B" + ], + [ + "Question:How many tokens are used in the final multimodal post-training stage?\nAnswer:", + " 33B" + ], + [ + "Question:How many tokens are used in the final multimodal post-training stage?\nAnswer:", + " 400B" + ] + ], + "resps": [ + [ + [ + -11.14369010925293, + false + ] + ], + [ + [ + -11.791297912597656, + false + ] + ], + [ + [ + -12.262496948242188, + false + ] + ], + [ + [ + -13.315629959106445, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.14369010925293, + false + ], + [ + -11.791297912597656, + false + ], + [ + -12.262496948242188, + false + ], + [ + -13.315629959106445, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "0825e1d5fb8ee7c6bd644f052a2488cb7be215fb467757bfa2555f98a47c09d0", + "prompt_hash": "440baadf0422a6fba1ef6e8c9f6d53343c33a81f59427b90fdbeee2a2a507e87", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 15, + "doc": { + "question": "What are the four ability modules defined in the evaluation interface for embodied agents?", + "choices": [ + "Goal Interpretation, Subgoal Decomposition, Action Sequencing, Transition Modeling", + "Goal Interpretation, Motion Control, Planning, Feedback Analysis", + "Goal Setting, Path Planning, Execution, Memory Encoding", + "Object Recognition, Scene Understanding, Action Prediction, Feedback Control" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What are the four ability modules defined in the evaluation interface for embodied agents?\nAnswer:", + " Goal Interpretation, Subgoal Decomposition, Action Sequencing, Transition Modeling" + ], + [ + "Question:What are the four ability modules defined in the evaluation interface for embodied agents?\nAnswer:", + " Goal Interpretation, Motion Control, Planning, Feedback Analysis" + ], + [ + "Question:What are the four ability modules defined in the evaluation interface for embodied agents?\nAnswer:", + " Goal Setting, Path Planning, Execution, Memory Encoding" + ], + [ + "Question:What are the four ability modules defined in the evaluation interface for embodied agents?\nAnswer:", + " Object Recognition, Scene Understanding, Action Prediction, Feedback Control" + ] + ], + "resps": [ + [ + [ + -52.57338333129883, + false + ] + ], + [ + [ + -45.967445373535156, + false + ] + ], + [ + [ + -43.07550811767578, + false + ] + ], + [ + [ + -40.704917907714844, + false + ] + ] + ], + "filtered_resps": [ + [ + -52.57338333129883, + false + ], + [ + -45.967445373535156, + false + ], + [ + -43.07550811767578, + false + ], + [ + -40.704917907714844, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "446907946e6be1ea3eddc9b9023aa680aab7631bc23164a68c1f2ceaae9ba2ab", + "prompt_hash": "c6e08ed15bc7ade20774b38a2bd6126aa9628961781f8bf1134f6e6a9efa8b2c", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 16, + "doc": { + "question": "Which logic formalism is used to unify goal representation and interface communication across modules?", + 
"choices": [ + "First-order logic", + "Linear Temporal Logic (LTL)", + "Propositional Logic", + "Modal Logic" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which logic formalism is used to unify goal representation and interface communication across modules?\nAnswer:", + " First-order logic" + ], + [ + "Question:Which logic formalism is used to unify goal representation and interface communication across modules?\nAnswer:", + " Linear Temporal Logic (LTL)" + ], + [ + "Question:Which logic formalism is used to unify goal representation and interface communication across modules?\nAnswer:", + " Propositional Logic" + ], + [ + "Question:Which logic formalism is used to unify goal representation and interface communication across modules?\nAnswer:", + " Modal Logic" + ] + ], + "resps": [ + [ + [ + -9.375009536743164, + false + ] + ], + [ + [ + -10.933296203613281, + false + ] + ], + [ + [ + -8.579730987548828, + false + ] + ], + [ + [ + -11.018582344055176, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.375009536743164, + false + ], + [ + -10.933296203613281, + false + ], + [ + -8.579730987548828, + false + ], + [ + -11.018582344055176, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "a34d1de6c62668e56480d62ad4a4d294667162537293a265b2a23149e6cd5b3c", + "prompt_hash": "fbc065b0157fa4517c361a13aad431e8683795a12f276ad4162de1750f148b19", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 17, + "doc": { + "question": "Which operator in the LTL extension replaces traditional 'Next' and 'Eventually' operators?", + "choices": [ + "until", + "followed_by", + "then", + "after" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which operator in the LTL extension replaces traditional 'Next' and 'Eventually' operators?\nAnswer:", + " until" + ], + [ + "Question:Which operator in the LTL extension replaces traditional 'Next' and 'Eventually' operators?\nAnswer:", + " followed_by" + ], + [ + "Question:Which operator in the LTL extension replaces traditional 'Next' and 'Eventually' operators?\nAnswer:", + " then" + ], + [ + "Question:Which operator in the LTL extension replaces traditional 'Next' and 'Eventually' operators?\nAnswer:", + " after" + ] + ], + "resps": [ + [ + [ + -8.634527206420898, + false + ] + ], + [ + [ + -16.37803840637207, + false + ] + ], + [ + [ + -8.34276008605957, + false + ] + ], + [ + [ + -8.659734725952148, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.634527206420898, + false + ], + [ + -16.37803840637207, + false + ], + [ + -8.34276008605957, + false + ], + [ + -8.659734725952148, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b853127ae9c576f6be3da574057481ba1e3eac84d279279889e5df6bf8bf2088", + "prompt_hash": "8ee4cf7b73048e86482c23135a7db52e17051368b55de95740771faa55e50b7c", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 18, + "doc": { + "question": "What are the three main categories used to evaluate Goal Interpretation?", + "choices": [ + "Grammar Accuracy, Object Mapping, Action Simulation", + "State Goals, Relation Goals, Action Goals", + "Visual Matching, Semantic Parsing, Execution Accuracy", + "Input Coherence, Goal Satisfaction, Subgoal Length" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What are the three main categories 
used to evaluate Goal Interpretation?\nAnswer:", + " Grammar Accuracy, Object Mapping, Action Simulation" + ], + [ + "Question:What are the three main categories used to evaluate Goal Interpretation?\nAnswer:", + " State Goals, Relation Goals, Action Goals" + ], + [ + "Question:What are the three main categories used to evaluate Goal Interpretation?\nAnswer:", + " Visual Matching, Semantic Parsing, Execution Accuracy" + ], + [ + "Question:What are the three main categories used to evaluate Goal Interpretation?\nAnswer:", + " Input Coherence, Goal Satisfaction, Subgoal Length" + ] + ], + "resps": [ + [ + [ + -46.37894821166992, + false + ] + ], + [ + [ + -30.310394287109375, + false + ] + ], + [ + [ + -46.54631423950195, + false + ] + ], + [ + [ + -45.22802734375, + false + ] + ] + ], + "filtered_resps": [ + [ + -46.37894821166992, + false + ], + [ + -30.310394287109375, + false + ], + [ + -46.54631423950195, + false + ], + [ + -45.22802734375, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "e39038c99ac3dba99213e032531639c25f9198784fa744f6528f32358acaf39e", + "prompt_hash": "65380e72e4caa365a491095aaae576924e67da98e0d80326c8e24bce0b8c3f8c", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 19, + "doc": { + "question": "Which type of error occurs when an action is correct in format but involves an object or action not present in the environment?", + "choices": [ + "Parsing Error", + "Affordance Error", + "Hallucination Error", + "Missing Step" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which type of error occurs when an action is correct in format but involves an object or action not present in the environment?\nAnswer:", + " Parsing Error" + ], + [ + "Question:Which type of error occurs when an action is correct in format but involves an object or action not present in the environment?\nAnswer:", + " Affordance Error" + ], + [ + "Question:Which type of error occurs when an action is correct in format but involves an object or action not present in the environment?\nAnswer:", + " Hallucination Error" + ], + [ + "Question:Which type of error occurs when an action is correct in format but involves an object or action not present in the environment?\nAnswer:", + " Missing Step" + ] + ], + "resps": [ + [ + [ + -11.865713119506836, + false + ] + ], + [ + [ + -12.490418434143066, + false + ] + ], + [ + [ + -12.585088729858398, + false + ] + ], + [ + [ + -15.464625358581543, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.865713119506836, + false + ], + [ + -12.490418434143066, + false + ], + [ + -12.585088729858398, + false + ], + [ + -15.464625358581543, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "3fabb95c875a224c6b8a5289f4928512d9403c0f1ade52711fe13600462d2cd1", + "prompt_hash": "cc058b02a36c03400b43d613b77eec781e329a7dc4c8e0e5588cf7397820cf16", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 20, + "doc": { + "question": "What is the function of the model referred to as G_RM?", + "choices": [ + "To generate completions based on user prompts", + "To provide safety specifications for model training", + "To judge completions for correctness, helpfulness, and compliance", + "To encode the chain-of-thought during inference" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + 
"Question:What is the function of the model referred to as G_RM?\nAnswer:", + " To generate completions based on user prompts" + ], + [ + "Question:What is the function of the model referred to as G_RM?\nAnswer:", + " To provide safety specifications for model training" + ], + [ + "Question:What is the function of the model referred to as G_RM?\nAnswer:", + " To judge completions for correctness, helpfulness, and compliance" + ], + [ + "Question:What is the function of the model referred to as G_RM?\nAnswer:", + " To encode the chain-of-thought during inference" + ] + ], + "resps": [ + [ + [ + -28.619800567626953, + false + ] + ], + [ + [ + -35.57646560668945, + false + ] + ], + [ + [ + -51.08036804199219, + false + ] + ], + [ + [ + -38.449432373046875, + false + ] + ] + ], + "filtered_resps": [ + [ + -28.619800567626953, + false + ], + [ + -35.57646560668945, + false + ], + [ + -51.08036804199219, + false + ], + [ + -38.449432373046875, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "76cc915c4efc02cf6d849bb601b3d2006b33d12d3b4f4892d0c94aa43042494c", + "prompt_hash": "e15cc7d6ef7856d5aa45d2fc0c5805fcab80ecf2b6ffc22d82642c782e849a7d", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 21, + "doc": { + "question": "What are the three labels used to classify user requests based on content policy?", + "choices": [ + "Accept, Reject, Moderate", + "Allow, Deny, Defer", + "Allowed, Disallowed, Requires Safe Completion", + "Safe, Unsafe, Context-Dependent" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What are the three labels used to classify user requests based on content policy?\nAnswer:", + " Accept, Reject, Moderate" + ], + [ + "Question:What are the three labels used to classify user requests based on content policy?\nAnswer:", + " Allow, Deny, Defer" + ], + [ + "Question:What are the three labels used to classify user requests based on content policy?\nAnswer:", + " Allowed, Disallowed, Requires Safe Completion" + ], + [ + "Question:What are the three labels used to classify user requests based on content policy?\nAnswer:", + " Safe, Unsafe, Context-Dependent" + ] + ], + "resps": [ + [ + [ + -17.829608917236328, + false + ] + ], + [ + [ + -20.263399124145508, + false + ] + ], + [ + [ + -43.32582473754883, + false + ] + ], + [ + [ + -25.447376251220703, + false + ] + ] + ], + "filtered_resps": [ + [ + -17.829608917236328, + false + ], + [ + -20.263399124145508, + false + ], + [ + -43.32582473754883, + false + ], + [ + -25.447376251220703, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "756aef5e8cc9ae0ae256cd9578807a8e0142956531fd021cbde0219bca1220be", + "prompt_hash": "7021a82e6a2233212c747912d12da88d433e42f877665a22d6da5907bc0825d1", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 22, + "doc": { + "question": "Which safety category allows educational discussions about suicide but requires a safe-completion for ideation or methods?", + "choices": [ + "Extremism", + "Self-harm", + "Harassment", + "Violence" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which safety category allows educational discussions about suicide but requires a safe-completion for ideation or methods?\nAnswer:", + " Extremism" + ], + [ + "Question:Which safety category allows educational discussions about 
suicide but requires a safe-completion for ideation or methods?\nAnswer:", + " Self-harm" + ], + [ + "Question:Which safety category allows educational discussions about suicide but requires a safe-completion for ideation or methods?\nAnswer:", + " Harassment" + ], + [ + "Question:Which safety category allows educational discussions about suicide but requires a safe-completion for ideation or methods?\nAnswer:", + " Violence" + ] + ], + "resps": [ + [ + [ + -11.61020565032959, + false + ] + ], + [ + [ + -9.390151023864746, + false + ] + ], + [ + [ + -10.084878921508789, + false + ] + ], + [ + [ + -7.957781791687012, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.61020565032959, + false + ], + [ + -9.390151023864746, + false + ], + [ + -10.084878921508789, + false + ], + [ + -7.957781791687012, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "8e13a825edb967f944b382519cbe119e167c8083076f40e27ed48a979af5f556", + "prompt_hash": "2760df879ddc37afd924dd9c38a63395dec97002b56fb0f3bbd691751e7d282a", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 23, + "doc": { + "question": "What does the refusal style guideline explicitly prohibit?", + "choices": [ + "Using abstract references to requests", + "Mentioning specific criminal acts", + "Encouraging users to consult professionals", + "Providing brief apologies" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What does the refusal style guideline explicitly prohibit?\nAnswer:", + " Using abstract references to requests" + ], + [ + "Question:What does the refusal style guideline explicitly prohibit?\nAnswer:", + " Mentioning specific criminal acts" + ], + [ + "Question:What does the refusal style guideline explicitly prohibit?\nAnswer:", + " Encouraging users to consult professionals" + ], + [ + "Question:What does the refusal style guideline explicitly prohibit?\nAnswer:", + " Providing brief apologies" + ] + ], + "resps": [ + [ + [ + -31.08670425415039, + false + ] + ], + [ + [ + -24.397045135498047, + false + ] + ], + [ + [ + -29.132076263427734, + false + ] + ], + [ + [ + -22.21463394165039, + false + ] + ] + ], + "filtered_resps": [ + [ + -31.08670425415039, + false + ], + [ + -24.397045135498047, + false + ], + [ + -29.132076263427734, + false + ], + [ + -22.21463394165039, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "6fb9927c8cc4285578785a6dd33dd9c2aa08ec5fd0ba60b17966e1321d6caa0b", + "prompt_hash": "5e1e243af4670b0db136a0863bb51d6893576e2ab2d2eba42416bf75235420a2", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 24, + "doc": { + "question": "What type of training information is excluded from prompts during SFT to encourage model recall of safety policies?", + "choices": [ + "Chain-of-thought references", + "Category-specific specifications", + "Final answer text", + "Safety categories" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What type of training information is excluded from prompts during SFT to encourage model recall of safety policies?\nAnswer:", + " Chain-of-thought references" + ], + [ + "Question:What type of training information is excluded from prompts during SFT to encourage model recall of safety policies?\nAnswer:", + " Category-specific specifications" + ], + [ + "Question:What type of training 
information is excluded from prompts during SFT to encourage model recall of safety policies?\nAnswer:", + " Final answer text" + ], + [ + "Question:What type of training information is excluded from prompts during SFT to encourage model recall of safety policies?\nAnswer:", + " Safety categories" + ] + ], + "resps": [ + [ + [ + -19.58326530456543, + false + ] + ], + [ + [ + -21.516347885131836, + false + ] + ], + [ + [ + -22.331558227539062, + false + ] + ], + [ + [ + -15.906003952026367, + false + ] + ] + ], + "filtered_resps": [ + [ + -19.58326530456543, + false + ], + [ + -21.516347885131836, + false + ], + [ + -22.331558227539062, + false + ], + [ + -15.906003952026367, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "4120363f1bef97c9f0e1c4e575fd00747e559372e35a5088d1b0c464f076421a", + "prompt_hash": "cce0d69851e1b0c4a24df4edfaace2832a3fedfaf5d1bca4cdf8e8849f4c503e", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 25, + "doc": { + "question": "What symbol does Search-o1 use to mark the beginning of a search query?", + "choices": [ + "<|begin_search_query|>", + "<|start_search_query|>", + "<|search_query_begin|>", + "<|open_search_query|>" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What symbol does Search-o1 use to mark the beginning of a search query?\nAnswer:", + " <|begin_search_query|>" + ], + [ + "Question:What symbol does Search-o1 use to mark the beginning of a search query?\nAnswer:", + " <|start_search_query|>" + ], + [ + "Question:What symbol does Search-o1 use to mark the beginning of a search query?\nAnswer:", + " <|search_query_begin|>" + ], + [ + "Question:What symbol does Search-o1 use to mark the beginning of a search query?\nAnswer:", + " <|open_search_query|>" + ] + ], + "resps": [ + [ + [ + -23.955759048461914, + false + ] + ], + [ + [ + -23.02724838256836, + false + ] + ], + [ + [ + -24.06060791015625, + false + ] + ], + [ + [ + -28.29389190673828, + false + ] + ] + ], + "filtered_resps": [ + [ + -23.955759048461914, + false + ], + [ + -23.02724838256836, + false + ], + [ + -24.06060791015625, + false + ], + [ + -28.29389190673828, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "266ffc54ecf6947b9dbe06a2643ec3a9a78751d0b06809c042ca50fa25ebdd80", + "prompt_hash": "bcc7f84f3b6b3f32231f935019674ec6e3e6e8383565d32e5385bb91eb20ed20", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 26, + "doc": { + "question": "Which API does Search-o1 use to fetch web content for given URLs?", + "choices": [ + "Jina Reader API", + "OpenAI Retrieval API", + "Bing Content API", + "Doc2Text API" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which API does Search-o1 use to fetch web content for given URLs?\nAnswer:", + " Jina Reader API" + ], + [ + "Question:Which API does Search-o1 use to fetch web content for given URLs?\nAnswer:", + " OpenAI Retrieval API" + ], + [ + "Question:Which API does Search-o1 use to fetch web content for given URLs?\nAnswer:", + " Bing Content API" + ], + [ + "Question:Which API does Search-o1 use to fetch web content for given URLs?\nAnswer:", + " Doc2Text API" + ] + ], + "resps": [ + [ + [ + -31.21390724182129, + false + ] + ], + [ + [ + -21.821231842041016, + false + ] + ], + [ + [ + -15.548657417297363, + false + ] + ], + [ + [ + 
-21.298263549804688, + false + ] + ] + ], + "filtered_resps": [ + [ + -31.21390724182129, + false + ], + [ + -21.821231842041016, + false + ], + [ + -15.548657417297363, + false + ], + [ + -21.298263549804688, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "12b97b0d53975bf2ea01b8de1d4f72c94139f2d040997485fc5b631ae309bdc4", + "prompt_hash": "cd7f2ae9f1e2f4ed786f45762308172a6b4c6791d7fcc0971b432ac17897e6cb", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 27, + "doc": { + "question": "What is the purpose of the Reason-in-Documents module in the Search-o1 framework?", + "choices": [ + "To analyze retrieved documents and extract relevant information for reasoning", + "To generate random search queries when needed", + "To replace the original reasoning model with a distilled one", + "To summarize the question before answering" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the purpose of the Reason-in-Documents module in the Search-o1 framework?\nAnswer:", + " To analyze retrieved documents and extract relevant information for reasoning" + ], + [ + "Question:What is the purpose of the Reason-in-Documents module in the Search-o1 framework?\nAnswer:", + " To generate random search queries when needed" + ], + [ + "Question:What is the purpose of the Reason-in-Documents module in the Search-o1 framework?\nAnswer:", + " To replace the original reasoning model with a distilled one" + ], + [ + "Question:What is the purpose of the Reason-in-Documents module in the Search-o1 framework?\nAnswer:", + " To summarize the question before answering" + ] + ], + "resps": [ + [ + [ + -25.887866973876953, + false + ] + ], + [ + [ + -29.12590980529785, + false + ] + ], + [ + [ + -37.679595947265625, + false + ] + ], + [ + [ + -23.72551155090332, + false + ] + ] + ], + "filtered_resps": [ + [ + -25.887866973876953, + false + ], + [ + -29.12590980529785, + false + ], + [ + -37.679595947265625, + false + ], + [ + -23.72551155090332, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "52afe4206ea3aa17a25d1a11ac1089b750f7309cae24882d6ed7eb1a59c03fdd", + "prompt_hash": "7a2712b8c180e61fc344ce7e93ff1255af68261f77c18a4d32a2b2d43729afec", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 28, + "doc": { + "question": "What does the GPQA dataset evaluate?", + "choices": [ + "PhD-level science multiple-choice QA", + "Elementary arithmetic reasoning", + "General commonsense QA", + "Code generation tasks" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What does the GPQA dataset evaluate?\nAnswer:", + " PhD-level science multiple-choice QA" + ], + [ + "Question:What does the GPQA dataset evaluate?\nAnswer:", + " Elementary arithmetic reasoning" + ], + [ + "Question:What does the GPQA dataset evaluate?\nAnswer:", + " General commonsense QA" + ], + [ + "Question:What does the GPQA dataset evaluate?\nAnswer:", + " Code generation tasks" + ] + ], + "resps": [ + [ + [ + -38.47972869873047, + false + ] + ], + [ + [ + -19.329471588134766, + false + ] + ], + [ + [ + -19.353612899780273, + false + ] + ], + [ + [ + -13.192042350769043, + false + ] + ] + ], + "filtered_resps": [ + [ + -38.47972869873047, + false + ], + [ + -19.329471588134766, + false + ], + [ + -19.353612899780273, + false + ], + [ + -13.192042350769043, 
+ false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "561a101e49c5d7e509296df702d6c922e782fa6fbdbdde6a232e1c11981f122e", + "prompt_hash": "947b58fbbac3e915e72713db7825fd176a0104e4b240ab4c19f1a9f4a3312ebb", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 29, + "doc": { + "question": "What function is triggered when a new search query is detected in Search-o1?", + "choices": [ + "Search", + "RetrieveDocs", + "FetchKnowledge", + "ExternalCall" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What function is triggered when a new search query is detected in Search-o1?\nAnswer:", + " Search" + ], + [ + "Question:What function is triggered when a new search query is detected in Search-o1?\nAnswer:", + " RetrieveDocs" + ], + [ + "Question:What function is triggered when a new search query is detected in Search-o1?\nAnswer:", + " FetchKnowledge" + ], + [ + "Question:What function is triggered when a new search query is detected in Search-o1?\nAnswer:", + " ExternalCall" + ] + ], + "resps": [ + [ + [ + -3.8662726879119873, + false + ] + ], + [ + [ + -16.96810531616211, + false + ] + ], + [ + [ + -16.960493087768555, + false + ] + ], + [ + [ + -18.54381561279297, + false + ] + ] + ], + "filtered_resps": [ + [ + -3.8662726879119873, + false + ], + [ + -16.96810531616211, + false + ], + [ + -16.960493087768555, + false + ], + [ + -18.54381561279297, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "a24d136e0962c5f560ab2ebf59f1bababedb02eb2e3cc801d2c3ea0679e0d3bb", + "prompt_hash": "d422059119c74c5c9293ac200c32bcfd09919427a52e44b1e1a52b27aaa55aee", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 30, + "doc": { + "question": "What is the main computational advantage of Mamba over Transformer models?", + "choices": [ + "It uses quadratic complexity with respect to sequence length.", + "It requires less training data.", + "It maintains linear computational complexity with respect to sequence length.", + "It ignores spatial features to save computation." + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the main computational advantage of Mamba over Transformer models?\nAnswer:", + " It uses quadratic complexity with respect to sequence length." + ], + [ + "Question:What is the main computational advantage of Mamba over Transformer models?\nAnswer:", + " It requires less training data." + ], + [ + "Question:What is the main computational advantage of Mamba over Transformer models?\nAnswer:", + " It maintains linear computational complexity with respect to sequence length." + ], + [ + "Question:What is the main computational advantage of Mamba over Transformer models?\nAnswer:", + " It ignores spatial features to save computation." 
+ ] + ], + "resps": [ + [ + [ + -28.060897827148438, + false + ] + ], + [ + [ + -19.728771209716797, + false + ] + ], + [ + [ + -23.50119400024414, + false + ] + ], + [ + [ + -40.33341979980469, + false + ] + ] + ], + "filtered_resps": [ + [ + -28.060897827148438, + false + ], + [ + -19.728771209716797, + false + ], + [ + -23.50119400024414, + false + ], + [ + -40.33341979980469, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "d14123ec3f8bb1e10aa06ff5803bfe8a8f2865a7e274a4e0332261e5e4f2c5f0", + "prompt_hash": "4b86acb7e70681cd4346ecce9c8ae6978cca29f5f4293186c1c15a10a933061e", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 31, + "doc": { + "question": "What are the three core components of the MambaHSI encoder?", + "choices": [ + "Convolutional block, spectral transformer, normalization layer", + "Spectral Mamba block, residual learning block, loss function", + "Spatial Mamba block, spectral Mamba block, spatial-spectral fusion module", + "Embedding layer, classification head, dropout block" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What are the three core components of the MambaHSI encoder?\nAnswer:", + " Convolutional block, spectral transformer, normalization layer" + ], + [ + "Question:What are the three core components of the MambaHSI encoder?\nAnswer:", + " Spectral Mamba block, residual learning block, loss function" + ], + [ + "Question:What are the three core components of the MambaHSI encoder?\nAnswer:", + " Spatial Mamba block, spectral Mamba block, spatial-spectral fusion module" + ], + [ + "Question:What are the three core components of the MambaHSI encoder?\nAnswer:", + " Embedding layer, classification head, dropout block" + ] + ], + "resps": [ + [ + [ + -34.9793701171875, + false + ] + ], + [ + [ + -41.64816665649414, + false + ] + ], + [ + [ + -31.879371643066406, + false + ] + ], + [ + [ + -37.53800964355469, + false + ] + ] + ], + "filtered_resps": [ + [ + -34.9793701171875, + false + ], + [ + -41.64816665649414, + false + ], + [ + -31.879371643066406, + false + ], + [ + -37.53800964355469, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "4a50923d09b4ff0d230771ad164546c156529be98d7cc504b58f53010ce32086", + "prompt_hash": "bb33e867da7f83f6e54e7e6ab57cbcc4a685b9497075b9f6bb67fc487c7f7af6", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 32, + "doc": { + "question": "What is the purpose of the spatial-spectral fusion module in MambaHSI?", + "choices": [ + "To apply dropout regularization across the spectral bands", + "To reduce spatial resolution for faster inference", + "To adaptively fuse the spatial and spectral information based on their importance", + "To convert hyperspectral images into RGB for classification" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the purpose of the spatial-spectral fusion module in MambaHSI?\nAnswer:", + " To apply dropout regularization across the spectral bands" + ], + [ + "Question:What is the purpose of the spatial-spectral fusion module in MambaHSI?\nAnswer:", + " To reduce spatial resolution for faster inference" + ], + [ + "Question:What is the purpose of the spatial-spectral fusion module in MambaHSI?\nAnswer:", + " To adaptively fuse the spatial and spectral information based on their importance" + ], + 
[ + "Question:What is the purpose of the spatial-spectral fusion module in MambaHSI?\nAnswer:", + " To convert hyperspectral images into RGB for classification" + ] + ], + "resps": [ + [ + [ + -39.88627624511719, + false + ] + ], + [ + [ + -23.449623107910156, + false + ] + ], + [ + [ + -24.270509719848633, + false + ] + ], + [ + [ + -33.8906135559082, + false + ] + ] + ], + "filtered_resps": [ + [ + -39.88627624511719, + false + ], + [ + -23.449623107910156, + false + ], + [ + -24.270509719848633, + false + ], + [ + -33.8906135559082, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "d133d375fb6230f9b0602ad1df59198919b09e3b6d3fc6d82ce0c93e04cfc676", + "prompt_hash": "f9b6db19174384edcfb225554ab41f5a024edbaf13203dc62a2cb366c55f82c4", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 33, + "doc": { + "question": "Which dataset used in the experiments was acquired using a Headwall Nano-Hyperspec imaging sensor on a DJI Matrice 600 Pro UAV?", + "choices": [ + "Pavia University", + "Houston", + "WHU-Hi-HanChuan", + "WHU-Hi-HongHu" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:Which dataset used in the experiments was acquired using a Headwall Nano-Hyperspec imaging sensor on a DJI Matrice 600 Pro UAV?\nAnswer:", + " Pavia University" + ], + [ + "Question:Which dataset used in the experiments was acquired using a Headwall Nano-Hyperspec imaging sensor on a DJI Matrice 600 Pro UAV?\nAnswer:", + " Houston" + ], + [ + "Question:Which dataset used in the experiments was acquired using a Headwall Nano-Hyperspec imaging sensor on a DJI Matrice 600 Pro UAV?\nAnswer:", + " WHU-Hi-HanChuan" + ], + [ + "Question:Which dataset used in the experiments was acquired using a Headwall Nano-Hyperspec imaging sensor on a DJI Matrice 600 Pro UAV?\nAnswer:", + " WHU-Hi-HongHu" + ] + ], + "resps": [ + [ + [ + -9.408138275146484, + false + ] + ], + [ + [ + -10.154462814331055, + false + ] + ], + [ + [ + -36.05073547363281, + false + ] + ], + [ + [ + -41.54401397705078, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.408138275146484, + false + ], + [ + -10.154462814331055, + false + ], + [ + -36.05073547363281, + false + ], + [ + -41.54401397705078, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "e2885bbce1d81fa6aab9a9a80d3d189bde323125c6c02015d225850009820d4c", + "prompt_hash": "946ca21eb0121687533e64a30df6b4138c9f817ed48c0d5c0f1d846ada337424", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 34, + "doc": { + "question": "How many classes are included in the Houston hyperspectral dataset used in the experiments?", + "choices": [ + "9", + "16", + "22", + "14" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:How many classes are included in the Houston hyperspectral dataset used in the experiments?\nAnswer:", + " 9" + ], + [ + "Question:How many classes are included in the Houston hyperspectral dataset used in the experiments?\nAnswer:", + " 16" + ], + [ + "Question:How many classes are included in the Houston hyperspectral dataset used in the experiments?\nAnswer:", + " 22" + ], + [ + "Question:How many classes are included in the Houston hyperspectral dataset used in the experiments?\nAnswer:", + " 14" + ] + ], + "resps": [ + [ + [ + -4.74687385559082, + false + ] + ], + [ + [ + -5.32486629486084, + 
false + ] + ], + [ + [ + -5.936315059661865, + false + ] + ], + [ + [ + -5.394577980041504, + false + ] + ] + ], + "filtered_resps": [ + [ + -4.74687385559082, + false + ], + [ + -5.32486629486084, + false + ], + [ + -5.936315059661865, + false + ], + [ + -5.394577980041504, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "1838ae62bb0ff5951b7b7f3c1eba72e304cdf3ff32578d7c1f9669a1277d8422", + "prompt_hash": "f04139ad7bf7ffeb0eb8aa5ec4a9cdd207bda35e0da604aa82a145eb493c43c6", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 35, + "doc": { + "question": "What is the total area covered by the DES Y6 Gold wide-area survey footprint?", + "choices": [ + "Approximately 5000 square degrees", + "Approximately 7000 square degrees", + "Approximately 10000 square degrees", + "Approximately 3500 square degrees" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the total area covered by the DES Y6 Gold wide-area survey footprint?\nAnswer:", + " Approximately 5000 square degrees" + ], + [ + "Question:What is the total area covered by the DES Y6 Gold wide-area survey footprint?\nAnswer:", + " Approximately 7000 square degrees" + ], + [ + "Question:What is the total area covered by the DES Y6 Gold wide-area survey footprint?\nAnswer:", + " Approximately 10000 square degrees" + ], + [ + "Question:What is the total area covered by the DES Y6 Gold wide-area survey footprint?\nAnswer:", + " Approximately 3500 square degrees" + ] + ], + "resps": [ + [ + [ + -12.233139038085938, + false + ] + ], + [ + [ + -12.975166320800781, + false + ] + ], + [ + [ + -13.492626190185547, + false + ] + ], + [ + [ + -12.901947021484375, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.233139038085938, + false + ], + [ + -12.975166320800781, + false + ], + [ + -13.492626190185547, + false + ], + [ + -12.901947021484375, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "760366bb1c0e2c5d0f1ba382b67ddbcf2a9bee94a3cdf7a390c2168007e141dd", + "prompt_hash": "1276376381c3a6685b55c9cfbb5cfb01cb4b34d3d78df37c762e9d5288d62dd4", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 36, + "doc": { + "question": "Which magnitude correction coefficient is used for the 'g' band when correcting for interstellar extinction in DES Y6 Gold?", + "choices": [ + "3.186", + "2.140", + "1.569", + "1.048" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which magnitude correction coefficient is used for the 'g' band when correcting for interstellar extinction in DES Y6 Gold?\nAnswer:", + " 3.186" + ], + [ + "Question:Which magnitude correction coefficient is used for the 'g' band when correcting for interstellar extinction in DES Y6 Gold?\nAnswer:", + " 2.140" + ], + [ + "Question:Which magnitude correction coefficient is used for the 'g' band when correcting for interstellar extinction in DES Y6 Gold?\nAnswer:", + " 1.569" + ], + [ + "Question:Which magnitude correction coefficient is used for the 'g' band when correcting for interstellar extinction in DES Y6 Gold?\nAnswer:", + " 1.048" + ] + ], + "resps": [ + [ + [ + -12.510847091674805, + false + ] + ], + [ + [ + -12.662933349609375, + false + ] + ], + [ + [ + -12.489203453063965, + false + ] + ], + [ + [ + -11.289423942565918, + false + ] + ] + ], + "filtered_resps": [ + [ + 
-12.510847091674805, + false + ], + [ + -12.662933349609375, + false + ], + [ + -12.489203453063965, + false + ], + [ + -11.289423942565918, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "fb823d5546e6d00cd71d5cf3002eb41d44998ad3f4f9b222a7a5e8bdf61c4b99", + "prompt_hash": "aceccefb48255c4b8fe744cf7e506e79afcac7d34251224bfc68a004861928f5", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 37, + "doc": { + "question": "What modeling method is used for PSF photometry in the DES Y6 Gold \\u0003fitvd algorithm?", + "choices": [ + "Zero-size point-source model", + "Extended Sersic profile with variable index", + "Gaussian Mixture Model with ten components", + "Power-law profile with variable slope" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What modeling method is used for PSF photometry in the DES Y6 Gold \\u0003fitvd algorithm?\nAnswer:", + " Zero-size point-source model" + ], + [ + "Question:What modeling method is used for PSF photometry in the DES Y6 Gold \\u0003fitvd algorithm?\nAnswer:", + " Extended Sersic profile with variable index" + ], + [ + "Question:What modeling method is used for PSF photometry in the DES Y6 Gold \\u0003fitvd algorithm?\nAnswer:", + " Gaussian Mixture Model with ten components" + ], + [ + "Question:What modeling method is used for PSF photometry in the DES Y6 Gold \\u0003fitvd algorithm?\nAnswer:", + " Power-law profile with variable slope" + ] + ], + "resps": [ + [ + [ + -28.418212890625, + false + ] + ], + [ + [ + -34.0587043762207, + false + ] + ], + [ + [ + -24.72544288635254, + false + ] + ], + [ + [ + -25.261842727661133, + false + ] + ] + ], + "filtered_resps": [ + [ + -28.418212890625, + false + ], + [ + -34.0587043762207, + false + ], + [ + -24.72544288635254, + false + ], + [ + -25.261842727661133, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "6211b34489eb7b1c5bd5f5bb696bf2a489773e69dd4526077ec1ff7f3b85f80e", + "prompt_hash": "7d8efd9da91b78776afe4f53f803b44181d00ba7eafc228ca3e91925ae02037b", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 38, + "doc": { + "question": "Which component is used as the photometric calibration reference star in DES Y6 Gold?", + "choices": [ + "Hubble Space Telescope CalSpec standard star C26202", + "Gaia DR3 standard star G18392", + "SDSS Stripe 82 reference star SDSSJ014", + "VISTA Hemisphere Survey star VHS001" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which component is used as the photometric calibration reference star in DES Y6 Gold?\nAnswer:", + " Hubble Space Telescope CalSpec standard star C26202" + ], + [ + "Question:Which component is used as the photometric calibration reference star in DES Y6 Gold?\nAnswer:", + " Gaia DR3 standard star G18392" + ], + [ + "Question:Which component is used as the photometric calibration reference star in DES Y6 Gold?\nAnswer:", + " SDSS Stripe 82 reference star SDSSJ014" + ], + [ + "Question:Which component is used as the photometric calibration reference star in DES Y6 Gold?\nAnswer:", + " VISTA Hemisphere Survey star VHS001" + ] + ], + "resps": [ + [ + [ + -53.36168670654297, + false + ] + ], + [ + [ + -27.988567352294922, + false + ] + ], + [ + [ + -37.09762191772461, + false + ] + ], + [ + [ + -36.82593536376953, + false + ] + ] + ], + 
"filtered_resps": [ + [ + -53.36168670654297, + false + ], + [ + -27.988567352294922, + false + ], + [ + -37.09762191772461, + false + ], + [ + -36.82593536376953, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "aae765a7d4300bc0b98db398dececf7d045e8a52d324494c18b70af0fc524d02", + "prompt_hash": "90fa208f136060df152c4e678c37834c4714a6b2624d6526562b9974edae8487", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 39, + "doc": { + "question": "What is the total number of DECam exposures included in DES DR2 and Y6 Gold?", + "choices": [ + "72,217", + "84,932", + "45,318", + "60,104" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the total number of DECam exposures included in DES DR2 and Y6 Gold?\nAnswer:", + " 72,217" + ], + [ + "Question:What is the total number of DECam exposures included in DES DR2 and Y6 Gold?\nAnswer:", + " 84,932" + ], + [ + "Question:What is the total number of DECam exposures included in DES DR2 and Y6 Gold?\nAnswer:", + " 45,318" + ], + [ + "Question:What is the total number of DECam exposures included in DES DR2 and Y6 Gold?\nAnswer:", + " 60,104" + ] + ], + "resps": [ + [ + [ + -15.384516716003418, + false + ] + ], + [ + [ + -15.420585632324219, + false + ] + ], + [ + [ + -14.602078437805176, + false + ] + ], + [ + [ + -14.678556442260742, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.384516716003418, + false + ], + [ + -15.420585632324219, + false + ], + [ + -14.602078437805176, + false + ], + [ + -14.678556442260742, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "4d1f785b545ac35d5410aa223b3aae264bc492e4fd1ee0b3e0d6517afc30f1a2", + "prompt_hash": "1e570ae93663e6215b95ced6fad37af432a866317248a803a19c9f0da6c06190", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 40, + "doc": { + "question": "What is the name of the benchmark proposed to evaluate multimodal multi-step reasoning tasks?", + "choices": [ + "VQA-Chain", + "VLM-Reason", + "VRC-Bench", + "MultiStepQA" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the name of the benchmark proposed to evaluate multimodal multi-step reasoning tasks?\nAnswer:", + " VQA-Chain" + ], + [ + "Question:What is the name of the benchmark proposed to evaluate multimodal multi-step reasoning tasks?\nAnswer:", + " VLM-Reason" + ], + [ + "Question:What is the name of the benchmark proposed to evaluate multimodal multi-step reasoning tasks?\nAnswer:", + " VRC-Bench" + ], + [ + "Question:What is the name of the benchmark proposed to evaluate multimodal multi-step reasoning tasks?\nAnswer:", + " MultiStepQA" + ] + ], + "resps": [ + [ + [ + -16.848678588867188, + false + ] + ], + [ + [ + -13.863715171813965, + false + ] + ], + [ + [ + -17.058414459228516, + false + ] + ], + [ + [ + -6.643422603607178, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.848678588867188, + false + ], + [ + -13.863715171813965, + false + ], + [ + -17.058414459228516, + false + ], + [ + -6.643422603607178, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f49d9074782d55a4cb7470bafb5c6fd059a2e0b5d3debacab64bae5906b61854", + "prompt_hash": "4a5c0bd5a79a3f897b9cb74ebb8744267e39db166fdbb8d41e60a4fa71925a5d", + "target_hash": 
"d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 41, + "doc": { + "question": "How many manually verified reasoning steps are included in VRC-Bench?", + "choices": [ + "3,500", + "4,173", + "5,200", + "3,998" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:How many manually verified reasoning steps are included in VRC-Bench?\nAnswer:", + " 3,500" + ], + [ + "Question:How many manually verified reasoning steps are included in VRC-Bench?\nAnswer:", + " 4,173" + ], + [ + "Question:How many manually verified reasoning steps are included in VRC-Bench?\nAnswer:", + " 5,200" + ], + [ + "Question:How many manually verified reasoning steps are included in VRC-Bench?\nAnswer:", + " 3,998" + ] + ], + "resps": [ + [ + [ + -12.997629165649414, + false + ] + ], + [ + [ + -15.097672462463379, + false + ] + ], + [ + [ + -13.911409378051758, + false + ] + ], + [ + [ + -15.563308715820312, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.997629165649414, + false + ], + [ + -15.097672462463379, + false + ], + [ + -13.911409378051758, + false + ], + [ + -15.563308715820312, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "ab029bc7ad364193651f7eafa1a76ce79ef4169c5ca128f8c50ee4c768f63ed5", + "prompt_hash": "aaf9e4ab60af7e986dec67e442603b09898c067e4dc1a58a24f24fd83579e7f2", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 42, + "doc": { + "question": "What is the base model used for training LlamaV-o1?", + "choices": [ + "GPT-4o-mini", + "Llava-CoT", + "Llama-3.2-11B-Vision-Instruct", + "Gemini-1.5-Pro" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the base model used for training LlamaV-o1?\nAnswer:", + " GPT-4o-mini" + ], + [ + "Question:What is the base model used for training LlamaV-o1?\nAnswer:", + " Llava-CoT" + ], + [ + "Question:What is the base model used for training LlamaV-o1?\nAnswer:", + " Llama-3.2-11B-Vision-Instruct" + ], + [ + "Question:What is the base model used for training LlamaV-o1?\nAnswer:", + " Gemini-1.5-Pro" + ] + ], + "resps": [ + [ + [ + -15.453899383544922, + false + ] + ], + [ + [ + -22.55254554748535, + false + ] + ], + [ + [ + -40.223777770996094, + false + ] + ], + [ + [ + -11.26479434967041, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.453899383544922, + false + ], + [ + -22.55254554748535, + false + ], + [ + -40.223777770996094, + false + ], + [ + -11.26479434967041, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "3794d4c9d0dcc848c6bab66c1636d1cd1ce54b5faa82b7d2367ac43f2f2c0e05", + "prompt_hash": "eee8678d456145a2b8c216698eb4fb00373a3903482962a866cb58ababc6b09a", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 43, + "doc": { + "question": "Which metric checks for irrelevant or fabricated reasoning steps?", + "choices": [ + "Informativeness-Step", + "Hallucination", + "Semantic Coverage", + "Reasoning Alignment" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which metric checks for irrelevant or fabricated reasoning steps?\nAnswer:", + " Informativeness-Step" + ], + [ + "Question:Which metric checks for irrelevant or fabricated reasoning steps?\nAnswer:", + " Hallucination" + ], + [ + "Question:Which metric checks for irrelevant or fabricated 
reasoning steps?\nAnswer:", + " Semantic Coverage" + ], + [ + "Question:Which metric checks for irrelevant or fabricated reasoning steps?\nAnswer:", + " Reasoning Alignment" + ] + ], + "resps": [ + [ + [ + -23.075510025024414, + false + ] + ], + [ + [ + -9.403883934020996, + false + ] + ], + [ + [ + -17.089900970458984, + false + ] + ], + [ + [ + -13.730018615722656, + false + ] + ] + ], + "filtered_resps": [ + [ + -23.075510025024414, + false + ], + [ + -9.403883934020996, + false + ], + [ + -17.089900970458984, + false + ], + [ + -13.730018615722656, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "208558ab3d63cf67ef2b796ee97528ff8bd9af26bb61f291dadc8607768c025e", + "prompt_hash": "38553bafe3fe4bbd2d61b2e2bfc7c60d6803c445de407b64e062033f2c8cbf4c", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 44, + "doc": { + "question": "What type of learning strategy is used to train LlamaV-o1 progressively from simple to complex tasks?", + "choices": [ + "Reinforcement Learning", + "Contrastive Learning", + "Curriculum Learning", + "Unsupervised Pretraining" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What type of learning strategy is used to train LlamaV-o1 progressively from simple to complex tasks?\nAnswer:", + " Reinforcement Learning" + ], + [ + "Question:What type of learning strategy is used to train LlamaV-o1 progressively from simple to complex tasks?\nAnswer:", + " Contrastive Learning" + ], + [ + "Question:What type of learning strategy is used to train LlamaV-o1 progressively from simple to complex tasks?\nAnswer:", + " Curriculum Learning" + ], + [ + "Question:What type of learning strategy is used to train LlamaV-o1 progressively from simple to complex tasks?\nAnswer:", + " Unsupervised Pretraining" + ] + ], + "resps": [ + [ + [ + -4.469661235809326, + false + ] + ], + [ + [ + -9.512903213500977, + false + ] + ], + [ + [ + -9.978853225708008, + false + ] + ], + [ + [ + -17.42882537841797, + false + ] + ] + ], + "filtered_resps": [ + [ + -4.469661235809326, + false + ], + [ + -9.512903213500977, + false + ], + [ + -9.978853225708008, + false + ], + [ + -17.42882537841797, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "cc742e6917db8a54e9e779f37121f0dd4a291bac16e92b3d504ad95b69bb36cd", + "prompt_hash": "83742b8f079b8e3873d047b3d288f0cc351323444b85ee9e14c69e1436287314", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 45, + "doc": { + "question": "Which parameterization type is commonly used for image generation in diffusion models?", + "choices": [ + "A. x₀-prediction", + "B. ε-prediction", + "C. v-prediction", + "D. zₜ-prediction" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which parameterization type is commonly used for image generation in diffusion models?\nAnswer:", + " A. x₀-prediction" + ], + [ + "Question:Which parameterization type is commonly used for image generation in diffusion models?\nAnswer:", + " B. ε-prediction" + ], + [ + "Question:Which parameterization type is commonly used for image generation in diffusion models?\nAnswer:", + " C. v-prediction" + ], + [ + "Question:Which parameterization type is commonly used for image generation in diffusion models?\nAnswer:", + " D. 
zₜ-prediction" + ] + ], + "resps": [ + [ + [ + -40.44757080078125, + false + ] + ], + [ + [ + -33.222816467285156, + false + ] + ], + [ + [ + -27.685667037963867, + false + ] + ], + [ + [ + -49.870819091796875, + false + ] + ] + ], + "filtered_resps": [ + [ + -40.44757080078125, + false + ], + [ + -33.222816467285156, + false + ], + [ + -27.685667037963867, + false + ], + [ + -49.870819091796875, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "bdcb2a5306cac39bfd15c10f8bdfcd3eb0e3235eded767ddec9791d1ead52847", + "prompt_hash": "5c780888e8ff88fecc3944fcbca387d3da2260d994c71a1e74dd526b8fadb85b", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 46, + "doc": { + "question": "How many training samples does Lotus use to achieve its reported performance?", + "choices": [ + "A. 59K", + "B. 1.2M", + "C. 250K", + "D. 62.6M" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:How many training samples does Lotus use to achieve its reported performance?\nAnswer:", + " A. 59K" + ], + [ + "Question:How many training samples does Lotus use to achieve its reported performance?\nAnswer:", + " B. 1.2M" + ], + [ + "Question:How many training samples does Lotus use to achieve its reported performance?\nAnswer:", + " C. 250K" + ], + [ + "Question:How many training samples does Lotus use to achieve its reported performance?\nAnswer:", + " D. 62.6M" + ] + ], + "resps": [ + [ + [ + -20.41183853149414, + false + ] + ], + [ + [ + -24.378026962280273, + false + ] + ], + [ + [ + -22.16771125793457, + false + ] + ], + [ + [ + -32.99885559082031, + false + ] + ] + ], + "filtered_resps": [ + [ + -20.41183853149414, + false + ], + [ + -24.378026962280273, + false + ], + [ + -22.16771125793457, + false + ], + [ + -32.99885559082031, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "88ff9460fbcea5468e805f29dea11ddcf9ef68bf5e10dda39ddea33508d5d380", + "prompt_hash": "844a065d7c38f0c7413afe8111feef8efd06cc9c9d71f6bde03819e7d54b6473", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 47, + "doc": { + "question": "What type of prediction is used in the discriminative version of Lotus (Lotus-D)?", + "choices": [ + "A. Uses ε-prediction with noise", + "B. Uses x₀-prediction without noise", + "C. Uses v-prediction with noise", + "D. Uses latent-space image conditioning only" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What type of prediction is used in the discriminative version of Lotus (Lotus-D)?\nAnswer:", + " A. Uses ε-prediction with noise" + ], + [ + "Question:What type of prediction is used in the discriminative version of Lotus (Lotus-D)?\nAnswer:", + " B. Uses x₀-prediction without noise" + ], + [ + "Question:What type of prediction is used in the discriminative version of Lotus (Lotus-D)?\nAnswer:", + " C. Uses v-prediction with noise" + ], + [ + "Question:What type of prediction is used in the discriminative version of Lotus (Lotus-D)?\nAnswer:", + " D. 
Uses latent-space image conditioning only" + ] + ], + "resps": [ + [ + [ + -48.533790588378906, + false + ] + ], + [ + [ + -68.318603515625, + false + ] + ], + [ + [ + -48.11347198486328, + false + ] + ], + [ + [ + -51.81948471069336, + false + ] + ] + ], + "filtered_resps": [ + [ + -48.533790588378906, + false + ], + [ + -68.318603515625, + false + ], + [ + -48.11347198486328, + false + ], + [ + -51.81948471069336, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "dd9d203f53f3b8e692b1909ce6796379f9b1791e6dd80cbea978d40fc469027b", + "prompt_hash": "edcd9a17425e90b0a319a6e586bc5db38481b7a415ee9ec0a3551484f4492a30", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 48, + "doc": { + "question": "What is the primary purpose of the detail preserver module in Lotus?", + "choices": [ + "A. To improve memory efficiency during training", + "B. To enable dual-modality input", + "C. To preserve fine-grained details in dense annotations", + "D. To increase the number of diffusion steps" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the primary purpose of the detail preserver module in Lotus?\nAnswer:", + " A. To improve memory efficiency during training" + ], + [ + "Question:What is the primary purpose of the detail preserver module in Lotus?\nAnswer:", + " B. To enable dual-modality input" + ], + [ + "Question:What is the primary purpose of the detail preserver module in Lotus?\nAnswer:", + " C. To preserve fine-grained details in dense annotations" + ], + [ + "Question:What is the primary purpose of the detail preserver module in Lotus?\nAnswer:", + " D. To increase the number of diffusion steps" + ] + ], + "resps": [ + [ + [ + -26.969858169555664, + false + ] + ], + [ + [ + -36.691349029541016, + false + ] + ], + [ + [ + -41.21318435668945, + false + ] + ], + [ + [ + -36.86516571044922, + false + ] + ] + ], + "filtered_resps": [ + [ + -26.969858169555664, + false + ], + [ + -36.691349029541016, + false + ], + [ + -41.21318435668945, + false + ], + [ + -36.86516571044922, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "4a7ae6d28946cceb57aa90a9c6cc416901faaa37a1b16dcdb7fa7287614dfdfd", + "prompt_hash": "a1d8f4d91b8b49a00e7e4671a1f365b13ae751c0ce0344060659f55ddbd7b4d9", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 49, + "doc": { + "question": "What happens when ε-prediction is used at the initial denoising steps?", + "choices": [ + "A. The model produces less variance and more stable predictions", + "B. The predicted variance is amplified and propagated", + "C. The model skips early steps to reduce variance", + "D. The denoising step converges faster" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What happens when ε-prediction is used at the initial denoising steps?\nAnswer:", + " A. The model produces less variance and more stable predictions" + ], + [ + "Question:What happens when ε-prediction is used at the initial denoising steps?\nAnswer:", + " B. The predicted variance is amplified and propagated" + ], + [ + "Question:What happens when ε-prediction is used at the initial denoising steps?\nAnswer:", + " C. The model skips early steps to reduce variance" + ], + [ + "Question:What happens when ε-prediction is used at the initial denoising steps?\nAnswer:", + " D. 
The denoising step converges faster" + ] + ], + "resps": [ + [ + [ + -38.00743103027344, + false + ] + ], + [ + [ + -42.99990463256836, + false + ] + ], + [ + [ + -40.484310150146484, + false + ] + ], + [ + [ + -27.896730422973633, + false + ] + ] + ], + "filtered_resps": [ + [ + -38.00743103027344, + false + ], + [ + -42.99990463256836, + false + ], + [ + -40.484310150146484, + false + ], + [ + -27.896730422973633, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "3786df5cda14a24d6fdf6e60196d1064ffee000819bd029a45cc8d463520b3d9", + "prompt_hash": "d0ba1a9675287af2ea58b7c36baa8d8b2ae830046e092fe8f8b835e59119bf37", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 50, + "doc": { + "question": "What is the default maximum tree depth used in the self-evolution rounds for math problem solving?", + "choices": [ + "12", + "16", + "20", + "24" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the default maximum tree depth used in the self-evolution rounds for math problem solving?\nAnswer:", + " 12" + ], + [ + "Question:What is the default maximum tree depth used in the self-evolution rounds for math problem solving?\nAnswer:", + " 16" + ], + [ + "Question:What is the default maximum tree depth used in the self-evolution rounds for math problem solving?\nAnswer:", + " 20" + ], + [ + "Question:What is the default maximum tree depth used in the self-evolution rounds for math problem solving?\nAnswer:", + " 24" + ] + ], + "resps": [ + [ + [ + -6.0869550704956055, + false + ] + ], + [ + [ + -6.953391075134277, + false + ] + ], + [ + [ + -5.210151672363281, + false + ] + ], + [ + [ + -7.983536243438721, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.0869550704956055, + false + ], + [ + -6.953391075134277, + false + ], + [ + -5.210151672363281, + false + ], + [ + -7.983536243438721, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "800c7f0b32cd7ad8b229bc3280e1de23af72e70bb58dfd1a9409a0dad352f3e1", + "prompt_hash": "8e56d42f1452ec9c6b466ea3808d628d7487ca361c30e83e8777908118e35346", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 51, + "doc": { + "question": "What learning rate is used for fine-tuning Qwen models in this study?", + "choices": [ + "5e-6", + "6e-6", + "7e-6", + "1e-5" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What learning rate is used for fine-tuning Qwen models in this study?\nAnswer:", + " 5e-6" + ], + [ + "Question:What learning rate is used for fine-tuning Qwen models in this study?\nAnswer:", + " 6e-6" + ], + [ + "Question:What learning rate is used for fine-tuning Qwen models in this study?\nAnswer:", + " 7e-6" + ], + [ + "Question:What learning rate is used for fine-tuning Qwen models in this study?\nAnswer:", + " 1e-5" + ] + ], + "resps": [ + [ + [ + -7.968042850494385, + false + ] + ], + [ + [ + -10.571170806884766, + false + ] + ], + [ + [ + -10.910438537597656, + false + ] + ], + [ + [ + -6.601953506469727, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.968042850494385, + false + ], + [ + -10.571170806884766, + false + ], + [ + -10.910438537597656, + false + ], + [ + -6.601953506469727, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "54afa871339ebb9b9c3054b084eb9c1363694a2ef93b2d3668a4005217c21c11", + 
"prompt_hash": "65d5f04256731d866019a2a49ee95c22d0fd40b44af6335b4e9f2d22b0da4791", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 52, + "doc": { + "question": "Which reward model uses pairwise ranking loss in its training?", + "choices": [ + "PPM", + "PQM", + "ORM", + "SLM" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which reward model uses pairwise ranking loss in its training?\nAnswer:", + " PPM" + ], + [ + "Question:Which reward model uses pairwise ranking loss in its training?\nAnswer:", + " PQM" + ], + [ + "Question:Which reward model uses pairwise ranking loss in its training?\nAnswer:", + " ORM" + ], + [ + "Question:Which reward model uses pairwise ranking loss in its training?\nAnswer:", + " SLM" + ] + ], + "resps": [ + [ + [ + -14.611196517944336, + false + ] + ], + [ + [ + -15.033802032470703, + false + ] + ], + [ + [ + -14.595401763916016, + false + ] + ], + [ + [ + -13.409589767456055, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.611196517944336, + false + ], + [ + -15.033802032470703, + false + ], + [ + -14.595401763916016, + false + ], + [ + -13.409589767456055, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "49c8a7815d94169375ffb8484392b18bbdfa3b9405ce36f0aee8fe96010acfb9", + "prompt_hash": "49dde6a7ddf4c265f80016da4ff826649fbb6d58a6a1303de1db86e62f3314ec", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 53, + "doc": { + "question": "What was the total GPU setup for the bootstrap round using DeepSeek-Coder-v2-Instruct?", + "choices": [ + "5 nodes of 4×80GB", + "10 nodes of 8×80GB", + "15 nodes of 4×40GB", + "12 nodes of 8×40GB" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What was the total GPU setup for the bootstrap round using DeepSeek-Coder-v2-Instruct?\nAnswer:", + " 5 nodes of 4×80GB" + ], + [ + "Question:What was the total GPU setup for the bootstrap round using DeepSeek-Coder-v2-Instruct?\nAnswer:", + " 10 nodes of 8×80GB" + ], + [ + "Question:What was the total GPU setup for the bootstrap round using DeepSeek-Coder-v2-Instruct?\nAnswer:", + " 15 nodes of 4×40GB" + ], + [ + "Question:What was the total GPU setup for the bootstrap round using DeepSeek-Coder-v2-Instruct?\nAnswer:", + " 12 nodes of 8×40GB" + ] + ], + "resps": [ + [ + [ + -29.78156089782715, + false + ] + ], + [ + [ + -29.15294075012207, + false + ] + ], + [ + [ + -32.71136474609375, + false + ] + ], + [ + [ + -29.856426239013672, + false + ] + ] + ], + "filtered_resps": [ + [ + -29.78156089782715, + false + ], + [ + -29.15294075012207, + false + ], + [ + -32.71136474609375, + false + ], + [ + -29.856426239013672, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "4d6cca403c78982916e8fcbbd83f5e540b683e23b4a63fe462e5d3ecd944277b", + "prompt_hash": "807ec96a1d77362088976d759e387ea45c93d3c3811b14b0f08df4cbe0a729b1", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 54, + "doc": { + "question": "Which benchmark required the highest average number of tokens during inference, as shown in the paper?", + "choices": [ + "AIME 2024", + "MATH", + "Olympiad Bench", + "AMC 2023" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which benchmark required the highest average number of 
tokens during inference, as shown in the paper?\nAnswer:", + " AIME 2024" + ], + [ + "Question:Which benchmark required the highest average number of tokens during inference, as shown in the paper?\nAnswer:", + " MATH" + ], + [ + "Question:Which benchmark required the highest average number of tokens during inference, as shown in the paper?\nAnswer:", + " Olympiad Bench" + ], + [ + "Question:Which benchmark required the highest average number of tokens during inference, as shown in the paper?\nAnswer:", + " AMC 2023" + ] + ], + "resps": [ + [ + [ + -21.254226684570312, + false + ] + ], + [ + [ + -9.348733901977539, + false + ] + ], + [ + [ + -22.08643341064453, + false + ] + ], + [ + [ + -21.39059829711914, + false + ] + ] + ], + "filtered_resps": [ + [ + -21.254226684570312, + false + ], + [ + -9.348733901977539, + false + ], + [ + -22.08643341064453, + false + ], + [ + -21.39059829711914, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f8bf812cf01b159edc3acbd279319be5e5b3446f4f103ceeb836283b365baab7", + "prompt_hash": "76cb13e84f4c67a72a12d0297e71d697f0ac713ebf7dc305bd7e46b85a436ce0", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 55, + "doc": { + "question": "What benchmark did the paper identify as being mostly solved by models like GPT-4o and Claude?", + "choices": [ + "GSM8k", + "Hendrycks MATH Levels 1-3", + "Omni-MATH", + "HARP" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What benchmark did the paper identify as being mostly solved by models like GPT-4o and Claude?\nAnswer:", + " GSM8k" + ], + [ + "Question:What benchmark did the paper identify as being mostly solved by models like GPT-4o and Claude?\nAnswer:", + " Hendrycks MATH Levels 1-3" + ], + [ + "Question:What benchmark did the paper identify as being mostly solved by models like GPT-4o and Claude?\nAnswer:", + " Omni-MATH" + ], + [ + "Question:What benchmark did the paper identify as being mostly solved by models like GPT-4o and Claude?\nAnswer:", + " HARP" + ] + ], + "resps": [ + [ + [ + -9.384391784667969, + false + ] + ], + [ + [ + -42.24859619140625, + false + ] + ], + [ + [ + -26.777854919433594, + false + ] + ], + [ + [ + -15.270162582397461, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.384391784667969, + false + ], + [ + -42.24859619140625, + false + ], + [ + -26.777854919433594, + false + ], + [ + -15.270162582397461, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b7607a1f58dd0ff61eaad155541032acca0fe44a2832e5f5ec691925e40af001", + "prompt_hash": "fcf8808346cefa46360238ef422ec517fb052d71605a6a782284cf9a13a5b97f", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 56, + "doc": { + "question": "What concept is introduced as a generalization of Chain-of-Thought for modeling latent reasoning?", + "choices": [ + "System 2 CoT", + "Hierarchical Prompting", + "Meta-CoT", + "Reflexive Reasoning" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What concept is introduced as a generalization of Chain-of-Thought for modeling latent reasoning?\nAnswer:", + " System 2 CoT" + ], + [ + "Question:What concept is introduced as a generalization of Chain-of-Thought for modeling latent reasoning?\nAnswer:", + " Hierarchical Prompting" + ], + [ + "Question:What concept is introduced as a generalization of 
Chain-of-Thought for modeling latent reasoning?\nAnswer:", + " Meta-CoT" + ], + [ + "Question:What concept is introduced as a generalization of Chain-of-Thought for modeling latent reasoning?\nAnswer:", + " Reflexive Reasoning" + ] + ], + "resps": [ + [ + [ + -20.35006332397461, + false + ] + ], + [ + [ + -14.312450408935547, + false + ] + ], + [ + [ + -14.631093978881836, + false + ] + ], + [ + [ + -10.159168243408203, + false + ] + ] + ], + "filtered_resps": [ + [ + -20.35006332397461, + false + ], + [ + -14.312450408935547, + false + ], + [ + -14.631093978881836, + false + ], + [ + -10.159168243408203, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "d82135d6cba018718c9aeaaf8d337401289d8e5b8c3acc4289de4105f50dae27", + "prompt_hash": "36c13037c05d4f84c743d6955a6d83065962bef50e3857b2f40727e8f593883d", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 57, + "doc": { + "question": "Which theoretical framework does Meta-CoT draw inspiration from?", + "choices": [ + "Dual-process theory from Cognitive Science", + "Theory of Computation", + "Game Theory", + "Information Theory" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which theoretical framework does Meta-CoT draw inspiration from?\nAnswer:", + " Dual-process theory from Cognitive Science" + ], + [ + "Question:Which theoretical framework does Meta-CoT draw inspiration from?\nAnswer:", + " Theory of Computation" + ], + [ + "Question:Which theoretical framework does Meta-CoT draw inspiration from?\nAnswer:", + " Game Theory" + ], + [ + "Question:Which theoretical framework does Meta-CoT draw inspiration from?\nAnswer:", + " Information Theory" + ] + ], + "resps": [ + [ + [ + -22.927265167236328, + false + ] + ], + [ + [ + -12.723213195800781, + false + ] + ], + [ + [ + -14.483650207519531, + false + ] + ], + [ + [ + -11.9994478225708, + false + ] + ] + ], + "filtered_resps": [ + [ + -22.927265167236328, + false + ], + [ + -12.723213195800781, + false + ], + [ + -14.483650207519531, + false + ], + [ + -11.9994478225708, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "22c72c05e6d3068fa31abb6d36600f1f8a22f6bba52be443b92256419c2368eb", + "prompt_hash": "75cd67bb7118c915a89d85a02cfb0fbd36a8cc06fd89f0da96b1a4531da8cfad", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 58, + "doc": { + "question": "What is the reward value assigned to a correct solution in the MDP formulation described in the paper?", + "choices": [ + "-1", + "0", + "0.5", + "1" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What is the reward value assigned to a correct solution in the MDP formulation described in the paper?\nAnswer:", + " -1" + ], + [ + "Question:What is the reward value assigned to a correct solution in the MDP formulation described in the paper?\nAnswer:", + " 0" + ], + [ + "Question:What is the reward value assigned to a correct solution in the MDP formulation described in the paper?\nAnswer:", + " 0.5" + ], + [ + "Question:What is the reward value assigned to a correct solution in the MDP formulation described in the paper?\nAnswer:", + " 1" + ] + ], + "resps": [ + [ + [ + -6.114413261413574, + false + ] + ], + [ + [ + -3.182579517364502, + false + ] + ], + [ + [ + -6.184215068817139, + false + ] + ], + [ + [ + -2.7441458702087402, + false + ] 
+ ] + ], + "filtered_resps": [ + [ + -6.114413261413574, + false + ], + [ + -3.182579517364502, + false + ], + [ + -6.184215068817139, + false + ], + [ + -2.7441458702087402, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "daccd138152fa3660e17d99531ed57ab16f78931bdd8ab5f9f484da037d258db", + "prompt_hash": "c73c924fbb4119640eacaaf7fc282e21f239c938ad8bd82d524b66bd340efd24", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 59, + "doc": { + "question": "According to the scaling law hypothesis in the paper, what three components govern performance on complex reasoning tasks?", + "choices": [ + "Model size, dataset quality, and pretraining loss", + "Training data, inference-time compute, and instruction tuning", + "Model size, training data (compute), and inference-time compute", + "Prompt quality, alignment tuning, and data diversity" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:According to the scaling law hypothesis in the paper, what three components govern performance on complex reasoning tasks?\nAnswer:", + " Model size, dataset quality, and pretraining loss" + ], + [ + "Question:According to the scaling law hypothesis in the paper, what three components govern performance on complex reasoning tasks?\nAnswer:", + " Training data, inference-time compute, and instruction tuning" + ], + [ + "Question:According to the scaling law hypothesis in the paper, what three components govern performance on complex reasoning tasks?\nAnswer:", + " Model size, training data (compute), and inference-time compute" + ], + [ + "Question:According to the scaling law hypothesis in the paper, what three components govern performance on complex reasoning tasks?\nAnswer:", + " Prompt quality, alignment tuning, and data diversity" + ] + ], + "resps": [ + [ + [ + -32.06525421142578, + false + ] + ], + [ + [ + -33.734771728515625, + false + ] + ], + [ + [ + -47.407806396484375, + false + ] + ], + [ + [ + -39.53954315185547, + false + ] + ] + ], + "filtered_resps": [ + [ + -32.06525421142578, + false + ], + [ + -33.734771728515625, + false + ], + [ + -47.407806396484375, + false + ], + [ + -39.53954315185547, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "159836df9c4f73cd3009a29ec4b8941010c8a5ebcfa4dff1037083f566dc7353", + "prompt_hash": "5d7b828f6fa4137c020ef5362dcc6a07274c8fa64e6abd14d5a70074996a69a5", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 60, + "doc": { + "question": "What sampling resolution is used by PixArt-Σ in text-conditioned generation?", + "choices": [ + "256", + "512", + "768", + "1024" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What sampling resolution is used by PixArt-Σ in text-conditioned generation?\nAnswer:", + " 256" + ], + [ + "Question:What sampling resolution is used by PixArt-Σ in text-conditioned generation?\nAnswer:", + " 512" + ], + [ + "Question:What sampling resolution is used by PixArt-Σ in text-conditioned generation?\nAnswer:", + " 768" + ], + [ + "Question:What sampling resolution is used by PixArt-Σ in text-conditioned generation?\nAnswer:", + " 1024" + ] + ], + "resps": [ + [ + [ + -4.888120651245117, + false + ] + ], + [ + [ + -5.437291622161865, + false + ] + ], + [ + [ + -8.039185523986816, + false + ] + ], + [ + [ + -5.633706092834473, + false + ] + 
] + ], + "filtered_resps": [ + [ + -4.888120651245117, + false + ], + [ + -5.437291622161865, + false + ], + [ + -8.039185523986816, + false + ], + [ + -5.633706092834473, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "97970f6d8f55383bf53b7ea1c2a7f6598e49b38490cb21931965128928d99680", + "prompt_hash": "8d1205772c1e4887ae735da29fb965b85694e64a5b3d66d7d983d2039f1e9d7b", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 61, + "doc": { + "question": "What search algorithm uses a pivot noise and samples in its neighborhood without gradient information?", + "choices": [ + "Random Search", + "Zero-Order Search", + "First-Order Search", + "Greedy Search" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What search algorithm uses a pivot noise and samples in its neighborhood without gradient information?\nAnswer:", + " Random Search" + ], + [ + "Question:What search algorithm uses a pivot noise and samples in its neighborhood without gradient information?\nAnswer:", + " Zero-Order Search" + ], + [ + "Question:What search algorithm uses a pivot noise and samples in its neighborhood without gradient information?\nAnswer:", + " First-Order Search" + ], + [ + "Question:What search algorithm uses a pivot noise and samples in its neighborhood without gradient information?\nAnswer:", + " Greedy Search" + ] + ], + "resps": [ + [ + [ + -7.393685340881348, + false + ] + ], + [ + [ + -16.32645606994629, + false + ] + ], + [ + [ + -17.336776733398438, + false + ] + ], + [ + [ + -9.410146713256836, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.393685340881348, + false + ], + [ + -16.32645606994629, + false + ], + [ + -17.336776733398438, + false + ], + [ + -9.410146713256836, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "357a3b5c14062a599863360c517c0c36af21071861d1d30d8642005017926ee0", + "prompt_hash": "3a6b0cd595528e1087d58a3792b07f8603504acbe6876de34117950e5d8447f3", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 62, + "doc": { + "question": "Which verifier computes scores by comparing the cosine similarity of visual and text features, and rescales them?", + "choices": [ + "Aesthetic", + "CLIPScore", + "ImageReward", + "Verifier Ensemble" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which verifier computes scores by comparing the cosine similarity of visual and text features, and rescales them?\nAnswer:", + " Aesthetic" + ], + [ + "Question:Which verifier computes scores by comparing the cosine similarity of visual and text features, and rescales them?\nAnswer:", + " CLIPScore" + ], + [ + "Question:Which verifier computes scores by comparing the cosine similarity of visual and text features, and rescales them?\nAnswer:", + " ImageReward" + ], + [ + "Question:Which verifier computes scores by comparing the cosine similarity of visual and text features, and rescales them?\nAnswer:", + " Verifier Ensemble" + ] + ], + "resps": [ + [ + [ + -12.820409774780273, + false + ] + ], + [ + [ + -9.78781795501709, + false + ] + ], + [ + [ + -13.850737571716309, + false + ] + ], + [ + [ + -17.897645950317383, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.820409774780273, + false + ], + [ + -9.78781795501709, + false + ], + [ + -13.850737571716309, + false + ], + [ + -17.897645950317383, + 
false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "d943fede6f4223c92f39b95e19cabe457a468968f4425bb33b07615bf0615b05", + "prompt_hash": "72550d78187553582db0694b36f24c2cf00fa605ba169bbb8f1383db3f4687bc", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 63, + "doc": { + "question": "What is the main drawback of excessive search against a verifier like DINO or CLIP on ImageNet?", + "choices": [ + "It increases diversity but lowers sample quality", + "It causes mode collapse and increases FID", + "It improves all metrics uniformly", + "It reduces the need for classifier-free guidance" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the main drawback of excessive search against a verifier like DINO or CLIP on ImageNet?\nAnswer:", + " It increases diversity but lowers sample quality" + ], + [ + "Question:What is the main drawback of excessive search against a verifier like DINO or CLIP on ImageNet?\nAnswer:", + " It causes mode collapse and increases FID" + ], + [ + "Question:What is the main drawback of excessive search against a verifier like DINO or CLIP on ImageNet?\nAnswer:", + " It improves all metrics uniformly" + ], + [ + "Question:What is the main drawback of excessive search against a verifier like DINO or CLIP on ImageNet?\nAnswer:", + " It reduces the need for classifier-free guidance" + ] + ], + "resps": [ + [ + [ + -28.893138885498047, + false + ] + ], + [ + [ + -33.32884979248047, + false + ] + ], + [ + [ + -29.780744552612305, + false + ] + ], + [ + [ + -24.765605926513672, + false + ] + ] + ], + "filtered_resps": [ + [ + -28.893138885498047, + false + ], + [ + -33.32884979248047, + false + ], + [ + -29.780744552612305, + false + ], + [ + -24.765605926513672, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "66cb58bfc739e7209395b48f1f60b50c7cd403a232ed87e6b21afe306162572e", + "prompt_hash": "bffa743545eee44620a51d1937ccd6a7a38c78ee5e35b3afb4289e5623c9f710", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 64, + "doc": { + "question": "Which method enables gradient descent on noise during sampling by reducing memory cost?", + "choices": [ + "Random Search", + "Search Over Paths", + "Gradient Checkpointing", + "Classifier-Free Guidance" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which method enables gradient descent on noise during sampling by reducing memory cost?\nAnswer:", + " Random Search" + ], + [ + "Question:Which method enables gradient descent on noise during sampling by reducing memory cost?\nAnswer:", + " Search Over Paths" + ], + [ + "Question:Which method enables gradient descent on noise during sampling by reducing memory cost?\nAnswer:", + " Gradient Checkpointing" + ], + [ + "Question:Which method enables gradient descent on noise during sampling by reducing memory cost?\nAnswer:", + " Classifier-Free Guidance" + ] + ], + "resps": [ + [ + [ + -11.641936302185059, + false + ] + ], + [ + [ + -25.17833709716797, + false + ] + ], + [ + [ + -12.970060348510742, + false + ] + ], + [ + [ + -15.819764137268066, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.641936302185059, + false + ], + [ + -25.17833709716797, + false + ], + [ + -12.970060348510742, + false + ], + [ + -15.819764137268066, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", 
+ "acc_norm" + ], + "doc_hash": "ccb6fb769cd18ca7b53dae771a5727a281fe06ce1d2f430a267405324a0f9c91", + "prompt_hash": "2e1663f5c49e461573230553cb5b952dc9680ce3d0736bd8847c48a1ef72c907", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 65, + "doc": { + "question": "Which model achieves 2,029 Elo rating on Codeforces, outperforming 96.3% of human participants?", + "choices": [ + "OpenAI-o1-1217", + "DeepSeek-V3", + "DeepSeek-R1", + "Claude-Sonnet-3.5" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which model achieves 2,029 Elo rating on Codeforces, outperforming 96.3% of human participants?\nAnswer:", + " OpenAI-o1-1217" + ], + [ + "Question:Which model achieves 2,029 Elo rating on Codeforces, outperforming 96.3% of human participants?\nAnswer:", + " DeepSeek-V3" + ], + [ + "Question:Which model achieves 2,029 Elo rating on Codeforces, outperforming 96.3% of human participants?\nAnswer:", + " DeepSeek-R1" + ], + [ + "Question:Which model achieves 2,029 Elo rating on Codeforces, outperforming 96.3% of human participants?\nAnswer:", + " Claude-Sonnet-3.5" + ] + ], + "resps": [ + [ + [ + -41.88161849975586, + false + ] + ], + [ + [ + -18.81478500366211, + false + ] + ], + [ + [ + -25.116798400878906, + false + ] + ], + [ + [ + -23.355180740356445, + false + ] + ] + ], + "filtered_resps": [ + [ + -41.88161849975586, + false + ], + [ + -18.81478500366211, + false + ], + [ + -25.116798400878906, + false + ], + [ + -23.355180740356445, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "12f635897625f78e42722d6e00b292a1329bb5992e8237414d1c800e2c747446", + "prompt_hash": "dc10c75cd76d40ab92ce60278ccb889c7f3c23e805ff127830e5a6d7a91352c5", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 66, + "doc": { + "question": "What base model is used to develop DeepSeek-R1?", + "choices": [ + "Qwen2.5-32B", + "DeepSeek-V3-Base", + "GPT-4o", + "LLaMA-3" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What base model is used to develop DeepSeek-R1?\nAnswer:", + " Qwen2.5-32B" + ], + [ + "Question:What base model is used to develop DeepSeek-R1?\nAnswer:", + " DeepSeek-V3-Base" + ], + [ + "Question:What base model is used to develop DeepSeek-R1?\nAnswer:", + " GPT-4o" + ], + [ + "Question:What base model is used to develop DeepSeek-R1?\nAnswer:", + " LLaMA-3" + ] + ], + "resps": [ + [ + [ + -23.438631057739258, + false + ] + ], + [ + [ + -12.684707641601562, + false + ] + ], + [ + [ + -9.185466766357422, + false + ] + ], + [ + [ + -10.656867027282715, + false + ] + ] + ], + "filtered_resps": [ + [ + -23.438631057739258, + false + ], + [ + -12.684707641601562, + false + ], + [ + -9.185466766357422, + false + ], + [ + -10.656867027282715, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "830b53bf691311178035303428a1bc6422c64e1b305e3ccff7febbc55cc5a938", + "prompt_hash": "1c46372fd028656f0d94cd8b52585ed208858a18344c1e95f3855aec6cc90230", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 67, + "doc": { + "question": "What method is used in DeepSeek-R1 to estimate the baseline without a critic model?", + "choices": [ + "PPO", + "KL divergence", + "Group Relative Policy Optimization (GRPO)", + "Monte Carlo Tree Search (MCTS)" + 
], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What method is used in DeepSeek-R1 to estimate the baseline without a critic model?\nAnswer:", + " PPO" + ], + [ + "Question:What method is used in DeepSeek-R1 to estimate the baseline without a critic model?\nAnswer:", + " KL divergence" + ], + [ + "Question:What method is used in DeepSeek-R1 to estimate the baseline without a critic model?\nAnswer:", + " Group Relative Policy Optimization (GRPO)" + ], + [ + "Question:What method is used in DeepSeek-R1 to estimate the baseline without a critic model?\nAnswer:", + " Monte Carlo Tree Search (MCTS)" + ] + ], + "resps": [ + [ + [ + -6.6992340087890625, + false + ] + ], + [ + [ + -8.444036483764648, + false + ] + ], + [ + [ + -26.221853256225586, + false + ] + ], + [ + [ + -15.762158393859863, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.6992340087890625, + false + ], + [ + -8.444036483764648, + false + ], + [ + -26.221853256225586, + false + ], + [ + -15.762158393859863, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b2da82c32ba8294928839d6e6c8d24f391d9b9539a533e50ab6f9e67898edcec", + "prompt_hash": "9835b440f352329d56947d3df92af42b7df5090cff13c18de40dd239152b41ac", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 68, + "doc": { + "question": "Which distilled model achieves 72.6% on AIME 2024?", + "choices": [ + "DeepSeek-R1-Distill-Qwen-7B", + "DeepSeek-R1-Distill-Qwen-14B", + "DeepSeek-R1-Distill-Qwen-32B", + "DeepSeek-R1-Distill-Qwen-1.5B" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which distilled model achieves 72.6% on AIME 2024?\nAnswer:", + " DeepSeek-R1-Distill-Qwen-7B" + ], + [ + "Question:Which distilled model achieves 72.6% on AIME 2024?\nAnswer:", + " DeepSeek-R1-Distill-Qwen-14B" + ], + [ + "Question:Which distilled model achieves 72.6% on AIME 2024?\nAnswer:", + " DeepSeek-R1-Distill-Qwen-32B" + ], + [ + "Question:Which distilled model achieves 72.6% on AIME 2024?\nAnswer:", + " DeepSeek-R1-Distill-Qwen-1.5B" + ] + ], + "resps": [ + [ + [ + -36.866294860839844, + false + ] + ], + [ + [ + -39.773773193359375, + false + ] + ], + [ + [ + -42.2109375, + false + ] + ], + [ + [ + -45.068634033203125, + false + ] + ] + ], + "filtered_resps": [ + [ + -36.866294860839844, + false + ], + [ + -39.773773193359375, + false + ], + [ + -42.2109375, + false + ], + [ + -45.068634033203125, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "94fbc30c8d5f15538ae62b1b0b1d19ed80bdd98942645eb62fd8aefdbb0d8c64", + "prompt_hash": "08b5360c1d40bcc884fedc3389ee606454b71235337d0a0d7af2a0bac79ade0e", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 69, + "doc": { + "question": "Which token format does DeepSeek-R1 use to enclose the reasoning process during training?", + "choices": [ + "[REASON] and [/REASON]", + " and ", + " and ", + "[THINK] and [/THINK]" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which token format does DeepSeek-R1 use to enclose the reasoning process during training?\nAnswer:", + " [REASON] and [/REASON]" + ], + [ + "Question:Which token format does DeepSeek-R1 use to enclose the reasoning process during training?\nAnswer:", + " and " + ], + [ + "Question:Which token format does DeepSeek-R1 use to enclose the reasoning process during 
training?\nAnswer:", + " and " + ], + [ + "Question:Which token format does DeepSeek-R1 use to enclose the reasoning process during training?\nAnswer:", + " [THINK] and [/THINK]" + ] + ], + "resps": [ + [ + [ + -35.707176208496094, + false + ] + ], + [ + [ + -25.014892578125, + false + ] + ], + [ + [ + -28.014991760253906, + false + ] + ], + [ + [ + -32.550506591796875, + false + ] + ] + ], + "filtered_resps": [ + [ + -35.707176208496094, + false + ], + [ + -25.014892578125, + false + ], + [ + -28.014991760253906, + false + ], + [ + -32.550506591796875, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "34f46444412900bed38bc52bb80ad9ccfa5f235245b958c94c219766c3bd820c", + "prompt_hash": "59420b6f9ad70812e0273f197b5a199808b0a46ccbeba15fd6136f297f35a279", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 70, + "doc": { + "question": "What score did Janus-Pro-7B achieve on the MMBench benchmark?", + "choices": [ + "69.4", + "75.2", + "79.2", + "68.9" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What score did Janus-Pro-7B achieve on the MMBench benchmark?\nAnswer:", + " 69.4" + ], + [ + "Question:What score did Janus-Pro-7B achieve on the MMBench benchmark?\nAnswer:", + " 75.2" + ], + [ + "Question:What score did Janus-Pro-7B achieve on the MMBench benchmark?\nAnswer:", + " 79.2" + ], + [ + "Question:What score did Janus-Pro-7B achieve on the MMBench benchmark?\nAnswer:", + " 68.9" + ] + ], + "resps": [ + [ + [ + -8.975870132446289, + false + ] + ], + [ + [ + -8.504131317138672, + false + ] + ], + [ + [ + -8.818510055541992, + false + ] + ], + [ + [ + -8.981317520141602, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.975870132446289, + false + ], + [ + -8.504131317138672, + false + ], + [ + -8.818510055541992, + false + ], + [ + -8.981317520141602, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b2201c41f25906adcc32e575d556ec75c843fd5382a1dbbb4424ede976f2d279", + "prompt_hash": "e4deea529e6cebf2be6a0965ffe863d1cdb6f6c1ee616a49e6d70b1a33221159", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 71, + "doc": { + "question": "What vision encoder is used in Janus-Pro for multimodal understanding tasks?", + "choices": [ + "SigLIP-Large-Patch16-384", + "BLIP-2", + "ViT-B/16", + "Swin Transformer" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What vision encoder is used in Janus-Pro for multimodal understanding tasks?\nAnswer:", + " SigLIP-Large-Patch16-384" + ], + [ + "Question:What vision encoder is used in Janus-Pro for multimodal understanding tasks?\nAnswer:", + " BLIP-2" + ], + [ + "Question:What vision encoder is used in Janus-Pro for multimodal understanding tasks?\nAnswer:", + " ViT-B/16" + ], + [ + "Question:What vision encoder is used in Janus-Pro for multimodal understanding tasks?\nAnswer:", + " Swin Transformer" + ] + ], + "resps": [ + [ + [ + -35.78590393066406, + false + ] + ], + [ + [ + -6.9924750328063965, + false + ] + ], + [ + [ + -9.447872161865234, + false + ] + ], + [ + [ + -11.09534740447998, + false + ] + ] + ], + "filtered_resps": [ + [ + -35.78590393066406, + false + ], + [ + -6.9924750328063965, + false + ], + [ + -9.447872161865234, + false + ], + [ + -11.09534740447998, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + 
"doc_hash": "e31fb9041b3ed83733d7ab2fef2e800237d29146168e7685a4d0e3b5a2062fd1", + "prompt_hash": "368837545e870bb25b65ae2fb5633fd6c56ec6fc633dde24daa17aa1384e37c0", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 72, + "doc": { + "question": "What is the context window size for both Janus-Pro-1B and Janus-Pro-7B?", + "choices": [ + "2048", + "8192", + "4096", + "1024" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the context window size for both Janus-Pro-1B and Janus-Pro-7B?\nAnswer:", + " 2048" + ], + [ + "Question:What is the context window size for both Janus-Pro-1B and Janus-Pro-7B?\nAnswer:", + " 8192" + ], + [ + "Question:What is the context window size for both Janus-Pro-1B and Janus-Pro-7B?\nAnswer:", + " 4096" + ], + [ + "Question:What is the context window size for both Janus-Pro-1B and Janus-Pro-7B?\nAnswer:", + " 1024" + ] + ], + "resps": [ + [ + [ + -4.942681312561035, + false + ] + ], + [ + [ + -5.659657955169678, + false + ] + ], + [ + [ + -5.5945634841918945, + false + ] + ], + [ + [ + -4.923760414123535, + false + ] + ] + ], + "filtered_resps": [ + [ + -4.942681312561035, + false + ], + [ + -5.659657955169678, + false + ], + [ + -5.5945634841918945, + false + ], + [ + -4.923760414123535, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "81611cf017515f09c3ec9e1e5155afb245d7d7ec751eef1606116a4a7a2c8183", + "prompt_hash": "19f05377c6db8c73c7a629329ce9acb3393ed0f98957bf9f3bb59cf39ecbad18", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 73, + "doc": { + "question": "What is the optimizer used to train Janus-Pro?", + "choices": [ + "SGD", + "Adam", + "RMSProp", + "AdamW" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What is the optimizer used to train Janus-Pro?\nAnswer:", + " SGD" + ], + [ + "Question:What is the optimizer used to train Janus-Pro?\nAnswer:", + " Adam" + ], + [ + "Question:What is the optimizer used to train Janus-Pro?\nAnswer:", + " RMSProp" + ], + [ + "Question:What is the optimizer used to train Janus-Pro?\nAnswer:", + " AdamW" + ] + ], + "resps": [ + [ + [ + -5.474011421203613, + false + ] + ], + [ + [ + -2.1715750694274902, + false + ] + ], + [ + [ + -7.099437236785889, + false + ] + ], + [ + [ + -3.740978717803955, + false + ] + ] + ], + "filtered_resps": [ + [ + -5.474011421203613, + false + ], + [ + -2.1715750694274902, + false + ], + [ + -7.099437236785889, + false + ], + [ + -3.740978717803955, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f4bbf766db06865279ba3415fed33c24892caf06f882a57bb85fb256ad305048", + "prompt_hash": "8562fd057d4b35d3d349c1d6cab66dd07358bf6535936e8013c49fc2d198d3b1", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 74, + "doc": { + "question": "Which benchmark did Janus-Pro-7B outperform DALL-E 3 and SD3-Medium in?", + "choices": [ + "MMBench", + "GenEval", + "GQA", + "MMMU" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which benchmark did Janus-Pro-7B outperform DALL-E 3 and SD3-Medium in?\nAnswer:", + " MMBench" + ], + [ + "Question:Which benchmark did Janus-Pro-7B outperform DALL-E 3 and SD3-Medium in?\nAnswer:", + " GenEval" + ], + [ + "Question:Which benchmark did Janus-Pro-7B 
outperform DALL-E 3 and SD3-Medium in?\nAnswer:", + " GQA" + ], + [ + "Question:Which benchmark did Janus-Pro-7B outperform DALL-E 3 and SD3-Medium in?\nAnswer:", + " MMMU" + ] + ], + "resps": [ + [ + [ + -13.650606155395508, + false + ] + ], + [ + [ + -14.764251708984375, + false + ] + ], + [ + [ + -10.909505844116211, + false + ] + ], + [ + [ + -15.465291976928711, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.650606155395508, + false + ], + [ + -14.764251708984375, + false + ], + [ + -10.909505844116211, + false + ], + [ + -15.465291976928711, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "01df0a0c6003a0a1e26d89da2d52bfda816fde996d530f584115625e060569d0", + "prompt_hash": "92b90be18377ba553ebd74eff19379547879463d09ad0346e6a83ed4491ae419", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 75, + "doc": { + "question": "Which tokenization method is used in FAST to compress robot action trajectories?", + "choices": [ + "Principal Component Analysis (PCA)", + "Byte-Pair Encoding (BPE) followed by Discrete Cosine Transform (DCT)", + "Discrete Cosine Transform (DCT) followed by Byte-Pair Encoding (BPE)", + "Vector Quantization followed by Huffman Encoding" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which tokenization method is used in FAST to compress robot action trajectories?\nAnswer:", + " Principal Component Analysis (PCA)" + ], + [ + "Question:Which tokenization method is used in FAST to compress robot action trajectories?\nAnswer:", + " Byte-Pair Encoding (BPE) followed by Discrete Cosine Transform (DCT)" + ], + [ + "Question:Which tokenization method is used in FAST to compress robot action trajectories?\nAnswer:", + " Discrete Cosine Transform (DCT) followed by Byte-Pair Encoding (BPE)" + ], + [ + "Question:Which tokenization method is used in FAST to compress robot action trajectories?\nAnswer:", + " Vector Quantization followed by Huffman Encoding" + ] + ], + "resps": [ + [ + [ + -12.270536422729492, + false + ] + ], + [ + [ + -28.064403533935547, + false + ] + ], + [ + [ + -30.861492156982422, + false + ] + ], + [ + [ + -27.247623443603516, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.270536422729492, + false + ], + [ + -28.064403533935547, + false + ], + [ + -30.861492156982422, + false + ], + [ + -27.247623443603516, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "bc20e847d46c6df16829c8f8f51c270aab55e343930e22ad24b4b80c130e8a89", + "prompt_hash": "6780db88023435c5ac2aeabf4f41c3af941c4a465c712acf25388a7131518804", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 76, + "doc": { + "question": "What is the action chunk duration used for policy training in the described VLA experiments?", + "choices": [ + "0.5 seconds", + "2 seconds", + "5 seconds", + "1 second" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What is the action chunk duration used for policy training in the described VLA experiments?\nAnswer:", + " 0.5 seconds" + ], + [ + "Question:What is the action chunk duration used for policy training in the described VLA experiments?\nAnswer:", + " 2 seconds" + ], + [ + "Question:What is the action chunk duration used for policy training in the described VLA experiments?\nAnswer:", + " 5 seconds" + ], + [ + "Question:What is the action chunk 
duration used for policy training in the described VLA experiments?\nAnswer:", + " 1 second" + ] + ], + "resps": [ + [ + [ + -7.42416524887085, + false + ] + ], + [ + [ + -5.825782775878906, + false + ] + ], + [ + [ + -5.891945838928223, + false + ] + ], + [ + [ + -5.117180347442627, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.42416524887085, + false + ], + [ + -5.825782775878906, + false + ], + [ + -5.891945838928223, + false + ], + [ + -5.117180347442627, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b2ab5010ee473215515f17ab098c12cdea5091d6623315912d4d4e695273bb67", + "prompt_hash": "668d931deaf85161766f8c175e973cb02b9540443c0d2c8bbf77c366bf489b08", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 77, + "doc": { + "question": "Which evaluation task uses a bi-manual ARX robot to fold a t-shirt?", + "choices": [ + "Laundry Folding", + "Toast out of Toaster", + "T-Shirt Folding", + "Table Bussing" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which evaluation task uses a bi-manual ARX robot to fold a t-shirt?\nAnswer:", + " Laundry Folding" + ], + [ + "Question:Which evaluation task uses a bi-manual ARX robot to fold a t-shirt?\nAnswer:", + " Toast out of Toaster" + ], + [ + "Question:Which evaluation task uses a bi-manual ARX robot to fold a t-shirt?\nAnswer:", + " T-Shirt Folding" + ], + [ + "Question:Which evaluation task uses a bi-manual ARX robot to fold a t-shirt?\nAnswer:", + " Table Bussing" + ] + ], + "resps": [ + [ + [ + -15.65053653717041, + false + ] + ], + [ + [ + -34.21724319458008, + false + ] + ], + [ + [ + -7.9458465576171875, + false + ] + ], + [ + [ + -26.62312889099121, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.65053653717041, + false + ], + [ + -34.21724319458008, + false + ], + [ + -7.9458465576171875, + false + ], + [ + -26.62312889099121, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "535b5c2b5e85ffd70b07c718bcd48ae81fb7299737cbf4c4346da27a0de5bc0a", + "prompt_hash": "5df4789577c533275bffc59775528b1a167b7932bb27a2967fabdf5117b59865", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 78, + "doc": { + "question": "What optimizer is used for policy training in the VLA experiments?", + "choices": [ + "SGD with Momentum", + "Adam", + "AdamW", + "RMSProp" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What optimizer is used for policy training in the VLA experiments?\nAnswer:", + " SGD with Momentum" + ], + [ + "Question:What optimizer is used for policy training in the VLA experiments?\nAnswer:", + " Adam" + ], + [ + "Question:What optimizer is used for policy training in the VLA experiments?\nAnswer:", + " AdamW" + ], + [ + "Question:What optimizer is used for policy training in the VLA experiments?\nAnswer:", + " RMSProp" + ] + ], + "resps": [ + [ + [ + -13.135553359985352, + false + ] + ], + [ + [ + -3.396702766418457, + false + ] + ], + [ + [ + -5.222354888916016, + false + ] + ], + [ + [ + -6.109254360198975, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.135553359985352, + false + ], + [ + -3.396702766418457, + false + ], + [ + -5.222354888916016, + false + ], + [ + -6.109254360198975, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": 
"c6ca2f0960faaebe4c1c1a5ed7c0bb61c4ace30f26b3b586ac0f0c8a3e672e44", + "prompt_hash": "10273564162dc5c045ea3ead115969d25d3a00836aaf9eb8620485f76cacbfe1", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 79, + "doc": { + "question": "What control frequency is associated with the T-Shirt Folding task?", + "choices": [ + "15 Hz", + "20 Hz", + "50 Hz", + "5 Hz" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What control frequency is associated with the T-Shirt Folding task?\nAnswer:", + " 15 Hz" + ], + [ + "Question:What control frequency is associated with the T-Shirt Folding task?\nAnswer:", + " 20 Hz" + ], + [ + "Question:What control frequency is associated with the T-Shirt Folding task?\nAnswer:", + " 50 Hz" + ], + [ + "Question:What control frequency is associated with the T-Shirt Folding task?\nAnswer:", + " 5 Hz" + ] + ], + "resps": [ + [ + [ + -6.0199737548828125, + false + ] + ], + [ + [ + -5.349177360534668, + false + ] + ], + [ + [ + -5.311049461364746, + false + ] + ], + [ + [ + -5.154747009277344, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.0199737548828125, + false + ], + [ + -5.349177360534668, + false + ], + [ + -5.311049461364746, + false + ], + [ + -5.154747009277344, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "fc67d63b526c2bbd910fe54a16fbd13bb882cbf2a4281310beb7bc522bbf1400", + "prompt_hash": "3536b38edac5e5232898c82dce006c4b587468dc08c5c806303e7817e30f6ceb", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 80, + "doc": { + "question": "What technique has shown that prompts like 'Let's think step by step' can improve reasoning in language models?", + "choices": [ + "Chain-of-Thought prompting", + "Least-to-Most prompting", + "Plan-and-Solve", + "Tree-of-Thought prompting" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What technique has shown that prompts like 'Let's think step by step' can improve reasoning in language models?\nAnswer:", + " Chain-of-Thought prompting" + ], + [ + "Question:What technique has shown that prompts like 'Let's think step by step' can improve reasoning in language models?\nAnswer:", + " Least-to-Most prompting" + ], + [ + "Question:What technique has shown that prompts like 'Let's think step by step' can improve reasoning in language models?\nAnswer:", + " Plan-and-Solve" + ], + [ + "Question:What technique has shown that prompts like 'Let's think step by step' can improve reasoning in language models?\nAnswer:", + " Tree-of-Thought prompting" + ] + ], + "resps": [ + [ + [ + -7.43093204498291, + false + ] + ], + [ + [ + -16.591632843017578, + false + ] + ], + [ + [ + -15.584571838378906, + false + ] + ], + [ + [ + -10.290903091430664, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.43093204498291, + false + ], + [ + -16.591632843017578, + false + ], + [ + -15.584571838378906, + false + ], + [ + -10.290903091430664, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "4895c2f15e6e631e16395be84803e67bf4ec8c1ba2694ccfed56750089722f91", + "prompt_hash": "88de32826fc0329be4c66c083fadb45cbe138d6ebdb91cae39a9799eb3ad610a", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 81, + "doc": { + "question": "What does Direct Preference Optimization 
(DPO) eliminate in reinforcement learning fine-tuning?", + "choices": [ + "Prompt engineering", + "Reward model training and sampling", + "Token-level scoring", + "Gradient clipping" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What does Direct Preference Optimization (DPO) eliminate in reinforcement learning fine-tuning?\nAnswer:", + " Prompt engineering" + ], + [ + "Question:What does Direct Preference Optimization (DPO) eliminate in reinforcement learning fine-tuning?\nAnswer:", + " Reward model training and sampling" + ], + [ + "Question:What does Direct Preference Optimization (DPO) eliminate in reinforcement learning fine-tuning?\nAnswer:", + " Token-level scoring" + ], + [ + "Question:What does Direct Preference Optimization (DPO) eliminate in reinforcement learning fine-tuning?\nAnswer:", + " Gradient clipping" + ] + ], + "resps": [ + [ + [ + -10.809982299804688, + false + ] + ], + [ + [ + -27.00271224975586, + false + ] + ], + [ + [ + -22.927026748657227, + false + ] + ], + [ + [ + -11.805936813354492, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.809982299804688, + false + ], + [ + -27.00271224975586, + false + ], + [ + -22.927026748657227, + false + ], + [ + -11.805936813354492, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "556e5a4a3ff7ffe7968dd7acadb8155bfec81f188a9c06600fbb9490117cd22b", + "prompt_hash": "df6ee6fd84ed4ca6c319809234c9234f25b815c286da491bfae8916ca174f2e3", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 82, + "doc": { + "question": "What strategy in Monte Carlo Tree Search (MCTS) helps reduce computational overhead in reasoning tasks?", + "choices": [ + "Heuristic initialization", + "Gradient pruning", + "Leaf node sharing of high-quality parent nodes", + "Prompt compression" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What strategy in Monte Carlo Tree Search (MCTS) helps reduce computational overhead in reasoning tasks?\nAnswer:", + " Heuristic initialization" + ], + [ + "Question:What strategy in Monte Carlo Tree Search (MCTS) helps reduce computational overhead in reasoning tasks?\nAnswer:", + " Gradient pruning" + ], + [ + "Question:What strategy in Monte Carlo Tree Search (MCTS) helps reduce computational overhead in reasoning tasks?\nAnswer:", + " Leaf node sharing of high-quality parent nodes" + ], + [ + "Question:What strategy in Monte Carlo Tree Search (MCTS) helps reduce computational overhead in reasoning tasks?\nAnswer:", + " Prompt compression" + ] + ], + "resps": [ + [ + [ + -15.309856414794922, + false + ] + ], + [ + [ + -19.264671325683594, + false + ] + ], + [ + [ + -45.913230895996094, + false + ] + ], + [ + [ + -18.06313133239746, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.309856414794922, + false + ], + [ + -19.264671325683594, + false + ], + [ + -45.913230895996094, + false + ], + [ + -18.06313133239746, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "c4f7658a1b0e85d0f29104240811cad7577426cdd20d7c60df3922dc9ce83068", + "prompt_hash": "770d1f160ad2a838c42b8537f5f53b79b454987d2870b0681c43ed3785c539b0", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 83, + "doc": { + "question": "Which reinforcement fine-tuning technique is noted for enabling learning from just dozens of user preference data?", + 
"choices": [ + "ReFT", + "VinePPO", + "RLHF", + "SVPO" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which reinforcement fine-tuning technique is noted for enabling learning from just dozens of user preference data?\nAnswer:", + " ReFT" + ], + [ + "Question:Which reinforcement fine-tuning technique is noted for enabling learning from just dozens of user preference data?\nAnswer:", + " VinePPO" + ], + [ + "Question:Which reinforcement fine-tuning technique is noted for enabling learning from just dozens of user preference data?\nAnswer:", + " RLHF" + ], + [ + "Question:Which reinforcement fine-tuning technique is noted for enabling learning from just dozens of user preference data?\nAnswer:", + " SVPO" + ] + ], + "resps": [ + [ + [ + -16.842266082763672, + false + ] + ], + [ + [ + -26.258159637451172, + false + ] + ], + [ + [ + -5.796584606170654, + false + ] + ], + [ + [ + -23.100177764892578, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.842266082763672, + false + ], + [ + -26.258159637451172, + false + ], + [ + -5.796584606170654, + false + ], + [ + -23.100177764892578, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "64deeaefa753f9a68d8ab08e24fd3dafcb8db63bf159e920cb613b2491d819b7", + "prompt_hash": "f7b999fc73f2232a7facb7f3e4c02090f7fc41ad7d90b92f7ff00fc5cb9ee231", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 84, + "doc": { + "question": "Which prompting method breaks down complex problems into smaller subproblems for better reasoning?", + "choices": [ + "Tree-of-Thoughts", + "Self-Consistency", + "Least-to-Most Prompting", + "Chain-of-Thought" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which prompting method breaks down complex problems into smaller subproblems for better reasoning?\nAnswer:", + " Tree-of-Thoughts" + ], + [ + "Question:Which prompting method breaks down complex problems into smaller subproblems for better reasoning?\nAnswer:", + " Self-Consistency" + ], + [ + "Question:Which prompting method breaks down complex problems into smaller subproblems for better reasoning?\nAnswer:", + " Least-to-Most Prompting" + ], + [ + "Question:Which prompting method breaks down complex problems into smaller subproblems for better reasoning?\nAnswer:", + " Chain-of-Thought" + ] + ], + "resps": [ + [ + [ + -9.340664863586426, + false + ] + ], + [ + [ + -9.643707275390625, + false + ] + ], + [ + [ + -13.097075462341309, + false + ] + ], + [ + [ + -5.191791534423828, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.340664863586426, + false + ], + [ + -9.643707275390625, + false + ], + [ + -13.097075462341309, + false + ], + [ + -5.191791534423828, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "451ac49f5ff3b62849ec7af0502925f7a3f42595bd827636a337dbcbf8da985f", + "prompt_hash": "1bf69240ab675c5dfb72f2d01b0f633f72f35c2ca71789d9ca2d6052d4c20981", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 85, + "doc": { + "question": "Which activation function is used in the Qwen2.5 dense model architecture?", + "choices": [ + "ReLU", + "SwiGLU", + "GELU", + "SiLU" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which activation function is used in the Qwen2.5 dense model architecture?\nAnswer:", + " ReLU" + ], + [ + "Question:Which activation 
function is used in the Qwen2.5 dense model architecture?\nAnswer:", + " SwiGLU" + ], + [ + "Question:Which activation function is used in the Qwen2.5 dense model architecture?\nAnswer:", + " GELU" + ], + [ + "Question:Which activation function is used in the Qwen2.5 dense model architecture?\nAnswer:", + " SiLU" + ] + ], + "resps": [ + [ + [ + -3.774085760116577, + false + ] + ], + [ + [ + -8.605820655822754, + false + ] + ], + [ + [ + -5.870603561401367, + false + ] + ], + [ + [ + -7.402235984802246, + false + ] + ] + ], + "filtered_resps": [ + [ + -3.774085760116577, + false + ], + [ + -8.605820655822754, + false + ], + [ + -5.870603561401367, + false + ], + [ + -7.402235984802246, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "6d76752172c847efd8d622077eedb3a62333a62e3f12e9e306ed0795b12dfda1", + "prompt_hash": "9ea9a1067541f5390152093014beb8623cec14591f5b08571de978de4f2e4bd4", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 86, + "doc": { + "question": "What vocabulary size is used in the Qwen2.5 tokenizer?", + "choices": [ + "32,768", + "100,000", + "151,643", + "256,000" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What vocabulary size is used in the Qwen2.5 tokenizer?\nAnswer:", + " 32,768" + ], + [ + "Question:What vocabulary size is used in the Qwen2.5 tokenizer?\nAnswer:", + " 100,000" + ], + [ + "Question:What vocabulary size is used in the Qwen2.5 tokenizer?\nAnswer:", + " 151,643" + ], + [ + "Question:What vocabulary size is used in the Qwen2.5 tokenizer?\nAnswer:", + " 256,000" + ] + ], + "resps": [ + [ + [ + -9.168275833129883, + false + ] + ], + [ + [ + -11.967578887939453, + false + ] + ], + [ + [ + -20.90795135498047, + false + ] + ], + [ + [ + -13.019368171691895, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.168275833129883, + false + ], + [ + -11.967578887939453, + false + ], + [ + -20.90795135498047, + false + ], + [ + -13.019368171691895, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "dbded3b4ad35993632462e288e6826b97d7820468e31f62f47cf06eba28b9d37", + "prompt_hash": "02c92dddcf37f76136a51a2822d6e29407b41630ad7c1b2d39f9b8fc2a60929b", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 87, + "doc": { + "question": "How many control tokens are included in the Qwen2.5 tokenizer?", + "choices": [ + "3", + "10", + "22", + "151,643" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many control tokens are included in the Qwen2.5 tokenizer?\nAnswer:", + " 3" + ], + [ + "Question:How many control tokens are included in the Qwen2.5 tokenizer?\nAnswer:", + " 10" + ], + [ + "Question:How many control tokens are included in the Qwen2.5 tokenizer?\nAnswer:", + " 22" + ], + [ + "Question:How many control tokens are included in the Qwen2.5 tokenizer?\nAnswer:", + " 151,643" + ] + ], + "resps": [ + [ + [ + -3.963737964630127, + false + ] + ], + [ + [ + -4.7243499755859375, + false + ] + ], + [ + [ + -6.240811347961426, + false + ] + ], + [ + [ + -19.046680450439453, + false + ] + ] + ], + "filtered_resps": [ + [ + -3.963737964630127, + false + ], + [ + -4.7243499755859375, + false + ], + [ + -6.240811347961426, + false + ], + [ + -19.046680450439453, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": 
"5e6b5263857f6a7b6e410d36c03b0b1716afaf1b54f60c1e9962f2127298ac11", + "prompt_hash": "6305352bd5b8c7676abf7c3a2dfec5fce5b327bfb1c85fdf41f498492917e294", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 88, + "doc": { + "question": "Which benchmark is used to evaluate multilingual understanding in the Qwen2.5 base model?", + "choices": [ + "BELEBELE", + "TruthfulQA", + "LiveBench", + "ARC-C" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which benchmark is used to evaluate multilingual understanding in the Qwen2.5 base model?\nAnswer:", + " BELEBELE" + ], + [ + "Question:Which benchmark is used to evaluate multilingual understanding in the Qwen2.5 base model?\nAnswer:", + " TruthfulQA" + ], + [ + "Question:Which benchmark is used to evaluate multilingual understanding in the Qwen2.5 base model?\nAnswer:", + " LiveBench" + ], + [ + "Question:Which benchmark is used to evaluate multilingual understanding in the Qwen2.5 base model?\nAnswer:", + " ARC-C" + ] + ], + "resps": [ + [ + [ + -20.12228775024414, + false + ] + ], + [ + [ + -11.192878723144531, + false + ] + ], + [ + [ + -16.66100311279297, + false + ] + ], + [ + [ + -12.205469131469727, + false + ] + ] + ], + "filtered_resps": [ + [ + -20.12228775024414, + false + ], + [ + -11.192878723144531, + false + ], + [ + -16.66100311279297, + false + ], + [ + -12.205469131469727, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "ddb5b99be432bca64544f9418ed8f3363d61cb4de20929c1fcfa1c57f2fa5247", + "prompt_hash": "c7efca85bb4edd89316ecb42e3ed4f9db0718b5ed440de9ae016fe666f0bec0a", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 89, + "doc": { + "question": "What is the performance of Qwen2.5-72B on the MATH dataset?", + "choices": [ + "50.9", + "62.1", + "64.4", + "83.1" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the performance of Qwen2.5-72B on the MATH dataset?\nAnswer:", + " 50.9" + ], + [ + "Question:What is the performance of Qwen2.5-72B on the MATH dataset?\nAnswer:", + " 62.1" + ], + [ + "Question:What is the performance of Qwen2.5-72B on the MATH dataset?\nAnswer:", + " 64.4" + ], + [ + "Question:What is the performance of Qwen2.5-72B on the MATH dataset?\nAnswer:", + " 83.1" + ] + ], + "resps": [ + [ + [ + -10.716265678405762, + false + ] + ], + [ + [ + -10.533012390136719, + false + ] + ], + [ + [ + -10.480714797973633, + false + ] + ], + [ + [ + -10.151132583618164, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.716265678405762, + false + ], + [ + -10.533012390136719, + false + ], + [ + -10.480714797973633, + false + ], + [ + -10.151132583618164, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b22ab978713dabb7f86c6882088a05c621c77a6637d6387ad21bce8e66b32c11", + "prompt_hash": "7ffd84244888096461c1da3fa98ef3348eac99cf5217ecac6cfcfaf1255e2401", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 90, + "doc": { + "question": "What training loss is used in InternVL 2.5's Stage 1 and Stage 1.5?", + "choices": [ + "Contrastive loss", + "Next Token Prediction loss", + "Cosine similarity loss", + "Cross-entropy loss" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What training loss is used in InternVL 
2.5's Stage 1 and Stage 1.5?\nAnswer:", + " Contrastive loss" + ], + [ + "Question:What training loss is used in InternVL 2.5's Stage 1 and Stage 1.5?\nAnswer:", + " Next Token Prediction loss" + ], + [ + "Question:What training loss is used in InternVL 2.5's Stage 1 and Stage 1.5?\nAnswer:", + " Cosine similarity loss" + ], + [ + "Question:What training loss is used in InternVL 2.5's Stage 1 and Stage 1.5?\nAnswer:", + " Cross-entropy loss" + ] + ], + "resps": [ + [ + [ + -8.985716819763184, + false + ] + ], + [ + [ + -20.21367835998535, + false + ] + ], + [ + [ + -14.408019065856934, + false + ] + ], + [ + [ + -7.583483695983887, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.985716819763184, + false + ], + [ + -20.21367835998535, + false + ], + [ + -14.408019065856934, + false + ], + [ + -7.583483695983887, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "d73d1b99689947592c163a229b52313719187c2c8c271ab48aeab0cf2b2432e9", + "prompt_hash": "4ff46b1d0c77fa76208ff6114af26f58991e33db92629e1bda64fb4ad5eee115", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 91, + "doc": { + "question": "What is the final number of parameters in InternViT-6B-448px-V2.5?", + "choices": [ + "5.9B", + "6.2B", + "5.5B", + "4.8B" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the final number of parameters in InternViT-6B-448px-V2.5?\nAnswer:", + " 5.9B" + ], + [ + "Question:What is the final number of parameters in InternViT-6B-448px-V2.5?\nAnswer:", + " 6.2B" + ], + [ + "Question:What is the final number of parameters in InternViT-6B-448px-V2.5?\nAnswer:", + " 5.5B" + ], + [ + "Question:What is the final number of parameters in InternViT-6B-448px-V2.5?\nAnswer:", + " 4.8B" + ] + ], + "resps": [ + [ + [ + -12.59568977355957, + false + ] + ], + [ + [ + -11.892919540405273, + false + ] + ], + [ + [ + -12.99958610534668, + false + ] + ], + [ + [ + -12.356107711791992, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.59568977355957, + false + ], + [ + -11.892919540405273, + false + ], + [ + -12.99958610534668, + false + ], + [ + -12.356107711791992, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f44a6242fcf0b2ab3a67d2ca668f0122bdc71c0ac3b57d75592826d316614528", + "prompt_hash": "18c04ac86168a8bed0fe625fa0d74a50ffd557102dbda3157b1470b2c6729b22", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 92, + "doc": { + "question": "Which prompt-based technique was shown to improve performance on MMMU in InternVL 2.5?", + "choices": [ + "Zero-shot chain-of-thought", + "Majority voting with CoT", + "Self-consistency decoding", + "Visual instruction tuning" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which prompt-based technique was shown to improve performance on MMMU in InternVL 2.5?\nAnswer:", + " Zero-shot chain-of-thought" + ], + [ + "Question:Which prompt-based technique was shown to improve performance on MMMU in InternVL 2.5?\nAnswer:", + " Majority voting with CoT" + ], + [ + "Question:Which prompt-based technique was shown to improve performance on MMMU in InternVL 2.5?\nAnswer:", + " Self-consistency decoding" + ], + [ + "Question:Which prompt-based technique was shown to improve performance on MMMU in InternVL 2.5?\nAnswer:", + " Visual instruction tuning" + ] + ], + "resps": [ + [ + 
[ + -14.020514488220215, + false + ] + ], + [ + [ + -23.533023834228516, + false + ] + ], + [ + [ + -19.21437644958496, + false + ] + ], + [ + [ + -12.545539855957031, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.020514488220215, + false + ], + [ + -23.533023834228516, + false + ], + [ + -19.21437644958496, + false + ], + [ + -12.545539855957031, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "37f70303923c80a15f00d15dc63d4dc1dbeb2e45703f039d8140753335cc3f10", + "prompt_hash": "2cd34b7f95101f1507b83dcd748877c9476778ce53e0263ab5512c0c7ec5fb05", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 93, + "doc": { + "question": "What is the resolution size used for each image tile in the InternVL 2.5 architecture?", + "choices": [ + "224×224", + "336×336", + "512×512", + "448×448" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What is the resolution size used for each image tile in the InternVL 2.5 architecture?\nAnswer:", + " 224×224" + ], + [ + "Question:What is the resolution size used for each image tile in the InternVL 2.5 architecture?\nAnswer:", + " 336×336" + ], + [ + "Question:What is the resolution size used for each image tile in the InternVL 2.5 architecture?\nAnswer:", + " 512×512" + ], + [ + "Question:What is the resolution size used for each image tile in the InternVL 2.5 architecture?\nAnswer:", + " 448×448" + ] + ], + "resps": [ + [ + [ + -8.673851013183594, + false + ] + ], + [ + [ + -10.982954978942871, + false + ] + ], + [ + [ + -7.892268180847168, + false + ] + ], + [ + [ + -9.570035934448242, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.673851013183594, + false + ], + [ + -10.982954978942871, + false + ], + [ + -7.892268180847168, + false + ], + [ + -9.570035934448242, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "a28c2abde649e23518ede576c5124c3e89ba96cbedaf97d1660315b44d9cb492", + "prompt_hash": "04cf5c99c4384326cb8493acc380a00f9e8f3123c06cf3a4aad83eae3541e6a5", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 94, + "doc": { + "question": "What is the maximum number of tiles assigned per video sample during training in InternVL 2.5?", + "choices": [ + "1", + "6", + "12", + "24" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the maximum number of tiles assigned per video sample during training in InternVL 2.5?\nAnswer:", + " 1" + ], + [ + "Question:What is the maximum number of tiles assigned per video sample during training in InternVL 2.5?\nAnswer:", + " 6" + ], + [ + "Question:What is the maximum number of tiles assigned per video sample during training in InternVL 2.5?\nAnswer:", + " 12" + ], + [ + "Question:What is the maximum number of tiles assigned per video sample during training in InternVL 2.5?\nAnswer:", + " 24" + ] + ], + "resps": [ + [ + [ + -2.4906997680664062, + false + ] + ], + [ + [ + -3.65911865234375, + false + ] + ], + [ + [ + -4.247614860534668, + false + ] + ], + [ + [ + -5.091933250427246, + false + ] + ] + ], + "filtered_resps": [ + [ + -2.4906997680664062, + false + ], + [ + -3.65911865234375, + false + ], + [ + -4.247614860534668, + false + ], + [ + -5.091933250427246, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": 
"e8b5737944f8f32ee3e7b5b6356e280b9018773368427db03e9f6f0290f0670e", + "prompt_hash": "01f6f066cf469892ed01e86213bec73cf971aefa7266e5f0c85f73359ae29b6f", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 95, + "doc": { + "question": "How many models with 1B parameters were trained to evaluate the impact of data mixtures?", + "choices": [ + "32", + "64", + "128", + "256" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:How many models with 1B parameters were trained to evaluate the impact of data mixtures?\nAnswer:", + " 32" + ], + [ + "Question:How many models with 1B parameters were trained to evaluate the impact of data mixtures?\nAnswer:", + " 64" + ], + [ + "Question:How many models with 1B parameters were trained to evaluate the impact of data mixtures?\nAnswer:", + " 128" + ], + [ + "Question:How many models with 1B parameters were trained to evaluate the impact of data mixtures?\nAnswer:", + " 256" + ] + ], + "resps": [ + [ + [ + -6.174529075622559, + false + ] + ], + [ + [ + -7.080163955688477, + false + ] + ], + [ + [ + -8.164529800415039, + false + ] + ], + [ + [ + -8.490699768066406, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.174529075622559, + false + ], + [ + -7.080163955688477, + false + ], + [ + -8.164529800415039, + false + ], + [ + -8.490699768066406, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "22c302fdda64067f2611d938454e180cc12322d6bb6f233cf4fc7caa396ebc11", + "prompt_hash": "8fe79270102b389c7e664a2ba107cf3f68742e48a2064c5188b5e082cfc5703a", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 96, + "doc": { + "question": "Which dataset showed the strongest correlation with downstream task performance?", + "choices": [ + "Wikipedia (en)", + "WikiText", + "Pile-CC", + "C4" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which dataset showed the strongest correlation with downstream task performance?\nAnswer:", + " Wikipedia (en)" + ], + [ + "Question:Which dataset showed the strongest correlation with downstream task performance?\nAnswer:", + " WikiText" + ], + [ + "Question:Which dataset showed the strongest correlation with downstream task performance?\nAnswer:", + " Pile-CC" + ], + [ + "Question:Which dataset showed the strongest correlation with downstream task performance?\nAnswer:", + " C4" + ] + ], + "resps": [ + [ + [ + -21.24456787109375, + false + ] + ], + [ + [ + -9.600040435791016, + false + ] + ], + [ + [ + -19.288917541503906, + false + ] + ], + [ + [ + -10.923744201660156, + false + ] + ] + ], + "filtered_resps": [ + [ + -21.24456787109375, + false + ], + [ + -9.600040435791016, + false + ], + [ + -19.288917541503906, + false + ], + [ + -10.923744201660156, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "279c342e0b86c189b778e77be74e371238a82b037dd8ac7f198bb5601131badc", + "prompt_hash": "5da37f35317d56dc7b0631566fb19ef772cf50ec5acd8dde624b908cb28cb08b", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 97, + "doc": { + "question": "What evaluation metric is used if normalized accuracy is not provided by lm-eval-harness?", + "choices": [ + "BLEU score", + "F1 score", + "Perplexity", + "Regular accuracy" + ], + "answer": 3 + }, + "target": 3, + 
"arguments": [ + [ + "Question:What evaluation metric is used if normalized accuracy is not provided by lm-eval-harness?\nAnswer:", + " BLEU score" + ], + [ + "Question:What evaluation metric is used if normalized accuracy is not provided by lm-eval-harness?\nAnswer:", + " F1 score" + ], + [ + "Question:What evaluation metric is used if normalized accuracy is not provided by lm-eval-harness?\nAnswer:", + " Perplexity" + ], + [ + "Question:What evaluation metric is used if normalized accuracy is not provided by lm-eval-harness?\nAnswer:", + " Regular accuracy" + ] + ], + "resps": [ + [ + [ + -9.628717422485352, + false + ] + ], + [ + [ + -5.973865032196045, + false + ] + ], + [ + [ + -6.907043933868408, + false + ] + ], + [ + [ + -13.605592727661133, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.628717422485352, + false + ], + [ + -5.973865032196045, + false + ], + [ + -6.907043933868408, + false + ], + [ + -13.605592727661133, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "0a7d09c18007aa19218484c38030cc8c137386c6995ff12622f783aedb310279", + "prompt_hash": "abc7194d9f039eb154a8e9c5c668965d30e904867908a5c735dbeebc91e07164", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 98, + "doc": { + "question": "Which benchmark showed the largest performance gap (Δ) between the best and worst models?", + "choices": [ + "PiQA", + "Lambada", + "HellaSwag", + "OpenBookQA" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which benchmark showed the largest performance gap (Δ) between the best and worst models?\nAnswer:", + " PiQA" + ], + [ + "Question:Which benchmark showed the largest performance gap (Δ) between the best and worst models?\nAnswer:", + " Lambada" + ], + [ + "Question:Which benchmark showed the largest performance gap (Δ) between the best and worst models?\nAnswer:", + " HellaSwag" + ], + [ + "Question:Which benchmark showed the largest performance gap (Δ) between the best and worst models?\nAnswer:", + " OpenBookQA" + ] + ], + "resps": [ + [ + [ + -14.476099014282227, + false + ] + ], + [ + [ + -12.178361892700195, + false + ] + ], + [ + [ + -8.666653633117676, + false + ] + ], + [ + [ + -12.985079765319824, + false + ] + ] + ], + "filtered_resps": [ + [ + -14.476099014282227, + false + ], + [ + -12.178361892700195, + false + ], + [ + -8.666653633117676, + false + ], + [ + -12.985079765319824, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "06b48f908f1700e94e22b781c4215de054b9b365f41c55084daedb22d807b5b6", + "prompt_hash": "05aa0d7a5d81aea67d91accbc2cffa2ea224c1d4e1f94c79e834c13a88868ad8", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 99, + "doc": { + "question": "What kind of regression model was used in \\u201courmethod\\u201d to predict the optimal data mixture?", + "choices": [ + "Lasso regression", + "Linear regression", + "LightGBM", + "Random forest" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What kind of regression model was used in \\u201courmethod\\u201d to predict the optimal data mixture?\nAnswer:", + " Lasso regression" + ], + [ + "Question:What kind of regression model was used in \\u201courmethod\\u201d to predict the optimal data mixture?\nAnswer:", + " Linear regression" + ], + [ + "Question:What kind of regression model was used in 
\\u201courmethod\\u201d to predict the optimal data mixture?\nAnswer:", + " LightGBM" + ], + [ + "Question:What kind of regression model was used in \\u201courmethod\\u201d to predict the optimal data mixture?\nAnswer:", + " Random forest" + ] + ], + "resps": [ + [ + [ + -10.868882179260254, + false + ] + ], + [ + [ + -5.115058898925781, + false + ] + ], + [ + [ + -12.149599075317383, + false + ] + ], + [ + [ + -8.57761001586914, + false + ] + ] + ], + "filtered_resps": [ + [ + -10.868882179260254, + false + ], + [ + -5.115058898925781, + false + ], + [ + -12.149599075317383, + false + ], + [ + -8.57761001586914, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b53157e14dd6ab1308b1db7dd65b121972f82fef1555d1a8b7bb279605a2f2f4", + "prompt_hash": "15d02ff658f84423bb16b693b71e523b96c253fb5f5075947c1008589e38ecbf", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + } + ], + "arxiv_mc_2502": [ + { + "doc_id": 0, + "doc": { + "question": "How many video clips does the UCF-101 dataset contain?", + "choices": [ + "13,320", + "35,000", + "70 million", + "2,265" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:How many video clips does the UCF-101 dataset contain?\nAnswer:", + " 13,320" + ], + [ + "Question:How many video clips does the UCF-101 dataset contain?\nAnswer:", + " 35,000" + ], + [ + "Question:How many video clips does the UCF-101 dataset contain?\nAnswer:", + " 70 million" + ], + [ + "Question:How many video clips does the UCF-101 dataset contain?\nAnswer:", + " 2,265" + ] + ], + "resps": [ + [ + [ + -15.38673210144043, + false + ] + ], + [ + [ + -13.665287017822266, + false + ] + ], + [ + [ + -18.272319793701172, + false + ] + ], + [ + [ + -14.839933395385742, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.38673210144043, + false + ], + [ + -13.665287017822266, + false + ], + [ + -18.272319793701172, + false + ], + [ + -14.839933395385742, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "d918d9290ba27f5972ce763142840a34b15fccae886c519a5ff068af2d982b0b", + "prompt_hash": "66610f22552921f853fe59e1ea7704b01e3c69cbffae9d2feca90e1b0a4a4e1e", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 1, + "doc": { + "question": "What is the average video length in the WebVid-10M dataset?", + "choices": [ + "7.2 seconds", + "18.7 seconds", + "11.4 seconds", + "8.5 seconds" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the average video length in the WebVid-10M dataset?\nAnswer:", + " 7.2 seconds" + ], + [ + "Question:What is the average video length in the WebVid-10M dataset?\nAnswer:", + " 18.7 seconds" + ], + [ + "Question:What is the average video length in the WebVid-10M dataset?\nAnswer:", + " 11.4 seconds" + ], + [ + "Question:What is the average video length in the WebVid-10M dataset?\nAnswer:", + " 8.5 seconds" + ] + ], + "resps": [ + [ + [ + -9.703564643859863, + false + ] + ], + [ + [ + -10.211236953735352, + false + ] + ], + [ + [ + -9.757585525512695, + false + ] + ], + [ + [ + -9.347663879394531, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.703564643859863, + false + ], + [ + -10.211236953735352, + false + ], + [ + -9.757585525512695, + false + ], + [ + -9.347663879394531, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": 
"0c051f64404c22a332292565deb4bcc0138c49b138d6b18679c18b46e9f01084", + "prompt_hash": "93266376b337b79147290d8bc60a6240e38a5e688ef9c7e34717db3df76c55fe", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 2, + "doc": { + "question": "Which dataset contains 2,265 high-quality time-lapse videos with text descriptions?", + "choices": [ + "Taichi-HD", + "ChronoMagic", + "CelebvHQ", + "Panda-70M" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which dataset contains 2,265 high-quality time-lapse videos with text descriptions?\nAnswer:", + " Taichi-HD" + ], + [ + "Question:Which dataset contains 2,265 high-quality time-lapse videos with text descriptions?\nAnswer:", + " ChronoMagic" + ], + [ + "Question:Which dataset contains 2,265 high-quality time-lapse videos with text descriptions?\nAnswer:", + " CelebvHQ" + ], + [ + "Question:Which dataset contains 2,265 high-quality time-lapse videos with text descriptions?\nAnswer:", + " Panda-70M" + ] + ], + "resps": [ + [ + [ + -26.884653091430664, + false + ] + ], + [ + [ + -30.02543830871582, + false + ] + ], + [ + [ + -24.76498794555664, + false + ] + ], + [ + [ + -22.008342742919922, + false + ] + ] + ], + "filtered_resps": [ + [ + -26.884653091430664, + false + ], + [ + -30.02543830871582, + false + ], + [ + -24.76498794555664, + false + ], + [ + -22.008342742919922, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "aea8b1e5f35964a5f6bd80bb657c379c0763ba1b2dd00311ea6d8d1a834f7f6b", + "prompt_hash": "3a25697ba9302d309fefbd23edef13e2233a48d1caa5aa842f6dae2e8c6168cc", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 3, + "doc": { + "question": "How many video clips and hours does the InternVid dataset include?", + "choices": [ + "13,320 clips and 2.7 hours", + "2,265 clips and 7 hours", + "234 million clips and 760.3K hours", + "35,000 clips and 65 hours" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many video clips and hours does the InternVid dataset include?\nAnswer:", + " 13,320 clips and 2.7 hours" + ], + [ + "Question:How many video clips and hours does the InternVid dataset include?\nAnswer:", + " 2,265 clips and 7 hours" + ], + [ + "Question:How many video clips and hours does the InternVid dataset include?\nAnswer:", + " 234 million clips and 760.3K hours" + ], + [ + "Question:How many video clips and hours does the InternVid dataset include?\nAnswer:", + " 35,000 clips and 65 hours" + ] + ], + "resps": [ + [ + [ + -23.848697662353516, + false + ] + ], + [ + [ + -21.163166046142578, + false + ] + ], + [ + [ + -40.58928298950195, + false + ] + ], + [ + [ + -19.135757446289062, + false + ] + ] + ], + "filtered_resps": [ + [ + -23.848697662353516, + false + ], + [ + -21.163166046142578, + false + ], + [ + -40.58928298950195, + false + ], + [ + -19.135757446289062, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7ccc4b4a88a7e240547fd23c9a7bed6ef3a596e323340ff9f56c7e337aeffb38", + "prompt_hash": "6d123085222da44784840c0b919186526daf6426113cc555b4d15ce2daece6af", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 4, + "doc": { + "question": "What resolution do the videos in the \\u201chd-dataset\\u201d (HD version of the new dataset) achieve?", + 
"choices": [ + "512×512", + "1920×1080", + "640×360", + "1280×720" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What resolution do the videos in the \\u201chd-dataset\\u201d (HD version of the new dataset) achieve?\nAnswer:", + " 512×512" + ], + [ + "Question:What resolution do the videos in the \\u201chd-dataset\\u201d (HD version of the new dataset) achieve?\nAnswer:", + " 1920×1080" + ], + [ + "Question:What resolution do the videos in the \\u201chd-dataset\\u201d (HD version of the new dataset) achieve?\nAnswer:", + " 640×360" + ], + [ + "Question:What resolution do the videos in the \\u201chd-dataset\\u201d (HD version of the new dataset) achieve?\nAnswer:", + " 1280×720" + ] + ], + "resps": [ + [ + [ + -9.11259651184082, + false + ] + ], + [ + [ + -6.933300018310547, + false + ] + ], + [ + [ + -9.924569129943848, + false + ] + ], + [ + [ + -8.05185317993164, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.11259651184082, + false + ], + [ + -6.933300018310547, + false + ], + [ + -9.924569129943848, + false + ], + [ + -8.05185317993164, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "68595c0f4ac35ddb77399e10113b20bcee003b37dadc9b9416f1f86c43cdbf6a", + "prompt_hash": "9ff2435dd70469a91ad50c31a8a09ec1e749efb2b188ee18f7e70ae23854d4f9", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 5, + "doc": { + "question": "Which dataset was used as the retain dataset for harmful request refusal training?", + "choices": [ + "Magpie-Align", + "Wikitext", + "Camel-bio", + "CTFtime" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which dataset was used as the retain dataset for harmful request refusal training?\nAnswer:", + " Magpie-Align" + ], + [ + "Question:Which dataset was used as the retain dataset for harmful request refusal training?\nAnswer:", + " Wikitext" + ], + [ + "Question:Which dataset was used as the retain dataset for harmful request refusal training?\nAnswer:", + " Camel-bio" + ], + [ + "Question:Which dataset was used as the retain dataset for harmful request refusal training?\nAnswer:", + " CTFtime" + ] + ], + "resps": [ + [ + [ + -30.949909210205078, + false + ] + ], + [ + [ + -12.598445892333984, + false + ] + ], + [ + [ + -29.680171966552734, + false + ] + ], + [ + [ + -28.077442169189453, + false + ] + ] + ], + "filtered_resps": [ + [ + -30.949909210205078, + false + ], + [ + -12.598445892333984, + false + ], + [ + -29.680171966552734, + false + ], + [ + -28.077442169189453, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7c7ab492f9d6fb841340d642fa8084c5abd581c141445b0e25816877a193aa7d", + "prompt_hash": "73b6ceeaa2d9583cf65a3e36a39c36cade024ff3a12289eae9f7c7422f18af51", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 6, + "doc": { + "question": "What optimizer was used for train-time adversaries in the Biosecurity weaponization restriction setup?", + "choices": [ + "AdamW", + "SGD with Nesterov Momentum", + "Adadelta", + "Schedule Free AdamW" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What optimizer was used for train-time adversaries in the Biosecurity weaponization restriction setup?\nAnswer:", + " AdamW" + ], + [ + "Question:What optimizer was used for train-time adversaries in the Biosecurity weaponization restriction 
setup?\nAnswer:", + " SGD with Nesterov Momentum" + ], + [ + "Question:What optimizer was used for train-time adversaries in the Biosecurity weaponization restriction setup?\nAnswer:", + " Adadelta" + ], + [ + "Question:What optimizer was used for train-time adversaries in the Biosecurity weaponization restriction setup?\nAnswer:", + " Schedule Free AdamW" + ] + ], + "resps": [ + [ + [ + -4.982319355010986, + false + ] + ], + [ + [ + -16.44598388671875, + false + ] + ], + [ + [ + -7.690619945526123, + false + ] + ], + [ + [ + -31.53502655029297, + false + ] + ] + ], + "filtered_resps": [ + [ + -4.982319355010986, + false + ], + [ + -16.44598388671875, + false + ], + [ + -7.690619945526123, + false + ], + [ + -31.53502655029297, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f8e9a563d381fb960aa6e786dad81b874336a607396c0768ee26742c75b881d4", + "prompt_hash": "3c661c4dc3ad9162e3993d2761aa2ee773b88ac8b9353087f14c1579f1711327", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 7, + "doc": { + "question": "How many adversary setups were simulated for Cybersecurity weaponization restriction during training?", + "choices": [ + "2", + "4", + "6", + "8" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many adversary setups were simulated for Cybersecurity weaponization restriction during training?\nAnswer:", + " 2" + ], + [ + "Question:How many adversary setups were simulated for Cybersecurity weaponization restriction during training?\nAnswer:", + " 4" + ], + [ + "Question:How many adversary setups were simulated for Cybersecurity weaponization restriction during training?\nAnswer:", + " 6" + ], + [ + "Question:How many adversary setups were simulated for Cybersecurity weaponization restriction during training?\nAnswer:", + " 8" + ] + ], + "resps": [ + [ + [ + -3.0547232627868652, + false + ] + ], + [ + [ + -3.540640354156494, + false + ] + ], + [ + [ + -4.074788570404053, + false + ] + ], + [ + [ + -4.373826503753662, + false + ] + ] + ], + "filtered_resps": [ + [ + -3.0547232627868652, + false + ], + [ + -3.540640354156494, + false + ], + [ + -4.074788570404053, + false + ], + [ + -4.373826503753662, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b5e1ccec8cfe6f15e81a591cf22a589bb992cfee4e754932728adb85e0877637", + "prompt_hash": "64f0040af9d1edaa68dde03333b2d294fd6c95538f6dfd8acfabd6a86b688086", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 8, + "doc": { + "question": "In the additional harmful request refusal results, what was the Post-Attack ASR of the TAR method?", + "choices": [ + "84.8", + "78.3", + "74.5", + "63.9" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:In the additional harmful request refusal results, what was the Post-Attack ASR of the TAR method?\nAnswer:", + " 84.8" + ], + [ + "Question:In the additional harmful request refusal results, what was the Post-Attack ASR of the TAR method?\nAnswer:", + " 78.3" + ], + [ + "Question:In the additional harmful request refusal results, what was the Post-Attack ASR of the TAR method?\nAnswer:", + " 74.5" + ], + [ + "Question:In the additional harmful request refusal results, what was the Post-Attack ASR of the TAR method?\nAnswer:", + " 63.9" + ] + ], + "resps": [ + [ + [ + -9.289198875427246, + false + ] + ], + [ + [ + 
-9.459244728088379, + false + ] + ], + [ + [ + -9.550447463989258, + false + ] + ], + [ + [ + -9.714252471923828, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.289198875427246, + false + ], + [ + -9.459244728088379, + false + ], + [ + -9.550447463989258, + false + ], + [ + -9.714252471923828, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "785f5608cc184077484fbf0dac845374b6161aa05d6f07c68f282d09fcc69fe8", + "prompt_hash": "42b34dfe5d13cad8f57f2d25f673ef255ed327321f125c23443d82caa6cbdd0f", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 9, + "doc": { + "question": "Which dataset was scraped to construct the Cybersecurity forget dataset?", + "choices": [ + "Camel AI Biology", + "CTFtime writeups", + "WMDP", + "Pile-bio Retain" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which dataset was scraped to construct the Cybersecurity forget dataset?\nAnswer:", + " Camel AI Biology" + ], + [ + "Question:Which dataset was scraped to construct the Cybersecurity forget dataset?\nAnswer:", + " CTFtime writeups" + ], + [ + "Question:Which dataset was scraped to construct the Cybersecurity forget dataset?\nAnswer:", + " WMDP" + ], + [ + "Question:Which dataset was scraped to construct the Cybersecurity forget dataset?\nAnswer:", + " Pile-bio Retain" + ] + ], + "resps": [ + [ + [ + -36.0738410949707, + false + ] + ], + [ + [ + -41.3992919921875, + false + ] + ], + [ + [ + -16.297882080078125, + false + ] + ], + [ + [ + -48.197181701660156, + false + ] + ] + ], + "filtered_resps": [ + [ + -36.0738410949707, + false + ], + [ + -41.3992919921875, + false + ], + [ + -16.297882080078125, + false + ], + [ + -48.197181701660156, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "9b5d1e8c7027ddc744eeace2cc139a04af92ebd1bf2b40858abd6a1e04cbfc6e", + "prompt_hash": "47bbc435192555e617c67175eb3a4de72282016369dd827a64650787898b3e71", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 10, + "doc": { + "question": "In the Last Letter Concatenation task, how many problem queries are generated for each input length?", + "choices": [ + "128", + "350", + "4096", + "7200" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:In the Last Letter Concatenation task, how many problem queries are generated for each input length?\nAnswer:", + " 128" + ], + [ + "Question:In the Last Letter Concatenation task, how many problem queries are generated for each input length?\nAnswer:", + " 350" + ], + [ + "Question:In the Last Letter Concatenation task, how many problem queries are generated for each input length?\nAnswer:", + " 4096" + ], + [ + "Question:In the Last Letter Concatenation task, how many problem queries are generated for each input length?\nAnswer:", + " 7200" + ] + ], + "resps": [ + [ + [ + -8.70600414276123, + false + ] + ], + [ + [ + -10.003148078918457, + false + ] + ], + [ + [ + -10.063257217407227, + false + ] + ], + [ + [ + -13.331168174743652, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.70600414276123, + false + ], + [ + -10.003148078918457, + false + ], + [ + -10.063257217407227, + false + ], + [ + -13.331168174743652, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b48a19f02f6e3781a3960dc5a958e312b6e2457f2179f5a77d9fca4adc6283b7", + "prompt_hash": 
"39737723d57a40e7a488825a3a9245f925f737b612e3215a668bbb522bc1c2c0", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 11, + "doc": { + "question": "For the Word Sorting task, how many lists of words are generated as problem queries for each input length?", + "choices": [ + "128", + "350", + "4096", + "7200" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:For the Word Sorting task, how many lists of words are generated as problem queries for each input length?\nAnswer:", + " 128" + ], + [ + "Question:For the Word Sorting task, how many lists of words are generated as problem queries for each input length?\nAnswer:", + " 350" + ], + [ + "Question:For the Word Sorting task, how many lists of words are generated as problem queries for each input length?\nAnswer:", + " 4096" + ], + [ + "Question:For the Word Sorting task, how many lists of words are generated as problem queries for each input length?\nAnswer:", + " 7200" + ] + ], + "resps": [ + [ + [ + -9.128795623779297, + false + ] + ], + [ + [ + -10.338838577270508, + false + ] + ], + [ + [ + -10.242339134216309, + false + ] + ], + [ + [ + -13.073063850402832, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.128795623779297, + false + ], + [ + -10.338838577270508, + false + ], + [ + -10.242339134216309, + false + ], + [ + -13.073063850402832, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7d4e66b5e39a6f94c63b31e4990ba314e9cfc5c45bebbb1ac8207ba01fb1b0a3", + "prompt_hash": "8f3960d8166d21235488fa33ac5c46b15b7149d8c82186bf7886d3734e4f2c14", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 12, + "doc": { + "question": "In the Grade School Math dataset, approximately how many problems are used for training?", + "choices": [ + "1.3K", + "128", + "7.2K", + "50K" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:In the Grade School Math dataset, approximately how many problems are used for training?\nAnswer:", + " 1.3K" + ], + [ + "Question:In the Grade School Math dataset, approximately how many problems are used for training?\nAnswer:", + " 128" + ], + [ + "Question:In the Grade School Math dataset, approximately how many problems are used for training?\nAnswer:", + " 7.2K" + ], + [ + "Question:In the Grade School Math dataset, approximately how many problems are used for training?\nAnswer:", + " 50K" + ] + ], + "resps": [ + [ + [ + -11.14500617980957, + false + ] + ], + [ + [ + -7.693936824798584, + false + ] + ], + [ + [ + -12.409635543823242, + false + ] + ], + [ + [ + -8.355731964111328, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.14500617980957, + false + ], + [ + -7.693936824798584, + false + ], + [ + -12.409635543823242, + false + ], + [ + -8.355731964111328, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "ddfcc10d4cb72f3178246031e500bd4e67b7661425f254523ce17198c9b51dbf", + "prompt_hash": "4db725a7777f9e61ed2d46829e43cf32250785dd099802a22a78ade8030be237", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 13, + "doc": { + "question": "Which optimizer is used for training Gemma-based generative verifiers?", + "choices": [ + "SGD", + "Adam", + "RMSprop", + "Adagrad" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + 
"Question:Which optimizer is used for training Gemma-based generative verifiers?\nAnswer:", + " SGD" + ], + [ + "Question:Which optimizer is used for training Gemma-based generative verifiers?\nAnswer:", + " Adam" + ], + [ + "Question:Which optimizer is used for training Gemma-based generative verifiers?\nAnswer:", + " RMSprop" + ], + [ + "Question:Which optimizer is used for training Gemma-based generative verifiers?\nAnswer:", + " Adagrad" + ] + ], + "resps": [ + [ + [ + -7.656381607055664, + false + ] + ], + [ + [ + -4.861548900604248, + false + ] + ], + [ + [ + -10.62295913696289, + false + ] + ], + [ + [ + -10.300954818725586, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.656381607055664, + false + ], + [ + -4.861548900604248, + false + ], + [ + -10.62295913696289, + false + ], + [ + -10.300954818725586, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b4f41f766d1df5f2e48170794a9089c321f14247018cfe937b62066953b701a2", + "prompt_hash": "9363cb97c6af5e29ff7a793c58c6291445cbac5ad99bf7c35222bfdadc47b50d", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 14, + "doc": { + "question": "What learning rate worked best for DPO training in the described experiments?", + "choices": [ + "1e-7", + "5e-7", + "1e-6", + "2e-6" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What learning rate worked best for DPO training in the described experiments?\nAnswer:", + " 1e-7" + ], + [ + "Question:What learning rate worked best for DPO training in the described experiments?\nAnswer:", + " 5e-7" + ], + [ + "Question:What learning rate worked best for DPO training in the described experiments?\nAnswer:", + " 1e-6" + ], + [ + "Question:What learning rate worked best for DPO training in the described experiments?\nAnswer:", + " 2e-6" + ] + ], + "resps": [ + [ + [ + -9.123523712158203, + false + ] + ], + [ + [ + -9.504151344299316, + false + ] + ], + [ + [ + -8.128350257873535, + false + ] + ], + [ + [ + -9.45744514465332, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.123523712158203, + false + ], + [ + -9.504151344299316, + false + ], + [ + -8.128350257873535, + false + ], + [ + -9.45744514465332, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "aa2f1f819abc173eee2d667708f63bbd94114eceb9eb24a99d3ca9a048cac5e7", + "prompt_hash": "afa0657c4d865c63c194aeb264649949e04b109fc2a251da1cbb8288f6d2c3c3", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 15, + "doc": { + "question": "What is the name of the pre-trained visual encoder developed in the Oryx model?", + "choices": [ + "OryxViT", + "CLIP", + "NaViT", + "FlexiViT" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the name of the pre-trained visual encoder developed in the Oryx model?\nAnswer:", + " OryxViT" + ], + [ + "Question:What is the name of the pre-trained visual encoder developed in the Oryx model?\nAnswer:", + " CLIP" + ], + [ + "Question:What is the name of the pre-trained visual encoder developed in the Oryx model?\nAnswer:", + " NaViT" + ], + [ + "Question:What is the name of the pre-trained visual encoder developed in the Oryx model?\nAnswer:", + " FlexiViT" + ] + ], + "resps": [ + [ + [ + -9.803050994873047, + false + ] + ], + [ + [ + -3.1500496864318848, + false + ] + ], + [ + [ + -17.866470336914062, + false + ] + ], + 
[ + [ + -14.184885025024414, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.803050994873047, + false + ], + [ + -3.1500496864318848, + false + ], + [ + -17.866470336914062, + false + ], + [ + -14.184885025024414, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "156b7b840777a70e8b642ac95c6705845ad2ff391c9a995ca447269ee8435f98", + "prompt_hash": "68d2ed3788c0d39dd7228baf9d11482275475f353ed20d41c46b3cab10d97490", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 16, + "doc": { + "question": "Which dataset was used to prepare long-form temporal training data with 'needle-in-a-haystack' tasks?", + "choices": [ + "MovieNet", + "ScanQA", + "LLaVA-NeXt", + "Cambrian-1" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which dataset was used to prepare long-form temporal training data with 'needle-in-a-haystack' tasks?\nAnswer:", + " MovieNet" + ], + [ + "Question:Which dataset was used to prepare long-form temporal training data with 'needle-in-a-haystack' tasks?\nAnswer:", + " ScanQA" + ], + [ + "Question:Which dataset was used to prepare long-form temporal training data with 'needle-in-a-haystack' tasks?\nAnswer:", + " LLaVA-NeXt" + ], + [ + "Question:Which dataset was used to prepare long-form temporal training data with 'needle-in-a-haystack' tasks?\nAnswer:", + " Cambrian-1" + ] + ], + "resps": [ + [ + [ + -8.061107635498047, + false + ] + ], + [ + [ + -16.233022689819336, + false + ] + ], + [ + [ + -17.29337501525879, + false + ] + ], + [ + [ + -22.008586883544922, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.061107635498047, + false + ], + [ + -16.233022689819336, + false + ], + [ + -17.29337501525879, + false + ], + [ + -22.008586883544922, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "eba15b55f0b7a7094523969a778e1f9adb1ad8119d2cf8253c2134b659baa96a", + "prompt_hash": "849ce8d2eb09cbeba5b325412eb5b336286ebbc1f4864858b8569f2b94124d5a", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 17, + "doc": { + "question": "Which tracking model was used to generate coarse correspondences for the ScanQA training set?", + "choices": [ + "Track-Anything", + "NaViT", + "SigLIP", + "FlexiViT" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which tracking model was used to generate coarse correspondences for the ScanQA training set?\nAnswer:", + " Track-Anything" + ], + [ + "Question:Which tracking model was used to generate coarse correspondences for the ScanQA training set?\nAnswer:", + " NaViT" + ], + [ + "Question:Which tracking model was used to generate coarse correspondences for the ScanQA training set?\nAnswer:", + " SigLIP" + ], + [ + "Question:Which tracking model was used to generate coarse correspondences for the ScanQA training set?\nAnswer:", + " FlexiViT" + ] + ], + "resps": [ + [ + [ + -11.741609573364258, + false + ] + ], + [ + [ + -14.81590747833252, + false + ] + ], + [ + [ + -11.319101333618164, + false + ] + ], + [ + [ + -23.714706420898438, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.741609573364258, + false + ], + [ + -14.81590747833252, + false + ], + [ + -11.319101333618164, + false + ], + [ + -23.714706420898438, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": 
"de765beb9e0c5abd06ea1512e4806499af83248f2e9e33ce3f70050ee8c92677", + "prompt_hash": "b3c3c00f007e36c1b86ce3a18f4adf255d055ea83561ca0de94f5c3f37d1bb27", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 18, + "doc": { + "question": "In Stage 1 of the Oryx training pipeline, approximately how many supervised fine-tuning image-text pairs were collected?", + "choices": [ + "4 million", + "400 thousand", + "40 million", + "1 million" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:In Stage 1 of the Oryx training pipeline, approximately how many supervised fine-tuning image-text pairs were collected?\nAnswer:", + " 4 million" + ], + [ + "Question:In Stage 1 of the Oryx training pipeline, approximately how many supervised fine-tuning image-text pairs were collected?\nAnswer:", + " 400 thousand" + ], + [ + "Question:In Stage 1 of the Oryx training pipeline, approximately how many supervised fine-tuning image-text pairs were collected?\nAnswer:", + " 40 million" + ], + [ + "Question:In Stage 1 of the Oryx training pipeline, approximately how many supervised fine-tuning image-text pairs were collected?\nAnswer:", + " 1 million" + ] + ], + "resps": [ + [ + [ + -9.426055908203125, + false + ] + ], + [ + [ + -13.373111724853516, + false + ] + ], + [ + [ + -9.889270782470703, + false + ] + ], + [ + [ + -8.437106132507324, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.426055908203125, + false + ], + [ + -13.373111724853516, + false + ], + [ + -9.889270782470703, + false + ], + [ + -8.437106132507324, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "566b2aac0784a025c3eb826a286d646e61de8d7cecbede2afd0985094ec48415", + "prompt_hash": "fe109bf7a63cea191cc404f1cfd2cae1cc1277fe4036eaf0e2cb64c20daeab38", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 19, + "doc": { + "question": "Which benchmark did the Oryx-1.5-32B model outperform GPT-4o by 7.7%?", + "choices": [ + "MLVU", + "LongVideoBench", + "NextQA", + "OCRBench" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which benchmark did the Oryx-1.5-32B model outperform GPT-4o by 7.7%?\nAnswer:", + " MLVU" + ], + [ + "Question:Which benchmark did the Oryx-1.5-32B model outperform GPT-4o by 7.7%?\nAnswer:", + " LongVideoBench" + ], + [ + "Question:Which benchmark did the Oryx-1.5-32B model outperform GPT-4o by 7.7%?\nAnswer:", + " NextQA" + ], + [ + "Question:Which benchmark did the Oryx-1.5-32B model outperform GPT-4o by 7.7%?\nAnswer:", + " OCRBench" + ] + ], + "resps": [ + [ + [ + -20.46422576904297, + false + ] + ], + [ + [ + -21.4570255279541, + false + ] + ], + [ + [ + -15.763558387756348, + false + ] + ], + [ + [ + -15.791434288024902, + false + ] + ] + ], + "filtered_resps": [ + [ + -20.46422576904297, + false + ], + [ + -21.4570255279541, + false + ], + [ + -15.763558387756348, + false + ], + [ + -15.791434288024902, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "929aedefb09734275f0ac9ad7b47f7a727f751bc2aed619a363d3479e01e1ac6", + "prompt_hash": "c35ffd77c1af1f3323947385684544d64e854cc292267e354343a7e9cc11c5dd", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 20, + "doc": { + "question": "Which activation function is used in the point-wise 
tokenization embedding for time series points?", + "choices": [ + "ReLU", + "SwiGLU", + "Tanh", + "Sigmoid" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which activation function is used in the point-wise tokenization embedding for time series points?\nAnswer:", + " ReLU" + ], + [ + "Question:Which activation function is used in the point-wise tokenization embedding for time series points?\nAnswer:", + " SwiGLU" + ], + [ + "Question:Which activation function is used in the point-wise tokenization embedding for time series points?\nAnswer:", + " Tanh" + ], + [ + "Question:Which activation function is used in the point-wise tokenization embedding for time series points?\nAnswer:", + " Sigmoid" + ] + ], + "resps": [ + [ + [ + -3.263493776321411, + false + ] + ], + [ + [ + -7.855595588684082, + false + ] + ], + [ + [ + -4.75715970993042, + false + ] + ], + [ + [ + -4.864443302154541, + false + ] + ] + ], + "filtered_resps": [ + [ + -3.263493776321411, + false + ], + [ + -7.855595588684082, + false + ], + [ + -4.75715970993042, + false + ], + [ + -4.864443302154541, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "dcd247f6043e0192a6a4ab1861e955946c93c6d730b7eb13d08215173969bba0", + "prompt_hash": "1372eb375cb978e2bf04c001594c7612e60f838b77d77a7ae699921ef3940ec3", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 21, + "doc": { + "question": "What type of positional embedding is adopted instead of absolute positional encoding?", + "choices": [ + "Sinusoidal positional encoding", + "Rotary positional embeddings", + "Learned positional embeddings", + "Relative positional embeddings" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What type of positional embedding is adopted instead of absolute positional encoding?\nAnswer:", + " Sinusoidal positional encoding" + ], + [ + "Question:What type of positional embedding is adopted instead of absolute positional encoding?\nAnswer:", + " Rotary positional embeddings" + ], + [ + "Question:What type of positional embedding is adopted instead of absolute positional encoding?\nAnswer:", + " Learned positional embeddings" + ], + [ + "Question:What type of positional embedding is adopted instead of absolute positional encoding?\nAnswer:", + " Relative positional embeddings" + ] + ], + "resps": [ + [ + [ + -12.207653999328613, + false + ] + ], + [ + [ + -13.981444358825684, + false + ] + ], + [ + [ + -14.838812828063965, + false + ] + ], + [ + [ + -9.279513359069824, + false + ] + ] + ], + "filtered_resps": [ + [ + -12.207653999328613, + false + ], + [ + -13.981444358825684, + false + ], + [ + -14.838812828063965, + false + ], + [ + -9.279513359069824, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "c0c0237d53201c05ca5446b5272bb12d88287d6e79e64c304a49e708b209080f", + "prompt_hash": "7e0afa984471c4a497ec57593d7a8f8d51751bad8017b324dfc9344edc4d5123", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 22, + "doc": { + "question": "How many time points does the constructed dataset include after processing?", + "choices": [ + "3 billion", + "30 billion", + "300 billion", + "3 trillion" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many time points does the constructed dataset include after processing?\nAnswer:", + " 3 billion" 
+ ], + [ + "Question:How many time points does the constructed dataset include after processing?\nAnswer:", + " 30 billion" + ], + [ + "Question:How many time points does the constructed dataset include after processing?\nAnswer:", + " 300 billion" + ], + [ + "Question:How many time points does the constructed dataset include after processing?\nAnswer:", + " 3 trillion" + ] + ], + "resps": [ + [ + [ + -16.558652877807617, + false + ] + ], + [ + [ + -17.89537811279297, + false + ] + ], + [ + [ + -18.499900817871094, + false + ] + ], + [ + [ + -16.72484016418457, + false + ] + ] + ], + "filtered_resps": [ + [ + -16.558652877807617, + false + ], + [ + -17.89537811279297, + false + ], + [ + -18.499900817871094, + false + ], + [ + -16.72484016418457, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "226e991f49c1d5fd3206a0a0a278cc3dc98554f228cb4e67b2b2ba2fe49d96c5", + "prompt_hash": "eaa8068334e6738a83830dd69c2d18959f2e120641de642a72f068823ca70be6", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 23, + "doc": { + "question": "Which loss function is used to improve robustness against outliers in training?", + "choices": [ + "Cross-entropy loss", + "MSE loss", + "Huber loss", + "KL-divergence loss" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which loss function is used to improve robustness against outliers in training?\nAnswer:", + " Cross-entropy loss" + ], + [ + "Question:Which loss function is used to improve robustness against outliers in training?\nAnswer:", + " MSE loss" + ], + [ + "Question:Which loss function is used to improve robustness against outliers in training?\nAnswer:", + " Huber loss" + ], + [ + "Question:Which loss function is used to improve robustness against outliers in training?\nAnswer:", + " KL-divergence loss" + ] + ], + "resps": [ + [ + [ + -7.551655292510986, + false + ] + ], + [ + [ + -9.450379371643066, + false + ] + ], + [ + [ + -10.613362312316895, + false + ] + ], + [ + [ + -11.747757911682129, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.551655292510986, + false + ], + [ + -9.450379371643066, + false + ], + [ + -10.613362312316895, + false + ], + [ + -11.747757911682129, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "c528078998f2fb64b3e49b759f52aa67bf869dfc58966f3a6c80b96a53d35824", + "prompt_hash": "228d572ea74fd9a40d2de41cf02d95b737734e128b282417db9a0f171f561923", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 24, + "doc": { + "question": "What is the maximum sequence length used during training?", + "choices": [ + "1024", + "2048", + "4096", + "8192" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the maximum sequence length used during training?\nAnswer:", + " 1024" + ], + [ + "Question:What is the maximum sequence length used during training?\nAnswer:", + " 2048" + ], + [ + "Question:What is the maximum sequence length used during training?\nAnswer:", + " 4096" + ], + [ + "Question:What is the maximum sequence length used during training?\nAnswer:", + " 8192" + ] + ], + "resps": [ + [ + [ + -4.160928726196289, + false + ] + ], + [ + [ + -5.0833048820495605, + false + ] + ], + [ + [ + -5.026993751525879, + false + ] + ], + [ + [ + -5.385982036590576, + false + ] + ] + ], + "filtered_resps": [ + [ + -4.160928726196289, + false 
+ ], + [ + -5.0833048820495605, + false + ], + [ + -5.026993751525879, + false + ], + [ + -5.385982036590576, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "390e1da76812662b20880e121591fd850a1a17f394ad7930d5eb4dff5289720e", + "prompt_hash": "a4432fe7d5f31174a59b3e50b838f427cb5e616f76805fb50d00d7116ba27897", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 25, + "doc": { + "question": "How many Kaggle competitions are included in MLE-bench?", + "choices": [ + "50", + "75", + "100", + "150" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:How many Kaggle competitions are included in MLE-bench?\nAnswer:", + " 50" + ], + [ + "Question:How many Kaggle competitions are included in MLE-bench?\nAnswer:", + " 75" + ], + [ + "Question:How many Kaggle competitions are included in MLE-bench?\nAnswer:", + " 100" + ], + [ + "Question:How many Kaggle competitions are included in MLE-bench?\nAnswer:", + " 150" + ] + ], + "resps": [ + [ + [ + -6.944413185119629, + false + ] + ], + [ + [ + -8.142876625061035, + false + ] + ], + [ + [ + -7.314369201660156, + false + ] + ], + [ + [ + -9.207300186157227, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.944413185119629, + false + ], + [ + -8.142876625061035, + false + ], + [ + -7.314369201660156, + false + ], + [ + -9.207300186157227, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "052584f68f0b2428e10a00891e5d2883b043bf7950c1da756deb1605ef48fc53", + "prompt_hash": "66e4628c81fbdc9d2ae738e93618091ef84479831f56b4c8e0ed12e03c356ae5", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 26, + "doc": { + "question": "What was the total value of prizes awarded across the 75 competitions in MLE-bench?", + "choices": [ + "$1,000,000", + "$1,948,016", + "$2,500,000", + "$5,000,000" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What was the total value of prizes awarded across the 75 competitions in MLE-bench?\nAnswer:", + " $1,000,000" + ], + [ + "Question:What was the total value of prizes awarded across the 75 competitions in MLE-bench?\nAnswer:", + " $1,948,016" + ], + [ + "Question:What was the total value of prizes awarded across the 75 competitions in MLE-bench?\nAnswer:", + " $2,500,000" + ], + [ + "Question:What was the total value of prizes awarded across the 75 competitions in MLE-bench?\nAnswer:", + " $5,000,000" + ] + ], + "resps": [ + [ + [ + -8.74365234375, + false + ] + ], + [ + [ + -20.456871032714844, + false + ] + ], + [ + [ + -10.962664604187012, + false + ] + ], + [ + [ + -10.215169906616211, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.74365234375, + false + ], + [ + -20.456871032714844, + false + ], + [ + -10.962664604187012, + false + ], + [ + -10.215169906616211, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "86ca1ab5e80d755d678b903acba040813918e585858b1b912f6eedb3a3b34d59", + "prompt_hash": "8b7c80469eced0452b5db403fb4d86ea98b8a11306bd639158ddcc4dfeb3140f", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 27, + "doc": { + "question": "Which model with the AIDE scaffold achieved a medal in 16.9% of competitions on MLE-bench?", + "choices": [ + "GPT-4o", + "Claude 3.5 Sonnet", + "o1-preview", + 
"Llama 3.1 405B" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which model with the AIDE scaffold achieved a medal in 16.9% of competitions on MLE-bench?\nAnswer:", + " GPT-4o" + ], + [ + "Question:Which model with the AIDE scaffold achieved a medal in 16.9% of competitions on MLE-bench?\nAnswer:", + " Claude 3.5 Sonnet" + ], + [ + "Question:Which model with the AIDE scaffold achieved a medal in 16.9% of competitions on MLE-bench?\nAnswer:", + " o1-preview" + ], + [ + "Question:Which model with the AIDE scaffold achieved a medal in 16.9% of competitions on MLE-bench?\nAnswer:", + " Llama 3.1 405B" + ] + ], + "resps": [ + [ + [ + -9.823843002319336, + false + ] + ], + [ + [ + -12.214433670043945, + false + ] + ], + [ + [ + -29.94273567199707, + false + ] + ], + [ + [ + -38.722164154052734, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.823843002319336, + false + ], + [ + -12.214433670043945, + false + ], + [ + -29.94273567199707, + false + ], + [ + -38.722164154052734, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "8aa00e51fa2ef433fd813fa8e76efe0379871555e0115b5953bfb69dc498c845", + "prompt_hash": "ae6765ca651d6c2d2afad9f8a23da90dc99b9b8fc63d5780eb2380285524018f", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 28, + "doc": { + "question": "What is the percentage of competitions in MLE-bench categorized as Medium complexity?", + "choices": [ + "20%", + "30%", + "50%", + "70%" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the percentage of competitions in MLE-bench categorized as Medium complexity?\nAnswer:", + " 20%" + ], + [ + "Question:What is the percentage of competitions in MLE-bench categorized as Medium complexity?\nAnswer:", + " 30%" + ], + [ + "Question:What is the percentage of competitions in MLE-bench categorized as Medium complexity?\nAnswer:", + " 50%" + ], + [ + "Question:What is the percentage of competitions in MLE-bench categorized as Medium complexity?\nAnswer:", + " 70%" + ] + ], + "resps": [ + [ + [ + -8.905912399291992, + false + ] + ], + [ + [ + -9.422713279724121, + false + ] + ], + [ + [ + -9.525880813598633, + false + ] + ], + [ + [ + -10.475693702697754, + false + ] + ] + ], + "filtered_resps": [ + [ + -8.905912399291992, + false + ], + [ + -9.422713279724121, + false + ], + [ + -9.525880813598633, + false + ], + [ + -10.475693702697754, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "ce97fde4984d268d82cf472e616e69071fe67efd92522924494fca607ef02b45", + "prompt_hash": "295e3ced57c305763394eac8e83129b55dbc39b4d42cc18b9ba35bbd27ea2297", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 29, + "doc": { + "question": "What type of hardware setup was provided to agents in the main experiments?", + "choices": [ + "8 vCPUs, 128GB RAM, one Nvidia P100 GPU", + "36 vCPUs, 440GB RAM, one Nvidia A10 GPU", + "64 vCPUs, 1TB RAM, two Nvidia A100 GPUs", + "16 vCPUs, 256GB RAM, CPU-only" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What type of hardware setup was provided to agents in the main experiments?\nAnswer:", + " 8 vCPUs, 128GB RAM, one Nvidia P100 GPU" + ], + [ + "Question:What type of hardware setup was provided to agents in the main experiments?\nAnswer:", + " 36 vCPUs, 440GB RAM, one Nvidia A10 GPU" + ], + [ + 
"Question:What type of hardware setup was provided to agents in the main experiments?\nAnswer:", + " 64 vCPUs, 1TB RAM, two Nvidia A100 GPUs" + ], + [ + "Question:What type of hardware setup was provided to agents in the main experiments?\nAnswer:", + " 16 vCPUs, 256GB RAM, CPU-only" + ] + ], + "resps": [ + [ + [ + -39.894012451171875, + false + ] + ], + [ + [ + -50.85301971435547, + false + ] + ], + [ + [ + -34.946414947509766, + false + ] + ], + [ + [ + -34.39673614501953, + false + ] + ] + ], + "filtered_resps": [ + [ + -39.894012451171875, + false + ], + [ + -50.85301971435547, + false + ], + [ + -34.946414947509766, + false + ], + [ + -34.39673614501953, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "324c996d7e72ebec5325fd5337852d942c0db98233ed0d4d48ccca45ecb50ec8", + "prompt_hash": "4cda520c3bf17ff9d3431813d3b315605c015fe02763d0c869cc04b255e6719e", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 30, + "doc": { + "question": "How many text tokens can the Long-prompt MetaCLIP model process?", + "choices": [ + "128", + "256", + "512", + "1024" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:How many text tokens can the Long-prompt MetaCLIP model process?\nAnswer:", + " 128" + ], + [ + "Question:How many text tokens can the Long-prompt MetaCLIP model process?\nAnswer:", + " 256" + ], + [ + "Question:How many text tokens can the Long-prompt MetaCLIP model process?\nAnswer:", + " 512" + ], + [ + "Question:How many text tokens can the Long-prompt MetaCLIP model process?\nAnswer:", + " 1024" + ] + ], + "resps": [ + [ + [ + -5.544123649597168, + false + ] + ], + [ + [ + -5.784742832183838, + false + ] + ], + [ + [ + -5.334044933319092, + false + ] + ], + [ + [ + -5.7418904304504395, + false + ] + ] + ], + "filtered_resps": [ + [ + -5.544123649597168, + false + ], + [ + -5.784742832183838, + false + ], + [ + -5.334044933319092, + false + ], + [ + -5.7418904304504395, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "d1b2ded393a41d5e4100bc8ae2aae64f9edeb103890d6b8b9ff30606131a999e", + "prompt_hash": "7c40cd813ebb5da0e8c000275aae0577ed7e15a0c8036e8bba23fa8fbe9e72f8", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 31, + "doc": { + "question": "Which text encoder is used to encode the text within quotation marks for visual text generation?", + "choices": [ + "BERT", + "ByT5", + "T5-base", + "GPT-2" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which text encoder is used to encode the text within quotation marks for visual text generation?\nAnswer:", + " BERT" + ], + [ + "Question:Which text encoder is used to encode the text within quotation marks for visual text generation?\nAnswer:", + " ByT5" + ], + [ + "Question:Which text encoder is used to encode the text within quotation marks for visual text generation?\nAnswer:", + " T5-base" + ], + [ + "Question:Which text encoder is used to encode the text within quotation marks for visual text generation?\nAnswer:", + " GPT-2" + ] + ], + "resps": [ + [ + [ + -4.217916011810303, + false + ] + ], + [ + [ + -19.261009216308594, + false + ] + ], + [ + [ + -9.65689754486084, + false + ] + ], + [ + [ + -8.248754501342773, + false + ] + ] + ], + "filtered_resps": [ + [ + -4.217916011810303, + false + ], + [ + -19.261009216308594, + false + ], 
+ [ + -9.65689754486084, + false + ], + [ + -8.248754501342773, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "f27c798ae14ce3089bf0f0c00deae0322b0372fa8f26a19b8f78218d3ff0714a", + "prompt_hash": "13d9d8a856a2a64dcbc496b7df5c45a65221315895eb20b594e88ea16556d4a2", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 32, + "doc": { + "question": "What is the context length of the \\u005cOursVideo model mentioned as being memory capacity constrained?", + "choices": [ + "32K", + "64K", + "73K", + "100K" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the context length of the \\u005cOursVideo model mentioned as being memory capacity constrained?\nAnswer:", + " 32K" + ], + [ + "Question:What is the context length of the \\u005cOursVideo model mentioned as being memory capacity constrained?\nAnswer:", + " 64K" + ], + [ + "Question:What is the context length of the \\u005cOursVideo model mentioned as being memory capacity constrained?\nAnswer:", + " 73K" + ], + [ + "Question:What is the context length of the \\u005cOursVideo model mentioned as being memory capacity constrained?\nAnswer:", + " 100K" + ] + ], + "resps": [ + [ + [ + -6.79046106338501, + false + ] + ], + [ + [ + -6.787683010101318, + false + ] + ], + [ + [ + -13.908809661865234, + false + ] + ], + [ + [ + -8.865966796875, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.79046106338501, + false + ], + [ + -6.787683010101318, + false + ], + [ + -13.908809661865234, + false + ], + [ + -8.865966796875, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "809bdf95a7d2b2e018745fc5b2d5201b7de5809c2a97ce5d890bfeeeed94d5c3", + "prompt_hash": "03e62a79c165dedef8a425e117dcebd7ed4031dd7561a4ae8a557b510b8eeed9", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 33, + "doc": { + "question": "Which optimizer is used for both pre-training and fine-tuning of the 13B parameter model?", + "choices": [ + "SGD", + "Adam", + "AdamW", + "RMSProp" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which optimizer is used for both pre-training and fine-tuning of the 13B parameter model?\nAnswer:", + " SGD" + ], + [ + "Question:Which optimizer is used for both pre-training and fine-tuning of the 13B parameter model?\nAnswer:", + " Adam" + ], + [ + "Question:Which optimizer is used for both pre-training and fine-tuning of the 13B parameter model?\nAnswer:", + " AdamW" + ], + [ + "Question:Which optimizer is used for both pre-training and fine-tuning of the 13B parameter model?\nAnswer:", + " RMSProp" + ] + ], + "resps": [ + [ + [ + -7.288270950317383, + false + ] + ], + [ + [ + -3.32779598236084, + false + ] + ], + [ + [ + -3.610401153564453, + false + ] + ], + [ + [ + -9.468937873840332, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.288270950317383, + false + ], + [ + -3.32779598236084, + false + ], + [ + -3.610401153564453, + false + ], + [ + -9.468937873840332, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "99c196e8e4c7409c2b7aa9f30d47df15ca1b0c7a722cbbc15a613bfd9dd5aef3", + "prompt_hash": "43cdf1515da7e9b1eb00b02198d7d783e08e0881430e1f3c47504dd92e08ae11", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 34, 
+ "doc": { + "question": "What is the audio sampling rate represented in the DAC-VAE latent features?", + "choices": [ + "16kHz", + "24kHz", + "32kHz", + "48kHz" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What is the audio sampling rate represented in the DAC-VAE latent features?\nAnswer:", + " 16kHz" + ], + [ + "Question:What is the audio sampling rate represented in the DAC-VAE latent features?\nAnswer:", + " 24kHz" + ], + [ + "Question:What is the audio sampling rate represented in the DAC-VAE latent features?\nAnswer:", + " 32kHz" + ], + [ + "Question:What is the audio sampling rate represented in the DAC-VAE latent features?\nAnswer:", + " 48kHz" + ] + ], + "resps": [ + [ + [ + -5.249194145202637, + false + ] + ], + [ + [ + -6.91918420791626, + false + ] + ], + [ + [ + -7.8241071701049805, + false + ] + ], + [ + [ + -5.517940521240234, + false + ] + ] + ], + "filtered_resps": [ + [ + -5.249194145202637, + false + ], + [ + -6.91918420791626, + false + ], + [ + -7.8241071701049805, + false + ], + [ + -5.517940521240234, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "145ccfd7bc07648de249cf0b46076373042acd5df8adc434331dc87be75f34bd", + "prompt_hash": "55567af4aa8ff2edd11efac29095b85b6d1f930db9afcbb8f368d9b15c592253", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 35, + "doc": { + "question": "What does the 'point-wise' input format for LLM-as-a-judge involve?", + "choices": [ + "Comparing multiple candidates simultaneously", + "Evaluating a single candidate in isolation", + "Ranking a list of outputs", + "Generating multiple outputs from one input" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What does the 'point-wise' input format for LLM-as-a-judge involve?\nAnswer:", + " Comparing multiple candidates simultaneously" + ], + [ + "Question:What does the 'point-wise' input format for LLM-as-a-judge involve?\nAnswer:", + " Evaluating a single candidate in isolation" + ], + [ + "Question:What does the 'point-wise' input format for LLM-as-a-judge involve?\nAnswer:", + " Ranking a list of outputs" + ], + [ + "Question:What does the 'point-wise' input format for LLM-as-a-judge involve?\nAnswer:", + " Generating multiple outputs from one input" + ] + ], + "resps": [ + [ + [ + -27.684053421020508, + false + ] + ], + [ + [ + -25.727405548095703, + false + ] + ], + [ + [ + -24.2668399810791, + false + ] + ], + [ + [ + -20.393577575683594, + false + ] + ] + ], + "filtered_resps": [ + [ + -27.684053421020508, + false + ], + [ + -25.727405548095703, + false + ], + [ + -24.2668399810791, + false + ], + [ + -20.393577575683594, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "5147b0a7b504381ff4f451965e9cb04f6545f215720ef31c8c0c3afdcd189f9a", + "prompt_hash": "da8f6cedd16b324786b50aa5729cb61fb27c4084eee4aaeb83c4c531386cf529", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 36, + "doc": { + "question": "Which metric is used in JudgeBench to evaluate general performance?", + "choices": [ + "Discernment Score", + "Accuracy and F1", + "Cohen’s kappa and Correlation", + "BrierScore" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which metric is used in JudgeBench to evaluate general performance?\nAnswer:", + " Discernment Score" + ], + [ + "Question:Which metric 
is used in JudgeBench to evaluate general performance?\nAnswer:", + " Accuracy and F1" + ], + [ + "Question:Which metric is used in JudgeBench to evaluate general performance?\nAnswer:", + " Cohen’s kappa and Correlation" + ], + [ + "Question:Which metric is used in JudgeBench to evaluate general performance?\nAnswer:", + " BrierScore" + ] + ], + "resps": [ + [ + [ + -19.43678855895996, + false + ] + ], + [ + [ + -9.403539657592773, + false + ] + ], + [ + [ + -24.37989044189453, + false + ] + ], + [ + [ + -14.429201126098633, + false + ] + ] + ], + "filtered_resps": [ + [ + -19.43678855895996, + false + ], + [ + -9.403539657592773, + false + ], + [ + -24.37989044189453, + false + ], + [ + -14.429201126098633, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "c853498ca30f12dca012b59e7d6af17ed62c08019674f5677eb5aa99e4380064", + "prompt_hash": "63b034eedcf0a182cce93896fb71599c6b54dafef9ed34606e8cefe024d3461d", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 37, + "doc": { + "question": "What is the main idea behind the 'swapping operation' in LLM-as-a-judge prompting?", + "choices": [ + "Using different LLMs for comparison", + "Generating multiple candidate answers for diversity", + "Swapping the order of candidates to reduce positional bias", + "Embedding rule-based instructions inside prompts" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the main idea behind the 'swapping operation' in LLM-as-a-judge prompting?\nAnswer:", + " Using different LLMs for comparison" + ], + [ + "Question:What is the main idea behind the 'swapping operation' in LLM-as-a-judge prompting?\nAnswer:", + " Generating multiple candidate answers for diversity" + ], + [ + "Question:What is the main idea behind the 'swapping operation' in LLM-as-a-judge prompting?\nAnswer:", + " Swapping the order of candidates to reduce positional bias" + ], + [ + "Question:What is the main idea behind the 'swapping operation' in LLM-as-a-judge prompting?\nAnswer:", + " Embedding rule-based instructions inside prompts" + ] + ], + "resps": [ + [ + [ + -24.063907623291016, + false + ] + ], + [ + [ + -30.256454467773438, + false + ] + ], + [ + [ + -36.72766876220703, + false + ] + ], + [ + [ + -35.9759521484375, + false + ] + ] + ], + "filtered_resps": [ + [ + -24.063907623291016, + false + ], + [ + -30.256454467773438, + false + ], + [ + -36.72766876220703, + false + ], + [ + -35.9759521484375, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "fef50f0d1b80245b2d719dd1ea8316f6f2fa1b8e3658ab61fbc1666932a1dbb7", + "prompt_hash": "c17a750fee73b52ead083b0ff2525ee5d3fe394f6cf9437c474f352bc31ba3c6", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 38, + "doc": { + "question": "Which tuning method is most commonly used to train judge LLMs?", + "choices": [ + "Prompt-tuning", + "Supervised fine-tuning", + "Reinforcement learning", + "Zero-shot learning" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which tuning method is most commonly used to train judge LLMs?\nAnswer:", + " Prompt-tuning" + ], + [ + "Question:Which tuning method is most commonly used to train judge LLMs?\nAnswer:", + " Supervised fine-tuning" + ], + [ + "Question:Which tuning method is most commonly used to train judge LLMs?\nAnswer:", + " Reinforcement 
learning" + ], + [ + "Question:Which tuning method is most commonly used to train judge LLMs?\nAnswer:", + " Zero-shot learning" + ] + ], + "resps": [ + [ + [ + -11.313997268676758, + false + ] + ], + [ + [ + -7.638260364532471, + false + ] + ], + [ + [ + -7.296088695526123, + false + ] + ], + [ + [ + -8.118300437927246, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.313997268676758, + false + ], + [ + -7.638260364532471, + false + ], + [ + -7.296088695526123, + false + ], + [ + -8.118300437927246, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "2d66761160ed9f7951b51315e9b16a0cc365c2b1c8d8acfea1a973d619089c8b", + "prompt_hash": "80ec918f8a736022d5913e2c075a35cec28d2e7842abff0ca5435fe849afa940", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 39, + "doc": { + "question": "Which application domain uses LLM-as-a-judge to filter noisy or irrelevant knowledge in a sub-knowledge graph?", + "choices": [ + "Summarization", + "Dialogue evaluation", + "Retrieval-Augmented Generation", + "Legal reasoning" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which application domain uses LLM-as-a-judge to filter noisy or irrelevant knowledge in a sub-knowledge graph?\nAnswer:", + " Summarization" + ], + [ + "Question:Which application domain uses LLM-as-a-judge to filter noisy or irrelevant knowledge in a sub-knowledge graph?\nAnswer:", + " Dialogue evaluation" + ], + [ + "Question:Which application domain uses LLM-as-a-judge to filter noisy or irrelevant knowledge in a sub-knowledge graph?\nAnswer:", + " Retrieval-Augmented Generation" + ], + [ + "Question:Which application domain uses LLM-as-a-judge to filter noisy or irrelevant knowledge in a sub-knowledge graph?\nAnswer:", + " Legal reasoning" + ] + ], + "resps": [ + [ + [ + -9.570253372192383, + false + ] + ], + [ + [ + -17.797739028930664, + false + ] + ], + [ + [ + -7.8971781730651855, + false + ] + ], + [ + [ + -11.672818183898926, + false + ] + ] + ], + "filtered_resps": [ + [ + -9.570253372192383, + false + ], + [ + -17.797739028930664, + false + ], + [ + -7.8971781730651855, + false + ], + [ + -11.672818183898926, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "51fc77b030408ed6f676f75a626b6f57c63b81eb0a4af3ac720bb5e1cc86a199", + "prompt_hash": "faffe2aa09e2f10dfef227dcff03f205a24d025d756646b3b54740f5aba52296", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 40, + "doc": { + "question": "What is the total number of parameters in DeepSeek-V2-8 (dsviii)?", + "choices": [ + "175B", + "371B", + "671B", + "870B" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the total number of parameters in DeepSeek-V2-8 (dsviii)?\nAnswer:", + " 175B" + ], + [ + "Question:What is the total number of parameters in DeepSeek-V2-8 (dsviii)?\nAnswer:", + " 371B" + ], + [ + "Question:What is the total number of parameters in DeepSeek-V2-8 (dsviii)?\nAnswer:", + " 671B" + ], + [ + "Question:What is the total number of parameters in DeepSeek-V2-8 (dsviii)?\nAnswer:", + " 870B" + ] + ], + "resps": [ + [ + [ + -13.916131973266602, + false + ] + ], + [ + [ + -15.847789764404297, + false + ] + ], + [ + [ + -17.21529197692871, + false + ] + ], + [ + [ + -16.59684944152832, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.916131973266602, + false + 
], + [ + -15.847789764404297, + false + ], + [ + -17.21529197692871, + false + ], + [ + -16.59684944152832, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7ea93a402030f3c4d9a8bd084d2d28d49a5095123c71e757e71f66ecedf9603c", + "prompt_hash": "b37ad914f970199ece9b60eb37b1e500e5d90430547fb149a786ca0a91a68199", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 41, + "doc": { + "question": "How many activated parameters are used per token in dsviii?", + "choices": [ + "13B", + "25B", + "37B", + "50B" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:How many activated parameters are used per token in dsviii?\nAnswer:", + " 13B" + ], + [ + "Question:How many activated parameters are used per token in dsviii?\nAnswer:", + " 25B" + ], + [ + "Question:How many activated parameters are used per token in dsviii?\nAnswer:", + " 37B" + ], + [ + "Question:How many activated parameters are used per token in dsviii?\nAnswer:", + " 50B" + ] + ], + "resps": [ + [ + [ + -13.25839614868164, + false + ] + ], + [ + [ + -14.30405330657959, + false + ] + ], + [ + [ + -14.241345405578613, + false + ] + ], + [ + [ + -14.088238716125488, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.25839614868164, + false + ], + [ + -14.30405330657959, + false + ], + [ + -14.241345405578613, + false + ], + [ + -14.088238716125488, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "ad3719a19744495dfb9c5a1ecc6df61a3cde9eb7114c6c9e959570b322ac1353", + "prompt_hash": "1cc12d4fcb1c309957d1f70fb0e1501acb19e2992f602b1462bd4d6a4cc76475", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 42, + "doc": { + "question": "What data format is used to track the first and second moments in the AdamW optimizer in the FP8 training framework?", + "choices": [ + "FP32", + "BF16", + "INT8", + "E5M2" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What data format is used to track the first and second moments in the AdamW optimizer in the FP8 training framework?\nAnswer:", + " FP32" + ], + [ + "Question:What data format is used to track the first and second moments in the AdamW optimizer in the FP8 training framework?\nAnswer:", + " BF16" + ], + [ + "Question:What data format is used to track the first and second moments in the AdamW optimizer in the FP8 training framework?\nAnswer:", + " INT8" + ], + [ + "Question:What data format is used to track the first and second moments in the AdamW optimizer in the FP8 training framework?\nAnswer:", + " E5M2" + ] + ], + "resps": [ + [ + [ + -6.028921127319336, + false + ] + ], + [ + [ + -12.088250160217285, + false + ] + ], + [ + [ + -8.075591087341309, + false + ] + ], + [ + [ + -19.291461944580078, + false + ] + ] + ], + "filtered_resps": [ + [ + -6.028921127319336, + false + ], + [ + -12.088250160217285, + false + ], + [ + -8.075591087341309, + false + ], + [ + -19.291461944580078, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "197e98fe296eb5a9e46206a3581a73f46176e34978688b86540fe1073a41a6bd", + "prompt_hash": "628799f35bbdf1f99ea4cc59b4c1b981c83059334376dc8a4d09a3fe0f1fa7ea", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 43, + "doc": { + "question": "Which 
components are retained in higher precision for stability in the FP8 mixed precision framework?", + "choices": [ + "GEMM operations, SwiGLU, and MTP modules", + "Embedding module, output head, MoE gating modules, normalization operators, and attention operators", + "Only weight gradients and optimizer states", + "Only the attention operators and SwiGLU" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:Which components are retained in higher precision for stability in the FP8 mixed precision framework?\nAnswer:", + " GEMM operations, SwiGLU, and MTP modules" + ], + [ + "Question:Which components are retained in higher precision for stability in the FP8 mixed precision framework?\nAnswer:", + " Embedding module, output head, MoE gating modules, normalization operators, and attention operators" + ], + [ + "Question:Which components are retained in higher precision for stability in the FP8 mixed precision framework?\nAnswer:", + " Only weight gradients and optimizer states" + ], + [ + "Question:Which components are retained in higher precision for stability in the FP8 mixed precision framework?\nAnswer:", + " Only the attention operators and SwiGLU" + ] + ], + "resps": [ + [ + [ + -47.497535705566406, + false + ] + ], + [ + [ + -75.67779541015625, + false + ] + ], + [ + [ + -19.668460845947266, + false + ] + ], + [ + [ + -29.46173858642578, + false + ] + ] + ], + "filtered_resps": [ + [ + -47.497535705566406, + false + ], + [ + -75.67779541015625, + false + ], + [ + -19.668460845947266, + false + ], + [ + -29.46173858642578, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "324ca988bc896486250ce24ea2d7deaaf1c860ef1c1c34b1197fdbedb6e9355f", + "prompt_hash": "e371a9fdd26fb76793b01d971a20d8b4eb8c18293ae2002a4bdb34f2a2f5ff34", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 44, + "doc": { + "question": "What is the accumulation interval (Nc) set for improving FP8 GEMM precision in the quantization strategy?", + "choices": [ + "32", + "64", + "96", + "128" + ], + "answer": 3 + }, + "target": 3, + "arguments": [ + [ + "Question:What is the accumulation interval (Nc) set for improving FP8 GEMM precision in the quantization strategy?\nAnswer:", + " 32" + ], + [ + "Question:What is the accumulation interval (Nc) set for improving FP8 GEMM precision in the quantization strategy?\nAnswer:", + " 64" + ], + [ + "Question:What is the accumulation interval (Nc) set for improving FP8 GEMM precision in the quantization strategy?\nAnswer:", + " 96" + ], + [ + "Question:What is the accumulation interval (Nc) set for improving FP8 GEMM precision in the quantization strategy?\nAnswer:", + " 128" + ] + ], + "resps": [ + [ + [ + -4.304234027862549, + false + ] + ], + [ + [ + -4.729826927185059, + false + ] + ], + [ + [ + -6.842705249786377, + false + ] + ], + [ + [ + -5.4878153800964355, + false + ] + ] + ], + "filtered_resps": [ + [ + -4.304234027862549, + false + ], + [ + -4.729826927185059, + false + ], + [ + -6.842705249786377, + false + ], + [ + -5.4878153800964355, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "5801dde4164274339b30b0390dccd2dedbf00432977312be951e0e4248214ae1", + "prompt_hash": "6e5988a5ae8b3c538c504ef5add7c7e2a8d705df79e155423594dc5f698a7190", + "target_hash": "4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 45, + "doc": { 
+ "question": "What percentage more tokens did o1-like models consume compared to conventional models to answer 'what is the answer of 2 plus 3?'", + "choices": [ + "1953%", + "953%", + "305%", + "120%" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What percentage more tokens did o1-like models consume compared to conventional models to answer 'what is the answer of 2 plus 3?'\nAnswer:", + " 1953%" + ], + [ + "Question:What percentage more tokens did o1-like models consume compared to conventional models to answer 'what is the answer of 2 plus 3?'\nAnswer:", + " 953%" + ], + [ + "Question:What percentage more tokens did o1-like models consume compared to conventional models to answer 'what is the answer of 2 plus 3?'\nAnswer:", + " 305%" + ], + [ + "Question:What percentage more tokens did o1-like models consume compared to conventional models to answer 'what is the answer of 2 plus 3?'\nAnswer:", + " 120%" + ] + ], + "resps": [ + [ + [ + -20.174894332885742, + false + ] + ], + [ + [ + -19.59738540649414, + false + ] + ], + [ + [ + -18.090007781982422, + false + ] + ], + [ + [ + -13.911893844604492, + false + ] + ] + ], + "filtered_resps": [ + [ + -20.174894332885742, + false + ], + [ + -19.59738540649414, + false + ], + [ + -18.090007781982422, + false + ], + [ + -13.911893844604492, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "0b0e6779100e4b9673f74cc0c39a40f404d64451eac6844048e1f6384aa72049", + "prompt_hash": "8f0f3344016101019ae1762b909f868ea45e0cd39f37f0d6bc0e4c46e08c21f8", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 46, + "doc": { + "question": "What is the average number of solutions generated by QwQ-32B-Preview on the ASDIV test set?", + "choices": [ + "3.5", + "3.2", + "4.3", + "2.8" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the average number of solutions generated by QwQ-32B-Preview on the ASDIV test set?\nAnswer:", + " 3.5" + ], + [ + "Question:What is the average number of solutions generated by QwQ-32B-Preview on the ASDIV test set?\nAnswer:", + " 3.2" + ], + [ + "Question:What is the average number of solutions generated by QwQ-32B-Preview on the ASDIV test set?\nAnswer:", + " 4.3" + ], + [ + "Question:What is the average number of solutions generated by QwQ-32B-Preview on the ASDIV test set?\nAnswer:", + " 2.8" + ] + ], + "resps": [ + [ + [ + -7.472996711730957, + false + ] + ], + [ + [ + -7.623888969421387, + false + ] + ], + [ + [ + -8.0897798538208, + false + ] + ], + [ + [ + -7.952129364013672, + false + ] + ] + ], + "filtered_resps": [ + [ + -7.472996711730957, + false + ], + [ + -7.623888969421387, + false + ], + [ + -8.0897798538208, + false + ], + [ + -7.952129364013672, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "ec0a559a5ff4a1a43d020e7510901163af232b1965edfa2dbb194944d45948b5", + "prompt_hash": "6e995540d3062269c2768fb3c72f748a83b71ad6886e28f838f025c64b8dccb0", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 47, + "doc": { + "question": "Which test set consists of high school math competition problems with difficulty levels based on AoPS?", + "choices": [ + "MATH500", + "ASDIV", + "GSM8K", + "GPQA" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which test set consists of high school math competition 
problems with difficulty levels based on AoPS?\nAnswer:", + " MATH500" + ], + [ + "Question:Which test set consists of high school math competition problems with difficulty levels based on AoPS?\nAnswer:", + " ASDIV" + ], + [ + "Question:Which test set consists of high school math competition problems with difficulty levels based on AoPS?\nAnswer:", + " GSM8K" + ], + [ + "Question:Which test set consists of high school math competition problems with difficulty levels based on AoPS?\nAnswer:", + " GPQA" + ] + ], + "resps": [ + [ + [ + -13.954486846923828, + false + ] + ], + [ + [ + -23.078916549682617, + false + ] + ], + [ + [ + -6.7538533210754395, + false + ] + ], + [ + [ + -12.076217651367188, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.954486846923828, + false + ], + [ + -23.078916549682617, + false + ], + [ + -6.7538533210754395, + false + ], + [ + -12.076217651367188, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "b6db1da4a0557810aa7d2562fa4c6005c641bb0b5dfedee7da28d5f2d63a3223", + "prompt_hash": "37cdc3dc374f3260b0a938b7a88d7e61032286a9ad6c759c43f393db2300b36c", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 48, + "doc": { + "question": "What is the process efficiency score of QwQ-32B-Preview on the MATH500 dataset?", + "choices": [ + "71.2%", + "66.5%", + "100%", + "52.3%" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:What is the process efficiency score of QwQ-32B-Preview on the MATH500 dataset?\nAnswer:", + " 71.2%" + ], + [ + "Question:What is the process efficiency score of QwQ-32B-Preview on the MATH500 dataset?\nAnswer:", + " 66.5%" + ], + [ + "Question:What is the process efficiency score of QwQ-32B-Preview on the MATH500 dataset?\nAnswer:", + " 100%" + ], + [ + "Question:What is the process efficiency score of QwQ-32B-Preview on the MATH500 dataset?\nAnswer:", + " 52.3%" + ] + ], + "resps": [ + [ + [ + -15.457481384277344, + false + ] + ], + [ + [ + -15.54973030090332, + false + ] + ], + [ + [ + -12.790609359741211, + false + ] + ], + [ + [ + -15.633562088012695, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.457481384277344, + false + ], + [ + -15.54973030090332, + false + ], + [ + -12.790609359741211, + false + ], + [ + -15.633562088012695, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "2ad217e842103818fb058ca928915af562c3d32d97403ba2a58aff078ac2dfc9", + "prompt_hash": "1c05afa023d5534e9a4adb5ed1e77790c1f49f9191b7727c9bc7f3ba92500f1b", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 1.0 + }, + { + "doc_id": 49, + "doc": { + "question": "Which training method modifies the DPO loss by adding a negative log-likelihood term on the preferred response?", + "choices": [ + "Reasoning Preference Optimization", + "Direct Preference Optimization", + "Simple Preference Optimization", + "Supervised Fine-Tuning" + ], + "answer": 0 + }, + "target": 0, + "arguments": [ + [ + "Question:Which training method modifies the DPO loss by adding a negative log-likelihood term on the preferred response?\nAnswer:", + " Reasoning Preference Optimization" + ], + [ + "Question:Which training method modifies the DPO loss by adding a negative log-likelihood term on the preferred response?\nAnswer:", + " Direct Preference Optimization" + ], + [ + "Question:Which training method modifies the DPO loss by adding a 
negative log-likelihood term on the preferred response?\nAnswer:", + " Simple Preference Optimization" + ], + [ + "Question:Which training method modifies the DPO loss by adding a negative log-likelihood term on the preferred response?\nAnswer:", + " Supervised Fine-Tuning" + ] + ], + "resps": [ + [ + [ + -21.339204788208008, + false + ] + ], + [ + [ + -7.6230621337890625, + false + ] + ], + [ + [ + -15.905888557434082, + false + ] + ], + [ + [ + -11.230256080627441, + false + ] + ] + ], + "filtered_resps": [ + [ + -21.339204788208008, + false + ], + [ + -7.6230621337890625, + false + ], + [ + -15.905888557434082, + false + ], + [ + -11.230256080627441, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "ae5156c48bff3c2947d892d0104a321c3b910ae6797d6036673ab01f7b04a690", + "prompt_hash": "0a3df59b66b51a90ee60d83f63e77576fba1d1f9cb6476ca070010f130515783", + "target_hash": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 50, + "doc": { + "question": "What is the main purpose of the underthinking metric introduced in the study?", + "choices": [ + "To evaluate model training speed", + "To measure token efficiency in incorrect responses", + "To calculate the accuracy of correct answers", + "To compare different datasets' difficulty levels" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the main purpose of the underthinking metric introduced in the study?\nAnswer:", + " To evaluate model training speed" + ], + [ + "Question:What is the main purpose of the underthinking metric introduced in the study?\nAnswer:", + " To measure token efficiency in incorrect responses" + ], + [ + "Question:What is the main purpose of the underthinking metric introduced in the study?\nAnswer:", + " To calculate the accuracy of correct answers" + ], + [ + "Question:What is the main purpose of the underthinking metric introduced in the study?\nAnswer:", + " To compare different datasets' difficulty levels" + ] + ], + "resps": [ + [ + [ + -20.352832794189453, + false + ] + ], + [ + [ + -38.05958557128906, + false + ] + ], + [ + [ + -21.12741470336914, + false + ] + ], + [ + [ + -26.977842330932617, + false + ] + ] + ], + "filtered_resps": [ + [ + -20.352832794189453, + false + ], + [ + -38.05958557128906, + false + ], + [ + -21.12741470336914, + false + ], + [ + -26.977842330932617, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "a6f3c5261b5413457e10134ff2c03446b0836d09ab936070b0813af4d21110cc", + "prompt_hash": "b0adf48617c122abcbd12087b5da586115198df7e7317189a6afbd3bafe260aa", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 51, + "doc": { + "question": "Which of the following models was evaluated only on MATH500 and AIME test sets due to daily message limits?", + "choices": [ + "QwQ-32B-Preview", + "DeepSeek-R1-671B", + "DeepSeek-R1-Preview", + "DeepSeek-R1-Distill-Qwen-32B" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which of the following models was evaluated only on MATH500 and AIME test sets due to daily message limits?\nAnswer:", + " QwQ-32B-Preview" + ], + [ + "Question:Which of the following models was evaluated only on MATH500 and AIME test sets due to daily message limits?\nAnswer:", + " DeepSeek-R1-671B" + ], + [ + "Question:Which of the following models was evaluated only on MATH500 and AIME test sets 
due to daily message limits?\nAnswer:", + " DeepSeek-R1-Preview" + ], + [ + "Question:Which of the following models was evaluated only on MATH500 and AIME test sets due to daily message limits?\nAnswer:", + " DeepSeek-R1-Distill-Qwen-32B" + ] + ], + "resps": [ + [ + [ + -36.87464904785156, + false + ] + ], + [ + [ + -45.31444549560547, + false + ] + ], + [ + [ + -40.90092468261719, + false + ] + ], + [ + [ + -55.350929260253906, + false + ] + ] + ], + "filtered_resps": [ + [ + -36.87464904785156, + false + ], + [ + -45.31444549560547, + false + ], + [ + -40.90092468261719, + false + ], + [ + -55.350929260253906, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "7404b2b8cef2fead98865f72bf451d6f363b7078b7f0400602b9a84af2932597", + "prompt_hash": "45139f45e167354a848f1d29a32181af1ba20827585568989c1f658e076971ea", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 52, + "doc": { + "question": "What is the role of the penalty strength parameter α in the Tip decoding method?", + "choices": [ + "To determine the number of reasoning thoughts", + "To control the penalty duration applied to tokens", + "To reduce the likelihood of thought-switching tokens", + "To adjust the token length for correct answers" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What is the role of the penalty strength parameter α in the Tip decoding method?\nAnswer:", + " To determine the number of reasoning thoughts" + ], + [ + "Question:What is the role of the penalty strength parameter α in the Tip decoding method?\nAnswer:", + " To control the penalty duration applied to tokens" + ], + [ + "Question:What is the role of the penalty strength parameter α in the Tip decoding method?\nAnswer:", + " To reduce the likelihood of thought-switching tokens" + ], + [ + "Question:What is the role of the penalty strength parameter α in the Tip decoding method?\nAnswer:", + " To adjust the token length for correct answers" + ] + ], + "resps": [ + [ + [ + -30.340669631958008, + false + ] + ], + [ + [ + -34.11429214477539, + false + ] + ], + [ + [ + -43.95995330810547, + false + ] + ], + [ + [ + -37.085777282714844, + false + ] + ] + ], + "filtered_resps": [ + [ + -30.340669631958008, + false + ], + [ + -34.11429214477539, + false + ], + [ + -43.95995330810547, + false + ], + [ + -37.085777282714844, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "e43c3398bd1490fd4441f8fea9482409b851f3a08624aa7467c36115685d6565", + "prompt_hash": "6fc20179bce03abad0beb9260800aee68a16fcd61d29bc013e831cf94a82478c", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 53, + "doc": { + "question": "In the underthinking score formula, what does a higher value of ξ_UT indicate?", + "choices": [ + "Greater token efficiency", + "Lower token efficiency", + "Shorter reasoning chains", + "Fewer reasoning thoughts" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:In the underthinking score formula, what does a higher value of ξ_UT indicate?\nAnswer:", + " Greater token efficiency" + ], + [ + "Question:In the underthinking score formula, what does a higher value of ξ_UT indicate?\nAnswer:", + " Lower token efficiency" + ], + [ + "Question:In the underthinking score formula, what does a higher value of ξ_UT indicate?\nAnswer:", + " Shorter reasoning chains" + ], + 
[ + "Question:In the underthinking score formula, what does a higher value of ξ_UT indicate?\nAnswer:", + " Fewer reasoning thoughts" + ] + ], + "resps": [ + [ + [ + -25.667449951171875, + false + ] + ], + [ + [ + -21.466705322265625, + false + ] + ], + [ + [ + -23.393604278564453, + false + ] + ], + [ + [ + -25.824403762817383, + false + ] + ] + ], + "filtered_resps": [ + [ + -25.667449951171875, + false + ], + [ + -21.466705322265625, + false + ], + [ + -23.393604278564453, + false + ], + [ + -25.824403762817383, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "5a2f46c94c1ba3bcb2613a2f231ef9e9fc8a6ec762be09de290a5cf17cc5b670", + "prompt_hash": "f677ecf5c9cdfa65b5fd4869c3dd0ed700ba59daa188fc96c96db5f1b2b011ac", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 1.0, + "acc_norm": 0.0 + }, + { + "doc_id": 54, + "doc": { + "question": "Which prompt-based strategy showed only modest improvements in comparison to the Tip decoding method?", + "choices": [ + "Laconic Decoding", + "Self-Consistency Sampling", + "Prompt Engineering", + "Token Filtering" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which prompt-based strategy showed only modest improvements in comparison to the Tip decoding method?\nAnswer:", + " Laconic Decoding" + ], + [ + "Question:Which prompt-based strategy showed only modest improvements in comparison to the Tip decoding method?\nAnswer:", + " Self-Consistency Sampling" + ], + [ + "Question:Which prompt-based strategy showed only modest improvements in comparison to the Tip decoding method?\nAnswer:", + " Prompt Engineering" + ], + [ + "Question:Which prompt-based strategy showed only modest improvements in comparison to the Tip decoding method?\nAnswer:", + " Token Filtering" + ] + ], + "resps": [ + [ + [ + -21.468769073486328, + false + ] + ], + [ + [ + -18.396778106689453, + false + ] + ], + [ + [ + -12.113412857055664, + false + ] + ], + [ + [ + -16.382930755615234, + false + ] + ] + ], + "filtered_resps": [ + [ + -21.468769073486328, + false + ], + [ + -18.396778106689453, + false + ], + [ + -12.113412857055664, + false + ], + [ + -16.382930755615234, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "d8d69259421589a9881ad09598da0caf91ccd7c26ec1c2758b0c6c862ae81689", + "prompt_hash": "5a1b3dc3f8c0d7848aef3f588f05c7c36fe4faf51cf3a53465878666ef351a49", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 1.0, + "acc_norm": 1.0 + }, + { + "doc_id": 55, + "doc": { + "question": "What is the reported improvement in sample efficiency when using PRIME compared to RL using only outcome rewards?", + "choices": [ + "1.5×", + "2.5×", + "4.0×", + "6.9×" + ], + "answer": 1 + }, + "target": 1, + "arguments": [ + [ + "Question:What is the reported improvement in sample efficiency when using PRIME compared to RL using only outcome rewards?\nAnswer:", + " 1.5×" + ], + [ + "Question:What is the reported improvement in sample efficiency when using PRIME compared to RL using only outcome rewards?\nAnswer:", + " 2.5×" + ], + [ + "Question:What is the reported improvement in sample efficiency when using PRIME compared to RL using only outcome rewards?\nAnswer:", + " 4.0×" + ], + [ + "Question:What is the reported improvement in sample efficiency when using PRIME compared to RL using only outcome rewards?\nAnswer:", + " 6.9×" + ] + ], + "resps": [ + [ + [ + -13.429561614990234, + false + ] + ], + 
[ + [ + -14.082334518432617, + false + ] + ], + [ + [ + -15.574737548828125, + false + ] + ], + [ + [ + -17.069332122802734, + false + ] + ] + ], + "filtered_resps": [ + [ + -13.429561614990234, + false + ], + [ + -14.082334518432617, + false + ], + [ + -15.574737548828125, + false + ], + [ + -17.069332122802734, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "ce050f483e2d1900645d7aa208c21d9f86fb6ddef6e214eda733bad133756a9c", + "prompt_hash": "7256d5a3a7e1b82502e80fc9bf58c2bd8a01a4c7a3aa9a50a998d1a882e261a0", + "target_hash": "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 56, + "doc": { + "question": "Which model serves as the starting point for training in the PRIME framework?", + "choices": [ + "GPT-4o", + "LLaMA-3.1-70B-Instruct", + "Qwen2.5-Math-7B-Base", + "DeepSeek-R1" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:Which model serves as the starting point for training in the PRIME framework?\nAnswer:", + " GPT-4o" + ], + [ + "Question:Which model serves as the starting point for training in the PRIME framework?\nAnswer:", + " LLaMA-3.1-70B-Instruct" + ], + [ + "Question:Which model serves as the starting point for training in the PRIME framework?\nAnswer:", + " Qwen2.5-Math-7B-Base" + ], + [ + "Question:Which model serves as the starting point for training in the PRIME framework?\nAnswer:", + " DeepSeek-R1" + ] + ], + "resps": [ + [ + [ + -11.462921142578125, + false + ] + ], + [ + [ + -31.877954483032227, + false + ] + ], + [ + [ + -40.35797119140625, + false + ] + ], + [ + [ + -25.913959503173828, + false + ] + ] + ], + "filtered_resps": [ + [ + -11.462921142578125, + false + ], + [ + -31.877954483032227, + false + ], + [ + -40.35797119140625, + false + ], + [ + -25.913959503173828, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "783695e7dd24e82177f27d12e81b48b130250480bd512fbc0859b8e43722cd79", + "prompt_hash": "629ea5c8e6c1a46907d0960aa86fcc9c372b377e4dfca59fc6bac98042f1fabe", + "target_hash": "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 57, + "doc": { + "question": "What evaluation score did Eurus-2-7B-PRIME achieve on AIME 2024?", + "choices": [ + "16.7%", + "20.0%", + "26.7%", + "32.5%" + ], + "answer": 2 + }, + "target": 2, + "arguments": [ + [ + "Question:What evaluation score did Eurus-2-7B-PRIME achieve on AIME 2024?\nAnswer:", + " 16.7%" + ], + [ + "Question:What evaluation score did Eurus-2-7B-PRIME achieve on AIME 2024?\nAnswer:", + " 20.0%" + ], + [ + "Question:What evaluation score did Eurus-2-7B-PRIME achieve on AIME 2024?\nAnswer:", + " 26.7%" + ], + [ + "Question:What evaluation score did Eurus-2-7B-PRIME achieve on AIME 2024?\nAnswer:", + " 32.5%" + ] + ], + "resps": [ + [ + [ + -15.968780517578125, + false + ] + ], + [ + [ + -15.175897598266602, + false + ] + ], + [ + [ + -15.799150466918945, + false + ] + ], + [ + [ + -15.199570655822754, + false + ] + ] + ], + "filtered_resps": [ + [ + -15.968780517578125, + false + ], + [ + -15.175897598266602, + false + ], + [ + -15.799150466918945, + false + ], + [ + -15.199570655822754, + false + ] + ], + "filter": "none", + "metrics": [ + "acc", + "acc_norm" + ], + "doc_hash": "2aa1e3918ba13aed9884bb87c237252fc729d54b11bccfa83da6b42a19648ec4", + "prompt_hash": "09a72f388f08d5eef9b51f61736ac1c5a71e2d72d4f05d16586c8f8350b3c64e", + "target_hash": 
"d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35", + "acc": 0.0, + "acc_norm": 0.0 + }, + { + "doc_id": 58, + "doc": { + "question": "What is the formula for process reward in the Implicit PRM used by PRIME?", + "choices": [ + "r_\\phi(y_t) := \\alpha \\log \\frac{\\pi_\\phi(y_t)}{\\pi_\\text{ref}(y_t)}", + "r_\\phi(y_t) := \\log \\frac{\\pi_\\text{ref}(y_t|\\mathbf{y}_{