Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- .azure/gpu-test.yml +114 -0
- .devcontainer/Dockerfile +9 -0
- .devcontainer/devcontainer.json +105 -0
- .github/CODEOWNERS +2 -0
- .github/dependabot.yml +41 -0
- .gitignore +24 -0
- .pre-commit-config.yaml +89 -0
- CITATION.cff +9 -0
- Dockerfile +17 -0
- LICENSE +201 -0
- README.md +722 -3
- check.py +34 -0
- cmd_start_for.ini +2 -0
- delete.py +83 -0
- environment.yml +261 -0
- fix.py +45 -0
- litgpt.egg-info/PKG-INFO +977 -0
- litgpt.egg-info/SOURCES.txt +89 -0
- litgpt.egg-info/dependency_links.txt +1 -0
- litgpt.egg-info/entry_points.txt +2 -0
- litgpt.egg-info/requires.txt +51 -0
- litgpt.egg-info/top_level.txt +1 -0
- litgpt/__init__.py +20 -0
- litgpt/__main__.py +75 -0
- litgpt/adapter.py +129 -0
- litgpt/adapter_v2.py +210 -0
- litgpt/api.py +734 -0
- litgpt/args.py +104 -0
- litgpt/config.py +3087 -0
- litgpt/lora.py +662 -0
- litgpt/model.py +876 -0
- litgpt/perplexity.py +513 -0
- litgpt/pretrain.py +564 -0
- litgpt/prompts.py +541 -0
- litgpt/tokenizer.py +182 -0
- litgpt/utils.py +875 -0
- out/eval/tinyllama_benches/monthly_metrics.csv +133 -0
- out/eval/tinyllama_full_arc_arxiv_mc/2407/config.json +24 -0
- out/eval/tinyllama_full_arc_arxiv_mc/2407/generation_config.json +7 -0
- out/eval/tinyllama_full_arc_arxiv_mc/2407/log.txt +29 -0
- out/eval/tinyllama_full_arc_arxiv_mc/2407/model_config.yaml +44 -0
- out/eval/tinyllama_full_arc_arxiv_mc/2407/results.json +0 -0
- out/eval/tinyllama_full_arc_arxiv_mc/2407/tokenizer.json +0 -0
- out/eval/tinyllama_full_arc_arxiv_mc/2407/tokenizer_config.json +35 -0
- out/eval/tinyllama_full_arc_arxiv_mc/2407/values.json +286 -0
- out/eval/tinyllama_full_ppl/2407_full/ppl_metrics.jsonl +48 -0
- out/eval/tinyllama_full_ppl/2408_full/ppl_metrics.jsonl +48 -0
- out/eval/tinyllama_full_ppl/2409_full/ppl_metrics.jsonl +48 -0
- out/eval/tinyllama_full_ppl/2410_full/ppl_metrics.jsonl +48 -0
- out/eval/tinyllama_full_ppl/2411_full/ppl_metrics.jsonl +48 -0
.azure/gpu-test.yml
ADDED
@@ -0,0 +1,114 @@
name: GPU tests

trigger:
  branches:
    include:
      - "main"
      - "wip"

pr:
  branches:
    include:
      - "main"
      - "wip"

jobs:
  - job: testing
    strategy:
      matrix:
        "ordinary":
          #image: "pytorchlightning/pytorch_lightning:base-cuda-py3.10-torch2.7-cuda12.6.3"
          dependency: ""
        "w. Thunder":
          #image: "pytorchlightning/pytorch_lightning:base-cuda-py3.10-torch2.7-cuda12.6.3"
          dependency: "compiler"
    variables:
      DEVICES: $( python -c 'print("$(Agent.Name)".split("_")[-1])' )
      RUN_ONLY_CUDA_TESTS: "1"
      TRANSFORMERS_CACHE: "/var/tmp/hf/transformers"
      HF_HOME: "/var/tmp/hf/home"
      HF_HUB_CACHE: "/var/tmp/hf/hub"
      SKIP_WITH_CI: "1"
      NCCL_DEBUG: "INFO"
      PYTHON_VERSION: "3.10"
      CUDA_VERSION: "12.6.3"
      TORCH_VERSION: "2.7.1"
      CUDNN_FRONTEND_VERSION: "1.10.0"
    container:
      # image: "pytorchlightning/pytorch_lightning:base-cuda-py$(PYTHON_VERSION)-torch$(TORCH_VERSION)-cuda$(CUDA_VERSION)"
      # pytorchlightning/lightning-thunder:ubuntu22.04-cuda12.1.1-cudnn-fe1.5.0-py3.10-pt_main-dev
      image: "pytorchlightning/lightning-thunder:ubuntu24.04-cuda$(CUDA_VERSION)-cudnn-fe$(CUDNN_FRONTEND_VERSION)-py$(PYTHON_VERSION)-pt_$(TORCH_VERSION)-dev"
      options: "--gpus=all --shm-size=8gb -v /var/tmp:/var/tmp"
    workspace:
      clean: all
    pool: "lit-rtx-3090"
    timeoutInMinutes: "35"
    cancelTimeoutInMinutes: "2"
    steps:
      - bash: |
          echo "##vso[task.setvariable variable=CUDA_VISIBLE_DEVICES]$(DEVICES)"
        displayName: "set env. vars"

      - bash: |
          echo $(DEVICES)
          echo $CUDA_VISIBLE_DEVICES
          dpkg-query -W -f='${Package} ${Version}\n' libnccl2 libnccl-dev
          whereis nvidia
          nvidia-smi
          which python && which pip
          python --version
          pip --version
          pip list
        displayName: "Image info & NVIDIA"

      - script: |
          pip install --upgrade pip
          pip install '.[extra,test]' "torch==${TORCH_VERSION}" cffi -U
        displayName: "Install package & dependencies"

      - script: |
          set -e
          pip uninstall -y torchvision torchaudio
          pip install '.[compiler,extra,test]' "torch==${TORCH_VERSION}"
          python -c "from thunder.executors import nvfuser_available ; assert nvfuser_available(), 'nvFuser is missing!'"
          python -c "from thunder.executors.triton_utils import triton_version ; assert triton_version() is not None, 'triton is missing!'"
        condition: eq(variables['dependency'], 'compiler')
        displayName: "Install `compiler` [nvFuser & Thunder]"

      - bash: |
          set -e
          pip list
          python -c "import torch ; mgpu = torch.cuda.device_count() ; assert mgpu == 2, f'GPU: {mgpu}'"
          python -c "from torch import __version__ as ver ; assert str(ver).split('+')[0] == '$(TORCH_VERSION)', f'PyTorch: installed {ver} but expected $(TORCH_VERSION)'"
        displayName: "Env details"

      - bash: pytest -v --durations=100
        displayName: "All tests"
        timeoutInMinutes: "15"

      - bash: |
          wget https://raw.githubusercontent.com/Lightning-AI/utilities/main/scripts/run_standalone_tests.sh
          bash run_standalone_tests.sh "tests"
        displayName: "Standalone tests"
        env:
          PL_RUN_STANDALONE_TESTS: "1"
          # NUM_PARALLEL_TESTS: "10"
          NCCL_IGNORE_DISABLED_P2P: "1"
          NCCL_DEBUG: "INFO"
        timeoutInMinutes: "10"

      - bash: |
          pip uninstall -y lightning-thunder
          # install thunder from source, so that thunder.tests will be available
          pip install -U "lightning-thunder[test] @ git+https://github.com/Lightning-AI/lightning-thunder.git" "torch==${TORCH_VERSION}"
        displayName: "Re-install Thunder [main branch]"
        condition: eq(variables['dependency'], 'compiler')

      - bash: |
          # without env var, it filters out all tests
          RUN_ONLY_CUDA_TESTS=0 pytest tests/ext_thunder/test_thunder_networks.py -v --durations=50
        displayName: "Extra tests for Thunder [main branch]"
        condition: eq(variables['dependency'], 'compiler')
        env:
          TORCHDYNAMO_VERBOSE: "1"
        timeoutInMinutes: "10"
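The `DEVICES` variable in this pipeline derives the visible GPU indices from the Azure agent name and later exports them as `CUDA_VISIBLE_DEVICES`. A minimal sketch of how that expression evaluates, assuming a hypothetical agent named `lit-rtx-3090_0,1` (the exact naming scheme of the pool is an assumption):

```bash
# Everything after the last underscore of the agent name is taken as the GPU list.
python -c 'print("lit-rtx-3090_0,1".split("_")[-1])'
# -> 0,1

# The pipeline then exports that value so the tests only see the selected GPUs:
export CUDA_VISIBLE_DEVICES="0,1"
nvidia-smi   # should now list only the two selected devices
```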
.devcontainer/Dockerfile
ADDED
@@ -0,0 +1,9 @@
# See here for image contents: https://github.com/devcontainers/images/blob/main/src/python/.devcontainer/Dockerfile

# [Choice] Python version (use -bookworm or -bullseye variants on local arm64/Apple Silicon): 3, 3.12, 3.11, 3.10, 3.9, 3.8, 3-bookworm, 3.12-bookworm, 3.11-bookworm, 3.10-bookworm, 3.9-bookworm, 3.8-bookworm, 3-bullseye, 3.12-bullseye, 3.11-bullseye, 3.10-bullseye, 3.9-bullseye, 3.8-bullseye, 3-buster, 3.12-buster, 3.11-buster, 3.10-buster, 3.9-buster, 3.8-buster
ARG VARIANT=3-bookworm
FROM mcr.microsoft.com/devcontainers/python:1-${VARIANT}

# Temporary: Upgrade python packages due to https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-40897
# They are installed by the base image (python) which does not have the patch.
RUN python3 -m pip install --upgrade pip setuptools
.devcontainer/devcontainer.json
ADDED
@@ -0,0 +1,105 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the README at:
// https://github.com/microsoft/vscode-dev-containers/tree/v0.194.0/containers/python-3
{
  "name": "Python 3 (litgpt)",
  "build": {
    "dockerfile": "Dockerfile",
    "context": "..",
    "args": {
      "VARIANT": "3.11-bookworm"
    }
  },
  "runArgs": [
    // Enable GPU passthrough, requires WSL2 on Windows
    //"--gpus=all",
    // One of the following options is required for torch multiprocessing
    //"--ipc=host",
    //"--shm-size=4gb",
  ],
  // Features to add to the dev container. More info: https://containers.dev/features.
  "features": {
    "ghcr.io/devcontainers/features/git:1": {},
    "ghcr.io/devcontainers/features/git-lfs:1": {},
    //"ghcr.io/devcontainers/features/nvidia-cuda:1": {},
    "ghcr.io/devcontainers-extra/features/actionlint:1": {},
    "ghcr.io/devcontainers-extra/features/pre-commit:2": {},
    "ghcr.io/dhoeric/features/act:1": {},
    "ghcr.io/devcontainers/features/docker-in-docker:2": {
      "version": "latest",
      "moby": true
    }
  },
  // Set *default* container specific settings.json values on container create.
  "customizations": {
    "vscode": {
      "settings": {
        "editor.tabSize": 4,
        "editor.renderWhitespace": "all",
        "editor.formatOnSave": true,
        "editor.rulers": [120],
        "files.exclude": {
          "**/__pycache__": true
        },
        "python.pythonPath": "/usr/local/bin/python",
        "python.defaultInterpreterPath": "/usr/local/bin/python",
        "python.languageServer": "Pylance",
        "python.analysis.autoImportCompletions": true,
        "python.analysis.completeFunctionParens": true,
        "python.analysis.autoSearchPaths": true,
        "python.testing.pytestArgs": ["tests"],
        "python.testing.unittestEnabled": false,
        "python.testing.pytestEnabled": true,
        "code-eol.highlightNonDefault": true,
        "code-eol.highlightExtraWhitespace": true,
        "autoDocstring.docstringFormat": "google-notypes",
        "autoDocstring.guessTypes": true,
        "autoDocstring.generateDocstringOnEnter": true,
        "autoDocstring.startOnNewLine": true,
        "telemetry.telemetryLevel": "off",
        "[python]": {
          "editor.formatOnSave": true,
          "editor.defaultFormatter": "charliermarsh.ruff",
          "editor.codeActionsOnSave": {
            "source.organizeImports": "always",
            "source.fixAll": "always"
          }
        }
      },
      // Add the IDs of extensions you want installed when the container is created.
      "extensions": [
        "ms-python.python",
        "ms-python.vscode-pylance",
        "ms-toolsai.jupyter",
        "GitHub.copilot",
        "GitHub.copilot-chat",
        "github.vscode-github-actions",
        "SanjulaGanepola.github-local-actions",
        "charliermarsh.ruff",
        "esbenp.prettier-vscode",
        "ms-vscode.test-adapter-converter",
        "njqdev.vscode-python-typehint",
        "KevinRose.vsc-python-indent",
        "medo64.render-crlf",
        "shardulm94.trailing-spaces",
        "nhoizey.gremlins",
        "wayou.vscode-todo-highlight",
        "Gruntfuggly.todo-tree",
        "njpwerner.autodocstring",
        "rodolphebarbanneau.python-docstring-highlighter",
        "mechatroner.rainbow-csv",
        "uctakeoff.vscode-counter",
        "bierner.github-markdown-preview",
        "yahyabatulu.vscode-markdown-alert",
        "ms-vscode-remote.vscode-remote-extensionpack",
        "ms-azuretools.vscode-docker",
        "redhat.vscode-yaml"
      ]
    }
  },
  // Use 'forwardPorts' to make a list of ports inside the container available locally.
  // "forwardPorts": [],
  // Use 'postCreateCommand' to run commands after the container is created.
  "postCreateCommand": "pre-commit install && pip install '.[extra,compiler,test]' -U",
  // Comment out connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root.
  "remoteUser": "vscode"
}
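Outside of VS Code, the same configuration can be exercised with the Dev Containers CLI; a minimal sketch, assuming Node.js/npm are available and the repository root is the workspace folder:

```bash
# Install the Dev Containers CLI (assumption: npm is available on the host).
npm install -g @devcontainers/cli

# Build and start the container described by .devcontainer/devcontainer.json,
# then run the test suite inside it.
devcontainer up --workspace-folder .
devcontainer exec --workspace-folder . pytest tests
```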
.github/CODEOWNERS
ADDED
@@ -0,0 +1,2 @@
* @lantiga @t-vi @borda
/README.md @williamfalcon @lantiga
.github/dependabot.yml
ADDED
@@ -0,0 +1,41 @@
# Basic dependabot.yml file with
# minimum configuration for two package managers

version: 2
updates:
  # Enable version updates for python
  - package-ecosystem: "pip"
    # Look for a `requirements` in the `root` directory
    directory: "/"
    # Check for updates once a week
    schedule:
      interval: "monthly"
    # Labels on pull requests for version updates only
    labels:
      - "dependencies"
    pull-request-branch-name:
      # Separate sections of the branch name with a hyphen
      # for example, `dependabot-npm_and_yarn-next_js-acorn-6.4.1`
      separator: "-"
    # Allow up to 5 open pull requests for pip dependencies
    open-pull-requests-limit: 3

  # Enable version updates for GitHub Actions
  - package-ecosystem: "github-actions"
    directory: "/"
    # Check for updates once a week
    schedule:
      interval: "weekly"
    # Labels on pull requests for version updates only
    labels:
      - "CI / actions"
    pull-request-branch-name:
      # Separate sections of the branch name with a hyphen
      # for example, `dependabot-npm_and_yarn-next_js-acorn-6.4.1`
      separator: "-"
    # Allow up to 5 open pull requests for GitHub Actions
    open-pull-requests-limit: 1
    groups:
      GHA-updates:
        patterns:
          - "*"
.gitignore
ADDED
@@ -0,0 +1,24 @@
.ipynb_checkpoints/
__pycache__
.idea
.DS_Store
*.egg-info
build
dist
.venv
.vscode

# data
data
datasets
!litgpt/data
!tests/data
checkpoints
out
wandb
events.out.tfevents*

# test artifacts from tests/test_readme.py
**/custom_finetuning_dataset.json
client.py
**/custom_texts/
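The `!litgpt/data` and `!tests/data` entries re-include package data that the broader `data`/`datasets` rules would otherwise hide. A small sketch of how to confirm which rule applies, using hypothetical paths:

```bash
# -v prints the .gitignore rule that matched each path; no output means the path is not ignored.
git check-ignore -v data/train.bin out/eval/results.json litgpt/data/__init__.py
```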
.pre-commit-config.yaml
ADDED
@@ -0,0 +1,89 @@
# Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

default_language_version:
  python: python3

ci:
  autofix_prs: true
  autoupdate_commit_msg: "[pre-commit.ci] pre-commit suggestions"
  autoupdate_schedule: quarterly
  # submodules: true

repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      - id: end-of-file-fixer
      - id: trailing-whitespace
        exclude: README.md
      - id: check-yaml
      - id: check-toml
      #- id: check-docstring-first
      #- id: check-executables-have-shebangs
      - id: check-case-conflict
      - id: check-added-large-files
        args: ["--maxkb=250", "--enforce-all"]
      - id: detect-private-key

  - repo: https://github.com/codespell-project/codespell
    rev: v2.4.1
    hooks:
      - id: codespell
        additional_dependencies: [tomli]
        args: ["--write-changes"]
        exclude: pyproject.toml

  #- repo: https://github.com/crate-ci/typos
  #  rev: dictgen-v0.3.1
  #  hooks:
  #    - id: typos
  #      args: []  # empty so fixes are not written
  #      exclude: pyproject.toml

  #- repo: https://github.com/executablebooks/mdformat
  #  rev: 0.7.21
  #  hooks:
  #    - id: mdformat
  #      args: ["--number"]
  #      additional_dependencies:
  #        - mdformat-gfm
  #        - mdformat-black
  #        - mdformat_frontmatter

  - repo: https://github.com/pre-commit/mirrors-prettier
    rev: v3.1.0
    hooks:
      - id: prettier
        files: \.(json|yml|yaml|toml)
        # https://prettier.io/docs/en/options.html#print-width
        args: ["--print-width=140"]

  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.12.2
    hooks:
      - id: ruff
        args: ["--fix"]
      - id: ruff-format
      - id: ruff

  - repo: https://github.com/tox-dev/pyproject-fmt
    rev: v2.6.0
    hooks:
      - id: pyproject-fmt
        additional_dependencies: [tox]
  - repo: https://github.com/abravalheri/validate-pyproject
    rev: v0.24.1
    hooks:
      - id: validate-pyproject
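To run these hooks locally before pushing, the usual pre-commit workflow applies; a minimal sketch, assuming the config above sits at the repository root:

```bash
# Install the pre-commit tool and register the git hook defined by this config.
pip install pre-commit
pre-commit install

# Run every configured hook (ruff, codespell, prettier, etc.) against all files once.
pre-commit run --all-files
```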
CITATION.cff
ADDED
@@ -0,0 +1,9 @@
cff-version: 1.2.0
message: "If you use this software, you can cite it as shown below."
title: "LitGPT"
abstract: "20+ high-performance LLMs with recipes to pretrain, finetune and deploy at scale."
date-released: 2023-03-22
authors:
  - name: "The Lightning AI team"
license: "Apache-2.0"
url: "https://github.com/Lightning-AI/litgpt"
Dockerfile
ADDED
@@ -0,0 +1,17 @@
FROM ubuntu:22.04

# Set UTF-8 (important; otherwise Python and other programs may produce garbled output)
ENV LANG=C.UTF-8
ENV LC_ALL=C.UTF-8

# Install common tools (optional)
RUN apt-get update && apt-get install -y \
    python3 python3-pip vim git wget curl \
    && apt-get clean

# Copy the entire project folder into the image
COPY . /workspace

# Set the default working directory
WORKDIR /workspace
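A minimal sketch of how this image might be built and used, with a hypothetical tag name (`litgpt-workspace`):

```bash
# Build the image from the repository root; the whole folder is copied to /workspace.
docker build -t litgpt-workspace .

# Start an interactive shell inside the container; /workspace is the default directory.
docker run --rm -it litgpt-workspace bash
```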
LICENSE
ADDED
@@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [2023] Lightning AI

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
README.md
CHANGED
|
@@ -1,3 +1,722 @@
|
|
| 1 |
-
|
| 2 |
-
|
| 3 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<div align="center">
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
# ⚡ LitGPT
|
| 5 |
+
|
| 6 |
+
**20+ high-performance LLMs with recipes to pretrain, finetune, and deploy at scale.**
|
| 7 |
+
|
| 8 |
+
<pre>
|
| 9 |
+
✅ From scratch implementations ✅ No abstractions ✅ Beginner friendly
|
| 10 |
+
✅ Flash attention ✅ FSDP ✅ LoRA, QLoRA, Adapter
|
| 11 |
+
✅ Reduce GPU memory (fp4/8/16/32) ✅ 1-1000+ GPUs/TPUs ✅ 20+ LLMs
|
| 12 |
+
</pre>
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
---
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+

|
| 19 |
+
 [](https://github.com/Lightning-AI/lit-stablelm/blob/master/LICENSE) [](https://discord.gg/VptPCZkGNa)
|
| 20 |
+
|
| 21 |
+
<p align="center">
|
| 22 |
+
<a href="#quick-start">Quick start</a> •
|
| 23 |
+
<a href="#choose-from-20-llms">Models</a> •
|
| 24 |
+
<a href="#finetune-an-llm">Finetune</a> •
|
| 25 |
+
<a href="#deploy-an-llm">Deploy</a> •
|
| 26 |
+
<a href="#all-workflows">All workflows</a> •
|
| 27 |
+
<a href="#state-of-the-art-features">Features</a> •
|
| 28 |
+
<a href="#training-recipes">Recipes (YAML)</a> •
|
| 29 |
+
<a href="https://lightning.ai/">Lightning AI</a> •
|
| 30 |
+
<a href="#tutorials">Tutorials</a>
|
| 31 |
+
</p>
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
<a target="_blank" href="https://lightning.ai/lightning-ai/studios/litgpt-quick-start">
|
| 36 |
+
<img src="https://pl-bolts-doc-images.s3.us-east-2.amazonaws.com/app-2/get-started-badge.svg" height="36px" alt="Get started"/>
|
| 37 |
+
</a>
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
</div>
|
| 42 |
+
|
| 43 |
+
# Use, finetune, pretrain, and deploy LLMs Lightning fast ⚡⚡
|
| 44 |
+
Every LLM is implemented from scratch with **no abstractions** and **full control**, making them blazing fast, minimal, and performant at enterprise scale.
|
| 45 |
+
|
| 46 |
+
✅ **Enterprise ready -** Apache 2.0 for unlimited enterprise use.</br>
|
| 47 |
+
✅ **Developer friendly -** Easy debugging with no abstraction layers and single file implementations.</br>
|
| 48 |
+
✅ **Optimized performance -** Models designed to maximize performance, reduce costs, and speed up training.</br>
|
| 49 |
+
✅ **Proven recipes -** Highly-optimized training/finetuning recipes tested at enterprise scale.</br>
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
# Quick start
|
| 54 |
+
Install LitGPT
|
| 55 |
+
```
|
| 56 |
+
pip install 'litgpt[extra]'
|
| 57 |
+
```
|
| 58 |
+
|
| 59 |
+
Load and use any of the [20+ LLMs](#choose-from-20-llms):
|
| 60 |
+
```python
|
| 61 |
+
from litgpt import LLM
|
| 62 |
+
|
| 63 |
+
llm = LLM.load("microsoft/phi-2")
|
| 64 |
+
text = llm.generate("Fix the spelling: Every fall, the family goes to the mountains.")
|
| 65 |
+
print(text)
|
| 66 |
+
# Corrected Sentence: Every fall, the family goes to the mountains.
|
| 67 |
+
```
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
✅ Optimized for fast inference</br>
|
| 72 |
+
✅ Quantization</br>
|
| 73 |
+
✅ Runs on low-memory GPUs</br>
|
| 74 |
+
✅ No layers of internal abstractions</br>
|
| 75 |
+
✅ Optimized for production scale</br>
|
| 76 |
+
|
| 77 |
+
<details>
|
| 78 |
+
<summary>Advanced install options</summary>
|
| 79 |
+
|
| 80 |
+
Install from source:
|
| 81 |
+
|
| 82 |
+
```bash
|
| 83 |
+
git clone https://github.com/Lightning-AI/litgpt
|
| 84 |
+
cd litgpt
|
| 85 |
+
pip install -e '.[all]'
|
| 86 |
+
```
|
| 87 |
+
</details>
|
| 88 |
+
|
| 89 |
+
[Explore the full Python API docs](tutorials/python-api.md).
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
---
|
| 94 |
+
# Choose from 20+ LLMs
|
| 95 |
+
Every model is written from scratch to maximize performance and remove layers of abstraction:
|
| 96 |
+
|
| 97 |
+
| Model | Model size | Author | Reference |
|
| 98 |
+
|----|----|----|----|
|
| 99 |
+
| Llama 3, 3.1, 3.2, 3.3 | 1B, 3B, 8B, 70B, 405B | Meta AI | [Meta AI 2024](https://github.com/meta-llama/llama3) |
|
| 100 |
+
| Code Llama | 7B, 13B, 34B, 70B | Meta AI | [Rozière et al. 2023](https://arxiv.org/abs/2308.12950) |
|
| 101 |
+
| CodeGemma | 7B | Google | [Google Team, Google Deepmind](https://ai.google.dev/gemma/docs/codegemma) |
|
| 102 |
+
| Gemma 2 | 2B, 9B, 27B | Google | [Google Team, Google Deepmind](https://storage.googleapis.com/deepmind-media/gemma/gemma-2-report.pdf) |
|
| 103 |
+
| Phi 4 | 14B | Microsoft Research | [Abdin et al. 2024](https://arxiv.org/abs/2412.08905) |
|
| 104 |
+
| Qwen2.5 | 0.5B, 1.5B, 3B, 7B, 14B, 32B, 72B | Alibaba Group | [Qwen Team 2024](https://qwenlm.github.io/blog/qwen2.5/) |
|
| 105 |
+
| Qwen2.5 Coder | 0.5B, 1.5B, 3B, 7B, 14B, 32B | Alibaba Group | [Hui, Binyuan et al. 2024](https://arxiv.org/abs/2409.12186) |
|
| 106 |
+
| R1 Distill Llama | 8B, 70B | DeepSeek AI | [DeepSeek AI 2025](https://github.com/deepseek-ai/DeepSeek-R1/blob/main/DeepSeek_R1.pdf) |
|
| 107 |
+
| ... | ... | ... | ... |
|
| 108 |
+
|
| 109 |
+
<details>
|
| 110 |
+
<summary>See full list of 20+ LLMs</summary>
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
#### All models
|
| 115 |
+
|
| 116 |
+
| Model | Model size | Author | Reference |
|
| 117 |
+
|----|----|----|----|
|
| 118 |
+
| CodeGemma | 7B | Google | [Google Team, Google Deepmind](https://ai.google.dev/gemma/docs/codegemma) |
|
| 119 |
+
| Code Llama | 7B, 13B, 34B, 70B | Meta AI | [Rozière et al. 2023](https://arxiv.org/abs/2308.12950) |
|
| 120 |
+
| Falcon | 7B, 40B, 180B | TII UAE | [TII 2023](https://falconllm.tii.ae) |
|
| 121 |
+
| Falcon 3 | 1B, 3B, 7B, 10B | TII UAE | [TII 2024](https://huggingface.co/blog/falcon3) |
|
| 122 |
+
| FreeWilly2 (Stable Beluga 2) | 70B | Stability AI | [Stability AI 2023](https://stability.ai/blog/stable-beluga-large-instruction-fine-tuned-models) |
|
| 123 |
+
| Function Calling Llama 2 | 7B | Trelis | [Trelis et al. 2023](https://huggingface.co/Trelis/Llama-2-7b-chat-hf-function-calling-v2) |
|
| 124 |
+
| Gemma | 2B, 7B | Google | [Google Team, Google Deepmind](https://storage.googleapis.com/deepmind-media/gemma/gemma-report.pdf) |
|
| 125 |
+
| Gemma 2 | 9B, 27B | Google | [Google Team, Google Deepmind](https://storage.googleapis.com/deepmind-media/gemma/gemma-2-report.pdf) |
|
| 126 |
+
| Gemma 3 | 1B, 4B, 12B, 27B | Google | [Google Team, Google Deepmind](https://arxiv.org/pdf/2503.19786) |
|
| 127 |
+
| Llama 2 | 7B, 13B, 70B | Meta AI | [Touvron et al. 2023](https://arxiv.org/abs/2307.09288) |
|
| 128 |
+
| Llama 3.1 | 8B, 70B | Meta AI | [Meta AI 2024](https://github.com/meta-llama/llama3) |
|
| 129 |
+
| Llama 3.2 | 1B, 3B | Meta AI | [Meta AI 2024](https://ai.meta.com/blog/llama-3-2-connect-2024-vision-edge-mobile-devices/) |
|
| 130 |
+
| Llama 3.3 | 70B | Meta AI | [Meta AI 2024](https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct) |
|
| 131 |
+
| Mathstral | 7B | Mistral AI | [Mistral AI 2024](https://mistral.ai/news/mathstral/) |
|
| 132 |
+
| MicroLlama | 300M | Ken Wang | [MicroLlama repo](https://github.com/keeeeenw/MicroLlama) |
|
| 133 |
+
| Mixtral MoE | 8x7B | Mistral AI | [Mistral AI 2023](https://mistral.ai/news/mixtral-of-experts/) |
|
| 134 |
+
| Mistral | 7B, 123B | Mistral AI | [Mistral AI 2023](https://mistral.ai/news/announcing-mistral-7b/) |
|
| 135 |
+
| Mixtral MoE | 8x22B | Mistral AI | [Mistral AI 2024](https://mistral.ai/news/mixtral-8x22b/) |
|
| 136 |
+
| OLMo | 1B, 7B | Allen Institute for AI (AI2) | [Groeneveld et al. 2024](https://aclanthology.org/2024.acl-long.841/) |
|
| 137 |
+
| OpenLLaMA | 3B, 7B, 13B | OpenLM Research | [Geng & Liu 2023](https://github.com/openlm-research/open_llama) |
|
| 138 |
+
| Phi 1.5 & 2 | 1.3B, 2.7B | Microsoft Research | [Li et al. 2023](https://arxiv.org/abs/2309.05463) |
|
| 139 |
+
| Phi 3 | 3.8B | Microsoft Research | [Abdin et al. 2024](https://arxiv.org/abs/2404.14219) |
|
| 140 |
+
| Phi 4 | 14B | Microsoft Research | [Abdin et al. 2024](https://arxiv.org/abs/2412.08905) |
|
| 141 |
+
| Phi 4 Mini Instruct | 3.8B | Microsoft Research | [Microsoft 2025](https://arxiv.org/abs/2503.01743) |
|
| 142 |
+
| Phi 4 Mini Reasoning | 3.8B | Microsoft Research | [Xu, Peng et al. 2025](https://arxiv.org/abs/2504.21233) |
|
| 143 |
+
| Phi 4 Reasoning | 3.8B | Microsoft Research | [Abdin et al. 2025](https://arxiv.org/abs/2504.21318) |
|
| 144 |
+
| Phi 4 Reasoning Plus | 3.8B | Microsoft Research | [Abdin et al. 2025](https://arxiv.org/abs/2504.21318) |
|
| 145 |
+
| Platypus | 7B, 13B, 70B | Lee et al. | [Lee, Hunter, and Ruiz 2023](https://arxiv.org/abs/2308.07317) |
|
| 146 |
+
| Pythia | {14,31,70,160,410}M, {1,1.4,2.8,6.9,12}B | EleutherAI | [Biderman et al. 2023](https://arxiv.org/abs/2304.01373) |
|
| 147 |
+
| Qwen2.5 | 0.5B, 1.5B, 3B, 7B, 14B, 32B, 72B | Alibaba Group | [Qwen Team 2024](https://qwenlm.github.io/blog/qwen2.5/) |
|
| 148 |
+
| Qwen2.5 Coder | 0.5B, 1.5B, 3B, 7B, 14B, 32B | Alibaba Group | [Hui, Binyuan et al. 2024](https://arxiv.org/abs/2409.12186) |
|
| 149 |
+
| Qwen2.5 1M (Long Context) | 7B, 14B | Alibaba Group | [Qwen Team 2025](https://qwenlm.github.io/blog/qwen2.5-1m/) |
|
| 150 |
+
| Qwen2.5 Math | 1.5B, 7B, 72B | Alibaba Group | [An, Yang et al. 2024](https://arxiv.org/abs/2409.12122) |
|
| 151 |
+
| QwQ | 32B | Alibaba Group | [Qwen Team 2025](https://qwenlm.github.io/blog/qwq-32b/) |
|
| 152 |
+
| QwQ-Preview | 32B | Alibaba Group | [Qwen Team 2024](https://qwenlm.github.io/blog/qwq-32b-preview/) |
|
| 153 |
+
| Qwen3 | 0.6B, 1.7B, 4B, 8B, 14B, 32B | Alibaba Group | [Qwen Team 2025](https://arxiv.org/abs/2505.09388/) |
|
| 154 |
+
| Qwen3 MoE | 30B, 235B | Alibaba Group | [Qwen Team 2025](https://arxiv.org/abs/2505.09388/) |
|
| 155 |
+
| R1 Distill Llama | 8B, 70B | DeepSeek AI | [DeepSeek AI 2025](https://github.com/deepseek-ai/DeepSeek-R1/blob/main/DeepSeek_R1.pdf) |
|
| 156 |
+
| SmolLM2 | 135M, 360M, 1.7B | Hugging Face | [Hugging Face 2024](https://github.com/huggingface/smollm) |
|
| 157 |
+
| Salamandra | 2B, 7B | Barcelona Supercomputing Centre | [BSC-LTC 2024](https://github.com/BSC-LTC/salamandra) |
|
| 158 |
+
| StableCode | 3B | Stability AI | [Stability AI 2023](https://stability.ai/blog/stablecode-llm-generative-ai-coding) |
|
| 159 |
+
| StableLM | 3B, 7B | Stability AI | [Stability AI 2023](https://github.com/Stability-AI/StableLM) |
|
| 160 |
+
| StableLM Zephyr | 3B | Stability AI | [Stability AI 2023](https://stability.ai/blog/stablecode-llm-generative-ai-coding) |
|
| 161 |
+
| TinyLlama | 1.1B | Zhang et al. | [Zhang et al. 2023](https://github.com/jzhang38/TinyLlama) |
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
**Tip**: You can list all available models by running the `litgpt download list` command.
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
</details>
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
---
|
| 172 |
+
|
| 173 |
+
# Workflows
|
| 174 |
+
|
| 175 |
+
<p align="center">
|
| 176 |
+
<a href="#finetune-an-llm">Finetune</a> •
|
| 177 |
+
<a href="#pretrain-an-llm">Pretrain</a> •
|
| 178 |
+
<a href="#continue-pretraining-an-llm">Continued pretraining</a> •
|
| 179 |
+
<a href="#evaluate-an-llm">Evaluate</a> •
|
| 180 |
+
<a href="#deploy-an-llm">Deploy</a> •
|
| 181 |
+
<a href="#test-an-llm">Test</a>
|
| 182 |
+
</p>
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
Use the command line interface to run advanced workflows such as pretraining or finetuning on your own data.
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
## All workflows
|
| 190 |
+
After installing LitGPT, select the model and workflow to run (finetune, pretrain, evaluate, deploy, etc...):
|
| 191 |
+
|
| 192 |
+
```bash
|
| 193 |
+
# litgpt [action] [model]
|
| 194 |
+
litgpt serve meta-llama/Llama-3.2-3B-Instruct
|
| 195 |
+
litgpt finetune meta-llama/Llama-3.2-3B-Instruct
|
| 196 |
+
litgpt pretrain meta-llama/Llama-3.2-3B-Instruct
|
| 197 |
+
litgpt chat meta-llama/Llama-3.2-3B-Instruct
|
| 198 |
+
litgpt evaluate meta-llama/Llama-3.2-3B-Instruct
|
| 199 |
+
```
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
----
|
| 204 |
+
|
| 205 |
+
## Finetune an LLM
|
| 206 |
+
|
| 207 |
+
<div align="center">
|
| 208 |
+
<a target="_blank" href="https://lightning.ai/lightning-ai/studios/litgpt-finetune">
|
| 209 |
+
<img src="https://pl-bolts-doc-images.s3.us-east-2.amazonaws.com/app-2/run-on-studio.svg" height="36px" alt="Run on Studios"/>
|
| 210 |
+
</a>
|
| 211 |
+
</div>
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
Finetuning is the process of taking a pretrained AI model and further training it on a smaller, specialized dataset tailored to a specific task or application.
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
```bash
|
| 221 |
+
# 0) setup your dataset
|
| 222 |
+
curl -L https://huggingface.co/datasets/ksaw008/finance_alpaca/resolve/main/finance_alpaca.json -o my_custom_dataset.json
|
| 223 |
+
|
| 224 |
+
# 1) Finetune a model (auto downloads weights)
|
| 225 |
+
litgpt finetune microsoft/phi-2 \
|
| 226 |
+
--data JSON \
|
| 227 |
+
--data.json_path my_custom_dataset.json \
|
| 228 |
+
--data.val_split_fraction 0.1 \
|
| 229 |
+
--out_dir out/custom-model
|
| 230 |
+
|
| 231 |
+
# 2) Test the model
|
| 232 |
+
litgpt chat out/custom-model/final
|
| 233 |
+
|
| 234 |
+
# 3) Deploy the model
|
| 235 |
+
litgpt serve out/custom-model/final
|
| 236 |
+
```
|
| 237 |
+
|
| 238 |
+
[Read the full finetuning docs](tutorials/finetune.md)
|
| 239 |
+
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
----
|
| 243 |
+
|
| 244 |
+
## Deploy an LLM
|
| 245 |
+
|
| 246 |
+
<div align="center">
|
| 247 |
+
<a target="_blank" href="https://lightning.ai/lightning-ai/studios/litgpt-serve">
|
| 248 |
+
<img src="https://pl-bolts-doc-images.s3.us-east-2.amazonaws.com/app-2/deploy-on-studios.svg" height="36px" alt="Deploy on Studios"/>
|
| 249 |
+
</a>
|
| 250 |
+
</div>
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
Deploy a pretrained or finetune LLM to use it in real-world applications. Deploy, automatically sets up a web server that can be accessed by a website or app.
|
| 255 |
+
|
| 256 |
+
```bash
|
| 257 |
+
# deploy an out-of-the-box LLM
|
| 258 |
+
litgpt serve microsoft/phi-2
|
| 259 |
+
|
| 260 |
+
# deploy your own trained model
|
| 261 |
+
litgpt serve path/to/microsoft/phi-2/checkpoint
|
| 262 |
+
```
|
| 263 |
+
|
| 264 |
+
<details>
|
| 265 |
+
<summary>Show code to query server:</summary>
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
Test the server in a separate terminal and integrate the model API into your AI product:
|
| 270 |
+
```python
|
| 271 |
+
# 3) Use the server (in a separate Python session)
|
| 272 |
+
import requests, json
|
| 273 |
+
response = requests.post(
|
| 274 |
+
"http://127.0.0.1:8000/predict",
|
| 275 |
+
json={"prompt": "Fix typos in the following sentence: Example input"}
|
| 276 |
+
)
|
| 277 |
+
print(response.json()["output"])
|
| 278 |
+
```
|
| 279 |
+
</details>
|
| 280 |
+
|
| 281 |
+
[Read the full deploy docs](tutorials/deploy.md).
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
----
|
| 286 |
+
|
| 287 |
+
## Evaluate an LLM
|
| 288 |
+
Evaluate an LLM to test its performance on various tasks to see how well it understands and generates text. Simply put, we can evaluate things like how well would it do in college-level chemistry, coding, etc... (MMLU, Truthful QA, etc...)
|
| 289 |
+
|
| 290 |
+
```bash
|
| 291 |
+
litgpt evaluate microsoft/phi-2 --tasks 'truthfulqa_mc2,mmlu'
|
| 292 |
+
```
|
| 293 |
+
|
| 294 |
+
[Read the full evaluation docs](tutorials/evaluation.md).
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
|
| 298 |
+
----
|
| 299 |
+
|
| 300 |
+
## Test an LLM
|
| 301 |
+
|
| 302 |
+
<div align="center">
|
| 303 |
+
<a target="_blank" href="https://lightning.ai/lightning-ai/studios/litgpt-chat">
|
| 304 |
+
<img src="https://pl-bolts-doc-images.s3.us-east-2.amazonaws.com/app-2/run-on-studio.svg" height="36px" alt="Run on Studios"/>
|
| 305 |
+
</a>
|
| 306 |
+
</div>
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
|
| 310 |
+
Test how well the model works via an interactive chat. Use the `chat` command to chat, extract embeddings, etc...
|
| 311 |
+
|
| 312 |
+
Here's an example showing how to use the Phi-2 LLM:
|
| 313 |
+
```bash
|
| 314 |
+
litgpt chat microsoft/phi-2
|
| 315 |
+
|
| 316 |
+
>> Prompt: What do Llamas eat?
|
| 317 |
+
```
|
| 318 |
+
|
| 319 |
+
<details>
|
| 320 |
+
<summary>Full code:</summary>
|
| 321 |
+
|
| 322 |
+
|
| 323 |
+
|
| 324 |
+
```bash
|
| 325 |
+
# 1) List all supported LLMs
|
| 326 |
+
litgpt download list
|
| 327 |
+
|
| 328 |
+
# 2) Use a model (auto downloads weights)
|
| 329 |
+
litgpt chat microsoft/phi-2
|
| 330 |
+
|
| 331 |
+
>> Prompt: What do Llamas eat?
|
| 332 |
+
```
|
| 333 |
+
|
| 334 |
+
The download of certain models requires an additional access token. You can read more about this in the [download](tutorials/download_model_weights.md#specific-models-and-access-tokens) documentation.
|
| 335 |
+
|
| 336 |
+
</details>
|
| 337 |
+
|
| 338 |
+
[Read the full chat docs](tutorials/inference.md).
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
----
|
| 343 |
+
|

## Pretrain an LLM

<div align="center">
<a target="_blank" href="https://lightning.ai/lightning-ai/studios/litgpt-pretrain">
  <img src="https://pl-bolts-doc-images.s3.us-east-2.amazonaws.com/app-2/run-on-studio.svg" height="36px" alt="Run on Studios"/>
</a>
</div>

Pretraining is the process of teaching an AI model by exposing it to a large amount of data before it is fine-tuned for specific tasks.

<details>
<summary>Show code:</summary>

```bash
mkdir -p custom_texts
curl https://www.gutenberg.org/cache/epub/24440/pg24440.txt --output custom_texts/book1.txt
curl https://www.gutenberg.org/cache/epub/26393/pg26393.txt --output custom_texts/book2.txt

# 1) Download a tokenizer
litgpt download EleutherAI/pythia-160m \
  --tokenizer_only True

# 2) Pretrain the model
litgpt pretrain EleutherAI/pythia-160m \
  --tokenizer_dir EleutherAI/pythia-160m \
  --data TextFiles \
  --data.train_data_path "custom_texts/" \
  --train.max_tokens 10_000_000 \
  --out_dir out/custom-model

# 3) Test the model
litgpt chat out/custom-model/final
```
</details>
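The resulting checkpoint directory can be reused with the other workflows in this README. For example, serving the freshly pretrained model locally, with the path taken from the example above:

```bash
# Serve the pretrained checkpoint, as in the deploy section
litgpt serve out/custom-model/final
```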

[Read the full pretraining docs](tutorials/pretrain.md)

----
## Continue pretraining an LLM

<div align="center">
<a target="_blank" href="https://lightning.ai/lightning-ai/studios/litgpt-continue-pretraining">
  <img src="https://pl-bolts-doc-images.s3.us-east-2.amazonaws.com/app-2/run-on-studio.svg" height="36px" alt="Run on Studios"/>
</a>
</div>

Continued pretraining is another form of finetuning that specializes an already pretrained model by training it further on custom data:

<details>
<summary>Show code:</summary>

```bash
mkdir -p custom_texts
curl https://www.gutenberg.org/cache/epub/24440/pg24440.txt --output custom_texts/book1.txt
curl https://www.gutenberg.org/cache/epub/26393/pg26393.txt --output custom_texts/book2.txt

# 1) Continue pretraining a model (auto downloads weights)
litgpt pretrain EleutherAI/pythia-160m \
  --tokenizer_dir EleutherAI/pythia-160m \
  --initial_checkpoint_dir EleutherAI/pythia-160m \
  --data TextFiles \
  --data.train_data_path "custom_texts/" \
  --train.max_tokens 10_000_000 \
  --out_dir out/custom-model

# 2) Test the model
litgpt chat out/custom-model/final
```

</details>

[Read the full continued pretraining docs](tutorials/pretrain.md#continued-pretraining-on-custom-data)

----
# State-of-the-art features

✅ State-of-the-art optimizations: Flash Attention v2, multi-GPU support via fully-sharded data parallelism, [optional CPU offloading](tutorials/oom.md#do-sharding-across-multiple-gpus), and [TPU and XLA support](extensions/xla).</br>
✅ [Pretrain](tutorials/pretrain.md), [finetune](tutorials/finetune.md), and [deploy](tutorials/inference.md)</br>
✅ Reduce compute requirements with low-precision settings: FP16, BF16, and FP16/FP32 mixed.</br>
✅ Lower memory requirements with [quantization](tutorials/quantize.md): 4-bit floats, 8-bit integers, and double quantization.</br>
✅ [Configuration files](config_hub) for great out-of-the-box performance.</br>
✅ Parameter-efficient finetuning: [LoRA](tutorials/finetune_lora.md), [QLoRA](tutorials/finetune_lora.md), [Adapter](tutorials/finetune_adapter.md), and [Adapter v2](tutorials/finetune_adapter.md).</br>
✅ [Exporting](tutorials/convert_lit_models.md) to other popular model weight formats.</br>
✅ Many popular datasets for [pretraining](tutorials/pretrain.md) and [finetuning](tutorials/prepare_dataset.md), and [support for custom datasets](tutorials/prepare_dataset.md#preparing-custom-datasets-for-instruction-finetuning).</br>
✅ Readable and easy-to-modify code to experiment with the latest research ideas.</br>

---
# Training recipes

LitGPT comes with validated recipes (YAML configs) to train models under different conditions. We've generated these recipes based on the parameters we found to perform best for different training conditions.

Browse all training recipes [here](config_hub).

### Example

```bash
litgpt finetune \
  --config https://raw.githubusercontent.com/Lightning-AI/litgpt/main/config_hub/finetune/llama-2-7b/lora.yaml
```
<details>
<summary>✅ Use configs to customize training</summary>

Configs let you customize training for all granular parameters, for example:

```yaml
# The path to the base model's checkpoint directory to load for finetuning. (type: <class 'Path'>, default: checkpoints/stabilityai/stablelm-base-alpha-3b)
checkpoint_dir: checkpoints/meta-llama/Llama-2-7b-hf

# Directory in which to save checkpoints and logs. (type: <class 'Path'>, default: out/lora)
out_dir: out/finetune/qlora-llama2-7b

# The precision to use for finetuning. Possible choices: "bf16-true", "bf16-mixed", "32-true". (type: Optional[str], default: null)
precision: bf16-true

...
```
</details>

<details>
<summary>✅ Example: LoRA finetuning config</summary>

```yaml
# The path to the base model's checkpoint directory to load for finetuning. (type: <class 'Path'>, default: checkpoints/stabilityai/stablelm-base-alpha-3b)
checkpoint_dir: checkpoints/meta-llama/Llama-2-7b-hf

# Directory in which to save checkpoints and logs. (type: <class 'Path'>, default: out/lora)
out_dir: out/finetune/qlora-llama2-7b

# The precision to use for finetuning. Possible choices: "bf16-true", "bf16-mixed", "32-true". (type: Optional[str], default: null)
precision: bf16-true

# If set, quantize the model with this algorithm. See ``tutorials/quantize.md`` for more information. (type: Optional[Literal['nf4', 'nf4-dq', 'fp4', 'fp4-dq', 'int8-training']], default: null)
quantize: bnb.nf4

# How many devices/GPUs to use. (type: Union[int, str], default: 1)
devices: 1

# How many nodes to use. (type: int, default: 1)
num_nodes: 1

# The LoRA rank. (type: int, default: 8)
lora_r: 32

# The LoRA alpha. (type: int, default: 16)
lora_alpha: 16

# The LoRA dropout value. (type: float, default: 0.05)
lora_dropout: 0.05

# Whether to apply LoRA to the query weights in attention. (type: bool, default: True)
lora_query: true

# Whether to apply LoRA to the key weights in attention. (type: bool, default: False)
lora_key: false

# Whether to apply LoRA to the value weights in attention. (type: bool, default: True)
lora_value: true

# Whether to apply LoRA to the output projection in the attention block. (type: bool, default: False)
lora_projection: false

# Whether to apply LoRA to the weights of the MLP in the attention block. (type: bool, default: False)
lora_mlp: false

# Whether to apply LoRA to output head in GPT. (type: bool, default: False)
lora_head: false

# Data-related arguments. If not provided, the default is ``litgpt.data.Alpaca``.
data:
  class_path: litgpt.data.Alpaca2k
  init_args:
    mask_prompt: false
    val_split_fraction: 0.05
    prompt_style: alpaca
    ignore_index: -100
    seed: 42
    num_workers: 4
    download_dir: data/alpaca2k

# Training-related arguments. See ``litgpt.args.TrainArgs`` for details
train:

  # Number of optimizer steps between saving checkpoints (type: Optional[int], default: 1000)
  save_interval: 200

  # Number of iterations between logging calls (type: int, default: 1)
  log_interval: 1

  # Number of samples between optimizer steps across data-parallel ranks (type: int, default: 128)
  global_batch_size: 8

  # Number of samples per data-parallel rank (type: int, default: 4)
  micro_batch_size: 2

  # Number of iterations with learning rate warmup active (type: int, default: 100)
  lr_warmup_steps: 10

  # Number of epochs to train on (type: Optional[int], default: 5)
  epochs: 4

  # Total number of tokens to train on (type: Optional[int], default: null)
  max_tokens:

  # Limits the number of optimizer steps to run (type: Optional[int], default: null)
  max_steps:

  # Limits the length of samples (type: Optional[int], default: null)
  max_seq_length: 512

  # Whether to tie the embedding weights with the language modeling head weights (type: Optional[bool], default: null)
  tie_embeddings:

  # (type: float, default: 0.0003)
  learning_rate: 0.0002

  # (type: float, default: 0.02)
  weight_decay: 0.0

  # (type: float, default: 0.9)
  beta1: 0.9

  # (type: float, default: 0.95)
  beta2: 0.95

  # (type: Optional[float], default: null)
  max_norm:

  # (type: float, default: 6e-05)
  min_lr: 6.0e-05

# Evaluation-related arguments. See ``litgpt.args.EvalArgs`` for details
eval:

  # Number of optimizer steps between evaluation calls (type: int, default: 100)
  interval: 100

  # Number of tokens to generate (type: Optional[int], default: 100)
  max_new_tokens: 100

  # Number of iterations (type: int, default: 100)
  max_iters: 100

# The name of the logger to send metrics to. (type: Literal['wandb', 'tensorboard', 'csv'], default: csv)
logger_name: csv

# The random seed to use for reproducibility. (type: int, default: 1337)
seed: 1337
```
</details>

<details>
<summary>✅ Override any parameter in the CLI:</summary>

```bash
litgpt finetune \
  --config https://raw.githubusercontent.com/Lightning-AI/litgpt/main/config_hub/finetune/llama-2-7b/lora.yaml \
  --lora_r 4
```
</details>
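Because the config keys and CLI flags share the same names, nested fields from the YAML above can be overridden in the same way. A sketch under that assumption, with arbitrary example values:

```bash
litgpt finetune \
  --config https://raw.githubusercontent.com/Lightning-AI/litgpt/main/config_hub/finetune/llama-2-7b/lora.yaml \
  --train.max_seq_length 256 \
  --eval.interval 50
```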

----

# Project highlights

LitGPT powers many great AI projects, initiatives, challenges, and enterprises. Please submit a pull request to have your project considered for a feature here.

<details>
<summary>📊 SAMBA: Simple Hybrid State Space Models for Efficient Unlimited Context Language Modeling</summary>

The [Samba](https://github.com/microsoft/Samba) project by researchers at Microsoft is built on top of the LitGPT code base and combines state space models with sliding window attention, which outperforms pure state space models.

</details>

<details>
<summary>🏆 NeurIPS 2023 Large Language Model Efficiency Challenge: 1 LLM + 1 GPU + 1 Day</summary>

The LitGPT repository was the official starter kit for the [NeurIPS 2023 LLM Efficiency Challenge](https://llm-efficiency-challenge.github.io), a competition focused on finetuning an existing non-instruction-tuned LLM for 24 hours on a single GPU.

</details>

<details>
<summary>🦙 TinyLlama: An Open-Source Small Language Model</summary>

LitGPT powered the [TinyLlama project](https://github.com/jzhang38/TinyLlama) and the [TinyLlama: An Open-Source Small Language Model](https://arxiv.org/abs/2401.02385) research paper.

</details>

<details>
<summary>🍪 MicroLlama: MicroLlama-300M</summary>

[MicroLlama](https://github.com/keeeeenw/MicroLlama) is a 300M Llama model pretrained on 50B tokens, powered by TinyLlama and LitGPT.
</details>

<details>
<summary>🔬 Pre-training Small Base LMs with Fewer Tokens</summary>

The research paper ["Pre-training Small Base LMs with Fewer Tokens"](https://arxiv.org/abs/2404.08634), which utilizes LitGPT, develops smaller base language models by inheriting a few transformer blocks from larger models and training on a tiny fraction of the data used by the larger models. It demonstrates that these smaller models can perform comparably to larger models despite using significantly less training data and resources.

</details>

----

# Community

We welcome all individual contributors, regardless of their level of experience or hardware. Your contributions are valuable, and we are excited to see what you can accomplish in this collaborative and supportive environment.

- [Request a feature](https://github.com/Lightning-AI/litgpt/issues)
- [Submit your first contribution](https://lightning.ai/pages/community/tutorial/how-to-contribute-to-litgpt/)
- [Join our Discord](https://discord.gg/VptPCZkGNa)

# Tutorials

🚀 [Get started](tutorials/0_to_litgpt.md)</br>
⚡️ [Finetuning, incl. LoRA, QLoRA, and Adapters](tutorials/finetune.md)</br>
🤖 [Pretraining](tutorials/pretrain.md)</br>
💬 [Model evaluation](tutorials/evaluation.md)</br>
📘 [Supported and custom datasets](tutorials/prepare_dataset.md)</br>
🧹 [Quantization](tutorials/quantize.md)</br>
🤯 [Tips for dealing with out-of-memory (OOM) errors](tutorials/oom.md)</br>
🧑🏽‍💻 [Using cloud TPUs](extensions/xla)</br>

----

### Acknowledgments

This implementation builds on [Lit-LLaMA](https://github.com/lightning-AI/lit-llama) and [nanoGPT](https://github.com/karpathy/nanoGPT), and it's **powered by [Lightning Fabric](https://lightning.ai/docs/fabric/stable/) ⚡**.

- [@karpathy](https://github.com/karpathy) for [nanoGPT](https://github.com/karpathy/nanoGPT)
- [@EleutherAI](https://github.com/EleutherAI) for [GPT-NeoX](https://github.com/EleutherAI/gpt-neox) and the [Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness)
- [@TimDettmers](https://github.com/TimDettmers) for [bitsandbytes](https://github.com/TimDettmers/bitsandbytes)
- [@Microsoft](https://github.com/microsoft) for [LoRA](https://github.com/microsoft/LoRA)
- [@tridao](https://github.com/tridao) for [Flash Attention 2](https://github.com/Dao-AILab/flash-attention)

### License

LitGPT is released under the [Apache 2.0](https://github.com/Lightning-AI/litgpt/blob/main/LICENSE) license.

### Citation

If you use LitGPT in your research, please cite the following work:

```bibtex
@misc{litgpt-2023,
  author       = {Lightning AI},
  title        = {LitGPT},
  howpublished = {\url{https://github.com/Lightning-AI/litgpt}},
  year         = {2023},
}
```
check.py
ADDED
|
@@ -0,0 +1,34 @@
import os
import json
import argparse

def check_file(path):
    with open(path, "r", encoding="utf-8") as f:
        data = json.load(f)  # assumes each file contains a single JSON array
    total = len(data)
    # serialize items to strings to detect duplicates (dicts are not hashable)
    unique = len({json.dumps(item, sort_keys=True) for item in data})
    duplicates = total - unique
    return total, duplicates

def main():
    parser = argparse.ArgumentParser(description="Check JSON array dataset files for row count and duplicates.")
    parser.add_argument("folder", help="Folder containing the JSON files")
    args = parser.parse_args()

    folder = args.folder
    for fname in sorted(os.listdir(folder)):
        fpath = os.path.join(folder, fname)
        if not os.path.isfile(fpath):
            continue
        if not fname.endswith(".json"):
            continue
        try:
            total, duplicates = check_file(fpath)
            print(f"{fname:25} rows={total:6} duplicates={duplicates:4}")
        except Exception as e:
            print(f"{fname:25} [ERROR: {e}]")

if __name__ == "__main__":
    main()
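A typical invocation of the script above, with the folder name as a placeholder:

```bash
# Report row counts and duplicate counts for every *.json file in a folder
python check.py path/to/json_folder
```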
cmd_start_for.ini
ADDED
|
@@ -0,0 +1,2 @@
[cmd_start_info]
game_id=26
delete.py
ADDED
|
@@ -0,0 +1,83 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import argparse
import pathlib
import re
import shutil

def remove_explanation_fields(text: str) -> str:
    """
    Remove key/value pairs whose key is "explanation" (with string values) from JSON text.
    Works at the lexical level with regexes instead of parsing the JSON, handling escape
    sequences safely. Leading/trailing commas are handled so the result stays valid JSON.
    """

    # JSON string literal: any number of escape sequences (e.g. \" \\ \n ...)
    # Note: JSON strings cannot contain unescaped newlines, so DOTALL is not needed.
    json_string = r'"(?:\\.|[^"\\])*"'

    # Case A: "explanation" at the start/middle, followed by a comma
    # ... "explanation": "......", ...
    pattern_after = re.compile(
        rf'\s*"explanation"\s*:\s*{json_string}\s*,'
    )

    # Case B: "explanation" in the middle/at the end, preceded by a comma
    # ..., "explanation": "......"
    pattern_before = re.compile(
        rf',\s*"explanation"\s*:\s*{json_string}'
    )

    # Remove the "followed by a comma" case first, then the "preceded by a comma" case
    new_text = pattern_after.sub('', text)
    new_text = pattern_before.sub('', new_text)

    # Case C: the object contains only this single key/value pair (no comma on either side),
    # e.g. { "explanation": "..." }.
    # After A/B this leaves { } behind, which is already valid JSON, so no extra handling
    # is needed; extra whitespace inside the braces does not affect validity.

    return new_text

def process_file(p: pathlib.Path, dry_run: bool = False) -> bool:
    original = p.read_text(encoding='utf-8')
    repaired = remove_explanation_fields(original)
    if repaired != original:
        if not dry_run:
            # back up the original file
            backup = p.with_suffix(p.suffix + '.bak')
            shutil.copyfile(p, backup)
            # overwrite in place
            p.write_text(repaired, encoding='utf-8')
        return True
    return False

def main():
    ap = argparse.ArgumentParser(
        description="Remove all \"explanation\": \"...\" fields from JSON files without parsing the JSON (escape sequences and commas are handled safely)."
    )
    ap.add_argument("folder", type=str, help="Path to the folder containing the .json files")
    ap.add_argument("--dry-run", action="store_true", help="Only show which files would be modified; do not write")
    args = ap.parse_args()

    root = pathlib.Path(args.folder)
    if not root.is_dir():
        raise SystemExit(f"Path does not exist or is not a directory: {root}")

    changed = 0
    total = 0
    for p in sorted(root.glob("*.json")):
        total += 1
        if process_file(p, dry_run=args.dry_run):
            changed += 1
            print(f"[UPDATED] {p}")
        else:
            print(f"[SKIP   ] {p} (no explanation field or nothing to change)")

    print(f"\nDone: scanned {total} .json files, modified {changed}.")
    if not args.dry_run:
        print("A .bak backup was created for each modified file.")

if __name__ == "__main__":
    main()
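Typical usage of the script above, with a placeholder folder; `--dry-run` previews the changes without writing:

```bash
# Preview, then remove the "explanation" fields in place (a .bak backup is written per modified file)
python delete.py path/to/json_folder --dry-run
python delete.py path/to/json_folder
```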
environment.yml
ADDED
|
@@ -0,0 +1,261 @@
| 1 |
+
name: /mnt/data/llmtcl
|
| 2 |
+
channels:
|
| 3 |
+
- defaults
|
| 4 |
+
dependencies:
|
| 5 |
+
- _libgcc_mutex=0.1=main
|
| 6 |
+
- _openmp_mutex=5.1=1_gnu
|
| 7 |
+
- bzip2=1.0.8=h5eee18b_6
|
| 8 |
+
- ca-certificates=2025.2.25=h06a4308_0
|
| 9 |
+
- expat=2.7.1=h6a678d5_0
|
| 10 |
+
- ld_impl_linux-64=2.40=h12ee557_0
|
| 11 |
+
- libffi=3.4.4=h6a678d5_1
|
| 12 |
+
- libgcc-ng=11.2.0=h1234567_1
|
| 13 |
+
- libgomp=11.2.0=h1234567_1
|
| 14 |
+
- libstdcxx-ng=11.2.0=h1234567_1
|
| 15 |
+
- libuuid=1.41.5=h5eee18b_0
|
| 16 |
+
- libxcb=1.17.0=h9b100fa_0
|
| 17 |
+
- ncurses=6.5=h7934f7d_0
|
| 18 |
+
- openssl=3.0.17=h5eee18b_0
|
| 19 |
+
- pip=25.1=pyhc872135_2
|
| 20 |
+
- pthread-stubs=0.3=h0ce48e5_1
|
| 21 |
+
- python=3.10.18=h1a3bd86_0
|
| 22 |
+
- readline=8.2=h5eee18b_0
|
| 23 |
+
- setuptools=78.1.1=py310h06a4308_0
|
| 24 |
+
- sqlite=3.50.2=hb25bd0a_1
|
| 25 |
+
- tk=8.6.14=h993c535_1
|
| 26 |
+
- wheel=0.45.1=py310h06a4308_0
|
| 27 |
+
- xorg-libx11=1.8.12=h9b100fa_1
|
| 28 |
+
- xorg-libxau=1.0.12=h9b100fa_0
|
| 29 |
+
- xorg-libxdmcp=1.1.5=h9b100fa_0
|
| 30 |
+
- xorg-xorgproto=2024.1=h5eee18b_1
|
| 31 |
+
- xz=5.6.4=h5eee18b_1
|
| 32 |
+
- zlib=1.2.13=h5eee18b_1
|
| 33 |
+
- pip:
|
| 34 |
+
- absl-py==2.3.1
|
| 35 |
+
- accelerate==1.10.0
|
| 36 |
+
- aiohappyeyeballs==2.6.1
|
| 37 |
+
- aiohttp==3.12.15
|
| 38 |
+
- aiosignal==1.4.0
|
| 39 |
+
- annotated-types==0.7.0
|
| 40 |
+
- anyio==4.10.0
|
| 41 |
+
- astor==0.8.1
|
| 42 |
+
- async-timeout==5.0.1
|
| 43 |
+
- attrs==25.3.0
|
| 44 |
+
- blake3==1.0.5
|
| 45 |
+
- boto3==1.40.1
|
| 46 |
+
- botocore==1.40.1
|
| 47 |
+
- cachetools==6.2.0
|
| 48 |
+
- cbor2==5.7.0
|
| 49 |
+
- certifi==2025.8.3
|
| 50 |
+
- cffi==2.0.0
|
| 51 |
+
- cfgv==3.4.0
|
| 52 |
+
- chardet==5.2.0
|
| 53 |
+
- charset-normalizer==3.4.2
|
| 54 |
+
- click==8.2.1
|
| 55 |
+
- cloudpickle==3.1.1
|
| 56 |
+
- colorama==0.4.6
|
| 57 |
+
- compressed-tensors==0.10.2
|
| 58 |
+
- contourpy==1.3.2
|
| 59 |
+
- coverage==7.10.6
|
| 60 |
+
- cupy-cuda12x==13.6.0
|
| 61 |
+
- cycler==0.12.1
|
| 62 |
+
- dataproperty==1.1.0
|
| 63 |
+
- datasets==3.6.0
|
| 64 |
+
- depyf==0.19.0
|
| 65 |
+
- dill==0.3.8
|
| 66 |
+
- diskcache==5.6.3
|
| 67 |
+
- distlib==0.4.0
|
| 68 |
+
- distro==1.9.0
|
| 69 |
+
- dnspython==2.8.0
|
| 70 |
+
- docstring-parser==0.17.0
|
| 71 |
+
- einops==0.8.1
|
| 72 |
+
- email-validator==2.3.0
|
| 73 |
+
- et-xmlfile==2.0.0
|
| 74 |
+
- evaluate==0.4.5
|
| 75 |
+
- exceptiongroup==1.3.0
|
| 76 |
+
- execnet==2.1.1
|
| 77 |
+
- fastapi==0.116.1
|
| 78 |
+
- fastapi-cli==0.0.11
|
| 79 |
+
- fastapi-cloud-cli==0.1.5
|
| 80 |
+
- fastrlock==0.8.3
|
| 81 |
+
- filelock==3.18.0
|
| 82 |
+
- fonttools==4.59.0
|
| 83 |
+
- frozenlist==1.7.0
|
| 84 |
+
- fsspec==2025.3.0
|
| 85 |
+
- gguf==0.17.1
|
| 86 |
+
- grpcio==1.74.0
|
| 87 |
+
- h11==0.16.0
|
| 88 |
+
- hf-transfer==0.1.9
|
| 89 |
+
- hf-xet==1.1.5
|
| 90 |
+
- httpcore==1.0.9
|
| 91 |
+
- httptools==0.6.4
|
| 92 |
+
- httpx==0.28.1
|
| 93 |
+
- huggingface-hub==0.34.4
|
| 94 |
+
- identify==2.6.14
|
| 95 |
+
- idna==3.10
|
| 96 |
+
- importlib-resources==6.5.2
|
| 97 |
+
- iniconfig==2.1.0
|
| 98 |
+
- interegular==0.3.3
|
| 99 |
+
- jinja2==3.1.6
|
| 100 |
+
- jiter==0.10.0
|
| 101 |
+
- jmespath==1.0.1
|
| 102 |
+
- joblib==1.5.1
|
| 103 |
+
- jsonargparse==4.40.1
|
| 104 |
+
- jsonlines==4.0.0
|
| 105 |
+
- jsonschema==4.25.1
|
| 106 |
+
- jsonschema-specifications==2025.9.1
|
| 107 |
+
- kiwisolver==1.4.9
|
| 108 |
+
- lark==1.2.2
|
| 109 |
+
- lightning==2.5.2
|
| 110 |
+
- lightning-utilities==0.15.0
|
| 111 |
+
- litdata==0.2.51
|
| 112 |
+
- litgpt==0.5.9
|
| 113 |
+
- llguidance==0.7.30
|
| 114 |
+
- llvmlite==0.44.0
|
| 115 |
+
- lm-eval==0.4.9.1
|
| 116 |
+
- lm-format-enforcer==0.10.12
|
| 117 |
+
- lxml==6.0.0
|
| 118 |
+
- markdown==3.8.2
|
| 119 |
+
- markdown-it-py==4.0.0
|
| 120 |
+
- markupsafe==3.0.2
|
| 121 |
+
- matplotlib==3.10.5
|
| 122 |
+
- mbstrdecoder==1.1.4
|
| 123 |
+
- mdurl==0.1.2
|
| 124 |
+
- mistral-common==1.8.4
|
| 125 |
+
- more-itertools==10.7.0
|
| 126 |
+
- mpmath==1.3.0
|
| 127 |
+
- msgpack==1.1.1
|
| 128 |
+
- msgspec==0.19.0
|
| 129 |
+
- multidict==6.6.3
|
| 130 |
+
- multiprocess==0.70.16
|
| 131 |
+
- networkx==3.4.2
|
| 132 |
+
- ninja==1.13.0
|
| 133 |
+
- nltk==3.9.1
|
| 134 |
+
- nodeenv==1.9.1
|
| 135 |
+
- numba==0.61.2
|
| 136 |
+
- numexpr==2.11.0
|
| 137 |
+
- numpy==2.2.6
|
| 138 |
+
- nvidia-cublas-cu12==12.6.4.1
|
| 139 |
+
- nvidia-cuda-cupti-cu12==12.6.80
|
| 140 |
+
- nvidia-cuda-nvrtc-cu12==12.6.77
|
| 141 |
+
- nvidia-cuda-runtime-cu12==12.6.77
|
| 142 |
+
- nvidia-cudnn-cu12==9.5.1.17
|
| 143 |
+
- nvidia-cufft-cu12==11.3.0.4
|
| 144 |
+
- nvidia-cufile-cu12==1.11.1.6
|
| 145 |
+
- nvidia-curand-cu12==10.3.7.77
|
| 146 |
+
- nvidia-cusolver-cu12==11.7.1.2
|
| 147 |
+
- nvidia-cusparse-cu12==12.5.4.2
|
| 148 |
+
- nvidia-cusparselt-cu12==0.6.3
|
| 149 |
+
- nvidia-nccl-cu12==2.26.2
|
| 150 |
+
- nvidia-nvjitlink-cu12==12.6.85
|
| 151 |
+
- nvidia-nvtx-cu12==12.6.77
|
| 152 |
+
- obstore==0.7.3
|
| 153 |
+
- openai==1.107.0
|
| 154 |
+
- openai-harmony==0.0.4
|
| 155 |
+
- opencv-python-headless==4.12.0.88
|
| 156 |
+
- openpyxl==3.1.5
|
| 157 |
+
- outlines-core==0.2.10
|
| 158 |
+
- packaging==25.0
|
| 159 |
+
- pandas==2.3.1
|
| 160 |
+
- partial-json-parser==0.2.1.1.post6
|
| 161 |
+
- pathvalidate==3.3.1
|
| 162 |
+
- peft==0.17.0
|
| 163 |
+
- pillow==11.3.0
|
| 164 |
+
- platformdirs==4.4.0
|
| 165 |
+
- pluggy==1.6.0
|
| 166 |
+
- portalocker==3.2.0
|
| 167 |
+
- pre-commit==4.3.0
|
| 168 |
+
- prometheus-client==0.22.1
|
| 169 |
+
- prometheus-fastapi-instrumentator==7.1.0
|
| 170 |
+
- propcache==0.3.2
|
| 171 |
+
- protobuf==6.31.1
|
| 172 |
+
- psutil==7.0.0
|
| 173 |
+
- py-cpuinfo==9.0.0
|
| 174 |
+
- pyarrow==21.0.0
|
| 175 |
+
- pybase64==1.4.2
|
| 176 |
+
- pybind11==3.0.0
|
| 177 |
+
- pycountry==24.6.1
|
| 178 |
+
- pycparser==2.23
|
| 179 |
+
- pydantic==2.11.7
|
| 180 |
+
- pydantic-core==2.33.2
|
| 181 |
+
- pydantic-extra-types==2.10.5
|
| 182 |
+
- pygments==2.19.2
|
| 183 |
+
- pyparsing==3.2.3
|
| 184 |
+
- pytablewriter==1.2.1
|
| 185 |
+
- pytest==8.4.2
|
| 186 |
+
- pytest-cov==6.3.0
|
| 187 |
+
- pytest-xdist==3.8.0
|
| 188 |
+
- python-dateutil==2.9.0.post0
|
| 189 |
+
- python-dotenv==1.1.1
|
| 190 |
+
- python-json-logger==3.3.0
|
| 191 |
+
- python-multipart==0.0.20
|
| 192 |
+
- pytorch-lightning==2.5.2
|
| 193 |
+
- pytz==2025.2
|
| 194 |
+
- pyyaml==6.0.2
|
| 195 |
+
- pyzmq==27.1.0
|
| 196 |
+
- ray==2.49.1
|
| 197 |
+
- referencing==0.36.2
|
| 198 |
+
- regex==2025.7.34
|
| 199 |
+
- requests==2.32.4
|
| 200 |
+
- rich==14.1.0
|
| 201 |
+
- rich-toolkit==0.15.1
|
| 202 |
+
- rignore==0.6.4
|
| 203 |
+
- rouge-score==0.1.2
|
| 204 |
+
- rpds-py==0.27.1
|
| 205 |
+
- s3transfer==0.13.1
|
| 206 |
+
- sacrebleu==2.5.1
|
| 207 |
+
- safetensors==0.5.3
|
| 208 |
+
- scikit-learn==1.7.1
|
| 209 |
+
- scipy==1.15.3
|
| 210 |
+
- seaborn==0.13.2
|
| 211 |
+
- sentencepiece==0.2.0
|
| 212 |
+
- sentry-sdk==2.37.1
|
| 213 |
+
- setproctitle==1.3.7
|
| 214 |
+
- shellingham==1.5.4
|
| 215 |
+
- six==1.17.0
|
| 216 |
+
- sniffio==1.3.1
|
| 217 |
+
- soundfile==0.13.1
|
| 218 |
+
- soxr==1.0.0
|
| 219 |
+
- sqlitedict==2.1.0
|
| 220 |
+
- starlette==0.47.3
|
| 221 |
+
- sympy==1.14.0
|
| 222 |
+
- tabledata==1.3.4
|
| 223 |
+
- tabulate==0.9.0
|
| 224 |
+
- tcolorpy==0.1.7
|
| 225 |
+
- tenacity==9.1.2
|
| 226 |
+
- tensorboard==2.20.0
|
| 227 |
+
- tensorboard-data-server==0.7.2
|
| 228 |
+
- threadpoolctl==3.6.0
|
| 229 |
+
- tifffile==2025.5.10
|
| 230 |
+
- tiktoken==0.11.0
|
| 231 |
+
- tokenizers==0.21.4
|
| 232 |
+
- tomli==2.2.1
|
| 233 |
+
- torch==2.7.1
|
| 234 |
+
- torchaudio==2.7.1
|
| 235 |
+
- torchmetrics==1.8.0
|
| 236 |
+
- torchvision==0.22.1
|
| 237 |
+
- tqdm==4.67.1
|
| 238 |
+
- tqdm-multiprocess==0.0.11
|
| 239 |
+
- transformers==4.55.0
|
| 240 |
+
- triton==3.3.1
|
| 241 |
+
- typepy==1.3.4
|
| 242 |
+
- typer==0.17.4
|
| 243 |
+
- typeshed-client==2.8.2
|
| 244 |
+
- typing-extensions==4.14.1
|
| 245 |
+
- typing-inspection==0.4.1
|
| 246 |
+
- tzdata==2025.2
|
| 247 |
+
- urllib3==2.5.0
|
| 248 |
+
- uvicorn==0.35.0
|
| 249 |
+
- uvloop==0.21.0
|
| 250 |
+
- virtualenv==20.34.0
|
| 251 |
+
- vllm==0.10.1.1
|
| 252 |
+
- watchfiles==1.1.0
|
| 253 |
+
- websockets==15.0.1
|
| 254 |
+
- werkzeug==3.1.3
|
| 255 |
+
- word2number==1.1
|
| 256 |
+
- xformers==0.0.31
|
| 257 |
+
- xgrammar==0.1.21
|
| 258 |
+
- xxhash==3.5.0
|
| 259 |
+
- yarl==1.20.1
|
| 260 |
+
- zstandard==0.23.0
|
| 261 |
+
prefix: /mnt/data/llmtcl
|
fix.py
ADDED
|
@@ -0,0 +1,45 @@
import re
import pathlib
import shutil
import argparse

def escape_backslashes_in_questions(text: str) -> str:
    # Match "question": "..."
    json_string = r'"(?:\\.|[^"\\])*"'
    pattern = re.compile(rf'("question"\s*:\s*)({json_string})')

    def replacer(m):
        prefix, raw = m.groups()
        # strip the surrounding quotes
        inner = raw[1:-1]
        # replace every single backslash with a double backslash
        inner_fixed = inner.replace("\\", "\\\\")
        return f'{prefix}"{inner_fixed}"'

    return pattern.sub(replacer, text)

def process_file(p: pathlib.Path):
    original = p.read_text(encoding="utf-8")
    fixed = escape_backslashes_in_questions(original)
    if fixed != original:
        shutil.copyfile(p, p.with_suffix(p.suffix + ".bak"))
        p.write_text(fixed, encoding="utf-8")
        print(f"[UPDATED] {p}")
    else:
        print(f"[SKIP   ] {p}")

def main():
    ap = argparse.ArgumentParser(description="Fix backslashes in 'question' fields of JSON files")
    ap.add_argument("folder", type=str, help="Directory containing .json files")
    args = ap.parse_args()

    root = pathlib.Path(args.folder)
    if not root.is_dir():
        print(f"[ERROR] {args.folder} is not a valid directory")
        return

    for p in root.glob("*.json"):
        process_file(p)

if __name__ == "__main__":
    main()
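And the corresponding usage for this script, again with a placeholder folder:

```bash
# Escape backslashes inside "question" string values for every *.json file in the folder
python fix.py path/to/json_folder
```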
litgpt.egg-info/PKG-INFO
ADDED
|
@@ -0,0 +1,977 @@
| 1 |
+
Metadata-Version: 2.4
|
| 2 |
+
Name: litgpt
|
| 3 |
+
Version: 0.5.9
|
| 4 |
+
Summary: Hackable implementation of state-of-the-art open-source LLMs
|
| 5 |
+
Author-email: Lightning AI <contact@lightning.ai>
|
| 6 |
+
License: Apache License
|
| 7 |
+
Version 2.0, January 2004
|
| 8 |
+
http://www.apache.org/licenses/
|
| 9 |
+
|
| 10 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 11 |
+
|
| 12 |
+
1. Definitions.
|
| 13 |
+
|
| 14 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 15 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 16 |
+
|
| 17 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 18 |
+
the copyright owner that is granting the License.
|
| 19 |
+
|
| 20 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 21 |
+
other entities that control, are controlled by, or are under common
|
| 22 |
+
control with that entity. For the purposes of this definition,
|
| 23 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 24 |
+
direction or management of such entity, whether by contract or
|
| 25 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 26 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 27 |
+
|
| 28 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 29 |
+
exercising permissions granted by this License.
|
| 30 |
+
|
| 31 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 32 |
+
including but not limited to software source code, documentation
|
| 33 |
+
source, and configuration files.
|
| 34 |
+
|
| 35 |
+
"Object" form shall mean any form resulting from mechanical
|
| 36 |
+
transformation or translation of a Source form, including but
|
| 37 |
+
not limited to compiled object code, generated documentation,
|
| 38 |
+
and conversions to other media types.
|
| 39 |
+
|
| 40 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 41 |
+
Object form, made available under the License, as indicated by a
|
| 42 |
+
copyright notice that is included in or attached to the work
|
| 43 |
+
(an example is provided in the Appendix below).
|
| 44 |
+
|
| 45 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 46 |
+
form, that is based on (or derived from) the Work and for which the
|
| 47 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 48 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 49 |
+
of this License, Derivative Works shall not include works that remain
|
| 50 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 51 |
+
the Work and Derivative Works thereof.
|
| 52 |
+
|
| 53 |
+
"Contribution" shall mean any work of authorship, including
|
| 54 |
+
the original version of the Work and any modifications or additions
|
| 55 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 56 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 57 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 58 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 59 |
+
means any form of electronic, verbal, or written communication sent
|
| 60 |
+
to the Licensor or its representatives, including but not limited to
|
| 61 |
+
communication on electronic mailing lists, source code control systems,
|
| 62 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 63 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 64 |
+
excluding communication that is conspicuously marked or otherwise
|
| 65 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 66 |
+
|
| 67 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 68 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 69 |
+
subsequently incorporated within the Work.
|
| 70 |
+
|
| 71 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 72 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 73 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 74 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 75 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 76 |
+
Work and such Derivative Works in Source or Object form.
|
| 77 |
+
|
| 78 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 79 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 80 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 81 |
+
(except as stated in this section) patent license to make, have made,
|
| 82 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 83 |
+
where such license applies only to those patent claims licensable
|
| 84 |
+
by such Contributor that are necessarily infringed by their
|
| 85 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 86 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 87 |
+
institute patent litigation against any entity (including a
|
| 88 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 89 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 90 |
+
or contributory patent infringement, then any patent licenses
|
| 91 |
+
granted to You under this License for that Work shall terminate
|
| 92 |
+
as of the date such litigation is filed.
|
| 93 |
+
|
| 94 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 95 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 96 |
+
modifications, and in Source or Object form, provided that You
|
| 97 |
+
meet the following conditions:
|
| 98 |
+
|
| 99 |
+
(a) You must give any other recipients of the Work or
|
| 100 |
+
Derivative Works a copy of this License; and
|
| 101 |
+
|
| 102 |
+
(b) You must cause any modified files to carry prominent notices
|
| 103 |
+
stating that You changed the files; and
|
| 104 |
+
|
| 105 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 106 |
+
that You distribute, all copyright, patent, trademark, and
|
| 107 |
+
attribution notices from the Source form of the Work,
|
| 108 |
+
excluding those notices that do not pertain to any part of
|
| 109 |
+
the Derivative Works; and
|
| 110 |
+
|
| 111 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 112 |
+
distribution, then any Derivative Works that You distribute must
|
| 113 |
+
include a readable copy of the attribution notices contained
|
| 114 |
+
within such NOTICE file, excluding those notices that do not
|
| 115 |
+
pertain to any part of the Derivative Works, in at least one
|
| 116 |
+
of the following places: within a NOTICE text file distributed
|
| 117 |
+
as part of the Derivative Works; within the Source form or
|
| 118 |
+
documentation, if provided along with the Derivative Works; or,
|
| 119 |
+
within a display generated by the Derivative Works, if and
|
| 120 |
+
wherever such third-party notices normally appear. The contents
|
| 121 |
+
of the NOTICE file are for informational purposes only and
|
| 122 |
+
do not modify the License. You may add Your own attribution
|
| 123 |
+
notices within Derivative Works that You distribute, alongside
|
| 124 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 125 |
+
that such additional attribution notices cannot be construed
|
| 126 |
+
as modifying the License.
|
| 127 |
+
|
| 128 |
+
You may add Your own copyright statement to Your modifications and
|
| 129 |
+
may provide additional or different license terms and conditions
|
| 130 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 131 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 132 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 133 |
+
the conditions stated in this License.
|
| 134 |
+
|
| 135 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 136 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 137 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 138 |
+
this License, without any additional terms or conditions.
|
| 139 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 140 |
+
the terms of any separate license agreement you may have executed
|
| 141 |
+
with Licensor regarding such Contributions.
|
| 142 |
+
|
| 143 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 144 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 145 |
+
except as required for reasonable and customary use in describing the
|
| 146 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 147 |
+
|
| 148 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 149 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 150 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 151 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 152 |
+
implied, including, without limitation, any warranties or conditions
|
| 153 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 154 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 155 |
+
appropriateness of using or redistributing the Work and assume any
|
| 156 |
+
risks associated with Your exercise of permissions under this License.
|
| 157 |
+
|
| 158 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 159 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 160 |
+
unless required by applicable law (such as deliberate and grossly
|
| 161 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 162 |
+
liable to You for damages, including any direct, indirect, special,
|
| 163 |
+
incidental, or consequential damages of any character arising as a
|
| 164 |
+
result of this License or out of the use or inability to use the
|
| 165 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 166 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 167 |
+
other commercial damages or losses), even if such Contributor
|
| 168 |
+
has been advised of the possibility of such damages.
|
| 169 |
+
|
| 170 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 171 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 172 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 173 |
+
or other liability obligations and/or rights consistent with this
|
| 174 |
+
License. However, in accepting such obligations, You may act only
|
| 175 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 176 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 177 |
+
defend, and hold each Contributor harmless for any liability
|
| 178 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 179 |
+
of your accepting any such warranty or additional liability.
|
| 180 |
+
|
| 181 |
+
END OF TERMS AND CONDITIONS
|
| 182 |
+
|
| 183 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 184 |
+
|
| 185 |
+
To apply the Apache License to your work, attach the following
|
| 186 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
| 187 |
+
replaced with your own identifying information. (Don't include
|
| 188 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 189 |
+
comment syntax for the file format. We also recommend that a
|
| 190 |
+
file or class name and description of purpose be included on the
|
| 191 |
+
same "printed page" as the copyright notice for easier
|
| 192 |
+
identification within third-party archives.
|
| 193 |
+
|
| 194 |
+
Copyright [2023] Lightning AI
|
| 195 |
+
|
| 196 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 197 |
+
you may not use this file except in compliance with the License.
|
| 198 |
+
You may obtain a copy of the License at
|
| 199 |
+
|
| 200 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 201 |
+
|
| 202 |
+
Unless required by applicable law or agreed to in writing, software
|
| 203 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 204 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 205 |
+
See the License for the specific language governing permissions and
|
| 206 |
+
limitations under the License.
|
| 207 |
+
|
| 208 |
+
Project-URL: documentation, https://github.com/lightning-AI/litgpt/tutorials
|
| 209 |
+
Project-URL: homepage, https://github.com/lightning-AI/litgpt
|
| 210 |
+
Classifier: Programming Language :: Python :: 3 :: Only
|
| 211 |
+
Classifier: Programming Language :: Python :: 3.9
|
| 212 |
+
Classifier: Programming Language :: Python :: 3.10
|
| 213 |
+
Classifier: Programming Language :: Python :: 3.11
|
| 214 |
+
Classifier: Programming Language :: Python :: 3.12
|
| 215 |
+
Classifier: Programming Language :: Python :: 3.13
|
| 216 |
+
Description-Content-Type: text/markdown
|
| 217 |
+
License-File: LICENSE
|
| 218 |
+
Requires-Dist: huggingface-hub<0.33,>=0.23.5
|
| 219 |
+
Requires-Dist: jsonargparse[signatures]<=4.32.1,>=4.30.1; python_version <= "3.9"
|
| 220 |
+
Requires-Dist: jsonargparse[signatures]>=4.37; python_version > "3.9"
|
| 221 |
+
Requires-Dist: lightning>=2.5
|
| 222 |
+
Requires-Dist: psutil==7
|
| 223 |
+
Requires-Dist: safetensors>=0.4.3
|
| 224 |
+
Requires-Dist: tokenizers>=0.15.2
|
| 225 |
+
Requires-Dist: torch>=2.5
|
| 226 |
+
Requires-Dist: tqdm>=4.66
|
| 227 |
+
Provides-Extra: compiler
|
| 228 |
+
Requires-Dist: lightning-thunder>=0.2.0.dev20250119; (python_version >= "3.10" and sys_platform == "linux") and extra == "compiler"
|
| 229 |
+
Provides-Extra: extra
|
| 230 |
+
Requires-Dist: bitsandbytes<0.43,>=0.42; sys_platform == "darwin" and extra == "extra"
|
| 231 |
+
Requires-Dist: bitsandbytes<0.45.5,>=0.45.2; (sys_platform == "linux" or sys_platform == "win32") and extra == "extra"
|
| 232 |
+
Requires-Dist: datasets<4,>=2.18; extra == "extra"
|
| 233 |
+
Requires-Dist: huggingface-hub[hf-transfer]>=0.21; extra == "extra"
|
| 234 |
+
Requires-Dist: litdata==0.2.51; extra == "extra"
|
| 235 |
+
Requires-Dist: litserve>0.2; extra == "extra"
|
| 236 |
+
Requires-Dist: lm-eval>=0.4.2; extra == "extra"
|
| 237 |
+
Requires-Dist: pandas>=1.9; extra == "extra"
|
| 238 |
+
Requires-Dist: pyarrow>=15.0.2; extra == "extra"
|
| 239 |
+
Requires-Dist: requests>=2.31; extra == "extra"
|
| 240 |
+
Requires-Dist: sentencepiece>=0.2; extra == "extra"
|
| 241 |
+
Requires-Dist: tensorboard>=2.14; extra == "extra"
|
| 242 |
+
Requires-Dist: torchmetrics>=1.3.1; extra == "extra"
|
| 243 |
+
Requires-Dist: transformers<4.52,>=4.51.3; extra == "extra"
|
| 244 |
+
Requires-Dist: uvloop>=0.2; sys_platform != "win32" and extra == "extra"
|
| 245 |
+
Requires-Dist: zstandard>=0.22; extra == "extra"
|
| 246 |
+
Provides-Extra: test
|
| 247 |
+
Requires-Dist: einops>=0.7; extra == "test"
|
| 248 |
+
Requires-Dist: protobuf>=4.23.4; extra == "test"
|
| 249 |
+
Requires-Dist: pytest>=8.1.1; extra == "test"
|
| 250 |
+
Requires-Dist: pytest-benchmark>=5.1; extra == "test"
|
| 251 |
+
Requires-Dist: pytest-dependency>=0.6; extra == "test"
|
| 252 |
+
Requires-Dist: pytest-rerunfailures>=14; extra == "test"
|
| 253 |
+
Requires-Dist: pytest-timeout>=2.3.1; extra == "test"
|
| 254 |
+
Dynamic: license-file
|
| 255 |
+
|
| 256 |
+
<div align="center">
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
# ⚡ LitGPT
|
| 260 |
+
|
| 261 |
+
**20+ high-performance LLMs with recipes to pretrain, finetune, and deploy at scale.**
|
| 262 |
+
|
| 263 |
+
<pre>
|
| 264 |
+
✅ From scratch implementations ✅ No abstractions ✅ Beginner friendly
|
| 265 |
+
✅ Flash attention ✅ FSDP ✅ LoRA, QLoRA, Adapter
|
| 266 |
+
✅ Reduce GPU memory (fp4/8/16/32) ✅ 1-1000+ GPUs/TPUs ✅ 20+ LLMs
|
| 267 |
+
</pre>
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
---
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+

|
| 274 |
+
 [](https://github.com/Lightning-AI/lit-stablelm/blob/master/LICENSE) [](https://discord.gg/VptPCZkGNa)
|
| 275 |
+
|
| 276 |
+
<p align="center">
|
| 277 |
+
<a href="#quick-start">Quick start</a> •
|
| 278 |
+
<a href="#choose-from-20-llms">Models</a> •
|
| 279 |
+
<a href="#finetune-an-llm">Finetune</a> •
|
| 280 |
+
<a href="#deploy-an-llm">Deploy</a> •
|
| 281 |
+
<a href="#all-workflows">All workflows</a> •
|
| 282 |
+
<a href="#state-of-the-art-features">Features</a> •
|
| 283 |
+
<a href="#training-recipes">Recipes (YAML)</a> •
|
| 284 |
+
<a href="https://lightning.ai/">Lightning AI</a> •
|
| 285 |
+
<a href="#tutorials">Tutorials</a>
|
| 286 |
+
</p>
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
|
| 290 |
+
<a target="_blank" href="https://lightning.ai/lightning-ai/studios/litgpt-quick-start">
|
| 291 |
+
<img src="https://pl-bolts-doc-images.s3.us-east-2.amazonaws.com/app-2/get-started-badge.svg" height="36px" alt="Get started"/>
|
| 292 |
+
</a>
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
</div>
|
| 297 |
+
|
| 298 |
+
# Use, finetune, pretrain, and deploy LLMs Lightning fast ⚡⚡
|
| 299 |
+
Every LLM is implemented from scratch with **no abstractions** and **full control**, making it blazing fast, minimal, and performant at enterprise scale.
|
| 300 |
+
|
| 301 |
+
✅ **Enterprise ready -** Apache 2.0 for unlimited enterprise use.</br>
|
| 302 |
+
✅ **Developer friendly -** Easy debugging with no abstraction layers and single file implementations.</br>
|
| 303 |
+
✅ **Optimized performance -** Models designed to maximize performance, reduce costs, and speed up training.</br>
|
| 304 |
+
✅ **Proven recipes -** Highly-optimized training/finetuning recipes tested at enterprise scale.</br>
|
| 305 |
+
|
| 306 |
+
|
| 307 |
+
|
| 308 |
+
# Quick start
|
| 309 |
+
Install LitGPT
|
| 310 |
+
```
|
| 311 |
+
pip install 'litgpt[extra]'
|
| 312 |
+
```
|
| 313 |
+
|
| 314 |
+
Load and use any of the [20+ LLMs](#choose-from-20-llms):
|
| 315 |
+
```python
|
| 316 |
+
from litgpt import LLM
|
| 317 |
+
|
| 318 |
+
llm = LLM.load("microsoft/phi-2")
|
| 319 |
+
text = llm.generate("Fix the spelling: Every fall, the family goes to the mountains.")
|
| 320 |
+
print(text)
|
| 321 |
+
# Corrected Sentence: Every fall, the family goes to the mountains.
|
| 322 |
+
```
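
If you later finetune or pretrain a model (see the workflows below), the same Python API can load the resulting checkpoint directory. A minimal sketch, assuming `LLM.load` accepts a local checkpoint path (as the CLI commands below do) and that `generate` accepts `max_new_tokens` and `temperature` arguments; check the Python API docs for the exact signature:

```python
from litgpt import LLM

# Hypothetical local path produced by `litgpt finetune ... --out_dir out/custom-model`
llm = LLM.load("out/custom-model/final")

# Generation parameters below are assumptions, not the authoritative signature.
text = llm.generate("What do Llamas eat?", max_new_tokens=50, temperature=0.8)
print(text)
```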
|
| 323 |
+
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
✅ Optimized for fast inference</br>
|
| 327 |
+
✅ Quantization</br>
|
| 328 |
+
✅ Runs on low-memory GPUs</br>
|
| 329 |
+
✅ No layers of internal abstractions</br>
|
| 330 |
+
✅ Optimized for production scale</br>
|
| 331 |
+
|
| 332 |
+
<details>
|
| 333 |
+
<summary>Advanced install options</summary>
|
| 334 |
+
|
| 335 |
+
Install from source:
|
| 336 |
+
|
| 337 |
+
```bash
|
| 338 |
+
git clone https://github.com/Lightning-AI/litgpt
|
| 339 |
+
cd litgpt
|
| 340 |
+
pip install -e '.[all]'
|
| 341 |
+
```
|
| 342 |
+
</details>
|
| 343 |
+
|
| 344 |
+
[Explore the full Python API docs](tutorials/python-api.md).
|
| 345 |
+
|
| 346 |
+
|
| 347 |
+
|
| 348 |
+
---
|
| 349 |
+
# Choose from 20+ LLMs
|
| 350 |
+
Every model is written from scratch to maximize performance and remove layers of abstraction:
|
| 351 |
+
|
| 352 |
+
| Model | Model size | Author | Reference |
|
| 353 |
+
|----|----|----|----|
|
| 354 |
+
| Llama 3, 3.1, 3.2, 3.3 | 1B, 3B, 8B, 70B, 405B | Meta AI | [Meta AI 2024](https://github.com/meta-llama/llama3) |
|
| 355 |
+
| Code Llama | 7B, 13B, 34B, 70B | Meta AI | [Rozière et al. 2023](https://arxiv.org/abs/2308.12950) |
|
| 356 |
+
| CodeGemma | 7B | Google | [Google Team, Google Deepmind](https://ai.google.dev/gemma/docs/codegemma) |
|
| 357 |
+
| Gemma 2 | 2B, 9B, 27B | Google | [Google Team, Google Deepmind](https://storage.googleapis.com/deepmind-media/gemma/gemma-2-report.pdf) |
|
| 358 |
+
| Phi 4 | 14B | Microsoft Research | [Abdin et al. 2024](https://arxiv.org/abs/2412.08905) |
|
| 359 |
+
| Qwen2.5 | 0.5B, 1.5B, 3B, 7B, 14B, 32B, 72B | Alibaba Group | [Qwen Team 2024](https://qwenlm.github.io/blog/qwen2.5/) |
|
| 360 |
+
| Qwen2.5 Coder | 0.5B, 1.5B, 3B, 7B, 14B, 32B | Alibaba Group | [Hui, Binyuan et al. 2024](https://arxiv.org/abs/2409.12186) |
|
| 361 |
+
| R1 Distill Llama | 8B, 70B | DeepSeek AI | [DeepSeek AI 2025](https://github.com/deepseek-ai/DeepSeek-R1/blob/main/DeepSeek_R1.pdf) |
|
| 362 |
+
| ... | ... | ... | ... |
|
| 363 |
+
|
| 364 |
+
<details>
|
| 365 |
+
<summary>See full list of 20+ LLMs</summary>
|
| 366 |
+
|
| 367 |
+
|
| 368 |
+
|
| 369 |
+
#### All models
|
| 370 |
+
|
| 371 |
+
| Model | Model size | Author | Reference |
|
| 372 |
+
|----|----|----|----|
|
| 373 |
+
| CodeGemma | 7B | Google | [Google Team, Google Deepmind](https://ai.google.dev/gemma/docs/codegemma) |
|
| 374 |
+
| Code Llama | 7B, 13B, 34B, 70B | Meta AI | [Rozière et al. 2023](https://arxiv.org/abs/2308.12950) |
|
| 375 |
+
| Falcon | 7B, 40B, 180B | TII UAE | [TII 2023](https://falconllm.tii.ae) |
|
| 376 |
+
| Falcon 3 | 1B, 3B, 7B, 10B | TII UAE | [TII 2024](https://huggingface.co/blog/falcon3) |
|
| 377 |
+
| FreeWilly2 (Stable Beluga 2) | 70B | Stability AI | [Stability AI 2023](https://stability.ai/blog/stable-beluga-large-instruction-fine-tuned-models) |
|
| 378 |
+
| Function Calling Llama 2 | 7B | Trelis | [Trelis et al. 2023](https://huggingface.co/Trelis/Llama-2-7b-chat-hf-function-calling-v2) |
|
| 379 |
+
| Gemma | 2B, 7B | Google | [Google Team, Google Deepmind](https://storage.googleapis.com/deepmind-media/gemma/gemma-report.pdf) |
|
| 380 |
+
| Gemma 2 | 9B, 27B | Google | [Google Team, Google Deepmind](https://storage.googleapis.com/deepmind-media/gemma/gemma-2-report.pdf) |
|
| 381 |
+
| Gemma 3 | 1B, 4B, 12B, 27B | Google | [Google Team, Google Deepmind](https://arxiv.org/pdf/2503.19786) |
|
| 382 |
+
| Llama 2 | 7B, 13B, 70B | Meta AI | [Touvron et al. 2023](https://arxiv.org/abs/2307.09288) |
|
| 383 |
+
| Llama 3.1 | 8B, 70B | Meta AI | [Meta AI 2024](https://github.com/meta-llama/llama3) |
|
| 384 |
+
| Llama 3.2 | 1B, 3B | Meta AI | [Meta AI 2024](https://ai.meta.com/blog/llama-3-2-connect-2024-vision-edge-mobile-devices/) |
|
| 385 |
+
| Llama 3.3 | 70B | Meta AI | [Meta AI 2024](https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct) |
|
| 386 |
+
| Mathstral | 7B | Mistral AI | [Mistral AI 2024](https://mistral.ai/news/mathstral/) |
|
| 387 |
+
| MicroLlama | 300M | Ken Wang | [MicroLlama repo](https://github.com/keeeeenw/MicroLlama) |
|
| 388 |
+
| Mixtral MoE | 8x7B | Mistral AI | [Mistral AI 2023](https://mistral.ai/news/mixtral-of-experts/) |
|
| 389 |
+
| Mistral | 7B, 123B | Mistral AI | [Mistral AI 2023](https://mistral.ai/news/announcing-mistral-7b/) |
|
| 390 |
+
| Mixtral MoE | 8x22B | Mistral AI | [Mistral AI 2024](https://mistral.ai/news/mixtral-8x22b/) |
|
| 391 |
+
| OLMo | 1B, 7B | Allen Institute for AI (AI2) | [Groeneveld et al. 2024](https://aclanthology.org/2024.acl-long.841/) |
|
| 392 |
+
| OpenLLaMA | 3B, 7B, 13B | OpenLM Research | [Geng & Liu 2023](https://github.com/openlm-research/open_llama) |
|
| 393 |
+
| Phi 1.5 & 2 | 1.3B, 2.7B | Microsoft Research | [Li et al. 2023](https://arxiv.org/abs/2309.05463) |
|
| 394 |
+
| Phi 3 | 3.8B | Microsoft Research | [Abdin et al. 2024](https://arxiv.org/abs/2404.14219) |
|
| 395 |
+
| Phi 4 | 14B | Microsoft Research | [Abdin et al. 2024](https://arxiv.org/abs/2412.08905) |
|
| 396 |
+
| Phi 4 Mini Instruct | 3.8B | Microsoft Research | [Microsoft 2025](https://arxiv.org/abs/2503.01743) |
|
| 397 |
+
| Phi 4 Mini Reasoning | 3.8B | Microsoft Research | [Xu, Peng et al. 2025](https://arxiv.org/abs/2504.21233) |
|
| 398 |
+
| Phi 4 Reasoning | 3.8B | Microsoft Research | [Abdin et al. 2025](https://arxiv.org/abs/2504.21318) |
|
| 399 |
+
| Phi 4 Reasoning Plus | 3.8B | Microsoft Research | [Abdin et al. 2025](https://arxiv.org/abs/2504.21318) |
|
| 400 |
+
| Platypus | 7B, 13B, 70B | Lee et al. | [Lee, Hunter, and Ruiz 2023](https://arxiv.org/abs/2308.07317) |
|
| 401 |
+
| Pythia | {14,31,70,160,410}M, {1,1.4,2.8,6.9,12}B | EleutherAI | [Biderman et al. 2023](https://arxiv.org/abs/2304.01373) |
|
| 402 |
+
| Qwen2.5 | 0.5B, 1.5B, 3B, 7B, 14B, 32B, 72B | Alibaba Group | [Qwen Team 2024](https://qwenlm.github.io/blog/qwen2.5/) |
|
| 403 |
+
| Qwen2.5 Coder | 0.5B, 1.5B, 3B, 7B, 14B, 32B | Alibaba Group | [Hui, Binyuan et al. 2024](https://arxiv.org/abs/2409.12186) |
|
| 404 |
+
| Qwen2.5 1M (Long Context) | 7B, 14B | Alibaba Group | [Qwen Team 2025](https://qwenlm.github.io/blog/qwen2.5-1m/) |
|
| 405 |
+
| Qwen2.5 Math | 1.5B, 7B, 72B | Alibaba Group | [An, Yang et al. 2024](https://arxiv.org/abs/2409.12122) |
|
| 406 |
+
| QwQ | 32B | Alibaba Group | [Qwen Team 2025](https://qwenlm.github.io/blog/qwq-32b/) |
|
| 407 |
+
| QwQ-Preview | 32B | Alibaba Group | [Qwen Team 2024](https://qwenlm.github.io/blog/qwq-32b-preview/) |
|
| 408 |
+
| Qwen3 | 0.6B, 1.7B, 4B, 8B, 14B, 32B | Alibaba Group | [Qwen Team 2025](https://arxiv.org/abs/2505.09388/) |
|
| 409 |
+
| Qwen3 MoE | 30B, 235B | Alibaba Group | [Qwen Team 2025](https://arxiv.org/abs/2505.09388/) |
|
| 410 |
+
| R1 Distill Llama | 8B, 70B | DeepSeek AI | [DeepSeek AI 2025](https://github.com/deepseek-ai/DeepSeek-R1/blob/main/DeepSeek_R1.pdf) |
|
| 411 |
+
| SmolLM2 | 135M, 360M, 1.7B | Hugging Face | [Hugging Face 2024](https://github.com/huggingface/smollm) |
|
| 412 |
+
| Salamandra | 2B, 7B | Barcelona Supercomputing Centre | [BSC-LTC 2024](https://github.com/BSC-LTC/salamandra) |
|
| 413 |
+
| StableCode | 3B | Stability AI | [Stability AI 2023](https://stability.ai/blog/stablecode-llm-generative-ai-coding) |
|
| 414 |
+
| StableLM | 3B, 7B | Stability AI | [Stability AI 2023](https://github.com/Stability-AI/StableLM) |
|
| 415 |
+
| StableLM Zephyr | 3B | Stability AI | [Stability AI 2023](https://stability.ai/blog/stablecode-llm-generative-ai-coding) |
|
| 416 |
+
| TinyLlama | 1.1B | Zhang et al. | [Zhang et al. 2023](https://github.com/jzhang38/TinyLlama) |
|
| 417 |
+
|
| 418 |
+
|
| 419 |
+
**Tip**: You can list all available models by running the `litgpt download list` command.
|
| 420 |
+
|
| 421 |
+
|
| 422 |
+
</details>
|
| 423 |
+
|
| 424 |
+
|
| 425 |
+
|
| 426 |
+
---
|
| 427 |
+
|
| 428 |
+
# Workflows
|
| 429 |
+
|
| 430 |
+
<p align="center">
|
| 431 |
+
<a href="#finetune-an-llm">Finetune</a> •
|
| 432 |
+
<a href="#pretrain-an-llm">Pretrain</a> •
|
| 433 |
+
<a href="#continue-pretraining-an-llm">Continued pretraining</a> •
|
| 434 |
+
<a href="#evaluate-an-llm">Evaluate</a> •
|
| 435 |
+
<a href="#deploy-an-llm">Deploy</a> •
|
| 436 |
+
<a href="#test-an-llm">Test</a>
|
| 437 |
+
</p>
|
| 438 |
+
|
| 439 |
+
|
| 440 |
+
|
| 441 |
+
Use the command line interface to run advanced workflows such as pretraining or finetuning on your own data.
|
| 442 |
+
|
| 443 |
+
|
| 444 |
+
## All workflows
|
| 445 |
+
After installing LitGPT, select the model and workflow to run (finetune, pretrain, evaluate, deploy, etc...):
|
| 446 |
+
|
| 447 |
+
```bash
|
| 448 |
+
# litgpt [action] [model]
|
| 449 |
+
litgpt serve meta-llama/Llama-3.2-3B-Instruct
|
| 450 |
+
litgpt finetune meta-llama/Llama-3.2-3B-Instruct
|
| 451 |
+
litgpt pretrain meta-llama/Llama-3.2-3B-Instruct
|
| 452 |
+
litgpt chat meta-llama/Llama-3.2-3B-Instruct
|
| 453 |
+
litgpt evaluate meta-llama/Llama-3.2-3B-Instruct
|
| 454 |
+
```
|
| 455 |
+
|
| 456 |
+
|
| 457 |
+
|
| 458 |
+
----
|
| 459 |
+
|
| 460 |
+
## Finetune an LLM
|
| 461 |
+
|
| 462 |
+
<div align="center">
|
| 463 |
+
<a target="_blank" href="https://lightning.ai/lightning-ai/studios/litgpt-finetune">
|
| 464 |
+
<img src="https://pl-bolts-doc-images.s3.us-east-2.amazonaws.com/app-2/run-on-studio.svg" height="36px" alt="Run on Studios"/>
|
| 465 |
+
</a>
|
| 466 |
+
</div>
|
| 467 |
+
|
| 468 |
+
|
| 469 |
+
|
| 470 |
+
Finetuning is the process of taking a pretrained AI model and further training it on a smaller, specialized dataset tailored to a specific task or application.
|
| 471 |
+
|
| 472 |
+
|
| 473 |
+
|
| 474 |
+
|
| 475 |
+
```bash
|
| 476 |
+
# 0) setup your dataset
|
| 477 |
+
curl -L https://huggingface.co/datasets/ksaw008/finance_alpaca/resolve/main/finance_alpaca.json -o my_custom_dataset.json
|
| 478 |
+
|
| 479 |
+
# 1) Finetune a model (auto downloads weights)
|
| 480 |
+
litgpt finetune microsoft/phi-2 \
|
| 481 |
+
--data JSON \
|
| 482 |
+
--data.json_path my_custom_dataset.json \
|
| 483 |
+
--data.val_split_fraction 0.1 \
|
| 484 |
+
--out_dir out/custom-model
|
| 485 |
+
|
| 486 |
+
# 2) Test the model
|
| 487 |
+
litgpt chat out/custom-model/final
|
| 488 |
+
|
| 489 |
+
# 3) Deploy the model
|
| 490 |
+
litgpt serve out/custom-model/final
|
| 491 |
+
```
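
The `--data JSON` loader expects instruction-tuning records; the finance_alpaca file above follows the Alpaca convention of `instruction`, optional `input`, and `output` fields. Here is a minimal sketch of how you might build such a file for your own data (the field names are an assumption based on the Alpaca format; see the data-preparation tutorial for the authoritative schema):

```python
import json

# Two toy Alpaca-style records; "input" may be an empty string when unused.
records = [
    {
        "instruction": "Classify the sentiment of the sentence.",
        "input": "The market rallied strongly today.",
        "output": "positive",
    },
    {
        "instruction": "Explain what an ETF is in one sentence.",
        "input": "",
        "output": "An ETF is a fund that trades on an exchange like a stock.",
    },
]

# Write the dataset file referenced by --data.json_path above.
with open("my_custom_dataset.json", "w") as f:
    json.dump(records, f, indent=2)
```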
|
| 492 |
+
|
| 493 |
+
[Read the full finetuning docs](tutorials/finetune.md)
|
| 494 |
+
|
| 495 |
+
|
| 496 |
+
|
| 497 |
+
----
|
| 498 |
+
|
| 499 |
+
## Deploy an LLM
|
| 500 |
+
|
| 501 |
+
<div align="center">
|
| 502 |
+
<a target="_blank" href="https://lightning.ai/lightning-ai/studios/litgpt-serve">
|
| 503 |
+
<img src="https://pl-bolts-doc-images.s3.us-east-2.amazonaws.com/app-2/deploy-on-studios.svg" height="36px" alt="Deploy on Studios"/>
|
| 504 |
+
</a>
|
| 505 |
+
</div>
|
| 506 |
+
|
| 507 |
+
|
| 508 |
+
|
| 509 |
+
Deploy a pretrained or finetuned LLM to use it in real-world applications. Deploying automatically sets up a web server that can be accessed by a website or app.
|
| 510 |
+
|
| 511 |
+
```bash
|
| 512 |
+
# deploy an out-of-the-box LLM
|
| 513 |
+
litgpt serve microsoft/phi-2
|
| 514 |
+
|
| 515 |
+
# deploy your own trained model
|
| 516 |
+
litgpt serve path/to/microsoft/phi-2/checkpoint
|
| 517 |
+
```
|
| 518 |
+
|
| 519 |
+
<details>
|
| 520 |
+
<summary>Show code to query server:</summary>
|
| 521 |
+
|
| 522 |
+
|
| 523 |
+
|
| 524 |
+
Test the server in a separate terminal and integrate the model API into your AI product:
|
| 525 |
+
```python
|
| 526 |
+
# 3) Use the server (in a separate Python session)
|
| 527 |
+
import requests, json
|
| 528 |
+
response = requests.post(
|
| 529 |
+
"http://127.0.0.1:8000/predict",
|
| 530 |
+
json={"prompt": "Fix typos in the following sentence: Example input"}
|
| 531 |
+
)
|
| 532 |
+
print(response.json()["output"])
|
| 533 |
+
```
|
| 534 |
+
</details>
|
| 535 |
+
|
| 536 |
+
[Read the full deploy docs](tutorials/deploy.md).
|
| 537 |
+
|
| 538 |
+
|
| 539 |
+
|
| 540 |
+
----
|
| 541 |
+
|
| 542 |
+
## Evaluate an LLM
|
| 543 |
+
Evaluate an LLM to test how well it understands and generates text across a variety of tasks. Simply put, we can measure things like how well it would do in college-level chemistry, coding, and so on (MMLU, TruthfulQA, etc.).
|
| 544 |
+
|
| 545 |
+
```bash
|
| 546 |
+
litgpt evaluate microsoft/phi-2 --tasks 'truthfulqa_mc2,mmlu'
|
| 547 |
+
```
|
| 548 |
+
|
| 549 |
+
[Read the full evaluation docs](tutorials/evaluation.md).
|
| 550 |
+
|
| 551 |
+
|
| 552 |
+
|
| 553 |
+
----
|
| 554 |
+
|
| 555 |
+
## Test an LLM
|
| 556 |
+
|
| 557 |
+
<div align="center">
|
| 558 |
+
<a target="_blank" href="https://lightning.ai/lightning-ai/studios/litgpt-chat">
|
| 559 |
+
<img src="https://pl-bolts-doc-images.s3.us-east-2.amazonaws.com/app-2/run-on-studio.svg" height="36px" alt="Run on Studios"/>
|
| 560 |
+
</a>
|
| 561 |
+
</div>
|
| 562 |
+
|
| 563 |
+
|
| 564 |
+
|
| 565 |
+
Test how well the model works via an interactive chat. Use the `chat` command to chat, extract embeddings, etc...
|
| 566 |
+
|
| 567 |
+
Here's an example showing how to use the Phi-2 LLM:
|
| 568 |
+
```bash
|
| 569 |
+
litgpt chat microsoft/phi-2
|
| 570 |
+
|
| 571 |
+
>> Prompt: What do Llamas eat?
|
| 572 |
+
```
|
| 573 |
+
|
| 574 |
+
<details>
|
| 575 |
+
<summary>Full code:</summary>
|
| 576 |
+
|
| 577 |
+
|
| 578 |
+
|
| 579 |
+
```bash
|
| 580 |
+
# 1) List all supported LLMs
|
| 581 |
+
litgpt download list
|
| 582 |
+
|
| 583 |
+
# 2) Use a model (auto downloads weights)
|
| 584 |
+
litgpt chat microsoft/phi-2
|
| 585 |
+
|
| 586 |
+
>> Prompt: What do Llamas eat?
|
| 587 |
+
```
|
| 588 |
+
|
| 589 |
+
Downloading certain models requires an additional access token. You can read more about this in the [download](tutorials/download_model_weights.md#specific-models-and-access-tokens) documentation.
|
| 590 |
+
|
| 591 |
+
</details>
|
| 592 |
+
|
| 593 |
+
[Read the full chat docs](tutorials/inference.md).
|
| 594 |
+
|
| 595 |
+
|
| 596 |
+
|
| 597 |
+
----
|
| 598 |
+
|
| 599 |
+
## Pretrain an LLM
|
| 600 |
+
|
| 601 |
+
<div align="center">
|
| 602 |
+
<a target="_blank" href="https://lightning.ai/lightning-ai/studios/litgpt-pretrain">
|
| 603 |
+
<img src="https://pl-bolts-doc-images.s3.us-east-2.amazonaws.com/app-2/run-on-studio.svg" height="36px" alt="Run on Studios"/>
|
| 604 |
+
</a>
|
| 605 |
+
</div>
|
| 606 |
+
|
| 607 |
+
|
| 608 |
+
|
| 609 |
+
Pretraining is the process of teaching an AI model by exposing it to a large amount of data before it is fine-tuned for specific tasks.
|
| 610 |
+
|
| 611 |
+
<details>
|
| 612 |
+
<summary>Show code:</summary>
|
| 613 |
+
|
| 614 |
+
|
| 615 |
+
|
| 616 |
+
```bash
|
| 617 |
+
mkdir -p custom_texts
|
| 618 |
+
curl https://www.gutenberg.org/cache/epub/24440/pg24440.txt --output custom_texts/book1.txt
|
| 619 |
+
curl https://www.gutenberg.org/cache/epub/26393/pg26393.txt --output custom_texts/book2.txt
|
| 620 |
+
|
| 621 |
+
# 1) Download a tokenizer
|
| 622 |
+
litgpt download EleutherAI/pythia-160m \
|
| 623 |
+
--tokenizer_only True
|
| 624 |
+
|
| 625 |
+
# 2) Pretrain the model
|
| 626 |
+
litgpt pretrain EleutherAI/pythia-160m \
|
| 627 |
+
--tokenizer_dir EleutherAI/pythia-160m \
|
| 628 |
+
--data TextFiles \
|
| 629 |
+
--data.train_data_path "custom_texts/" \
|
| 630 |
+
--train.max_tokens 10_000_000 \
|
| 631 |
+
--out_dir out/custom-model
|
| 632 |
+
|
| 633 |
+
# 3) Test the model
|
| 634 |
+
litgpt chat out/custom-model/final
|
| 635 |
+
```
|
| 636 |
+
</details>
|
| 637 |
+
|
| 638 |
+
[Read the full pretraining docs](tutorials/pretrain.md)
|
| 639 |
+
|
| 640 |
+
|
| 641 |
+
|
| 642 |
+
----
|
| 643 |
+
|
| 644 |
+
## Continue pretraining an LLM
|
| 645 |
+
|
| 646 |
+
<div align="center">
|
| 647 |
+
<a target="_blank" href="https://lightning.ai/lightning-ai/studios/litgpt-continue-pretraining">
|
| 648 |
+
<img src="https://pl-bolts-doc-images.s3.us-east-2.amazonaws.com/app-2/run-on-studio.svg" height="36px" alt="Run on Studios"/>
|
| 649 |
+
</a>
|
| 650 |
+
</div>
|
| 651 |
+
|
| 652 |
+
|
| 653 |
+
|
| 654 |
+
Continued pretraining is another way of finetuning that specializes an already pretrained model by training on custom data:
|
| 655 |
+
|
| 656 |
+
<details>
|
| 657 |
+
<summary>Show code:</summary>
|
| 658 |
+
|
| 659 |
+
|
| 660 |
+
|
| 661 |
+
```bash
|
| 662 |
+
mkdir -p custom_texts
|
| 663 |
+
curl https://www.gutenberg.org/cache/epub/24440/pg24440.txt --output custom_texts/book1.txt
|
| 664 |
+
curl https://www.gutenberg.org/cache/epub/26393/pg26393.txt --output custom_texts/book2.txt
|
| 665 |
+
|
| 666 |
+
# 1) Continue pretraining a model (auto downloads weights)
|
| 667 |
+
litgpt pretrain EleutherAI/pythia-160m \
|
| 668 |
+
--tokenizer_dir EleutherAI/pythia-160m \
|
| 669 |
+
--initial_checkpoint_dir EleutherAI/pythia-160m \
|
| 670 |
+
--data TextFiles \
|
| 671 |
+
--data.train_data_path "custom_texts/" \
|
| 672 |
+
--train.max_tokens 10_000_000 \
|
| 673 |
+
--out_dir out/custom-model
|
| 674 |
+
|
| 675 |
+
# 2) Test the model
|
| 676 |
+
litgpt chat out/custom-model/final
|
| 677 |
+
```
|
| 678 |
+
|
| 679 |
+
</details>
|
| 680 |
+
|
| 681 |
+
[Read the full continued pretraining docs](tutorials/pretrain.md#continued-pretraining-on-custom-data)
|
| 682 |
+
|
| 683 |
+
|
| 684 |
+
|
| 685 |
+
----
|
| 686 |
+
|
| 687 |
+
# State-of-the-art features
|
| 688 |
+
|
| 689 |
+
✅ State-of-the-art optimizations: Flash Attention v2, multi-GPU support via fully-sharded data parallelism, [optional CPU offloading](tutorials/oom.md#do-sharding-across-multiple-gpus), and [TPU and XLA support](extensions/xla).</br>
|
| 690 |
+
✅ [Pretrain](tutorials/pretrain.md), [finetune](tutorials/finetune.md), and [deploy](tutorials/inference.md)</br>
|
| 691 |
+
✅ Reduce compute requirements with low-precision settings: FP16, BF16, and FP16/FP32 mixed.</br>
|
| 692 |
+
✅ Lower memory requirements with [quantization](tutorials/quantize.md): 4-bit floats, 8-bit integers, and double quantization.</br>
|
| 693 |
+
✅ [Configuration files](config_hub) for great out-of-the-box performance.</br>
|
| 694 |
+
✅ Parameter-efficient finetuning: [LoRA](tutorials/finetune_lora.md), [QLoRA](tutorials/finetune_lora.md), [Adapter](tutorials/finetune_adapter.md), and [Adapter v2](tutorials/finetune_adapter.md).</br>
|
| 695 |
+
✅ [Exporting](tutorials/convert_lit_models.md) to other popular model weight formats.</br>
|
| 696 |
+
✅ Many popular datasets for [pretraining](tutorials/pretrain.md) and [finetuning](tutorials/prepare_dataset.md), and [support for custom datasets](tutorials/prepare_dataset.md#preparing-custom-datasets-for-instruction-finetuning).</br>
|
| 697 |
+
✅ Readable and easy-to-modify code to experiment with the latest research ideas.</br>
|
| 698 |
+
|
| 699 |
+
|
| 700 |
+
|
| 701 |
+
---
|
| 702 |
+
|
| 703 |
+
# Training recipes
|
| 704 |
+
|
| 705 |
+
LitGPT comes with validated recipes (YAML configs) to train models under different conditions. We generated these recipes from the parameters we found to perform best across a range of training scenarios.
|
| 706 |
+
|
| 707 |
+
Browse all training recipes [here](config_hub).
|
| 708 |
+
|
| 709 |
+
### Example
|
| 710 |
+
|
| 711 |
+
```bash
|
| 712 |
+
litgpt finetune \
|
| 713 |
+
--config https://raw.githubusercontent.com/Lightning-AI/litgpt/main/config_hub/finetune/llama-2-7b/lora.yaml
|
| 714 |
+
```
|
| 715 |
+
<details>
|
| 716 |
+
<summary>✅ Use configs to customize training</summary>
|
| 717 |
+
|
| 718 |
+
Configs let you customize training for all granular parameters like:
|
| 719 |
+
|
| 720 |
+
```yaml
|
| 721 |
+
# The path to the base model's checkpoint directory to load for finetuning. (type: <class 'Path'>, default: checkpoints/stabilityai/stablelm-base-alpha-3b)
|
| 722 |
+
checkpoint_dir: checkpoints/meta-llama/Llama-2-7b-hf
|
| 723 |
+
|
| 724 |
+
# Directory in which to save checkpoints and logs. (type: <class 'Path'>, default: out/lora)
|
| 725 |
+
out_dir: out/finetune/qlora-llama2-7b
|
| 726 |
+
|
| 727 |
+
# The precision to use for finetuning. Possible choices: "bf16-true", "bf16-mixed", "32-true". (type: Optional[str], default: null)
|
| 728 |
+
precision: bf16-true
|
| 729 |
+
|
| 730 |
+
...
|
| 731 |
+
```
|
| 732 |
+
</details>
|
| 733 |
+
|
| 734 |
+
<details>
|
| 735 |
+
<summary>✅ Example: LoRA finetuning config</summary>
|
| 736 |
+
|
| 737 |
+
|
| 738 |
+
|
| 739 |
+
```yaml
|
| 740 |
+
# The path to the base model's checkpoint directory to load for finetuning. (type: <class 'Path'>, default: checkpoints/stabilityai/stablelm-base-alpha-3b)
|
| 741 |
+
checkpoint_dir: checkpoints/meta-llama/Llama-2-7b-hf
|
| 742 |
+
|
| 743 |
+
# Directory in which to save checkpoints and logs. (type: <class 'Path'>, default: out/lora)
|
| 744 |
+
out_dir: out/finetune/qlora-llama2-7b
|
| 745 |
+
|
| 746 |
+
# The precision to use for finetuning. Possible choices: "bf16-true", "bf16-mixed", "32-true". (type: Optional[str], default: null)
|
| 747 |
+
precision: bf16-true
|
| 748 |
+
|
| 749 |
+
# If set, quantize the model with this algorithm. See ``tutorials/quantize.md`` for more information. (type: Optional[Literal['nf4', 'nf4-dq', 'fp4', 'fp4-dq', 'int8-training']], default: null)
|
| 750 |
+
quantize: bnb.nf4
|
| 751 |
+
|
| 752 |
+
# How many devices/GPUs to use. (type: Union[int, str], default: 1)
|
| 753 |
+
devices: 1
|
| 754 |
+
|
| 755 |
+
# How many nodes to use. (type: int, default: 1)
|
| 756 |
+
num_nodes: 1
|
| 757 |
+
|
| 758 |
+
# The LoRA rank. (type: int, default: 8)
|
| 759 |
+
lora_r: 32
|
| 760 |
+
|
| 761 |
+
# The LoRA alpha. (type: int, default: 16)
|
| 762 |
+
lora_alpha: 16
|
| 763 |
+
|
| 764 |
+
# The LoRA dropout value. (type: float, default: 0.05)
|
| 765 |
+
lora_dropout: 0.05
|
| 766 |
+
|
| 767 |
+
# Whether to apply LoRA to the query weights in attention. (type: bool, default: True)
|
| 768 |
+
lora_query: true
|
| 769 |
+
|
| 770 |
+
# Whether to apply LoRA to the key weights in attention. (type: bool, default: False)
|
| 771 |
+
lora_key: false
|
| 772 |
+
|
| 773 |
+
# Whether to apply LoRA to the value weights in attention. (type: bool, default: True)
|
| 774 |
+
lora_value: true
|
| 775 |
+
|
| 776 |
+
# Whether to apply LoRA to the output projection in the attention block. (type: bool, default: False)
|
| 777 |
+
lora_projection: false
|
| 778 |
+
|
| 779 |
+
# Whether to apply LoRA to the weights of the MLP in the attention block. (type: bool, default: False)
|
| 780 |
+
lora_mlp: false
|
| 781 |
+
|
| 782 |
+
# Whether to apply LoRA to output head in GPT. (type: bool, default: False)
|
| 783 |
+
lora_head: false
|
| 784 |
+
|
| 785 |
+
# Data-related arguments. If not provided, the default is ``litgpt.data.Alpaca``.
|
| 786 |
+
data:
|
| 787 |
+
class_path: litgpt.data.Alpaca2k
|
| 788 |
+
init_args:
|
| 789 |
+
mask_prompt: false
|
| 790 |
+
val_split_fraction: 0.05
|
| 791 |
+
prompt_style: alpaca
|
| 792 |
+
ignore_index: -100
|
| 793 |
+
seed: 42
|
| 794 |
+
num_workers: 4
|
| 795 |
+
download_dir: data/alpaca2k
|
| 796 |
+
|
| 797 |
+
# Training-related arguments. See ``litgpt.args.TrainArgs`` for details
|
| 798 |
+
train:
|
| 799 |
+
|
| 800 |
+
# Number of optimizer steps between saving checkpoints (type: Optional[int], default: 1000)
|
| 801 |
+
save_interval: 200
|
| 802 |
+
|
| 803 |
+
# Number of iterations between logging calls (type: int, default: 1)
|
| 804 |
+
log_interval: 1
|
| 805 |
+
|
| 806 |
+
# Number of samples between optimizer steps across data-parallel ranks (type: int, default: 128)
|
| 807 |
+
global_batch_size: 8
|
| 808 |
+
|
| 809 |
+
# Number of samples per data-parallel rank (type: int, default: 4)
|
| 810 |
+
micro_batch_size: 2
|
| 811 |
+
|
| 812 |
+
# Number of iterations with learning rate warmup active (type: int, default: 100)
|
| 813 |
+
lr_warmup_steps: 10
|
| 814 |
+
|
| 815 |
+
# Number of epochs to train on (type: Optional[int], default: 5)
|
| 816 |
+
epochs: 4
|
| 817 |
+
|
| 818 |
+
# Total number of tokens to train on (type: Optional[int], default: null)
|
| 819 |
+
max_tokens:
|
| 820 |
+
|
| 821 |
+
# Limits the number of optimizer steps to run (type: Optional[int], default: null)
|
| 822 |
+
max_steps:
|
| 823 |
+
|
| 824 |
+
# Limits the length of samples (type: Optional[int], default: null)
|
| 825 |
+
max_seq_length: 512
|
| 826 |
+
|
| 827 |
+
# Whether to tie the embedding weights with the language modeling head weights (type: Optional[bool], default: null)
|
| 828 |
+
tie_embeddings:
|
| 829 |
+
|
| 830 |
+
# (type: float, default: 0.0003)
|
| 831 |
+
learning_rate: 0.0002
|
| 832 |
+
|
| 833 |
+
# (type: float, default: 0.02)
|
| 834 |
+
weight_decay: 0.0
|
| 835 |
+
|
| 836 |
+
# (type: float, default: 0.9)
|
| 837 |
+
beta1: 0.9
|
| 838 |
+
|
| 839 |
+
# (type: float, default: 0.95)
|
| 840 |
+
beta2: 0.95
|
| 841 |
+
|
| 842 |
+
# (type: Optional[float], default: null)
|
| 843 |
+
max_norm:
|
| 844 |
+
|
| 845 |
+
# (type: float, default: 6e-05)
|
| 846 |
+
min_lr: 6.0e-05
|
| 847 |
+
|
| 848 |
+
# Evaluation-related arguments. See ``litgpt.args.EvalArgs`` for details
|
| 849 |
+
eval:
|
| 850 |
+
|
| 851 |
+
# Number of optimizer steps between evaluation calls (type: int, default: 100)
|
| 852 |
+
interval: 100
|
| 853 |
+
|
| 854 |
+
# Number of tokens to generate (type: Optional[int], default: 100)
|
| 855 |
+
max_new_tokens: 100
|
| 856 |
+
|
| 857 |
+
# Number of iterations (type: int, default: 100)
|
| 858 |
+
max_iters: 100
|
| 859 |
+
|
| 860 |
+
# The name of the logger to send metrics to. (type: Literal['wandb', 'tensorboard', 'csv'], default: csv)
|
| 861 |
+
logger_name: csv
|
| 862 |
+
|
| 863 |
+
# The random seed to use for reproducibility. (type: int, default: 1337)
|
| 864 |
+
seed: 1337
|
| 865 |
+
```
|
| 866 |
+
</details>
|
| 867 |
+
|
| 868 |
+
<details>
|
| 869 |
+
<summary>✅ Override any parameter in the CLI:</summary>
|
| 870 |
+
|
| 871 |
+
```bash
|
| 872 |
+
litgpt finetune \
|
| 873 |
+
--config https://raw.githubusercontent.com/Lightning-AI/litgpt/main/config_hub/finetune/llama-2-7b/lora.yaml \
|
| 874 |
+
--lora_r 4
|
| 875 |
+
```
|
| 876 |
+
</details>
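
Because recipes are plain YAML, you can also inspect or tweak them programmatically before launching a run. A minimal sketch using PyYAML with a locally saved copy of the recipe (the file names are hypothetical; the keys mirror the example config above):

```python
import yaml  # pip install pyyaml

# Load a locally saved recipe, apply the same override as the CLI example, and write a copy.
with open("lora.yaml") as f:
    cfg = yaml.safe_load(f)

cfg["lora_r"] = 4                     # equivalent to `--lora_r 4`
cfg["train"]["learning_rate"] = 1e-4  # any nested key from the recipe can be edited the same way

with open("lora_custom.yaml", "w") as f:
    yaml.safe_dump(cfg, f)
```

The edited file can then be passed back to the CLI, e.g. `litgpt finetune --config lora_custom.yaml`.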
|
| 877 |
+
|
| 878 |
+
|
| 879 |
+
|
| 880 |
+
----
|
| 881 |
+
|
| 882 |
+
# Project highlights
|
| 883 |
+
|
| 884 |
+
LitGPT powers many great AI projects, initiatives, challenges, and of course enterprises. Please submit a pull request to have your project featured here.
|
| 885 |
+
|
| 886 |
+
<details>
|
| 887 |
+
<summary>📊 SAMBA: Simple Hybrid State Space Models for Efficient Unlimited Context Language Modeling</summary>
|
| 888 |
+
|
| 889 |
+
The [Samba](https://github.com/microsoft/Samba) project by researchers at Microsoft is built on top of the LitGPT code base and combines state space models with sliding window attention, which outperforms pure state space models.
|
| 890 |
+
|
| 891 |
+
</details>
|
| 892 |
+
|
| 893 |
+
<details>
|
| 894 |
+
<summary>🏆 NeurIPS 2023 Large Language Model Efficiency Challenge: 1 LLM + 1 GPU + 1 Day</summary>
|
| 895 |
+
|
| 896 |
+
The LitGPT repository was the official starter kit for the [NeurIPS 2023 LLM Efficiency Challenge](https://llm-efficiency-challenge.github.io), which is a competition focused on finetuning an existing non-instruction tuned LLM for 24 hours on a single GPU.
|
| 897 |
+
|
| 898 |
+
</details>
|
| 899 |
+
|
| 900 |
+
<details>
|
| 901 |
+
<summary>🦙 TinyLlama: An Open-Source Small Language Model</summary>
|
| 902 |
+
|
| 903 |
+
|
| 904 |
+
LitGPT powered the [TinyLlama project](https://github.com/jzhang38/TinyLlama) and [TinyLlama: An Open-Source Small Language Model](https://arxiv.org/abs/2401.02385) research paper.
|
| 905 |
+
|
| 906 |
+
</details>
|
| 907 |
+
|
| 908 |
+
<details>
|
| 909 |
+
<summary>🍪 MicroLlama: MicroLlama-300M</summary>
|
| 910 |
+
|
| 911 |
+
[MicroLlama](https://github.com/keeeeenw/MicroLlama) is a 300M Llama model pretrained on 50B tokens powered by TinyLlama and LitGPT.
|
| 912 |
+
</details>
|
| 913 |
+
|
| 914 |
+
<details>
|
| 915 |
+
<summary>🔬 Pre-training Small Base LMs with Fewer Tokens</summary>
|
| 916 |
+
|
| 917 |
+
The research paper ["Pre-training Small Base LMs with Fewer Tokens"](https://arxiv.org/abs/2404.08634), which utilizes LitGPT, develops smaller base language models by inheriting a few transformer blocks from larger models and training on a tiny fraction of the data used by the larger models. It demonstrates that these smaller models can perform comparably to larger models despite using significantly less training data and resources.
|
| 918 |
+
|
| 919 |
+
</details>
|
| 920 |
+
|
| 921 |
+
|
| 922 |
+
|
| 923 |
+
----
|
| 924 |
+
|
| 925 |
+
# Community
|
| 926 |
+
|
| 927 |
+
We welcome all individual contributors, regardless of their level of experience or hardware. Your contributions are valuable, and we are excited to see what you can accomplish in this collaborative and supportive environment.
|
| 928 |
+
|
| 929 |
+
- [Request a feature](https://github.com/Lightning-AI/litgpt/issues)
|
| 930 |
+
- [Submit your first contribution](https://lightning.ai/pages/community/tutorial/how-to-contribute-to-litgpt/)
|
| 931 |
+
- [Join our Discord](https://discord.gg/VptPCZkGNa)
|
| 932 |
+
|
| 933 |
+
|
| 934 |
+
|
| 935 |
+
# Tutorials
|
| 936 |
+
|
| 937 |
+
🚀 [Get started](tutorials/0_to_litgpt.md)</br>
|
| 938 |
+
⚡️ [Finetuning, incl. LoRA, QLoRA, and Adapters](tutorials/finetune.md)</br>
|
| 939 |
+
🤖 [Pretraining](tutorials/pretrain.md)</br>
|
| 940 |
+
💬 [Model evaluation](tutorials/evaluation.md)</br>
|
| 941 |
+
📘 [Supported and custom datasets](tutorials/prepare_dataset.md)</br>
|
| 942 |
+
🧹 [Quantization](tutorials/quantize.md)</br>
|
| 943 |
+
🤯 [Tips for dealing with out-of-memory (OOM) errors](tutorials/oom.md)</br>
|
| 944 |
+
🧑🏽💻 [Using cloud TPUs](extensions/xla)</br>
|
| 945 |
+
|
| 946 |
+
|
| 947 |
+
|
| 948 |
+
----
|
| 949 |
+
|
| 950 |
+
### Acknowledgments
|
| 951 |
+
|
| 952 |
+
This implementation builds on [Lit-LLaMA](https://github.com/lightning-AI/lit-llama) and [nanoGPT](https://github.com/karpathy/nanoGPT), and it's **powered by [Lightning Fabric](https://lightning.ai/docs/fabric/stable/) ⚡**.
|
| 953 |
+
|
| 954 |
+
- [@karpathy](https://github.com/karpathy) for [nanoGPT](https://github.com/karpathy/nanoGPT)
|
| 955 |
+
- [@EleutherAI](https://github.com/EleutherAI) for [GPT-NeoX](https://github.com/EleutherAI/gpt-neox) and the [Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness)
|
| 956 |
+
- [@TimDettmers](https://github.com/TimDettmers) for [bitsandbytes](https://github.com/TimDettmers/bitsandbytes)
|
| 957 |
+
- [@Microsoft](https://github.com/microsoft) for [LoRA](https://github.com/microsoft/LoRA)
|
| 958 |
+
- [@tridao](https://github.com/tridao) for [Flash Attention 2](https://github.com/Dao-AILab/flash-attention)
|
| 959 |
+
|
| 960 |
+
### License
|
| 961 |
+
|
| 962 |
+
LitGPT is released under the [Apache 2.0](https://github.com/Lightning-AI/litgpt/blob/main/LICENSE) license.
|
| 963 |
+
|
| 964 |
+
### Citation
|
| 965 |
+
|
| 966 |
+
If you use LitGPT in your research, please cite the following work:
|
| 967 |
+
|
| 968 |
+
```bibtex
|
| 969 |
+
@misc{litgpt-2023,
|
| 970 |
+
author = {Lightning AI},
|
| 971 |
+
title = {LitGPT},
|
| 972 |
+
howpublished = {\url{https://github.com/Lightning-AI/litgpt}},
|
| 973 |
+
year = {2023},
|
| 974 |
+
}
|
| 975 |
+
```
|
| 976 |
+
|
| 977 |
+
|
litgpt.egg-info/SOURCES.txt
ADDED
|
@@ -0,0 +1,89 @@
|
| 1 |
+
LICENSE
|
| 2 |
+
README.md
|
| 3 |
+
pyproject.toml
|
| 4 |
+
litgpt/__init__.py
|
| 5 |
+
litgpt/__main__.py
|
| 6 |
+
litgpt/adapter.py
|
| 7 |
+
litgpt/adapter_v2.py
|
| 8 |
+
litgpt/api.py
|
| 9 |
+
litgpt/args.py
|
| 10 |
+
litgpt/config.py
|
| 11 |
+
litgpt/lora.py
|
| 12 |
+
litgpt/model.py
|
| 13 |
+
litgpt/pretrain.py
|
| 14 |
+
litgpt/prompts.py
|
| 15 |
+
litgpt/tokenizer.py
|
| 16 |
+
litgpt/utils.py
|
| 17 |
+
litgpt.egg-info/PKG-INFO
|
| 18 |
+
litgpt.egg-info/SOURCES.txt
|
| 19 |
+
litgpt.egg-info/dependency_links.txt
|
| 20 |
+
litgpt.egg-info/entry_points.txt
|
| 21 |
+
litgpt.egg-info/requires.txt
|
| 22 |
+
litgpt.egg-info/top_level.txt
|
| 23 |
+
litgpt/chat/__init__.py
|
| 24 |
+
litgpt/chat/base.py
|
| 25 |
+
litgpt/data/__init__.py
|
| 26 |
+
litgpt/data/alpaca.py
|
| 27 |
+
litgpt/data/alpaca_2k.py
|
| 28 |
+
litgpt/data/alpaca_gpt4.py
|
| 29 |
+
litgpt/data/base.py
|
| 30 |
+
litgpt/data/deita.py
|
| 31 |
+
litgpt/data/flan.py
|
| 32 |
+
litgpt/data/json_data.py
|
| 33 |
+
litgpt/data/lima.py
|
| 34 |
+
litgpt/data/lit_data.py
|
| 35 |
+
litgpt/data/longform.py
|
| 36 |
+
litgpt/data/microllama.py
|
| 37 |
+
litgpt/data/openwebtext.py
|
| 38 |
+
litgpt/data/prepare_slimpajama.py
|
| 39 |
+
litgpt/data/prepare_starcoder.py
|
| 40 |
+
litgpt/data/text_files.py
|
| 41 |
+
litgpt/data/tinyllama.py
|
| 42 |
+
litgpt/data/tinystories.py
|
| 43 |
+
litgpt/deploy/__init__.py
|
| 44 |
+
litgpt/deploy/serve.py
|
| 45 |
+
litgpt/eval/evaluate.py
|
| 46 |
+
litgpt/finetune/__init__.py
|
| 47 |
+
litgpt/finetune/adapter.py
|
| 48 |
+
litgpt/finetune/adapter_v2.py
|
| 49 |
+
litgpt/finetune/full.py
|
| 50 |
+
litgpt/finetune/lora.py
|
| 51 |
+
litgpt/generate/__init__.py
|
| 52 |
+
litgpt/generate/adapter.py
|
| 53 |
+
litgpt/generate/adapter_v2.py
|
| 54 |
+
litgpt/generate/base.py
|
| 55 |
+
litgpt/generate/full.py
|
| 56 |
+
litgpt/generate/sequentially.py
|
| 57 |
+
litgpt/generate/speculative_decoding.py
|
| 58 |
+
litgpt/generate/tp.py
|
| 59 |
+
litgpt/scripts/__init__.py
|
| 60 |
+
litgpt/scripts/convert_hf_checkpoint.py
|
| 61 |
+
litgpt/scripts/convert_lit_checkpoint.py
|
| 62 |
+
litgpt/scripts/convert_pretrained_checkpoint.py
|
| 63 |
+
litgpt/scripts/download.py
|
| 64 |
+
litgpt/scripts/merge_lora.py
|
| 65 |
+
tests/test_adapter.py
|
| 66 |
+
tests/test_adapter_v2.py
|
| 67 |
+
tests/test_api.py
|
| 68 |
+
tests/test_args.py
|
| 69 |
+
tests/test_batch.py
|
| 70 |
+
tests/test_chat.py
|
| 71 |
+
tests/test_ci.py
|
| 72 |
+
tests/test_cli.py
|
| 73 |
+
tests/test_config.py
|
| 74 |
+
tests/test_config_hub.py
|
| 75 |
+
tests/test_distributed.py
|
| 76 |
+
tests/test_evaluate.py
|
| 77 |
+
tests/test_full.py
|
| 78 |
+
tests/test_generate_speculatively.py
|
| 79 |
+
tests/test_lora.py
|
| 80 |
+
tests/test_merge_lora.py
|
| 81 |
+
tests/test_model.py
|
| 82 |
+
tests/test_pretrain.py
|
| 83 |
+
tests/test_prompts.py
|
| 84 |
+
tests/test_readme.py
|
| 85 |
+
tests/test_rope.py
|
| 86 |
+
tests/test_serve.py
|
| 87 |
+
tests/test_tokenizer.py
|
| 88 |
+
tests/test_trainer_support.py
|
| 89 |
+
tests/test_utils.py
|
litgpt.egg-info/dependency_links.txt
ADDED
|
@@ -0,0 +1 @@
|
| 1 |
+
|
litgpt.egg-info/entry_points.txt
ADDED
|
@@ -0,0 +1,2 @@
|
| 1 |
+
[console_scripts]
|
| 2 |
+
litgpt = litgpt.__main__:main
|
litgpt.egg-info/requires.txt
ADDED
|
@@ -0,0 +1,51 @@
|
| 1 |
+
huggingface-hub<0.33,>=0.23.5
|
| 2 |
+
lightning>=2.5
|
| 3 |
+
psutil==7
|
| 4 |
+
safetensors>=0.4.3
|
| 5 |
+
tokenizers>=0.15.2
|
| 6 |
+
torch>=2.5
|
| 7 |
+
tqdm>=4.66
|
| 8 |
+
|
| 9 |
+
[:python_version <= "3.9"]
|
| 10 |
+
jsonargparse[signatures]<=4.32.1,>=4.30.1
|
| 11 |
+
|
| 12 |
+
[:python_version > "3.9"]
|
| 13 |
+
jsonargparse[signatures]>=4.37
|
| 14 |
+
|
| 15 |
+
[compiler]
|
| 16 |
+
|
| 17 |
+
[compiler:python_version >= "3.10" and sys_platform == "linux"]
|
| 18 |
+
lightning-thunder>=0.2.0.dev20250119
|
| 19 |
+
|
| 20 |
+
[extra]
|
| 21 |
+
datasets<4,>=2.18
|
| 22 |
+
huggingface-hub[hf-transfer]>=0.21
|
| 23 |
+
litdata==0.2.51
|
| 24 |
+
litserve>0.2
|
| 25 |
+
lm-eval>=0.4.2
|
| 26 |
+
pandas>=1.9
|
| 27 |
+
pyarrow>=15.0.2
|
| 28 |
+
requests>=2.31
|
| 29 |
+
sentencepiece>=0.2
|
| 30 |
+
tensorboard>=2.14
|
| 31 |
+
torchmetrics>=1.3.1
|
| 32 |
+
transformers<4.52,>=4.51.3
|
| 33 |
+
zstandard>=0.22
|
| 34 |
+
|
| 35 |
+
[extra:sys_platform != "win32"]
|
| 36 |
+
uvloop>=0.2
|
| 37 |
+
|
| 38 |
+
[extra:sys_platform == "darwin"]
|
| 39 |
+
bitsandbytes<0.43,>=0.42
|
| 40 |
+
|
| 41 |
+
[extra:sys_platform == "linux" or sys_platform == "win32"]
|
| 42 |
+
bitsandbytes<0.45.5,>=0.45.2
|
| 43 |
+
|
| 44 |
+
[test]
|
| 45 |
+
einops>=0.7
|
| 46 |
+
protobuf>=4.23.4
|
| 47 |
+
pytest>=8.1.1
|
| 48 |
+
pytest-benchmark>=5.1
|
| 49 |
+
pytest-dependency>=0.6
|
| 50 |
+
pytest-rerunfailures>=14
|
| 51 |
+
pytest-timeout>=2.3.1
|
litgpt.egg-info/top_level.txt
ADDED
|
@@ -0,0 +1 @@
|
| 1 |
+
litgpt
|
litgpt/__init__.py
ADDED
|
@@ -0,0 +1,20 @@
|
| 1 |
+
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
|
| 2 |
+
|
| 3 |
+
import logging
|
| 4 |
+
import re
|
| 5 |
+
|
| 6 |
+
from litgpt.api import LLM
|
| 7 |
+
from litgpt.config import Config
|
| 8 |
+
from litgpt.model import GPT # needs to be imported before config
|
| 9 |
+
from litgpt.prompts import PromptStyle
|
| 10 |
+
from litgpt.tokenizer import Tokenizer
|
| 11 |
+
|
| 12 |
+
# Suppress excessive warnings, see https://github.com/pytorch/pytorch/issues/111632
|
| 13 |
+
pattern = re.compile(".*Profiler function .* will be ignored")
|
| 14 |
+
logging.getLogger("torch._dynamo.variables.torch").addFilter(lambda record: not pattern.search(record.getMessage()))
|
| 15 |
+
|
| 16 |
+
# Avoid printing state-dict profiling output at the WARNING level when saving a checkpoint
|
| 17 |
+
logging.getLogger("torch.distributed.fsdp._optim_utils").disabled = True
|
| 18 |
+
logging.getLogger("torch.distributed.fsdp._debug_utils").disabled = True
|
| 19 |
+
|
| 20 |
+
__all__ = ["LLM", "GPT", "Config", "PromptStyle", "Tokenizer"]
|
litgpt/__main__.py
ADDED
|
@@ -0,0 +1,75 @@
|
| 1 |
+
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
|
| 2 |
+
|
| 3 |
+
import warnings
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
from jsonargparse import CLI, set_config_read_mode, set_docstring_parse_options
|
| 7 |
+
|
| 8 |
+
from litgpt.chat.base import main as chat_fn
|
| 9 |
+
from litgpt.deploy.serve import run_server as serve_fn
|
| 10 |
+
from litgpt.eval.evaluate import convert_and_evaluate as evaluate_fn
|
| 11 |
+
from litgpt.finetune.adapter import setup as finetune_adapter_fn
|
| 12 |
+
from litgpt.finetune.adapter_v2 import setup as finetune_adapter_v2_fn
|
| 13 |
+
from litgpt.finetune.full import setup as finetune_full_fn
|
| 14 |
+
from litgpt.finetune.lora import setup as finetune_lora_fn
|
| 15 |
+
from litgpt.generate.adapter import main as generate_adapter_fn
|
| 16 |
+
from litgpt.generate.adapter_v2 import main as generate_adapter_v2_fn
|
| 17 |
+
from litgpt.generate.base import main as generate_base_fn
|
| 18 |
+
from litgpt.generate.full import main as generate_full_fn
|
| 19 |
+
from litgpt.generate.sequentially import main as generate_sequentially_fn
|
| 20 |
+
from litgpt.generate.speculative_decoding import main as generate_speculatively_fn
|
| 21 |
+
from litgpt.generate.tp import main as generate_tp_fn
|
| 22 |
+
from litgpt.pretrain import setup as pretrain_fn
|
| 23 |
+
from litgpt.perplexity import setup as perplexity_fn
|
| 24 |
+
from litgpt.scripts.convert_hf_checkpoint import convert_hf_checkpoint as convert_hf_checkpoint_fn
|
| 25 |
+
from litgpt.scripts.convert_lit_checkpoint import convert_lit_checkpoint as convert_lit_checkpoint_fn
|
| 26 |
+
from litgpt.scripts.convert_pretrained_checkpoint import (
|
| 27 |
+
convert_pretrained_checkpoint as convert_pretrained_checkpoint_fn,
|
| 28 |
+
)
|
| 29 |
+
from litgpt.scripts.download import download_from_hub as download_fn
|
| 30 |
+
from litgpt.scripts.merge_lora import merge_lora as merge_lora_fn
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def main() -> None:
|
| 34 |
+
parser_data = {
|
| 35 |
+
"download": download_fn,
|
| 36 |
+
"chat": chat_fn,
|
| 37 |
+
"finetune": finetune_lora_fn,
|
| 38 |
+
"finetune_lora": finetune_lora_fn,
|
| 39 |
+
"finetune_full": finetune_full_fn,
|
| 40 |
+
"finetune_adapter": finetune_adapter_fn,
|
| 41 |
+
"finetune_adapter_v2": finetune_adapter_v2_fn,
|
| 42 |
+
"pretrain": pretrain_fn,
|
| 43 |
+
"perplexity": perplexity_fn,
|
| 44 |
+
"generate": generate_base_fn,
|
| 45 |
+
"generate_full": generate_full_fn,
|
| 46 |
+
"generate_adapter": generate_adapter_fn,
|
| 47 |
+
"generate_adapter_v2": generate_adapter_v2_fn,
|
| 48 |
+
"generate_sequentially": generate_sequentially_fn,
|
| 49 |
+
"generate_speculatively": generate_speculatively_fn,
|
| 50 |
+
"generate_tp": generate_tp_fn,
|
| 51 |
+
"convert_to_litgpt": convert_hf_checkpoint_fn,
|
| 52 |
+
"convert_from_litgpt": convert_lit_checkpoint_fn,
|
| 53 |
+
"convert_pretrained_checkpoint": convert_pretrained_checkpoint_fn,
|
| 54 |
+
"merge_lora": merge_lora_fn,
|
| 55 |
+
"evaluate": evaluate_fn,
|
| 56 |
+
"serve": serve_fn,
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
set_docstring_parse_options(attribute_docstrings=True)
|
| 60 |
+
set_config_read_mode(urls_enabled=True)
|
| 61 |
+
|
| 62 |
+
# PyTorch bug that raises a false-positive warning
|
| 63 |
+
# More info: https://github.com/Lightning-AI/litgpt/issues/1561
|
| 64 |
+
warning_message = r"The epoch parameter in `scheduler.step\(\)` was not necessary and is being deprecated.*"
|
| 65 |
+
|
| 66 |
+
warnings.filterwarnings(
|
| 67 |
+
action="ignore", message=warning_message, category=UserWarning, module=r".*torch\.optim\.lr_scheduler.*"
|
| 68 |
+
)
|
| 69 |
+
|
| 70 |
+
torch.set_float32_matmul_precision("high")
|
| 71 |
+
CLI(parser_data)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
if __name__ == "__main__":
|
| 75 |
+
main()
|
litgpt/adapter.py
ADDED
|
@@ -0,0 +1,129 @@
|
| 1 |
+
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
|
| 2 |
+
|
| 3 |
+
"""Implementation of the paper:
|
| 4 |
+
|
| 5 |
+
LLaMA-Adapter: Efficient Fine-tuning of Language Models with Zero-init Attention
|
| 6 |
+
https://arxiv.org/abs/2303.16199
|
| 7 |
+
|
| 8 |
+
Port for LitGPT
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from dataclasses import dataclass
|
| 12 |
+
from typing import Any, Dict, Optional, Tuple
|
| 13 |
+
|
| 14 |
+
import torch
|
| 15 |
+
import torch.nn as nn
|
| 16 |
+
from typing_extensions import Self
|
| 17 |
+
|
| 18 |
+
from litgpt.config import Config as BaseConfig
|
| 19 |
+
from litgpt.model import GPT as BaseModel
|
| 20 |
+
from litgpt.model import Block as BaseBlock
|
| 21 |
+
from litgpt.model import CausalSelfAttention as BaseCausalSelfAttention
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
@dataclass
|
| 25 |
+
class Config(BaseConfig):
|
| 26 |
+
adapter_prompt_length: int = 10
|
| 27 |
+
adapter_start_layer: int = 2
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class GPT(BaseModel):
|
| 31 |
+
# Copy & paste from :class:`model.GPT`. Note that :class:`Block` is new here.
|
| 32 |
+
def __init__(self, config: Config) -> None:
|
| 33 |
+
nn.Module.__init__(self)
|
| 34 |
+
assert config.padded_vocab_size is not None
|
| 35 |
+
self.config = config
|
| 36 |
+
|
| 37 |
+
self.lm_head = nn.Linear(config.n_embd, config.padded_vocab_size, bias=config.lm_head_bias)
|
| 38 |
+
self.transformer = nn.ModuleDict(
|
| 39 |
+
dict(
|
| 40 |
+
wte=nn.Embedding(config.padded_vocab_size, config.n_embd),
|
| 41 |
+
h=nn.ModuleList(Block(config, block_idx) for block_idx in range(config.n_layer)),
|
| 42 |
+
ln_f=config.norm_class(config.n_embd, eps=config.norm_eps),
|
| 43 |
+
)
|
| 44 |
+
)
|
| 45 |
+
self.mask_cache: Optional[torch.Tensor] = None
|
| 46 |
+
self.max_seq_length = self.config.block_size
|
| 47 |
+
|
| 48 |
+
@classmethod
|
| 49 |
+
def from_name(cls, name: str, **kwargs: Any) -> Self:
|
| 50 |
+
return cls(Config.from_name(name, **kwargs))
|
| 51 |
+
|
| 52 |
+
def _init_weights(self, module: nn.Module) -> None:
|
| 53 |
+
"""Meant to be used with `gpt.apply(gpt._init_weights)`. Unused method left for completeness."""
|
| 54 |
+
super()._init_weights(module)
|
| 55 |
+
if isinstance(module, CausalSelfAttention):
|
| 56 |
+
module.reset_parameters()
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
class Block(BaseBlock):
|
| 60 |
+
def __init__(self, config: Config, block_idx: int) -> None:
|
| 61 |
+
super().__init__(config, block_idx)
|
| 62 |
+
self.attn = CausalSelfAttention(config, block_idx)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
class CausalSelfAttention(BaseCausalSelfAttention):
|
| 66 |
+
"""A modification of `litgpt.model.CausalSelfAttention` that adds the attention
|
| 67 |
+
over the adaption prompt."""
|
| 68 |
+
|
| 69 |
+
def __init__(self, config: Config, block_idx: int) -> None:
|
| 70 |
+
super().__init__(config, block_idx)
|
| 71 |
+
if block_idx >= config.adapter_start_layer:
|
| 72 |
+
# adapter embedding layer
|
| 73 |
+
self.adapter_wte = nn.Embedding(config.adapter_prompt_length, config.n_embd)
|
| 74 |
+
# gate for adaption
|
| 75 |
+
self.gating_factor = torch.nn.Parameter(torch.zeros(1, 1, config.n_head, 1))
|
| 76 |
+
# kv cache for inference
|
| 77 |
+
self.adapter_kv_cache: Optional[Tuple[torch.Tensor, torch.Tensor]] = None
|
| 78 |
+
|
| 79 |
+
def scaled_dot_product_attention(
|
| 80 |
+
self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, mask: Optional[torch.Tensor] = None
|
| 81 |
+
) -> torch.Tensor:
|
| 82 |
+
y = super().scaled_dot_product_attention(q, k, v, mask)
|
| 83 |
+
if self.block_idx < self.config.adapter_start_layer:
|
| 84 |
+
return y
|
| 85 |
+
|
| 86 |
+
aT = self.config.adapter_prompt_length
|
| 87 |
+
if self.adapter_kv_cache is not None:
|
| 88 |
+
# since this uses the wte weights as the prefix and the kv cache is only used during inference, ak and av
|
| 89 |
+
# are the same every call
|
| 90 |
+
ak, av = self.adapter_kv_cache
|
| 91 |
+
else:
|
| 92 |
+
prefix = self.adapter_wte.weight.reshape(1, aT, self.config.n_embd)
|
| 93 |
+
aqkv = self.qkv(prefix)
|
| 94 |
+
q_per_kv = self.config.n_head // self.config.n_query_groups
|
| 95 |
+
aqkv = aqkv.view(1, aT, self.config.n_query_groups, q_per_kv + 2, self.config.head_size)
|
| 96 |
+
aqkv = aqkv.permute(0, 2, 3, 1, 4)
|
| 97 |
+
_, ak, av = aqkv.split((q_per_kv, 1, 1), dim=2)
|
| 98 |
+
if self.config.n_query_groups != 1:
|
| 99 |
+
# for MHA this is a no-op
|
| 100 |
+
ak = ak.repeat_interleave(q_per_kv, dim=2)
|
| 101 |
+
av = av.repeat_interleave(q_per_kv, dim=2)
|
| 102 |
+
ak = ak.view(1, -1, aT, self.config.head_size) # (1, nh_ak, aT, hs)
|
| 103 |
+
av = av.view(1, -1, aT, self.config.head_size) # (1, nh_av, aT, hs)
|
| 104 |
+
self.adapter_kv_cache = (ak, av)
|
| 105 |
+
|
| 106 |
+
T = q.size(2)
|
| 107 |
+
amask = torch.ones(T, aT, dtype=torch.bool, device=q.device)
|
| 108 |
+
ay = super().scaled_dot_product_attention(q, ak, av, amask)
|
| 109 |
+
return y + self.gating_factor * ay
|
| 110 |
+
|
| 111 |
+
def reset_parameters(self) -> None:
|
| 112 |
+
if hasattr(self, "gating_factor"):
|
| 113 |
+
torch.nn.init.zeros_(self.gating_factor)
|
| 114 |
+
|
| 115 |
+
def _load_from_state_dict(self, state_dict: Dict, prefix: str, *args: Any, **kwargs: Any) -> None:
|
| 116 |
+
"""For compatibility with older checkpoints."""
|
| 117 |
+
if (key := prefix + "gating_factor") in state_dict and state_dict[key].size(1) == self.config.n_head:
|
| 118 |
+
state_dict[key] = state_dict[key].permute(0, 2, 1, 3)
|
| 119 |
+
super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def mark_only_adapter_as_trainable(model: GPT) -> None:
|
| 123 |
+
"""Sets `requires_grad=False` for all non-adapter weights."""
|
| 124 |
+
for name, param in model.named_parameters():
|
| 125 |
+
param.requires_grad = adapter_filter(name, param)
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def adapter_filter(key: str, value: Any) -> bool:
|
| 129 |
+
return "adapter_wte" in key or "gating_factor" in key
|
litgpt/adapter_v2.py
ADDED
|
@@ -0,0 +1,210 @@
|
| 1 |
+
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
|
| 2 |
+
|
| 3 |
+
"""Implementation of the paper:
|
| 4 |
+
|
| 5 |
+
LLaMA-Adapter V2: Parameter-Efficient Visual Instruction Model
|
| 6 |
+
https://arxiv.org/abs/2304.15010
|
| 7 |
+
|
| 8 |
+
Port for LitGPT
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from dataclasses import dataclass
|
| 12 |
+
from typing import Any, Dict, Optional, Type
|
| 13 |
+
|
| 14 |
+
import torch
|
| 15 |
+
import torch.nn as nn
|
| 16 |
+
from typing_extensions import Self
|
| 17 |
+
|
| 18 |
+
import litgpt
|
| 19 |
+
from litgpt.adapter import GPT as BaseModel
from litgpt.adapter import CausalSelfAttention as BaseCausalSelfAttention
from litgpt.adapter import Config as BaseConfig
from litgpt.model import Block as BaseBlock
from litgpt.scripts.convert_hf_checkpoint import qkv_reassemble
from litgpt.utils import map_old_state_dict_weights


@dataclass
class Config(BaseConfig):
    @property
    def mlp_class(self) -> Type:
        return getattr(litgpt.adapter_v2, self.mlp_class_name)


def adapter_filter(key: str, value: Any) -> bool:
    adapter_substrings = (
        # regular adapter v1 parameters
        "adapter_wte",
        "gating_factor",
        # adapter v2: new bias and scale used in Linear
        "adapter_scale",
        "adapter_bias",
        # adapter v2: Norm parameters are now trainable
        "norm_1",
        "norm_2",
        "ln_f",
    )
    return any(s in key for s in adapter_substrings)


class AdapterV2Linear(torch.nn.Module):
    def __init__(self, in_features: int, out_features: int, **kwargs) -> None:
        super().__init__()
        self.linear = torch.nn.Linear(in_features, out_features, **kwargs)
        self.adapter_bias = torch.nn.Parameter(torch.zeros(out_features), requires_grad=False)
        self.adapter_scale = torch.nn.Parameter(torch.ones(out_features), requires_grad=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.adapter_scale * (self.linear(x) + self.adapter_bias)

    def reset_parameters(self) -> None:
        nn.init.zeros_(self.adapter_bias)
        nn.init.ones_(self.adapter_scale)


class GPT(BaseModel):
    # Copy & paste from :class:`model.GPT`. Note that :class:`Block` is new here.
    def __init__(self, config: Config) -> None:
        nn.Module.__init__(self)
        assert config.padded_vocab_size is not None
        self.config = config

        self.lm_head = AdapterV2Linear(config.n_embd, config.padded_vocab_size, bias=config.lm_head_bias)
        self.transformer = nn.ModuleDict(
            dict(
                wte=nn.Embedding(config.padded_vocab_size, config.n_embd),
                h=nn.ModuleList(Block(config, block_idx) for block_idx in range(config.n_layer)),
                ln_f=config.norm_class(config.n_embd, eps=config.norm_eps),
            )
        )
        self.mask_cache: Optional[torch.Tensor] = None
        self.max_seq_length = self.config.block_size

    @classmethod
    def from_name(cls, name: str, **kwargs: Any) -> Self:
        return cls(Config.from_name(name, **kwargs))

    def _init_weights(self, module: nn.Module) -> None:
        """Meant to be used with `gpt.apply(gpt._init_weights)`. Unused method left for completeness."""
        super()._init_weights(module)
        if isinstance(module, AdapterV2Linear):
            module.reset_parameters()

    def _load_from_state_dict(self, state_dict: Dict, prefix: str, *args: Any, **kwargs: Any) -> None:
        """For compatibility with base checkpoints."""
        mapping = {"lm_head.weight": "lm_head.linear.weight", "lm_head.bias": "lm_head.linear.bias"}
        state_dict = map_old_state_dict_weights(state_dict, mapping, prefix)
        super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)


class Block(BaseBlock):
    def __init__(self, config: Config, block_idx: int) -> None:
        super().__init__(config, block_idx)
        self.attn = CausalSelfAttention(config, block_idx)
        self.mlp = config.mlp_class(config)


class CausalSelfAttention(BaseCausalSelfAttention):
    """A modification of `litgpt.adapter.CausalSelfAttention` that uses the Adapter V2 Linear class"""

    # Copy&paste from :class:`model.CausalSelfAttention`
    def __init__(self, config: Config, block_idx: int) -> None:
        super().__init__(config, block_idx)
        # key, query, value projections for all heads, but in a batch
        shape = (config.n_head + 2 * config.n_query_groups) * config.head_size
        self.qkv = AdapterV2Linear(in_features=config.n_embd, out_features=shape, bias=config.bias or config.attn_bias)
        # output projection
        self.proj = AdapterV2Linear(config.head_size * config.n_head, config.n_embd, bias=config.bias)

    def _load_from_state_dict(self, state_dict: Dict, prefix: str, *args: Any, **kwargs: Any) -> None:
        """For compatibility with base and/or legacy checkpoints."""
        mapping = {
            "qkv.weight": "qkv.linear.weight",
            "qkv.bias": "qkv.linear.bias",
            "proj.weight": "proj.linear.weight",
            "proj.bias": "proj.linear.bias",
        }
        state_dict = map_old_state_dict_weights(state_dict, mapping, prefix)
        # For compatibility with older checkpoints
        if (key := prefix + "gating_factor") in state_dict and state_dict[key].size(1) == self.config.n_head:
            state_dict[key] = state_dict[key].permute(0, 2, 1, 3)

        for attr in ("weight", "bias"):
            legacy_key = f"{prefix}attn.linear.{attr}"
            current_key = f"{prefix}qkv.linear.{attr}"
            if legacy_key in state_dict:
                state_dict[current_key] = qkv_reassemble(state_dict.pop(legacy_key), self.config)

        super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)


class GptNeoxMLP(litgpt.model.GptNeoxMLP):
    def __init__(self, config: Config) -> None:
        nn.Module.__init__(self)
        self.fc = AdapterV2Linear(config.n_embd, config.intermediate_size, bias=config.bias)
        self.proj = AdapterV2Linear(config.intermediate_size, config.n_embd, bias=config.bias)
        self.config = config

    def _load_from_state_dict(self, state_dict: Dict, prefix: str, *args: Any, **kwargs: Any) -> None:
        """For compatibility with base checkpoints."""
        mapping = {
            "fc.weight": "fc.linear.weight",
            "fc.bias": "fc.linear.bias",
            "proj.weight": "proj.linear.weight",
            "proj.bias": "proj.linear.bias",
        }
        state_dict = map_old_state_dict_weights(state_dict, mapping, prefix)
        super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)


class LLaMAMLP(litgpt.model.LLaMAMLP):
    def __init__(self, config: Config, intermediate_size: Optional[int] = None) -> None:
        nn.Module.__init__(self)
        self.intermediate_size = intermediate_size or config.intermediate_size
        self.fc_1 = AdapterV2Linear(config.n_embd, self.intermediate_size, bias=config.bias)
        self.fc_2 = AdapterV2Linear(config.n_embd, self.intermediate_size, bias=config.bias)
        self.proj = AdapterV2Linear(self.intermediate_size, config.n_embd, bias=config.bias)
        self.config = config

    def _load_from_state_dict(self, state_dict: Dict, prefix: str, *args: Any, **kwargs: Any) -> None:
        """For compatibility with base checkpoints."""
        mapping = {
            "fc_1.weight": "fc_1.linear.weight",
            "fc_1.bias": "fc_1.linear.bias",
            "fc_2.weight": "fc_2.linear.weight",
            "fc_2.bias": "fc_2.linear.bias",
            "proj.weight": "proj.linear.weight",
            "proj.bias": "proj.linear.bias",
        }
        state_dict = map_old_state_dict_weights(state_dict, mapping, prefix)
        super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)


class GemmaMLP(LLaMAMLP):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x_fc_1 = self.fc_1(x)
        x_fc_2 = self.fc_2(x)
        x = torch.nn.functional.gelu(x_fc_1, approximate=self.config.gelu_approximate) * x_fc_2
        return self.proj(x)


class LLaMAMoE(litgpt.model.LLaMAMoE):
    def __init__(self, config: Config) -> None:
        nn.Module.__init__(self)
        self.gate = AdapterV2Linear(config.n_embd, config.n_expert, bias=False)
        self.experts = nn.ModuleList(
            LLaMAMLP(config, intermediate_size=config.moe_intermediate_size) for _ in range(config.n_expert)
        )
        self.config = config

    def _load_from_state_dict(self, state_dict: Dict, prefix: str, *args: Any, **kwargs: Any) -> None:
        """For compatibility with base checkpoints."""
        mapping = {"gate.weight": "gate.linear.weight"}
        state_dict = map_old_state_dict_weights(state_dict, mapping, prefix)
        super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)


def mark_only_adapter_v2_as_trainable(model: GPT) -> None:
    """Sets requires_grad=False for all non-adapter weights"""
    for name, param in model.named_parameters():
        param.requires_grad = adapter_filter(name, param)
litgpt/api.py
ADDED
@@ -0,0 +1,734 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
#
# This file implements the LitGPT Python API
import sys
import time
from pathlib import Path
from typing import Any, Callable, List, Literal, Optional, Tuple, Union

import lightning as L
import numpy as np
import torch
from lightning.fabric.accelerators import CUDAAccelerator
from lightning.fabric.plugins import BitsandbytesPrecision
from tqdm import tqdm

from litgpt.chat.base import generate as stream_generate_fn
from litgpt.config import Config, name_to_config
from litgpt.generate.base import generate as generate_fn
from litgpt.generate.sequentially import sequential
from litgpt.generate.tp import tensor_parallel
from litgpt.model import GPT
from litgpt.prompts import PromptStyle, has_prompt_style, load_prompt_style, save_prompt_style
from litgpt.tokenizer import Tokenizer
from litgpt.utils import (
    auto_download_checkpoint,
    check_file_size_on_cpu_and_warn,
    check_nvlink_connectivity,
    chunked_cross_entropy,
    copy_config_files,
    extend_checkpoint_dir,
    get_default_supported_precision,
    load_checkpoint,
    save_config,
)


class LLM(torch.nn.Module):
    def __init__(
        self,
        model: GPT,
        preprocessor=None,
        prompt_style: PromptStyle = None,
        devices: Union[int, List[int]] = None,
        config: Config = None,
        checkpoint_dir: Path = None,
        fabric: L.Fabric = None,
        generate_strategy: Optional[Literal["sequential", "tensor_parallel"]] = None,
        kv_cache_initialized: bool = False,
        fixed_kv_cache_size: Union[int, Literal["max_model_supported"], None] = None,
    ) -> None:
        super().__init__()
        self.model = model
        self.preprocessor = preprocessor
        self.devices = devices
        self.prompt_style = prompt_style
        self.config = config
        self.checkpoint_dir = checkpoint_dir
        self.fabric = fabric
        self.generate_strategy = generate_strategy
        self.kv_cache_initialized = kv_cache_initialized
        self.fixed_kv_cache_size = fixed_kv_cache_size
        self.prev_generated_seq_length = 0

    """
    LLM model class for inference, pretraining, and finetuning.

    Example:
        from litgpt.api import LLM

        llm = LLM.load("microsoft/phi-2")
        text = llm.generate("What do Llamas eat?", top_k=1)
        print(text)
    """

    @property
    def tokenizer(self):
        return self.preprocessor.tokenizer

    def state_dict(self, destination=None, prefix="", keep_vars=False):
        return self.model.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)

    def load_state_dict(self, state_dict, strict=True):
        return self.model.load_state_dict(state_dict, strict=strict)

    def forward(
        self,
        input_ids: torch.Tensor,
        target_ids: Optional[torch.Tensor] = None,
        loss_fn: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
    ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
        logits = self.model(input_ids)
        if target_ids is not None:
            if loss_fn is None:
                loss_fn = chunked_cross_entropy
            loss = loss_fn(logits[..., :-1, :], target_ids[..., 1:])
            return logits, loss
        else:
            return logits

    def trainer_setup(self, trainer_ckpt: Optional[Path] = None) -> None:
        """Initializes the model checkpoint for PyTorch Lightning Trainer contexts"""
        self.model = GPT(self.config)

        if trainer_ckpt is not None:
            # strip the object name key from the state_dict
            state_dict = torch.load(trainer_ckpt, weights_only=True)["state_dict"]
            first_key = next(iter(state_dict))
            prefix = first_key.split(".")[0] + "."
            keys_to_modify = [key for key in state_dict if key.startswith(prefix)]
            for key in keys_to_modify:
                new_key = key.replace(prefix, "", 1)
                state_dict[new_key] = state_dict.pop(key)

            self.load_state_dict(state_dict, strict=True)

        elif self.checkpoint_dir is not None:
            state_dict = torch.load(self.checkpoint_dir / "lit_model.pth", weights_only=False)
            self.load_state_dict(state_dict, strict=False)

        else:
            raise ValueError(
                "No checkpoint found. Either provide a valid path via `trainer_ckpt` "
                "or ensure that `self.checkpoint_dir` points to a folder containing a `lit_model.pth` weight file."
            )

    def save(self, out_dir: Optional[Path] = None, prompt_style: Optional[PromptStyle] = None) -> None:
        out_dir = Path(out_dir)
        save_path = out_dir / "lit_model.pth"
        save_path.parent.mkdir(parents=True, exist_ok=True)

        if prompt_style is None:
            prompt_style = PromptStyle.from_config(self.config)
        if self.fabric is None:
            torch.save(self.state_dict(), save_path)
        else:
            self.fabric.save(save_path, self.state_dict())

        if self.fabric is None or self.fabric.global_rank == 0:
            # If initializing a model with random weights, the checkpoint dir can be None
            if self.checkpoint_dir is not None:
                copy_config_files(Path(self.checkpoint_dir), save_path.parent)
            else:
                save_config(self.config, out_dir)

            save_prompt_style(prompt_style, save_path.parent)

    @classmethod
    def load(
        cls,
        model: str,
        init: Optional[Literal["pretrained", "random"]] = "pretrained",
        tokenizer_dir: Optional[Path] = None,
        access_token: Optional[str] = None,
        distribute: Optional[Literal["auto"]] = "auto",
    ) -> "LLM":
        """
        Loads the LLM from a local directory or model hub.

        Arguments
            model: A local path to a directory containing the model weights or a valid model name.
                You can get a list of valid model names via the `litgpt download list` command line argument.
            init: If "pretrained" (default), downloads the model from the HF Hub if a local model can't be found at the `model`
                directory name; otherwise loads the model from the local directory.
                If "random", initializes the `model` with random weights.
            tokenizer_dir: An optional tokenizer directory if `model` is not a checkpoint directory, or if a user
                wants to use a different tokenizer instead.
            access_token: Optional API token to access models with restrictions when using `init="pretrained"`.
            distribute: If "auto" (default), initializes the model on a single GPU if available and otherwise on the CPU.
                To have more control over the model distribution strategy and utilize multiple GPUs, you can set
                `llm = LLM.load(..., distribute=None)` and call `llm.distribute(...)` manually.
        """

        allowed_init = {"pretrained", "random"}

        if init == "pretrained":
            checkpoint_dir = auto_download_checkpoint(
                model_name=model, access_token=access_token, ignore_tokenizer_files=tokenizer_dir is not None
            )
            config = Config.from_file(checkpoint_dir / "model_config.yaml")

        elif init == "random":
            checkpoint_dir = None
            try:
                config = Config.from_name(model)
            except ValueError:
                print(f"Model name {model} is not supported.\n")
                available_models = "\n".join(sorted(name_to_config))
                print(f"Available values:\n{available_models}")
                return

        else:
            raise ValueError(f"Invalid init option: {init}. Must be one of {allowed_init}")

        torch.set_float32_matmul_precision("high")

        if tokenizer_dir is not None:
            tokenizer_dir = extend_checkpoint_dir(Path(tokenizer_dir))
            tokenizer = Tokenizer(tokenizer_dir)
        elif checkpoint_dir is not None:
            tokenizer = Tokenizer(checkpoint_dir)
        else:
            raise ValueError("Provide a path to a tokenizer directory via the `tokenizer_dir` setting.")

        if checkpoint_dir is not None:
            prompt_style = (
                load_prompt_style(checkpoint_dir)
                if has_prompt_style(checkpoint_dir)
                else PromptStyle.from_config(config)
            )
        else:
            prompt_style = PromptStyle.from_config(config)

        if distribute == "auto":
            if torch.cuda.is_available():
                accelerator = "cuda"
            elif torch.backends.mps.is_available():
                accelerator = "mps"
            else:
                accelerator = "cpu"

            fabric = L.Fabric(
                accelerator=accelerator,
                devices=1,
                precision=get_default_supported_precision(training=False),
            )

            with fabric.init_module(empty_init=False):
                model = GPT(config)
            model.eval()
            preprocessor = Preprocessor(tokenizer, device=fabric.device)

            if checkpoint_dir is not None:
                checkpoint_path = checkpoint_dir / "lit_model.pth"
                check_file_size_on_cpu_and_warn(checkpoint_path, fabric.device)
                load_checkpoint(fabric, model, checkpoint_path)

            model = fabric.setup_module(model)

        else:
            preprocessor = Preprocessor(tokenizer, device="cuda" if torch.cuda.is_available() else "cpu")
            model = None
            fabric = None

        return cls(
            model=model,
            preprocessor=preprocessor,
            prompt_style=prompt_style,
            config=config,
            checkpoint_dir=checkpoint_dir,
            fabric=fabric,
            generate_strategy=None,
            kv_cache_initialized=False,
            fixed_kv_cache_size=False,
        )

    def distribute(
        self,
        accelerator: Literal["cpu", "cuda", "auto"] = "auto",
        devices: Union[int, Literal["auto"]] = "auto",
        precision: Optional[Any] = None,
        quantize: Optional[Literal["bnb.nf4", "bnb.nf4-dq", "bnb.fp4", "bnb.fp4-dq", "bnb.int8"]] = None,
        generate_strategy: Optional[Literal["sequential", "tensor_parallel"]] = None,
        fixed_kv_cache_size: Union[int, Literal["max_model_supported"], None] = None,
    ) -> None:
        """
        Moves the model onto specified devices for single-GPU or multi-GPU inference

        accelerator: Which device type to load the model on ("cpu", "gpu", "mps", "cuda", or "auto")
        devices: The number of devices (1, 2, etc.) or "auto", which uses all available devices
        quantize: Whether to quantize the model and using which method:
            - bnb.nf4, bnb.nf4-dq, bnb.fp4, bnb.fp4-dq: 4-bit quantization from bitsandbytes
            - bnb.int8: 8-bit quantization from bitsandbytes
            for more details, see https://github.com/Lightning-AI/litgpt/blob/main/tutorials/quantize.md
        precision: Indicates the Fabric precision setting to use.
            For instance, "32-true", "16-mixed", "16-true", "bf16-mixed", "bf16-true".
            For more details, see https://lightning.ai/docs/fabric/stable/api/fabric_args.html#precision
        generate_strategy: Whether to use a sequential model generation strategy. The "sequential" setting allows running
            models that wouldn't fit in a single card by partitioning the transformer blocks across
            all devices and running them sequentially. Sequential generation may be slower but allows using larger models.
            Note that sequential generation sets `fixed_kv_cache_size="max_model_supported"`. You can set it to a lower integer
            value, `fixed_kv_cache_size=256`, to reduce memory. The `fixed_kv_cache_size` value determines the maximum number
            of tokens that can be returned via `llm.generate(...)`.
        fixed_kv_cache_size: If set to an integer value or "max_model_supported", the kv-cache won't be resized dynamically
            during `llm.generate` calls. Use this setting if you plan to compile the model or use `generate_strategy="sequential"`.
            Note that the chosen `fixed_kv_cache_size` value determines the maximum number of tokens that can be returned in `llm.generate(...)`.
        """

        if self.checkpoint_dir is None:
            raise NotImplementedError(
                "The LLM was initialized with init='random' but .distribute() "
                "currently only supports pretrained weights."
            )

        allowed_accelerators = {"cpu", "gpu", "cuda", "mps", "auto"}
        if accelerator not in allowed_accelerators:
            raise ValueError(f"Invalid accelerator: {accelerator}. Must be one of {allowed_accelerators}.")

        if accelerator == "auto":
            if torch.cuda.is_available():
                accelerator = "cuda"
            elif torch.backends.mps.is_available():
                accelerator = "mps"
            else:
                accelerator = "cpu"

        if generate_strategy in ("sequential", "tensor_parallel") and accelerator not in ("cuda", "gpu"):
            raise NotImplementedError(
                f"generate_strategy='{generate_strategy}' is only supported for accelerator='cuda'|'gpu'."
            )

        if devices == "auto":
            if generate_strategy in ("sequential", "tensor_parallel"):
                total_devices = CUDAAccelerator.auto_device_count()
            else:
                total_devices = 1
        elif isinstance(devices, int) and accelerator == "cuda":
            use_devices = calculate_number_of_devices(devices)
            total_devices = CUDAAccelerator.auto_device_count()
            if use_devices > total_devices:
                raise ValueError(
                    f"You selected more devices ({use_devices}) than available in your system ({total_devices})."
                )
            else:
                total_devices = use_devices

            if total_devices > 1 and generate_strategy not in ("sequential", "tensor_parallel"):
                raise NotImplementedError(
                    "Support for multiple devices is currently only implemented for generate_strategy='sequential'|'tensor_parallel'."
                )
        elif accelerator == "cpu" or accelerator == "mps":
            total_devices = 1

        else:
            raise ValueError(f"devices argument must be an integer or 'auto', got {devices}")

        print(f"Using {total_devices} device(s)", file=sys.stderr)

        if precision is None:
            precision = get_default_supported_precision(training=False)

        print("Precision set", file=sys.stderr)

        plugins = None
        if quantize is not None and quantize.startswith("bnb."):
            if "mixed" in precision:
                raise ValueError("The combination of quantization and mixed precision is not supported.")
            dtype = {"16-true": torch.float16, "bf16-true": torch.bfloat16, "32-true": torch.float32}[precision]
            plugins = BitsandbytesPrecision(quantize[4:], dtype)
            precision = None

        # set "ddp" as the strategy for the launching functionality, but there's no data-parallelism
        if generate_strategy != "tensor_parallel":
            fabric = L.Fabric(
                accelerator=accelerator,
                devices=1,  # Otherwise sequential wouldn't work, see litgpt/generate/sequentially.py
                # devices=devices,
                precision=precision,
                plugins=plugins,
            )
        else:
            fabric = L.Fabric(
                accelerator=accelerator, devices=total_devices, strategy="ddp", precision=precision, plugins=plugins
            )
            if torch.cuda.is_available() and fabric.accelerator.auto_device_count() > 1:
                check_nvlink_connectivity(fabric)
            fabric.launch()

        print("Fabric launched", file=sys.stderr)

        self.kv_cache_initialized = False
        if generate_strategy is None:
            with fabric.init_module(empty_init=(total_devices > 1)):
                model = GPT(self.config)
            model.eval()

            if self.checkpoint_dir is not None:
                load_checkpoint(fabric, model, self.checkpoint_dir / "lit_model.pth")

            model = fabric.setup_module(model)

            if fixed_kv_cache_size is not None:
                if fixed_kv_cache_size is None or fixed_kv_cache_size == "max_model_supported":
                    kv_cache_size = model.max_seq_length
                else:
                    kv_cache_size = fixed_kv_cache_size
                model.set_kv_cache(batch_size=1, max_seq_length=kv_cache_size, device=fabric.device)
                self.kv_cache_initialized = True
                self.fixed_kv_cache_size = fixed_kv_cache_size

        elif generate_strategy in ("sequential", "tensor_parallel"):
            with fabric.init_tensor(), torch.device("meta"):
                model = GPT(self.config)
            model.eval()

            if generate_strategy == "sequential":
                state_dict = torch.load(
                    str(self.checkpoint_dir / "lit_model.pth"), mmap=True, map_location="cpu", weights_only=False
                )
                model.load_state_dict(state_dict, assign=True)
                model = fabric.setup_module(model, move_to_device=False)

                if fixed_kv_cache_size is None:
                    fixed_kv_cache_size = "max_model_supported"
                if fixed_kv_cache_size == "max_model_supported":
                    kv_cache_size = model.max_seq_length
                else:
                    kv_cache_size = fixed_kv_cache_size

                model = sequential(model, fabric.device, kv_cache_size, total_devices)
                self.fixed_kv_cache_size = fixed_kv_cache_size

            elif generate_strategy == "tensor_parallel":
                if fabric.global_rank == 0:
                    pbar = tqdm(total=fabric.world_size, desc="Loading model weights")
                for rank in range(fabric.world_size):
                    if fabric.global_rank == rank:
                        state_dict = torch.load(
                            str(self.checkpoint_dir / "lit_model.pth"),
                            mmap=True,
                            map_location="cpu",
                            weights_only=False,
                        )
                        model.load_state_dict(state_dict, assign=True)

                        # cannot use `.setup_module` because it will wrap with DDP
                        model = fabric._precision.convert_module(model)
                        model = tensor_parallel(fabric, model)

                        with fabric.init_tensor():
                            if fixed_kv_cache_size is None:
                                fixed_kv_cache_size = "max_model_supported"
                            if fixed_kv_cache_size == "max_model_supported":
                                kv_cache_size = model.max_seq_length
                            else:
                                kv_cache_size = fixed_kv_cache_size
                            model.max_seq_length = kv_cache_size
                            # the rope cache which is on meta device
                            model.cos, model.sin = model.rope_cache()
                            # enable the kv cache
                            model.set_kv_cache(batch_size=1)
                        model.eval()
                        model = fabric.to_device(model)

                    fabric.barrier()
                    if fabric.global_rank == 0:
                        pbar.update(1)

                if fabric.global_rank == 0:
                    pbar.close()

            self.kv_cache_initialized = True

        else:
            raise ValueError(f"Unsupported generate_strategy: {generate_strategy}")

        self.model = model
        self.fabric = fabric
        self.preprocessor.device = fabric.device

    @torch.inference_mode()
    def generate(
        self,
        prompt: str,
        sys_prompt: Optional[str] = None,
        max_new_tokens: int = 50,
        temperature: float = 1.0,
        top_k: Optional[int] = None,
        top_p: float = 1.0,
        return_as_token_ids: bool = False,
        stream: bool = False,
    ) -> Union[str, torch.Tensor]:
        """
        Takes a conditioning sequence (prompt) as input and continues to generate as many tokens as requested.

        Arguments:
            model: The model to use.
            prompt: The prompt string to use for generating the samples.
            sys_prompt: The system prompt string to use for generating the samples.
                The system prompt allows the user to provide additional instructions to shape all responses by providing additional context, behavioral guidelines, style, and constraints.
            max_new_tokens: The maximum number of new tokens to return.
            temperature: Scales the predicted logits by 1 / temperature.
            top_k: If specified, only sample among the tokens with the k highest probabilities.
            top_p: If specified, it represents the cumulative probability threshold to consider in the sampling process.
                In top-p sampling, the next token is sampled from the highest probability tokens
                whose cumulative probability exceeds the threshold `top_p`. When specified,
                it must be `0 <= top_p <= 1`. Here, `top_p=0` is equivalent
                to sampling the most probable token, while `top_p=1` samples from the whole distribution.
                It can be used in conjunction with `top_k` and `temperature` with the following order
                of application:

                1. `top_k` sampling
                2. `temperature` scaling
                3. `top_p` sampling

                For more details, see https://arxiv.org/abs/1904.09751
                or https://huyenchip.com/2024/01/16/sampling.html#top_p
            return_as_token_ids: If True, returns the token IDs as a torch.Tensor. Otherwise, returns the decoded text as a string.
            stream: If True, returns a generator that yields tokens as they are generated.
                At the moment, this setting is slower and may use more memory than the non-streaming version.
                We plan to resolve this in the future.
        """
        if self.model is None:
            raise AttributeError(
                "The model is not initialized yet; use the .distribute() "
                "or .trainer_setup() method to initialize the model."
            )
        input_ids = self._text_to_token_ids(prompt, sys_prompt)
        prompt_length = input_ids.size(0)
        max_returned_tokens = prompt_length + max_new_tokens

        if not self.kv_cache_initialized:
            if self.fabric is not None:
                device = self.fabric.device
            else:
                device = self.preprocessor.device
            self.model.set_kv_cache(batch_size=1, max_seq_length=max_returned_tokens, device=device)
            self.kv_cache_initialized = True

        # Dynamically grow the kv cache size if necessary
        if not self.fixed_kv_cache_size and self.prev_generated_seq_length < max_returned_tokens:
            tmp_device = self.model.mask_cache.device
            self.model.clear_kv_cache()
            self.model.set_kv_cache(batch_size=1, max_seq_length=max_returned_tokens, device=tmp_device)

        else:
            for block in self.model.transformer.h:
                block.attn.kv_cache.reset_parameters()

        self.prev_generated_seq_length = max_returned_tokens
        self.model.eval()

        def iterator():
            outputs = stream_generate_fn(
                model=self.model,
                prompt=input_ids,
                max_returned_tokens=max_returned_tokens,
                temperature=temperature,
                top_k=top_k,
                top_p=top_p,
                stop_tokens=([self.preprocessor.tokenizer.eos_id],),
            )
            if return_as_token_ids:
                yield from outputs
            else:
                for output in outputs:
                    yield self.preprocessor.decode(output)
            return

        if stream:
            outputs = iterator()
        else:
            outputs = generate_fn(
                model=self.model,
                prompt=input_ids,
                max_returned_tokens=max_returned_tokens,
                temperature=temperature,
                top_k=top_k,
                top_p=top_p,
                eos_id=self.preprocessor.tokenizer.eos_id,
                include_prompt=False,
            )

        if stream:
            return outputs
        elif return_as_token_ids:
            return outputs
        else:
            return self.preprocessor.decode(outputs)

    def _text_to_token_ids(self, prompt: str, sys_prompt: Optional[str] = None) -> torch.Tensor:
        """Utility method to convert a prompt text to token IDs"""
        prompt = self.prompt_style.apply(prompt, sys_prompt=sys_prompt)
        input_ids = self.preprocessor.encode(prompt)
        return input_ids

    def benchmark(self, num_iterations=1, **kwargs):
        """
        A wrapper around the .generate() method to calculate runtime performance.

        Arguments:
            num_iterations: How often the `.generate()` call is repeated.
            kwargs: Keyword arguments that are passed to the .generate() method.
        """
        benchmark_dict = {}

        for i in range(num_iterations):
            time_to_first_token = None
            t0 = time.perf_counter()
            outputs = self.generate(**kwargs)

            if kwargs.get("stream", False):
                gen_outputs = []
                for e in outputs:
                    if time_to_first_token is None:
                        t1 = time.perf_counter()
                        time_to_first_token = t1 - t0
                    gen_outputs.append(e)
                outputs = "".join(gen_outputs)
            else:
                outputs = self.generate(
                    **kwargs,
                )
            benchmark_dict.setdefault("Seconds total", []).append(time.perf_counter() - t0)

            benchmark_dict.setdefault("Seconds to first token", []).append(time_to_first_token)
            tokens_generated = self.preprocessor.encode(outputs).size(0)
            benchmark_dict.setdefault("Tokens generated", []).append(tokens_generated)
            benchmark_dict.setdefault("Inference speed in tokens/sec", []).append(
                benchmark_dict["Tokens generated"][-1] / benchmark_dict["Seconds total"][-1]
            )
            if self.fabric is not None and self.fabric.device.type == "cuda":
                benchmark_dict.setdefault("Total GPU memory allocated in GB", []).append(
                    torch.cuda.max_memory_allocated() / 1e9
                )

        return outputs, benchmark_dict


class Preprocessor:
    """
    Preprocessor class for tokenization and de-tokenization.
    """

    def __init__(self, tokenizer: Tokenizer, device: str = "cpu") -> None:
        self.tokenizer = tokenizer
        self.device = device

    def encode(self, text: str) -> torch.Tensor:
        return self.tokenizer.encode(text, device=self.device)

    def decode(self, token_ids: torch.Tensor) -> str:
        return self.tokenizer.decode(token_ids)


def calculate_number_of_devices(devices):
    """
    Utility function to calculate the number of devices.
    """
    num_devices = devices if isinstance(devices, int) else len(devices) if isinstance(devices, list) else 0
    return num_devices


def benchmark_dict_to_markdown_table(data):
    """
    Converts .benchmark() outputs to a markdown table
    """
    markdown_table = (
        "| Metric | Mean | Std Dev |\n"
    )
    markdown_table += (
        "|-------------------------------------|-----------------------------|-----------------------------|\n"
    )

    for key, values in data.items():
        mean_value = np.mean(values)
        std_dev_value = np.std(values, ddof=1)

        formatted_mean = f"{mean_value:.2f}"
        formatted_std_dev = f"{std_dev_value:.2f}"

        markdown_table += f"| {key.ljust(35)} | {formatted_mean.ljust(27)} | {formatted_std_dev.ljust(27)} |\n"

    return markdown_table


def pull_request_benchmark_util(model_name="microsoft/phi-2", num_iterations=6):
    def print_table(header, data):
        print(f"\n### {header}\n")
        markdown_table = (
            f"| Metric | First Iteration | "
            f"Iter 2-{num_iterations} Mean | Iter 2-{num_iterations} Standard Dev. |\n"
            f"|--------------------------------------|-----------------|"
            f"-------------------|-------------------------|\n"
        )

        for key, value in data.items():
            first_iteration = f"{value[0]:.2f}" if value[0] is not None else "N/A"
            clean_values = [v for v in value[1:] if v is not None]

            if clean_values:
                mean_value = np.mean(clean_values)
                std_dev_value = np.std(clean_values, ddof=1)
                mean_str = f"{mean_value:.2f}"
                std_dev_str = f"{std_dev_value:.2f}"
            else:
                mean_str = "N/A"
                std_dev_str = "N/A"

            markdown_table += f"| {key:<36} | {first_iteration:<15} | {mean_str:<17} | {std_dev_str:<23} |\n"
        print(markdown_table)

    import subprocess

    try:
        g_hash = subprocess.run(
            ["git", "rev-parse", "--short", "HEAD"], capture_output=True, text=True, check=True
        ).stdout.strip()
        print(f"Git Commit Hash: {g_hash}")
    except subprocess.CalledProcessError:
        print("Git Commit Hash: N/A")
    print(f"PyTorch version: {torch.__version__}")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Device: {device}\n")

    # 1st table
    llm = LLM.load(
        model=model_name,
    )
    text, bench_d = llm.benchmark(num_iterations=num_iterations, prompt="What do llamas eat?", top_k=1)
    print_table(f"Defaults ({model_name}), 1st time", bench_d)
    del llm

    # 2nd table
    llm = LLM.load(
        model=model_name,
    )
    text, bench_d = llm.benchmark(num_iterations=num_iterations, prompt="What do llamas eat?", top_k=1)
    print_table(f"Defaults ({model_name}), 2nd time", bench_d)
    del llm

    # 3rd table
    llm = LLM.load(
        model=model_name,
    )
    text, bench_d = llm.benchmark(num_iterations=num_iterations, prompt="What do llamas eat?", top_k=1, stream=True)
    print_table("stream=True", bench_d)
    del llm

    # 4th table
    llm = LLM.load(model=model_name, distribute=None)
    llm.distribute(fixed_kv_cache_size=500)

    text, bench_d = llm.benchmark(num_iterations=num_iterations, prompt="What do llamas eat?", top_k=1, stream=True)
    print_table("stream=True + fixed_kv_cache=500", bench_d)
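
The docstrings in `litgpt/api.py` above already describe the intended flow; the sketch below just restates the documented calls in one place. The model name, token counts, and device settings are illustrative only, and the manual-placement variant assumes a CUDA device is available.

```python
from litgpt.api import LLM

# Single-device path: load, generate, print (mirrors the class docstring example).
llm = LLM.load("microsoft/phi-2")
text = llm.generate("What do Llamas eat?", top_k=1, max_new_tokens=50)
print(text)

# Manual placement path: skip auto-distribution, then move the model explicitly.
llm = LLM.load("microsoft/phi-2", distribute=None)
llm.distribute(accelerator="cuda", devices="auto", fixed_kv_cache_size=500)
out, stats = llm.benchmark(prompt="What do Llamas eat?", top_k=1, num_iterations=3)
print(stats)
```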
litgpt/args.py
ADDED
@@ -0,0 +1,104 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
import math
import warnings
from dataclasses import dataclass
from typing import Optional, Union


@dataclass
class TrainArgs:
    """Training-related arguments"""

    save_interval: Optional[int] = 1000
    """Number of optimizer steps between saving checkpoints"""
    log_interval: int = 1
    """Number of iterations between logging calls"""
    global_batch_size: int = 64
    """Number of samples between optimizer steps across data-parallel ranks"""
    micro_batch_size: int = 4
    """Number of samples per data-parallel rank"""
    lr_warmup_steps: Optional[int] = 100
    """Number of iterations with learning rate warmup active"""
    lr_warmup_fraction: Optional[float] = None
    """The fraction of an epoch to use for learning rate warmup"""
    epochs: Optional[int] = None
    """Number of epochs to train on"""
    # TODO: `pretrain` is the only script using `max_tokens` explicitly. replace it with epoch_size*epochs?
    max_tokens: Optional[int] = None
    """Total number of tokens to train on"""
    max_steps: Optional[int] = None
    """Limits the number of optimizer steps to run"""
    max_seq_length: Optional[int] = None
    """Limits the length of samples"""
    tie_embeddings: Optional[bool] = None
    """Whether to tie the embedding weights with the language modeling head weights"""

    # Optimization args
    max_norm: Optional[float] = None
    min_lr: float = 6e-5

    def __post_init__(self) -> None:
        if self.lr_warmup_fraction and self.lr_warmup_steps:
            raise ValueError(
                "Can't provide both `--train.lr_warmup_fraction` and `--train.lr_warmup_steps`. Choose one."
            )
        if self.lr_warmup_fraction and not (0 <= self.lr_warmup_fraction <= 1):
            raise ValueError("`--train.lr_warmup_fraction` must be between 0 and 1.")

        if self.lr_warmup_steps and self.max_steps and (self.lr_warmup_steps >= self.max_steps):
            warnings.warn(
                "`--train.lr_warmup_steps` should be less than `--train.max_steps`."
                f" Got {self.lr_warmup_steps} lr_warmup_steps and {self.max_steps} max_steps.",
                UserWarning,
            )

    def gradient_accumulation_iters(self, devices: int, num_nodes: int = 1) -> int:
        """Number of iterations between gradient synchronizations"""
        gradient_accumulation_iters = self.batch_size(devices, num_nodes) // self.micro_batch_size
        assert gradient_accumulation_iters > 0
        return gradient_accumulation_iters

    def batch_size(self, devices: int, num_nodes: int = 1) -> int:
        """Number of samples between optimizer steps per data-parallel rank"""
        batch_size = self.global_batch_size // (devices * num_nodes)
        assert batch_size > 0
        return batch_size

    def warmup_iters(self, devices: int, num_nodes: int, max_iters: int, train_dataloader) -> int:
        """Number of iterations to warm up the learning rate."""
        if self.lr_warmup_fraction:
            return min(max_iters, math.ceil(self.lr_warmup_fraction * len(train_dataloader)))
        if self.lr_warmup_steps:
            return min(max_iters, self.lr_warmup_steps * self.gradient_accumulation_iters(devices, num_nodes))
        return 0


@dataclass
class EvalArgs:
    """Evaluation-related arguments"""

    interval: int = 600
    """Number of optimizer steps between evaluation calls"""
    max_new_tokens: Optional[int] = None
    """Number of tokens to generate"""
    max_iters: int = 100
    """Number of iterations"""
    initial_validation: bool = False
    """Whether to evaluate on the validation set at the beginning of the training"""
    final_validation: bool = True
    """Whether to evaluate on the validation set at the end of the training"""
    evaluate_example: Union[str, int] = "first"
    """How to pick an example instruction to evaluate periodically during training.
    Can be "first", "random", or an integer index to pick a specific example."""


@dataclass
class LogArgs:
    """Logging-related arguments"""

    project: Optional[str] = None
    """Project name"""
    run: Optional[str] = None
    """Run name"""
    group: Optional[str] = None
    """Group name"""
litgpt/config.py
ADDED
@@ -0,0 +1,3087 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
|
| 2 |
+
|
| 3 |
+
from copy import deepcopy
|
| 4 |
+
from dataclasses import dataclass, field
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
from typing import Any, List, Literal, Optional, Type, Union
|
| 7 |
+
|
| 8 |
+
import yaml
|
| 9 |
+
from typing_extensions import Self
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def find_multiple(n: int, k: int) -> int:
|
| 13 |
+
"""Utility function for finding the nearest value to n which is a multiple of k.
|
| 14 |
+
|
| 15 |
+
NOTE: We define this function in this module rather than `litgpt.utils` so that users can import
|
| 16 |
+
this file to do configuration manipulations in Python environments which do not include all the dependencies
|
| 17 |
+
demanded by `litgpt.utils`.
|
| 18 |
+
"""
|
| 19 |
+
assert k > 0
|
| 20 |
+
if n % k == 0:
|
| 21 |
+
return n
|
| 22 |
+
return n + k - (n % k)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
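A quick sanity check of `find_multiple` as a standalone snippet (assuming the module is importable as `litgpt.config`, the path the `from_checkpoint` docstring below refers to): it rounds `n` up to the next multiple of `k`, which `Config` uses further down to pad the vocabulary size. The concrete numbers are only illustrative.

from litgpt.config import find_multiple

assert find_multiple(50254, 512) == 50688  # the default vocab_size with the default padding_multiple
assert find_multiple(50254, 128) == 50304  # padding_multiple used by the Pythia entries below
assert find_multiple(4096, 256) == 4096    # already a multiple of k, returned unchanged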
+
@dataclass
|
| 26 |
+
class Config:
|
| 27 |
+
name: str = ""
|
| 28 |
+
hf_config: dict = field(default_factory=dict)
|
| 29 |
+
# General size parameters
|
| 30 |
+
block_size: int = 4096
|
| 31 |
+
n_layer: int = 16
|
| 32 |
+
n_embd: int = 4096
|
| 33 |
+
vocab_size: int = 50254
|
| 34 |
+
padding_multiple: int = 512
|
| 35 |
+
padded_vocab_size: Optional[int] = None
|
| 36 |
+
# Transformer block (structure, normalizations)
|
| 37 |
+
norm_class_name: Literal["LayerNorm", "RMSNorm"] = "LayerNorm"
|
| 38 |
+
norm_eps: float = 1e-5
|
| 39 |
+
norm_qk: bool = False
|
| 40 |
+
norm_qk_type: Literal["default", "olmo2"] = "default"
|
| 41 |
+
post_attention_norm: bool = False
|
| 42 |
+
post_mlp_norm: bool = False
|
| 43 |
+
parallel_residual: bool = True
|
| 44 |
+
shared_attention_norm: bool = False
|
| 45 |
+
# Transformer block (self-attention)
|
| 46 |
+
n_head: int = 32
|
| 47 |
+
head_size: Optional[int] = None
|
| 48 |
+
# to use multi-head attention (MHA), set this to `n_head` (default)
|
| 49 |
+
# to use multi-query attention (MQA), set this to 1
|
| 50 |
+
# to use grouped-query attention (GQA), set this to a value in between
|
| 51 |
+
# Example with `n_head=4`
|
| 52 |
+
# ┌───┐┌───┐┌───┐┌───┐ ┌───┐ ┌───┐ ┌───┐
|
| 53 |
+
# │ v ││ v ││ v ││ v │ │ v │ │ v │ │ v │
|
| 54 |
+
# └───┘└───┘└───┘└───┘ └───┘ └───┘ └───┘
|
| 55 |
+
# │ │ │ │ │ │ │
|
| 56 |
+
# ┌───┐┌───┐┌───┐┌───┐ ┌───┐ ┌───┐ ┌───┐
|
| 57 |
+
# │ k ││ k ││ k ││ k │ │ k │ │ k │ │ k │
|
| 58 |
+
# └───┘└───┘└───┘└───┘ └───┘ └───┘ └───┘
|
| 59 |
+
# │ │ │ │ ┌──┴──┐ ┌──┴──┐ ┌────┬──┴─┬────┐
|
| 60 |
+
# ┌───┐┌───┐┌───┐┌───┐ ┌───┐┌───┐┌───┐┌───┐ ┌───┐┌───┐┌───┐┌───┐
|
| 61 |
+
# │ q ││ q ││ q ││ q │ │ q ││ q ││ q ││ q │ │ q ││ q ││ q ││ q │
|
| 62 |
+
# └───┘└───┘└───┘└───┘ └───┘└───┘└───┘└───┘ └───┘└───┘└───┘└───┘
|
| 63 |
+
# ◀──────────────────▶ ◀──────────────────▶ ◀──────────────────▶
|
| 64 |
+
# MHA GQA MQA
|
| 65 |
+
# n_query_groups=4 n_query_groups=2 n_query_groups=1
|
| 66 |
+
#
|
| 67 |
+
# credit https://arxiv.org/pdf/2305.13245.pdf
|
| 68 |
+
n_query_groups: Optional[int] = None
|
| 69 |
+
attn_bias: bool = False
|
| 70 |
+
attention_scores_scalar: Optional[int] = None
|
| 71 |
+
sliding_window_size: Optional[int] = None
|
| 72 |
+
sliding_window_indices: Optional[List] = None
|
| 73 |
+
# if `attention_logit_softcapping` is used, cannot use optimized
|
| 74 |
+
# `torch.nn.functional.scaled_dot_product_attention` (which implements
|
| 75 |
+
# Flash attention), may result in higher memory and runtime footprint.
|
| 76 |
+
attention_logit_softcapping: Optional[float] = None
|
| 77 |
+
# Rotary position embedding (RoPE)
|
| 78 |
+
rope_base: int = 10000
|
| 79 |
+
rotary_percentage: float = 0.25
|
| 80 |
+
rope_condense_ratio: int = 1
|
| 81 |
+
rope_adjustments: Optional[dict] = None
|
| 82 |
+
# Transformer block (MLP)
|
| 83 |
+
intermediate_size: Optional[int] = None
|
| 84 |
+
moe_intermediate_size: Optional[int] = None
|
| 85 |
+
bias: bool = True
|
| 86 |
+
mlp_class_name: Literal["GptNeoxMLP", "LLaMAMLP", "GemmaMLP", "LLaMAMoE"] = "GptNeoxMLP"
|
| 87 |
+
gelu_approximate: str = "none"
|
| 88 |
+
n_expert: int = 0
|
| 89 |
+
n_expert_per_token: int = 0
|
| 90 |
+
# GPT before/after blocks
|
| 91 |
+
scale_embeddings: bool = False
|
| 92 |
+
lm_head_bias: bool = False
|
| 93 |
+
final_logit_softcapping: Optional[float] = None
|
| 94 |
+
norm_1: bool = True
|
| 95 |
+
norm_2: bool = True
|
| 96 |
+
# The base period of the RoPE embeddings for local attention.
|
| 97 |
+
# If not provided, rope_theta will be used for both local and global attention.
|
| 98 |
+
rope_local_base_freq: Optional[float] = None
|
| 99 |
+
rope_indices: Optional[List] = None
|
| 100 |
+
|
| 101 |
+
def __post_init__(self):
|
| 102 |
+
if not self.name:
|
| 103 |
+
self.name = self.hf_config.get("name", self.name)
|
| 104 |
+
|
| 105 |
+
if self.head_size is None:
|
| 106 |
+
assert self.n_embd % self.n_head == 0
|
| 107 |
+
self.head_size = self.n_embd // self.n_head
|
| 108 |
+
|
| 109 |
+
# vocab size should be padded to a multiple of padding_multiple to be optimal on hardware. compute the closest value
|
| 110 |
+
if self.padded_vocab_size is None:
|
| 111 |
+
self.padded_vocab_size = find_multiple(self.vocab_size, self.padding_multiple)
|
| 112 |
+
else:
|
| 113 |
+
# vocab size shouldn't be larger than padded vocab size
|
| 114 |
+
self.vocab_size = min(self.vocab_size, self.padded_vocab_size)
|
| 115 |
+
|
| 116 |
+
# compute the number of query groups
|
| 117 |
+
if self.n_query_groups is not None:
|
| 118 |
+
assert self.n_head % self.n_query_groups == 0
|
| 119 |
+
else:
|
| 120 |
+
self.n_query_groups = self.n_head
|
| 121 |
+
|
| 122 |
+
# compute the intermediate size for MLP if not set
|
| 123 |
+
if self.intermediate_size is None:
|
| 124 |
+
if self.mlp_class_name == "LLaMAMLP":
|
| 125 |
+
raise ValueError(f"The config {self.name!r}, needs to set the `intermediate_size`")
|
| 126 |
+
self.intermediate_size = 4 * self.n_embd
|
| 127 |
+
|
| 128 |
+
self.rope_n_elem = int(self.rotary_percentage * self.head_size)
|
| 129 |
+
|
| 130 |
+
if self.sliding_window_size is not None and self.sliding_window_indices is None:
|
| 131 |
+
self.sliding_window_indices = [1] * self.n_layer
|
| 132 |
+
|
| 133 |
+
if self.rope_local_base_freq is not None and self.rope_indices is None:
|
| 134 |
+
self.rope_indices = [1] * self.n_layer
|
| 135 |
+
|
| 136 |
+
@classmethod
|
| 137 |
+
def from_name(cls, name: str, **kwargs: Any) -> Optional[Self]:
|
| 138 |
+
if name not in name_to_config:
|
| 139 |
+
# search through all `config['hf_config']['name']`
|
| 140 |
+
try:
|
| 141 |
+
conf_dict = next(
|
| 142 |
+
config
|
| 143 |
+
for config in configs
|
| 144 |
+
if name == config["hf_config"]["name"]
|
| 145 |
+
or config["hf_config"]["org"] + "/" + config["hf_config"]["name"] == name
|
| 146 |
+
)
|
| 147 |
+
except StopIteration:
|
| 148 |
+
raise ValueError(f"{name!r} is not a supported config name")
|
| 149 |
+
else:
|
| 150 |
+
conf_dict = name_to_config[name]
|
| 151 |
+
|
| 152 |
+
conf_dict = conf_dict.copy()
|
| 153 |
+
conf_dict.update(kwargs)
|
| 154 |
+
return cls(**conf_dict)
|
| 155 |
+
|
| 156 |
+
@classmethod
|
| 157 |
+
def from_file(cls, path: Union[str, Path], **kwargs: Any) -> Self:
|
| 158 |
+
with open(path, encoding="utf-8") as fp:
|
| 159 |
+
file_kwargs = yaml.safe_load(fp)
|
| 160 |
+
if file_kwargs is None:
|
| 161 |
+
raise ValueError(f"{path} is empty which is likely unexpected.")
|
| 162 |
+
file_kwargs.update(kwargs)
|
| 163 |
+
return cls(**file_kwargs)
|
| 164 |
+
|
| 165 |
+
@classmethod
|
| 166 |
+
def from_checkpoint(cls, path: Path, **kwargs: Any) -> Self:
|
| 167 |
+
"""Automatically load `model_config.yaml` and if it doesn't exist - a matching config from `litgpt/config.py`."""
|
| 168 |
+
if (config_path := path / "model_config.yaml").is_file():
|
| 169 |
+
return cls.from_file(config_path, **kwargs)
|
| 170 |
+
if (model_name := path.name) in name_to_config:
|
| 171 |
+
return cls.from_name(model_name, **kwargs)
|
| 172 |
+
raise FileNotFoundError(f"For {str(path)!r} neither 'model_config.yaml' nor matching config exists.")
|
| 173 |
+
|
| 174 |
+
@property
|
| 175 |
+
def mlp_class(self) -> Type:
|
| 176 |
+
# `self.mlp_class_name` cannot be the type to keep the config serializable
|
| 177 |
+
import litgpt.model
|
| 178 |
+
|
| 179 |
+
return getattr(litgpt.model, self.mlp_class_name)
|
| 180 |
+
|
| 181 |
+
@property
|
| 182 |
+
def norm_class(self) -> Type:
|
| 183 |
+
# `self.norm_class_name` cannot be the type to keep the config serializable
|
| 184 |
+
|
| 185 |
+
from functools import partial
|
| 186 |
+
|
| 187 |
+
import torch # Torch import is lazy to make config loading faster
|
| 188 |
+
|
| 189 |
+
if self.norm_class_name == "RMSNorm":
|
| 190 |
+
from litgpt.model import RMSNorm
|
| 191 |
+
|
| 192 |
+
return partial(RMSNorm, add_unit_offset="Gemma" in self.name)
|
| 193 |
+
|
| 194 |
+
if self.norm_class_name == "LayerNorm" and "OLMo" in self.name:
|
| 195 |
+
# this makes it equivalent to `torch.nn.functional.layer_norm`
|
| 196 |
+
# that is used by OLMo
|
| 197 |
+
# Table 5 caption in the OLMo paper shows this - https://aclanthology.org/2024.acl-long.841
|
| 198 |
+
return partial(torch.nn.LayerNorm, elementwise_affine=False)
|
| 199 |
+
|
| 200 |
+
return getattr(torch.nn, self.norm_class_name)
|
| 201 |
+
|
| 202 |
+
|
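A minimal usage sketch for the constructors defined above, assuming the module is importable as `litgpt.config` (the path named in the `from_checkpoint` docstring). "pythia-14m" is one of the names registered in the `configs` list that follows; keyword arguments override the registered values, exactly as `from_name` implements it.

from litgpt.config import Config

# look up a registered name and override one of its fields
config = Config.from_name("pythia-14m", block_size=1024)
assert config.block_size == 1024
assert config.padded_vocab_size % config.padding_multiple == 0  # padded via find_multiple in __post_init__

# the other constructors follow the same pattern (paths here are hypothetical):
# config = Config.from_file("out/pretrain/model_config.yaml")
# config = Config.from_checkpoint(Path("checkpoints/EleutherAI/pythia-14m"))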
| 203 |
+
########################
|
| 204 |
+
# Stability AI StableLM
|
| 205 |
+
########################
|
| 206 |
+
configs = [
|
| 207 |
+
# https://huggingface.co/stabilityai/stablelm-base-alpha-3b/blob/main/config.json
|
| 208 |
+
dict(name="stablelm-base-alpha-3b", hf_config=dict(org="stabilityai", name="stablelm-base-alpha-3b")),
|
| 209 |
+
# https://huggingface.co/stabilityai/stablelm-base-alpha-7b/blob/main/config.json
|
| 210 |
+
dict(
|
| 211 |
+
name="stablelm-base-alpha-7b",
|
| 212 |
+
hf_config=dict(org="stabilityai", name="stablelm-base-alpha-7b"),
|
| 213 |
+
n_head=48,
|
| 214 |
+
n_embd=6144,
|
| 215 |
+
padding_multiple=256,
|
| 216 |
+
),
|
| 217 |
+
# https://huggingface.co/stabilityai/stablelm-tuned-alpha-3b/blob/main/config.json
|
| 218 |
+
dict(name="stablelm-tuned-alpha-3b", hf_config=dict(org="stabilityai", name="stablelm-tuned-alpha-3b"), n_head=32),
|
| 219 |
+
# https://huggingface.co/stabilityai/stablelm-tuned-alpha-7b/blob/main/config.json
|
| 220 |
+
dict(
|
| 221 |
+
name="stablelm-tuned-alpha-7b",
|
| 222 |
+
hf_config=dict(org="stabilityai", name="stablelm-tuned-alpha-7b"),
|
| 223 |
+
n_head=48,
|
| 224 |
+
n_embd=6144,
|
| 225 |
+
padding_multiple=256,
|
| 226 |
+
),
|
| 227 |
+
# https://huggingface.co/stabilityai/stablelm-3b-4e1t/blob/main/config.json
|
| 228 |
+
dict(
|
| 229 |
+
name="stablelm-3b-4e1t",
|
| 230 |
+
hf_config=dict(org="stabilityai", name="stablelm-3b-4e1t"),
|
| 231 |
+
padded_vocab_size=50304,
|
| 232 |
+
n_layer=32,
|
| 233 |
+
n_head=32,
|
| 234 |
+
n_embd=2560,
|
| 235 |
+
parallel_residual=False,
|
| 236 |
+
bias=False,
|
| 237 |
+
mlp_class_name="LLaMAMLP",
|
| 238 |
+
intermediate_size=6912,
|
| 239 |
+
),
|
| 240 |
+
# https://huggingface.co/stabilityai/stablelm-zephyr-3b/blob/main/config.json
|
| 241 |
+
dict(
|
| 242 |
+
name="stablelm-zephyr-3b",
|
| 243 |
+
hf_config=dict(org="stabilityai", name="stablelm-zephyr-3b"),
|
| 244 |
+
padded_vocab_size=50304,
|
| 245 |
+
n_layer=32,
|
| 246 |
+
n_head=32,
|
| 247 |
+
n_embd=2560,
|
| 248 |
+
parallel_residual=False,
|
| 249 |
+
bias=False,
|
| 250 |
+
mlp_class_name="LLaMAMLP",
|
| 251 |
+
intermediate_size=6912,
|
| 252 |
+
),
|
| 253 |
+
]
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
##########################
|
| 257 |
+
# Stability AI StableCode
|
| 258 |
+
##########################
|
| 259 |
+
stablecode = [
|
| 260 |
+
# https://huggingface.co/stabilityai/stablecode-completion-alpha-3b/blob/main/config.json
|
| 261 |
+
dict(
|
| 262 |
+
name="stablecode-completion-alpha-3b",
|
| 263 |
+
hf_config=dict(org="stabilityai", name="stablecode-completion-alpha-3b"),
|
| 264 |
+
block_size=16384,
|
| 265 |
+
vocab_size=49152,
|
| 266 |
+
n_layer=32,
|
| 267 |
+
n_embd=2560,
|
| 268 |
+
),
|
| 269 |
+
# https://huggingface.co/stabilityai/stablecode-completion-alpha-3b-4k/blob/main/config.json
|
| 270 |
+
dict(
|
| 271 |
+
name="stablecode-completion-alpha-3b-4k",
|
| 272 |
+
hf_config=dict(org="stabilityai", name="stablecode-completion-alpha-3b-4k"),
|
| 273 |
+
vocab_size=49152,
|
| 274 |
+
n_layer=32,
|
| 275 |
+
n_embd=2560,
|
| 276 |
+
),
|
| 277 |
+
# https://huggingface.co/stabilityai/stablecode-instruct-alpha-3b/blob/main/config.json
|
| 278 |
+
dict(
|
| 279 |
+
name="stablecode-instruct-alpha-3b",
|
| 280 |
+
hf_config=dict(org="stabilityai", name="stablecode-instruct-alpha-3b"),
|
| 281 |
+
vocab_size=49152,
|
| 282 |
+
n_layer=32,
|
| 283 |
+
n_embd=2560,
|
| 284 |
+
),
|
| 285 |
+
# https://huggingface.co/stabilityai/stable-code-3b/blob/main/config.json
|
| 286 |
+
dict(
|
| 287 |
+
name="stable-code-3b",
|
| 288 |
+
hf_config=dict(org="stabilityai", name="stable-code-3b"),
|
| 289 |
+
padded_vocab_size=50304,
|
| 290 |
+
n_layer=32,
|
| 291 |
+
n_embd=2560,
|
| 292 |
+
block_size=16384,
|
| 293 |
+
parallel_residual=False,
|
| 294 |
+
bias=False,
|
| 295 |
+
mlp_class_name="LLaMAMLP",
|
| 296 |
+
intermediate_size=6912,
|
| 297 |
+
),
|
| 298 |
+
]
|
| 299 |
+
configs.extend(stablecode)
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
####################
|
| 303 |
+
# EleutherAI Pythia
|
| 304 |
+
####################
|
| 305 |
+
pythia = [
|
| 306 |
+
# https://huggingface.co/EleutherAI/pythia-14m/blob/main/config.json
|
| 307 |
+
dict(
|
| 308 |
+
name="pythia-14m",
|
| 309 |
+
hf_config=dict(org="EleutherAI", name="pythia-14m"),
|
| 310 |
+
block_size=512,
|
| 311 |
+
n_layer=6,
|
| 312 |
+
n_embd=128,
|
| 313 |
+
n_head=4,
|
| 314 |
+
padding_multiple=128,
|
| 315 |
+
),
|
| 316 |
+
# https://huggingface.co/EleutherAI/pythia-31m/blob/main/config.json
|
| 317 |
+
dict(
|
| 318 |
+
name="pythia-31m",
|
| 319 |
+
hf_config=dict(org="EleutherAI", name="pythia-31m"),
|
| 320 |
+
block_size=1024,
|
| 321 |
+
n_layer=6,
|
| 322 |
+
n_embd=256,
|
| 323 |
+
n_head=8,
|
| 324 |
+
padding_multiple=128,
|
| 325 |
+
),
|
| 326 |
+
# https://huggingface.co/EleutherAI/pythia-70m/blob/main/config.json
|
| 327 |
+
dict(
|
| 328 |
+
name="pythia-70m",
|
| 329 |
+
hf_config=dict(org="EleutherAI", name="pythia-70m"),
|
| 330 |
+
block_size=2048,
|
| 331 |
+
n_layer=6,
|
| 332 |
+
n_embd=512,
|
| 333 |
+
n_head=8,
|
| 334 |
+
padding_multiple=128,
|
| 335 |
+
),
|
| 336 |
+
# https://huggingface.co/EleutherAI/pythia-160m/blob/main/config.json
|
| 337 |
+
dict(
|
| 338 |
+
name="pythia-160m",
|
| 339 |
+
hf_config=dict(org="EleutherAI", name="pythia-160m"),
|
| 340 |
+
block_size=2048,
|
| 341 |
+
n_layer=12,
|
| 342 |
+
n_embd=768,
|
| 343 |
+
n_head=12,
|
| 344 |
+
padding_multiple=128,
|
| 345 |
+
),
|
| 346 |
+
# https://huggingface.co/EleutherAI/pythia-410m/blob/main/config.json
|
| 347 |
+
dict(
|
| 348 |
+
name="pythia-410m",
|
| 349 |
+
hf_config=dict(org="EleutherAI", name="pythia-410m"),
|
| 350 |
+
block_size=2048,
|
| 351 |
+
n_layer=24,
|
| 352 |
+
n_embd=1024,
|
| 353 |
+
n_head=16,
|
| 354 |
+
padding_multiple=128,
|
| 355 |
+
),
|
| 356 |
+
# https://huggingface.co/EleutherAI/pythia-1b/blob/main/config.json
|
| 357 |
+
dict(
|
| 358 |
+
name="pythia-1b",
|
| 359 |
+
hf_config=dict(org="EleutherAI", name="pythia-1b"),
|
| 360 |
+
block_size=2048,
|
| 361 |
+
n_embd=2048,
|
| 362 |
+
n_head=8,
|
| 363 |
+
padding_multiple=128,
|
| 364 |
+
),
|
| 365 |
+
# https://huggingface.co/EleutherAI/pythia-1.4b/blob/main/config.json
|
| 366 |
+
dict(
|
| 367 |
+
name="pythia-1.4b",
|
| 368 |
+
hf_config=dict(org="EleutherAI", name="pythia-1.4b"),
|
| 369 |
+
block_size=2048,
|
| 370 |
+
n_layer=24,
|
| 371 |
+
n_embd=2048,
|
| 372 |
+
n_head=16,
|
| 373 |
+
padding_multiple=128,
|
| 374 |
+
),
|
| 375 |
+
# https://huggingface.co/EleutherAI/pythia-2.8b/blob/main/config.json
|
| 376 |
+
dict(
|
| 377 |
+
name="pythia-2.8b",
|
| 378 |
+
hf_config=dict(org="EleutherAI", name="pythia-2.8b"),
|
| 379 |
+
block_size=2048,
|
| 380 |
+
n_layer=32,
|
| 381 |
+
n_embd=2560,
|
| 382 |
+
padding_multiple=128,
|
| 383 |
+
),
|
| 384 |
+
# https://huggingface.co/EleutherAI/pythia-6.9b/blob/main/config.json
|
| 385 |
+
dict(
|
| 386 |
+
name="pythia-6.9b",
|
| 387 |
+
hf_config=dict(org="EleutherAI", name="pythia-6.9b"),
|
| 388 |
+
block_size=2048,
|
| 389 |
+
n_layer=32,
|
| 390 |
+
padding_multiple=256,
|
| 391 |
+
),
|
| 392 |
+
# https://huggingface.co/EleutherAI/pythia-12b/blob/main/config.json
|
| 393 |
+
dict(
|
| 394 |
+
name="pythia-12b",
|
| 395 |
+
hf_config=dict(org="EleutherAI", name="pythia-12b"),
|
| 396 |
+
block_size=2048,
|
| 397 |
+
n_layer=36,
|
| 398 |
+
n_embd=5120,
|
| 399 |
+
n_head=40,
|
| 400 |
+
),
|
| 401 |
+
]
|
| 402 |
+
configs.extend(pythia)
|
| 403 |
+
for c in pythia:
|
| 404 |
+
# "pythia-14m" and "pythia-31m" don't have deduped version
|
| 405 |
+
if c["name"] in ("pythia-14m", "pythia-31m"):
|
| 406 |
+
continue
|
| 407 |
+
copy = deepcopy(c)
|
| 408 |
+
copy["name"] = f"{c['name']}-deduped"
|
| 409 |
+
copy["hf_config"]["name"] = f"{c['hf_config']['name']}-deduped"
|
| 410 |
+
configs.append(copy)
|
| 411 |
+
|
| 412 |
+
|
| 413 |
+
#################
|
| 414 |
+
# TII UAE Falcon
|
| 415 |
+
#################
|
| 416 |
+
falcon = [
|
| 417 |
+
# https://huggingface.co/tiiuae/falcon-7b/blob/main/config.json
|
| 418 |
+
dict(
|
| 419 |
+
name="falcon-7b{}",
|
| 420 |
+
hf_config=dict(org="tiiuae", name="falcon-7b{}"),
|
| 421 |
+
block_size=2048,
|
| 422 |
+
vocab_size=65024,
|
| 423 |
+
padded_vocab_size=65024,
|
| 424 |
+
n_layer=32,
|
| 425 |
+
n_head=71,
|
| 426 |
+
n_embd=4544,
|
| 427 |
+
rotary_percentage=1.0,
|
| 428 |
+
n_query_groups=1,
|
| 429 |
+
bias=False,
|
| 430 |
+
# this is not in the config, but in the original model implementation, only for this config
|
| 431 |
+
shared_attention_norm=True,
|
| 432 |
+
),
|
| 433 |
+
# https://huggingface.co/tiiuae/falcon-40b/blob/main/config.json
|
| 434 |
+
dict(
|
| 435 |
+
name="falcon-40b{}",
|
| 436 |
+
hf_config=dict(org="tiiuae", name="falcon-40b{}"),
|
| 437 |
+
block_size=2048,
|
| 438 |
+
vocab_size=65024,
|
| 439 |
+
padded_vocab_size=65024,
|
| 440 |
+
n_layer=60,
|
| 441 |
+
n_head=128,
|
| 442 |
+
n_embd=8192,
|
| 443 |
+
rotary_percentage=1.0,
|
| 444 |
+
n_query_groups=8,
|
| 445 |
+
bias=False,
|
| 446 |
+
),
|
| 447 |
+
]
|
| 448 |
+
for c in falcon:
|
| 449 |
+
for kind in ("", "-instruct"):
|
| 450 |
+
copy = deepcopy(c)
|
| 451 |
+
copy["name"] = c["name"].format(kind)
|
| 452 |
+
copy["hf_config"]["name"] = c["hf_config"]["name"].format(kind)
|
| 453 |
+
configs.append(copy)
|
| 454 |
+
|
| 455 |
+
# https://huggingface.co/tiiuae/falcon-180b/blob/main/config.json
|
| 456 |
+
falcon180b = dict(
|
| 457 |
+
name="falcon-180B{}",
|
| 458 |
+
hf_config=dict(org="tiiuae", name="falcon-180B{}"),
|
| 459 |
+
block_size=2048,
|
| 460 |
+
vocab_size=65024,
|
| 461 |
+
padded_vocab_size=65024,
|
| 462 |
+
n_layer=80,
|
| 463 |
+
n_head=232,
|
| 464 |
+
n_embd=14848,
|
| 465 |
+
rotary_percentage=1.0,
|
| 466 |
+
n_query_groups=8,
|
| 467 |
+
bias=False,
|
| 468 |
+
)
|
| 469 |
+
|
| 470 |
+
for kind in ("", "-chat"):
|
| 471 |
+
copy = deepcopy(falcon180b)
|
| 472 |
+
copy["name"] = falcon180b["name"].format(kind)
|
| 473 |
+
copy["hf_config"]["name"] = falcon180b["hf_config"]["name"].format(kind)
|
| 474 |
+
configs.append(copy)
|
| 475 |
+
|
| 476 |
+
falcon3 = [
|
| 477 |
+
# https://huggingface.co/tiiuae/Falcon3-1B-Base/blob/main/config.json
|
| 478 |
+
dict(
|
| 479 |
+
name="Falcon3-1B{}",
|
| 480 |
+
hf_config=dict(org="tiiuae", name="Falcon3-1B{}"),
|
| 481 |
+
block_size=4096,
|
| 482 |
+
vocab_size=131072,
|
| 483 |
+
padded_vocab_size=131072,
|
| 484 |
+
n_layer=18,
|
| 485 |
+
n_head=8,
|
| 486 |
+
n_query_groups=4,
|
| 487 |
+
n_embd=2048,
|
| 488 |
+
rotary_percentage=1.0,
|
| 489 |
+
parallel_residual=False,
|
| 490 |
+
rope_base=1000042,
|
| 491 |
+
norm_eps=1e-6,
|
| 492 |
+
bias=False,
|
| 493 |
+
norm_class_name="RMSNorm",
|
| 494 |
+
mlp_class_name="LLaMAMLP",
|
| 495 |
+
intermediate_size=8192,
|
| 496 |
+
),
|
| 497 |
+
# https://huggingface.co/tiiuae/Falcon3-3B-Base/blob/main/config.json
|
| 498 |
+
dict(
|
| 499 |
+
name="Falcon3-3B{}",
|
| 500 |
+
hf_config=dict(org="tiiuae", name="Falcon3-3B{}"),
|
| 501 |
+
block_size=32768,
|
| 502 |
+
vocab_size=131072,
|
| 503 |
+
padded_vocab_size=131072,
|
| 504 |
+
n_layer=22,
|
| 505 |
+
n_head=12,
|
| 506 |
+
n_query_groups=4,
|
| 507 |
+
n_embd=3072,
|
| 508 |
+
rotary_percentage=1.0,
|
| 509 |
+
parallel_residual=False,
|
| 510 |
+
rope_base=1000042,
|
| 511 |
+
norm_eps=1e-6,
|
| 512 |
+
bias=False,
|
| 513 |
+
norm_class_name="RMSNorm",
|
| 514 |
+
mlp_class_name="LLaMAMLP",
|
| 515 |
+
intermediate_size=9216,
|
| 516 |
+
),
|
| 517 |
+
# https://huggingface.co/tiiuae/Falcon3-7B-Base/blob/main/config.json
|
| 518 |
+
dict(
|
| 519 |
+
name="Falcon3-7B{}",
|
| 520 |
+
hf_config=dict(org="tiiuae", name="Falcon3-7B{}"),
|
| 521 |
+
block_size=32768,
|
| 522 |
+
vocab_size=131072,
|
| 523 |
+
padded_vocab_size=131072,
|
| 524 |
+
n_layer=28,
|
| 525 |
+
n_head=12,
|
| 526 |
+
n_query_groups=4,
|
| 527 |
+
n_embd=3072,
|
| 528 |
+
rotary_percentage=1.0,
|
| 529 |
+
parallel_residual=False,
|
| 530 |
+
rope_base=1000042,
|
| 531 |
+
norm_eps=1e-6,
|
| 532 |
+
bias=False,
|
| 533 |
+
norm_class_name="RMSNorm",
|
| 534 |
+
mlp_class_name="LLaMAMLP",
|
| 535 |
+
intermediate_size=23040,
|
| 536 |
+
),
|
| 537 |
+
# https://huggingface.co/tiiuae/Falcon3-10B-Base/blob/main/config.json
|
| 538 |
+
dict(
|
| 539 |
+
name="Falcon3-10B{}",
|
| 540 |
+
hf_config=dict(org="tiiuae", name="Falcon3-10B{}"),
|
| 541 |
+
block_size=32768,
|
| 542 |
+
vocab_size=131072,
|
| 543 |
+
padded_vocab_size=131072,
|
| 544 |
+
n_layer=40,
|
| 545 |
+
n_head=12,
|
| 546 |
+
n_query_groups=4,
|
| 547 |
+
n_embd=3072,
|
| 548 |
+
rotary_percentage=1.0,
|
| 549 |
+
parallel_residual=False,
|
| 550 |
+
rope_base=1000042,
|
| 551 |
+
norm_eps=1e-6,
|
| 552 |
+
bias=False,
|
| 553 |
+
norm_class_name="RMSNorm",
|
| 554 |
+
mlp_class_name="LLaMAMLP",
|
| 555 |
+
intermediate_size=23040,
|
| 556 |
+
),
|
| 557 |
+
]
|
| 558 |
+
for c in falcon3:
|
| 559 |
+
for kind in ("-Base", "-Instruct"):
|
| 560 |
+
copy = deepcopy(c)
|
| 561 |
+
copy["name"] = c["name"].format(kind)
|
| 562 |
+
copy["hf_config"]["name"] = c["hf_config"]["name"].format(kind)
|
| 563 |
+
configs.append(copy)
|
| 564 |
+
|
| 565 |
+
|
| 566 |
+
#############################
|
| 567 |
+
# OpenLM Research Open LLaMA
|
| 568 |
+
#############################
|
| 569 |
+
open_LLaMA = [
|
| 570 |
+
# https://huggingface.co/openlm-research/open_llama_3b/blob/main/config.json
|
| 571 |
+
dict(
|
| 572 |
+
name="open_llama_3b",
|
| 573 |
+
hf_config=dict(org="openlm-research", name="open_llama_3b"),
|
| 574 |
+
block_size=2048,
|
| 575 |
+
vocab_size=32000,
|
| 576 |
+
padding_multiple=64,
|
| 577 |
+
n_layer=26,
|
| 578 |
+
n_embd=3200,
|
| 579 |
+
rotary_percentage=1.0,
|
| 580 |
+
parallel_residual=False,
|
| 581 |
+
bias=False,
|
| 582 |
+
norm_class_name="RMSNorm",
|
| 583 |
+
norm_eps=1e-6,
|
| 584 |
+
mlp_class_name="LLaMAMLP",
|
| 585 |
+
intermediate_size=8640,
|
| 586 |
+
),
|
| 587 |
+
# https://huggingface.co/openlm-research/open_llama_7b/blob/main/config.json
|
| 588 |
+
dict(
|
| 589 |
+
name="open_llama_7b",
|
| 590 |
+
hf_config=dict(org="openlm-research", name="open_llama_7b"),
|
| 591 |
+
block_size=2048,
|
| 592 |
+
vocab_size=32000,
|
| 593 |
+
padding_multiple=64,
|
| 594 |
+
n_layer=32,
|
| 595 |
+
rotary_percentage=1.0,
|
| 596 |
+
parallel_residual=False,
|
| 597 |
+
bias=False,
|
| 598 |
+
norm_class_name="RMSNorm",
|
| 599 |
+
norm_eps=1e-6,
|
| 600 |
+
mlp_class_name="LLaMAMLP",
|
| 601 |
+
intermediate_size=11008,
|
| 602 |
+
),
|
| 603 |
+
# https://huggingface.co/openlm-research/open_llama_13b/blob/main/config.json
|
| 604 |
+
dict(
|
| 605 |
+
name="open_llama_13b",
|
| 606 |
+
hf_config=dict(org="openlm-research", name="open_llama_13b"),
|
| 607 |
+
block_size=2048,
|
| 608 |
+
vocab_size=32000,
|
| 609 |
+
padding_multiple=64,
|
| 610 |
+
n_layer=40,
|
| 611 |
+
n_head=40,
|
| 612 |
+
n_embd=5120,
|
| 613 |
+
rotary_percentage=1.0,
|
| 614 |
+
parallel_residual=False,
|
| 615 |
+
bias=False,
|
| 616 |
+
norm_class_name="RMSNorm",
|
| 617 |
+
norm_eps=1e-6,
|
| 618 |
+
mlp_class_name="LLaMAMLP",
|
| 619 |
+
intermediate_size=13824,
|
| 620 |
+
),
|
| 621 |
+
]
|
| 622 |
+
configs.extend(open_LLaMA)
|
| 623 |
+
|
| 624 |
+
###############
|
| 625 |
+
# Meta LLaMA 2
|
| 626 |
+
###############
|
| 627 |
+
llama_2 = [
|
| 628 |
+
# https://huggingface.co/meta-llama/Llama-2-7b-hf/blob/main/config.json
|
| 629 |
+
dict(
|
| 630 |
+
name="Llama-2-7b{}-hf",
|
| 631 |
+
hf_config=dict(org="meta-llama", name="Llama-2-7b{}-hf"),
|
| 632 |
+
vocab_size=32000,
|
| 633 |
+
padding_multiple=64,
|
| 634 |
+
n_layer=32,
|
| 635 |
+
rotary_percentage=1.0,
|
| 636 |
+
parallel_residual=False,
|
| 637 |
+
bias=False,
|
| 638 |
+
norm_class_name="RMSNorm",
|
| 639 |
+
mlp_class_name="LLaMAMLP",
|
| 640 |
+
intermediate_size=11008,
|
| 641 |
+
),
|
| 642 |
+
# https://huggingface.co/meta-llama/Llama-2-13b-hf/blob/main/config.json
|
| 643 |
+
dict(
|
| 644 |
+
name="Llama-2-13b{}-hf",
|
| 645 |
+
hf_config=dict(org="meta-llama", name="Llama-2-13b{}-hf"),
|
| 646 |
+
vocab_size=32000,
|
| 647 |
+
padding_multiple=64,
|
| 648 |
+
n_layer=40,
|
| 649 |
+
n_head=40,
|
| 650 |
+
n_embd=5120,
|
| 651 |
+
rotary_percentage=1.0,
|
| 652 |
+
parallel_residual=False,
|
| 653 |
+
bias=False,
|
| 654 |
+
norm_class_name="RMSNorm",
|
| 655 |
+
mlp_class_name="LLaMAMLP",
|
| 656 |
+
intermediate_size=13824,
|
| 657 |
+
),
|
| 658 |
+
# https://huggingface.co/meta-llama/Llama-2-70b-hf/blob/main/config.json
|
| 659 |
+
dict(
|
| 660 |
+
name="Llama-2-70b{}-hf",
|
| 661 |
+
hf_config=dict(org="meta-llama", name="Llama-2-70b{}-hf"),
|
| 662 |
+
vocab_size=32000,
|
| 663 |
+
padding_multiple=64,
|
| 664 |
+
n_layer=80,
|
| 665 |
+
n_head=64,
|
| 666 |
+
n_embd=8192,
|
| 667 |
+
n_query_groups=8,
|
| 668 |
+
rotary_percentage=1.0,
|
| 669 |
+
parallel_residual=False,
|
| 670 |
+
bias=False,
|
| 671 |
+
norm_class_name="RMSNorm",
|
| 672 |
+
mlp_class_name="LLaMAMLP",
|
| 673 |
+
intermediate_size=28672,
|
| 674 |
+
),
|
| 675 |
+
]
|
| 676 |
+
for c in llama_2:
|
| 677 |
+
for kind in ("", "-chat"):
|
| 678 |
+
copy = deepcopy(c)
|
| 679 |
+
copy["name"] = c["name"].format(kind)
|
| 680 |
+
copy["hf_config"]["name"] = c["hf_config"]["name"].format(kind)
|
| 681 |
+
configs.append(copy)
|
| 682 |
+
|
| 683 |
+
|
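The "{}" placeholder together with the loop above is the variant-registration pattern used throughout this file: the base entry is deep-copied once per suffix, so the `llama_2` list registers, for example, both "Llama-2-7b-hf" and "Llama-2-7b-chat-hf". A short sketch, assuming `Config.from_name` resolves names as shown earlier (either the bare name or the "org/name" form):

from litgpt.config import Config

config_a = Config.from_name("Llama-2-7b-chat-hf")             # looked up by its registered name
config_b = Config.from_name("meta-llama/Llama-2-7b-chat-hf")  # matched via the hf_config org/name
# both resolve to the same registered entry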
| 684 |
+
###############
|
| 685 |
+
# Meta LLaMA 3
|
| 686 |
+
###############
|
| 687 |
+
llama_3 = [
|
| 688 |
+
# https://huggingface.co/meta-llama/Meta-Llama-3-8B/blob/main/config.json
|
| 689 |
+
dict(
|
| 690 |
+
name="Llama-3-8B{}",
|
| 691 |
+
hf_config=dict(org="meta-llama", name="Meta-Llama-3-8B{}"),
|
| 692 |
+
block_size=8192,
|
| 693 |
+
vocab_size=128000,
|
| 694 |
+
padded_vocab_size=128256,
|
| 695 |
+
n_layer=32,
|
| 696 |
+
n_head=32,
|
| 697 |
+
n_query_groups=8,
|
| 698 |
+
rotary_percentage=1.0,
|
| 699 |
+
parallel_residual=False,
|
| 700 |
+
bias=False,
|
| 701 |
+
norm_class_name="RMSNorm",
|
| 702 |
+
mlp_class_name="LLaMAMLP",
|
| 703 |
+
intermediate_size=14336,
|
| 704 |
+
rope_base=500000,
|
| 705 |
+
),
|
| 706 |
+
# https://huggingface.co/meta-llama/Meta-Llama-3.1-8B/blob/main/config.json
|
| 707 |
+
dict(
|
| 708 |
+
name="Llama-3.1-8B{}",
|
| 709 |
+
hf_config=dict(org="meta-llama", name="Meta-Llama-3.1-8B{}"),
|
| 710 |
+
block_size=131072,
|
| 711 |
+
vocab_size=128000,
|
| 712 |
+
padded_vocab_size=128256,
|
| 713 |
+
n_layer=32,
|
| 714 |
+
n_head=32,
|
| 715 |
+
n_query_groups=8,
|
| 716 |
+
rotary_percentage=1.0,
|
| 717 |
+
parallel_residual=False,
|
| 718 |
+
bias=False,
|
| 719 |
+
norm_class_name="RMSNorm",
|
| 720 |
+
mlp_class_name="LLaMAMLP",
|
| 721 |
+
intermediate_size=14336,
|
| 722 |
+
rope_base=500000,
|
| 723 |
+
rope_adjustments=dict(factor=8.0, low_freq_factor=1.0, high_freq_factor=4.0, original_max_seq_len=8192),
|
| 724 |
+
),
|
| 725 |
+
# https://huggingface.co/meta-llama/Meta-Llama-3-70B/blob/main/config.json
|
| 726 |
+
dict(
|
| 727 |
+
name="Llama-3-70B{}",
|
| 728 |
+
hf_config=dict(org="meta-llama", name="Meta-Llama-3-70B{}"),
|
| 729 |
+
block_size=8192,
|
| 730 |
+
vocab_size=128000,
|
| 731 |
+
padded_vocab_size=128256,
|
| 732 |
+
n_layer=80,
|
| 733 |
+
n_head=64,
|
| 734 |
+
n_embd=8192,
|
| 735 |
+
n_query_groups=8,
|
| 736 |
+
rotary_percentage=1.0,
|
| 737 |
+
parallel_residual=False,
|
| 738 |
+
bias=False,
|
| 739 |
+
norm_class_name="RMSNorm",
|
| 740 |
+
mlp_class_name="LLaMAMLP",
|
| 741 |
+
intermediate_size=28672,
|
| 742 |
+
rope_base=500000,
|
| 743 |
+
),
|
| 744 |
+
# https://huggingface.co/meta-llama/Meta-Llama-3.1-70B/blob/main/config.json
|
| 745 |
+
dict(
|
| 746 |
+
name="Llama-3.1-70B{}",
|
| 747 |
+
hf_config=dict(org="meta-llama", name="Meta-Llama-3.1-70B{}"),
|
| 748 |
+
block_size=131072,
|
| 749 |
+
vocab_size=128000,
|
| 750 |
+
padded_vocab_size=128256,
|
| 751 |
+
n_layer=80,
|
| 752 |
+
n_head=64,
|
| 753 |
+
n_embd=8192,
|
| 754 |
+
n_query_groups=8,
|
| 755 |
+
rotary_percentage=1.0,
|
| 756 |
+
parallel_residual=False,
|
| 757 |
+
bias=False,
|
| 758 |
+
norm_class_name="RMSNorm",
|
| 759 |
+
mlp_class_name="LLaMAMLP",
|
| 760 |
+
intermediate_size=28672,
|
| 761 |
+
rope_base=500000,
|
| 762 |
+
rope_adjustments=dict(factor=8.0, low_freq_factor=1.0, high_freq_factor=4.0, original_max_seq_len=8192),
|
| 763 |
+
),
|
| 764 |
+
# https://huggingface.co/meta-llama/Meta-Llama-3.1-405B/blob/main/config.json
|
| 765 |
+
dict(
|
| 766 |
+
name="Llama-3.1-405B{}",
|
| 767 |
+
hf_config=dict(org="meta-llama", name="Meta-Llama-3.1-405B{}"),
|
| 768 |
+
block_size=131072,
|
| 769 |
+
vocab_size=128000,
|
| 770 |
+
padded_vocab_size=128256,
|
| 771 |
+
n_layer=126,
|
| 772 |
+
n_head=128,
|
| 773 |
+
n_embd=16384,
|
| 774 |
+
n_query_groups=8,
|
| 775 |
+
rotary_percentage=1.0,
|
| 776 |
+
parallel_residual=False,
|
| 777 |
+
bias=False,
|
| 778 |
+
norm_class_name="RMSNorm",
|
| 779 |
+
mlp_class_name="LLaMAMLP",
|
| 780 |
+
intermediate_size=53248,
|
| 781 |
+
rope_base=500000,
|
| 782 |
+
rope_adjustments=dict(factor=8.0, low_freq_factor=1.0, high_freq_factor=4.0, original_max_seq_len=8192),
|
| 783 |
+
),
|
| 784 |
+
# https://huggingface.co/meta-llama/Llama-3.2-1B/blob/main/config.json
|
| 785 |
+
dict(
|
| 786 |
+
name="Llama-3.2-1B{}",
|
| 787 |
+
hf_config=dict(org="meta-llama", name="Llama-3.2-1B{}"),
|
| 788 |
+
block_size=131072,
|
| 789 |
+
vocab_size=128000,
|
| 790 |
+
padded_vocab_size=128256,
|
| 791 |
+
n_layer=16,
|
| 792 |
+
n_embd=2048,
|
| 793 |
+
n_head=32,
|
| 794 |
+
n_query_groups=8,
|
| 795 |
+
rotary_percentage=1.0,
|
| 796 |
+
parallel_residual=False,
|
| 797 |
+
bias=False,
|
| 798 |
+
norm_class_name="RMSNorm",
|
| 799 |
+
mlp_class_name="LLaMAMLP",
|
| 800 |
+
intermediate_size=8192,
|
| 801 |
+
rope_base=500000,
|
| 802 |
+
rope_adjustments=dict(factor=32.0, low_freq_factor=1.0, high_freq_factor=4.0, original_max_seq_len=8192),
|
| 803 |
+
),
|
| 804 |
+
# https://huggingface.co/meta-llama/Llama-3.2-3B/blob/main/config.json
|
| 805 |
+
dict(
|
| 806 |
+
name="Llama-3.2-3B{}",
|
| 807 |
+
hf_config=dict(org="meta-llama", name="Llama-3.2-3B{}"),
|
| 808 |
+
block_size=131072,
|
| 809 |
+
vocab_size=128000,
|
| 810 |
+
padded_vocab_size=128256,
|
| 811 |
+
n_layer=28,
|
| 812 |
+
n_embd=3072,
|
| 813 |
+
n_head=24,
|
| 814 |
+
n_query_groups=8,
|
| 815 |
+
rotary_percentage=1.0,
|
| 816 |
+
parallel_residual=False,
|
| 817 |
+
bias=False,
|
| 818 |
+
norm_class_name="RMSNorm",
|
| 819 |
+
mlp_class_name="LLaMAMLP",
|
| 820 |
+
intermediate_size=8192,
|
| 821 |
+
rope_base=500000,
|
| 822 |
+
rope_adjustments=dict(factor=32.0, low_freq_factor=1.0, high_freq_factor=4.0, original_max_seq_len=8192),
|
| 823 |
+
),
|
| 824 |
+
# https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct/blob/main/config.json
|
| 825 |
+
dict(
|
| 826 |
+
name="Llama-3.3-70B-Instruct",
|
| 827 |
+
hf_config=dict(org="meta-llama", name="Llama-3.3-70B-Instruct"),
|
| 828 |
+
block_size=131072,
|
| 829 |
+
vocab_size=128000,
|
| 830 |
+
padded_vocab_size=128256,
|
| 831 |
+
n_layer=80,
|
| 832 |
+
n_head=64,
|
| 833 |
+
n_embd=8192,
|
| 834 |
+
n_query_groups=8,
|
| 835 |
+
rotary_percentage=1.0,
|
| 836 |
+
parallel_residual=False,
|
| 837 |
+
bias=False,
|
| 838 |
+
norm_class_name="RMSNorm",
|
| 839 |
+
mlp_class_name="LLaMAMLP",
|
| 840 |
+
intermediate_size=28672,
|
| 841 |
+
rope_base=500000,
|
| 842 |
+
rope_adjustments=dict(factor=8.0, low_freq_factor=1.0, high_freq_factor=4.0, original_max_seq_len=8192),
|
| 843 |
+
),
|
| 844 |
+
]
|
| 845 |
+
for c in llama_3:
|
| 846 |
+
if c["name"] == "Llama-3.3-70B-Instruct":
|
| 847 |
+
configs.append(c)
|
| 848 |
+
continue
|
| 849 |
+
for kind in ("", "-Instruct"):
|
| 850 |
+
copy = deepcopy(c)
|
| 851 |
+
copy["name"] = c["name"].format(kind)
|
| 852 |
+
copy["hf_config"]["name"] = c["hf_config"]["name"].format(kind)
|
| 853 |
+
configs.append(copy)
|
| 854 |
+
|
| 855 |
+
#########################
|
| 856 |
+
# NVIDIA Llama Nemotron
|
| 857 |
+
#########################
|
| 858 |
+
configs.append(
|
| 859 |
+
dict(
|
| 860 |
+
name="Llama-3.1-Nemotron-70B-Instruct-HF",
|
| 861 |
+
hf_config=dict(org="nvidia", name="Llama-3.1-Nemotron-70B-Instruct-HF"),
|
| 862 |
+
block_size=131072,
|
| 863 |
+
vocab_size=128000,
|
| 864 |
+
padded_vocab_size=128256,
|
| 865 |
+
n_layer=80,
|
| 866 |
+
n_head=64,
|
| 867 |
+
n_embd=8192,
|
| 868 |
+
n_query_groups=8,
|
| 869 |
+
rotary_percentage=1.0,
|
| 870 |
+
parallel_residual=False,
|
| 871 |
+
bias=False,
|
| 872 |
+
norm_class_name="RMSNorm",
|
| 873 |
+
mlp_class_name="LLaMAMLP",
|
| 874 |
+
intermediate_size=28672,
|
| 875 |
+
rope_base=500000,
|
| 876 |
+
rope_adjustments=dict(factor=8.0, low_freq_factor=1.0, high_freq_factor=4.0, original_max_seq_len=8192),
|
| 877 |
+
),
|
| 878 |
+
)
|
| 879 |
+
|
| 880 |
+
#################
|
| 881 |
+
# Allen AI OLMo
|
| 882 |
+
#################
|
| 883 |
+
olmo = [
|
| 884 |
+
# https://huggingface.co/allenai/OLMo-1B-hf/blob/main/config.json
|
| 885 |
+
dict(
|
| 886 |
+
name="OLMo-1B-hf",
|
| 887 |
+
hf_config=dict(org="allenai", name="OLMo-1B-hf"),
|
| 888 |
+
vocab_size=50280,
|
| 889 |
+
padded_vocab_size=50304,
|
| 890 |
+
block_size=2048,
|
| 891 |
+
n_embd=2048,
|
| 892 |
+
n_layer=16,
|
| 893 |
+
n_head=16,
|
| 894 |
+
rotary_percentage=1.0,
|
| 895 |
+
parallel_residual=False,
|
| 896 |
+
bias=False,
|
| 897 |
+
norm_class_name="LayerNorm",
|
| 898 |
+
mlp_class_name="LLaMAMLP",
|
| 899 |
+
intermediate_size=8192,
|
| 900 |
+
),
|
| 901 |
+
# https://huggingface.co/allenai/OLMo-7B-hf/blob/main/config.json
|
| 902 |
+
dict(
|
| 903 |
+
name="OLMo-7B-hf",
|
| 904 |
+
hf_config=dict(org="allenai", name="OLMo-7B-hf"),
|
| 905 |
+
vocab_size=50280,
|
| 906 |
+
padded_vocab_size=50304,
|
| 907 |
+
block_size=2048,
|
| 908 |
+
n_layer=32,
|
| 909 |
+
n_head=32,
|
| 910 |
+
rotary_percentage=1.0,
|
| 911 |
+
parallel_residual=False,
|
| 912 |
+
bias=False,
|
| 913 |
+
norm_class_name="LayerNorm",
|
| 914 |
+
mlp_class_name="LLaMAMLP",
|
| 915 |
+
intermediate_size=11008,
|
| 916 |
+
),
|
| 917 |
+
# https://huggingface.co/allenai/OLMo-7B-Instruct-hf/blob/main/config.json
|
| 918 |
+
dict(
|
| 919 |
+
name="OLMo-7B-Instruct-hf",
|
| 920 |
+
hf_config=dict(org="allenai", name="OLMo-7B-Instruct-hf"),
|
| 921 |
+
vocab_size=50280,
|
| 922 |
+
padded_vocab_size=50304,
|
| 923 |
+
block_size=2048,
|
| 924 |
+
n_layer=32,
|
| 925 |
+
n_head=32,
|
| 926 |
+
rotary_percentage=1.0,
|
| 927 |
+
parallel_residual=False,
|
| 928 |
+
bias=False,
|
| 929 |
+
norm_class_name="LayerNorm",
|
| 930 |
+
mlp_class_name="LLaMAMLP",
|
| 931 |
+
intermediate_size=11008,
|
| 932 |
+
),
|
| 933 |
+
]
|
| 934 |
+
|
| 935 |
+
configs.extend(olmo)
|
| 936 |
+
|
| 937 |
+
olmo2 = [
|
| 938 |
+
# https://huggingface.co/allenai/OLMo-2-1124-7B/blob/main/config.json
|
| 939 |
+
dict(
|
| 940 |
+
name="OLMo-2-1124-7B{}",
|
| 941 |
+
hf_config=dict(org="allenai", name="OLMo-2-1124-7B{}"),
|
| 942 |
+
vocab_size=100278,
|
| 943 |
+
padded_vocab_size=100352,
|
| 944 |
+
block_size=4096,
|
| 945 |
+
n_embd=4096,
|
| 946 |
+
n_layer=32,
|
| 947 |
+
n_head=32,
|
| 948 |
+
n_query_groups=32,
|
| 949 |
+
rotary_percentage=1.0,
|
| 950 |
+
parallel_residual=False,
|
| 951 |
+
bias=False,
|
| 952 |
+
norm_class_name="RMSNorm",
|
| 953 |
+
mlp_class_name="LLaMAMLP",
|
| 954 |
+
norm_eps=1e-06,
|
| 955 |
+
intermediate_size=11008,
|
| 956 |
+
rope_base=500000,
|
| 957 |
+
norm_qk=True,
|
| 958 |
+
post_mlp_norm=True,
|
| 959 |
+
norm_1=False,
|
| 960 |
+
norm_2=False,
|
| 961 |
+
norm_qk_type="olmo2",
|
| 962 |
+
post_attention_norm=True,
|
| 963 |
+
),
|
| 964 |
+
# https://huggingface.co/allenai/OLMo-2-1124-13B/blob/main/config.json
|
| 965 |
+
dict(
|
| 966 |
+
name="OLMo-2-1124-13B{}",
|
| 967 |
+
hf_config=dict(org="allenai", name="OLMo-2-1124-13B{}"),
|
| 968 |
+
vocab_size=100278,
|
| 969 |
+
padded_vocab_size=100352,
|
| 970 |
+
block_size=4096,
|
| 971 |
+
n_embd=5120,
|
| 972 |
+
n_layer=40,
|
| 973 |
+
n_head=40,
|
| 974 |
+
n_query_groups=40,
|
| 975 |
+
rotary_percentage=1.0,
|
| 976 |
+
parallel_residual=False,
|
| 977 |
+
bias=False,
|
| 978 |
+
norm_class_name="RMSNorm",
|
| 979 |
+
mlp_class_name="LLaMAMLP",
|
| 980 |
+
norm_eps=1e-06,
|
| 981 |
+
intermediate_size=13824,
|
| 982 |
+
rope_base=500000,
|
| 983 |
+
norm_qk=True,
|
| 984 |
+
post_mlp_norm=True,
|
| 985 |
+
norm_1=False,
|
| 986 |
+
norm_2=False,
|
| 987 |
+
norm_qk_type="olmo2",
|
| 988 |
+
post_attention_norm=True,
|
| 989 |
+
),
|
| 990 |
+
]
|
| 991 |
+
|
| 992 |
+
for c in olmo2:
|
| 993 |
+
for kind in ("", "-SFT", "-DPO", "-Instruct"):
|
| 994 |
+
copy = deepcopy(c)
|
| 995 |
+
copy["name"] = c["name"].format(kind)
|
| 996 |
+
copy["hf_config"]["name"] = c["hf_config"]["name"].format(kind)
|
| 997 |
+
configs.append(copy)
|
| 998 |
+
|
| 999 |
+
###############
|
| 1000 |
+
# Google Gemma
|
| 1001 |
+
###############
|
| 1002 |
+
gemma = [
|
| 1003 |
+
# https://huggingface.co/google/gemma-2b/blob/main/config.json
|
| 1004 |
+
dict(
|
| 1005 |
+
name="Gemma-2b",
|
| 1006 |
+
hf_config=dict(org="google", name="gemma-2b"),
|
| 1007 |
+
scale_embeddings=True,
|
| 1008 |
+
vocab_size=256000,
|
| 1009 |
+
padding_multiple=64,
|
| 1010 |
+
n_embd=2048,
|
| 1011 |
+
n_layer=18,
|
| 1012 |
+
n_head=8,
|
| 1013 |
+
n_query_groups=1,
|
| 1014 |
+
rotary_percentage=1.0,
|
| 1015 |
+
parallel_residual=False,
|
| 1016 |
+
bias=False,
|
| 1017 |
+
norm_class_name="RMSNorm",
|
| 1018 |
+
mlp_class_name="GemmaMLP",
|
| 1019 |
+
gelu_approximate="tanh",
|
| 1020 |
+
intermediate_size=16384,
|
| 1021 |
+
),
|
| 1022 |
+
# https://huggingface.co/google/gemma-7b/blob/main/config.json
|
| 1023 |
+
dict(
|
| 1024 |
+
name="Gemma-7b",
|
| 1025 |
+
hf_config=dict(org="google", name="gemma-7b"),
|
| 1026 |
+
scale_embeddings=True,
|
| 1027 |
+
vocab_size=256000,
|
| 1028 |
+
padding_multiple=64,
|
| 1029 |
+
n_embd=3072,
|
| 1030 |
+
n_layer=28,
|
| 1031 |
+
n_head=16,
|
| 1032 |
+
head_size=256,
|
| 1033 |
+
rotary_percentage=1.0,
|
| 1034 |
+
parallel_residual=False,
|
| 1035 |
+
bias=False,
|
| 1036 |
+
norm_class_name="RMSNorm",
|
| 1037 |
+
mlp_class_name="GemmaMLP",
|
| 1038 |
+
gelu_approximate="tanh",
|
| 1039 |
+
intermediate_size=24576,
|
| 1040 |
+
),
|
| 1041 |
+
# https://huggingface.co/google/gemma-2-2b/blob/main/config.json
|
| 1042 |
+
dict(
|
| 1043 |
+
name="Gemma-2-2b",
|
| 1044 |
+
hf_config=dict(org="google", name="gemma-2-2b"),
|
| 1045 |
+
scale_embeddings=True,
|
| 1046 |
+
attention_scores_scalar=256,
|
| 1047 |
+
vocab_size=256000,
|
| 1048 |
+
block_size=8192,
|
| 1049 |
+
sliding_window_size=4096,
|
| 1050 |
+
# only layers with idx 0, 2, 4, ... have sliding window attention
|
| 1051 |
+
sliding_window_indices=[1 if i % 2 == 0 else 0 for i in range(26)],
|
| 1052 |
+
intermediate_size=9216,
|
| 1053 |
+
n_embd=2304,
|
| 1054 |
+
n_layer=26,
|
| 1055 |
+
n_head=8,
|
| 1056 |
+
n_query_groups=4,
|
| 1057 |
+
head_size=256,
|
| 1058 |
+
rotary_percentage=1.0,
|
| 1059 |
+
parallel_residual=False,
|
| 1060 |
+
bias=False,
|
| 1061 |
+
norm_class_name="RMSNorm",
|
| 1062 |
+
mlp_class_name="GemmaMLP",
|
| 1063 |
+
gelu_approximate="tanh",
|
| 1064 |
+
post_attention_norm=True,
|
| 1065 |
+
post_mlp_norm=True,
|
| 1066 |
+
attention_logit_softcapping=50.0,
|
| 1067 |
+
final_logit_softcapping=30.0,
|
| 1068 |
+
),
|
| 1069 |
+
# https://huggingface.co/google/gemma-2-9b/blob/main/config.json
|
| 1070 |
+
dict(
|
| 1071 |
+
name="Gemma-2-9b",
|
| 1072 |
+
hf_config=dict(org="google", name="gemma-2-9b"),
|
| 1073 |
+
scale_embeddings=True,
|
| 1074 |
+
attention_scores_scalar=256,
|
| 1075 |
+
vocab_size=256000,
|
| 1076 |
+
block_size=8192,
|
| 1077 |
+
sliding_window_size=4096,
|
| 1078 |
+
# only layers with idx 0, 2, 4, ... have sliding window attention
|
| 1079 |
+
sliding_window_indices=[1 if i % 2 == 0 else 0 for i in range(42)],
|
| 1080 |
+
intermediate_size=14336,
|
| 1081 |
+
n_embd=3584,
|
| 1082 |
+
n_layer=42,
|
| 1083 |
+
n_head=16,
|
| 1084 |
+
n_query_groups=8,
|
| 1085 |
+
head_size=256,
|
| 1086 |
+
rotary_percentage=1.0,
|
| 1087 |
+
parallel_residual=False,
|
| 1088 |
+
bias=False,
|
| 1089 |
+
norm_class_name="RMSNorm",
|
| 1090 |
+
mlp_class_name="GemmaMLP",
|
| 1091 |
+
gelu_approximate="tanh",
|
| 1092 |
+
post_attention_norm=True,
|
| 1093 |
+
post_mlp_norm=True,
|
| 1094 |
+
attention_logit_softcapping=50.0,
|
| 1095 |
+
final_logit_softcapping=30.0,
|
| 1096 |
+
),
|
| 1097 |
+
# https://huggingface.co/google/gemma-2-27b/blob/main/config.json
|
| 1098 |
+
dict(
|
| 1099 |
+
name="Gemma-2-27b",
|
| 1100 |
+
hf_config=dict(org="google", name="gemma-2-27b"),
|
| 1101 |
+
scale_embeddings=True,
|
| 1102 |
+
# In Gemma 2 27B attention scores are scaled not by `sqrt(head_size)` (11.31),
|
| 1103 |
+
# but by `sqrt(n_emb // n_head)` = sqrt(4608 // 32) = 12
|
| 1104 |
+
attention_scores_scalar=144,
|
| 1105 |
+
vocab_size=256000,
|
| 1106 |
+
block_size=8192,
|
| 1107 |
+
sliding_window_size=4096,
|
| 1108 |
+
# only layers with idx 0, 2, 4, ... have sliding window attention
|
| 1109 |
+
sliding_window_indices=[1 if i % 2 == 0 else 0 for i in range(46)],
|
| 1110 |
+
intermediate_size=36864,
|
| 1111 |
+
n_embd=4608,
|
| 1112 |
+
n_layer=46,
|
| 1113 |
+
n_head=32,
|
| 1114 |
+
n_query_groups=16,
|
| 1115 |
+
head_size=128,
|
| 1116 |
+
rotary_percentage=1.0,
|
| 1117 |
+
parallel_residual=False,
|
| 1118 |
+
bias=False,
|
| 1119 |
+
norm_class_name="RMSNorm",
|
| 1120 |
+
mlp_class_name="GemmaMLP",
|
| 1121 |
+
gelu_approximate="tanh",
|
| 1122 |
+
post_attention_norm=True,
|
| 1123 |
+
post_mlp_norm=True,
|
| 1124 |
+
attention_logit_softcapping=50.0,
|
| 1125 |
+
final_logit_softcapping=30.0,
|
| 1126 |
+
),
|
| 1127 |
+
]
|
| 1128 |
+
configs.extend(gemma)
|
| 1129 |
+
for c in gemma:
|
| 1130 |
+
copy = deepcopy(c)
|
| 1131 |
+
copy["name"] = f"{c['name']}-it"
|
| 1132 |
+
copy["hf_config"]["name"] = f"{c['hf_config']['name']}-it"
|
| 1133 |
+
configs.append(copy)
|
| 1134 |
+
|
| 1135 |
+
##################
|
| 1136 |
+
# Google Gemma 3
|
| 1137 |
+
##################
|
| 1138 |
+
gemma3 = [
|
| 1139 |
+
# https://huggingface.co/google/gemma-3-1b-it/blob/main/config.json
|
| 1140 |
+
dict(
|
| 1141 |
+
name="Gemma-3-1b-it",
|
| 1142 |
+
hf_config=dict(org="google", name="gemma-3-1b-it"),
|
| 1143 |
+
scale_embeddings=True,
|
| 1144 |
+
attention_scores_scalar=256,
|
| 1145 |
+
vocab_size=262144,
|
| 1146 |
+
block_size=131072,
|
| 1147 |
+
sliding_window_size=512,
|
| 1148 |
+
# 5 local layers for every global layer
|
| 1149 |
+
sliding_window_indices=[0 if (i + 1) % 6 == 0 else 1 for i in range(26)],
|
| 1150 |
+
intermediate_size=6912,
|
| 1151 |
+
n_embd=1152,
|
| 1152 |
+
n_layer=26,
|
| 1153 |
+
n_head=4,
|
| 1154 |
+
n_query_groups=1,
|
| 1155 |
+
head_size=256,
|
| 1156 |
+
rotary_percentage=1.0,
|
| 1157 |
+
rope_adjustments=None,
|
| 1158 |
+
parallel_residual=False,
|
| 1159 |
+
bias=False,
|
| 1160 |
+
norm_class_name="RMSNorm",
|
| 1161 |
+
mlp_class_name="GemmaMLP",
|
| 1162 |
+
gelu_approximate="tanh",
|
| 1163 |
+
post_attention_norm=True,
|
| 1164 |
+
post_mlp_norm=True,
|
| 1165 |
+
norm_qk=True,
|
| 1166 |
+
rope_base=1000000,
|
| 1167 |
+
rope_local_base_freq=10000,
|
| 1168 |
+
# 5 local layers for every global layer
|
| 1169 |
+
rope_indices=[0 if (i + 1) % 6 == 0 else 1 for i in range(26)],
|
| 1170 |
+
),
|
| 1171 |
+
# https://huggingface.co/google/gemma-3-4b-it/blob/main/config.json
|
| 1172 |
+
dict(
|
| 1173 |
+
name="Gemma-3-4b-it",
|
| 1174 |
+
hf_config=dict(org="google", name="gemma-3-4b-it"),
|
| 1175 |
+
scale_embeddings=True,
|
| 1176 |
+
attention_scores_scalar=256,
|
| 1177 |
+
vocab_size=262144,
|
| 1178 |
+
block_size=131072,
|
| 1179 |
+
sliding_window_size=1024,
|
| 1180 |
+
# 5 local layers for every global layer
|
| 1181 |
+
sliding_window_indices=[0 if (i + 1) % 6 == 0 else 1 for i in range(34)],
|
| 1182 |
+
intermediate_size=10240,
|
| 1183 |
+
n_embd=2560,
|
| 1184 |
+
n_layer=34,
|
| 1185 |
+
n_head=8,
|
| 1186 |
+
n_query_groups=4,
|
| 1187 |
+
head_size=256,
|
| 1188 |
+
rotary_percentage=1.0,
|
| 1189 |
+
rope_adjustments=dict(factor=8.0),
|
| 1190 |
+
parallel_residual=False,
|
| 1191 |
+
bias=False,
|
| 1192 |
+
norm_class_name="RMSNorm",
|
| 1193 |
+
mlp_class_name="GemmaMLP",
|
| 1194 |
+
gelu_approximate="tanh",
|
| 1195 |
+
post_attention_norm=True,
|
| 1196 |
+
post_mlp_norm=True,
|
| 1197 |
+
norm_qk=True,
|
| 1198 |
+
rope_base=1000000,
|
| 1199 |
+
rope_local_base_freq=10000,
|
| 1200 |
+
# 5 local layers for every global layer
|
| 1201 |
+
rope_indices=[0 if (i + 1) % 6 == 0 else 1 for i in range(34)],
|
| 1202 |
+
),
|
| 1203 |
+
# https://huggingface.co/google/gemma-3-12b-it/blob/main/config.json
|
| 1204 |
+
dict(
|
| 1205 |
+
name="Gemma-3-12b-it",
|
| 1206 |
+
hf_config=dict(org="google", name="gemma-3-12b-it"),
|
| 1207 |
+
scale_embeddings=True,
|
| 1208 |
+
attention_scores_scalar=256,
|
| 1209 |
+
vocab_size=262144,
|
| 1210 |
+
block_size=131072,
|
| 1211 |
+
sliding_window_size=1024,
|
| 1212 |
+
# 5 local layers for every global layer
|
| 1213 |
+
sliding_window_indices=[0 if (i + 1) % 6 == 0 else 1 for i in range(48)],
|
| 1214 |
+
intermediate_size=15360,
|
| 1215 |
+
n_embd=3840,
|
| 1216 |
+
n_layer=48,
|
| 1217 |
+
n_head=16,
|
| 1218 |
+
n_query_groups=8,
|
| 1219 |
+
head_size=256,
|
| 1220 |
+
rotary_percentage=1.0,
|
| 1221 |
+
rope_adjustments=dict(factor=8.0),
|
| 1222 |
+
parallel_residual=False,
|
| 1223 |
+
bias=False,
|
| 1224 |
+
norm_class_name="RMSNorm",
|
| 1225 |
+
mlp_class_name="GemmaMLP",
|
| 1226 |
+
gelu_approximate="tanh",
|
| 1227 |
+
post_attention_norm=True,
|
| 1228 |
+
post_mlp_norm=True,
|
| 1229 |
+
norm_qk=True,
|
| 1230 |
+
rope_base=1000000,
|
| 1231 |
+
rope_local_base_freq=10000,
|
| 1232 |
+
# 5 local layers for every global layer
|
| 1233 |
+
rope_indices=[0 if (i + 1) % 6 == 0 else 1 for i in range(48)],
|
| 1234 |
+
),
|
| 1235 |
+
# https://huggingface.co/google/gemma-3-27b-it/blob/main/config.json
|
| 1236 |
+
dict(
|
| 1237 |
+
name="Gemma-3-27b-it",
|
| 1238 |
+
hf_config=dict(org="google", name="gemma-3-27b-it"),
|
| 1239 |
+
scale_embeddings=True,
|
| 1240 |
+
attention_scores_scalar=168,
|
| 1241 |
+
vocab_size=262144,
|
| 1242 |
+
block_size=131072,
|
| 1243 |
+
sliding_window_size=1024,
|
| 1244 |
+
# 5 local layers for every global layer
|
| 1245 |
+
sliding_window_indices=[0 if (i + 1) % 6 == 0 else 1 for i in range(62)],
|
| 1246 |
+
intermediate_size=21504,
|
| 1247 |
+
n_embd=5376,
|
| 1248 |
+
n_layer=62,
|
| 1249 |
+
n_head=32,
|
| 1250 |
+
n_query_groups=16,
|
| 1251 |
+
head_size=128,
|
| 1252 |
+
rotary_percentage=1.0,
|
| 1253 |
+
rope_adjustments=dict(factor=8.0),
|
| 1254 |
+
parallel_residual=False,
|
| 1255 |
+
bias=False,
|
| 1256 |
+
norm_class_name="RMSNorm",
|
| 1257 |
+
mlp_class_name="GemmaMLP",
|
| 1258 |
+
gelu_approximate="tanh",
|
| 1259 |
+
post_attention_norm=True,
|
| 1260 |
+
post_mlp_norm=True,
|
| 1261 |
+
norm_qk=True,
|
| 1262 |
+
rope_base=1000000,
|
| 1263 |
+
rope_local_base_freq=10000,
|
| 1264 |
+
# 5 local layers for every global layer
|
| 1265 |
+
rope_indices=[0 if (i + 1) % 6 == 0 else 1 for i in range(62)],
|
| 1266 |
+
),
|
| 1267 |
+
]
|
| 1268 |
+
configs.extend(gemma3)
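The list comprehension repeated in each Gemma 3 entry encodes the "5 local layers for every global layer" schedule: every sixth layer gets a 0 (global attention) and the rest get a 1 (sliding-window attention, presumably, given the field name). Evaluating it for the 1B config's 26 layers as a quick check:

pattern = [0 if (i + 1) % 6 == 0 else 1 for i in range(26)]
print(pattern[:12])      # [1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0]
print(pattern.count(0))  # 4 global layers (indices 5, 11, 17, 23)
print(pattern.count(1))  # 22 sliding-window layers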
|
| 1269 |
+
|
| 1270 |
+
##################
|
| 1271 |
+
# Google CodeGemma
|
| 1272 |
+
##################
|
| 1273 |
+
codegemma = [
|
| 1274 |
+
# https://huggingface.co/google/codegemma-7b-it/blob/main/config.json
|
| 1275 |
+
dict(
|
| 1276 |
+
name="CodeGemma-7b-it",
|
| 1277 |
+
hf_config=dict(org="google", name="codegemma-7b-it"),
|
| 1278 |
+
scale_embeddings=True,
|
| 1279 |
+
vocab_size=256000,
|
| 1280 |
+
padding_multiple=64,
|
| 1281 |
+
n_embd=3072,
|
| 1282 |
+
n_layer=28,
|
| 1283 |
+
n_head=16,
|
| 1284 |
+
head_size=256,
|
| 1285 |
+
rotary_percentage=1.0,
|
| 1286 |
+
parallel_residual=False,
|
| 1287 |
+
bias=False,
|
| 1288 |
+
norm_class_name="RMSNorm",
|
| 1289 |
+
mlp_class_name="GemmaMLP",
|
| 1290 |
+
gelu_approximate="tanh",
|
| 1291 |
+
intermediate_size=24576,
|
| 1292 |
+
),
|
| 1293 |
+
]
|
| 1294 |
+
configs.extend(codegemma)
|
| 1295 |
+
|
| 1296 |
+
|
| 1297 |
+
##########################
|
| 1298 |
+
# Stability AI FreeWilly2
|
| 1299 |
+
##########################
|
| 1300 |
+
freewilly_2 = [
|
| 1301 |
+
# https://huggingface.co/stabilityai/FreeWilly2/blob/main/config.json
|
| 1302 |
+
dict(
|
| 1303 |
+
name="FreeWilly2",
|
| 1304 |
+
hf_config=dict(org="stabilityai", name="FreeWilly2"),
|
| 1305 |
+
vocab_size=32000,
|
| 1306 |
+
padding_multiple=64,
|
| 1307 |
+
n_layer=80,
|
| 1308 |
+
n_head=64,
|
| 1309 |
+
n_embd=8192,
|
| 1310 |
+
n_query_groups=8,
|
| 1311 |
+
rotary_percentage=1.0,
|
| 1312 |
+
parallel_residual=False,
|
| 1313 |
+
bias=False,
|
| 1314 |
+
norm_class_name="RMSNorm",
|
| 1315 |
+
mlp_class_name="LLaMAMLP",
|
| 1316 |
+
intermediate_size=28672,
|
| 1317 |
+
)
|
| 1318 |
+
]
|
| 1319 |
+
configs.extend(freewilly_2)
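FreeWilly2 is a 70B-class Llama architecture with grouped-query attention: 64 query heads share 8 key/value groups. A back-of-the-envelope sketch, assuming head_size defaults to n_embd // n_head when it is not set explicitly:

n_embd, n_head, n_query_groups = 8192, 64, 8
head_size = n_embd // n_head                  # 128
queries_per_group = n_head // n_query_groups  # 8 query heads share each K/V pair
kv_cache_ratio = n_query_groups / n_head      # 0.125, i.e. an 8x smaller KV cache than full multi-head attention
print(head_size, queries_per_group, kv_cache_ratio)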
|
| 1320 |
+
|
| 1321 |
+
|
| 1322 |
+
##################
|
| 1323 |
+
# Meta Code Llama
|
| 1324 |
+
##################
|
| 1325 |
+
code_llama = [
|
| 1326 |
+
# https://huggingface.co/codellama/CodeLlama-7b-hf/blob/main/config.json
|
| 1327 |
+
dict(
|
| 1328 |
+
name="CodeLlama-7b-hf",
|
| 1329 |
+
hf_config=dict(org="codellama", name="CodeLlama-7b-hf"),
|
| 1330 |
+
block_size=16384,
|
| 1331 |
+
vocab_size=32016,
|
| 1332 |
+
padding_multiple=16,
|
| 1333 |
+
n_layer=32,
|
| 1334 |
+
rotary_percentage=1.0,
|
| 1335 |
+
parallel_residual=False,
|
| 1336 |
+
bias=False,
|
| 1337 |
+
norm_class_name="RMSNorm",
|
| 1338 |
+
norm_eps=1e-05,
|
| 1339 |
+
mlp_class_name="LLaMAMLP",
|
| 1340 |
+
intermediate_size=11008,
|
| 1341 |
+
rope_base=1000000,
|
| 1342 |
+
),
|
| 1343 |
+
# https://huggingface.co/codellama/CodeLlama-13b-hf/blob/main/config.json
|
| 1344 |
+
dict(
|
| 1345 |
+
name="CodeLlama-13b-hf",
|
| 1346 |
+
hf_config=dict(org="codellama", name="CodeLlama-13b-hf"),
|
| 1347 |
+
block_size=16384,
|
| 1348 |
+
vocab_size=32016,
|
| 1349 |
+
padding_multiple=16,
|
| 1350 |
+
n_layer=40,
|
| 1351 |
+
n_head=40,
|
| 1352 |
+
n_embd=5120,
|
| 1353 |
+
rotary_percentage=1.0,
|
| 1354 |
+
parallel_residual=False,
|
| 1355 |
+
bias=False,
|
| 1356 |
+
norm_class_name="RMSNorm",
|
| 1357 |
+
norm_eps=1e-05,
|
| 1358 |
+
mlp_class_name="LLaMAMLP",
|
| 1359 |
+
intermediate_size=13824,
|
| 1360 |
+
rope_base=1000000,
|
| 1361 |
+
),
|
| 1362 |
+
# https://huggingface.co/codellama/CodeLlama-34b-hf/blob/main/config.json
|
| 1363 |
+
dict(
|
| 1364 |
+
name="CodeLlama-34b-hf",
|
| 1365 |
+
hf_config=dict(org="codellama", name="CodeLlama-34b-hf"),
|
| 1366 |
+
block_size=16384,
|
| 1367 |
+
vocab_size=32000,
|
| 1368 |
+
padded_vocab_size=32000,
|
| 1369 |
+
n_layer=48,
|
| 1370 |
+
n_head=64,
|
| 1371 |
+
n_embd=8192,
|
| 1372 |
+
n_query_groups=8,
|
| 1373 |
+
rotary_percentage=1.0,
|
| 1374 |
+
parallel_residual=False,
|
| 1375 |
+
bias=False,
|
| 1376 |
+
norm_class_name="RMSNorm",
|
| 1377 |
+
norm_eps=1e-05,
|
| 1378 |
+
mlp_class_name="LLaMAMLP",
|
| 1379 |
+
intermediate_size=22016,
|
| 1380 |
+
rope_base=1000000,
|
| 1381 |
+
),
|
| 1382 |
+
# https://huggingface.co/codellama/CodeLlama-70b-hf/blob/main/config.json
|
| 1383 |
+
dict(
|
| 1384 |
+
name="CodeLlama-70b-hf",
|
| 1385 |
+
hf_config=dict(org="codellama", name="CodeLlama-70b-hf"),
|
| 1386 |
+
block_size=16384,
|
| 1387 |
+
vocab_size=32016,
|
| 1388 |
+
padding_multiple=16,
|
| 1389 |
+
n_layer=80,
|
| 1390 |
+
n_head=64,
|
| 1391 |
+
n_embd=8192,
|
| 1392 |
+
n_query_groups=8,
|
| 1393 |
+
rotary_percentage=1.0,
|
| 1394 |
+
parallel_residual=False,
|
| 1395 |
+
bias=False,
|
| 1396 |
+
norm_class_name="RMSNorm",
|
| 1397 |
+
norm_eps=1e-05,
|
| 1398 |
+
mlp_class_name="LLaMAMLP",
|
| 1399 |
+
intermediate_size=28672,
|
| 1400 |
+
rope_base=1000000,
|
| 1401 |
+
),
|
| 1402 |
+
# https://huggingface.co/codellama/CodeLlama-7b-Python-hf/blob/main/config.json
|
| 1403 |
+
dict(
|
| 1404 |
+
name="CodeLlama-7b-Python-hf",
|
| 1405 |
+
hf_config=dict(org="codellama", name="CodeLlama-7b-Python-hf"),
|
| 1406 |
+
block_size=16384,
|
| 1407 |
+
vocab_size=32000,
|
| 1408 |
+
padded_vocab_size=32000,
|
| 1409 |
+
n_layer=32,
|
| 1410 |
+
rotary_percentage=1.0,
|
| 1411 |
+
parallel_residual=False,
|
| 1412 |
+
bias=False,
|
| 1413 |
+
norm_class_name="RMSNorm",
|
| 1414 |
+
norm_eps=1e-05,
|
| 1415 |
+
mlp_class_name="LLaMAMLP",
|
| 1416 |
+
intermediate_size=11008,
|
| 1417 |
+
rope_base=1000000,
|
| 1418 |
+
),
|
| 1419 |
+
# https://huggingface.co/codellama/CodeLlama-13b-Python-hf/blob/main/config.json
|
| 1420 |
+
dict(
|
| 1421 |
+
name="CodeLlama-13b-Python-hf",
|
| 1422 |
+
hf_config=dict(org="codellama", name="CodeLlama-13b-Python-hf"),
|
| 1423 |
+
block_size=16384,
|
| 1424 |
+
vocab_size=32000,
|
| 1425 |
+
padded_vocab_size=32000,
|
| 1426 |
+
n_layer=40,
|
| 1427 |
+
n_head=40,
|
| 1428 |
+
n_embd=5120,
|
| 1429 |
+
rotary_percentage=1.0,
|
| 1430 |
+
parallel_residual=False,
|
| 1431 |
+
bias=False,
|
| 1432 |
+
norm_class_name="RMSNorm",
|
| 1433 |
+
norm_eps=1e-05,
|
| 1434 |
+
mlp_class_name="LLaMAMLP",
|
| 1435 |
+
intermediate_size=13824,
|
| 1436 |
+
rope_base=1000000,
|
| 1437 |
+
),
|
| 1438 |
+
# https://huggingface.co/codellama/CodeLlama-34b-Python-hf/blob/main/config.json
|
| 1439 |
+
dict(
|
| 1440 |
+
name="CodeLlama-34b-Python-hf",
|
| 1441 |
+
hf_config=dict(org="codellama", name="CodeLlama-34b-Python-hf"),
|
| 1442 |
+
block_size=16384,
|
| 1443 |
+
vocab_size=32000,
|
| 1444 |
+
padded_vocab_size=32000,
|
| 1445 |
+
n_layer=48,
|
| 1446 |
+
n_head=64,
|
| 1447 |
+
n_embd=8192,
|
| 1448 |
+
n_query_groups=8,
|
| 1449 |
+
rotary_percentage=1.0,
|
| 1450 |
+
parallel_residual=False,
|
| 1451 |
+
bias=False,
|
| 1452 |
+
norm_class_name="RMSNorm",
|
| 1453 |
+
norm_eps=1e-05,
|
| 1454 |
+
mlp_class_name="LLaMAMLP",
|
| 1455 |
+
intermediate_size=22016,
|
| 1456 |
+
rope_base=1000000,
|
| 1457 |
+
),
|
| 1458 |
+
# https://huggingface.co/codellama/CodeLlama-70b-Python-hf/blob/main/config.json
|
| 1459 |
+
dict(
|
| 1460 |
+
name="CodeLlama-70b-Python-hf",
|
| 1461 |
+
hf_config=dict(org="codellama", name="CodeLlama-70b-Python-hf"),
|
| 1462 |
+
block_size=16384,
|
| 1463 |
+
vocab_size=32016,
|
| 1464 |
+
padding_multiple=16,
|
| 1465 |
+
n_layer=80,
|
| 1466 |
+
n_head=64,
|
| 1467 |
+
n_embd=8192,
|
| 1468 |
+
n_query_groups=8,
|
| 1469 |
+
rotary_percentage=1.0,
|
| 1470 |
+
parallel_residual=False,
|
| 1471 |
+
bias=False,
|
| 1472 |
+
norm_class_name="RMSNorm",
|
| 1473 |
+
norm_eps=1e-05,
|
| 1474 |
+
mlp_class_name="LLaMAMLP",
|
| 1475 |
+
intermediate_size=28672,
|
| 1476 |
+
rope_base=1000000,
|
| 1477 |
+
),
|
| 1478 |
+
# https://huggingface.co/codellama/CodeLlama-7b-Instruct-hf/blob/main/config.json
|
| 1479 |
+
dict(
|
| 1480 |
+
name="CodeLlama-7b-Instruct-hf",
|
| 1481 |
+
hf_config=dict(org="codellama", name="CodeLlama-7b-Instruct-hf"),
|
| 1482 |
+
block_size=16384,
|
| 1483 |
+
vocab_size=32016,
|
| 1484 |
+
padding_multiple=16,
|
| 1485 |
+
n_layer=32,
|
| 1486 |
+
rotary_percentage=1.0,
|
| 1487 |
+
parallel_residual=False,
|
| 1488 |
+
bias=False,
|
| 1489 |
+
norm_class_name="RMSNorm",
|
| 1490 |
+
norm_eps=1e-05,
|
| 1491 |
+
mlp_class_name="LLaMAMLP",
|
| 1492 |
+
intermediate_size=11008,
|
| 1493 |
+
rope_base=1000000,
|
| 1494 |
+
),
|
| 1495 |
+
# https://huggingface.co/codellama/CodeLlama-13b-Instruct-hf/blob/main/config.json
|
| 1496 |
+
dict(
|
| 1497 |
+
name="CodeLlama-13b-Instruct-hf",
|
| 1498 |
+
hf_config=dict(org="codellama", name="CodeLlama-13b-Instruct-hf"),
|
| 1499 |
+
block_size=2048,
|
| 1500 |
+
vocab_size=32016,
|
| 1501 |
+
padding_multiple=16,
|
| 1502 |
+
n_layer=40,
|
| 1503 |
+
n_head=40,
|
| 1504 |
+
n_embd=5120,
|
| 1505 |
+
rotary_percentage=1.0,
|
| 1506 |
+
parallel_residual=False,
|
| 1507 |
+
bias=False,
|
| 1508 |
+
norm_class_name="RMSNorm",
|
| 1509 |
+
norm_eps=1e-05,
|
| 1510 |
+
mlp_class_name="LLaMAMLP",
|
| 1511 |
+
intermediate_size=13824,
|
| 1512 |
+
rope_base=1000000,
|
| 1513 |
+
),
|
| 1514 |
+
# https://huggingface.co/codellama/CodeLlama-34b-Instruct-hf/blob/main/config.json
|
| 1515 |
+
dict(
|
| 1516 |
+
name="CodeLlama-34b-Instruct-hf",
|
| 1517 |
+
hf_config=dict(org="codellama", name="CodeLlama-34b-Instruct-hf"),
|
| 1518 |
+
block_size=16384,
|
| 1519 |
+
vocab_size=32000,
|
| 1520 |
+
padded_vocab_size=32000,
|
| 1521 |
+
n_layer=48,
|
| 1522 |
+
n_head=64,
|
| 1523 |
+
n_embd=8192,
|
| 1524 |
+
n_query_groups=8,
|
| 1525 |
+
rotary_percentage=1.0,
|
| 1526 |
+
parallel_residual=False,
|
| 1527 |
+
bias=False,
|
| 1528 |
+
norm_class_name="RMSNorm",
|
| 1529 |
+
norm_eps=1e-05,
|
| 1530 |
+
mlp_class_name="LLaMAMLP",
|
| 1531 |
+
intermediate_size=22016,
|
| 1532 |
+
rope_base=1000000,
|
| 1533 |
+
),
|
| 1534 |
+
# https://huggingface.co/codellama/CodeLlama-70b-Instruct-hf/blob/main/config.json
|
| 1535 |
+
dict(
|
| 1536 |
+
name="CodeLlama-70b-Instruct-hf",
|
| 1537 |
+
hf_config=dict(org="codellama", name="CodeLlama-70b-Instruct-hf"),
|
| 1538 |
+
block_size=16384,
|
| 1539 |
+
# 32016 is an added token, so it is not reported in vocab_size
|
| 1540 |
+
# https://huggingface.co/codellama/CodeLlama-70b-Instruct-hf/blob/main/tokenizer_config.json
|
| 1541 |
+
vocab_size=32015,
|
| 1542 |
+
padding_multiple=16,
|
| 1543 |
+
n_layer=80,
|
| 1544 |
+
n_head=64,
|
| 1545 |
+
n_embd=8192,
|
| 1546 |
+
n_query_groups=8,
|
| 1547 |
+
rotary_percentage=1.0,
|
| 1548 |
+
parallel_residual=False,
|
| 1549 |
+
bias=False,
|
| 1550 |
+
norm_class_name="RMSNorm",
|
| 1551 |
+
norm_eps=1e-05,
|
| 1552 |
+
mlp_class_name="LLaMAMLP",
|
| 1553 |
+
intermediate_size=28672,
|
| 1554 |
+
rope_base=1000000,
|
| 1555 |
+
),
|
| 1556 |
+
]
|
| 1557 |
+
configs.extend(code_llama)
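Several Code Llama entries give vocab_size=32016 with padding_multiple=16, while others pin padded_vocab_size=32000 directly. When only a multiple is given, the padded size is presumably the vocabulary rounded up to that multiple; a small sketch of that rounding:

def round_up(value: int, multiple: int) -> int:
    # Round `value` up to the next multiple of `multiple`.
    return ((value + multiple - 1) // multiple) * multiple

print(round_up(32016, 16))  # 32016, already a multiple of 16
print(round_up(32015, 16))  # 32016, covering CodeLlama-70b-Instruct-hf's extra added token
print(round_up(50257, 64))  # 50304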
|
| 1558 |
+
|
| 1559 |
+
|
| 1560 |
+
########################
|
| 1561 |
+
# garage-bAInd Platypus
|
| 1562 |
+
########################
|
| 1563 |
+
platypus = [
|
| 1564 |
+
# https://huggingface.co/garage-bAInd/Platypus-30B/blob/main/config.json
|
| 1565 |
+
dict(
|
| 1566 |
+
name="Platypus-30B",
|
| 1567 |
+
hf_config=dict(org="garage-bAInd", name="Platypus-30B"),
|
| 1568 |
+
block_size=2048,
|
| 1569 |
+
padded_vocab_size=32000,
|
| 1570 |
+
n_layer=60,
|
| 1571 |
+
n_head=52,
|
| 1572 |
+
n_embd=6656,
|
| 1573 |
+
rotary_percentage=1.0,
|
| 1574 |
+
parallel_residual=False,
|
| 1575 |
+
bias=False,
|
| 1576 |
+
norm_class_name="RMSNorm",
|
| 1577 |
+
norm_eps=1e-06,
|
| 1578 |
+
mlp_class_name="LLaMAMLP",
|
| 1579 |
+
intermediate_size=17920,
|
| 1580 |
+
),
|
| 1581 |
+
# https://huggingface.co/garage-bAInd/Platypus2-7B/blob/main/config.json
|
| 1582 |
+
dict(
|
| 1583 |
+
name="Platypus2-7B",
|
| 1584 |
+
hf_config=dict(org="garage-bAInd", name="Platypus2-7B"),
|
| 1585 |
+
padded_vocab_size=32000,
|
| 1586 |
+
n_layer=32,
|
| 1587 |
+
rotary_percentage=1.0,
|
| 1588 |
+
parallel_residual=False,
|
| 1589 |
+
bias=False,
|
| 1590 |
+
norm_class_name="RMSNorm",
|
| 1591 |
+
norm_eps=1e-05,
|
| 1592 |
+
mlp_class_name="LLaMAMLP",
|
| 1593 |
+
intermediate_size=11008,
|
| 1594 |
+
),
|
| 1595 |
+
# https://huggingface.co/garage-bAInd/Platypus2-13B/blob/main/config.json
|
| 1596 |
+
dict(
|
| 1597 |
+
name="Platypus2-13B",
|
| 1598 |
+
hf_config=dict(org="garage-bAInd", name="Platypus2-13B"),
|
| 1599 |
+
padded_vocab_size=32000,
|
| 1600 |
+
n_layer=40,
|
| 1601 |
+
n_head=40,
|
| 1602 |
+
n_embd=5120,
|
| 1603 |
+
rotary_percentage=1.0,
|
| 1604 |
+
parallel_residual=False,
|
| 1605 |
+
bias=False,
|
| 1606 |
+
norm_class_name="RMSNorm",
|
| 1607 |
+
norm_eps=1e-05,
|
| 1608 |
+
mlp_class_name="LLaMAMLP",
|
| 1609 |
+
intermediate_size=13824,
|
| 1610 |
+
),
|
| 1611 |
+
# https://huggingface.co/garage-bAInd/Platypus2-70B/blob/main/config.json
|
| 1612 |
+
dict(
|
| 1613 |
+
name="Platypus2-70B",
|
| 1614 |
+
hf_config=dict(org="garage-bAInd", name="Platypus2-70B"),
|
| 1615 |
+
padded_vocab_size=32000,
|
| 1616 |
+
n_layer=80,
|
| 1617 |
+
n_head=64,
|
| 1618 |
+
n_embd=8192,
|
| 1619 |
+
rotary_percentage=1.0,
|
| 1620 |
+
parallel_residual=False,
|
| 1621 |
+
bias=False,
|
| 1622 |
+
norm_class_name="RMSNorm",
|
| 1623 |
+
mlp_class_name="LLaMAMLP",
|
| 1624 |
+
intermediate_size=28672,
|
| 1625 |
+
),
|
| 1626 |
+
# https://huggingface.co/garage-bAInd/Camel-Platypus2-13B/blob/main/config.json
|
| 1627 |
+
dict(
|
| 1628 |
+
name="Camel-Platypus2-13B",
|
| 1629 |
+
hf_config=dict(org="garage-bAInd", name="Camel-Platypus2-13B"),
|
| 1630 |
+
padded_vocab_size=32000,
|
| 1631 |
+
n_layer=40,
|
| 1632 |
+
n_head=40,
|
| 1633 |
+
n_embd=5120,
|
| 1634 |
+
rotary_percentage=1.0,
|
| 1635 |
+
parallel_residual=False,
|
| 1636 |
+
bias=False,
|
| 1637 |
+
norm_class_name="RMSNorm",
|
| 1638 |
+
mlp_class_name="LLaMAMLP",
|
| 1639 |
+
intermediate_size=13824,
|
| 1640 |
+
),
|
| 1641 |
+
# https://huggingface.co/garage-bAInd/Camel-Platypus2-70B/blob/main/config.json
|
| 1642 |
+
dict(
|
| 1643 |
+
name="Camel-Platypus2-70B",
|
| 1644 |
+
hf_config=dict(org="garage-bAInd", name="Camel-Platypus2-70B"),
|
| 1645 |
+
padded_vocab_size=32000,
|
| 1646 |
+
n_layer=80,
|
| 1647 |
+
n_head=64,
|
| 1648 |
+
n_embd=8192,
|
| 1649 |
+
n_query_groups=8,
|
| 1650 |
+
rotary_percentage=1.0,
|
| 1651 |
+
parallel_residual=False,
|
| 1652 |
+
bias=False,
|
| 1653 |
+
norm_class_name="RMSNorm",
|
| 1654 |
+
mlp_class_name="LLaMAMLP",
|
| 1655 |
+
intermediate_size=28672,
|
| 1656 |
+
),
|
| 1657 |
+
# https://huggingface.co/garage-bAInd/Stable-Platypus2-13B/blob/main/config.json
|
| 1658 |
+
dict(
|
| 1659 |
+
name="Stable-Platypus2-13B",
|
| 1660 |
+
hf_config=dict(org="garage-bAInd", name="Stable-Platypus2-13B"),
|
| 1661 |
+
padded_vocab_size=32000,
|
| 1662 |
+
n_layer=40,
|
| 1663 |
+
n_head=40,
|
| 1664 |
+
n_embd=5120,
|
| 1665 |
+
rotary_percentage=1.0,
|
| 1666 |
+
parallel_residual=False,
|
| 1667 |
+
bias=False,
|
| 1668 |
+
norm_class_name="RMSNorm",
|
| 1669 |
+
mlp_class_name="LLaMAMLP",
|
| 1670 |
+
intermediate_size=13824,
|
| 1671 |
+
),
|
| 1672 |
+
# https://huggingface.co/garage-bAInd/Platypus2-70B-instruct/blob/main/config.json
|
| 1673 |
+
dict(
|
| 1674 |
+
name="Platypus2-70B-instruct",
|
| 1675 |
+
hf_config=dict(org="garage-bAInd", name="Platypus2-70B-instruct"),
|
| 1676 |
+
padded_vocab_size=32000,
|
| 1677 |
+
n_layer=80,
|
| 1678 |
+
n_head=64,
|
| 1679 |
+
n_embd=8192,
|
| 1680 |
+
n_query_groups=8,
|
| 1681 |
+
rotary_percentage=1.0,
|
| 1682 |
+
parallel_residual=False,
|
| 1683 |
+
bias=False,
|
| 1684 |
+
norm_class_name="RMSNorm",
|
| 1685 |
+
mlp_class_name="LLaMAMLP",
|
| 1686 |
+
intermediate_size=28672,
|
| 1687 |
+
),
|
| 1688 |
+
]
|
| 1689 |
+
configs.extend(platypus)
|
| 1690 |
+
|
| 1691 |
+
|
| 1692 |
+
##################################
|
| 1693 |
+
# togethercomputer LLaMA-2-7B-32K
|
| 1694 |
+
##################################
|
| 1695 |
+
together_llama2_32k = [
|
| 1696 |
+
# https://huggingface.co/togethercomputer/LLaMA-2-7B-32K/blob/main/config.json
|
| 1697 |
+
dict(
|
| 1698 |
+
name="LLaMA-2-7B-32K",
|
| 1699 |
+
hf_config=dict(org="togethercomputer", name="LLaMA-2-7B-32K"),
|
| 1700 |
+
vocab_size=32000,
|
| 1701 |
+
padding_multiple=64,
|
| 1702 |
+
n_layer=32,
|
| 1703 |
+
rotary_percentage=1.0,
|
| 1704 |
+
parallel_residual=False,
|
| 1705 |
+
bias=False,
|
| 1706 |
+
norm_class_name="RMSNorm",
|
| 1707 |
+
mlp_class_name="LLaMAMLP",
|
| 1708 |
+
intermediate_size=11008,
|
| 1709 |
+
rope_condense_ratio=8,
|
| 1710 |
+
)
|
| 1711 |
+
]
|
| 1712 |
+
configs.extend(together_llama2_32k)
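rope_condense_ratio=8 is the position-interpolation trick used to stretch Llama 2's original 4k context to 32k: positions are scaled down by the ratio before the rotary angles are computed, so position 32000 lands where position 4000 would have. A minimal sketch of the idea, not litgpt's actual implementation:

def rope_angle(position: int, dim_pair: int, head_size: int = 128, base: float = 10000.0, condense: float = 8.0) -> float:
    # Scale the position by the condense ratio, then apply the usual RoPE frequency for this dimension pair.
    freq = base ** (-2 * dim_pair / head_size)
    return (position / condense) * freq

print(rope_angle(32000, 3) == rope_angle(4000, 3, condense=1.0))  # True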
|
| 1713 |
+
|
| 1714 |
+
|
| 1715 |
+
################
|
| 1716 |
+
# Microsoft Phi
|
| 1717 |
+
################
|
| 1718 |
+
phi = [
|
| 1719 |
+
# https://huggingface.co/microsoft/phi-1_5/blob/main/config.json
|
| 1720 |
+
dict(
|
| 1721 |
+
name="phi-1_5",
|
| 1722 |
+
hf_config=dict(org="microsoft", name="phi-1_5"),
|
| 1723 |
+
vocab_size=50257,
|
| 1724 |
+
padded_vocab_size=51200,
|
| 1725 |
+
block_size=2048,
|
| 1726 |
+
n_embd=2048,
|
| 1727 |
+
n_layer=24,
|
| 1728 |
+
rotary_percentage=0.5, # 32 / (n_embd / n_head) = 32 / 64
|
| 1729 |
+
shared_attention_norm=True,
|
| 1730 |
+
lm_head_bias=True,
|
| 1731 |
+
gelu_approximate="tanh",
|
| 1732 |
+
),
|
| 1733 |
+
# https://huggingface.co/microsoft/phi-2/blob/main/config.json
|
| 1734 |
+
dict(
|
| 1735 |
+
name="phi-2",
|
| 1736 |
+
hf_config=dict(org="microsoft", name="phi-2"),
|
| 1737 |
+
vocab_size=50257,
|
| 1738 |
+
padded_vocab_size=51200,
|
| 1739 |
+
block_size=2048,
|
| 1740 |
+
n_embd=2560,
|
| 1741 |
+
n_layer=32,
|
| 1742 |
+
rotary_percentage=0.4, # 32 / (n_embd / n_head) = 32 / 80
|
| 1743 |
+
shared_attention_norm=True,
|
| 1744 |
+
lm_head_bias=True,
|
| 1745 |
+
gelu_approximate="tanh",
|
| 1746 |
+
),
|
| 1747 |
+
# https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/config.json
|
| 1748 |
+
dict(
|
| 1749 |
+
name="Phi-3-mini-4k-instruct",
|
| 1750 |
+
hf_config=dict(org="microsoft", name="Phi-3-mini-4k-instruct"),
|
| 1751 |
+
vocab_size=32000,
|
| 1752 |
+
padded_vocab_size=32064,
|
| 1753 |
+
block_size=4096,
|
| 1754 |
+
n_embd=3072,
|
| 1755 |
+
n_layer=32,
|
| 1756 |
+
rotary_percentage=1.0,
|
| 1757 |
+
bias=False,
|
| 1758 |
+
norm_class_name="RMSNorm",
|
| 1759 |
+
intermediate_size=8192,
|
| 1760 |
+
mlp_class_name="LLaMAMLP",
|
| 1761 |
+
parallel_residual=False,
|
| 1762 |
+
sliding_window_size=2048,
|
| 1763 |
+
),
|
| 1764 |
+
# https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/blob/main/config.json
|
| 1765 |
+
dict(
|
| 1766 |
+
name="Phi-3-mini-128k-instruct",
|
| 1767 |
+
hf_config=dict(org="microsoft", name="Phi-3-mini-128k-instruct"),
|
| 1768 |
+
vocab_size=32000,
|
| 1769 |
+
padded_vocab_size=32064,
|
| 1770 |
+
block_size=131072,
|
| 1771 |
+
n_embd=3072,
|
| 1772 |
+
n_layer=32,
|
| 1773 |
+
rotary_percentage=1.0,
|
| 1774 |
+
bias=False,
|
| 1775 |
+
norm_class_name="RMSNorm",
|
| 1776 |
+
intermediate_size=8192,
|
| 1777 |
+
mlp_class_name="LLaMAMLP",
|
| 1778 |
+
parallel_residual=False,
|
| 1779 |
+
sliding_window_size=262145,
|
| 1780 |
+
),
|
| 1781 |
+
# https://huggingface.co/microsoft/Phi-3.5-mini-instruct/blob/main/config.json
|
| 1782 |
+
dict(
|
| 1783 |
+
name="Phi-3.5-mini-instruct",
|
| 1784 |
+
hf_config=dict(org="microsoft", name="Phi-3.5-mini-instruct"),
|
| 1785 |
+
vocab_size=32000,
|
| 1786 |
+
padded_vocab_size=32064,
|
| 1787 |
+
block_size=4096,
|
| 1788 |
+
n_embd=3072,
|
| 1789 |
+
n_layer=32,
|
| 1790 |
+
rotary_percentage=1.0,
|
| 1791 |
+
bias=False,
|
| 1792 |
+
norm_class_name="RMSNorm",
|
| 1793 |
+
intermediate_size=8192,
|
| 1794 |
+
mlp_class_name="LLaMAMLP",
|
| 1795 |
+
parallel_residual=False,
|
| 1796 |
+
),
|
| 1797 |
+
# https://huggingface.co/microsoft/phi-4/blob/main/config.json
|
| 1798 |
+
dict(
|
| 1799 |
+
name="phi-4",
|
| 1800 |
+
hf_config=dict(org="microsoft", name="phi-4"),
|
| 1801 |
+
vocab_size=100352,
|
| 1802 |
+
padded_vocab_size=100352,
|
| 1803 |
+
block_size=16384,
|
| 1804 |
+
n_embd=5120,
|
| 1805 |
+
n_layer=40,
|
| 1806 |
+
n_head=40,
|
| 1807 |
+
n_query_groups=10,
|
| 1808 |
+
rotary_percentage=1.0,
|
| 1809 |
+
bias=False,
|
| 1810 |
+
norm_class_name="RMSNorm",
|
| 1811 |
+
intermediate_size=17920,
|
| 1812 |
+
rope_base=250000,
|
| 1813 |
+
mlp_class_name="LLaMAMLP",
|
| 1814 |
+
parallel_residual=False,
|
| 1815 |
+
),
|
| 1816 |
+
# https://huggingface.co/microsoft/Phi-4-reasoning/blob/main/config.json
|
| 1817 |
+
dict(
|
| 1818 |
+
name="Phi-4-reasoning",
|
| 1819 |
+
hf_config=dict(org="microsoft", name="Phi-4-reasoning"),
|
| 1820 |
+
vocab_size=100352,
|
| 1821 |
+
padded_vocab_size=100352,
|
| 1822 |
+
block_size=32768,
|
| 1823 |
+
n_embd=5120,
|
| 1824 |
+
n_layer=40,
|
| 1825 |
+
n_head=40,
|
| 1826 |
+
n_query_groups=10,
|
| 1827 |
+
rotary_percentage=1.0,
|
| 1828 |
+
bias=False,
|
| 1829 |
+
norm_class_name="RMSNorm",
|
| 1830 |
+
intermediate_size=17920,
|
| 1831 |
+
rope_base=500000,
|
| 1832 |
+
mlp_class_name="LLaMAMLP",
|
| 1833 |
+
parallel_residual=False,
|
| 1834 |
+
),
|
| 1835 |
+
# https://huggingface.co/microsoft/Phi-4-reasoning-plus/blob/main/config.json
|
| 1836 |
+
dict(
|
| 1837 |
+
name="Phi-4-reasoning-plus",
|
| 1838 |
+
hf_config=dict(org="microsoft", name="Phi-4-reasoning-plus"),
|
| 1839 |
+
vocab_size=100352,
|
| 1840 |
+
padded_vocab_size=100352,
|
| 1841 |
+
block_size=32768,
|
| 1842 |
+
n_embd=5120,
|
| 1843 |
+
n_layer=40,
|
| 1844 |
+
n_head=40,
|
| 1845 |
+
n_query_groups=10,
|
| 1846 |
+
rotary_percentage=1.0,
|
| 1847 |
+
bias=False,
|
| 1848 |
+
norm_class_name="RMSNorm",
|
| 1849 |
+
intermediate_size=17920,
|
| 1850 |
+
rope_base=500000,
|
| 1851 |
+
mlp_class_name="LLaMAMLP",
|
| 1852 |
+
parallel_residual=False,
|
| 1853 |
+
),
|
| 1854 |
+
# https://huggingface.co/microsoft/Phi-4-mini-instruct/blob/main/config.json
|
| 1855 |
+
dict(
|
| 1856 |
+
name="Phi-4-mini-instruct",
|
| 1857 |
+
hf_config=dict(org="microsoft", name="Phi-4-mini-instruct"),
|
| 1858 |
+
vocab_size=200019,
|
| 1859 |
+
padded_vocab_size=200064,
|
| 1860 |
+
block_size=131072,
|
| 1861 |
+
n_embd=3072,
|
| 1862 |
+
n_layer=32,
|
| 1863 |
+
n_head=24,
|
| 1864 |
+
n_query_groups=8,
|
| 1865 |
+
rotary_percentage=0.75,
|
| 1866 |
+
bias=False,
|
| 1867 |
+
norm_class_name="RMSNorm",
|
| 1868 |
+
intermediate_size=8192,
|
| 1869 |
+
mlp_class_name="LLaMAMLP",
|
| 1870 |
+
parallel_residual=False,
|
| 1871 |
+
sliding_window_size=262145,
|
| 1872 |
+
),
|
| 1873 |
+
# https://huggingface.co/microsoft/Phi-4-mini-reasoning/blob/main/config.json
|
| 1874 |
+
dict(
|
| 1875 |
+
name="Phi-4-mini-reasoning",
|
| 1876 |
+
hf_config=dict(org="microsoft", name="Phi-4-mini-reasoning"),
|
| 1877 |
+
vocab_size=200019,
|
| 1878 |
+
padded_vocab_size=200064,
|
| 1879 |
+
block_size=131072,
|
| 1880 |
+
n_embd=3072,
|
| 1881 |
+
n_layer=32,
|
| 1882 |
+
n_head=24,
|
| 1883 |
+
n_query_groups=8,
|
| 1884 |
+
rotary_percentage=0.75,
|
| 1885 |
+
bias=False,
|
| 1886 |
+
norm_class_name="RMSNorm",
|
| 1887 |
+
intermediate_size=8192,
|
| 1888 |
+
mlp_class_name="LLaMAMLP",
|
| 1889 |
+
parallel_residual=False,
|
| 1890 |
+
sliding_window_size=262145,
|
| 1891 |
+
),
|
| 1892 |
+
]
|
| 1893 |
+
configs.extend(phi)
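The comments on phi-1_5 and phi-2 spell out where the fractional rotary_percentage values come from: only the first 32 dimensions of each attention head receive rotary embeddings (with the default of 32 heads implied by the arithmetic in those comments). As a worked check:

for n_embd, n_head, rotary_percentage in [(2048, 32, 0.5), (2560, 32, 0.4)]:
    head_size = n_embd // n_head
    rope_dims = int(rotary_percentage * head_size)
    print(head_size, rope_dims)  # (64, 32) then (80, 32)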
|
| 1894 |
+
|
| 1895 |
+
|
| 1896 |
+
#############
|
| 1897 |
+
# Mistral AI
|
| 1898 |
+
#############
|
| 1899 |
+
|
| 1900 |
+
configs.append(
|
| 1901 |
+
# https://huggingface.co/mistralai/mathstral-7B-v0.1/blob/main/config.json
|
| 1902 |
+
dict(
|
| 1903 |
+
name="Mathstral-7B-v0.1",
|
| 1904 |
+
hf_config=dict(org="mistralai", name="mathstral-7B-v0.1"),
|
| 1905 |
+
padded_vocab_size=32768,
|
| 1906 |
+
block_size=32768,
|
| 1907 |
+
n_layer=32,
|
| 1908 |
+
n_query_groups=8,
|
| 1909 |
+
rotary_percentage=1.0,
|
| 1910 |
+
parallel_residual=False,
|
| 1911 |
+
bias=False,
|
| 1912 |
+
norm_class_name="RMSNorm",
|
| 1913 |
+
norm_eps=1e-05,
|
| 1914 |
+
mlp_class_name="LLaMAMLP",
|
| 1915 |
+
intermediate_size=14336,
|
| 1916 |
+
sliding_window_size=4096,
|
| 1917 |
+
)
|
| 1918 |
+
)
|
| 1919 |
+
|
| 1920 |
+
mistral = [
|
| 1921 |
+
# https://huggingface.co/mistralai/Mistral-7B-v0.1/blob/main/config.json
|
| 1922 |
+
dict(
|
| 1923 |
+
name="Mistral-7B-{}v0.1",
|
| 1924 |
+
hf_config=dict(org="mistralai", name="Mistral-7B-{}v0.1"),
|
| 1925 |
+
padded_vocab_size=32000,
|
| 1926 |
+
block_size=4096, # should be 32768 but sliding window attention is not implemented
|
| 1927 |
+
n_layer=32,
|
| 1928 |
+
n_query_groups=8,
|
| 1929 |
+
rotary_percentage=1.0,
|
| 1930 |
+
parallel_residual=False,
|
| 1931 |
+
bias=False,
|
| 1932 |
+
norm_class_name="RMSNorm",
|
| 1933 |
+
norm_eps=1e-05,
|
| 1934 |
+
mlp_class_name="LLaMAMLP",
|
| 1935 |
+
intermediate_size=14336,
|
| 1936 |
+
sliding_window_size=4096,
|
| 1937 |
+
),
|
| 1938 |
+
# https://huggingface.co/mistralai/Mixtral-8x7B-v0.1/blob/main/config.json
|
| 1939 |
+
dict(
|
| 1940 |
+
name="Mixtral-8x7B-{}v0.1",
|
| 1941 |
+
hf_config=dict(org="mistralai", name="Mixtral-8x7B-{}v0.1"),
|
| 1942 |
+
padded_vocab_size=32000,
|
| 1943 |
+
block_size=32768,
|
| 1944 |
+
n_layer=32,
|
| 1945 |
+
n_query_groups=8,
|
| 1946 |
+
rotary_percentage=1.0,
|
| 1947 |
+
parallel_residual=False,
|
| 1948 |
+
bias=False,
|
| 1949 |
+
norm_class_name="RMSNorm",
|
| 1950 |
+
norm_eps=1e-05,
|
| 1951 |
+
mlp_class_name="LLaMAMoE",
|
| 1952 |
+
intermediate_size=14336,
|
| 1953 |
+
rope_base=1000000,
|
| 1954 |
+
n_expert=8,
|
| 1955 |
+
n_expert_per_token=2,
|
| 1956 |
+
),
|
| 1957 |
+
# https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1/blob/main/config.json
|
| 1958 |
+
dict(
|
| 1959 |
+
name="Mixtral-8x22B-{}v0.1",
|
| 1960 |
+
hf_config=dict(org="mistralai", name="Mixtral-8x22B-{}v0.1"),
|
| 1961 |
+
padded_vocab_size=32768,
|
| 1962 |
+
block_size=65536,
|
| 1963 |
+
n_layer=56,
|
| 1964 |
+
n_query_groups=8,
|
| 1965 |
+
rotary_percentage=1.0,
|
| 1966 |
+
parallel_residual=False,
|
| 1967 |
+
bias=False,
|
| 1968 |
+
norm_class_name="RMSNorm",
|
| 1969 |
+
norm_eps=1e-05,
|
| 1970 |
+
mlp_class_name="LLaMAMoE",
|
| 1971 |
+
intermediate_size=16384,
|
| 1972 |
+
n_head=48,
|
| 1973 |
+
n_embd=6144,
|
| 1974 |
+
rope_base=1000000,
|
| 1975 |
+
n_expert=8,
|
| 1976 |
+
n_expert_per_token=2,
|
| 1977 |
+
),
|
| 1978 |
+
]
|
| 1979 |
+
for c in mistral:
|
| 1980 |
+
for kind in ("", "Instruct-"):
|
| 1981 |
+
copy = deepcopy(c)
|
| 1982 |
+
copy["name"] = c["name"].format(kind)
|
| 1983 |
+
copy["hf_config"]["name"] = c["hf_config"]["name"].format(kind)
|
| 1984 |
+
configs.append(copy)
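The "{}" placeholder in the Mistral and Mixtral templates is filled once per iteration of the loop above, so every template contributes both a base and an Instruct config. Concretely:

template = "Mistral-7B-{}v0.1"
print([template.format(kind) for kind in ("", "Instruct-")])
# ['Mistral-7B-v0.1', 'Mistral-7B-Instruct-v0.1']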
|
| 1985 |
+
configs.append(
|
| 1986 |
+
# https://huggingface.co/unsloth/mistral-7b-v0.2/blob/main/config.json
|
| 1987 |
+
dict(
|
| 1988 |
+
name="Mistral-7B-v0.2",
|
| 1989 |
+
hf_config=dict(org="unsloth", name="Mistral-7B-v0.2"),
|
| 1990 |
+
padded_vocab_size=32000,
|
| 1991 |
+
block_size=32768,
|
| 1992 |
+
n_layer=32,
|
| 1993 |
+
n_query_groups=8,
|
| 1994 |
+
rotary_percentage=1.0,
|
| 1995 |
+
parallel_residual=False,
|
| 1996 |
+
bias=False,
|
| 1997 |
+
norm_class_name="RMSNorm",
|
| 1998 |
+
norm_eps=1e-05,
|
| 1999 |
+
mlp_class_name="LLaMAMLP",
|
| 2000 |
+
intermediate_size=14336,
|
| 2001 |
+
)
|
| 2002 |
+
)
|
| 2003 |
+
configs.append(
|
| 2004 |
+
# https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/blob/main/config.json
|
| 2005 |
+
dict(
|
| 2006 |
+
name="Mistral-7B-Instruct-v0.2",
|
| 2007 |
+
hf_config=dict(org="mistralai", name="Mistral-7B-Instruct-v0.2"),
|
| 2008 |
+
padded_vocab_size=32000,
|
| 2009 |
+
block_size=32768,
|
| 2010 |
+
n_layer=32,
|
| 2011 |
+
n_query_groups=8,
|
| 2012 |
+
rotary_percentage=1.0,
|
| 2013 |
+
parallel_residual=False,
|
| 2014 |
+
bias=False,
|
| 2015 |
+
norm_class_name="RMSNorm",
|
| 2016 |
+
norm_eps=1e-05,
|
| 2017 |
+
mlp_class_name="LLaMAMLP",
|
| 2018 |
+
intermediate_size=14336,
|
| 2019 |
+
)
|
| 2020 |
+
)
|
| 2021 |
+
configs.append(
|
| 2022 |
+
# https://huggingface.co/mistralai/Mistral-7B-v0.3/blob/main/config.json
|
| 2023 |
+
dict(
|
| 2024 |
+
name="Mistral-7B-v0.3",
|
| 2025 |
+
hf_config=dict(org="mistralai", name="Mistral-7B-v0.3"),
|
| 2026 |
+
padded_vocab_size=32768,
|
| 2027 |
+
block_size=32768,
|
| 2028 |
+
n_layer=32,
|
| 2029 |
+
n_query_groups=8,
|
| 2030 |
+
rotary_percentage=1.0,
|
| 2031 |
+
parallel_residual=False,
|
| 2032 |
+
bias=False,
|
| 2033 |
+
norm_class_name="RMSNorm",
|
| 2034 |
+
norm_eps=1e-05,
|
| 2035 |
+
mlp_class_name="LLaMAMLP",
|
| 2036 |
+
intermediate_size=14336,
|
| 2037 |
+
)
|
| 2038 |
+
)
|
| 2039 |
+
configs.append(
|
| 2040 |
+
# https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3/blob/main/config.json
|
| 2041 |
+
dict(
|
| 2042 |
+
name="Mistral-7B-Instruct-v0.3",
|
| 2043 |
+
hf_config=dict(org="mistralai", name="Mistral-7B-Instruct-v0.3"),
|
| 2044 |
+
padded_vocab_size=32768,
|
| 2045 |
+
block_size=32768,
|
| 2046 |
+
n_layer=32,
|
| 2047 |
+
n_query_groups=8,
|
| 2048 |
+
rotary_percentage=1.0,
|
| 2049 |
+
parallel_residual=False,
|
| 2050 |
+
bias=False,
|
| 2051 |
+
norm_class_name="RMSNorm",
|
| 2052 |
+
norm_eps=1e-05,
|
| 2053 |
+
mlp_class_name="LLaMAMLP",
|
| 2054 |
+
intermediate_size=14336,
|
| 2055 |
+
)
|
| 2056 |
+
)
|
| 2057 |
+
configs.append(
|
| 2058 |
+
# https://huggingface.co/mistralai/Mistral-Large-Instruct-2407/blob/main/config.json
|
| 2059 |
+
dict(
|
| 2060 |
+
name="Mistral-Large-Instruct-2407",
|
| 2061 |
+
hf_config=dict(org="mistralai", name="Mistral-Large-Instruct-2407"),
|
| 2062 |
+
padded_vocab_size=32768,
|
| 2063 |
+
block_size=32768,
|
| 2064 |
+
n_layer=88,
|
| 2065 |
+
n_head=96,
|
| 2066 |
+
n_embd=12288,
|
| 2067 |
+
n_query_groups=8,
|
| 2068 |
+
rotary_percentage=1.0,
|
| 2069 |
+
parallel_residual=False,
|
| 2070 |
+
bias=False,
|
| 2071 |
+
norm_class_name="RMSNorm",
|
| 2072 |
+
norm_eps=1e-05,
|
| 2073 |
+
mlp_class_name="LLaMAMLP",
|
| 2074 |
+
intermediate_size=28672,
|
| 2075 |
+
)
|
| 2076 |
+
)
|
| 2077 |
+
configs.append(
|
| 2078 |
+
# https://huggingface.co/mistralai/Mistral-Large-Instruct-2411/blob/main/config.json
|
| 2079 |
+
dict(
|
| 2080 |
+
name="Mistral-Large-Instruct-2411",
|
| 2081 |
+
hf_config=dict(org="mistralai", name="Mistral-Large-Instruct-2411"),
|
| 2082 |
+
padded_vocab_size=32768,
|
| 2083 |
+
block_size=32768,
|
| 2084 |
+
n_layer=88,
|
| 2085 |
+
n_head=96,
|
| 2086 |
+
n_embd=12288,
|
| 2087 |
+
n_query_groups=8,
|
| 2088 |
+
rotary_percentage=1.0,
|
| 2089 |
+
parallel_residual=False,
|
| 2090 |
+
bias=False,
|
| 2091 |
+
norm_class_name="RMSNorm",
|
| 2092 |
+
norm_eps=1e-05,
|
| 2093 |
+
mlp_class_name="LLaMAMLP",
|
| 2094 |
+
intermediate_size=28672,
|
| 2095 |
+
)
|
| 2096 |
+
)
|
| 2097 |
+
|
| 2098 |
+
|
| 2099 |
+
############
|
| 2100 |
+
# TinyLlama
|
| 2101 |
+
############
|
| 2102 |
+
tiny_llama = [
|
| 2103 |
+
dict(
|
| 2104 |
+
name="tiny-llama-1.1b{}",
|
| 2105 |
+
hf_config=dict(org="TinyLlama", name="TinyLlama-1.1B{}"),
|
| 2106 |
+
block_size=2048,
|
| 2107 |
+
vocab_size=32000,
|
| 2108 |
+
padding_multiple=64,
|
| 2109 |
+
n_layer=22,
|
| 2110 |
+
n_head=32,
|
| 2111 |
+
n_embd=2048,
|
| 2112 |
+
rotary_percentage=1.0,
|
| 2113 |
+
parallel_residual=False,
|
| 2114 |
+
bias=False,
|
| 2115 |
+
norm_class_name="RMSNorm", # original TinyLlama use FusedRMSNorm
|
| 2116 |
+
norm_eps=1e-5,
|
| 2117 |
+
mlp_class_name="LLaMAMLP",
|
| 2118 |
+
intermediate_size=5632,
|
| 2119 |
+
n_query_groups=4,
|
| 2120 |
+
)
|
| 2121 |
+
]
|
| 2122 |
+
for c in tiny_llama:
|
| 2123 |
+
for kind, hf_postfix in (("", "-intermediate-step-1431k-3T"), ("-chat", "-Chat-v1.0")):
|
| 2124 |
+
copy = deepcopy(c)
|
| 2125 |
+
copy["name"] = c["name"].format(kind)
|
| 2126 |
+
copy["hf_config"]["name"] = c["hf_config"]["name"].format(hf_postfix)
|
| 2127 |
+
configs.append(copy)
|
| 2128 |
+
|
| 2129 |
+
|
| 2130 |
+
############
|
| 2131 |
+
# MicroLlama
|
| 2132 |
+
############
|
| 2133 |
+
micro_llama = [
|
| 2134 |
+
dict(
|
| 2135 |
+
name="micro-llama-300M",
|
| 2136 |
+
hf_config=dict(org="keeeeenw", name="MicroLlama"),
|
| 2137 |
+
block_size=2048,
|
| 2138 |
+
vocab_size=32000,
|
| 2139 |
+
padding_multiple=64,
|
| 2140 |
+
n_layer=12,
|
| 2141 |
+
n_head=16,
|
| 2142 |
+
n_embd=1024,
|
| 2143 |
+
rotary_percentage=1.0,
|
| 2144 |
+
parallel_residual=False,
|
| 2145 |
+
bias=False,
|
| 2146 |
+
norm_class_name="RMSNorm", # original TinyLlama and MicroLlama use FusedRMSNorm
|
| 2147 |
+
norm_eps=1e-5,
|
| 2148 |
+
mlp_class_name="LLaMAMLP",
|
| 2149 |
+
intermediate_size=5632,
|
| 2150 |
+
n_query_groups=4,
|
| 2151 |
+
)
|
| 2152 |
+
]
|
| 2153 |
+
configs.extend(micro_llama)
|
| 2154 |
+
|
| 2155 |
+
|
| 2156 |
+
##########################
|
| 2157 |
+
# Trelis Function Calling
|
| 2158 |
+
##########################
|
| 2159 |
+
llama_2_function_calling = [
|
| 2160 |
+
# https://huggingface.co/Trelis/Llama-2-7b-chat-hf-function-calling-v2/blob/main/config.json
|
| 2161 |
+
dict(
|
| 2162 |
+
name="Llama-2-7b-chat-hf-function-calling-v2",
|
| 2163 |
+
hf_config=dict(org="Trelis", name="Llama-2-7b-chat-hf-function-calling-v2"),
|
| 2164 |
+
padding_multiple=64,
|
| 2165 |
+
n_layer=32,
|
| 2166 |
+
rotary_percentage=1.0,
|
| 2167 |
+
parallel_residual=False,
|
| 2168 |
+
bias=False,
|
| 2169 |
+
norm_class_name="RMSNorm",
|
| 2170 |
+
mlp_class_name="LLaMAMLP",
|
| 2171 |
+
intermediate_size=11008,
|
| 2172 |
+
norm_eps=1e-6,
|
| 2173 |
+
block_size=4096,
|
| 2174 |
+
vocab_size=32000,
|
| 2175 |
+
n_head=32,
|
| 2176 |
+
n_embd=4096,
|
| 2177 |
+
rope_base=10000,
|
| 2178 |
+
)
|
| 2179 |
+
]
|
| 2180 |
+
|
| 2181 |
+
configs.extend(llama_2_function_calling)
|
| 2182 |
+
|
| 2183 |
+
##########################
|
| 2184 |
+
# Qwen2
|
| 2185 |
+
##########################
|
| 2186 |
+
qwen_2 = [
|
| 2187 |
+
# https://huggingface.co/Qwen/Qwen2-7B/blob/main/config.json
|
| 2188 |
+
dict(
|
| 2189 |
+
name="Qwen2-7B",
|
| 2190 |
+
hf_config=dict(org="Qwen", name="Qwen2-7B"),
|
| 2191 |
+
block_size=131072,
|
| 2192 |
+
vocab_size=151643,
|
| 2193 |
+
padded_vocab_size=152064,
|
| 2194 |
+
n_layer=28,
|
| 2195 |
+
n_head=28,
|
| 2196 |
+
n_embd=3584,
|
| 2197 |
+
n_query_groups=4,
|
| 2198 |
+
rotary_percentage=1.0,
|
| 2199 |
+
parallel_residual=False,
|
| 2200 |
+
bias=False,
|
| 2201 |
+
attn_bias=True,
|
| 2202 |
+
norm_class_name="RMSNorm",
|
| 2203 |
+
mlp_class_name="LLaMAMLP",
|
| 2204 |
+
intermediate_size=18944,
|
| 2205 |
+
norm_eps=1e-6,
|
| 2206 |
+
rope_base=1000000,
|
| 2207 |
+
),
|
| 2208 |
+
dict(
|
| 2209 |
+
name="Qwen2-0.5B",
|
| 2210 |
+
hf_config=dict(org="Qwen", name="Qwen2-0.5B"),
|
| 2211 |
+
block_size=32768,
|
| 2212 |
+
vocab_size=151643,
|
| 2213 |
+
padded_vocab_size=151936,
|
| 2214 |
+
n_layer=24,
|
| 2215 |
+
n_head=14,
|
| 2216 |
+
n_embd=896,
|
| 2217 |
+
n_query_groups=2,
|
| 2218 |
+
rotary_percentage=1.0,
|
| 2219 |
+
parallel_residual=False,
|
| 2220 |
+
bias=False,
|
| 2221 |
+
attn_bias=True,
|
| 2222 |
+
norm_class_name="RMSNorm",
|
| 2223 |
+
mlp_class_name="LLaMAMLP",
|
| 2224 |
+
intermediate_size=4864,
|
| 2225 |
+
norm_eps=1e-6,
|
| 2226 |
+
rope_base=1000000,
|
| 2227 |
+
),
|
| 2228 |
+
]
|
| 2229 |
+
|
| 2230 |
+
configs.extend(qwen_2)
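The Qwen2 entries set bias=False but attn_bias=True, i.e. only the fused query/key/value projection carries a bias term while the other linear layers do not. A rough sketch of what that means for Qwen2-7B, assuming the QKV projection is sized (n_head + 2 * n_query_groups) * head_size as in GPT-style fused implementations:

import torch.nn as nn

n_embd, n_head, n_query_groups, head_size = 3584, 28, 4, 128
qkv_dim = (n_head + 2 * n_query_groups) * head_size       # 4608
attn = nn.Linear(n_embd, qkv_dim, bias=True)               # attn_bias=True
proj = nn.Linear(n_head * head_size, n_embd, bias=False)   # bias=False everywhere else
print(attn.bias is not None, proj.bias is None)             # True True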
|
| 2231 |
+
|
| 2232 |
+
##########
|
| 2233 |
+
# Qwen2.5
|
| 2234 |
+
##########
|
| 2235 |
+
qwen_2_5 = [
|
| 2236 |
+
# https://huggingface.co/Qwen/Qwen2.5-0.5B/blob/main/config.json
|
| 2237 |
+
dict(
|
| 2238 |
+
name="Qwen2.5-0.5B{}",
|
| 2239 |
+
hf_config=dict(org="Qwen", name="Qwen2.5-0.5B{}"),
|
| 2240 |
+
block_size=32768,
|
| 2241 |
+
vocab_size=151643,
|
| 2242 |
+
padded_vocab_size=151936,
|
| 2243 |
+
n_layer=24,
|
| 2244 |
+
n_head=14,
|
| 2245 |
+
n_embd=896,
|
| 2246 |
+
n_query_groups=2,
|
| 2247 |
+
rotary_percentage=1.0,
|
| 2248 |
+
parallel_residual=False,
|
| 2249 |
+
bias=False,
|
| 2250 |
+
attn_bias=True,
|
| 2251 |
+
norm_class_name="RMSNorm",
|
| 2252 |
+
mlp_class_name="LLaMAMLP",
|
| 2253 |
+
intermediate_size=4864,
|
| 2254 |
+
norm_eps=1e-6,
|
| 2255 |
+
rope_base=1000000,
|
| 2256 |
+
),
|
| 2257 |
+
# https://huggingface.co/Qwen/Qwen2.5-1.5B/blob/main/config.json
|
| 2258 |
+
dict(
|
| 2259 |
+
name="Qwen2.5-1.5B{}",
|
| 2260 |
+
hf_config=dict(org="Qwen", name="Qwen2.5-1.5B{}"),
|
| 2261 |
+
block_size=131072,
|
| 2262 |
+
vocab_size=151643,
|
| 2263 |
+
padded_vocab_size=151936,
|
| 2264 |
+
n_layer=28,
|
| 2265 |
+
n_head=12,
|
| 2266 |
+
n_embd=1536,
|
| 2267 |
+
n_query_groups=2,
|
| 2268 |
+
rotary_percentage=1.0,
|
| 2269 |
+
parallel_residual=False,
|
| 2270 |
+
bias=False,
|
| 2271 |
+
attn_bias=True,
|
| 2272 |
+
norm_class_name="RMSNorm",
|
| 2273 |
+
mlp_class_name="LLaMAMLP",
|
| 2274 |
+
intermediate_size=8960,
|
| 2275 |
+
norm_eps=1e-6,
|
| 2276 |
+
rope_base=1000000,
|
| 2277 |
+
),
|
| 2278 |
+
# https://huggingface.co/Qwen/Qwen2.5-3B/blob/main/config.json
|
| 2279 |
+
dict(
|
| 2280 |
+
name="Qwen2.5-3B{}",
|
| 2281 |
+
hf_config=dict(org="Qwen", name="Qwen2.5-3B{}"),
|
| 2282 |
+
block_size=32768,
|
| 2283 |
+
vocab_size=151643,
|
| 2284 |
+
padded_vocab_size=151936,
|
| 2285 |
+
n_layer=36,
|
| 2286 |
+
n_head=16,
|
| 2287 |
+
n_embd=2048,
|
| 2288 |
+
n_query_groups=2,
|
| 2289 |
+
rotary_percentage=1.0,
|
| 2290 |
+
parallel_residual=False,
|
| 2291 |
+
bias=False,
|
| 2292 |
+
attn_bias=True,
|
| 2293 |
+
norm_class_name="RMSNorm",
|
| 2294 |
+
mlp_class_name="LLaMAMLP",
|
| 2295 |
+
intermediate_size=11008,
|
| 2296 |
+
norm_eps=1e-6,
|
| 2297 |
+
rope_base=1000000,
|
| 2298 |
+
),
|
| 2299 |
+
# https://huggingface.co/Qwen/Qwen2.5-7B/blob/main/config.json
|
| 2300 |
+
dict(
|
| 2301 |
+
name="Qwen2.5-7B{}",
|
| 2302 |
+
hf_config=dict(org="Qwen", name="Qwen2.5-7B{}"),
|
| 2303 |
+
block_size=131072,
|
| 2304 |
+
vocab_size=151643,
|
| 2305 |
+
padded_vocab_size=152064,
|
| 2306 |
+
n_layer=28,
|
| 2307 |
+
n_head=28,
|
| 2308 |
+
n_embd=3584,
|
| 2309 |
+
n_query_groups=4,
|
| 2310 |
+
rotary_percentage=1.0,
|
| 2311 |
+
parallel_residual=False,
|
| 2312 |
+
bias=False,
|
| 2313 |
+
attn_bias=True,
|
| 2314 |
+
norm_class_name="RMSNorm",
|
| 2315 |
+
mlp_class_name="LLaMAMLP",
|
| 2316 |
+
intermediate_size=18944,
|
| 2317 |
+
norm_eps=1e-6,
|
| 2318 |
+
rope_base=1000000,
|
| 2319 |
+
),
|
| 2320 |
+
# https://huggingface.co/Qwen/Qwen2.5-14B/blob/main/config.json
|
| 2321 |
+
dict(
|
| 2322 |
+
name="Qwen2.5-14B{}",
|
| 2323 |
+
hf_config=dict(org="Qwen", name="Qwen2.5-14B{}"),
|
| 2324 |
+
block_size=131072,
|
| 2325 |
+
vocab_size=151643,
|
| 2326 |
+
padded_vocab_size=152064,
|
| 2327 |
+
n_layer=48,
|
| 2328 |
+
n_head=40,
|
| 2329 |
+
n_embd=5120,
|
| 2330 |
+
n_query_groups=8,
|
| 2331 |
+
rotary_percentage=1.0,
|
| 2332 |
+
parallel_residual=False,
|
| 2333 |
+
bias=False,
|
| 2334 |
+
attn_bias=True,
|
| 2335 |
+
norm_class_name="RMSNorm",
|
| 2336 |
+
mlp_class_name="LLaMAMLP",
|
| 2337 |
+
intermediate_size=13824,
|
| 2338 |
+
norm_eps=1e-5,
|
| 2339 |
+
rope_base=1000000,
|
| 2340 |
+
),
|
| 2341 |
+
# https://huggingface.co/Qwen/Qwen2.5-32B/blob/main/config.json
|
| 2342 |
+
dict(
|
| 2343 |
+
name="Qwen2.5-32B{}",
|
| 2344 |
+
hf_config=dict(org="Qwen", name="Qwen2.5-32B{}"),
|
| 2345 |
+
block_size=131072,
|
| 2346 |
+
vocab_size=151643,
|
| 2347 |
+
padded_vocab_size=152064,
|
| 2348 |
+
n_layer=64,
|
| 2349 |
+
n_head=40,
|
| 2350 |
+
n_embd=5120,
|
| 2351 |
+
n_query_groups=8,
|
| 2352 |
+
rotary_percentage=1.0,
|
| 2353 |
+
parallel_residual=False,
|
| 2354 |
+
bias=False,
|
| 2355 |
+
attn_bias=True,
|
| 2356 |
+
norm_class_name="RMSNorm",
|
| 2357 |
+
mlp_class_name="LLaMAMLP",
|
| 2358 |
+
intermediate_size=27648,
|
| 2359 |
+
norm_eps=1e-5,
|
| 2360 |
+
rope_base=1000000,
|
| 2361 |
+
),
|
| 2362 |
+
# https://huggingface.co/Qwen/Qwen2.5-72B/blob/main/config.json
|
| 2363 |
+
dict(
|
| 2364 |
+
name="Qwen2.5-72B{}",
|
| 2365 |
+
hf_config=dict(org="Qwen", name="Qwen2.5-72B{}"),
|
| 2366 |
+
block_size=131072,
|
| 2367 |
+
vocab_size=151643,
|
| 2368 |
+
padded_vocab_size=152064,
|
| 2369 |
+
n_layer=80,
|
| 2370 |
+
n_head=64,
|
| 2371 |
+
n_embd=8192,
|
| 2372 |
+
n_query_groups=8,
|
| 2373 |
+
rotary_percentage=1.0,
|
| 2374 |
+
parallel_residual=False,
|
| 2375 |
+
bias=False,
|
| 2376 |
+
attn_bias=True,
|
| 2377 |
+
norm_class_name="RMSNorm",
|
| 2378 |
+
mlp_class_name="LLaMAMLP",
|
| 2379 |
+
intermediate_size=29568,
|
| 2380 |
+
norm_eps=1e-5,
|
| 2381 |
+
rope_base=1000000,
|
| 2382 |
+
),
|
| 2383 |
+
]
|
| 2384 |
+
|
| 2385 |
+
qwen_2_5_coder = [
|
| 2386 |
+
# https://huggingface.co/Qwen/Qwen2.5-Coder-0.5B/blob/main/config.json
|
| 2387 |
+
dict(
|
| 2388 |
+
name="Qwen2.5-Coder-0.5B{}",
|
| 2389 |
+
hf_config=dict(org="Qwen", name="Qwen2.5-Coder-0.5B{}"),
|
| 2390 |
+
block_size=32768,
|
| 2391 |
+
vocab_size=151643,
|
| 2392 |
+
padded_vocab_size=151936,
|
| 2393 |
+
n_layer=24,
|
| 2394 |
+
n_head=14,
|
| 2395 |
+
n_embd=896,
|
| 2396 |
+
n_query_groups=2,
|
| 2397 |
+
rotary_percentage=1.0,
|
| 2398 |
+
parallel_residual=False,
|
| 2399 |
+
bias=False,
|
| 2400 |
+
attn_bias=True,
|
| 2401 |
+
norm_class_name="RMSNorm",
|
| 2402 |
+
mlp_class_name="LLaMAMLP",
|
| 2403 |
+
intermediate_size=4864,
|
| 2404 |
+
norm_eps=1e-6,
|
| 2405 |
+
rope_base=1000000,
|
| 2406 |
+
),
|
| 2407 |
+
# https://huggingface.co/Qwen/Qwen2.5-Coder-1.5B/blob/main/config.json
|
| 2408 |
+
dict(
|
| 2409 |
+
name="Qwen2.5-Coder-1.5B{}",
|
| 2410 |
+
hf_config=dict(org="Qwen", name="Qwen2.5-Coder-1.5B{}"),
|
| 2411 |
+
block_size=32768,
|
| 2412 |
+
vocab_size=151643,
|
| 2413 |
+
padded_vocab_size=151936,
|
| 2414 |
+
n_layer=28,
|
| 2415 |
+
n_head=12,
|
| 2416 |
+
n_embd=1536,
|
| 2417 |
+
n_query_groups=2,
|
| 2418 |
+
rotary_percentage=1.0,
|
| 2419 |
+
parallel_residual=False,
|
| 2420 |
+
bias=False,
|
| 2421 |
+
attn_bias=True,
|
| 2422 |
+
norm_class_name="RMSNorm",
|
| 2423 |
+
mlp_class_name="LLaMAMLP",
|
| 2424 |
+
intermediate_size=8960,
|
| 2425 |
+
norm_eps=1e-6,
|
| 2426 |
+
rope_base=1000000,
|
| 2427 |
+
),
|
| 2428 |
+
# https://huggingface.co/Qwen/Qwen2.5-Coder-3B/blob/main/config.json
|
| 2429 |
+
dict(
|
| 2430 |
+
name="Qwen2.5-Coder-3B{}",
|
| 2431 |
+
hf_config=dict(org="Qwen", name="Qwen2.5-Coder-3B{}"),
|
| 2432 |
+
block_size=32768,
|
| 2433 |
+
vocab_size=151643,
|
| 2434 |
+
padded_vocab_size=151936,
|
| 2435 |
+
n_layer=36,
|
| 2436 |
+
n_head=16,
|
| 2437 |
+
n_embd=2048,
|
| 2438 |
+
n_query_groups=2,
|
| 2439 |
+
rotary_percentage=1.0,
|
| 2440 |
+
parallel_residual=False,
|
| 2441 |
+
bias=False,
|
| 2442 |
+
attn_bias=True,
|
| 2443 |
+
norm_class_name="RMSNorm",
|
| 2444 |
+
mlp_class_name="LLaMAMLP",
|
| 2445 |
+
intermediate_size=11008,
|
| 2446 |
+
norm_eps=1e-6,
|
| 2447 |
+
rope_base=1000000,
|
| 2448 |
+
),
|
| 2449 |
+
# https://huggingface.co/Qwen/Qwen2.5-Coder-7B/blob/main/config.json
|
| 2450 |
+
dict(
|
| 2451 |
+
name="Qwen2.5-Coder-7B{}",
|
| 2452 |
+
hf_config=dict(org="Qwen", name="Qwen2.5-Coder-7B{}"),
|
| 2453 |
+
block_size=32768,
|
| 2454 |
+
vocab_size=151643,
|
| 2455 |
+
padded_vocab_size=152064,
|
| 2456 |
+
n_layer=28,
|
| 2457 |
+
n_head=28,
|
| 2458 |
+
n_embd=3584,
|
| 2459 |
+
n_query_groups=4,
|
| 2460 |
+
rotary_percentage=1.0,
|
| 2461 |
+
parallel_residual=False,
|
| 2462 |
+
bias=False,
|
| 2463 |
+
attn_bias=True,
|
| 2464 |
+
norm_class_name="RMSNorm",
|
| 2465 |
+
mlp_class_name="LLaMAMLP",
|
| 2466 |
+
intermediate_size=18944,
|
| 2467 |
+
norm_eps=1e-6,
|
| 2468 |
+
rope_base=1000000,
|
| 2469 |
+
),
|
| 2470 |
+
# https://huggingface.co/Qwen/Qwen2.5-Coder-14B/blob/main/config.json
|
| 2471 |
+
dict(
|
| 2472 |
+
name="Qwen2.5-Coder-14B{}",
|
| 2473 |
+
hf_config=dict(org="Qwen", name="Qwen2.5-Coder-14B{}"),
|
| 2474 |
+
block_size=32768,
|
| 2475 |
+
vocab_size=151643,
|
| 2476 |
+
padded_vocab_size=152064,
|
| 2477 |
+
n_layer=48,
|
| 2478 |
+
n_head=40,
|
| 2479 |
+
n_embd=5120,
|
| 2480 |
+
n_query_groups=8,
|
| 2481 |
+
rotary_percentage=1.0,
|
| 2482 |
+
parallel_residual=False,
|
| 2483 |
+
bias=False,
|
| 2484 |
+
attn_bias=True,
|
| 2485 |
+
norm_class_name="RMSNorm",
|
| 2486 |
+
mlp_class_name="LLaMAMLP",
|
| 2487 |
+
intermediate_size=13824,
|
| 2488 |
+
norm_eps=1e-5,
|
| 2489 |
+
rope_base=1000000,
|
| 2490 |
+
),
|
| 2491 |
+
# https://huggingface.co/Qwen/Qwen2.5-Coder-32B/blob/main/config.json
|
| 2492 |
+
dict(
|
| 2493 |
+
name="Qwen2.5-Coder-32B{}",
|
| 2494 |
+
hf_config=dict(org="Qwen", name="Qwen2.5-Coder-32B{}"),
|
| 2495 |
+
block_size=32768,
|
| 2496 |
+
vocab_size=151643,
|
| 2497 |
+
padded_vocab_size=152064,
|
| 2498 |
+
n_layer=64,
|
| 2499 |
+
n_head=40,
|
| 2500 |
+
n_embd=5120,
|
| 2501 |
+
n_query_groups=8,
|
| 2502 |
+
rotary_percentage=1.0,
|
| 2503 |
+
parallel_residual=False,
|
| 2504 |
+
bias=False,
|
| 2505 |
+
attn_bias=True,
|
| 2506 |
+
norm_class_name="RMSNorm",
|
| 2507 |
+
mlp_class_name="LLaMAMLP",
|
| 2508 |
+
intermediate_size=27648,
|
| 2509 |
+
norm_eps=1e-5,
|
| 2510 |
+
rope_base=1000000,
|
| 2511 |
+
),
|
| 2512 |
+
]
|
| 2513 |
+
|
| 2514 |
+
qwen_2_5.extend(qwen_2_5_coder)
|
| 2515 |
+
|
| 2516 |
+
qwen_2_5_math = [
|
| 2517 |
+
# https://huggingface.co/Qwen/Qwen2.5-Math-1.5B/blob/main/config.json
|
| 2518 |
+
dict(
|
| 2519 |
+
name="Qwen2.5-Math-1.5B{}",
|
| 2520 |
+
hf_config=dict(org="Qwen", name="Qwen2.5-Math-1.5B{}"),
|
| 2521 |
+
block_size=4096,
|
| 2522 |
+
vocab_size=151643,
|
| 2523 |
+
padded_vocab_size=151936,
|
| 2524 |
+
n_layer=28,
|
| 2525 |
+
n_head=12,
|
| 2526 |
+
n_embd=1536,
|
| 2527 |
+
n_query_groups=2,
|
| 2528 |
+
rotary_percentage=1.0,
|
| 2529 |
+
parallel_residual=False,
|
| 2530 |
+
bias=False,
|
| 2531 |
+
attn_bias=True,
|
| 2532 |
+
norm_class_name="RMSNorm",
|
| 2533 |
+
mlp_class_name="LLaMAMLP",
|
| 2534 |
+
intermediate_size=8960,
|
| 2535 |
+
norm_eps=1e-6,
|
| 2536 |
+
rope_base=10000,
|
| 2537 |
+
),
|
| 2538 |
+
# https://huggingface.co/Qwen/Qwen2.5-Math-7B/blob/main/config.json
|
| 2539 |
+
dict(
|
| 2540 |
+
name="Qwen2.5-Math-7B{}",
|
| 2541 |
+
hf_config=dict(org="Qwen", name="Qwen2.5-Math-7B{}"),
|
| 2542 |
+
block_size=4096,
|
| 2543 |
+
vocab_size=151643,
|
| 2544 |
+
padded_vocab_size=152064,
|
| 2545 |
+
n_layer=28,
|
| 2546 |
+
n_head=28,
|
| 2547 |
+
n_embd=3584,
|
| 2548 |
+
n_query_groups=4,
|
| 2549 |
+
rotary_percentage=1.0,
|
| 2550 |
+
parallel_residual=False,
|
| 2551 |
+
bias=False,
|
| 2552 |
+
attn_bias=True,
|
| 2553 |
+
norm_class_name="RMSNorm",
|
| 2554 |
+
mlp_class_name="LLaMAMLP",
|
| 2555 |
+
intermediate_size=18944,
|
| 2556 |
+
norm_eps=1e-6,
|
| 2557 |
+
rope_base=10000,
|
| 2558 |
+
),
|
| 2559 |
+
# https://huggingface.co/Qwen/Qwen2.5-Math-72B/blob/main/config.json
|
| 2560 |
+
dict(
|
| 2561 |
+
name="Qwen2.5-Math-72B{}",
|
| 2562 |
+
hf_config=dict(org="Qwen", name="Qwen2.5-Math-72B{}"),
|
| 2563 |
+
block_size=4096,
|
| 2564 |
+
vocab_size=151643,
|
| 2565 |
+
padded_vocab_size=152064,
|
| 2566 |
+
n_layer=80,
|
| 2567 |
+
n_head=64,
|
| 2568 |
+
n_embd=8192,
|
| 2569 |
+
n_query_groups=8,
|
| 2570 |
+
rotary_percentage=1.0,
|
| 2571 |
+
parallel_residual=False,
|
| 2572 |
+
bias=False,
|
| 2573 |
+
attn_bias=True,
|
| 2574 |
+
norm_class_name="RMSNorm",
|
| 2575 |
+
mlp_class_name="LLaMAMLP",
|
| 2576 |
+
intermediate_size=29568,
|
| 2577 |
+
norm_eps=1e-5,
|
| 2578 |
+
rope_base=10000,
|
| 2579 |
+
),
|
| 2580 |
+
]
|
| 2581 |
+
|
| 2582 |
+
qwen_2_5.extend(qwen_2_5_math)
|
| 2583 |
+
|
| 2584 |
+
for c in qwen_2_5:
|
| 2585 |
+
for kind in ("", "-Instruct"):
|
| 2586 |
+
copy = deepcopy(c)
|
| 2587 |
+
copy["name"] = c["name"].format(kind)
|
| 2588 |
+
copy["hf_config"]["name"] = c["hf_config"]["name"].format(kind)
|
| 2589 |
+
configs.append(copy)
|
| 2590 |
+
|
| 2591 |
+
qwen_2_5_1m = [
|
| 2592 |
+
# https://huggingface.co/Qwen/Qwen2.5-7B-Instruct-1M/blob/main/config.json
|
| 2593 |
+
dict(
|
| 2594 |
+
name="Qwen2.5-7B-Instruct-1M",
|
| 2595 |
+
hf_config=dict(org="Qwen", name="Qwen2.5-7B-Instruct-1M"),
|
| 2596 |
+
block_size=1010000,
|
| 2597 |
+
vocab_size=151643,
|
| 2598 |
+
padded_vocab_size=152064,
|
| 2599 |
+
n_layer=28,
|
| 2600 |
+
n_head=28,
|
| 2601 |
+
n_embd=3584,
|
| 2602 |
+
n_query_groups=4,
|
| 2603 |
+
rotary_percentage=1.0,
|
| 2604 |
+
parallel_residual=False,
|
| 2605 |
+
bias=False,
|
| 2606 |
+
attn_bias=True,
|
| 2607 |
+
norm_class_name="RMSNorm",
|
| 2608 |
+
mlp_class_name="LLaMAMLP",
|
| 2609 |
+
intermediate_size=18944,
|
| 2610 |
+
norm_eps=1e-5,
|
| 2611 |
+
rope_base=10000000,
|
| 2612 |
+
),
|
| 2613 |
+
# https://huggingface.co/Qwen/Qwen2.5-14B-Instruct-1M/blob/main/config.json
|
| 2614 |
+
dict(
|
| 2615 |
+
name="Qwen2.5-14B-Instruct-1M",
|
| 2616 |
+
hf_config=dict(org="Qwen", name="Qwen2.5-14B-Instruct-1M"),
|
| 2617 |
+
block_size=1010000,
|
| 2618 |
+
vocab_size=151643,
|
| 2619 |
+
padded_vocab_size=152064,
|
| 2620 |
+
n_layer=48,
|
| 2621 |
+
n_head=40,
|
| 2622 |
+
n_embd=5120,
|
| 2623 |
+
n_query_groups=8,
|
| 2624 |
+
rotary_percentage=1.0,
|
| 2625 |
+
parallel_residual=False,
|
| 2626 |
+
bias=False,
|
| 2627 |
+
attn_bias=True,
|
| 2628 |
+
norm_class_name="RMSNorm",
|
| 2629 |
+
mlp_class_name="LLaMAMLP",
|
| 2630 |
+
intermediate_size=13824,
|
| 2631 |
+
norm_eps=1e-5,
|
| 2632 |
+
rope_base=10000000,
|
| 2633 |
+
),
|
| 2634 |
+
]
|
| 2635 |
+
|
| 2636 |
+
configs.extend(qwen_2_5_1m)
|
| 2637 |
+
|
| 2638 |
+
##########
|
| 2639 |
+
# QwQ
|
| 2640 |
+
##########
|
| 2641 |
+
qwq = [
|
| 2642 |
+
# https://huggingface.co/Qwen/QwQ-32B/blob/main/config.json
|
| 2643 |
+
dict(
|
| 2644 |
+
name="QwQ-32B",
|
| 2645 |
+
hf_config=dict(org="Qwen", name="QwQ-32B"),
|
| 2646 |
+
block_size=131072,
|
| 2647 |
+
vocab_size=151643,
|
| 2648 |
+
padded_vocab_size=152064,
|
| 2649 |
+
n_layer=64,
|
| 2650 |
+
n_head=40,
|
| 2651 |
+
n_embd=5120,
|
| 2652 |
+
n_query_groups=8,
|
| 2653 |
+
rotary_percentage=1.0,
|
| 2654 |
+
parallel_residual=False,
|
| 2655 |
+
bias=False,
|
| 2656 |
+
attn_bias=True,
|
| 2657 |
+
norm_class_name="RMSNorm",
|
| 2658 |
+
mlp_class_name="LLaMAMLP",
|
| 2659 |
+
intermediate_size=27648,
|
| 2660 |
+
norm_eps=1e-5,
|
| 2661 |
+
rope_base=1000000,
|
| 2662 |
+
),
|
| 2663 |
+
# https://huggingface.co/Qwen/QwQ-32B-Preview/blob/main/config.json
|
| 2664 |
+
dict(
|
| 2665 |
+
name="QwQ-32B-Preview",
|
| 2666 |
+
hf_config=dict(org="Qwen", name="QwQ-32B-Preview"),
|
| 2667 |
+
block_size=32768,
|
| 2668 |
+
vocab_size=151643,
|
| 2669 |
+
padded_vocab_size=152064,
|
| 2670 |
+
n_layer=64,
|
| 2671 |
+
n_head=40,
|
| 2672 |
+
n_embd=5120,
|
| 2673 |
+
n_query_groups=8,
|
| 2674 |
+
rotary_percentage=1.0,
|
| 2675 |
+
parallel_residual=False,
|
| 2676 |
+
bias=False,
|
| 2677 |
+
attn_bias=True,
|
| 2678 |
+
norm_class_name="RMSNorm",
|
| 2679 |
+
mlp_class_name="LLaMAMLP",
|
| 2680 |
+
intermediate_size=27648,
|
| 2681 |
+
norm_eps=1e-5,
|
| 2682 |
+
rope_base=1000000,
|
| 2683 |
+
),
|
| 2684 |
+
]
|
| 2685 |
+
|
| 2686 |
+
configs.extend(qwq)
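At this point in the file, configs holds every registered architecture, so a dict lookup by name is enough to pull a specific entry back out; litgpt builds a similar name-to-config mapping at module scope. A minimal sketch over the list accumulated above:

name_to_config = {config["name"]: config for config in configs}
qwq_config = name_to_config["QwQ-32B"]
print(qwq_config["n_layer"], qwq_config["n_embd"], qwq_config["intermediate_size"])  # 64 5120 27648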
|
| 2687 |
+
|
| 2688 |
+
##########
|
| 2689 |
+
# Qwen3
|
| 2690 |
+
##########
|
| 2691 |
+
qwen_3 = [
|
| 2692 |
+
# https://huggingface.co/Qwen/Qwen3-0.6B/blob/main/config.json
|
| 2693 |
+
dict(
|
| 2694 |
+
name="Qwen3-0.6B{}",
|
| 2695 |
+
hf_config=dict(org="Qwen", name="Qwen3-0.6B{}"),
|
| 2696 |
+
block_size=40960,
|
| 2697 |
+
vocab_size=151643,
|
| 2698 |
+
padded_vocab_size=151936,
|
| 2699 |
+
n_layer=28,
|
| 2700 |
+
n_head=16,
|
| 2701 |
+
n_embd=1024,
|
| 2702 |
+
n_query_groups=8,
|
| 2703 |
+
rotary_percentage=1.0,
|
| 2704 |
+
        parallel_residual=False,
        bias=False,
        norm_class_name="RMSNorm",
        mlp_class_name="LLaMAMLP",
        intermediate_size=3072,
        norm_eps=1e-6,
        rope_base=1000000,
        head_size=128,
        norm_qk=True,
    ),
    # https://huggingface.co/Qwen/Qwen3-1.7B/blob/main/config.json
    dict(
        name="Qwen3-1.7B{}",
        hf_config=dict(org="Qwen", name="Qwen3-1.7B{}"),
        block_size=40960,
        vocab_size=151643,
        padded_vocab_size=151936,
        n_layer=28,
        n_head=16,
        n_embd=2048,
        n_query_groups=8,
        rotary_percentage=1.0,
        parallel_residual=False,
        bias=False,
        norm_class_name="RMSNorm",
        mlp_class_name="LLaMAMLP",
        intermediate_size=6144,
        norm_eps=1e-6,
        rope_base=1000000,
        norm_qk=True,
    ),
    # https://huggingface.co/Qwen/Qwen3-4B/blob/main/config.json
    dict(
        name="Qwen3-4B{}",
        hf_config=dict(org="Qwen", name="Qwen3-4B{}"),
        block_size=40960,
        vocab_size=151643,
        padded_vocab_size=151936,
        n_layer=36,
        n_head=32,
        n_embd=2560,
        n_query_groups=8,
        rotary_percentage=1.0,
        parallel_residual=False,
        bias=False,
        norm_class_name="RMSNorm",
        mlp_class_name="LLaMAMLP",
        intermediate_size=9728,
        norm_eps=1e-6,
        rope_base=1000000,
        head_size=128,
        norm_qk=True,
    ),
    # https://huggingface.co/Qwen/Qwen3-8B/blob/main/config.json
    dict(
        name="Qwen3-8B{}",
        hf_config=dict(org="Qwen", name="Qwen3-8B{}"),
        block_size=40960,
        vocab_size=151643,
        padded_vocab_size=151936,
        n_layer=36,
        n_head=32,
        n_embd=4096,
        n_query_groups=8,
        rotary_percentage=1.0,
        parallel_residual=False,
        bias=False,
        norm_class_name="RMSNorm",
        mlp_class_name="LLaMAMLP",
        intermediate_size=12288,
        norm_eps=1e-6,
        rope_base=1000000,
        norm_qk=True,
    ),
    # https://huggingface.co/Qwen/Qwen3-14B/blob/main/config.json
    dict(
        name="Qwen3-14B{}",
        hf_config=dict(org="Qwen", name="Qwen3-14B{}"),
        block_size=40960,
        vocab_size=151643,
        padded_vocab_size=151936,
        n_layer=40,
        n_head=40,
        n_embd=5120,
        n_query_groups=8,
        rotary_percentage=1.0,
        parallel_residual=False,
        bias=False,
        norm_class_name="RMSNorm",
        mlp_class_name="LLaMAMLP",
        intermediate_size=17408,
        norm_eps=1e-6,
        rope_base=1000000,
        norm_qk=True,
    ),
]
for c in qwen_3:
    for kind in ("", "-Base"):
        copy = deepcopy(c)
        copy["name"] = c["name"].format(kind)
        copy["hf_config"]["name"] = c["hf_config"]["name"].format(kind)
        configs.append(copy)
qwen_3_32b = [
    # https://huggingface.co/Qwen/Qwen3-32B/blob/main/config.json
    dict(
        name="Qwen3-32B",
        hf_config=dict(org="Qwen", name="Qwen3-32B"),
        block_size=40960,
        vocab_size=151643,
        padded_vocab_size=151936,
        n_layer=64,
        n_head=64,
        n_embd=5120,
        n_query_groups=8,
        rotary_percentage=1.0,
        parallel_residual=False,
        bias=False,
        norm_class_name="RMSNorm",
        mlp_class_name="LLaMAMLP",
        intermediate_size=25600,
        norm_eps=1e-6,
        rope_base=1000000,
        head_size=128,
        norm_qk=True,
    ),
]
configs.extend(qwen_3_32b)

qwen_3_moe = [
    # https://huggingface.co/Qwen/Qwen3-30B-A3B/blob/main/config.json
    dict(
        name="Qwen3-30B-A3B",
        hf_config=dict(org="Qwen", name="Qwen3-30B-A3B"),
        block_size=40960,
        head_size=128,
        vocab_size=151643,
        padded_vocab_size=151936,
        n_layer=48,
        n_head=32,
        n_embd=2048,
        n_query_groups=4,
        rotary_percentage=1.0,
        parallel_residual=False,
        bias=False,
        norm_class_name="RMSNorm",
        mlp_class_name="LLaMAMoE",
        intermediate_size=6144,
        moe_intermediate_size=768,
        norm_eps=1e-6,
        rope_base=1000000,
        norm_qk=True,
        n_expert=128,
        n_expert_per_token=8,
    ),
    # https://huggingface.co/Qwen/Qwen3-30B-A3B-Base/blob/main/config.json
    dict(
        name="Qwen3-30B-A3B-Base",
        hf_config=dict(org="Qwen", name="Qwen3-30B-A3B-Base"),
        block_size=40960,
        head_size=128,
        vocab_size=151643,
        padded_vocab_size=151936,
        n_layer=48,
        n_head=32,
        n_embd=2048,
        n_query_groups=4,
        rotary_percentage=1.0,
        parallel_residual=False,
        bias=False,
        norm_class_name="RMSNorm",
        mlp_class_name="LLaMAMoE",
        intermediate_size=6144,
        moe_intermediate_size=768,
        norm_eps=1e-6,
        rope_base=1000000,
        norm_qk=True,
        n_expert=128,
        n_expert_per_token=8,
    ),
    # https://huggingface.co/Qwen/Qwen3-235B-A22B/blob/main/config.json
    dict(
        name="Qwen3-235B-A22B",
        hf_config=dict(org="Qwen", name="Qwen3-235B-A22B"),
        block_size=40960,
        head_size=128,
        vocab_size=151643,
        padded_vocab_size=151936,
        n_layer=94,
        n_head=64,
        n_embd=4096,
        n_query_groups=4,
        rotary_percentage=1.0,
        parallel_residual=False,
        bias=False,
        norm_class_name="RMSNorm",
        mlp_class_name="LLaMAMoE",
        intermediate_size=12288,
        moe_intermediate_size=1536,
        norm_eps=1e-6,
        rope_base=1000000,
        norm_qk=True,
        n_expert=128,
        n_expert_per_token=8,
    ),
]
configs.extend(qwen_3_moe)


#############
# Salamandra
#############
salamandra = [
    # https://huggingface.co/BSC-LT/salamandra-2b-instruct/blob/main/config.json
    dict(
        name="salamandra-2b{}",
        hf_config=dict(org="BSC-LT", name="salamandra-2b{}"),
        block_size=8192,
        vocab_size=256000,
        padded_vocab_size=256000,
        n_layer=24,
        n_head=16,
        n_embd=2048,
        n_query_groups=16,
        rotary_percentage=1.0,
        parallel_residual=False,
        bias=False,
        norm_class_name="RMSNorm",
        mlp_class_name="LLaMAMLP",
        intermediate_size=5440,
        norm_eps=1e-5,
        rope_base=10000,
    ),
    # https://huggingface.co/BSC-LT/salamandra-7b-instruct/blob/main/config.json
    dict(
        name="salamandra-7b{}",
        hf_config=dict(org="BSC-LT", name="salamandra-7b{}"),
        block_size=8192,
        vocab_size=256000,
        padded_vocab_size=256000,
        n_layer=32,
        n_head=32,
        n_embd=4096,
        n_query_groups=8,
        rotary_percentage=1.0,
        parallel_residual=False,
        bias=False,
        norm_class_name="RMSNorm",
        mlp_class_name="LLaMAMLP",
        intermediate_size=11008,
        norm_eps=1e-6,
        rope_base=10000,
    ),
]

for c in salamandra:
    for kind in ("", "-instruct"):
        copy = deepcopy(c)
        copy["name"] = c["name"].format(kind)
        copy["hf_config"]["name"] = c["hf_config"]["name"].format(kind)
        configs.append(copy)


###############
# SmolLM2
###############
smollm2 = [
    # https://huggingface.co/HuggingFaceTB/SmolLM2-135M/blob/main/config.json
    dict(
        name="SmolLM2-135M{}",
        hf_config=dict(org="HuggingFaceTB", name="SmolLM2-135M{}"),
        block_size=8192,
        vocab_size=49152,
        padded_vocab_size=49152,
        n_layer=30,
        n_head=9,
        n_embd=576,
        n_query_groups=3,
        rotary_percentage=1.0,
        parallel_residual=False,
        bias=False,
        norm_class_name="RMSNorm",
        mlp_class_name="LLaMAMLP",
        intermediate_size=1536,
        rope_base=100000,
        norm_eps=1e-5,
    ),
    # https://huggingface.co/HuggingFaceTB/SmolLM2-360M/blob/main/config.json
    dict(
        name="SmolLM2-360M{}",
        hf_config=dict(org="HuggingFaceTB", name="SmolLM2-360M{}"),
        block_size=8192,
        vocab_size=49152,
        padded_vocab_size=49152,
        n_layer=32,
        n_head=15,
        n_embd=960,
        n_query_groups=5,
        rotary_percentage=1.0,
        parallel_residual=False,
        bias=False,
        norm_class_name="RMSNorm",
        mlp_class_name="LLaMAMLP",
        intermediate_size=2560,
        rope_base=100000,
        norm_eps=1e-5,
    ),
    # https://huggingface.co/HuggingFaceTB/SmolLM2-1.7B/blob/main/config.json
    dict(
        name="SmolLM2-1.7B{}",
        hf_config=dict(org="HuggingFaceTB", name="SmolLM2-1.7B{}"),
        block_size=8192,
        vocab_size=49152,
        padded_vocab_size=49152,
        n_layer=24,
        n_head=32,
        n_embd=2048,
        n_query_groups=32,
        rotary_percentage=1.0,
        parallel_residual=False,
        bias=False,
        norm_class_name="RMSNorm",
        mlp_class_name="LLaMAMLP",
        intermediate_size=8192,
        rope_base=130000,
        norm_eps=1e-5,
    ),
]

for c in smollm2:
    for kind in ("", "-Instruct"):
        copy = deepcopy(c)
        copy["name"] = c["name"].format(kind)
        copy["hf_config"]["name"] = c["hf_config"]["name"].format(kind)
        configs.append(copy)

###############
# DeepSeek R1 Distill
###############

r1_distill_llama = [
    # https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-8B/blob/main/config.json
    dict(
        name="R1-Distill-Llama-8B",
        hf_config=dict(org="deepseek-ai", name="DeepSeek-R1-Distill-Llama-8B"),
        block_size=131072,
        vocab_size=128000,
        padded_vocab_size=128256,
        n_layer=32,
        n_head=32,
        n_query_groups=8,
        rotary_percentage=1.0,
        parallel_residual=False,
        bias=False,
        norm_class_name="RMSNorm",
        mlp_class_name="LLaMAMLP",
        intermediate_size=14336,
        rope_base=500000,
        rope_adjustments=dict(factor=8.0, low_freq_factor=1.0, high_freq_factor=4.0, original_max_seq_len=8192),
    ),
    # https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-70B/blob/main/config.json
    dict(
        name="R1-Distill-Llama-70B",
        hf_config=dict(org="deepseek-ai", name="DeepSeek-R1-Distill-Llama-70B"),
        block_size=131072,
        vocab_size=128000,
        padded_vocab_size=128256,
        n_layer=80,
        n_head=64,
        n_embd=8192,
        n_query_groups=8,
        rotary_percentage=1.0,
        parallel_residual=False,
        bias=False,
        norm_class_name="RMSNorm",
        mlp_class_name="LLaMAMLP",
        intermediate_size=28672,
        rope_base=500000,
        rope_adjustments=dict(factor=8.0, low_freq_factor=1.0, high_freq_factor=4.0, original_max_seq_len=8192),
    ),
]

configs.extend(r1_distill_llama)

name_to_config = {config["name"]: config for config in configs}
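The `{}` placeholder in template names such as "Qwen3-4B{}" is expanded by the deepcopy/format loops above, and `name_to_config` then indexes every expanded entry by name. The following standalone sketch (editorial illustration only, not part of litgpt/config.py; the single-entry list and printed output are assumptions for the example) shows that expansion in isolation:

from copy import deepcopy

configs = []
qwen_3 = [dict(name="Qwen3-4B{}", hf_config=dict(org="Qwen", name="Qwen3-4B{}"), n_layer=36)]

# Each template entry is duplicated once per suffix; deepcopy is needed so the
# nested hf_config dict is not shared between the two variants.
for c in qwen_3:
    for kind in ("", "-Base"):
        copy = deepcopy(c)
        copy["name"] = c["name"].format(kind)
        copy["hf_config"]["name"] = c["hf_config"]["name"].format(kind)
        configs.append(copy)

name_to_config = {config["name"]: config for config in configs}
print(sorted(name_to_config))  # ['Qwen3-4B', 'Qwen3-4B-Base']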
litgpt/lora.py
ADDED
|
@@ -0,0 +1,662 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.

# Derived from https://github.com/microsoft/LoRA
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------

r"""
    Low Ranking Adaptation for LLMs scheme.

             ┌───────────────────┐
             ┆         h         ┆
             └───────────────────┘
                       ▲
                       |
                       +
                    /     \
    ┌─────────────────┐    ╭───────────────╮     Matrix initialization:
    ┆                 ┆     \      B      /      B = 0
    ┆   pretrained    ┆      \    r*d    /       A = N(0, sigma^2)
    ┆    weights      ┆       ╰─────────╯
    ┆                 ┆       |    r    |        r - rank
    ┆   W e R^(d*d)   ┆       | ◀─────▶ |
    ┆                 ┆       ╭─────────╮
    └─────────────────┘      /     A     \
              ▲             /     d*r     \
               \           ╰───────────────╯
                \                ▲
                 \              /
                  \            /
             ┌───────────────────┐
             ┆         x         ┆
             └───────────────────┘

With LoRA (Low Ranking Adaptation: https://arxiv.org/abs/2106.09685) instead of learning weights of size d*d,
we can freeze the pretrained weights and instead learn two matrices of size d*r and r*d (they will store weight updates
for the pretrained weights): the number of parameters in this case will be reduced drastically (depending on the rank of
course) yet after multiplication of matrices d*r and r*d we will get a matrix d*d which we can sum with frozen
pretrained weights and thus fine-tune the model.

The goal of this approach is to move weight updates into a separate matrix which is decomposed with
two matrices of a lower rank.
"""

import math
from dataclasses import dataclass
from typing import Any, Dict, Optional, Tuple, Type, Union

import torch
import torch.nn as nn
from torch.nn import functional as F
from typing_extensions import Self

import litgpt
from litgpt.config import Config as BaseConfig
from litgpt.model import GPT as BaseModel
from litgpt.model import Block as BaseBlock
from litgpt.model import CausalSelfAttention as BaseCausalSelfAttention
from litgpt.scripts.convert_hf_checkpoint import qkv_reassemble
from litgpt.utils import map_old_state_dict_weights


class LoRALayer(nn.Module):
    def __init__(self, r: int, lora_alpha: int, lora_dropout: float):
        """Store LoRA specific attributes in a class.

        Args:
            r: rank of the weight update matrices. To make sense of using LoRA the rank should be smaller than the rank of
                the weights of the model. The rank can be as low as 1: https://arxiv.org/pdf/2106.09685.pdf (section 7.2)
            lora_alpha: alpha is needed for scaling updates as alpha/r
                "This scaling helps to reduce the need to retune hyperparameters when we vary r"
                https://arxiv.org/pdf/2106.09685.pdf (section 4.1)
            lora_dropout: dropout that is applied on the input in the LoRA branch (before multiplying by matrix A)
        """
        super().__init__()
        assert r >= 0
        self.r = r
        self.lora_alpha = lora_alpha
        # Optional dropout
        if lora_dropout > 0.0:
            self.lora_dropout = nn.Dropout(p=lora_dropout)
        else:
            self.lora_dropout = lambda x: x
        # Mark the weight as unmerged
        self.merged = False


class LoRALinear(LoRALayer):
    # LoRA implemented in a dense layer
    def __init__(
        self,
        # ↓ this part is for pretrained weights
        in_features: int,
        out_features: int,
        # ↓ the remaining part is for LoRA
        r: int = 0,
        lora_alpha: int = 1,
        lora_dropout: float = 0.0,
        **kwargs: Any,
    ):
        """LoRA wrapper around linear class.

        This class has three weight matrices:
            1. Pretrained weights are stored as `self.linear.weight`
            2. LoRA A matrix as `self.lora_A`
            3. LoRA B matrix as `self.lora_B`
        Only LoRA's A and B matrices are updated, pretrained weights stay frozen.

        Args:
            in_features: number of input features of the pretrained weights
            out_features: number of output features of the pretrained weights
            r: rank of the weight update matrices. To make sense of using LoRA the rank should be smaller than the rank of
                the weights of the model. The rank can be as low as 1: https://arxiv.org/pdf/2106.09685.pdf (section 7.2)
            lora_alpha: alpha is needed for scaling updates as alpha/r
                "This scaling helps to reduce the need to retune hyperparameters when we vary r"
                https://arxiv.org/pdf/2106.09685.pdf (section 4.1)
            lora_dropout: dropout that is applied on the input in the LoRA branch (before multiplying by matrix A)
        """
        super().__init__(r=r, lora_alpha=lora_alpha, lora_dropout=lora_dropout)
        self.linear = torch.nn.Linear(in_features, out_features, **kwargs)

        # Actual trainable parameters
        if r > 0:
            self.lora_A = nn.Parameter(torch.empty((r, in_features)))
            self.lora_B = nn.Parameter(torch.empty((out_features, r)))
            self.scaling = self.lora_alpha / self.r
            self.reset_parameters()

    def reset_parameters(self) -> None:
        """Reset all the weights, even including pretrained ones."""
        if hasattr(self, "lora_A"):
            # initialize A the same way as the default for nn.Linear and B to zero
            # Wondering why 'a' is equal to math.sqrt(5)?: https://github.com/pytorch/pytorch/issues/15314
            nn.init.kaiming_uniform_(self.lora_A, a=math.sqrt(5))
            nn.init.zeros_(self.lora_B)

    def get_lora_AB(self) -> torch.Tensor:
        """Return merged lora_A and lora_B matrices with the same shape as the pretrained weights."""
        return (self.lora_B @ self.lora_A) * self.scaling

    def merge(self) -> None:
        """Merges the LoRA weights into the full-rank weights (W = W + delta_W)."""
        if self.r > 0 and not self.merged:
            pretrained_dtype = self.linear.weight.data.dtype
            lora_data = self.get_lora_AB()
            # if only the pretrained are in quantized form - dequantize, sum with LoRA and quantize the result
            if pretrained_dtype == torch.uint8:
                import bitsandbytes as bnb

                weight = self.linear.weight
                # dequantize the pretrained weights
                weight_data = bnb.functional.dequantize_4bit(weight.data, weight.quant_state).to(lora_data.dtype)
                # add pretrained and LoRA weights
                weight_data += lora_data
                # assign updated weights and quantize by moving to CUDA device
                self.linear.weight = bnb.nn.Params4bit(weight_data, requires_grad=False, **weight.__dict__)
                self.linear.weight.cuda(weight.device)
            else:
                # self.linear might be on CPU and lora_data on CUDA
                # the inplace add will preserve the dtype of linear.weight
                self.linear.weight.data += lora_data.to(device=self.linear.weight.data.device)
            self.merged = True

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # if weights are merged or rank is less or equal to zero (LoRA is disabled) - it's only a regular nn.Linear forward pass;
        # otherwise in addition do the forward pass with LoRA weights and add it's output to the output from pretrained weights
        pretrained = self.linear(x)
        if self.r == 0 or self.merged:
            return pretrained
        lora = (self.lora_dropout(x) @ self.lora_A.transpose(0, 1) @ self.lora_B.transpose(0, 1)) * self.scaling
        return pretrained + lora


class LoRAQKVLinear(LoRALinear):
    # LoRA implemented in a dense layer
    def __init__(
        self,
        # ↓ this part is for pretrained weights
        in_features: int,
        out_features: int,
        # ↓ the remaining part is for LoRA
        head_size: int,
        n_head: int,
        n_query_groups: int,
        r: int = 0,
        lora_alpha: int = 1,
        lora_dropout: float = 0.0,
        enable_lora: Union[bool, Tuple[bool, bool, bool]] = False,
        **kwargs: Any,
    ):
        """LoRA wrapper around linear class that is used for calculation of q, k and v matrices.

        This class has three weight matrices:
            1. Pretrained weights are stored as `self.linear.weight`
            2. LoRA A matrix as `self.lora_A`
            3. LoRA B matrix as `self.lora_B`
        Only LoRA's A and B matrices are updated, pretrained weights stay frozen.

        Args:
            in_features: number of input features of the pretrained weights
            out_features: number of output features of the pretrained weights
            head_size: size of a single attention head
            n_head: number of attention heads
            n_query_groups: number of query groups (see diagram in `litgpt/config.py`)
            r: rank of the weight update matrices. To make sense of using LoRA the rank should be smaller than the rank of
                the weights of the model. The rank can be as low as 1: https://arxiv.org/pdf/2106.09685.pdf (section 7.2)
            lora_alpha: alpha is needed for scaling updates as alpha/r
                "This scaling helps to reduce the need to retune hyperparameters when we vary r"
                https://arxiv.org/pdf/2106.09685.pdf (section 4.1)
            lora_dropout: dropout that is applied on the input in the LoRA branch (before multiplying by matrix A)
            enable_lora: MergeLinear class is for attention mechanism where qkv are calculated with a single weight matrix. If we
                don't want to apply LoRA we can set it as False. For example if we want to apply LoRA only to `query`
                and `value` but keep `key` without weight updates we should pass `[True, False, True]`
        """
        super(LoRALinear, self).__init__(r=r, lora_alpha=lora_alpha, lora_dropout=lora_dropout)
        self.linear = torch.nn.Linear(in_features, out_features, **kwargs)
        self.head_size = head_size
        self.n_head = n_head
        self.n_query_groups = n_query_groups
        if isinstance(enable_lora, bool):
            enable_lora = [enable_lora] * 3
        assert len(enable_lora) == 3
        self.enable_lora = enable_lora

        # Actual trainable parameters
        # To better understand initialization let's imagine that we have such parameters:
        # ⚬ in_features: 128 (embeddings_size)
        # ⚬ out_features: 384 (3 * embedding_size)
        # ⚬ r: 2
        # ⚬ enable_lora: [True, False, True]
        if r > 0 and any(enable_lora):
            self.lora_A = nn.Parameter(torch.empty((r * sum(enable_lora), in_features)))  # (4, 128)
            enable_q, enable_k, enable_v = enable_lora
            # qkv_shapes will be used to split a tensor with weights correctly
            qkv_shapes = (
                # if `head_size` is explicitly specified in the config, `n_embd` (or `in_features`)
                # might not be equal to `head_size * n_head`, thus we use it directly here
                head_size * n_head * enable_q,
                head_size * n_query_groups * enable_k,
                head_size * n_query_groups * enable_v,
            )
            self.qkv_shapes = [s for s in qkv_shapes if s]
            self.lora_B = nn.Parameter(torch.empty(sum(self.qkv_shapes), r))  # (256, 2))
            # Notes about shapes above
            # - self.lora_A has shape (4, 128): 4 because rank is 2 and LoRA is applied only to two matrices;
            #   128 is the input size of the x (embedding size). (4, 128) and not (128, 4) because later on in
            #   F.linear function weights are automatically transposed. In addition conv1d requires channels to
            #   be before seq length
            # - self.lora_B has shape (256, 2): 256 because LoRA is applied only to two matrices, so the output is
            #   128*2; 2 tells to have two channels per group for group convolution

            # Scaling:
            # This balances the pretrained model`s knowledge and the new task-specific adaptation
            # https://lightning.ai/pages/community/tutorial/lora-llm/
            # So, set alpha to 1.0 to fully add LoRA. If the LoRA seems to have too much effect (i.e., overfitted), set
            # alpha to lower value. If the LoRA seems to have too little effect, set alpha to higher than 1.0. You can
            # tune these values to your needs. This value can be even slightly greater than 1.0!
            # https://github.com/cloneofsimo/lora
            self.scaling = self.lora_alpha / self.r

            self.reset_parameters()

    @property
    def lora_ind(self) -> torch.Tensor:
        """Lazy creation of a buffer with LoRA indices to overcome the limitation when FSDP with meta device is used."""
        # Indices are needed to properly pad weight updates with zeros.
        if not hasattr(self, "_lora_ind"):
            enable_q, enable_k, enable_v = self.enable_lora
            kv_embd_size = self.linear.in_features // (self.n_head // self.n_query_groups)
            lora_ind = []
            if enable_q:
                lora_ind.extend(range(0, self.linear.in_features))
            if enable_k:
                lora_ind.extend(range(self.linear.in_features, self.linear.in_features + kv_embd_size))
            if enable_v:
                lora_ind.extend(range(self.linear.in_features + kv_embd_size, self.linear.out_features))
            self.register_buffer(
                "_lora_ind", torch.tensor(lora_ind, device=self.linear.weight.device), persistent=False
            )

        return self._lora_ind

    def zero_pad(self, x: torch.Tensor) -> torch.Tensor:
        """Properly pad the last dimension of weight updates with zeros.

        If, based on `self.enable_lora`, we want to fine-tune queries and values, but not keys,
        then the weights update should be:

        [[ΔW,ΔW,ΔW, ..., 0,0,0, ..., ΔW,ΔW,ΔW,],
         [....................................],
         [ΔW,ΔW,ΔW, ..., 0,0,0, ..., ΔW,ΔW,ΔW,]]
             ↑              ↑             ↑
        ________________________________________
        | query         | key       | value    |
        ----------------------------------------

        Args:
            x: tensor with weights update that will be padded with zeros if necessary

        Returns:
            A tensor with weight updates and zeros for deselected q, k or v
        """
        # we need to do zero padding only if LoRA is disabled for one of QKV matrices
        if all(self.enable_lora):
            return x

        # Let's image that:
        # ⚬ input x has shape (64, 64, 256): (batch_size, sequence_length, embeddings_size)
        # ⚬ embeddings_size: 128
        # ⚬ self.linear.out_features: 384 (3 * embeddings_size)
        # ⚬ enable_lora: [True, False, True]
        # Then x has embeddings_size of 256 (2 * 128 as enable_lora only for query and value, not keys) and expected
        # embeddings_size is 384 (self.linear.out_features), so that means that we need to pad from 256 to 384 with zeros, but
        # only for key updates (this is where self.lora_ind comes in handy)

        result = x.new_zeros(*x.shape[:-1], self.linear.out_features)  # (64, 64, 384)
        if result.device.type == "mps":
            result[..., self.lora_ind] = x
            return result
        else:
            return result.index_copy_(dim=-1, index=self.lora_ind, source=x)  # (64, 64, 384)

    def conv1d(self, input: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
        """An extension of the `torch.nn.functional.conv1d` function with a logic specific to grouped queries.

        If the number of heads is equal to the number of query groups - grouped queries are disabled
        (see scheme in `litgpt/config.py:Config`). In this case the combined QKV matrix consists of equally sized
        query, key and value parts, which means we can utilize `groups` argument from `conv1d`: with this argument the
        input and weight matrices will be split in equally sized parts and applied separately (like having multiple
        conv layers side by side).

        Otherwise QKV matrix consists of unequally sized parts and thus we have to split input and weight matrices manually,
        apply each part of the weight matrix to the corresponding input's part and concatenate the result.

        Args:
            input: input matrix of shape (B, C, T)
            weight: weight matrix of shape (C_output, rank, 1).
                "C_output" is defined as a sum of embedding sizes for each enabled LoRA layer (see init method of the class).

        Returns:
            A tensor with a shape (B, C_output, T)

        """
        if self.n_head == self.n_query_groups:
            return F.conv1d(input, weight, groups=sum(self.enable_lora))  # (B, C_output, T)

        # Notation:
        # ⚬ N: number of enabled LoRA layers (self.enable_lora)
        # ⚬ C_output': embeddings size for each LoRA layer (not equal in size)
        # ⚬ r: rank of all LoRA layers (equal in size)

        input_splitted = input.chunk(sum(self.enable_lora), dim=1)  # N * (B, C // N, T)
        weight_splitted = weight.split(self.qkv_shapes)  # N * (C_output', r, 1)
        return torch.cat(
            [F.conv1d(a, b) for a, b in zip(input_splitted, weight_splitted)],
            dim=1,  # (B, C_output', T)
        )  # (B, C_output, T)

    def get_lora_AB(self) -> torch.Tensor:
        """Return merged lora_A and lora_B matrices with the same shape as the pretrained weights."""
        # Let's assume that:
        # ⚬ self.linear.weight.data: (384, 128) or (3 * embedding_size, embedding_size)
        # ⚬ self.lora_A.data: (4, 128)
        # ⚬ self.lora_B.data: (256, 2)
        lora = self.conv1d(
            self.lora_A.data.unsqueeze(0),  # (4, 128) -> (1, 4, 128)
            self.lora_B.data.unsqueeze(-1),  # (256, 2) -> (256, 2, 1)
        ).squeeze(0)  # (1, 4, 128) @ (256, 2, 1) -> (1, 256, 128) -> (256, 128)
        return self.zero_pad(lora.T * self.scaling).T  # (256, 128) after zero_pad (384, 128)

    def merge(self) -> None:
        """Merges the LoRA weights into the full-rank weights (W = W + delta_W)."""
        if self.r > 0 and any(self.enable_lora) and not self.merged:
            super().merge()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Do the forward pass.

        If LoRA's weights are merged with pretrained ones then it's a simple matrix multiplication.
        If not, then multiply pretrained weights with input, apply LoRA on input and do summation.

        Args:
            x: input tensor of shape (batch_size, context_length, embedding_size)

        Returns:
            Output tensor of shape (batch_size, context_length, 3 * embedding_size)
        """

        # Let's assume that:
        # ⚬ x: (64, 64, 128) or (batch_size, context_length, embedding_size)
        # ⚬ self.linear.weight: (384, 128) or (3 * embedding_size, embedding_size)
        # ⚬ self.lora_A.data: (4, 128)
        # ⚬ self.lora_B.data: (256, 2)

        # if weights are merged or LoRA is disabled (r <= 0 or all `enable_lora` are False) - it's only a regular nn.Linear forward pass;
        # otherwise in addition do the forward pass with LoRA weights and add it's output to the output from pretrained weights
        pretrained = self.linear(x)
        if self.r == 0 or not any(self.enable_lora) or self.merged:
            return pretrained
        after_A = F.linear(self.lora_dropout(x), self.lora_A)  # (64, 64, 128) @ (4, 128) -> (64, 64, 4)
        # For F.conv1d:
        # ⚬ input: input tensor of shape (mini-batch, in_channels, iW)
        # ⚬ weight: filters of shape (out_channels, in_channels/groups, kW)
        after_B = self.conv1d(
            after_A.transpose(-2, -1),  # (64, 64, 4) -> (64, 4, 64)
            self.lora_B.unsqueeze(-1),  # (256, 2) -> (256, 2, 1)
        ).transpose(-2, -1)  # (64, 4, 64) @ (256, 2, 1) -> (64, 256, 64) -> (64, 64, 256)
        lora = self.zero_pad(after_B) * self.scaling  # (64, 64, 256) after zero_pad (64, 64, 384)
        return pretrained + lora


def mark_only_lora_as_trainable(model: nn.Module, bias: str = "none") -> None:
    """Freeze all modules except LoRA's and depending on 'bias' value unfreezes bias weights.

    Args:
        model: model with LoRA layers
        bias:
            ``"none"``: all bias weights will be frozen,
            ``"lora_only"``: only bias weight for LoRA layers will be unfrozen,
            ``"all"``: all bias weights will be unfrozen.

    Raises:
        NotImplementedError: if `bias` not in ["none", "lora_only", "all"]
    """
    # freeze all layers except LoRA's
    for n, p in model.named_parameters():
        if "lora_" not in n:
            p.requires_grad = False

    # depending on the `bias` value unfreeze bias weights
    if bias == "none":
        return
    if bias == "all":
        for n, p in model.named_parameters():
            if "bias" in n:
                p.requires_grad = True
    elif bias == "lora_only":
        for m in model.modules():
            if isinstance(m, LoRALayer) and hasattr(m, "bias") and m.bias is not None:
                m.bias.requires_grad = True
    else:
        raise NotImplementedError


def lora_filter(key: str, value: Any) -> bool:
    return "lora_" in key


@dataclass
class Config(BaseConfig):
    """
    Args:
        lora_r: rank of the weight update matrices. To make sense of using LoRA the rank should be smaller than the rank of
            the weights of the model. The rank can be as low as 1: https://arxiv.org/pdf/2106.09685.pdf (section 7.2)
        lora_alpha: alpha is needed for scaling updates as alpha/r
            "This scaling helps to reduce the need to retune hyperparameters when we vary r"
            https://arxiv.org/pdf/2106.09685.pdf (section 4.1)
        lora_dropout: dropout that is applied on the input in the LoRA branch (before multiplying by matrix A)
        lora_*: whether to apply LoRA to the specified weights or not
    """

    lora_r: int = 0
    lora_alpha: int = 1
    lora_dropout: float = 0.0
    lora_query: bool = False
    lora_key: bool = False
    lora_value: bool = False
    lora_projection: bool = False
    lora_mlp: bool = False
    lora_head: bool = False

    @property
    def mlp_class(self) -> Type:
        return getattr(litgpt.lora, self.mlp_class_name)


class GPT(BaseModel):
    # Copy & paste from :class:`model.GPT`. Note that :class:`Block` is new here.
    def __init__(self, config: Config) -> None:
        nn.Module.__init__(self)
        assert config.padded_vocab_size is not None
        self.config = config

        self.lm_head = create_lora_linear(
            config,
            config.n_embd,
            config.padded_vocab_size,
            bias=config.lm_head_bias,
            use_r=config.lora_head,
        )
        self.transformer = nn.ModuleDict(
            dict(
                wte=nn.Embedding(config.padded_vocab_size, config.n_embd),
                h=nn.ModuleList(Block(config, block_idx) for block_idx in range(config.n_layer)),
                ln_f=config.norm_class(config.n_embd, eps=config.norm_eps),
            )
        )
        self.mask_cache: Optional[torch.Tensor] = None
        self.max_seq_length = self.config.block_size

    @classmethod
    def from_name(cls, name: str, **kwargs: Any) -> Self:
        return cls(Config.from_name(name, **kwargs))

    def _init_weights(self, module: nn.Module) -> None:
        """Meant to be used with `gpt.apply(gpt._init_weights)`. Unused method left for completeness."""
        super()._init_weights(module)
        if isinstance(module, LoRALinear):
            module.reset_parameters()

    def _load_from_state_dict(self, state_dict: Dict, prefix: str, *args: Any, **kwargs: Any) -> None:
        """For compatibility with base checkpoints."""
        mapping = {"lm_head.weight": "lm_head.linear.weight", "lm_head.bias": "lm_head.linear.bias"}
        state_dict = map_old_state_dict_weights(state_dict, mapping, prefix)
        super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)


class Block(BaseBlock):
    def __init__(self, config: Config, block_idx: int) -> None:
        super().__init__(config, block_idx)
        self.attn = CausalSelfAttention(config, block_idx)
        self.mlp = config.mlp_class(config)


class CausalSelfAttention(BaseCausalSelfAttention):
    def __init__(self, config: Config, block_idx: int) -> None:
        super().__init__(config, block_idx)
        # key, query, value projections for all heads, but in a batch
        shape = (config.n_head + 2 * config.n_query_groups) * config.head_size
        self.qkv = LoRAQKVLinear(
            in_features=config.n_embd,
            out_features=shape,
            r=config.lora_r,
            lora_alpha=config.lora_alpha,
            lora_dropout=config.lora_dropout,
            enable_lora=(config.lora_query, config.lora_key, config.lora_value),
            bias=config.bias or config.attn_bias,
            # for MQA/GQA support
            head_size=config.head_size,
            n_head=config.n_head,
            n_query_groups=config.n_query_groups,
        )
        # output projection
        self.proj = create_lora_linear(
            config,
            config.head_size * config.n_head,
            config.n_embd,
            use_r=config.lora_projection,
        )

    def _load_from_state_dict(self, state_dict: Dict, prefix: str, *args: Any, **kwargs: Any) -> None:
        """For compatibility with base and/or legacy checkpoints."""
        mapping = {
            "qkv.weight": "qkv.linear.weight",
            "qkv.bias": "qkv.linear.bias",
            "proj.weight": "proj.linear.weight",
            "proj.bias": "proj.linear.bias",
        }
        state_dict = map_old_state_dict_weights(state_dict, mapping, prefix)

        for attr in ("weight", "bias"):
            legacy_key = f"{prefix}attn.linear.{attr}"
            current_key = f"{prefix}qkv.linear.{attr}"
            if legacy_key in state_dict:
                state_dict[current_key] = qkv_reassemble(state_dict.pop(legacy_key), self.config)

        super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)


def create_lora_linear(
    config: Config,
    in_size: int,
    out_size: int,
    bias: Optional[Union[float, bool]] = None,
    use_r: Optional[bool] = None,
) -> LoRALinear:
    if bias is None:
        bias = config.bias
    if use_r is None:
        use_r = config.lora_mlp
    return LoRALinear(
        in_size,
        out_size,
        bias=bias,
        r=(config.lora_r if use_r else 0),
        lora_alpha=config.lora_alpha,
        lora_dropout=config.lora_dropout,
    )


class GptNeoxMLP(litgpt.model.GptNeoxMLP):
    def __init__(self, config: Config) -> None:
        nn.Module.__init__(self)
        self.fc = create_lora_linear(config, config.n_embd, config.intermediate_size)
        self.proj = create_lora_linear(config, config.intermediate_size, config.n_embd)
        self.config = config

    def _load_from_state_dict(self, state_dict: Dict, prefix: str, *args: Any, **kwargs: Any) -> None:
        """For compatibility with base checkpoints."""
        mapping = {
            "fc.weight": "fc.linear.weight",
            "fc.bias": "fc.linear.bias",
            "proj.weight": "proj.linear.weight",
            "proj.bias": "proj.linear.bias",
        }
        state_dict = map_old_state_dict_weights(state_dict, mapping, prefix)
        super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)


class LLaMAMLP(litgpt.model.LLaMAMLP):
    def __init__(self, config: Config, intermediate_size: Optional[int] = None) -> None:
        nn.Module.__init__(self)
        self.intermediate_size = intermediate_size or config.intermediate_size
        self.fc_1 = create_lora_linear(config, config.n_embd, self.intermediate_size)
        self.fc_2 = create_lora_linear(config, config.n_embd, self.intermediate_size)
        self.proj = create_lora_linear(config, self.intermediate_size, config.n_embd)
        self.config = config

    def _load_from_state_dict(self, state_dict: Dict, prefix: str, *args: Any, **kwargs: Any) -> None:
        """For compatibility with base checkpoints."""
        mapping = {
            "fc_1.weight": "fc_1.linear.weight",
            "fc_1.bias": "fc_1.linear.bias",
            "fc_2.weight": "fc_2.linear.weight",
            "fc_2.bias": "fc_2.linear.bias",
            "proj.weight": "proj.linear.weight",
            "proj.bias": "proj.linear.bias",
        }
        state_dict = map_old_state_dict_weights(state_dict, mapping, prefix)
        super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)


class GemmaMLP(LLaMAMLP):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x_fc_1 = self.fc_1(x)
        x_fc_2 = self.fc_2(x)
        x = torch.nn.functional.gelu(x_fc_1, approximate=self.config.gelu_approximate) * x_fc_2
        return self.proj(x)


class LLaMAMoE(litgpt.model.LLaMAMoE):
    def __init__(self, config: Config) -> None:
        nn.Module.__init__(self)
        self.gate = create_lora_linear(config, config.n_embd, config.n_expert, bias=False)
        self.experts = nn.ModuleList(
            LLaMAMLP(config, intermediate_size=config.moe_intermediate_size) for _ in range(config.n_expert)
        )
        self.config = config

    def _load_from_state_dict(self, state_dict: Dict, prefix: str, *args: Any, **kwargs: Any) -> None:
        """For compatibility with base checkpoints."""
        mapping = {"gate.weight": "gate.linear.weight"}
        state_dict = map_old_state_dict_weights(state_dict, mapping, prefix)
        super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)


def merge_lora_weights(model: GPT) -> None:
    """Merge LoRA weights into the full-rank weights to speed up inference."""
    for module in model.modules():
        if isinstance(module, LoRALinear):
            module.merge()
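To make the scaling and merge logic of `LoRALinear` above concrete, here is a small self-contained numerical check (an editorial sketch only, not part of litgpt/lora.py; dimensions and random values are illustrative) showing that the unmerged forward pass `pretrained + lora` and the merged weight `W + (alpha/r) * B @ A` compute the same output:

import torch

d_in, d_out, r, alpha = 8, 8, 2, 1
W = torch.randn(d_out, d_in)      # frozen pretrained weight (self.linear.weight)
A = torch.randn(r, d_in) * 0.01   # lora_A; in the module it is Kaiming-initialized
B = torch.randn(d_out, r)         # lora_B; in the module it starts at zero, so delta_W starts at 0
scaling = alpha / r

x = torch.randn(4, d_in)
unmerged = x @ W.T + (x @ A.T @ B.T) * scaling   # forward(): pretrained output plus LoRA branch
merged = x @ (W + (B @ A) * scaling).T           # merge(): fold delta_W into the full-rank weight
print(torch.allclose(unmerged, merged, atol=1e-6))  # True: both paths are equivalent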
litgpt/model.py
ADDED
|
@@ -0,0 +1,876 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.

"""Full definition of a decoder-only transformer-based language model, all of it in this single file.

Based on the nanoGPT implementation: https://github.com/karpathy/nanoGPT and
https://github.com/EleutherAI/gpt-neox/tree/main/megatron/model.
"""

import math
from functools import partial
from typing import Any, List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from typing_extensions import Self

from litgpt.config import Config
from litgpt.scripts.convert_hf_checkpoint import qkv_reassemble


class GPT(nn.Module):
    def __init__(self, config: Config) -> None:
        super().__init__()
        assert config.padded_vocab_size is not None
        self.config = config

        self.lm_head = nn.Linear(config.n_embd, config.padded_vocab_size, bias=config.lm_head_bias)
        self.transformer = nn.ModuleDict(
            dict(
                wte=nn.Embedding(config.padded_vocab_size, config.n_embd),
                h=nn.ModuleList(Block(config, block_idx) for block_idx in range(config.n_layer)),
                ln_f=config.norm_class(config.n_embd, eps=config.norm_eps),
            )
        )
        self.mask_cache: Optional[torch.Tensor] = None
        self.max_seq_length = self.config.block_size

    @property
    def max_seq_length(self) -> int:
        return self._max_seq_length

    @max_seq_length.setter
    def max_seq_length(self, value: int) -> None:
        """
        When doing inference, the sequences used might be shorter than the model's context length.
        This allows setting a smaller number to avoid allocating unused memory
        """
        if value > self.config.block_size:
            raise ValueError(
                f"Cannot attend to {value}, block size is only {self.config.block_size}."
                " This is likely because the input text exceeds the supported context length of this model."
            )
        self._max_seq_length = value
        if not hasattr(self, "cos"):
            # first call
            cos, sin = self.rope_cache()
            self.register_buffer("cos", cos, persistent=False)
            self.register_buffer("sin", sin, persistent=False)
        # override
        elif value != self.cos.size(0):
            self.cos, self.sin = self.rope_cache(device=self.cos.device)
        # the mask and kv cache size will get updated on `set_kv_cache`. we cannot update it here because we don't know
        # if the kv cache is expected
        if self.mask_cache is not None and self.mask_cache.shape[-1] < value:
            print(
                f"Warning: KV cache has length {self.mask_cache.shape[-1]} < {value} = max_seq_length. Call 'set_kv_cache' before doing any forwards!"
            )

    def reset_parameters(self) -> None:
        # Trigger resetting the rope-cache
        self.cos, self.sin = self.rope_cache(device=self.cos.device)

    def _init_weights(self, module: nn.Module) -> None:
        """Meant to be used with `gpt.apply(gpt._init_weights)`."""
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)

    def forward(
        self,
        idx: torch.Tensor,
        input_pos: Optional[torch.Tensor] = None,
        input_pos_maxp1: Optional[int] = None,
        lm_head_chunk_size: int = 0,
    ) -> Union[torch.Tensor, List[torch.Tensor]]:
        """
        If `input_pos` is provided, the KV cache uses K and V vectors for
        positions smaller than entries in `input_pos`. For efficiency, pass
        `input_pos_maxp1` as `max(input_pos) + 1` if already available from
        your forward algorithm. This slices the KV cache buffers and speeds
        up multi-head attention.

        Without `input_pos_maxp1`, the computation uses the full KV cache
        (`max_seq_length`) with masking applied. Note that inferring
        `input_pos_maxp1` from `input_pos` causes graph breaks and prevents
        compilation.

        Args:
            idx: Token indices of input sequences, shape `(B, T)`, where `B`
                is batch size.
            input_pos: Optional. Positions of input tokens. The default is
                `arange(T)`. Can have shape `(T,)` or `(B, T)` (batched index).
            input_pos_maxp1: Optional. See above.
            lm_head_chunk_size: Optional. If `lm_head_chunk_size > 0`, the final
                `lm_head` computation is done in chunks of this size.

        Returns:
            Logit outputs, shape `(B, T, config.padded_vocab_size)`. If
            `lm_head_chunk_size > 0`, this is a list of chunks of shape
            `(B, lm_head_chunk_size, config.padded_vocab_size)`, the final
            entry can be shorter.

        """
        T = idx.size(1)
        if self.max_seq_length < T:
            raise ValueError(f"Cannot forward sequence of length {T}, max seq length is only {self.max_seq_length}.")

        if input_pos is not None:  # use the kv cache
            if input_pos.dim() > 2:
                # otherwise, things go wrong in `apply_rope`
                raise ValueError(f"input_pos must have 1 or 2 dimensions, input_pos.shape = {input_pos.shape}")
            if input_pos.shape[-1] != T:
                raise ValueError(f"input_pos.shape[-1] = {input_pos.shape[-1]} != {T} = idx.shape[1], must be the same")
            cos = batched_index_select(self.cos, 0, input_pos)
            sin = batched_index_select(self.sin, 0, input_pos)
            if input_pos.dim() == 1:
                cos = cos.unsqueeze(0)
                sin = sin.unsqueeze(0)
            if self.mask_cache is None:
                raise TypeError("You need to call `gpt.set_kv_cache()`")
            mask = batched_index_select(self.mask_cache, 2, input_pos)
            if mask.dim() > 4:
                # the mask cache has a batch dim of 1 in addition to the one
                # we get if input_pos has a batch dimension
                mask = mask.view(*(mask.shape[0:1] + mask.shape[2:]))
            if input_pos_maxp1 is not None:
                # Shorten final dimension so it just covers all `input_pos` entries
                if input_pos_maxp1 > self.max_seq_length:
                    raise ValueError(f"Positions in 'input_pos' must be in [0,{self.max_seq_length})")
                mask = mask[..., :input_pos_maxp1]
        else:
            # unsqueeze to have a batch dimension
            cos = self.cos[:T].unsqueeze(0)
            sin = self.sin[:T].unsqueeze(0)
            # `cos`, `sin` have shape (1, T, config.rope_n_elem)
            mask = None  # defaults to causal mask
            input_pos_maxp1 = None

        x = self.transformer.wte(idx)  # token embeddings of shape (B, T, n_embd)
        if self.config.scale_embeddings:
            x = x * torch.tensor(self.config.n_embd**0.5, dtype=x.dtype)

        for block_idx, block in enumerate(self.transformer.h):
            if self.config.rope_indices is not None:
                x = block(
                    x,
                    cos[..., self.config.rope_indices[block_idx]],
                    sin[..., self.config.rope_indices[block_idx]],
                    mask,
                    input_pos,
                    input_pos_maxp1,
                )
            else:
                x = block(x, cos, sin, mask, input_pos, input_pos_maxp1)
        x = self.transformer.ln_f(x)
        clamp_head = (
            partial(do_softcapping, thresh=self.config.final_logit_softcapping)
            if self.config.final_logit_softcapping is not None
|
| 173 |
+
else nn.Identity()
|
| 174 |
+
)
|
| 175 |
+
if lm_head_chunk_size > 0:
|
| 176 |
+
# chunk the lm head logits to reduce the peak memory used by autograd
|
| 177 |
+
return [clamp_head(self.lm_head(x_i)) for x_i in x.split(lm_head_chunk_size, dim=1)]
|
| 178 |
+
else:
|
| 179 |
+
return clamp_head(self.lm_head(x)) # (B, T, padded_vocab_size)
|
| 180 |
+
|
| 181 |
+
@classmethod
|
| 182 |
+
def from_name(cls, name: str, **kwargs: Any) -> Self:
|
| 183 |
+
return cls(Config.from_name(name, **kwargs))
|
| 184 |
+
|
| 185 |
+
def rope_cache(self, device: Optional[torch.device] = None) -> Tuple[torch.Tensor, torch.Tensor]:
|
| 186 |
+
if self.config.rope_adjustments is None:
|
| 187 |
+
extra_config = None
|
| 188 |
+
|
| 189 |
+
else:
|
| 190 |
+
adjusted_params_required = ["factor", "low_freq_factor", "high_freq_factor", "original_max_seq_len"]
|
| 191 |
+
params_present = [param in self.config.rope_adjustments for param in adjusted_params_required]
|
| 192 |
+
num_params_present = sum(params_present)
|
| 193 |
+
|
| 194 |
+
if num_params_present == 0:
|
| 195 |
+
extra_config = None # uses standard RoPE
|
| 196 |
+
elif num_params_present == 4:
|
| 197 |
+
# These parameters should always be used together so that we don't interfere with standard rope
|
| 198 |
+
extra_config = {name: self.config.rope_adjustments[name] for name in adjusted_params_required}
|
| 199 |
+
elif "factor" in self.config.rope_adjustments:
|
| 200 |
+
# linear RoPE
|
| 201 |
+
adjusted_params_required = ["factor"]
|
| 202 |
+
extra_config = {name: self.config.rope_adjustments[name] for name in adjusted_params_required}
|
| 203 |
+
else:
|
| 204 |
+
# Some but not all parameters are specified; raise an error
|
| 205 |
+
missing_params = [
|
| 206 |
+
param for param, present in zip(adjusted_params_required, params_present) if not present
|
| 207 |
+
]
|
| 208 |
+
raise ValueError(
|
| 209 |
+
f"The following adjusted RoPE parameters are missing in rope_adjustments: {', '.join(missing_params)}. "
|
| 210 |
+
"All adjusted RoPE parameters must be specified together."
|
| 211 |
+
)
|
| 212 |
+
|
| 213 |
+
return build_rope_cache(
|
| 214 |
+
seq_len=self.max_seq_length,
|
| 215 |
+
n_elem=self.config.rope_n_elem,
|
| 216 |
+
device=device,
|
| 217 |
+
condense_ratio=self.config.rope_condense_ratio,
|
| 218 |
+
base=self.config.rope_base,
|
| 219 |
+
extra_config=extra_config,
|
| 220 |
+
rope_local_base_freq=self.config.rope_local_base_freq,
|
| 221 |
+
)
|
| 222 |
+
|
| 223 |
+
def set_kv_cache(
|
| 224 |
+
self,
|
| 225 |
+
batch_size: int,
|
| 226 |
+
max_seq_length: Optional[int] = None,
|
| 227 |
+
rope_cache_length: Optional[int] = None,
|
| 228 |
+
device: Optional[torch.device] = None,
|
| 229 |
+
dtype: Optional[torch.dtype] = None,
|
| 230 |
+
) -> None:
|
| 231 |
+
if rope_cache_length is None:
|
| 232 |
+
if len(self.cos.shape) == 2:
|
| 233 |
+
rope_cache_length = self.cos.size(-1)
|
| 234 |
+
else:
|
| 235 |
+
rope_cache_length = self.cos[..., 0].size(-1)
|
| 236 |
+
|
| 237 |
+
if max_seq_length is None:
|
| 238 |
+
max_seq_length = self.max_seq_length
|
| 239 |
+
|
| 240 |
+
# initialize the kv cache for all blocks
|
| 241 |
+
for block in self.transformer.h:
|
| 242 |
+
block.attn.kv_cache = block.attn.build_kv_cache(
|
| 243 |
+
batch_size,
|
| 244 |
+
max_seq_length,
|
| 245 |
+
rope_cache_length,
|
| 246 |
+
device,
|
| 247 |
+
dtype,
|
| 248 |
+
)
|
| 249 |
+
|
| 250 |
+
if self.mask_cache is None or self.mask_cache.size(3) != max_seq_length:
|
| 251 |
+
# passing `attn_mask` to SDPA disables the flash implementation. since we only need the mask
|
| 252 |
+
# for the kv-cache support (only during inference), we only create it in that situation
|
| 253 |
+
self.mask_cache = build_mask_cache(max_seq_length, device)
|
| 254 |
+
|
| 255 |
+
def clear_kv_cache(self) -> None:
|
| 256 |
+
self.mask_cache = None
|
| 257 |
+
for block in self.transformer.h:
|
| 258 |
+
block.attn.kv_cache = None
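# A minimal sketch of how the class above is used for KV-cached decoding. It assumes
# the default `Config` field values (e.g. `rope_indices=None`); the sizes below are
# illustrative placeholders, not a recommended configuration.
def _example_kv_cached_decoding() -> torch.Tensor:
    config = Config(block_size=128, vocab_size=100, n_layer=2, n_head=4, n_embd=64)
    model = GPT(config).eval()
    model.max_seq_length = 32          # shrink the RoPE/mask buffers for short sequences
    model.set_kv_cache(batch_size=1)   # required before passing `input_pos`
    prompt = torch.randint(0, 100, (1, 8))  # (B, T) token indices
    with torch.no_grad():
        # prefill: process the whole prompt and cache its keys/values
        logits = model(prompt, input_pos=torch.arange(8), input_pos_maxp1=8)
        next_token = logits[:, -1].argmax(dim=-1, keepdim=True)  # greedy next token, (B, 1)
        # decode step: feed one token together with its absolute position
        logits = model(next_token, input_pos=torch.tensor([8]), input_pos_maxp1=9)
    return logits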
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
class Block(nn.Module):
|
| 262 |
+
def __init__(
|
| 263 |
+
self,
|
| 264 |
+
config: Config,
|
| 265 |
+
block_idx: int,
|
| 266 |
+
) -> None:
|
| 267 |
+
super().__init__()
|
| 268 |
+
if not config.parallel_residual and config.shared_attention_norm:
|
| 269 |
+
raise NotImplementedError(
|
| 270 |
+
"No checkpoint amongst the ones we support uses this configuration"
|
| 271 |
+
" (non-parallel residual and shared attention norm)."
|
| 272 |
+
)
|
| 273 |
+
|
| 274 |
+
self.norm_1 = nn.Identity() if not config.norm_1 else config.norm_class(config.n_embd, eps=config.norm_eps)
|
| 275 |
+
self.attn = CausalSelfAttention(config, block_idx)
|
| 276 |
+
self.post_attention_norm = (
|
| 277 |
+
config.norm_class(config.n_embd, eps=config.norm_eps) if config.post_attention_norm else nn.Identity()
|
| 278 |
+
)
|
| 279 |
+
self.norm_2 = (
|
| 280 |
+
nn.Identity()
|
| 281 |
+
if not config.norm_2
|
| 282 |
+
else (None if config.shared_attention_norm else config.norm_class(config.n_embd, eps=config.norm_eps))
|
| 283 |
+
)
|
| 284 |
+
self.mlp = config.mlp_class(config)
|
| 285 |
+
self.post_mlp_norm = (
|
| 286 |
+
config.norm_class(config.n_embd, eps=config.norm_eps) if config.post_mlp_norm else nn.Identity()
|
| 287 |
+
)
|
| 288 |
+
|
| 289 |
+
self.config = config
|
| 290 |
+
|
| 291 |
+
def forward(
|
| 292 |
+
self,
|
| 293 |
+
x: torch.Tensor,
|
| 294 |
+
cos: torch.Tensor,
|
| 295 |
+
sin: torch.Tensor,
|
| 296 |
+
mask: Optional[torch.Tensor] = None,
|
| 297 |
+
input_pos: Optional[torch.Tensor] = None,
|
| 298 |
+
input_pos_maxp1: Optional[int] = None,
|
| 299 |
+
) -> torch.Tensor:
|
| 300 |
+
"""
|
| 301 |
+
Non-parallel residual Parallel residual
|
| 302 |
+
┌─ x ┌─ x ──────────────────┐ Note: if `shared_attention_norm` is True,
|
| 303 |
+
│ ↓ │ ↓ ↓ the output from `norm_1` is reused
|
| 304 |
+
│ norm_1 │ norm_1 ───────► norm_2
|
| 305 |
+
│ ↓ │ ↓ ↓
|
| 306 |
+
│ attn │ attn MLP
|
| 307 |
+
│ ↓ │ ↓ ↓
|
| 308 |
+
| post_attn_norm | post_attn_norm post_mlp_norm
|
| 309 |
+
| ↓ | ↓ ↓
|
| 310 |
+
┌─ └► + └► + ◄─────────────────┘
|
| 311 |
+
| ↓
|
| 312 |
+
│ norm_2
|
| 313 |
+
│ ↓
|
| 314 |
+
│ MLP
|
| 315 |
+
│ ↓
|
| 316 |
+
| post_mlp_norm
|
| 317 |
+
| ↓
|
| 318 |
+
└───► +
|
| 319 |
+
"""
|
| 320 |
+
|
| 321 |
+
x_normed = self.norm_1(x)
|
| 322 |
+
attention_output = self.attn(x_normed, cos, sin, mask, input_pos, input_pos_maxp1)
|
| 323 |
+
attention_output = self.post_attention_norm(attention_output)
|
| 324 |
+
|
| 325 |
+
if self.config.parallel_residual:
|
| 326 |
+
if not self.config.shared_attention_norm:
|
| 327 |
+
x_normed = self.norm_2(x)
|
| 328 |
+
x = attention_output + x
|
| 329 |
+
else:
|
| 330 |
+
x = attention_output + x
|
| 331 |
+
x_normed = self.norm_2(x)
|
| 332 |
+
|
| 333 |
+
return self.post_mlp_norm(self.mlp(x_normed)) + x
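# A compact restatement of the two residual layouts implemented above (the
# non-shared-norm case), written over generic callables so the data flow from the
# docstring diagram is explicit. These helpers are illustrative only.
def _sequential_residual(x, norm_1, attn, post_attn_norm, norm_2, mlp, post_mlp_norm):
    x = post_attn_norm(attn(norm_1(x))) + x      # attention branch, then residual
    return post_mlp_norm(mlp(norm_2(x))) + x     # MLP branch reads the updated x


def _parallel_residual(x, norm_1, attn, post_attn_norm, norm_2, mlp, post_mlp_norm):
    # attention and MLP both read from the input x and are summed at the end
    return post_attn_norm(attn(norm_1(x))) + post_mlp_norm(mlp(norm_2(x))) + x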
|
| 334 |
+
|
| 335 |
+
|
| 336 |
+
class CausalSelfAttention(nn.Module):
|
| 337 |
+
def __init__(self, config: Config, block_idx: int) -> None:
|
| 338 |
+
super().__init__()
|
| 339 |
+
# key, query and value projections for all heads, but in a batch
|
| 340 |
+
self.qkv = nn.Linear(
|
| 341 |
+
config.n_embd,
|
| 342 |
+
(config.n_head + 2 * config.n_query_groups) * config.head_size, # support for grouped/multi queries
|
| 343 |
+
bias=config.bias or config.attn_bias,
|
| 344 |
+
)
|
| 345 |
+
# output projection
|
| 346 |
+
self.proj = nn.Linear(config.head_size * config.n_head, config.n_embd, bias=config.bias)
|
| 347 |
+
# disabled by default
|
| 348 |
+
self.kv_cache: Optional[KVCache] = None
|
| 349 |
+
self.apply_sliding_window_attention = False
|
| 350 |
+
if config.sliding_window_size is not None and config.sliding_window_indices is not None:
|
| 351 |
+
self.apply_sliding_window_attention = config.sliding_window_indices[block_idx]
|
| 352 |
+
|
| 353 |
+
if config.norm_qk:
|
| 354 |
+
norm_q_size = config.n_head * config.head_size if config.norm_qk_type == "olmo2" else config.head_size
|
| 355 |
+
norm_k_size = (
|
| 356 |
+
config.n_query_groups * config.head_size if config.norm_qk_type == "olmo2" else config.head_size
|
| 357 |
+
)
|
| 358 |
+
self.norm_q = config.norm_class(norm_q_size, eps=config.norm_eps)
|
| 359 |
+
self.norm_k = config.norm_class(norm_k_size, eps=config.norm_eps)
|
| 360 |
+
else:
|
| 361 |
+
self.norm_q = self.norm_k = None
|
| 362 |
+
|
| 363 |
+
self.config = config
|
| 364 |
+
self.block_idx = block_idx
|
| 365 |
+
|
| 366 |
+
def forward(
|
| 367 |
+
self,
|
| 368 |
+
x: torch.Tensor,
|
| 369 |
+
cos: torch.Tensor,
|
| 370 |
+
sin: torch.Tensor,
|
| 371 |
+
mask: Optional[torch.Tensor] = None,
|
| 372 |
+
input_pos: Optional[torch.Tensor] = None,
|
| 373 |
+
input_pos_maxp1: Optional[int] = None,
|
| 374 |
+
) -> torch.Tensor:
|
| 375 |
+
# Notation:
|
| 376 |
+
# - B | batch size
|
| 377 |
+
# - T | time-step (sequence length)
|
| 378 |
+
# - C | model's embeddings size (n_embd)
|
| 379 |
+
# - C* | attention's embedding size
|
| 380 |
+
# - hs | head size
|
| 381 |
+
# - nh_(q,k,v) | number of heads for query, key and value
|
| 382 |
+
# - n_query_groups = nh_k = nh_v | number of query groups sharing key and value heads
|
| 383 |
+
# alternative notation: num_kv_groups = n_query_groups
|
| 384 |
+
# ┌───┐┌───┐┌───┐┌───┐ ┌───┐ ┌───┐ ┌───┐
|
| 385 |
+
# │ v ││ v ││ v ││ v │ │ v │ │ v │ │ v │
|
| 386 |
+
# └───┘└───┘└───┘└───┘ └───┘ └───┘ └───┘
|
| 387 |
+
# │ │ │ │ │ │ │
|
| 388 |
+
# ┌───┐┌───┐┌───┐┌───┐ ┌───┐ ┌───┐ ┌───┐
|
| 389 |
+
# │ k ││ k ││ k ││ k │ │ k │ │ k │ │ k │
|
| 390 |
+
# └───┘└───┘└───┘└───┘ └───┘ └───┘ └───┘
|
| 391 |
+
# │ │ │ │ ┌──┴──┐ ┌──┴──┐ ┌────┬──┴─┬────┐
|
| 392 |
+
# ┌───┐┌───┐┌───┐┌───┐ ┌───┐┌───┐┌───┐┌───┐ ┌───┐┌───┐┌───┐┌───┐
|
| 393 |
+
# │ q ││ q ││ q ││ q │ │ q ││ q ││ q ││ q │ │ q ││ q ││ q ││ q │
|
| 394 |
+
# └───┘└───┘└───┘└───┘ └───┘└───┘└───┘└───┘ └───┘└───┘└───┘└───┘
|
| 395 |
+
# ◀──────────────────▶ ◀──────────────────▶ ◀──────────────────▶
|
| 396 |
+
# MHA GQA MQA
|
| 397 |
+
# n_query_groups=4 n_query_groups=2 n_query_groups=1
|
| 398 |
+
#
|
| 399 |
+
# credit https://arxiv.org/pdf/2305.13245.pdf
|
| 400 |
+
head_size = self.config.head_size
|
| 401 |
+
n_head = self.config.n_head
|
| 402 |
+
n_query_groups = self.config.n_query_groups
|
| 403 |
+
rope_n_elem = self.config.rope_n_elem
|
| 404 |
+
B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)
|
| 405 |
+
|
| 406 |
+
# Perform a single multiplication operation using a combined QKV matrix to calculate `query`, `key`, and `value`
|
| 407 |
+
# instead of individually multiplying the input `x` with the respective weight matrices.
|
| 408 |
+
qkv = self.qkv(x) # (B, T, 3xC*)
|
| 409 |
+
|
| 410 |
+
# Define query, key and value sizes.
|
| 411 |
+
# If grouped/multi query is enabled, these sizes are not equal (see the diagram above).
|
| 412 |
+
query_size = n_head * head_size
|
| 413 |
+
key_size = value_size = n_query_groups * head_size
|
| 414 |
+
# Split qkv into query, key and value matrices.
|
| 415 |
+
q, k, v = qkv.split((query_size, key_size, value_size), dim=-1) # 3x(B, T, C*)
|
| 416 |
+
|
| 417 |
+
if self.config.norm_qk and self.config.norm_qk_type == "olmo2":
|
| 418 |
+
q = self.norm_q(q)
|
| 419 |
+
k = self.norm_k(k)
|
| 420 |
+
|
| 421 |
+
# To place the num_heads (nh) dimension right after the batch (B) dimension, the first step is to decouple the
|
| 422 |
+
# embedding size (C) into num_heads (nh) and head_size (hs).
|
| 423 |
+
|
| 424 |
+
# The original GQA paper is followed here and the term query groups is used.
|
| 425 |
+
# alternative notation: Query groups are also referred to as KV groups.
|
| 426 |
+
q = q.view(B, T, n_head, head_size) # (B, T, nh_q, hs)
|
| 427 |
+
k = k.view(B, T, n_query_groups, head_size) # (B, T, n_query_groups, hs)
|
| 428 |
+
v = v.view(B, T, n_query_groups, head_size) # (B, T, n_query_groups, hs)
|
| 429 |
+
|
| 430 |
+
# The tensors `query`, `key`, and `value` are now accurately structured: within each batch element (B), there are
|
| 431 |
+
# multiple heads (nh), and within each head, there is a sequence of elements (T), each represented by a vector
|
| 432 |
+
# of size `hs`.
|
| 433 |
+
q = q.transpose(1, 2) # (B, nh_q, T, hs)
|
| 434 |
+
k = k.transpose(1, 2) # (B, nh_k, T, hs)
|
| 435 |
+
v = v.transpose(1, 2) # (B, nh_v, T, hs)
|
| 436 |
+
|
| 437 |
+
if self.config.norm_qk and self.config.norm_qk_type == "default":
|
| 438 |
+
q = self.norm_q(q)
|
| 439 |
+
k = self.norm_k(k)
|
| 440 |
+
|
| 441 |
+
# Unlike standard positional embeddings, rotary embeddings must be applied at every layer.
|
| 442 |
+
q_roped = apply_rope(q[..., :rope_n_elem], cos, sin)
|
| 443 |
+
k_roped = apply_rope(k[..., :rope_n_elem], cos, sin)
|
| 444 |
+
q = torch.cat((q_roped, q[..., rope_n_elem:]), dim=-1) # (B, nh_q, T, hs)
|
| 445 |
+
k = torch.cat((k_roped, k[..., rope_n_elem:]), dim=-1) # (B, nh_k, T, hs)
|
| 446 |
+
|
| 447 |
+
# Apply kv-cache during inference.
|
| 448 |
+
if input_pos is not None:
|
| 449 |
+
if not isinstance(self.kv_cache, KVCache):
|
| 450 |
+
raise TypeError("You need to call `gpt.set_kv_cache()`")
|
| 451 |
+
k, v = self.kv_cache(input_pos, k, v)
|
| 452 |
+
if input_pos_maxp1 is not None:
|
| 453 |
+
# Subselect along sequence dimension
|
| 454 |
+
k = k[..., :input_pos_maxp1, :]
|
| 455 |
+
v = v[..., :input_pos_maxp1, :]
|
| 456 |
+
# k, v: (B, nh_k, input_pos_maxp1, hs)
|
| 457 |
+
# If input_pos_maxp1 is None -> max_seq_length
|
| 458 |
+
|
| 459 |
+
# Grouped queries: balance the number of heads across all three matrices.
|
| 460 |
+
# NOTE: flash attention requires it in training mode.
|
| 461 |
+
# Multi-query: this step can be skipped since there is only 1 head, allowing us to use broadcasting.
|
| 462 |
+
if n_query_groups != n_head and (input_pos is None or n_query_groups != 1):
|
| 463 |
+
q_per_kv = n_head // n_query_groups
|
| 464 |
+
k = k.repeat_interleave(q_per_kv, dim=1) # (B, nh_q, T, hs)
|
| 465 |
+
v = v.repeat_interleave(q_per_kv, dim=1) # (B, nh_q, T, hs)
|
| 466 |
+
|
| 467 |
+
if self.apply_sliding_window_attention:
|
| 468 |
+
"""
|
| 469 |
+
Global Window Sliding window Sliding window
|
| 470 |
+
attention mask + bias = attention mask
|
| 471 |
+
┌────────────────────────┐ ┌───────────────────────┐ ┌─────────────────────────┐
|
| 472 |
+
│ True False False False │ │ True True True True │ │ True False False False │
|
| 473 |
+
│ True True False False │ │ True True True True │ │ True True False False │
|
| 474 |
+
│ True True True False │ │ False True True True │ │ False True True False │
|
| 475 |
+
│ True True True True │ │ False False True True │ │ False False True True │
|
| 476 |
+
└────────────────────────┘ └───────────────────────┘ └─────────────────────────┘
|
| 477 |
+
"""
|
| 478 |
+
if mask is None:
|
| 479 |
+
mask = torch.ones(T, T, dtype=q.dtype, device=q.device).triu(diagonal=1)
|
| 480 |
+
mask.masked_fill_(mask.bool(), float("-inf"))
|
| 481 |
+
mask = mask.view(1, 1, *mask.shape)
|
| 482 |
+
sliding_window_bias = torch.ones_like(mask).tril(diagonal=-self.config.sliding_window_size)
|
| 483 |
+
sliding_window_bias.masked_fill_(sliding_window_bias.bool(), float("-inf"))
|
| 484 |
+
mask += sliding_window_bias
|
| 485 |
+
|
| 486 |
+
# Efficient attention using Flash Attention CUDA kernels.
|
| 487 |
+
# NOTE: efficient implementation is disabled if `mask` is not None or softcapping is enabled.
|
| 488 |
+
# ↓ (B, nh, T, hs) @ (B, nh, T, hs).mT --> (B, nh, T, T) @ (B, nh, T, hs) --> (B, nh, T, hs)
|
| 489 |
+
y = self.scaled_dot_product_attention(q, k, v, mask)
|
| 490 |
+
|
| 491 |
+
# Re-assemble all head outputs side by side.
|
| 492 |
+
y = y.reshape(B, T, head_size * n_head)
|
| 493 |
+
|
| 494 |
+
# Output projection.
|
| 495 |
+
return self.proj(y) # (B, T, C)
|
| 496 |
+
|
| 497 |
+
def scaled_dot_product_attention(
|
| 498 |
+
self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, mask: Optional[torch.Tensor] = None
|
| 499 |
+
) -> torch.Tensor:
|
| 500 |
+
scale = 1.0 / math.sqrt(self.config.attention_scores_scalar or self.config.head_size)
|
| 501 |
+
|
| 502 |
+
# with softcapping we cannot use SDPA
|
| 503 |
+
if self.config.attention_logit_softcapping is not None:
|
| 504 |
+
scores = q @ k.mT * scale
|
| 505 |
+
scores = do_softcapping(scores, self.config.attention_logit_softcapping)
|
| 506 |
+
if mask is None:
|
| 507 |
+
mask = torch.ones(q.size(2), q.size(2), dtype=q.dtype, device=q.device).triu(diagonal=1)
|
| 508 |
+
mask.masked_fill_(mask.bool(), torch.finfo(q.dtype).min)
|
| 509 |
+
scores = scores + mask
|
| 510 |
+
scores = F.softmax(scores, dim=-1, dtype=torch.float).to(dtype=q.dtype)
|
| 511 |
+
y = scores @ v
|
| 512 |
+
else:
|
| 513 |
+
y = F.scaled_dot_product_attention(
|
| 514 |
+
q, k, v, attn_mask=mask, dropout_p=0.0, scale=scale, is_causal=mask is None
|
| 515 |
+
)
|
| 516 |
+
return y.transpose(1, 2)
|
| 517 |
+
|
| 518 |
+
def build_kv_cache(
|
| 519 |
+
self,
|
| 520 |
+
batch_size: int,
|
| 521 |
+
max_seq_length: int,
|
| 522 |
+
rope_cache_length: Optional[int] = None,
|
| 523 |
+
device: Optional[torch.device] = None,
|
| 524 |
+
dtype: Optional[torch.dtype] = None,
|
| 525 |
+
) -> "KVCache":
|
| 526 |
+
v_shape = (batch_size, self.config.n_query_groups, max_seq_length, self.config.head_size)
|
| 527 |
+
if rope_cache_length is None:
|
| 528 |
+
if self.config.rotary_percentage != 1.0:
|
| 529 |
+
raise TypeError("Please pass the `rope_cache_length=gpt.cos.size(-1)` value")
|
| 530 |
+
k_shape = v_shape
|
| 531 |
+
else:
|
| 532 |
+
k_shape = (
|
| 533 |
+
batch_size,
|
| 534 |
+
self.config.n_query_groups,
|
| 535 |
+
max_seq_length,
|
| 536 |
+
rope_cache_length + self.config.head_size - self.config.rope_n_elem,
|
| 537 |
+
)
|
| 538 |
+
return KVCache(k_shape, v_shape, device=device, dtype=dtype)
|
| 539 |
+
|
| 540 |
+
def _load_from_state_dict(self, state_dict: dict, prefix: str, *args: Any, **kwargs: Any) -> None:
|
| 541 |
+
"""For compatibility with legacy checkpoints."""
|
| 542 |
+
|
| 543 |
+
for attr in ("weight", "bias"):
|
| 544 |
+
legacy_key = f"{prefix}attn.{attr}"
|
| 545 |
+
current_key = f"{prefix}qkv.{attr}"
|
| 546 |
+
if legacy_key in state_dict:
|
| 547 |
+
state_dict[current_key] = qkv_reassemble(state_dict.pop(legacy_key), self.config)
|
| 548 |
+
|
| 549 |
+
super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)
|
| 550 |
+
|
| 551 |
+
|
| 552 |
+
class GptNeoxMLP(nn.Module):
|
| 553 |
+
def __init__(self, config: Config, intermediate_size: Optional[int] = None) -> None:
|
| 554 |
+
super().__init__()
|
| 555 |
+
self.intermediate_size = intermediate_size or config.intermediate_size
|
| 556 |
+
self.fc = nn.Linear(config.n_embd, self.intermediate_size, bias=config.bias)
|
| 557 |
+
self.proj = nn.Linear(self.intermediate_size, config.n_embd, bias=config.bias)
|
| 558 |
+
self.config = config
|
| 559 |
+
|
| 560 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
| 561 |
+
x = self.fc(x)
|
| 562 |
+
x = F.gelu(x, approximate=self.config.gelu_approximate)
|
| 563 |
+
return self.proj(x)
|
| 564 |
+
|
| 565 |
+
|
| 566 |
+
class LLaMAMLP(nn.Module):
|
| 567 |
+
def __init__(self, config: Config, intermediate_size: Optional[int] = None) -> None:
|
| 568 |
+
super().__init__()
|
| 569 |
+
self.intermediate_size = intermediate_size or config.intermediate_size
|
| 570 |
+
self.fc_1 = nn.Linear(config.n_embd, self.intermediate_size, bias=config.bias)
|
| 571 |
+
self.fc_2 = nn.Linear(config.n_embd, self.intermediate_size, bias=config.bias)
|
| 572 |
+
self.proj = nn.Linear(self.intermediate_size, config.n_embd, bias=config.bias)
|
| 573 |
+
self.config = config
|
| 574 |
+
|
| 575 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
| 576 |
+
x_fc_1 = self.fc_1(x)
|
| 577 |
+
x_fc_2 = self.fc_2(x)
|
| 578 |
+
x = F.silu(x_fc_1) * x_fc_2
|
| 579 |
+
return self.proj(x)
|
| 580 |
+
|
| 581 |
+
|
| 582 |
+
class GemmaMLP(LLaMAMLP):
|
| 583 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
| 584 |
+
x_fc_1 = self.fc_1(x)
|
| 585 |
+
x_fc_2 = self.fc_2(x)
|
| 586 |
+
x = F.gelu(x_fc_1, approximate=self.config.gelu_approximate) * x_fc_2
|
| 587 |
+
return self.proj(x)
|
| 588 |
+
|
| 589 |
+
|
| 590 |
+
class LLaMAMoE(nn.Module):
|
| 591 |
+
def __init__(self, config: Config) -> None:
|
| 592 |
+
super().__init__()
|
| 593 |
+
self.gate = nn.Linear(config.n_embd, config.n_expert, bias=False)
|
| 594 |
+
self.experts = nn.ModuleList(
|
| 595 |
+
LLaMAMLP(config, intermediate_size=config.moe_intermediate_size) for _ in range(config.n_expert)
|
| 596 |
+
)
|
| 597 |
+
self.config = config
|
| 598 |
+
|
| 599 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
| 600 |
+
"""
|
| 601 |
+
Derived from: https://github.com/mistralai/mistral-src/blob/b46d6/moe_one_file_ref.py#L203-L219
|
| 602 |
+
See also figure 1 in https://arxiv.org/abs/2211.15841
|
| 603 |
+
"""
|
| 604 |
+
B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)
|
| 605 |
+
x = x.view(-1, C) # (B*T, C)
|
| 606 |
+
router = self.gate(x) # (B*T, n_expert)
|
| 607 |
+
probs, indices = torch.topk(router, self.config.n_expert_per_token) # (B*T, n_expert_per_token)
|
| 608 |
+
probs = probs.softmax(dim=1, dtype=torch.float).to(dtype=x.dtype)
|
| 609 |
+
masks = indices.unsqueeze(-1) == torch.arange(self.config.n_expert, device=x.device)
|
| 610 |
+
masks = masks.permute(2, 0, 1) # (n_expert, B*T, n_expert_per_token)
|
| 611 |
+
y = torch.zeros_like(x) # (B*T, C)
|
| 612 |
+
for mask, expert in zip(masks, self.experts):
|
| 613 |
+
token_idx, expert_idx = torch.where(mask)
|
| 614 |
+
y[token_idx] += probs[token_idx, expert_idx, None] * expert(x[token_idx])
|
| 615 |
+
return y.view(B, T, C)
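# A small sketch of the routing computation above in isolation: each token picks its
# top-k experts and the selected gate scores are renormalized with a softmax. The
# sizes are arbitrary assumptions.
def _example_moe_routing() -> Tuple[torch.Tensor, torch.Tensor]:
    n_tokens, n_embd, n_expert, n_expert_per_token = 6, 8, 4, 2
    x = torch.randn(n_tokens, n_embd)
    gate = nn.Linear(n_embd, n_expert, bias=False)
    router = gate(x)                                          # (n_tokens, n_expert)
    probs, indices = torch.topk(router, n_expert_per_token)   # both (n_tokens, k)
    probs = probs.softmax(dim=1)                              # mixing weights per token
    # one boolean mask per expert marking which (token, slot) pairs selected it
    masks = (indices.unsqueeze(-1) == torch.arange(n_expert)).permute(2, 0, 1)
    return probs, masks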
|
| 616 |
+
|
| 617 |
+
|
| 618 |
+
def build_rope_cache(
|
| 619 |
+
seq_len: int,
|
| 620 |
+
n_elem: int,
|
| 621 |
+
device: Optional[torch.device] = None,
|
| 622 |
+
base: int = 10000,
|
| 623 |
+
condense_ratio: int = 1,
|
| 624 |
+
extra_config: Optional[dict] = None,
|
| 625 |
+
rope_local_base_freq: Optional[float] = None,
|
| 626 |
+
) -> Tuple[torch.Tensor, torch.Tensor]:
|
| 627 |
+
"""
|
| 628 |
+
Enhanced Transformer with Rotary Position Embedding.
|
| 629 |
+
|
| 630 |
+
Args:
|
| 631 |
+
seq_len (int): Sequence length.
|
| 632 |
+
n_elem (int): Number of elements (head dimension).
|
| 633 |
+
device (torch.device, optional): Device for tensor allocations.
|
| 634 |
+
base (int, optional): Base for computing inverse frequencies.
|
| 635 |
+
condense_ratio (int, optional): Ratio to condense the position indices.
|
| 636 |
+
extra_config (dict, optional): Configuration parameters for frequency adjustments (used by Llama 3.1 and 3.2)
|
| 637 |
+
|
| 638 |
+
Returns:
|
| 639 |
+
Tuple[torch.Tensor, torch.Tensor]: Cosine and sine caches for RoPE.
|
| 640 |
+
Shapes are `(seq_len, n_elem)`.
|
| 641 |
+
"""
|
| 642 |
+
|
| 643 |
+
# Compute the inverse frequencies theta
|
| 644 |
+
theta = 1.0 / (base ** (torch.arange(0, n_elem, 2, device=device).float() / n_elem))
|
| 645 |
+
|
| 646 |
+
if extra_config is not None:
|
| 647 |
+
factor = extra_config["factor"]
|
| 648 |
+
if "original_max_seq_len" in extra_config:
|
| 649 |
+
orig_context_len = extra_config["original_max_seq_len"]
|
| 650 |
+
low_freq_factor = extra_config["low_freq_factor"]
|
| 651 |
+
high_freq_factor = extra_config["high_freq_factor"]
|
| 652 |
+
|
| 653 |
+
wavelen = 2 * torch.pi / theta
|
| 654 |
+
ratio = orig_context_len / wavelen
|
| 655 |
+
smooth_factor = (ratio - low_freq_factor) / (high_freq_factor - low_freq_factor)
|
| 656 |
+
smooth_factor = torch.clamp(smooth_factor, min=0.0, max=1.0)
|
| 657 |
+
|
| 658 |
+
# Compute adjusted_theta without masked indexing
|
| 659 |
+
adjusted_theta = (1 - smooth_factor) * (theta / factor) + smooth_factor * theta
|
| 660 |
+
theta = adjusted_theta
|
| 661 |
+
else:
|
| 662 |
+
theta = theta / factor
|
| 663 |
+
|
| 664 |
+
# Create position indices `[0, 1, ..., seq_len - 1]`
|
| 665 |
+
seq_idx = torch.arange(seq_len, device=device) / condense_ratio
|
| 666 |
+
|
| 667 |
+
# Calculate the product of position index and $\theta_i$
|
| 668 |
+
idx_theta = torch.outer(seq_idx, theta).repeat(1, 2)
|
| 669 |
+
# If `n_elem` is odd, the final dimension of `idx_theta` has size
|
| 670 |
+
# `n_elem + 1`, so need to cut something off.
|
| 671 |
+
# Due to a current bug in Hugging Face, in the case `n_elem == 1`, we leave
|
| 672 |
+
# `idx_theta`, `cos`, `sin` as is. Things work out in `apply_rope` due to
|
| 673 |
+
# broadcasting. If we shorten `idx_theta`, unit tests comparing to
|
| 674 |
+
# Hugging Face fail.
|
| 675 |
+
# https://github.com/huggingface/transformers/issues/35233
|
| 676 |
+
if idx_theta.shape[-1] > n_elem > 1:
|
| 677 |
+
idx_theta = idx_theta[..., :n_elem]
|
| 678 |
+
|
| 679 |
+
# If rope_local_base_freq is given, build a separate RoPE cache for the local-attention layers.
|
| 680 |
+
# For now, this local cache uses standard (unadjusted) RoPE.
|
| 681 |
+
if rope_local_base_freq is not None:
|
| 682 |
+
local_theta = 1.0 / (rope_local_base_freq ** (torch.arange(0, n_elem, 2, device=device).float() / n_elem))
|
| 683 |
+
local_idx_theta = torch.outer(seq_idx, local_theta)
|
| 684 |
+
local_idx_theta = local_idx_theta.repeat(1, 2)
|
| 685 |
+
if local_idx_theta.shape[-1] > n_elem > 1:
|
| 686 |
+
local_idx_theta = local_idx_theta[..., :n_elem]
|
| 687 |
+
|
| 688 |
+
idx_theta = torch.stack((idx_theta, local_idx_theta), dim=-1)
|
| 689 |
+
|
| 690 |
+
return torch.cos(idx_theta), torch.sin(idx_theta)
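# A small usage sketch for the function above; the sequence length, head dimension,
# and Llama-3.1-style adjustment values are illustrative assumptions.
def _example_rope_cache() -> Tuple[torch.Tensor, torch.Tensor]:
    cos, sin = build_rope_cache(seq_len=16, n_elem=8)   # standard RoPE
    assert cos.shape == sin.shape == (16, 8)
    # frequency-adjusted RoPE: the four parameters must be passed together
    return build_rope_cache(
        seq_len=16,
        n_elem=8,
        extra_config={
            "factor": 8.0,
            "low_freq_factor": 1.0,
            "high_freq_factor": 4.0,
            "original_max_seq_len": 8192,
        },
    )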
|
| 691 |
+
|
| 692 |
+
|
| 693 |
+
def batched_index_select(t, dim, idx):
|
| 694 |
+
"""index_select for batched index and unbatched t"""
|
| 695 |
+
if idx.dim() == 1:
|
| 696 |
+
return torch.index_select(t, dim, idx)
|
| 697 |
+
|
| 698 |
+
*batch_shape, idx_size = idx.shape
|
| 699 |
+
res = torch.index_select(t, dim, idx.reshape(-1)) # flat index
|
| 700 |
+
# split out single batch idx
|
| 701 |
+
res = res.view(*t.shape[:dim], -1, idx_size, *t.shape[dim + 1 :])
|
| 702 |
+
if dim > 0:
|
| 703 |
+
# move batch dim to front, this is np.rollaxis(res, dim, 0) for tensors
|
| 704 |
+
dims = [dim] + list(range(res.dim()))
|
| 705 |
+
del dims[dim + 1]
|
| 706 |
+
res = res.permute(dims)
|
| 707 |
+
# unflatten batch dims
|
| 708 |
+
res = res.view(*batch_shape, *res.shape[1:])
|
| 709 |
+
return res
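# A shape-level sketch of the helper above: an unbatched table indexed with a
# batched index, as done for the RoPE and mask caches. Sizes are arbitrary.
def _example_batched_index_select() -> torch.Tensor:
    t = torch.arange(24.0).view(6, 4)            # e.g. a (seq_len, n_elem) RoPE cache
    idx = torch.tensor([[0, 1, 2], [3, 4, 5]])   # per-sample positions, shape (B, T)
    out = batched_index_select(t, 0, idx)        # -> (B, T, n_elem) == (2, 3, 4)
    assert out.shape == (2, 3, 4)
    return out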
|
| 710 |
+
|
| 711 |
+
|
| 712 |
+
def batched_index_copy_(t, dim, idx, val):
|
| 713 |
+
"""Index copy for batched t, idx, val"""
|
| 714 |
+
|
| 715 |
+
if t.device.type == "mps":
|
| 716 |
+
# Normalize negative dimensions
|
| 717 |
+
if dim < 0:
|
| 718 |
+
dim = t.dim() + dim
|
| 719 |
+
if idx.dim() == 1:
|
| 720 |
+
idx_shape = [1] * val.dim()
|
| 721 |
+
idx_shape[dim] = -1
|
| 722 |
+
idx_expanded = idx.view(*idx_shape)
|
| 723 |
+
idx_expanded = idx_expanded.expand_as(val)
|
| 724 |
+
t.scatter_(dim, idx_expanded, val)
|
| 725 |
+
return t
|
| 726 |
+
|
| 727 |
+
elif idx.dim() == 2:
|
| 728 |
+
assert dim != 0, "Cannot index the batch dimension"
|
| 729 |
+
batch_size = idx.size(0)
|
| 730 |
+
idx_size = idx.size(1)
|
| 731 |
+
assert batch_size == t.size(0) == val.size(0)
|
| 732 |
+
|
| 733 |
+
idx_shape = [batch_size] + [1] * (val.dim() - 1)
|
| 734 |
+
idx_shape[dim] = idx_size
|
| 735 |
+
idx_expanded = idx.view(*idx_shape)
|
| 736 |
+
idx_expanded = idx_expanded.expand_as(val)
|
| 737 |
+
|
| 738 |
+
t.scatter_(dim, idx_expanded, val)
|
| 739 |
+
return t
|
| 740 |
+
else:
|
| 741 |
+
raise NotImplementedError(f"idx.dim() == {idx.dim()} not supported")
|
| 742 |
+
|
| 743 |
+
else:
|
| 744 |
+
if idx.dim() == 1:
|
| 745 |
+
return t.index_copy_(dim, idx, val)
|
| 746 |
+
|
| 747 |
+
assert idx.dim() == 2, f"multiple batch dims not yet {idx.shape=}"
|
| 748 |
+
assert dim != 0, f"cannot index batch dim {dim=}"
|
| 749 |
+
batch_size, idx_size = idx.shape
|
| 750 |
+
assert batch_size == t.size(0)
|
| 751 |
+
assert batch_size == val.size(0)
|
| 752 |
+
|
| 753 |
+
# if we can view the batch and indexed dimensions together, we could
|
| 754 |
+
# do index trickery. Sadly, that is not the case for the kv cache, so we
|
| 755 |
+
# fall back to a for loop
|
| 756 |
+
for i in range(batch_size):
|
| 757 |
+
unbatched_dim = dim if dim < 0 else dim - 1
|
| 758 |
+
t[i].index_copy_(unbatched_dim, idx[i], val[i])
|
| 759 |
+
return t
|
| 760 |
+
|
| 761 |
+
|
| 762 |
+
def apply_rope(x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor:
|
| 763 |
+
"""
|
| 764 |
+
Applies RoPE transform to `x`. Note that `cos`, `sin` need to have a batch
|
| 765 |
+
dimension.
|
| 766 |
+
|
| 767 |
+
Args:
|
| 768 |
+
x: Input tensor, `(B, ..., T, head_size)`
|
| 769 |
+
cos: Cached cosines, `(B, T, head_size)` or `(1, T, head_size)`
|
| 770 |
+
sin: Cached sines, `(B, T, head_size)` or `(1, T, head_size)`
|
| 771 |
+
|
| 772 |
+
Returns:
|
| 773 |
+
Encoded tensor, `(B, ..., T, head_size)`
|
| 774 |
+
"""
|
| 775 |
+
if cos.dim() != 3:
|
| 776 |
+
raise ValueError(f"cos must be three-dimensional, but shape is {cos.shape}")
|
| 777 |
+
if cos.shape != sin.shape:
|
| 778 |
+
raise ValueError(f"cos, sin must have same shape, but cos.shape={cos.shape}, sin.shape={sin.shape}")
|
| 779 |
+
head_size_half = x.size(-1) // 2
|
| 780 |
+
x1 = x[..., :head_size_half] # (B, ..., T, head_size/2)
|
| 781 |
+
x2 = x[..., head_size_half:] # (B, ..., T, head_size/2)
|
| 782 |
+
rotated = torch.cat((-x2, x1), dim=-1) # (B, ..., T, head_size)
|
| 783 |
+
dims_diff = x.dim() - cos.dim()
|
| 784 |
+
if dims_diff > 0:
|
| 785 |
+
# Ensure that shapes of `x`, `cos`, `sin` align
|
| 786 |
+
new_shape = cos.shape[0:1] + (1,) * dims_diff + cos.shape[1:]
|
| 787 |
+
cos = cos.view(*new_shape)
|
| 788 |
+
sin = sin.view(*new_shape)
|
| 789 |
+
|
| 790 |
+
roped = (x * cos) + (rotated * sin)
|
| 791 |
+
return roped.to(dtype=x.dtype)
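# A small sketch combining `build_rope_cache` and `apply_rope` on a query tensor;
# the tensor sizes are illustrative assumptions.
def _example_apply_rope() -> torch.Tensor:
    B, n_head, T, head_size = 2, 4, 16, 8
    q = torch.randn(B, n_head, T, head_size)
    cos, sin = build_rope_cache(seq_len=T, n_elem=head_size)
    cos, sin = cos.unsqueeze(0), sin.unsqueeze(0)   # add the required batch dimension
    q_roped = apply_rope(q, cos, sin)               # rotated queries, same shape as q
    assert q_roped.shape == q.shape
    return q_roped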
|
| 792 |
+
|
| 793 |
+
|
| 794 |
+
def do_softcapping(x: torch.Tensor, thresh: float) -> torch.Tensor:
|
| 795 |
+
return torch.tanh(x / thresh) * thresh
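# A tiny numeric illustration of the soft cap above: values near zero pass through
# almost unchanged, while large values saturate smoothly below the threshold.
def _example_softcapping() -> torch.Tensor:
    # roughly [0.1, 9.6, 29.9] for thresh=30.0
    return do_softcapping(torch.tensor([0.1, 10.0, 100.0]), thresh=30.0)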
|
| 796 |
+
|
| 797 |
+
|
| 798 |
+
class KVCache(nn.Module):
|
| 799 |
+
"""
|
| 800 |
+
Buffers `k`, `v` have shape
|
| 801 |
+
`(batch_size, n_query_groups, max_seq_length, head_size)`.
|
| 802 |
+
"""
|
| 803 |
+
|
| 804 |
+
def __init__(
|
| 805 |
+
self,
|
| 806 |
+
k_shape: Tuple[int, int, int, int],
|
| 807 |
+
v_shape: Tuple[int, int, int, int],
|
| 808 |
+
device: Optional[torch.device] = None,
|
| 809 |
+
dtype: Optional[torch.dtype] = None,
|
| 810 |
+
) -> None:
|
| 811 |
+
super().__init__()
|
| 812 |
+
self.register_buffer("k", torch.zeros(k_shape, device=device, dtype=dtype), persistent=False)
|
| 813 |
+
self.register_buffer("v", torch.zeros(v_shape, device=device, dtype=dtype), persistent=False)
|
| 814 |
+
|
| 815 |
+
def forward(self, input_pos: torch.Tensor, k: torch.Tensor, v: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
|
| 816 |
+
"""
|
| 817 |
+
Writes new values `k` and `v` into the cache at the positions specified
|
| 818 |
+
by `input_pos` along the sequence dimension (`max_seq_length`). The batch
|
| 819 |
+
size of `k` and `v` (`bs`) must be smaller or equal to `KVCache` batch
|
| 820 |
+
size. Returns the full buffers, adjusted to the batch size `bs`.
|
| 821 |
+
|
| 822 |
+
Args:
|
| 823 |
+
input_pos: Position index, `(bs, T)` or `(T,)`
|
| 824 |
+
k: New values, `(bs, n_query_groups, T, head_size)`
|
| 825 |
+
v: New values, `(bs, n_query_groups, T, head_size)`
|
| 826 |
+
|
| 827 |
+
Returns:
|
| 828 |
+
k_full, v_full, `(bs, n_query_groups, max_seq_length, head_size)`
|
| 829 |
+
|
| 830 |
+
"""
|
| 831 |
+
# move the buffer to the activation dtype for when AMP is used
|
| 832 |
+
if self.k.dtype != k.dtype:
|
| 833 |
+
self.k = self.k.to(k.dtype)
|
| 834 |
+
if self.v.dtype != v.dtype:
|
| 835 |
+
self.v = self.v.to(v.dtype)
|
| 836 |
+
# update the cache
|
| 837 |
+
bs = k.size(0)
|
| 838 |
+
k = batched_index_copy_(self.k[:bs, ...], -2, input_pos, k)
|
| 839 |
+
v = batched_index_copy_(self.v[:bs, ...], -2, input_pos, v)
|
| 840 |
+
return k, v
|
| 841 |
+
|
| 842 |
+
def reset_parameters(self) -> None:
|
| 843 |
+
torch.nn.init.zeros_(self.k)
|
| 844 |
+
torch.nn.init.zeros_(self.v)
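# A standalone sketch of the cache above being filled at a few positions; shapes
# follow the class docstring, the concrete sizes are arbitrary assumptions.
def _example_kv_cache() -> Tuple[torch.Tensor, torch.Tensor]:
    batch_size, n_query_groups, max_seq_length, head_size = 1, 2, 8, 4
    shape = (batch_size, n_query_groups, max_seq_length, head_size)
    cache = KVCache(k_shape=shape, v_shape=shape)
    k_new = torch.randn(batch_size, n_query_groups, 3, head_size)  # three new positions
    v_new = torch.randn(batch_size, n_query_groups, 3, head_size)
    k_full, v_full = cache(torch.arange(3), k_new, v_new)          # write positions 0..2
    assert k_full.shape == shape and v_full.shape == shape
    return k_full, v_full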
|
| 845 |
+
|
| 846 |
+
|
| 847 |
+
def build_mask_cache(max_seq_length: int, device: Optional[torch.device] = None) -> torch.Tensor:
|
| 848 |
+
ones = torch.ones((max_seq_length, max_seq_length), device=device, dtype=torch.bool)
|
| 849 |
+
return torch.tril(ones).unsqueeze(0).unsqueeze(0)
|
| 850 |
+
|
| 851 |
+
|
| 852 |
+
class RMSNorm(torch.nn.Module):
|
| 853 |
+
"""Root Mean Square Layer Normalization.
|
| 854 |
+
|
| 855 |
+
Derived from https://github.com/bzhangGo/rmsnorm/blob/master/rmsnorm_torch.py. BSD 3-Clause License:
|
| 856 |
+
https://github.com/bzhangGo/rmsnorm/blob/master/LICENSE.
|
| 857 |
+
"""
|
| 858 |
+
|
| 859 |
+
def __init__(self, size: int, dim: int = -1, eps: float = 1e-6, add_unit_offset: bool = False) -> None:
|
| 860 |
+
super().__init__()
|
| 861 |
+
self.weight = torch.nn.Parameter(torch.ones(size))
|
| 862 |
+
self.eps = eps
|
| 863 |
+
self.dim = dim
|
| 864 |
+
self.add_unit_offset = add_unit_offset
|
| 865 |
+
|
| 866 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
| 867 |
+
dtype = x.dtype
|
| 868 |
+
x = x.float()
|
| 869 |
+
# NOTE: the original RMSNorm paper implementation is not equivalent
|
| 870 |
+
norm_x = torch.mean(x * x, dim=self.dim, keepdim=True)
|
| 871 |
+
x_normed = x * torch.rsqrt(norm_x + self.eps)
|
| 872 |
+
weight = (1 + self.weight) if self.add_unit_offset else self.weight
|
| 873 |
+
return (x_normed * weight.float()).to(dtype=dtype)
|
| 874 |
+
|
| 875 |
+
def reset_parameters(self) -> None:
|
| 876 |
+
torch.nn.init.ones_(self.weight)
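# The normalization above in formula form, y = x / sqrt(mean(x^2) + eps) * weight,
# checked against the module on random data (the add_unit_offset=False case).
def _example_rmsnorm() -> torch.Tensor:
    x = torch.randn(2, 3, 8)
    norm = RMSNorm(size=8, eps=1e-6)
    manual = x * torch.rsqrt(x.pow(2).mean(dim=-1, keepdim=True) + 1e-6) * norm.weight
    assert torch.allclose(norm(x), manual, atol=1e-6)
    return norm(x)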
|
litgpt/perplexity.py
ADDED
|
@@ -0,0 +1,513 @@
| 1 |
+
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
|
| 2 |
+
|
| 3 |
+
import math
|
| 4 |
+
import pprint
|
| 5 |
+
import time
|
| 6 |
+
import os
|
| 7 |
+
import json
|
| 8 |
+
from dataclasses import asdict
|
| 9 |
+
from datetime import timedelta
|
| 10 |
+
from functools import partial
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
from typing import Dict, Optional, Tuple, Union
|
| 13 |
+
|
| 14 |
+
import lightning as L
|
| 15 |
+
import torch
|
| 16 |
+
import torch.nn as nn
|
| 17 |
+
from lightning.fabric.strategies import FSDPStrategy
|
| 18 |
+
from lightning.fabric.utilities.throughput import ThroughputMonitor, measure_flops
|
| 19 |
+
from torch.utils.data import DataLoader
|
| 20 |
+
from torchmetrics.aggregation import RunningMean
|
| 21 |
+
from typing_extensions import Literal
|
| 22 |
+
|
| 23 |
+
from litgpt import Tokenizer
|
| 24 |
+
from litgpt.args import EvalArgs, LogArgs, TrainArgs
|
| 25 |
+
from litgpt.config import name_to_config
|
| 26 |
+
from litgpt.data import DataModule, TinyLlama, Arxiv
|
| 27 |
+
from litgpt.model import GPT, Block, CausalSelfAttention, Config, LLaMAMLP
|
| 28 |
+
from litgpt.utils import (
|
| 29 |
+
CycleIterator,
|
| 30 |
+
capture_hparams,
|
| 31 |
+
check_nvlink_connectivity,
|
| 32 |
+
choose_logger,
|
| 33 |
+
chunked_cross_entropy,
|
| 34 |
+
copy_config_files,
|
| 35 |
+
extend_checkpoint_dir,
|
| 36 |
+
find_resume_path,
|
| 37 |
+
get_default_supported_precision,
|
| 38 |
+
init_out_dir,
|
| 39 |
+
instantiate_torch_optimizer,
|
| 40 |
+
num_parameters,
|
| 41 |
+
parse_devices,
|
| 42 |
+
reset_parameters,
|
| 43 |
+
save_config,
|
| 44 |
+
save_hyperparameters,
|
| 45 |
+
)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def setup(
|
| 49 |
+
model_name: str,
|
| 50 |
+
model_config: Optional[Config] = None,
|
| 51 |
+
out_dir: Path = Path("out/pretrain"),
|
| 52 |
+
precision: Literal["bf16-true", "bf16-mixed", "32-true", None] = None,
|
| 53 |
+
initial_checkpoint_dir: Optional[Path] = None,
|
| 54 |
+
resume: Union[bool, Literal["auto"], Path] = False,
|
| 55 |
+
data: Optional[DataModule] = None,
|
| 56 |
+
data_dir: Optional[Path] = None,
|
| 57 |
+
train: TrainArgs = TrainArgs(
|
| 58 |
+
save_interval=1000,
|
| 59 |
+
log_interval=1,
|
| 60 |
+
global_batch_size=512,
|
| 61 |
+
micro_batch_size=4,
|
| 62 |
+
max_tokens=int(3e12), # 3 trillion
|
| 63 |
+
max_norm=1.0,
|
| 64 |
+
min_lr=4e-5,
|
| 65 |
+
lr_warmup_steps=2000,
|
| 66 |
+
tie_embeddings=False,
|
| 67 |
+
),
|
| 68 |
+
eval: EvalArgs = EvalArgs(interval=1000, max_iters=100),
|
| 69 |
+
log: LogArgs = LogArgs(),
|
| 70 |
+
optimizer: Union[str, Dict] = "AdamW",
|
| 71 |
+
devices: Union[int, str] = "auto",
|
| 72 |
+
num_nodes: int = 1,
|
| 73 |
+
tokenizer_dir: Optional[Path] = None,
|
| 74 |
+
logger_name: Literal["wandb", "tensorboard", "csv", "mlflow"] = "tensorboard",
|
| 75 |
+
seed: int = 42,
|
| 76 |
+
multi_month: bool = False,
|
| 77 |
+
):
|
| 78 |
+
"""Pretrain a model.
|
| 79 |
+
|
| 80 |
+
Arguments:
|
| 81 |
+
model_name: The name of the model to pretrain. Choose from names in ``litgpt.config``. Use "list" to list the supported models.
|
| 82 |
+
model_config: A ``litgpt.Config`` object to define the model architecture. Mutually exclusive with
|
| 83 |
+
``model_name``. Overrides ``model_name`` if both are specified.
|
| 84 |
+
out_dir: Directory in which to save checkpoints and logs. If running in a Lightning Studio Job, look for it in
|
| 85 |
+
/teamspace/jobs/<job-name>/share.
|
| 86 |
+
precision: The precision to use for finetuning. Determines a compatible precision setting by default.
|
| 87 |
+
initial_checkpoint_dir: Optional path to a checkpoint directory to initialize the model from.
|
| 88 |
+
Useful for continued pretraining. Mutually exclusive with ``resume``.
|
| 89 |
+
resume: Path to a checkpoint directory to resume from in case training was interrupted, or ``True`` to resume
|
| 90 |
+
from the latest checkpoint in ``out_dir``. An error will be raised if no checkpoint is found. Passing
|
| 91 |
+
``'auto'`` will resume from the latest checkpoint but not error if no checkpoint exists.
|
| 92 |
+
data: Data-related arguments. If not provided, the default is ``litgpt.data.TinyLlama``.
|
| 93 |
+
train: Training-related arguments. See ``litgpt.args.TrainArgs`` for details.
|
| 94 |
+
eval: Evaluation-related arguments. See ``litgpt.args.EvalArgs`` for details.
|
| 95 |
+
optimizer: An optimizer name (such as "AdamW") or config.
|
| 96 |
+
|
| 97 |
+
devices: How many devices/GPUs to use. Uses all GPUs by default.
|
| 98 |
+
num_nodes: How many nodes the code is being run on.
|
| 99 |
+
tokenizer_dir: Optional path to the tokenizer dir that was used for preprocessing the dataset. Only some data
|
| 100 |
+
modules require this.
|
| 101 |
+
logger_name: The name of the logger to send metrics to.
|
| 102 |
+
seed: The random seed to use for reproducibility.
|
| 103 |
+
"""
|
| 104 |
+
if model_name == "list":
|
| 105 |
+
available_models = "\n".join(sorted(name_to_config))
|
| 106 |
+
print(f"Available values:\n{available_models}")
|
| 107 |
+
quit()
|
| 108 |
+
|
| 109 |
+
if initial_checkpoint_dir is not None:
|
| 110 |
+
initial_checkpoint_dir = extend_checkpoint_dir(initial_checkpoint_dir)
|
| 111 |
+
|
| 112 |
+
if tokenizer_dir is not None:
|
| 113 |
+
tokenizer_dir = extend_checkpoint_dir(tokenizer_dir)
|
| 114 |
+
|
| 115 |
+
if model_config is None:
|
| 116 |
+
# Support both model_name options: meta-llama/Meta-Llama-3-8B & Meta-Llama-3-8B
|
| 117 |
+
try:
|
| 118 |
+
model_config = Config.from_name(model_name)
|
| 119 |
+
except ValueError:
|
| 120 |
+
print(f"Model name {model_name} is not supported.\n")
|
| 121 |
+
available_models = "\n".join(sorted(name_to_config))
|
| 122 |
+
print(f"Available values:\n{available_models}")
|
| 123 |
+
quit()
|
| 124 |
+
|
| 125 |
+
hparams = capture_hparams()
|
| 126 |
+
|
| 127 |
+
config = Config.from_name(model_name) if model_config is None else model_config
|
| 128 |
+
precision = precision or get_default_supported_precision(training=True)
|
| 129 |
+
devices = parse_devices(devices)
|
| 130 |
+
out_dir = init_out_dir(out_dir)
|
| 131 |
+
# in case the dataset requires the Tokenizer
|
| 132 |
+
tokenizer = Tokenizer(tokenizer_dir) if tokenizer_dir is not None else None
|
| 133 |
+
|
| 134 |
+
logger = choose_logger(
|
| 135 |
+
logger_name,
|
| 136 |
+
out_dir,
|
| 137 |
+
name=f"pretrain-{config.name}",
|
| 138 |
+
resume=bool(resume),
|
| 139 |
+
log_interval=train.log_interval,
|
| 140 |
+
log_args=asdict(log),
|
| 141 |
+
)
|
| 142 |
+
|
| 143 |
+
if devices * num_nodes > 1:
|
| 144 |
+
strategy = FSDPStrategy(auto_wrap_policy={Block}, state_dict_type="full", sharding_strategy="HYBRID_SHARD")
|
| 145 |
+
else:
|
| 146 |
+
strategy = "auto"
|
| 147 |
+
|
| 148 |
+
fabric = L.Fabric(devices=devices, num_nodes=num_nodes, strategy=strategy, precision=precision, loggers=[logger])
|
| 149 |
+
|
| 150 |
+
if torch.cuda.is_available() and devices > 1:
|
| 151 |
+
check_nvlink_connectivity(fabric)
|
| 152 |
+
|
| 153 |
+
fabric.launch()
|
| 154 |
+
|
| 155 |
+
fabric.print(pprint.pformat(hparams))
|
| 156 |
+
if logger_name in ("tensorboard", "wandb", "mlflow"):
|
| 157 |
+
fabric.logger.log_hyperparams(hparams)
|
| 158 |
+
|
| 159 |
+
main(
|
| 160 |
+
fabric=fabric,
|
| 161 |
+
devices=devices,
|
| 162 |
+
seed=seed,
|
| 163 |
+
initial_checkpoint_dir=initial_checkpoint_dir,
|
| 164 |
+
resume=resume,
|
| 165 |
+
config=config,
|
| 166 |
+
data=data,
|
| 167 |
+
data_dir=data_dir,
|
| 168 |
+
out_dir=out_dir,
|
| 169 |
+
tokenizer=tokenizer,
|
| 170 |
+
train=train,
|
| 171 |
+
eval=eval,
|
| 172 |
+
optimizer=optimizer,
|
| 173 |
+
multi_month=multi_month,
|
| 174 |
+
)
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
def main(
|
| 178 |
+
fabric: L.Fabric,
|
| 179 |
+
devices: int,
|
| 180 |
+
seed: int,
|
| 181 |
+
initial_checkpoint_dir: Optional[Path],
|
| 182 |
+
resume: Union[bool, Literal["auto"], Path],
|
| 183 |
+
config: Config,
|
| 184 |
+
data: DataModule,
|
| 185 |
+
data_dir: Path,
|
| 186 |
+
out_dir: Path,
|
| 187 |
+
tokenizer: Optional[Tokenizer],
|
| 188 |
+
train: TrainArgs,
|
| 189 |
+
eval: EvalArgs,
|
| 190 |
+
optimizer: Union[str, Dict],
|
| 191 |
+
multi_month: bool = False,
|
| 192 |
+
) -> None:
|
| 193 |
+
validate_args(train, eval, initial_checkpoint_dir, resume)
|
| 194 |
+
|
| 195 |
+
if fabric.global_rank == 0:
|
| 196 |
+
out_dir.mkdir(parents=True, exist_ok=True)
|
| 197 |
+
|
| 198 |
+
fabric.seed_everything(seed) # same seed for every process to init model (FSDP)
|
| 199 |
+
|
| 200 |
+
t0 = time.perf_counter()
|
| 201 |
+
with fabric.init_module(empty_init=True):
|
| 202 |
+
model = GPT(config)
|
| 203 |
+
|
| 204 |
+
initialize_weights(fabric, model, n_layer=config.n_layer, n_embd=config.n_embd)
|
| 205 |
+
|
| 206 |
+
if train.tie_embeddings:
|
| 207 |
+
model.transformer.wte.weight = model.lm_head.weight
|
| 208 |
+
if train.max_seq_length:
|
| 209 |
+
model.max_seq_length = train.max_seq_length
|
| 210 |
+
|
| 211 |
+
fabric.print(f"Time to instantiate model: {time.perf_counter() - t0:.02f} seconds.")
|
| 212 |
+
fabric.print(f"Total parameters: {num_parameters(model):,}")
|
| 213 |
+
|
| 214 |
+
model = torch.compile(model)
|
| 215 |
+
model = fabric.setup(model)
|
| 216 |
+
|
| 217 |
+
extra_kwargs = {"fused": fabric.device.type == "cuda"}
|
| 218 |
+
optimizer = instantiate_torch_optimizer(optimizer, model.parameters(), **extra_kwargs)
|
| 219 |
+
optimizer = fabric.setup_optimizers(optimizer)
|
| 220 |
+
|
| 221 |
+
if initial_checkpoint_dir:
|
| 222 |
+
ckpt_path = initial_checkpoint_dir / "lit_model.pth"
|
| 223 |
+
|
| 224 |
+
if fabric.global_rank == 0:
|
| 225 |
+
try:
|
| 226 |
+
obj = torch.load(ckpt_path, map_location="cpu")
|
| 227 |
+
except Exception as e:
|
| 228 |
+
raise RuntimeError(f"[load] Unable to read {ckpt_path}: {e}")
|
| 229 |
+
|
| 230 |
+
if isinstance(obj, dict) and "model" in obj:
|
| 231 |
+
import os
|
| 232 |
+
print(f"[fix] {ckpt_path} the full state, extract 'model' and write bacl", flush=True)
|
| 233 |
+
sd = obj["model"]
|
| 234 |
+
if len(sd) and next(iter(sd)).startswith("module."):
|
| 235 |
+
sd = {k[7:]: v for k, v in sd.items()}
|
| 236 |
+
|
| 237 |
+
tmp = ckpt_path.with_suffix(".pth.tmp")
|
| 238 |
+
torch.save(sd, tmp)
|
| 239 |
+
os.replace(tmp, ckpt_path)
|
| 240 |
+
try:
|
| 241 |
+
os.sync()
|
| 242 |
+
except Exception:
|
| 243 |
+
pass
|
| 244 |
+
print(f"[fix] rewrite to make it 'model' only: {ckpt_path}", flush=True)
|
| 245 |
+
else:
|
| 246 |
+
print(f"[ok] {ckpt_path} already 'model' only", flush=True)
|
| 247 |
+
|
| 248 |
+
fabric.barrier()
|
| 249 |
+
|
| 250 |
+
fabric.load_raw(ckpt_path, model, strict=False)
|
| 251 |
+
|
| 252 |
+
from litgpt.data import Arxiv
|
| 253 |
+
|
| 254 |
+
if not multi_month:
|
| 255 |
+
if isinstance(data, Arxiv):
|
| 256 |
+
data.arxiv_train = str(data_dir).rstrip("/") + "/train"
|
| 257 |
+
data.arxiv_val = str(data_dir).rstrip("/") + "/train"
|
| 258 |
+
else:
|
| 259 |
+
raise NotImplementedError()
|
| 260 |
+
train_dataloader, val_dataloader = get_dataloaders(fabric, data, tokenizer, train, model.max_seq_length)
|
| 261 |
+
train_dataloader, val_dataloader = fabric.setup_dataloaders(train_dataloader, val_dataloader)
|
| 262 |
+
|
| 263 |
+
state = {
|
| 264 |
+
"model": model,
|
| 265 |
+
"optimizer": optimizer,
|
| 266 |
+
"train_dataloader": train_dataloader,
|
| 267 |
+
"iter_num": 0,
|
| 268 |
+
"step_count": 0,
|
| 269 |
+
}
|
| 270 |
+
|
| 271 |
+
resume = find_resume_path(resume, out_dir)
|
| 272 |
+
if resume:
|
| 273 |
+
fabric.print(f"Resuming training from {resume}")
|
| 274 |
+
fabric.load(resume, state)
|
| 275 |
+
|
| 276 |
+
train_time = time.perf_counter()
|
| 277 |
+
|
| 278 |
+
# work around PyTorch issue https://github.com/pytorch/pytorch/issues/152162
|
| 279 |
+
# which does not like the lazy initialization to be called in dynamo.
|
| 280 |
+
# Happens with PyTorch 2.7.
|
| 281 |
+
if (
|
| 282 |
+
torch.__version__.startswith("2.7.")
|
| 283 |
+
and (model._forward_module.__class__.__name__ == "OptimizedModule")
|
| 284 |
+
and (model._forward_module._orig_mod.__class__.__name__ == "FullyShardedDataParallel")
|
| 285 |
+
):
|
| 286 |
+
from torch.distributed.fsdp._runtime_utils import _root_pre_forward
|
| 287 |
+
|
| 288 |
+
_root_pre_forward(model._forward_module._orig_mod, model._forward_module._orig_mod, [], {})
|
| 289 |
+
|
| 290 |
+
ppl(
|
| 291 |
+
fabric=fabric,
|
| 292 |
+
state=state,
|
| 293 |
+
val_dataloader=val_dataloader,
|
| 294 |
+
out_dir=out_dir,
|
| 295 |
+
train=train,
|
| 296 |
+
eval=eval,
|
| 297 |
+
)
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
total_tokens = state["iter_num"] * train.micro_batch_size * model.max_seq_length * fabric.world_size
|
| 301 |
+
|
| 302 |
+
# Print formatted output
|
| 303 |
+
separator = "-" * 40
|
| 304 |
+
fabric.print(separator)
|
| 305 |
+
fabric.print("| Performance")
|
| 306 |
+
fabric.print(f"| - Total tokens : {total_tokens:,}")
|
| 307 |
+
fabric.print(f"| - Training Time : {(time.perf_counter() - train_time):.2f} s")
|
| 308 |
+
fabric.print(f"| - Tok/sec : {total_tokens / train_time:.2f} tok/s")
|
| 309 |
+
fabric.print("| " + "-" * 40)
|
| 310 |
+
|
| 311 |
+
if fabric.device.type == "cuda":
|
| 312 |
+
memory_used = torch.cuda.max_memory_allocated() / 1e9
|
| 313 |
+
fabric.print("| Memory Usage")
|
| 314 |
+
fabric.print(f"| - Memory Used : {memory_used:.2f} GB")
|
| 315 |
+
fabric.print(separator)
|
| 316 |
+
|
| 317 |
+
else:
|
| 318 |
+
months = [
|
| 319 |
+
"2407", "2408", "2409", "2410", "2411", "2412",
|
| 320 |
+
"2501", "2502", "2503", "2504", "2505", "2506"
|
| 321 |
+
]
|
| 322 |
+
for month in months:
|
| 323 |
+
if isinstance(data, Arxiv):
|
| 324 |
+
data.arxiv_train = str(data_dir).rstrip("/") + f"/{month}/train"
|
| 325 |
+
data.arxiv_val = str(data_dir).rstrip("/") + f"/{month}/train"
|
| 326 |
+
else:
|
| 327 |
+
raise NotImplementedError()
|
| 328 |
+
train_dataloader, val_dataloader = get_dataloaders(fabric, data, tokenizer, train, model.max_seq_length)
|
| 329 |
+
train_dataloader, val_dataloader = fabric.setup_dataloaders(train_dataloader, val_dataloader)
|
| 330 |
+
|
| 331 |
+
state = {
|
| 332 |
+
"model": model,
|
| 333 |
+
"optimizer": optimizer,
|
| 334 |
+
"train_dataloader": train_dataloader,
|
| 335 |
+
"iter_num": 0,
|
| 336 |
+
"step_count": 0,
|
| 337 |
+
}
|
| 338 |
+
|
| 339 |
+
resume = find_resume_path(resume, out_dir)
|
| 340 |
+
if resume:
|
| 341 |
+
fabric.print(f"Resuming training from {resume}")
|
| 342 |
+
fabric.load(resume, state)
|
| 343 |
+
|
| 344 |
+
train_time = time.perf_counter()
|
| 345 |
+
|
| 346 |
+
# work around PyTorch issue https://github.com/pytorch/pytorch/issues/152162
|
| 347 |
+
# which does not like the lazy initialization to be called in dynamo.
|
| 348 |
+
# Happens with PyTorch 2.7.
|
| 349 |
+
if (
|
| 350 |
+
torch.__version__.startswith("2.7.")
|
| 351 |
+
and (model._forward_module.__class__.__name__ == "OptimizedModule")
|
| 352 |
+
and (model._forward_module._orig_mod.__class__.__name__ == "FullyShardedDataParallel")
|
| 353 |
+
):
|
| 354 |
+
from torch.distributed.fsdp._runtime_utils import _root_pre_forward
|
| 355 |
+
|
| 356 |
+
_root_pre_forward(model._forward_module._orig_mod, model._forward_module._orig_mod, [], {})
|
| 357 |
+
|
| 358 |
+
ppl(
|
| 359 |
+
fabric=fabric,
|
| 360 |
+
state=state,
|
| 361 |
+
val_dataloader=val_dataloader,
|
| 362 |
+
out_dir=out_dir/month,
|
| 363 |
+
train=train,
|
| 364 |
+
eval=eval,
|
| 365 |
+
month=month,
|
| 366 |
+
)
|
| 367 |
+
|
| 368 |
+
total_tokens = state["iter_num"] * train.micro_batch_size * model.max_seq_length * fabric.world_size
|
| 369 |
+
|
| 370 |
+
# Print formatted output
|
| 371 |
+
separator = "-" * 40
|
| 372 |
+
fabric.print(separator)
|
| 373 |
+
fabric.print("| Performance")
|
| 374 |
+
fabric.print(f"| - Total tokens : {total_tokens:,}")
|
| 375 |
+
fabric.print(f"| - Training Time : {(time.perf_counter() - train_time):.2f} s")
|
| 376 |
+
fabric.print(f"| - Tok/sec : {total_tokens / train_time:.2f} tok/s")
|
| 377 |
+
fabric.print("| " + "-" * 40)
|
| 378 |
+
|
| 379 |
+
if fabric.device.type == "cuda":
|
| 380 |
+
memory_used = torch.cuda.max_memory_allocated() / 1e9
|
| 381 |
+
fabric.print("| Memory Usage")
|
| 382 |
+
fabric.print(f"| - Memory Used : {memory_used:.2f} GB")
|
| 383 |
+
fabric.print(separator)
|
| 384 |
+
|
| 385 |
+
|
| 386 |
+
def ppl(
|
| 387 |
+
fabric: L.Fabric,
|
| 388 |
+
state: dict,
|
| 389 |
+
val_dataloader: DataLoader,
|
| 390 |
+
out_dir: Path,
|
| 391 |
+
train: TrainArgs,
|
| 392 |
+
eval: EvalArgs,
|
| 393 |
+
month: Optional[str] = None,
|
| 394 |
+
) -> None:
|
| 395 |
+
model = state["model"]
|
| 396 |
+
|
| 397 |
+
with torch.device("meta"):
|
| 398 |
+
meta_model = GPT(model.config)
|
| 399 |
+
x = torch.randint(0, 1, (train.micro_batch_size, meta_model.max_seq_length))
|
| 400 |
+
model_fwd = lambda: meta_model(x) # noqa: F821
|
| 401 |
+
model_loss = lambda y: chunked_cross_entropy(y, x, chunk_size=0) # noqa: F821
|
| 402 |
+
measured_flops = measure_flops(meta_model, model_fwd, model_loss)
|
| 403 |
+
fabric.print(f"Measured TFLOPs: {measured_flops * fabric.world_size / 1e12:.2f}")
|
| 404 |
+
del meta_model, x
|
| 405 |
+
|
| 406 |
+
val_loss = validate(fabric, model, val_dataloader, max_iters=eval.max_iters)
|
| 407 |
+
metrics = {"val_loss": val_loss, "val_ppl": math.exp(val_loss)}
|
| 408 |
+
fabric.log_dict(metrics, step=state["iter_num"])
|
| 409 |
+
fabric.print(f"Final evaluation | val loss: {val_loss.item():.3f} | val ppl: {math.exp(val_loss):.3f}")
|
| 410 |
+
|
| 411 |
+
if month is not None:
|
| 412 |
+
metrics = {
|
| 413 |
+
"month": month,
|
| 414 |
+
"val_loss": float(val_loss),
|
| 415 |
+
"val_ppl": float(math.exp(val_loss)),
|
| 416 |
+
"rank": fabric.global_rank,
|
| 417 |
+
}
|
| 418 |
+
else:
|
| 419 |
+
metrics = {
|
| 420 |
+
"val_loss": float(val_loss),
|
| 421 |
+
"val_ppl": float(math.exp(val_loss)),
|
| 422 |
+
}
|
| 423 |
+
jsonl_path = out_dir.parent / "ppl_metrics.jsonl"
|
| 424 |
+
os.makedirs(out_dir, exist_ok=True)
|
| 425 |
+
with open(jsonl_path, "a", encoding="utf-8") as f:
|
| 426 |
+
f.write(json.dumps(metrics, ensure_ascii=False) + "\n")
|
| 427 |
+
|
| 428 |
+
|
| 429 |
+
@torch.no_grad()
|
| 430 |
+
def validate(
|
| 431 |
+
fabric: L.Fabric, model: nn.Module, val_dataloader: DataLoader, max_iters: int, verbose: bool = True
|
| 432 |
+
) -> torch.Tensor:
|
| 433 |
+
fabric.barrier()
|
| 434 |
+
if verbose:
|
| 435 |
+
fabric.print("Validating ...")
|
| 436 |
+
model.eval()
|
| 437 |
+
|
| 438 |
+
losses = []
|
| 439 |
+
for k, batch in enumerate(val_dataloader):
|
| 440 |
+
if k >= max_iters:
|
| 441 |
+
break
|
| 442 |
+
input_ids = batch[:, 0 : model.max_seq_length].contiguous().long()
|
| 443 |
+
targets = batch[:, 1 : (model.max_seq_length + 1)].contiguous().long()
|
| 444 |
+
logits = model(input_ids)
|
| 445 |
+
loss = chunked_cross_entropy(logits, targets)
|
| 446 |
+
losses.append(loss)
|
| 447 |
+
|
| 448 |
+
val_loss = torch.stack(losses).mean()
|
| 449 |
+
model.train()
|
| 450 |
+
fabric.barrier()
|
| 451 |
+
return val_loss
|
| 452 |
+
|
| 453 |
+
|
| 454 |
+
def get_dataloaders(
|
| 455 |
+
fabric: L.Fabric, data: DataModule, tokenizer: Tokenizer, train: TrainArgs, block_size: int
|
| 456 |
+
) -> Tuple[DataLoader, DataLoader]:
|
| 457 |
+
data.connect(tokenizer=tokenizer, batch_size=train.micro_batch_size, max_seq_length=block_size)
|
| 458 |
+
with fabric.rank_zero_first():
|
| 459 |
+
data.prepare_data()
|
| 460 |
+
data.setup()
|
| 461 |
+
train_dataloader = data.train_dataloader()
|
| 462 |
+
val_dataloader = data.val_dataloader()
|
| 463 |
+
return train_dataloader, val_dataloader
|
| 464 |
+
|
| 465 |
+
|
| 466 |
+
def save_checkpoint(fabric, state, tokenizer_dir, checkpoint_file):
|
| 467 |
+
model = state["model"]
|
| 468 |
+
checkpoint_file.parent.mkdir(parents=True, exist_ok=True)
|
| 469 |
+
fabric.print(f"Saving checkpoint to {str(checkpoint_file)!r}")
|
| 470 |
+
fabric.save(checkpoint_file, state)
|
| 471 |
+
if fabric.global_rank == 0:
|
| 472 |
+
save_hyperparameters(setup, checkpoint_file.parent)
|
| 473 |
+
if tokenizer_dir is not None:
|
| 474 |
+
copy_config_files(tokenizer_dir, checkpoint_file.parent)
|
| 475 |
+
save_config(model.config, checkpoint_file.parent)
|
| 476 |
+
|
| 477 |
+
def initialize_weights(fabric: L.Fabric, model: GPT, n_layer: int, n_embd: int) -> None:
|
| 478 |
+
"""GPT-NeoX weight initialization (https://arxiv.org/abs/2204.06745)."""
|
| 479 |
+
# Adapted from https://github.com/jzhang38/TinyLlama
|
| 480 |
+
|
| 481 |
+
def init_weights(module, std):
|
| 482 |
+
nn.init.normal_(module.weight, mean=0.0, std=std)
|
| 483 |
+
if getattr(module, "bias", None) is not None:
|
| 484 |
+
nn.init.zeros_(module.bias)
|
| 485 |
+
|
| 486 |
+
for mod in model.modules():
|
| 487 |
+
if isinstance(mod, (nn.Embedding, nn.Linear)):
|
| 488 |
+
mod.reset_parameters = partial(init_weights, mod, std=math.sqrt(2.0 / 5 / n_embd))
|
| 489 |
+
|
| 490 |
+
# need a separate loop because `mod.proj` below is a `nn.Linear` too
|
| 491 |
+
for mod in model.modules():
|
| 492 |
+
if isinstance(mod, (LLaMAMLP, CausalSelfAttention)):
|
| 493 |
+
mod.proj.reset_parameters = partial(init_weights, mod.proj, std=(1 / math.sqrt(n_embd) / n_layer))
|
| 494 |
+
|
| 495 |
+
if not isinstance(fabric.strategy, FSDPStrategy):
|
| 496 |
+
reset_parameters(model)
|
| 497 |
+
|
| 498 |
+
def validate_args(train: TrainArgs, eval: EvalArgs, initial_checkpoint_dir, resume) -> None:
|
| 499 |
+
issues = []
|
| 500 |
+
unsupported = [(train, ["max_steps", "epochs"]), (eval, ["max_new_tokens"])]
|
| 501 |
+
for args, names in unsupported:
|
| 502 |
+
for name in names:
|
| 503 |
+
if getattr(args, name) is not None:
|
| 504 |
+
issues.append(f"{__file__} doesn't support the {name!r} argument. This is set in {args}")
|
| 505 |
+
required = [(train, ["max_tokens", "max_norm"])]
|
| 506 |
+
for args, names in required:
|
| 507 |
+
for name in names:
|
| 508 |
+
if getattr(args, name) is None:
|
| 509 |
+
issues.append(f"{__file__} requires the {name!r} argument. This is set in {args}")
|
| 510 |
+
if initial_checkpoint_dir and resume:
|
| 511 |
+
issues.append("Can't provide both `--resume` and `--initial_checkpoint_dir`. Choose one.")
|
| 512 |
+
if issues:
|
| 513 |
+
raise ValueError("\n".join(issues))
|
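A minimal sketch of how the per-month records written by ppl() above could be aggregated afterwards. The file location below is an assumption (it depends on the out_dir passed to the script); the record keys ("month", "val_loss", "val_ppl", "rank") follow the code above.

import json
from collections import defaultdict
from pathlib import Path

records_path = Path("out/eval/ppl_metrics.jsonl")  # hypothetical location of the written metrics
per_month = defaultdict(list)
with open(records_path, encoding="utf-8") as f:
    for line in f:
        rec = json.loads(line)
        per_month[rec.get("month", "all")].append(rec["val_ppl"])

for month, ppls in sorted(per_month.items()):
    print(f"{month}: mean val_ppl = {sum(ppls) / len(ppls):.3f}")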
litgpt/pretrain.py
ADDED
|
@@ -0,0 +1,564 @@
| 1 |
+
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
|
| 2 |
+
|
| 3 |
+
import math
|
| 4 |
+
import pprint
|
| 5 |
+
import time
|
| 6 |
+
import os
|
| 7 |
+
import json
|
| 8 |
+
from dataclasses import asdict
|
| 9 |
+
from datetime import timedelta
|
| 10 |
+
from functools import partial
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
from typing import Dict, Optional, Tuple, Union
|
| 13 |
+
|
| 14 |
+
import lightning as L
|
| 15 |
+
import torch
|
| 16 |
+
import torch.nn as nn
|
| 17 |
+
from lightning.fabric.strategies import FSDPStrategy
|
| 18 |
+
from lightning.fabric.utilities.throughput import ThroughputMonitor, measure_flops
|
| 19 |
+
from torch.utils.data import DataLoader
|
| 20 |
+
from torchmetrics.aggregation import RunningMean
|
| 21 |
+
from typing_extensions import Literal
|
| 22 |
+
|
| 23 |
+
from litgpt import Tokenizer
|
| 24 |
+
from litgpt.args import EvalArgs, LogArgs, TrainArgs
|
| 25 |
+
from litgpt.config import name_to_config
|
| 26 |
+
from litgpt.data import DataModule, TinyLlama, Arxiv
|
| 27 |
+
from litgpt.model import GPT, Block, CausalSelfAttention, Config, LLaMAMLP
|
| 28 |
+
from litgpt.utils import (
|
| 29 |
+
CycleIterator,
|
| 30 |
+
capture_hparams,
|
| 31 |
+
check_nvlink_connectivity,
|
| 32 |
+
choose_logger,
|
| 33 |
+
chunked_cross_entropy,
|
| 34 |
+
copy_config_files,
|
| 35 |
+
extend_checkpoint_dir,
|
| 36 |
+
find_resume_path,
|
| 37 |
+
get_default_supported_precision,
|
| 38 |
+
init_out_dir,
|
| 39 |
+
instantiate_torch_optimizer,
|
| 40 |
+
num_parameters,
|
| 41 |
+
parse_devices,
|
| 42 |
+
reset_parameters,
|
| 43 |
+
save_config,
|
| 44 |
+
save_hyperparameters,
|
| 45 |
+
)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def setup(
|
| 49 |
+
model_name: str,
|
| 50 |
+
model_config: Optional[Config] = None,
|
| 51 |
+
out_dir: Path = Path("out/pretrain"),
|
| 52 |
+
precision: Literal["bf16-true", "bf16-mixed", "32-true", None] = None,
|
| 53 |
+
initial_checkpoint_dir: Optional[Path] = None,
|
| 54 |
+
resume: Union[bool, Literal["auto"], Path] = False,
|
| 55 |
+
data: Optional[DataModule] = None,
|
| 56 |
+
data_dir: Optional[Path] = None,
|
| 57 |
+
train: TrainArgs = TrainArgs(
|
| 58 |
+
save_interval=1000,
|
| 59 |
+
log_interval=1,
|
| 60 |
+
global_batch_size=512,
|
| 61 |
+
micro_batch_size=4,
|
| 62 |
+
max_tokens=int(3e12), # 3 trillion
|
| 63 |
+
max_norm=1.0,
|
| 64 |
+
min_lr=4e-5,
|
| 65 |
+
lr_warmup_steps=2000,
|
| 66 |
+
tie_embeddings=False,
|
| 67 |
+
),
|
| 68 |
+
eval: EvalArgs = EvalArgs(interval=1000, max_iters=100),
|
| 69 |
+
log: LogArgs = LogArgs(),
|
| 70 |
+
optimizer: Union[str, Dict] = "AdamW",
|
| 71 |
+
devices: Union[int, str] = "auto",
|
| 72 |
+
num_nodes: int = 1,
|
| 73 |
+
tokenizer_dir: Optional[Path] = None,
|
| 74 |
+
logger_name: Literal["wandb", "tensorboard", "csv", "mlflow"] = "tensorboard",
|
| 75 |
+
seed: int = 42,
|
| 76 |
+
):
|
| 77 |
+
"""Pretrain a model.
|
| 78 |
+
|
| 79 |
+
Arguments:
|
| 80 |
+
model_name: The name of the model to pretrain. Choose from names in ``litgpt.config``. Use "list" to list the supported models.
|
| 81 |
+
model_config: A ``litgpt.Config`` object to define the model architecture. Mutually exclusive with
|
| 82 |
+
``model_name``. Overrides ``model_name`` if specified.
|
| 83 |
+
out_dir: Directory in which to save checkpoints and logs. If running in a Lightning Studio Job, look for it in
|
| 84 |
+
/teamspace/jobs/<job-name>/share.
|
| 85 |
+
precision: The precision to use for pretraining. Determines a compatible precision setting by default.
|
| 86 |
+
initial_checkpoint_dir: Optional path to a checkpoint directory to initialize the model from.
|
| 87 |
+
Useful for continued pretraining. Mutually exclusive with ``resume``.
|
| 88 |
+
resume: Path to a checkpoint directory to resume from in case training was interrupted, or ``True`` to resume
|
| 89 |
+
from the latest checkpoint in ``out_dir``. An error will be raised if no checkpoint is found. Passing
|
| 90 |
+
``'auto'`` will resume from the latest checkpoint but not error if no checkpoint exists.
|
| 91 |
+
data: Data-related arguments. If not provided, the default is ``litgpt.data.TinyLlama``.
|
| 92 |
+
train: Training-related arguments. See ``litgpt.args.TrainArgs`` for details.
|
| 93 |
+
eval: Evaluation-related arguments. See ``litgpt.args.EvalArgs`` for details.
|
| 94 |
+
optimizer: An optimizer name (such as "AdamW") or config.
|
| 95 |
+
|
| 96 |
+
devices: How many devices/GPUs to use. Uses all GPUs by default.
|
| 97 |
+
num_nodes: How many nodes the code is being run on.
|
| 98 |
+
tokenizer_dir: Optional path to the tokenizer dir that was used for preprocessing the dataset. Only some data
|
| 99 |
+
modules require this.
|
| 100 |
+
logger_name: The name of the logger to send metrics to.
|
| 101 |
+
seed: The random seed to use for reproducibility.
|
| 102 |
+
"""
|
| 103 |
+
if model_name == "list":
|
| 104 |
+
available_models = "\n".join(sorted(name_to_config))
|
| 105 |
+
print(f"Available values:\n{available_models}")
|
| 106 |
+
quit()
|
| 107 |
+
|
| 108 |
+
if initial_checkpoint_dir is not None:
|
| 109 |
+
initial_checkpoint_dir = extend_checkpoint_dir(initial_checkpoint_dir)
|
| 110 |
+
|
| 111 |
+
if tokenizer_dir is not None:
|
| 112 |
+
tokenizer_dir = extend_checkpoint_dir(tokenizer_dir)
|
| 113 |
+
|
| 114 |
+
if model_config is None:
|
| 115 |
+
# Support both model_name options: meta-llama/Meta-Llama-3-8B & Meta-Llama-3-8B
|
| 116 |
+
try:
|
| 117 |
+
model_config = Config.from_name(model_name)
|
| 118 |
+
except ValueError:
|
| 119 |
+
print(f"Model name {model_name} is not supported.\n")
|
| 120 |
+
available_models = "\n".join(sorted(name_to_config))
|
| 121 |
+
print(f"Available values:\n{available_models}")
|
| 122 |
+
quit()
|
| 123 |
+
|
| 124 |
+
hparams = capture_hparams()
|
| 125 |
+
|
| 126 |
+
from litgpt.data import Arxiv
|
| 127 |
+
if isinstance(data, Arxiv):
|
| 128 |
+
data.arxiv_train = str(data_dir).rstrip("/") + "/train"
|
| 129 |
+
data.arxiv_val = str(data_dir).rstrip("/") + "/train"
|
| 130 |
+
else:
|
| 131 |
+
data = TinyLlama() if data is None else data
|
| 132 |
+
|
| 133 |
+
config = Config.from_name(model_name) if model_config is None else model_config
|
| 134 |
+
precision = precision or get_default_supported_precision(training=True)
|
| 135 |
+
devices = parse_devices(devices)
|
| 136 |
+
out_dir = init_out_dir(out_dir)
|
| 137 |
+
# in case the dataset requires the Tokenizer
|
| 138 |
+
tokenizer = Tokenizer(tokenizer_dir) if tokenizer_dir is not None else None
|
| 139 |
+
|
| 140 |
+
logger = choose_logger(
|
| 141 |
+
logger_name,
|
| 142 |
+
out_dir,
|
| 143 |
+
name=f"pretrain-{config.name}",
|
| 144 |
+
resume=bool(resume),
|
| 145 |
+
log_interval=train.log_interval,
|
| 146 |
+
log_args=asdict(log),
|
| 147 |
+
)
|
| 148 |
+
|
| 149 |
+
if devices * num_nodes > 1:
|
| 150 |
+
strategy = FSDPStrategy(auto_wrap_policy={Block}, state_dict_type="full", sharding_strategy="HYBRID_SHARD")
|
| 151 |
+
else:
|
| 152 |
+
strategy = "auto"
|
| 153 |
+
|
| 154 |
+
fabric = L.Fabric(devices=devices, num_nodes=num_nodes, strategy=strategy, precision=precision, loggers=[logger])
|
| 155 |
+
|
| 156 |
+
if torch.cuda.is_available() and devices > 1:
|
| 157 |
+
check_nvlink_connectivity(fabric)
|
| 158 |
+
|
| 159 |
+
fabric.launch()
|
| 160 |
+
|
| 161 |
+
fabric.print(pprint.pformat(hparams))
|
| 162 |
+
if logger_name in ("tensorboard", "wandb", "mlflow"):
|
| 163 |
+
fabric.logger.log_hyperparams(hparams)
|
| 164 |
+
|
| 165 |
+
main(
|
| 166 |
+
fabric=fabric,
|
| 167 |
+
devices=devices,
|
| 168 |
+
num_nodes=num_nodes,
|
| 169 |
+
seed=seed,
|
| 170 |
+
initial_checkpoint_dir=initial_checkpoint_dir,
|
| 171 |
+
resume=resume,
|
| 172 |
+
config=config,
|
| 173 |
+
data=data,
|
| 174 |
+
out_dir=out_dir,
|
| 175 |
+
tokenizer_dir=tokenizer_dir,
|
| 176 |
+
tokenizer=tokenizer,
|
| 177 |
+
train=train,
|
| 178 |
+
eval=eval,
|
| 179 |
+
optimizer=optimizer,
|
| 180 |
+
)
|
| 181 |
+
|
| 182 |
+
|
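As a rough usage sketch of the setup() entry point defined above: the model name, paths, and the Arxiv data module arguments here are assumptions chosen for illustration, not values prescribed by this script.

from pathlib import Path
from litgpt.args import TrainArgs, EvalArgs
from litgpt.data import Arxiv

setup(
    model_name="pythia-160m",                                   # any name from litgpt.config; pass "list" to print them
    data=Arxiv(),                                               # assumed data module; setup() then derives arxiv_train/arxiv_val from data_dir
    data_dir=Path("data/arxiv"),                                # assumed dataset root
    tokenizer_dir=Path("checkpoints/EleutherAI/pythia-160m"),   # assumed tokenizer checkpoint
    train=TrainArgs(max_tokens=int(1e9), max_norm=1.0, global_batch_size=128, micro_batch_size=4),
    eval=EvalArgs(interval=1000, max_iters=100),
    devices=1,
)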
| 183 |
+
def main(
|
| 184 |
+
fabric: L.Fabric,
|
| 185 |
+
devices: int,
|
| 186 |
+
seed: int,
|
| 187 |
+
initial_checkpoint_dir: Optional[Path],
|
| 188 |
+
resume: Union[bool, Literal["auto"], Path],
|
| 189 |
+
config: Config,
|
| 190 |
+
data: DataModule,
|
| 191 |
+
out_dir: Path,
|
| 192 |
+
tokenizer_dir: Optional[Path],
|
| 193 |
+
tokenizer: Optional[Tokenizer],
|
| 194 |
+
train: TrainArgs,
|
| 195 |
+
eval: EvalArgs,
|
| 196 |
+
optimizer: Union[str, Dict],
|
| 197 |
+
num_nodes: int = 1,
|
| 198 |
+
) -> None:
|
| 199 |
+
validate_args(train, eval, initial_checkpoint_dir, resume)
|
| 200 |
+
|
| 201 |
+
if fabric.global_rank == 0:
|
| 202 |
+
out_dir.mkdir(parents=True, exist_ok=True)
|
| 203 |
+
|
| 204 |
+
fabric.seed_everything(seed) # same seed for every process to init model (FSDP)
|
| 205 |
+
|
| 206 |
+
t0 = time.perf_counter()
|
| 207 |
+
with fabric.init_module(empty_init=True):
|
| 208 |
+
model = GPT(config)
|
| 209 |
+
|
| 210 |
+
initialize_weights(fabric, model, n_layer=config.n_layer, n_embd=config.n_embd)
|
| 211 |
+
|
| 212 |
+
if train.tie_embeddings:
|
| 213 |
+
model.transformer.wte.weight = model.lm_head.weight
|
| 214 |
+
if train.max_seq_length:
|
| 215 |
+
model.max_seq_length = train.max_seq_length
|
| 216 |
+
|
| 217 |
+
fabric.print(f"Time to instantiate model: {time.perf_counter() - t0:.02f} seconds.")
|
| 218 |
+
fabric.print(f"Total parameters: {num_parameters(model):,}")
|
| 219 |
+
|
| 220 |
+
model = torch.compile(model)
|
| 221 |
+
model = fabric.setup(model)
|
| 222 |
+
|
| 223 |
+
extra_kwargs = {"fused": fabric.device.type == "cuda"}
|
| 224 |
+
optimizer = instantiate_torch_optimizer(optimizer, model.parameters(), **extra_kwargs)
|
| 225 |
+
optimizer = fabric.setup_optimizers(optimizer)
|
| 226 |
+
|
| 227 |
+
train_dataloader, val_dataloader = get_dataloaders(fabric, data, tokenizer, train, model.max_seq_length)
|
| 228 |
+
train_dataloader, val_dataloader = fabric.setup_dataloaders(train_dataloader, val_dataloader)
|
| 229 |
+
|
| 230 |
+
if initial_checkpoint_dir:
|
| 231 |
+
ckpt_path = initial_checkpoint_dir / "lit_model.pth"
|
| 232 |
+
|
| 233 |
+
if fabric.global_rank == 0:
|
| 234 |
+
try:
|
| 235 |
+
obj = torch.load(ckpt_path, map_location="cpu")
|
| 236 |
+
except Exception as e:
|
| 237 |
+
raise RuntimeError(f"[load] 无法读取 {ckpt_path}: {e}")
|
| 238 |
+
|
| 239 |
+
if isinstance(obj, dict) and "model" in obj:
|
| 240 |
+
import os
|
| 241 |
+
print(f"[fix] {ckpt_path} 是整包 state,提取 model 权重并原子覆盖...", flush=True)
|
| 242 |
+
sd = obj["model"]
|
| 243 |
+
# strip an optional 'module.' prefix from parameter names
|
| 244 |
+
if len(sd) and next(iter(sd)).startswith("module."):
|
| 245 |
+
sd = {k[7:]: v for k, v in sd.items()}
|
| 246 |
+
|
| 247 |
+
tmp = ckpt_path.with_suffix(".pth.tmp")
|
| 248 |
+
torch.save(sd, tmp)
|
| 249 |
+
os.replace(tmp, ckpt_path) # atomic replace to avoid leaving a half-written file
|
| 250 |
+
try:
|
| 251 |
+
os.sync() # flush to disk (if permissions allow)
|
| 252 |
+
except Exception:
|
| 253 |
+
pass
|
| 254 |
+
print(f"[fix] 已覆盖为纯权重: {ckpt_path}", flush=True)
|
| 255 |
+
else:
|
| 256 |
+
print(f"[ok] {ckpt_path} 已是纯权重", flush=True)
|
| 257 |
+
|
| 258 |
+
fabric.barrier()
|
| 259 |
+
|
| 260 |
+
fabric.load_raw(ckpt_path, model, strict=False)
|
| 261 |
+
|
| 262 |
+
state = {
|
| 263 |
+
"model": model,
|
| 264 |
+
"optimizer": optimizer,
|
| 265 |
+
"train_dataloader": train_dataloader,
|
| 266 |
+
"iter_num": 0,
|
| 267 |
+
"step_count": 0,
|
| 268 |
+
}
|
| 269 |
+
|
| 270 |
+
resume = find_resume_path(resume, out_dir)
|
| 271 |
+
if resume:
|
| 272 |
+
fabric.print(f"Resuming training from {resume}")
|
| 273 |
+
fabric.load(resume, state)
|
| 274 |
+
|
| 275 |
+
train_time = time.perf_counter()
|
| 276 |
+
|
| 277 |
+
# work around PyTorch issue https://github.com/pytorch/pytorch/issues/152162
|
| 278 |
+
# which does not like the lazy initialization to be called in dynamo.
|
| 279 |
+
# Happens with PyTorch 2.7.
|
| 280 |
+
if (
|
| 281 |
+
torch.__version__.startswith("2.7.")
|
| 282 |
+
and (model._forward_module.__class__.__name__ == "OptimizedModule")
|
| 283 |
+
and (model._forward_module._orig_mod.__class__.__name__ == "FullyShardedDataParallel")
|
| 284 |
+
):
|
| 285 |
+
from torch.distributed.fsdp._runtime_utils import _root_pre_forward
|
| 286 |
+
|
| 287 |
+
_root_pre_forward(model._forward_module._orig_mod, model._forward_module._orig_mod, [], {})
|
| 288 |
+
|
| 289 |
+
fit(
|
| 290 |
+
fabric=fabric,
|
| 291 |
+
devices=devices,
|
| 292 |
+
num_nodes=num_nodes,
|
| 293 |
+
state=state,
|
| 294 |
+
train_dataloader=train_dataloader,
|
| 295 |
+
val_dataloader=val_dataloader,
|
| 296 |
+
out_dir=out_dir,
|
| 297 |
+
tokenizer_dir=tokenizer_dir,
|
| 298 |
+
train=train,
|
| 299 |
+
eval=eval,
|
| 300 |
+
)
|
| 301 |
+
|
| 302 |
+
# Save final checkpoint
|
| 303 |
+
save_checkpoint(fabric, state, tokenizer_dir, out_dir / "final" / "lit_model.pth")
|
| 304 |
+
|
| 305 |
+
total_tokens = state["iter_num"] * train.micro_batch_size * model.max_seq_length * fabric.world_size
|
| 306 |
+
|
| 307 |
+
# Print formatted output
|
| 308 |
+
separator = "-" * 40
|
| 309 |
+
fabric.print(separator)
|
| 310 |
+
fabric.print("| Performance")
|
| 311 |
+
fabric.print(f"| - Total tokens : {total_tokens:,}")
|
| 312 |
+
fabric.print(f"| - Training Time : {(time.perf_counter() - train_time):.2f} s")
|
| 313 |
+
fabric.print(f"| - Tok/sec : {total_tokens / train_time:.2f} tok/s")
|
| 314 |
+
fabric.print("| " + "-" * 40)
|
| 315 |
+
|
| 316 |
+
if fabric.device.type == "cuda":
|
| 317 |
+
memory_used = torch.cuda.max_memory_allocated() / 1e9
|
| 318 |
+
fabric.print("| Memory Usage")
|
| 319 |
+
fabric.print(f"| - Memory Used : {memory_used:.2f} GB")
|
| 320 |
+
fabric.print(separator)
|
| 321 |
+
|
| 322 |
+
|
| 323 |
+
def fit(
|
| 324 |
+
fabric: L.Fabric,
|
| 325 |
+
devices: int,
|
| 326 |
+
state: dict,
|
| 327 |
+
train_dataloader: DataLoader,
|
| 328 |
+
val_dataloader: DataLoader,
|
| 329 |
+
out_dir: Path,
|
| 330 |
+
tokenizer_dir: Optional[Path],
|
| 331 |
+
train: TrainArgs,
|
| 332 |
+
eval: EvalArgs,
|
| 333 |
+
num_nodes: int = 1,
|
| 334 |
+
) -> None:
|
| 335 |
+
model = state["model"]
|
| 336 |
+
optimizer = state["optimizer"]
|
| 337 |
+
|
| 338 |
+
if eval.initial_validation:
|
| 339 |
+
val_loss = validate(fabric, model, val_dataloader, max_iters=eval.max_iters)
|
| 340 |
+
val_loss = f"{val_loss:.3f}"
|
| 341 |
+
else:
|
| 342 |
+
fabric.print("Verifying settings ...")
|
| 343 |
+
validate(fabric, model, val_dataloader, max_iters=2, verbose=False) # sanity check
|
| 344 |
+
val_loss = "n/a"
|
| 345 |
+
|
| 346 |
+
throughput = ThroughputMonitor(fabric, window_size=5)
|
| 347 |
+
|
| 348 |
+
with torch.device("meta"):
|
| 349 |
+
meta_model = GPT(model.config)
|
| 350 |
+
x = torch.randint(0, 1, (train.micro_batch_size, meta_model.max_seq_length))
|
| 351 |
+
model_fwd = lambda: meta_model(x) # noqa: F821
|
| 352 |
+
model_loss = lambda y: chunked_cross_entropy(y, x, chunk_size=0) # noqa: F821
|
| 353 |
+
measured_flops = measure_flops(meta_model, model_fwd, model_loss)
|
| 354 |
+
fabric.print(f"Measured TFLOPs: {measured_flops * fabric.world_size / 1e12:.2f}")
|
| 355 |
+
del meta_model, x
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
max_tokens_per_device = train.max_tokens // fabric.world_size
|
| 359 |
+
tokens_per_iter = train.micro_batch_size * model.max_seq_length
|
| 360 |
+
max_iters = max_tokens_per_device // tokens_per_iter
|
| 361 |
+
log_iter_interval = train.log_interval * train.gradient_accumulation_iters(devices, num_nodes)
|
| 362 |
+
initial_iter = state["iter_num"]
|
| 363 |
+
train_iterator = CycleIterator(train_dataloader)
|
| 364 |
+
|
| 365 |
+
running_loss = RunningMean(window=train.gradient_accumulation_iters(devices, num_nodes), sync_on_compute=False).to(
|
| 366 |
+
fabric.device
|
| 367 |
+
)
|
| 368 |
+
fabric.barrier()
|
| 369 |
+
total_t0 = time.perf_counter()
|
| 370 |
+
|
| 371 |
+
warmup_iters = train.warmup_iters(devices, num_nodes, max_iters, train_dataloader)
|
| 372 |
+
|
| 373 |
+
for train_data in train_iterator:
|
| 374 |
+
if state["iter_num"] >= max_iters:
|
| 375 |
+
break
|
| 376 |
+
|
| 377 |
+
# determine and set the learning rate for this iteration
|
| 378 |
+
lr = get_lr(optimizer.defaults["lr"], state["iter_num"], warmup_iters, max_iters, train.min_lr)
|
| 379 |
+
for param_group in optimizer.param_groups:
|
| 380 |
+
param_group["lr"] = lr
|
| 381 |
+
|
| 382 |
+
state["iter_num"] += 1
|
| 383 |
+
iter_t0 = time.perf_counter()
|
| 384 |
+
|
| 385 |
+
input_ids = train_data[:, 0 : model.max_seq_length].contiguous().long()
|
| 386 |
+
targets = train_data[:, 1 : (model.max_seq_length + 1)].contiguous().long()
|
| 387 |
+
|
| 388 |
+
is_accumulating = state["iter_num"] % train.gradient_accumulation_iters(devices, num_nodes) != 0
|
| 389 |
+
with fabric.no_backward_sync(model, enabled=is_accumulating):
|
| 390 |
+
logits = model(input_ids)
|
| 391 |
+
loss = chunked_cross_entropy(logits, targets)
|
| 392 |
+
fabric.backward(loss / train.gradient_accumulation_iters(devices, num_nodes))
|
| 393 |
+
|
| 394 |
+
running_loss.update(loss.detach())
|
| 395 |
+
|
| 396 |
+
if not is_accumulating:
|
| 397 |
+
fabric.clip_gradients(model, optimizer, max_norm=train.max_norm)
|
| 398 |
+
optimizer.step()
|
| 399 |
+
optimizer.zero_grad()
|
| 400 |
+
state["step_count"] += 1
|
| 401 |
+
|
| 402 |
+
if state["iter_num"] % log_iter_interval == 0:
|
| 403 |
+
loss = running_loss.compute().item() # expensive device-to-host synchronization
|
| 404 |
+
t1 = time.perf_counter()
|
| 405 |
+
throughput.update(
|
| 406 |
+
time=(t1 - total_t0),
|
| 407 |
+
flops=(measured_flops * log_iter_interval),
|
| 408 |
+
batches=state["iter_num"],
|
| 409 |
+
samples=(state["iter_num"] * train.micro_batch_size),
|
| 410 |
+
lengths=(state["iter_num"] * train.micro_batch_size * model.max_seq_length),
|
| 411 |
+
)
|
| 412 |
+
metrics = {
|
| 413 |
+
"loss": loss,
|
| 414 |
+
"iter": state["iter_num"],
|
| 415 |
+
"step": state["step_count"],
|
| 416 |
+
"epoch": train_iterator.epoch,
|
| 417 |
+
"iter_time": t1 - iter_t0,
|
| 418 |
+
"remaining_time": (
|
| 419 |
+
(t1 - total_t0) / (state["iter_num"] - initial_iter) * (max_iters - state["iter_num"])
|
| 420 |
+
),
|
| 421 |
+
"tokens": state["iter_num"] * train.micro_batch_size * model.max_seq_length,
|
| 422 |
+
"total_tokens": (state["iter_num"] * train.micro_batch_size * model.max_seq_length * fabric.world_size),
|
| 423 |
+
"learning_rate": lr,
|
| 424 |
+
}
|
| 425 |
+
if isinstance(val_loss, float):
|
| 426 |
+
val_loss = f"{val_loss:.3f}"
|
| 427 |
+
fabric.print(
|
| 428 |
+
f"Epoch {metrics['epoch'] + 1} | iter {metrics['iter']} step {metrics['step']} |"
|
| 429 |
+
f" loss train: {metrics['loss']:.3f},"
|
| 430 |
+
f" val: {val_loss} |"
|
| 431 |
+
f" iter time: {metrics['iter_time'] * 1000:.2f} ms"
|
| 432 |
+
f"{' (step)' if not is_accumulating else ''}"
|
| 433 |
+
f" remaining time: {timedelta(seconds=int(metrics['remaining_time']))!s}",
|
| 434 |
+
flush=True
|
| 435 |
+
)
|
| 436 |
+
|
| 437 |
+
throughput_metrics = throughput.compute()
|
| 438 |
+
metrics.update(throughput_metrics)
|
| 439 |
+
fabric.log_dict(metrics, step=state["iter_num"] - 1)
|
| 440 |
+
|
| 441 |
+
if val_dataloader is not None and not is_accumulating and state["step_count"] % eval.interval == 0:
|
| 442 |
+
t0 = time.perf_counter()
|
| 443 |
+
val_loss = validate(fabric, model, val_dataloader, max_iters=eval.max_iters)
|
| 444 |
+
val_loss = val_loss.item()
|
| 445 |
+
td = time.perf_counter() - t0
|
| 446 |
+
|
| 447 |
+
fabric.print(f"iter {state['iter_num']}: val loss {val_loss:.4f}, val time: {td * 1000:.2f} ms")
|
| 448 |
+
metrics = {"val_loss": val_loss, "val_ppl": math.exp(val_loss)}
|
| 449 |
+
fabric.log_dict(metrics, step=state["iter_num"] - 1)
|
| 450 |
+
fabric.barrier()
|
| 451 |
+
|
| 452 |
+
if train.save_interval is not None and not is_accumulating and state["step_count"] % train.save_interval == 0:
|
| 453 |
+
save_checkpoint(fabric, state, tokenizer_dir, out_dir / f"step-{state['step_count']:08d}" / "lit_model.pth")
|
| 454 |
+
|
| 455 |
+
# Final validation
|
| 456 |
+
if eval.final_validation:
|
| 457 |
+
val_loss = validate(fabric, model, val_dataloader, max_iters=eval.max_iters)
|
| 458 |
+
metrics = {"val_loss": val_loss, "val_ppl": math.exp(val_loss)}
|
| 459 |
+
fabric.log_dict(metrics, step=state["iter_num"])
|
| 460 |
+
fabric.print(f"Final evaluation | val loss: {val_loss.item():.3f} | val ppl: {math.exp(val_loss):.3f}")
|
| 461 |
+
|
| 462 |
+
|
| 463 |
+
@torch.no_grad()
|
| 464 |
+
def validate(
|
| 465 |
+
fabric: L.Fabric, model: nn.Module, val_dataloader: DataLoader, max_iters: int, verbose: bool = True
|
| 466 |
+
) -> torch.Tensor:
|
| 467 |
+
fabric.barrier()
|
| 468 |
+
if verbose:
|
| 469 |
+
fabric.print("Validating ...")
|
| 470 |
+
model.eval()
|
| 471 |
+
|
| 472 |
+
losses = []
|
| 473 |
+
for k, batch in enumerate(val_dataloader):
|
| 474 |
+
if k >= max_iters:
|
| 475 |
+
break
|
| 476 |
+
input_ids = batch[:, 0 : model.max_seq_length].contiguous().long()
|
| 477 |
+
targets = batch[:, 1 : (model.max_seq_length + 1)].contiguous().long()
|
| 478 |
+
logits = model(input_ids)
|
| 479 |
+
loss = chunked_cross_entropy(logits, targets)
|
| 480 |
+
losses.append(loss)
|
| 481 |
+
|
| 482 |
+
val_loss = torch.stack(losses).mean()
|
| 483 |
+
model.train()
|
| 484 |
+
fabric.barrier()
|
| 485 |
+
return val_loss
|
| 486 |
+
|
| 487 |
+
|
| 488 |
+
def get_dataloaders(
|
| 489 |
+
fabric: L.Fabric, data: DataModule, tokenizer: Tokenizer, train: TrainArgs, block_size: int
|
| 490 |
+
) -> Tuple[DataLoader, DataLoader]:
|
| 491 |
+
data.connect(tokenizer=tokenizer, batch_size=train.micro_batch_size, max_seq_length=block_size)
|
| 492 |
+
with fabric.rank_zero_first():
|
| 493 |
+
data.prepare_data()
|
| 494 |
+
data.setup()
|
| 495 |
+
train_dataloader = data.train_dataloader()
|
| 496 |
+
val_dataloader = data.val_dataloader()
|
| 497 |
+
return train_dataloader, val_dataloader
|
| 498 |
+
|
| 499 |
+
|
| 500 |
+
# learning rate decay scheduler (cosine with linear warmup)
|
| 501 |
+
def get_lr(learning_rate: float, it: int, warmup_iters: int, max_iters: int, min_lr: float) -> float:
|
| 502 |
+
# 1) linear warmup for warmup_iters steps
|
| 503 |
+
if it < warmup_iters:
|
| 504 |
+
return learning_rate * it / warmup_iters
|
| 505 |
+
# 2) if it > max_iters, return min learning rate
|
| 506 |
+
if it > max_iters:
|
| 507 |
+
return min_lr
|
| 508 |
+
# 3) in between, use cosine decay down to min learning rate
|
| 509 |
+
decay_ratio = (it - warmup_iters) / (max_iters - warmup_iters)
|
| 510 |
+
assert 0 <= decay_ratio <= 1
|
| 511 |
+
coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio)) # coeff ranges 0..1
|
| 512 |
+
return min_lr + coeff * (learning_rate - min_lr)
|
| 513 |
+
|
| 514 |
+
|
| 515 |
+
def initialize_weights(fabric: L.Fabric, model: GPT, n_layer: int, n_embd: int) -> None:
|
| 516 |
+
"""GPT-NeoX weight initialization (https://arxiv.org/abs/2204.06745)."""
|
| 517 |
+
# Adapted from https://github.com/jzhang38/TinyLlama
|
| 518 |
+
|
| 519 |
+
def init_weights(module, std):
|
| 520 |
+
nn.init.normal_(module.weight, mean=0.0, std=std)
|
| 521 |
+
if getattr(module, "bias", None) is not None:
|
| 522 |
+
nn.init.zeros_(module.bias)
|
| 523 |
+
|
| 524 |
+
for mod in model.modules():
|
| 525 |
+
if isinstance(mod, (nn.Embedding, nn.Linear)):
|
| 526 |
+
mod.reset_parameters = partial(init_weights, mod, std=math.sqrt(2.0 / 5 / n_embd))
|
| 527 |
+
|
| 528 |
+
# need a separate loop because `mod.proj` below is a `nn.Linear` too
|
| 529 |
+
for mod in model.modules():
|
| 530 |
+
if isinstance(mod, (LLaMAMLP, CausalSelfAttention)):
|
| 531 |
+
mod.proj.reset_parameters = partial(init_weights, mod.proj, std=(1 / math.sqrt(n_embd) / n_layer))
|
| 532 |
+
|
| 533 |
+
if not isinstance(fabric.strategy, FSDPStrategy):
|
| 534 |
+
reset_parameters(model)
|
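For intuition, a quick numeric sketch of the two standard deviations this initialization produces, using assumed sizes (n_embd=2048, n_layer=22, roughly TinyLlama-shaped):

import math
n_embd, n_layer = 2048, 22                      # assumed model dimensions, for illustration only
general_std = math.sqrt(2.0 / 5 / n_embd)       # ~0.0140, applied to embeddings and linear layers
proj_std = 1 / math.sqrt(n_embd) / n_layer      # ~0.0010, applied to attention/MLP output projections
print(f"{general_std:.4f} {proj_std:.4f}")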
| 535 |
+
|
| 536 |
+
|
| 537 |
+
def save_checkpoint(fabric, state, tokenizer_dir, checkpoint_file):
|
| 538 |
+
model = state["model"]
|
| 539 |
+
checkpoint_file.parent.mkdir(parents=True, exist_ok=True)
|
| 540 |
+
fabric.print(f"Saving checkpoint to {str(checkpoint_file)!r}")
|
| 541 |
+
fabric.save(checkpoint_file, state)
|
| 542 |
+
if fabric.global_rank == 0:
|
| 543 |
+
save_hyperparameters(setup, checkpoint_file.parent)
|
| 544 |
+
if tokenizer_dir is not None:
|
| 545 |
+
copy_config_files(tokenizer_dir, checkpoint_file.parent)
|
| 546 |
+
save_config(model.config, checkpoint_file.parent)
|
| 547 |
+
|
| 548 |
+
|
| 549 |
+
def validate_args(train: TrainArgs, eval: EvalArgs, initial_checkpoint_dir, resume) -> None:
|
| 550 |
+
issues = []
|
| 551 |
+
unsupported = [(train, ["max_steps", "epochs"]), (eval, ["max_new_tokens"])]
|
| 552 |
+
for args, names in unsupported:
|
| 553 |
+
for name in names:
|
| 554 |
+
if getattr(args, name) is not None:
|
| 555 |
+
issues.append(f"{__file__} doesn't support the {name!r} argument. This is set in {args}")
|
| 556 |
+
required = [(train, ["max_tokens", "max_norm"])]
|
| 557 |
+
for args, names in required:
|
| 558 |
+
for name in names:
|
| 559 |
+
if getattr(args, name) is None:
|
| 560 |
+
issues.append(f"{__file__} requires the {name!r} argument. This is set in {args}")
|
| 561 |
+
if initial_checkpoint_dir and resume:
|
| 562 |
+
issues.append("Can't provide both `--resume` and `--initial_checkpoint_dir`. Choose one.")
|
| 563 |
+
if issues:
|
| 564 |
+
raise ValueError("\n".join(issues))
|
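A small sanity sketch of the cosine-with-warmup schedule implemented by get_lr() above, with made-up hyperparameters (peak LR 4e-4, 2000 warmup iterations, 100000 total iterations, floor 4e-5):

peak_lr, warmup, max_iters, min_lr = 4e-4, 2000, 100_000, 4e-5
for it in (0, 1000, 2000, 50_000, 100_000, 120_000):
    print(it, f"{get_lr(peak_lr, it, warmup, max_iters, min_lr):.2e}")
# linear ramp from 0 to 4e-4 over the first 2000 iterations,
# cosine decay down to 4e-5 by iteration 100000, then constant afterwards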
litgpt/prompts.py
ADDED
|
@@ -0,0 +1,541 @@
| 1 |
+
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.
|
| 2 |
+
import importlib
|
| 3 |
+
import re
|
| 4 |
+
from abc import abstractmethod
|
| 5 |
+
from json import dumps
|
| 6 |
+
from pathlib import Path
|
| 7 |
+
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Type, Union
|
| 8 |
+
|
| 9 |
+
import yaml
|
| 10 |
+
|
| 11 |
+
from litgpt.config import Config
|
| 12 |
+
|
| 13 |
+
if TYPE_CHECKING:
|
| 14 |
+
from litgpt import Tokenizer
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class PromptStyle:
|
| 18 |
+
"""Base interface for prompt styles."""
|
| 19 |
+
|
| 20 |
+
@abstractmethod
|
| 21 |
+
def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
|
| 22 |
+
return prompt
|
| 23 |
+
|
| 24 |
+
def stop_tokens(self, tokenizer: "Tokenizer") -> Tuple[List[int], ...]:
|
| 25 |
+
return ([tokenizer.eos_id],)
|
| 26 |
+
|
| 27 |
+
@classmethod
|
| 28 |
+
def from_name(cls, name: str) -> "PromptStyle":
|
| 29 |
+
return prompt_styles[name]()
|
| 30 |
+
|
| 31 |
+
@classmethod
|
| 32 |
+
def from_config(cls, config: Config) -> "PromptStyle":
|
| 33 |
+
return model_name_to_prompt_style(config.name)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class Default(PromptStyle):
|
| 37 |
+
def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
|
| 38 |
+
return prompt
|
| 39 |
+
|
| 40 |
+
def stop_tokens(self, tokenizer: "Tokenizer") -> Tuple[List[int], ...]:
|
| 41 |
+
return ([tokenizer.eos_id],)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class Alpaca(PromptStyle):
|
| 45 |
+
def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
|
| 46 |
+
if kwargs.get("input"):
|
| 47 |
+
sys_prompt = sys_prompt or (
|
| 48 |
+
"Below is an instruction that describes a task, paired with an input that provides further context. "
|
| 49 |
+
"Write a response that appropriately completes the request.\n\n"
|
| 50 |
+
)
|
| 51 |
+
return f"{sys_prompt}### Instruction:\n{prompt}\n\n### Input:\n{kwargs['input']}\n\n### Response:\n"
|
| 52 |
+
|
| 53 |
+
sys_prompt = sys_prompt or (
|
| 54 |
+
"Below is an instruction that describes a task. "
|
| 55 |
+
"Write a response that appropriately completes the request.\n\n"
|
| 56 |
+
)
|
| 57 |
+
return f"{sys_prompt}### Instruction:\n{prompt}\n\n### Response:\n"
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
class FLAN(PromptStyle):
|
| 61 |
+
def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
|
| 62 |
+
sys_prompt = sys_prompt or (
|
| 63 |
+
"Below is an instruction that describes a task. "
|
| 64 |
+
"Write a response that appropriately completes the request.\n\n"
|
| 65 |
+
)
|
| 66 |
+
return f"{sys_prompt}### Instruction:\n{prompt}\n\n### Response:\n"
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
class Longform(PromptStyle):
|
| 70 |
+
def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
|
| 71 |
+
sys_prompt = sys_prompt or (
|
| 72 |
+
"Below is an instruction that describes a task, paired with an input that provides further context. "
|
| 73 |
+
"Write a response that appropriately completes the request.\n\n"
|
| 74 |
+
)
|
| 75 |
+
return f"{sys_prompt}### Instruction:\n{prompt}\n\n### Response:\n"
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
class StableLMAlpha(PromptStyle):
|
| 79 |
+
def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
|
| 80 |
+
sys_prompt = sys_prompt or (
|
| 81 |
+
"# StableLM Tuned (Alpha version)\n- StableLM is a helpful and harmless open-source AI language"
|
| 82 |
+
" model developed by StabilityAI.\n- StableLM is excited to be able to help the user, but will refuse to do"
|
| 83 |
+
" anything that could be considered harmful to the user.\n- StableLM is more than just an information"
|
| 84 |
+
" source, StableLM is also able to write poetry, short stories, and make jokes.\n- StableLM will refuse to"
|
| 85 |
+
" participate in anything that could harm a human."
|
| 86 |
+
)
|
| 87 |
+
return f"<|SYSTEM|>{sys_prompt}<|USER|>{prompt}<|ASSISTANT|>"
|
| 88 |
+
|
| 89 |
+
def stop_tokens(self, tokenizer: "Tokenizer") -> Tuple[List[int], ...]:
|
| 90 |
+
return (
|
| 91 |
+
[tokenizer.eos_id],
|
| 92 |
+
[tokenizer.token_to_id("<|SYSTEM|>")],
|
| 93 |
+
[tokenizer.token_to_id("<|ASSISTANT|>")],
|
| 94 |
+
[tokenizer.token_to_id("<|USER|>")],
|
| 95 |
+
)
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
class StableLMZephyr(PromptStyle):
|
| 99 |
+
def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
|
| 100 |
+
return f"<|user|>\n{prompt}<|endoftext|>\n<|assistant|>\n"
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
class Falcon(PromptStyle):
|
| 104 |
+
def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
|
| 105 |
+
return f"{prompt}\nAnswer:"
|
| 106 |
+
|
| 107 |
+
def stop_tokens(self, tokenizer: "Tokenizer") -> Tuple[List[int], ...]:
|
| 108 |
+
return (
|
| 109 |
+
[tokenizer.eos_id],
|
| 110 |
+
# the model rarely emits the eos token and instead outputs newlines, but we cannot use them
|
| 111 |
+
# to stop or else things like code generation wouldn't work
|
| 112 |
+
[tokenizer.token_to_id("User"), tokenizer.token_to_id(":")],
|
| 113 |
+
[193, tokenizer.token_to_id("User")], # 193: '\n'
|
| 114 |
+
)
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
class Falcon3(PromptStyle):
|
| 118 |
+
def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
|
| 119 |
+
return f"<|user|>\n{prompt}<|endoftext|>\n<|assistant|>\n"
|
| 120 |
+
|
| 121 |
+
def stop_tokens(self, tokenizer: "Tokenizer") -> Tuple[List[int], ...]:
|
| 122 |
+
return (
|
| 123 |
+
[tokenizer.eos_id],
|
| 124 |
+
[tokenizer.token_to_id("<|endoftext|>")],
|
| 125 |
+
)
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
class Llama2FunctionCalling(PromptStyle):
|
| 129 |
+
def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
|
| 130 |
+
# Has to be before the llama config
|
| 131 |
+
b_func, e_func = "<FUNCTIONS>", "</FUNCTIONS>\n\n"
|
| 132 |
+
b_inst, e_inst = "[INST]", "[/INST]"
|
| 133 |
+
b_sys, e_sys = "<<SYS>>\n", "\n<</SYS>>\n\n"
|
| 134 |
+
# This is an example for how to format functions for the model
|
| 135 |
+
function_metadata = {
|
| 136 |
+
"function": "search_bing",
|
| 137 |
+
"description": (
|
| 138 |
+
"Search the web for content on Bing. This allows users to search online/the internet/the web for"
|
| 139 |
+
" content."
|
| 140 |
+
),
|
| 141 |
+
"arguments": [{"name": "query", "type": "string", "description": "The search query string"}],
|
| 142 |
+
}
|
| 143 |
+
|
| 144 |
+
system_prompt = sys_prompt or (
|
| 145 |
+
"You are a helpful, respectful and honest assistant. Always answer as helpfully as"
|
| 146 |
+
"possible. Your only response should be JSON formatted functions"
|
| 147 |
+
)
|
| 148 |
+
# replace the curly braces with double curly braces to escape them
|
| 149 |
+
function_list = dumps(function_metadata).replace("{", "{{").replace("}", "}}")
|
| 150 |
+
return (
|
| 151 |
+
f"{b_func}{function_list.strip()}{e_func}{b_inst}{b_sys}{system_prompt.strip()}{e_sys}{prompt}{e_inst}\n\n"
|
| 152 |
+
)
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
class Llama2(PromptStyle):
|
| 156 |
+
def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
|
| 157 |
+
b_inst, e_inst = "[INST]", "[/INST]"
|
| 158 |
+
b_sys, e_sys = "<<SYS>>\n", "\n<</SYS>>\n\n"
|
| 159 |
+
sys_prompt = sys_prompt or (
|
| 160 |
+
"You are a helpful, respectful and honest assistant. Always answer as helpfully as"
|
| 161 |
+
" possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist,"
|
| 162 |
+
" toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and"
|
| 163 |
+
" positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why"
|
| 164 |
+
" instead of answering something not correct. If you don't know the answer to a question, please don't"
|
| 165 |
+
" share false information."
|
| 166 |
+
)
|
| 167 |
+
return f"{b_inst} {b_sys}{sys_prompt}{e_sys} {prompt} {e_inst} "
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
class Llama3(PromptStyle):
|
| 171 |
+
def apply(
|
| 172 |
+
self, prompt: Union[str, List[Dict[str, str]]], *, sys_prompt: Optional[str] = None, **kwargs: str
|
| 173 |
+
) -> str:
|
| 174 |
+
default_system_prompt = sys_prompt or "You are a helpful assistant."
|
| 175 |
+
|
| 176 |
+
# https://github.com/meta-llama/llama3/blob/359887376f0aaf30e433f23e25df858d8c2a9833/llama/tokenizer.py#L202-L229
|
| 177 |
+
if isinstance(prompt, str):
|
| 178 |
+
return (
|
| 179 |
+
"<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n"
|
| 180 |
+
f"{default_system_prompt}<|eot_id|>" # No newline
|
| 181 |
+
"<|start_header_id|>user<|end_header_id|>\n\n"
|
| 182 |
+
f"{prompt}<|eot_id|>" # No newline
|
| 183 |
+
"<|start_header_id|>assistant<|end_header_id|>\n\n"
|
| 184 |
+
)
|
| 185 |
+
elif isinstance(prompt, list):
|
| 186 |
+
|
| 187 |
+
def encode_header(role: str) -> List[str]:
|
| 188 |
+
return [f"<|start_header_id|>{role}<|end_header_id|>\n\n"]
|
| 189 |
+
|
| 190 |
+
def encode_message(message: Dict[str, str]) -> List[str]:
|
| 191 |
+
tokens = encode_header(message["role"])
|
| 192 |
+
# NOTE: Meta stripped this. I'm not sure I agree, but who am I to argue?
|
| 193 |
+
tokens.append(message["content"].strip())
|
| 194 |
+
tokens.append("<|eot_id|>")
|
| 195 |
+
return tokens
|
| 196 |
+
|
| 197 |
+
def has_system_prompt(messages: List[Dict[str, str]]) -> bool:
|
| 198 |
+
return messages[0].get("role", "") == "system" if len(messages) else False
|
| 199 |
+
|
| 200 |
+
tokens = ["<|begin_of_text|>"]
|
| 201 |
+
if not has_system_prompt(prompt):
|
| 202 |
+
tokens.extend(encode_message({"role": "system", "content": default_system_prompt}))
|
| 203 |
+
for i, message in enumerate(prompt):
|
| 204 |
+
if i != 0 and message["role"] == "system":
|
| 205 |
+
raise ValueError("'system' role is only allowed at the beginning of the conversation list.")
|
| 206 |
+
if message["role"] not in ["assistant", "user", "system"]:
|
| 207 |
+
raise ValueError(
|
| 208 |
+
f"Unknown role: '{message['role']}'. Supported roles are 'assistant', 'user', and 'system'."
|
| 209 |
+
)
|
| 210 |
+
tokens.extend(encode_message(message))
|
| 211 |
+
tokens.extend(encode_header("assistant"))
|
| 212 |
+
return "".join(tokens)
|
| 213 |
+
else:
|
| 214 |
+
raise ValueError(f"Unsupported prompt type: {type(prompt)}")
|
| 215 |
+
|
| 216 |
+
def stop_tokens(self, tokenizer: "Tokenizer") -> Tuple[List[int], ...]:
|
| 217 |
+
return (
|
| 218 |
+
[tokenizer.eos_id],
|
| 219 |
+
[tokenizer.token_to_id("<|eot_id|>")],
|
| 220 |
+
)
|
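A brief usage sketch for these prompt styles (the prompt text is arbitrary): a style can be resolved by name through the prompt_styles mapping defined later in this file, or from a model Config via PromptStyle.from_config.

from litgpt.prompts import PromptStyle

style = PromptStyle.from_name("llama3")
print(style.apply("What is perplexity?"))   # wraps the prompt in the Llama 3 chat template shown above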
| 221 |
+
|
| 222 |
+
|
| 223 |
+
class R1Base(PromptStyle):
|
| 224 |
+
def apply(
|
| 225 |
+
self, prompt: Union[str, List[Dict[str, str]]], *, sys_prompt: Optional[str] = None, **kwargs: str
|
| 226 |
+
) -> str:
|
| 227 |
+
default_system_prompt = sys_prompt or ""
|
| 228 |
+
|
| 229 |
+
bos_token = "<|begin▁of▁sentence|>"
|
| 230 |
+
eos_token = ""
|
| 231 |
+
|
| 232 |
+
if isinstance(prompt, str):
|
| 233 |
+
return f"{default_system_prompt}<|User|>{prompt}<|Assistant|>" # Prepares for assistant response
|
| 234 |
+
elif isinstance(prompt, list):
|
| 235 |
+
|
| 236 |
+
def encode_message(message: Dict[str, str]) -> str:
|
| 237 |
+
role = message["role"]
|
| 238 |
+
content = message["content"].strip()
|
| 239 |
+
|
| 240 |
+
if role == "system":
|
| 241 |
+
return content # System prompt is prepended at the start
|
| 242 |
+
elif role == "user":
|
| 243 |
+
return f"<|User|>{content}"
|
| 244 |
+
elif role == "assistant":
|
| 245 |
+
return f"<|Assistant|>{content}{eos_token}"
|
| 246 |
+
else:
|
| 247 |
+
raise ValueError(f"Unknown role: '{role}'. Supported roles are 'assistant', 'user', and 'system'.")
|
| 248 |
+
|
| 249 |
+
# Extract system prompt (if any)
|
| 250 |
+
system_prompt = ""
|
| 251 |
+
if prompt[0].get("role") == "system":
|
| 252 |
+
system_prompt = prompt[0]["content"]
|
| 253 |
+
prompt = prompt[1:] # Remove system message from the list
|
| 254 |
+
|
| 255 |
+
# Construct the formatted prompt
|
| 256 |
+
formatted_prompt = system_prompt
|
| 257 |
+
for message in prompt:
|
| 258 |
+
formatted_prompt += encode_message(message)
|
| 259 |
+
|
| 260 |
+
formatted_prompt += "<|Assistant|>" # Prepares for assistant response
|
| 261 |
+
return formatted_prompt
|
| 262 |
+
else:
|
| 263 |
+
raise ValueError(f"Unsupported prompt type: {type(prompt)}")
|
| 264 |
+
|
| 265 |
+
def stop_tokens(self, tokenizer: "Tokenizer") -> Tuple[List[int], ...]:
|
| 266 |
+
return (
|
| 267 |
+
[tokenizer.eos_id],
|
| 268 |
+
[tokenizer.token_to_id("<|end▁of▁sentence|>")],
|
| 269 |
+
)
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
class FreeWilly2(PromptStyle):
|
| 273 |
+
def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
|
| 274 |
+
sys_prompt = sys_prompt or "This is a system prompt, please behave and help the user."
|
| 275 |
+
return f"### System:\n{sys_prompt}\n\n### User:\n{prompt}\n\n### Assistant:\n"
|
| 276 |
+
|
| 277 |
+
|
| 278 |
+
class Platypus(PromptStyle):
|
| 279 |
+
def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
|
| 280 |
+
return f"### Instruction:\n\n{prompt}\n\n### Response:\n"
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
class StableCode(PromptStyle):
|
| 284 |
+
def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
|
| 285 |
+
return f"###Instruction\n{prompt}###Response\n"
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
class CodeLlama(PromptStyle):
|
| 289 |
+
def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
|
| 290 |
+
# for CodeLLama, we don't set a default system prompt, but it is supported:
|
| 291 |
+
# https://huggingface.co/blog/codellama#conversational-instructions
|
| 292 |
+
# Mistral does not: https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1#instruction-format
|
| 293 |
+
b_inst, e_inst = "[INST]", "[/INST]"
|
| 294 |
+
if sys_prompt:
|
| 295 |
+
b_sys, e_sys = "<<SYS>>\n", "\n<</SYS>>\n\n"
|
| 296 |
+
return f"{b_inst} {b_sys}{sys_prompt}{e_sys}{prompt} {e_inst}"
|
| 297 |
+
return f"{b_inst} {prompt} {e_inst}"
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
class Phi1(PromptStyle):
|
| 301 |
+
def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
|
| 302 |
+
return f"{prompt}\n\nAnswer:"
|
| 303 |
+
|
| 304 |
+
def stop_tokens(self, tokenizer: "Tokenizer") -> Tuple[List[int], ...]:
|
| 305 |
+
return (
|
| 306 |
+
[tokenizer.eos_id],
|
| 307 |
+
[tokenizer.token_to_id("Answer"), tokenizer.token_to_id(":")],
|
| 308 |
+
[198, tokenizer.token_to_id("Answer"), tokenizer.token_to_id(":")],
|
| 309 |
+
# the model rarely emits the eos token and instead outputs newlines, but we cannot use them
|
| 310 |
+
# to stop or else things like code generation wouldn't work
|
| 311 |
+
# [198, 198], # '\n', '\n'
|
| 312 |
+
)
|
| 313 |
+
|
| 314 |
+
|
| 315 |
+
class Phi2(PromptStyle):
|
| 316 |
+
def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
|
| 317 |
+
return f"Instruct: {prompt}\nOutput:"
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
class Phi3(PromptStyle):
|
| 321 |
+
def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
|
| 322 |
+
sys_prompt = sys_prompt or "You are a helpful assistant."
|
| 323 |
+
return f"<|system|>\n{sys_prompt}<|end|>\n<|user|>\n{prompt}<|end|>\n<|assistant|>\n"
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
class Phi4(PromptStyle):
|
| 327 |
+
def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
|
| 328 |
+
res = ""
|
| 329 |
+
if sys_prompt:
|
| 330 |
+
res += f"<|im_start|>system<|im_sep|>{sys_prompt}<|im_end|>"
|
| 331 |
+
res += f"<|im_start|>user<|im_sep|>{prompt}<|im_end|><|im_start|>assistant<|im_sep|>"
|
| 332 |
+
return res
|
| 333 |
+
|
| 334 |
+
|
| 335 |
+
class Phi4Reasoning(PromptStyle):
|
| 336 |
+
def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
|
| 337 |
+
sys_prompt = (
|
| 338 |
+
sys_prompt
|
| 339 |
+
or "You are Phi, a language model trained by Microsoft to help users. Your role as an assistant involves thoroughly exploring questions through a systematic thinking process before providing the final precise and accurate solutions. This requires engaging in a comprehensive cycle of analysis, summarizing, exploration, reassessment, reflection, backtracing, and iteration to develop well-considered thinking process. Please structure your response into two main sections: Thought and Solution using the specified format: <think> {Thought section} </think> {Solution section}. In the Thought section, detail your reasoning process in steps. Each step should include detailed considerations such as analysing questions, summarizing relevant findings, brainstorming new ideas, verifying the accuracy of the current steps, refining any errors, and revisiting previous steps. In the Solution section, based on various attempts, explorations, and reflections from the Thought section, systematically present the final solution that you deem correct. The Solution section should be logical, accurate, and concise and detail necessary steps needed to reach the conclusion. Now, try to solve the following question through the above guidelines:"
|
| 340 |
+
)
|
| 341 |
+
return f"<|im_start>system<|im_sep|>{sys_prompt}<|im_end|><|im_start|>user<|im_sep|>{prompt}<|im_end|><|im_start|>assistant<|im_sep|>"
|
| 342 |
+
|
| 343 |
+
|
| 344 |
+
class Phi4Mini(PromptStyle):
|
| 345 |
+
def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
|
| 346 |
+
res = ""
|
| 347 |
+
if sys_prompt:
|
| 348 |
+
res += f"<|system|>{sys_prompt}<|end|>"
|
| 349 |
+
res += f"<|user|>{prompt}<|end|><|assistant|>"
|
| 350 |
+
return res
|
| 351 |
+
|
| 352 |
+
|
| 353 |
+
class Phi4MiniReasoning(PromptStyle):
|
| 354 |
+
def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
|
| 355 |
+
sys_prompt = sys_prompt or "Your name is Phi, an AI math expert developed by Microsoft."
|
| 356 |
+
return f"<|system|>{sys_prompt}<|end|><|user|>{prompt}<|end|><|assistant|>"
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
class TinyLlama(PromptStyle):
|
| 360 |
+
def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
|
| 361 |
+
sys_prompt = sys_prompt or "You are a friendly chatbot who always gives helpful, detailed, and polite answers."
|
| 362 |
+
return f"<|system|>\n{sys_prompt}</s>\n<|user|>\n{prompt}</s>\n<|assistant|>\n"
|
| 363 |
+
|
| 364 |
+
|
| 365 |
+
class Gemma(PromptStyle):
|
| 366 |
+
def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
|
| 367 |
+
return f"<start_of_turn>user\n{prompt}<end_of_turn>\n<start_of_turn>model\n"
|
| 368 |
+
|
| 369 |
+
|
| 370 |
+
class OLMo(PromptStyle):
|
| 371 |
+
def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
|
| 372 |
+
return f"<|endoftext|><|user|>\n{prompt}\n<|assistant|>\n"
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
class ChatML(PromptStyle):
|
| 376 |
+
def __init__(self, system_message: Optional[str] = None):
|
| 377 |
+
self.system_message = system_message
|
| 378 |
+
|
| 379 |
+
def apply(self, prompt: str, *, sys_prompt: Optional[str] = None, **kwargs: str) -> str:
|
| 380 |
+
sys_prompt = sys_prompt or self.system_message
|
| 381 |
+
return (
|
| 382 |
+
f"<|im_start|>system\n{sys_prompt}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant\n"
|
| 383 |
+
)
|
| 384 |
+
|
| 385 |
+
|
| 386 |
+
class Qwen2_5(ChatML):
|
| 387 |
+
def __init__(self):
|
| 388 |
+
super().__init__("You are Qwen, created by Alibaba Cloud. You are a helpful assistant.")
|
| 389 |
+
|
| 390 |
+
|
| 391 |
+
class Qwen2_5_Math(ChatML):
|
| 392 |
+
def __init__(self):
|
| 393 |
+
super().__init__("Please reason step by step, and put your final answer within \\boxed{}.")
|
| 394 |
+
|
| 395 |
+
|
| 396 |
+
class QwQ(ChatML):
|
| 397 |
+
def __init__(self):
|
| 398 |
+
super().__init__(
|
| 399 |
+
"You are a helpful and harmless assistant. You are Qwen developed by Alibaba. You should think step-by-step."
|
| 400 |
+
)
|
| 401 |
+
|
| 402 |
+
|
| 403 |
+
class Qwen3(ChatML):
|
| 404 |
+
def __init__(self):
|
| 405 |
+
super().__init__()
|
| 406 |
+
|
| 407 |
+
|
| 408 |
+
class SmolLM2(ChatML):
|
| 409 |
+
def __init__(self):
|
| 410 |
+
super().__init__("You are a helpful AI assistant named SmolLM, trained by Hugging Face")
|
| 411 |
+
|
| 412 |
+
|
| 413 |
+
class Salamandra(ChatML):
|
| 414 |
+
def __init__(self):
|
| 415 |
+
super().__init__(
|
| 416 |
+
"I am Salamandra, an AI language model developed at the Barcelona Supercomputing Centre (BSC) by the Language Technologies Unit. My knowledge base was last updated on August 2023. Today Date: 2024-09-30\nSoy Salamandra, un modelo lingüístico de IA desarrollado en el Barcelona Supercomputing Centre (BSC) por la Language Technologies Unit. Mi base de conocimientos se actualizó por última vez en agosto de 2023.\nSoc Salamandra, un model de llenguatge d'IA desenvolupat al Barcelona Supercomputing Centre (BSC) per la Language Technologies Unit."
|
| 417 |
+
)
|
| 418 |
+
|
| 419 |
+
|
| 420 |
+
# Maps prompt style names to PromptStyle classes
|
| 421 |
+
prompt_styles: Dict[str, Type[PromptStyle]] = {
|
| 422 |
+
# Dataset-specific prompt styles
|
| 423 |
+
"default": Default,
|
| 424 |
+
"alpaca": Alpaca,
|
| 425 |
+
"flan": FLAN,
|
| 426 |
+
"longform": Longform,
|
| 427 |
+
# Model-specific prompt styles
|
| 428 |
+
"stablelm-alpha": StableLMAlpha,
|
| 429 |
+
"stablelm-zephyr": StableLMZephyr,
|
| 430 |
+
"falcon": Falcon,
|
| 431 |
+
"llama2-function-calling": Llama2FunctionCalling,
|
| 432 |
+
"llama2": Llama2,
|
| 433 |
+
"freewilly2": FreeWilly2,
|
| 434 |
+
"platypus": Platypus,
|
| 435 |
+
"stablecode": StableCode,
|
| 436 |
+
"codellama": CodeLlama,
|
| 437 |
+
"phi-1": Phi1,
|
| 438 |
+
"phi-2": Phi2,
|
| 439 |
+
"phi-3": Phi3,
|
| 440 |
+
"phi-4": Phi4,
|
| 441 |
+
"phi-4-reasoning": Phi4Reasoning,
|
| 442 |
+
"phi-4-mini": Phi4Mini,
|
| 443 |
+
"phi-4-mini-reasoning": Phi4MiniReasoning,
|
| 444 |
+
"tinyllama": TinyLlama,
|
| 445 |
+
"gemma": Gemma,
|
| 446 |
+
"llama3": Llama3,
|
| 447 |
+
"olmo": OLMo,
|
| 448 |
+
"qwen2.5": Qwen2_5,
|
| 449 |
+
"qwen2.5-math": Qwen2_5_Math,
|
| 450 |
+
"qwq": QwQ,
|
| 451 |
+
"qwen3": Qwen3,
|
| 452 |
+
"smollm2": SmolLM2,
|
| 453 |
+
"salamandra": Salamandra,
|
| 454 |
+
}
|
| 455 |
+
|
| 456 |
+
|
| 457 |
+
def model_name_to_prompt_style(model_name: str) -> PromptStyle:
|
| 458 |
+
if re.search(r"stablelm-tuned-alpha", model_name):
|
| 459 |
+
return StableLMAlpha()
|
| 460 |
+
if re.search(r"stablelm-zephyr-3b", model_name):
|
| 461 |
+
return StableLMZephyr()
|
| 462 |
+
if re.search("stablecode-instruct", model_name):
|
| 463 |
+
return StableCode()
|
| 464 |
+
if re.search(r"Falcon3.*-Instruct", model_name):
|
| 465 |
+
return Falcon3()
|
| 466 |
+
if re.search(r"falcon.*-instruct", model_name):
|
| 467 |
+
return Falcon()
|
| 468 |
+
if re.search("Llama-2-7b-chat-hf-function-calling-v2", model_name):
|
| 469 |
+
return Llama2FunctionCalling()
|
| 470 |
+
if re.search("Llama-2.*-chat", model_name):
|
| 471 |
+
return Llama2()
|
| 472 |
+
if re.search("Llama-3.*-Instruct", model_name):
|
| 473 |
+
return Llama3()
|
| 474 |
+
if re.search("Llama-3.*-Instruct-*", model_name):
|
| 475 |
+
return Llama3()
|
| 476 |
+
if re.search("OLMo-2.*-(Instruct|SFT|DPO)", model_name):
|
| 477 |
+
return Llama3()
|
| 478 |
+
if re.search("R1", model_name):
|
| 479 |
+
return R1Base()
|
| 480 |
+
if re.search("FreeWilly2", model_name):
|
| 481 |
+
return FreeWilly2()
|
| 482 |
+
if re.search("Platypus", model_name):
|
| 483 |
+
return Platypus()
|
| 484 |
+
if re.search("CodeLlama|Mi[sx]tral.*Instruct", model_name):
|
| 485 |
+
return CodeLlama()
|
| 486 |
+
if re.search("phi-1", model_name):
|
| 487 |
+
return Phi1()
|
| 488 |
+
if re.search("phi-2", model_name):
|
| 489 |
+
return Phi2()
|
| 490 |
+
if re.search("Phi-3", model_name):
|
| 491 |
+
return Phi3()
|
| 492 |
+
if re.search("Phi-4-reasoning", model_name):
|
| 493 |
+
return Phi4Reasoning()
|
| 494 |
+
if re.search("Phi-4-mini-reasoning", model_name):
|
| 495 |
+
return Phi4MiniReasoning()
|
| 496 |
+
if re.search("Phi-4-mini", model_name):
|
| 497 |
+
return Phi4Mini()
|
| 498 |
+
if re.search("phi-4", model_name):
|
| 499 |
+
return Phi4()
|
| 500 |
+
if re.search(r"tiny-llama.*chat", model_name):
|
| 501 |
+
return TinyLlama()
|
| 502 |
+
if re.search(r"(Code)?Gemma.*-it", model_name):
|
| 503 |
+
return Gemma()
|
| 504 |
+
if re.search(r"OLMo.*-hf", model_name):
|
| 505 |
+
return OLMo()
|
| 506 |
+
if re.search(r"Qwen2\.5-Math-.*", model_name):
|
| 507 |
+
return Qwen2_5_Math()
|
| 508 |
+
if re.search(r"Qwen2\.5-.*", model_name):
|
| 509 |
+
return Qwen2_5()
|
| 510 |
+
if re.search(r"QwQ-.*", model_name):
|
| 511 |
+
return QwQ()
|
| 512 |
+
if re.search(r"Qwen3-.*", model_name):
|
| 513 |
+
return Qwen3()
|
| 514 |
+
if re.search(r"SmolLM2.*-Instruct", model_name):
|
| 515 |
+
return SmolLM2()
|
| 516 |
+
if re.search(r"salamandra-.*-instruct", model_name):
|
| 517 |
+
return Salamandra()
|
| 518 |
+
return Default()
|
| 519 |
+
|
| 520 |
+
|
| 521 |
+
def save_prompt_style(style: Union[str, PromptStyle], checkpoint_dir: Path) -> None:
|
| 522 |
+
style = PromptStyle.from_name(style) if isinstance(style, str) else style
|
| 523 |
+
cls = type(style)
|
| 524 |
+
# Allow saving the full module path for user-defined prompt classes
|
| 525 |
+
config = {"class_path": f"{cls.__module__}.{cls.__name__}"}
|
| 526 |
+
with open(checkpoint_dir / "prompt_style.yaml", "w", encoding="utf-8") as file:
|
| 527 |
+
yaml.dump(config, file)
|
| 528 |
+
|
| 529 |
+
|
| 530 |
+
def load_prompt_style(checkpoint_dir: Path) -> PromptStyle:
|
| 531 |
+
with open(checkpoint_dir / "prompt_style.yaml", encoding="utf-8") as file:
|
| 532 |
+
config = yaml.safe_load(file)
|
| 533 |
+
# Support loading the full module path for user-defined prompt classes
|
| 534 |
+
full_module_path, cls_name = config["class_path"].rsplit(".", 1)
|
| 535 |
+
module = importlib.import_module(full_module_path)
|
| 536 |
+
cls = getattr(module, cls_name)
|
| 537 |
+
return cls()
|
| 538 |
+
|
| 539 |
+
|
| 540 |
+
def has_prompt_style(checkpoint_dir: Path) -> bool:
|
| 541 |
+
return (checkpoint_dir / "prompt_style.yaml").is_file()
|
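A minimal usage sketch (not part of the diff above): resolving a style from the `prompt_styles` mapping via `PromptStyle.from_name`, applying it to a prompt, and round-tripping it through `save_prompt_style`/`load_prompt_style`. The checkpoint path is a hypothetical example.

from pathlib import Path

from litgpt.prompts import PromptStyle, load_prompt_style, save_prompt_style

style = PromptStyle.from_name("phi-4")          # looks up the Phi4 class in prompt_styles
text = style.apply("What is 2 + 2?")            # "<|im_start|>user<|im_sep|>What is 2 + 2?<|im_end|><|im_start|>assistant<|im_sep|>"
ckpt_dir = Path("checkpoints/microsoft/phi-4")  # hypothetical directory
save_prompt_style(style, ckpt_dir)              # writes prompt_style.yaml with the class path
restored = load_prompt_style(ckpt_dir)          # imports and re-instantiates the same class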
litgpt/tokenizer.py
ADDED
|
@@ -0,0 +1,182 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.

import json
from pathlib import Path
from typing import Iterable, Iterator, Optional, Union

import torch

from litgpt.utils import fix_and_load_json


class Tokenizer:
    def __init__(self, checkpoint_dir: Union[Path, str]) -> None:
        checkpoint_dir = Path(checkpoint_dir)
        if not checkpoint_dir.exists():
            raise NotADirectoryError(f"The checkpoint directory does not exist: {str(checkpoint_dir)}")

        self.model_name = checkpoint_dir.stem
        self.use_bos = self.check_if_bos_token_used(checkpoint_dir)
        self.bos_id = None
        self.eos_id = None

        # some checkpoints have both files, `.json` takes precedence
        if (vocabulary_path := checkpoint_dir / "tokenizer.json").is_file():
            from tokenizers import Tokenizer as HFTokenizer

            self.processor = HFTokenizer.from_file(str(vocabulary_path))
            self.backend = "huggingface"

            if (special_tokens_path := checkpoint_dir / "tokenizer_config.json").is_file():
                with open(special_tokens_path, encoding="utf-8") as fp:
                    config = json.load(fp)
                bos_token = config.get("bos_token")
                eos_token = config.get("eos_token")
                if bos_token is not None and isinstance(bos_token, dict):
                    bos_token = bos_token.get("content")
                if eos_token is not None and isinstance(eos_token, dict):
                    eos_token = eos_token.get("content")
                self.bos_id = self.token_to_id(bos_token) if bos_token is not None else None
                self.eos_id = self.token_to_id(eos_token) if eos_token is not None else None
            if (special_tokens_path := checkpoint_dir / "generation_config.json").is_file():
                try:
                    with open(special_tokens_path, encoding="utf-8") as fp:
                        config = json.load(fp)
                except json.JSONDecodeError:  # Some files like the Llama 3.2 one have bugs
                    with open(special_tokens_path, encoding="utf-8") as fp:
                        json_string = fp.read()
                    config = fix_and_load_json(json_string)
                if self.bos_id is None:
                    self.bos_id = config.get("bos_token_id")
                if self.eos_id is None:
                    self.eos_id = config.get("eos_token_id")

        elif (vocabulary_path := checkpoint_dir / "tokenizer.model").is_file():
            from sentencepiece import SentencePieceProcessor

            self.processor = SentencePieceProcessor(model_file=str(vocabulary_path))
            self.backend = "sentencepiece"
            self.bos_id = self.processor.bos_id()
            self.eos_id = self.processor.eos_id()
        else:
            raise NotImplementedError

        # NOTE: A temporary fix until it's resolved on Tokenizers side.
        # LlaMA tokenizer strips leading spaces if to decode a single token at a time.
        # https://github.com/huggingface/transformers/issues/31643
        self.apply_decoding_fix = None
        if (config_path := checkpoint_dir / "tokenizer_config.json").is_file():
            with open(config_path, encoding="utf-8") as fp:
                self.apply_decoding_fix = "LlamaTokenizer" in json.load(fp)["tokenizer_class"]

    @property
    def vocab_size(self) -> int:
        if self.backend == "huggingface":
            return self.processor.get_vocab_size(with_added_tokens=False)
        if self.backend == "sentencepiece":
            return self.processor.vocab_size()
        raise RuntimeError

    def token_to_id(self, token: str) -> int:
        if self.backend == "huggingface":
            id_ = self.processor.token_to_id(token)
        elif self.backend == "sentencepiece":
            id_ = self.processor.piece_to_id(token)
        else:
            raise RuntimeError
        if id_ is None:
            raise ValueError(f"token {token!r} not found in the collection.")
        return id_

    def check_if_bos_token_used(self, checkpoint_dir: Path) -> bool:
        if not (tokenizer_config_path := checkpoint_dir / "tokenizer_config.json").is_file():
            return False
        with open(tokenizer_config_path, encoding="utf-8") as fp:
            config = json.load(fp)
        # for LlaMA-3 tokenizer there is no `add_bos_token` at all and `tokenizer_class` is only
        # `PreTrainedTokenizerFast`
        if checkpoint_dir.stem.startswith(("Meta-Llama-3", "Llama-3")):
            return True
        if checkpoint_dir.stem.startswith("SmolLM2") and checkpoint_dir.name.endswith("Instruct"):
            return True
        if "add_bos_token" in config:
            return config["add_bos_token"]
        # if `add_bos_token` isn't in the config file, but LLaMA tokenizer is used - return True.
        # ex: https://huggingface.co/stabilityai/StableBeluga2/blob/main/tokenizer_config.json#L2
        return config.get("tokenizer_class") == "LlamaTokenizer"

    def encode(
        self,
        string: str,
        device: Optional[torch.device] = None,
        bos: Optional[bool] = None,
        eos: bool = False,
        max_length: int = -1,
    ) -> torch.Tensor:
        if self.backend == "huggingface":
            tokens = self.processor.encode(string).ids
        elif self.backend == "sentencepiece":
            tokens = self.processor.encode(string)
        else:
            raise RuntimeError(f"`{self.backend}` is not supported.")
        if tokens is None:
            raise ValueError("`self.processor` returned tokens of None value.")

        if bos or (bos is None and self.use_bos):
            if self.bos_id is None:
                raise NotImplementedError("This tokenizer does not have a defined bos token.")
            if not tokens or tokens[0] != self.bos_id:
                tokens = [self.bos_id] + tokens
        # if the processor misbehaves and adds `bos` token no matter what
        elif tokens and tokens[0] == self.bos_id:
            tokens = tokens[1:]

        if eos and (not tokens or tokens[-1] != self.eos_id):
            tokens = tokens + [self.eos_id]
        # if the processor misbehaves and adds `eos` token no matter what
        elif tokens and tokens[-1] == self.eos_id:
            tokens = tokens[:-1]

        if max_length > 0:
            tokens = tokens[:max_length]
        return torch.tensor(tokens, dtype=torch.int, device=device)

    def decode(self, tensor: torch.Tensor) -> str:
        tokens = [tensor.item()] if tensor.ndim == 0 else tensor.tolist()
        if len(tokens) == 1 and self.apply_decoding_fix:
            dummy_token_id = 33  # \x1e
            dummy_token = self.processor.decode([dummy_token_id])
            if dummy_token != "\x1e":
                dummy_token_id = 165  # \x1e is different in salamandra tokenizers
                dummy_token = self.processor.decode([dummy_token_id])
            return self.processor.decode([dummy_token_id] + tokens)[len(dummy_token) :]
        return self.processor.decode(tokens)

    def decode_stream(
        self, token_stream: Iterable[torch.Tensor], device: Optional[torch.device] = None
    ) -> Iterator[str]:
        if self.backend == "huggingface":
            try:
                for token in token_stream:
                    yield self.decode(token)
            except KeyboardInterrupt:
                return
        elif self.backend == "sentencepiece":
            # TODO: Is there a way to not have to do this?
            # This may actually affect our tokens per second.

            # sentencepiece does not support decoding token-by-token because it adds spaces based on the surrounding tokens
            # meaning that we need to decode everything each time
            so_far = torch.tensor([], dtype=torch.long, device=device)
            decoded_so_far = ""
            try:
                for token in token_stream:
                    so_far = so_far.to(device=token.device)
                    so_far = torch.cat((so_far, token.view(-1)))
                    decoded_new = self.decode(so_far)
                    yield decoded_new[len(decoded_so_far) :]
                    decoded_so_far = decoded_new
            except KeyboardInterrupt:
                return
        else:
            raise NotImplementedError(self.backend)
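A brief usage sketch of the tokenizer above (the checkpoint path is only an illustrative example of a directory containing tokenizer.json or tokenizer.model, e.g. one produced by a model download):

from litgpt.tokenizer import Tokenizer

tokenizer = Tokenizer("checkpoints/TinyLlama/TinyLlama-1.1B-Chat-v1.0")  # hypothetical path
ids = tokenizer.encode("Hello world", eos=True)  # 1-D torch.int tensor; BOS added per tokenizer_config.json
text = tokenizer.decode(ids)                     # back to a string
print(tokenizer.backend, tokenizer.vocab_size)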
litgpt/utils.py
ADDED
|
@@ -0,0 +1,875 @@
# Copyright Lightning AI. Licensed under the Apache License 2.0, see LICENSE file.

"""Utility functions for training and inference."""

import inspect
import json
import math
import os
import pickle
import random
import re
import shutil
import subprocess
import sys
import warnings
from dataclasses import asdict, is_dataclass
from io import BytesIO
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Literal, Mapping, Optional, TypeVar, Union

import lightning as L
import psutil
import torch
import torch.nn as nn
import torch.utils._device
import yaml
from lightning.fabric.loggers import CSVLogger, TensorBoardLogger
from lightning.fabric.strategies import FSDPStrategy
from lightning.fabric.utilities.load import _lazy_load as lazy_load
from lightning.pytorch.cli import instantiate_class
from lightning.pytorch.loggers import MLFlowLogger, WandbLogger
from lightning_utilities.core.imports import module_available
from packaging import version
from torch.serialization import normalize_storage_type
from typing_extensions import Self

if TYPE_CHECKING:
    from litgpt import GPT, Config

_THUNDER_AVAILABLE = module_available("thunder")
_TRITON_AVAILABLE = module_available("triton")


def init_out_dir(out_dir: Path) -> Path:
    if not isinstance(out_dir, Path):
        out_dir = Path(out_dir)
    if not out_dir.is_absolute() and "LIGHTNING_ARTIFACTS_DIR" in os.environ:
        return Path(os.getenv("LIGHTNING_ARTIFACTS_DIR")) / out_dir
    return out_dir


def find_resume_path(resume: Union[bool, Literal["auto"], Path], out_dir: Path) -> Optional[Path]:
    if not resume or isinstance(resume, Path):
        return resume

    resume_path = max(out_dir.rglob("step-*/*.pth"), key=(lambda p: int(p.parent.name.split("-")[1])), default=None)
    if resume == "auto":
        return resume_path
    if resume is True and resume_path is None:
        raise FileNotFoundError(
            f"You passed `--resume=True`, but no checkpoint file was found in `--out_dir={out_dir}`."
        )
    return resume_path


def num_parameters(module: nn.Module, requires_grad: Optional[bool] = None) -> int:
    total = 0
    for p in module.parameters():
        if requires_grad is None or p.requires_grad == requires_grad:
            if hasattr(p, "quant_state"):
                # bitsandbytes 4bit layer support
                total += math.prod(p.quant_state.shape)
            else:
                total += p.numel()
    return total


def reset_parameters(module: nn.Module) -> None:
    """Calls `reset_parameters` on the module and all its submodules."""
    for mod in module.modules():
        if callable(getattr(mod, "reset_parameters", None)):
            mod.reset_parameters()


def check_valid_checkpoint_dir(
    checkpoint_dir: Path,
    model_filename: str = "lit_model.pth",
    verbose: bool = True,
    raise_error: bool = False,
    ignore_tokenizer_files: bool = False,
) -> None:
    files = {
        model_filename: (checkpoint_dir / model_filename).is_file(),
        "model_config.yaml": (checkpoint_dir / "model_config.yaml").is_file(),
    }
    if not ignore_tokenizer_files:
        files.update(
            {
                "tokenizer.json OR tokenizer.model": (checkpoint_dir / "tokenizer.json").is_file()
                or (checkpoint_dir / "tokenizer.model").is_file(),
                "tokenizer_config.json": (checkpoint_dir / "tokenizer_config.json").is_file(),
            }
        )

    if checkpoint_dir.is_dir():
        if all(files.values()):
            # we're good
            return
        problem = f" is missing the files: {[f for f, exists in files.items() if not exists]!r}"
    else:
        problem = " is not a checkpoint directory"

    # list locally available checkpoints
    available = list(Path("checkpoints").glob("*/*"))
    if available:
        options = "\n".join([""] + [repr(str(p.resolve())) for p in available])
        extra = f"\nYou have downloaded locally:{options}\n"
    else:
        extra = ""

    if verbose:
        error_message = (
            f"checkpoint_dir {str(checkpoint_dir.absolute())!r}{problem}."
            "\nFind download instructions at https://github.com/Lightning-AI/litgpt/blob/main/tutorials\n"
            f"{extra}\nSee all download options by running:\n litgpt download"
        )
        print(error_message, file=sys.stderr)

    if raise_error:
        raise FileNotFoundError(f"checkpoint_dir {str(checkpoint_dir.absolute())!r}{problem}.")
    else:
        raise SystemExit(1)


class SavingProxyForStorage:
    def __init__(self, obj, saver, protocol_version=5):
        self.protocol_version = protocol_version
        self.saver = saver
        if not (isinstance(obj, torch.storage.TypedStorage) or torch.is_storage(obj)):
            raise TypeError(f"expected storage, not {type(obj)}")

        # this logic is taken from PyTorch 2.0+ torch/serialization.py
        if isinstance(obj, torch.storage.TypedStorage):
            # PT upstream wants to deprecate this eventually...
            storage = obj._untyped_storage
            storage_type_str = obj._pickle_storage_type()
            storage_type = getattr(torch, storage_type_str)
            storage_numel = obj._size()
        else:
            storage = obj
            storage_type = normalize_storage_type(type(obj))
            storage_numel = storage.nbytes()

        storage_key = saver._write_storage_and_return_key(storage)
        location = torch.serialization.location_tag(storage)

        self.storage_info = ("storage", storage_type, storage_key, location, storage_numel)

    def __reduce_ex__(self, protocol_version):
        assert False, "this should be handled with out of band"


class SavingProxyForTensor:
    def __init__(self, tensor, saver, protocol_version=5):
        self.protocol_version = protocol_version
        self.reduce_ret_fn, reduce_args = tensor.__reduce_ex__(protocol_version)
        if reduce_args[0] == torch._utils._rebuild_tensor_v2:
            # for Tensors with Python attributes
            (a0, a1, (storage, *a2_other), *other_reduce_args) = reduce_args
            assert isinstance(storage, (torch.storage.TypedStorage, torch.storage.UntypedStorage)), (
                "Please check for updates"
            )
            storage_proxy = SavingProxyForStorage(storage, saver, protocol_version=protocol_version)
            self.reduce_args = (a0, a1, (storage_proxy, *a2_other), *other_reduce_args)
        else:
            (storage, *other_reduce_args) = reduce_args
            assert isinstance(storage, (torch.storage.TypedStorage, torch.storage.UntypedStorage)), (
                "Please check for updates"
            )
            storage_proxy = SavingProxyForStorage(storage, saver, protocol_version=protocol_version)
            self.reduce_args = (storage_proxy, *other_reduce_args)

    def __reduce_ex__(self, protocol_version):
        if protocol_version != self.protocol_version:
            raise RuntimeError(f"Unexpected protocol version: expected {self.protocol_version}, got {protocol_version}")
        return self.reduce_ret_fn, self.reduce_args


class IncrementalPyTorchPickler(pickle.Pickler):
    def __init__(self, saver, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.storage_dtypes = {}
        self.saver = saver
        self.id_map = {}

    # this logic is taken from PyTorch 2.0+ torch/serialization.py
    def persistent_id(self, obj):
        # FIXME: the docs say that persistent_id should only return a string
        # but torch store returns tuples. This works only in the binary protocol
        # see
        # https://docs.python.org/2/library/pickle.html#pickling-and-unpickling-external-objects
        # https://github.com/python/cpython/blob/master/Lib/pickle.py#L527-L537
        if isinstance(obj, SavingProxyForStorage):
            return obj.storage_info

        if isinstance(obj, torch.storage.TypedStorage) or torch.is_storage(obj):
            if isinstance(obj, torch.storage.TypedStorage):
                # TODO: Once we decide to break serialization FC, this case
                # can be deleted
                storage = obj._untyped_storage
                storage_dtype = obj.dtype
                storage_type_str = obj._pickle_storage_type()
                storage_type = getattr(torch, storage_type_str)
                storage_numel = obj._size()

            else:
                storage = obj
                storage_dtype = torch.uint8
                storage_type = normalize_storage_type(type(obj))
                storage_numel = storage.nbytes()

            # If storage is allocated, ensure that any other saved storages
            # pointing to the same data all have the same dtype. If storage is
            # not allocated, don't perform this check
            if storage.data_ptr() != 0:
                if storage.data_ptr() in self.storage_dtypes:
                    if storage_dtype != self.storage_dtypes[storage.data_ptr()]:
                        raise RuntimeError(
                            "Cannot save multiple tensors or storages that view the same data as different types"
                        )
                else:
                    self.storage_dtypes[storage.data_ptr()] = storage_dtype

            storage_key = self.id_map.get(storage._cdata)
            if storage_key is None:
                storage_key = self.saver._write_storage_and_return_key(storage)
                self.id_map[storage._cdata] = storage_key
            location = torch.serialization.location_tag(storage)

            return ("storage", storage_type, storage_key, location, storage_numel)

        return None


class incremental_save:
    def __init__(self, name):
        self.name = name
        self.zipfile = torch._C.PyTorchFileWriter(str(name))
        self.has_saved = False
        self.next_key = 0
        self.protocol_version = 2

    def __enter__(self):
        return self

    def store_early(self, tensor):
        if isinstance(tensor, torch.Tensor):
            return SavingProxyForTensor(tensor, self, protocol_version=self.protocol_version)
        raise TypeError(f"can only store tensors early, not {type(tensor)}")

    def save(self, obj):
        if self.has_saved:
            raise RuntimeError("have already saved")
        # Write the pickle data for `obj`
        data_buf = BytesIO()
        pickler = IncrementalPyTorchPickler(self, data_buf, protocol=self.protocol_version)
        pickler.dump(obj)
        data_value = data_buf.getvalue()
        self.zipfile.write_record("data.pkl", data_value, len(data_value))
        self.has_saved = True

    def _write_storage_and_return_key(self, storage):
        if self.has_saved:
            raise RuntimeError("have already saved")
        key = self.next_key
        self.next_key += 1
        name = f"data/{key}"
        if storage.device.type != "cpu":
            storage = storage.cpu()
        num_bytes = storage.nbytes()

        current_version = version.parse(torch.__version__)
        threshold_version = version.parse("2.2.2")
        if current_version <= threshold_version:
            self.zipfile.write_record(name, storage.data_ptr(), num_bytes)
        else:
            self.zipfile.write_record(name, storage, num_bytes)

        return key

    def __exit__(self, type, value, traceback):
        self.zipfile.write_end_of_file()


T = TypeVar("T")
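# Illustrative note (not part of the original file): `incremental_save` above writes a
# PyTorch zip checkpoint storage-by-storage, so large state dicts can be serialized
# without first building the whole pickle in memory. A rough sketch of the intended use:
#
#     with incremental_save("lit_model.pth") as saver:
#         state_dict = {k: saver.store_early(v) for k, v in model.state_dict().items()}
#         saver.save(state_dict)
#
# `store_early` writes each tensor's storage immediately and returns a proxy object;
# `save` then pickles only the lightweight proxies into data.pkl.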
def chunked_cross_entropy(
    logits: Union[torch.Tensor, List[torch.Tensor]],
    targets: torch.Tensor,
    chunk_size: int = 128,
    ignore_index: int = -100,
) -> torch.Tensor:
    # with large max_sequence_lengths, the beginning of `backward` allocates a large memory chunk which can dominate
    # the memory usage in fine-tuning settings with low number of parameters.
    # as a workaround hack, the cross entropy computation is chunked to force it to deallocate on the go, reducing
    # the memory spike's magnitude

    # lm_head was chunked (we are fine-tuning)
    if isinstance(logits, list):
        # don't want to chunk cross entropy
        if chunk_size == 0:
            logits = torch.cat(logits, dim=1)
            logits = logits.reshape(-1, logits.size(-1))
            targets = targets.reshape(-1)
            return torch.nn.functional.cross_entropy(logits, targets, ignore_index=ignore_index)

        # chunk cross entropy
        logit_chunks = [logit_chunk.reshape(-1, logit_chunk.size(-1)) for logit_chunk in logits]
        target_chunks = [target_chunk.reshape(-1) for target_chunk in targets.split(logits[0].size(1), dim=1)]
        loss_chunks = [
            torch.nn.functional.cross_entropy(logit_chunk, target_chunk, ignore_index=ignore_index, reduction="none")
            for logit_chunk, target_chunk in zip(logit_chunks, target_chunks)
        ]
        non_masked_elems = (targets != ignore_index).sum()
        # See [non_masked_elems div note]
        return torch.cat(loss_chunks).sum() / non_masked_elems.maximum(torch.ones_like(non_masked_elems))

    # no chunking at all
    logits = logits.reshape(-1, logits.size(-1))
    targets = targets.reshape(-1)
    if chunk_size == 0:
        return torch.nn.functional.cross_entropy(logits, targets, ignore_index=ignore_index)

    # lm_head wasn't chunked, chunk cross entropy
    logit_chunks = logits.split(chunk_size)
    target_chunks = targets.split(chunk_size)
    loss_chunks = [
        torch.nn.functional.cross_entropy(logit_chunk, target_chunk, ignore_index=ignore_index, reduction="none")
        for logit_chunk, target_chunk in zip(logit_chunks, target_chunks)
    ]
    non_masked_elems = (targets != ignore_index).sum()
    # [non_masked_elems div note]:
    # max(1, non_masked_elems) would be more ergonomic to avoid a division by zero. However that
    # results in a python int which is then passed back to torch division. By using the
    # `x.maximum(torch.ones_like(x))` pattern we avoid a cudaStreamSynchronize.
    return torch.cat(loss_chunks).sum() / non_masked_elems.maximum(torch.ones_like(non_masked_elems))


def map_old_state_dict_weights(state_dict: Dict, mapping: Mapping, prefix: str) -> Dict:
    for checkpoint_name, attribute_name in mapping.items():
        full_checkpoint_name = prefix + checkpoint_name
        if full_checkpoint_name in state_dict:
            full_attribute_name = prefix + attribute_name
            state_dict[full_attribute_name] = state_dict.pop(full_checkpoint_name)
    return state_dict


def get_default_supported_precision(training: bool) -> str:
    """
    Return the default precision that is supported by the hardware: either `bf16` or `16`.

    Args:
        training: If True, returns '-mixed' version of the precision; if False, returns '-true' version.

    Returns:
        The default precision that is suitable for the task and is supported by the hardware.
    """
    import torch

    if torch.cuda.is_available():
        if torch.cuda.is_bf16_supported():
            return "bf16-mixed" if training else "bf16-true"
        else:
            return "16-mixed" if training else "16-true"
    return "bf16-mixed" if training else "bf16-true"


def load_checkpoint(fabric: L.Fabric, model: nn.Module, checkpoint_path: Path, strict: bool = True) -> None:
    if isinstance(fabric.strategy, FSDPStrategy):
        fabric.load_raw(checkpoint_path, model, strict=strict)
    else:
        state_dict = lazy_load(checkpoint_path)
        state_dict = state_dict.get("model", state_dict)
        model.load_state_dict(state_dict, strict=strict)


def load_checkpoint_update(
    fabric: L.Fabric, adapter_path: Path, model: nn.Module, checkpoint_path: Path, strict: bool = True
) -> None:
    if isinstance(fabric.strategy, FSDPStrategy):
        fabric.load_raw(checkpoint_path, model, strict=strict)
    else:
        state_dict = lazy_load(checkpoint_path)
        state_dict = state_dict.get("model", state_dict)
        adapter_cp = lazy_load(adapter_path)
        state_dict.update(adapter_cp)
        model.load_state_dict(state_dict, strict=strict)


def flops_per_param(max_seq_length: int, n_layer: int, n_embd: int, n_params: int) -> int:
    flops_per_token = 2 * n_params  # each parameter is used for a MAC (2 FLOPS) per network operation
    # this assumes that all samples have a fixed length equal to the block size
    # which is most likely false during finetuning
    flops_per_seq = flops_per_token * max_seq_length
    attn_flops_per_seq = n_layer * 2 * 2 * (n_embd * (max_seq_length**2))
    return flops_per_seq + attn_flops_per_seq


def estimate_flops(model: "GPT", training: bool) -> int:
    """Measures estimated FLOPs for MFU.

    Refs:
        * https://ar5iv.labs.arxiv.org/html/2205.05198#A1
        * https://ar5iv.labs.arxiv.org/html/2204.02311#A2
    """
    # using all parameters for this is a naive over estimation because not all model parameters actually contribute to
    # this FLOP computation (e.g. embedding, norm). For this reason, the result will be higher by a fixed percentage
    # (~10%) compared to the measured FLOPs, making those lower but more realistic.
    # For a proper estimate, this needs a more fine-grained calculation as in Appendix A of the paper.
    n_trainable_params = num_parameters(model, requires_grad=True)
    trainable_flops = flops_per_param(
        model.max_seq_length, model.config.n_layer, model.config.n_embd, n_trainable_params
    )
    # forward + backward + gradients (assumes no gradient accumulation)
    ops_per_step = 3 if training else 1
    n_frozen_params = num_parameters(model, requires_grad=False)
    frozen_flops = flops_per_param(model.max_seq_length, model.config.n_layer, model.config.n_embd, n_frozen_params)
    # forward + backward
    frozen_ops_per_step = 2 if training else 1
    return ops_per_step * trainable_flops + frozen_ops_per_step * frozen_flops
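# Illustrative note (not part of the original file): `estimate_flops` above is meant to
# feed an MFU (model FLOPs utilization) estimate, roughly:
#
#     flops_per_step = estimate_flops(model, training=True)
#     mfu = flops_per_step * steps_per_second / peak_flops_of_the_accelerator
#
# where `steps_per_second` and `peak_flops_of_the_accelerator` are measured or looked up
# by the caller (both names here are placeholders, not symbols defined in this file).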
class CycleIterator:
    """An iterator that cycles through an iterable indefinitely.

    Example:
        >>> iterator = CycleIterator([1, 2, 3])
        >>> [next(iterator) for _ in range(5)]
        [1, 2, 3, 1, 2]

    Note:
        Unlike ``itertools.cycle``, this iterator does not cache the values of the iterable.
    """

    def __init__(self, iterable: Iterable) -> None:
        self.iterable = iterable
        self.epoch = 0
        self._iterator = None

    def __next__(self) -> Any:
        if self._iterator is None:
            self._iterator = iter(self.iterable)
        try:
            return next(self._iterator)
        except StopIteration:
            self._iterator = iter(self.iterable)
            self.epoch += 1
            return next(self._iterator)

    def __iter__(self) -> Self:
        return self


def copy_config_files(source_dir: Path, out_dir: Path) -> None:
    """Copies the specified configuration and tokenizer files into the output directory."""

    config_files = ["config.json", "generation_config.json", "model_config.yaml"]
    tokenizer_files = ["tokenizer.json", "tokenizer.model", "tokenizer_config.json"]

    for file_name in config_files + tokenizer_files:
        src_path = source_dir / file_name
        if src_path.exists():
            shutil.copy(src_path, out_dir)


def CLI(*args: Any, **kwargs: Any) -> Any:
    from jsonargparse import CLI, set_config_read_mode, set_docstring_parse_options

    set_docstring_parse_options(attribute_docstrings=True)
    set_config_read_mode(urls_enabled=True)

    return CLI(*args, **kwargs)


def capture_hparams() -> Dict[str, Any]:
    """Captures the local variables ('hyperparameters') from where this function gets called."""
    caller_frame = inspect.currentframe().f_back
    locals_of_caller = caller_frame.f_locals
    hparams = {}
    for name, value in locals_of_caller.items():
        if value is None or isinstance(value, (int, float, str, bool, Path)):
            hparams[name] = value
        elif is_dataclass(value):
            hparams[name] = asdict(value)
        else:
            hparams[name] = str(value)
    return hparams


def save_hyperparameters(function: callable, checkpoint_dir: Path) -> None:
    """Captures the CLI parameters passed to `function` without running `function` and saves them to the checkpoint."""
    from jsonargparse import capture_parser

    # TODO: Make this more robust
    # This hack strips away the subcommands from the top-level CLI
    # to parse the file as if it was called as a script
    known_commands = [
        ("finetune_full",),  # For subcommands, use `("finetune", "full")` etc
        ("finetune_lora",),
        ("finetune_adapter",),
        ("finetune_adapter_v2",),
        ("finetune",),
        ("pretrain",),
    ]
    for known_command in known_commands:
        unwanted = slice(1, 1 + len(known_command))
        if tuple(sys.argv[unwanted]) == known_command:
            sys.argv[unwanted] = []

    parser = capture_parser(lambda: CLI(function))
    config = parser.parse_args()
    parser.save(config, checkpoint_dir / "hyperparameters.yaml", overwrite=True)


def save_config(config: "Config", checkpoint_dir: Path) -> None:
    config_dict = asdict(config)
    with open(checkpoint_dir / "model_config.yaml", "w", encoding="utf-8") as fp:
        yaml.dump(config_dict, fp)


def parse_devices(devices: Union[str, int]) -> int:
    if devices in (-1, "auto"):
        return torch.cuda.device_count() or 1
    if isinstance(devices, int) and devices > 0:
        return devices
    raise ValueError(f"Devices must be 'auto' or a positive integer, got: {devices!r}")


def choose_logger(
    logger_name: Literal["csv", "tensorboard", "wandb", "mlflow"],
    out_dir: Path,
    name: str,
    log_interval: int = 1,
    log_args: Optional[Dict] = None,
    resume: Optional[bool] = None,
    **kwargs: Any,
):
    if logger_name == "csv":
        return CSVLogger(root_dir=(out_dir / "logs"), name="csv", flush_logs_every_n_steps=log_interval, **kwargs)
    if logger_name == "tensorboard":
        return TensorBoardLogger(root_dir=(out_dir / "logs"), name="tensorboard", **kwargs)
    if logger_name == "wandb":
        project = log_args.pop("project", name)
        run = log_args.pop("run", os.environ.get("WANDB_RUN_NAME"))
        group = log_args.pop("group", os.environ.get("WANDB_RUN_GROUP"))
        return WandbLogger(project=project, name=run, group=group, resume=resume, **kwargs)
    if logger_name == "mlflow":
        return MLFlowLogger(experiment_name=name, **kwargs)
    raise ValueError(f"`--logger_name={logger_name}` is not a valid option. Choose from 'csv', 'tensorboard', 'wandb'.")


def get_argument_names(cls):
    sig = inspect.signature(cls.__init__)
    return {
        name
        for name, param in sig.parameters.items()
        if param.kind in [inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY]
    }


def instantiate_bnb_optimizer(optimizer, model_parameters):
    if (isinstance(optimizer, str) and "AdamW" not in optimizer) or (
        isinstance(optimizer, dict) and "AdamW" not in optimizer.get("class_path", "")
    ):
        raise ValueError("The chosen quantization format only supports the AdamW optimizer.")

    import bitsandbytes as bnb

    if isinstance(optimizer, str):
        optimizer = bnb.optim.PagedAdamW(model_parameters)
    else:
        optim_args = get_argument_names(bnb.optim.PagedAdamW)
        allowed_kwargs = {key: optimizer["init_args"][key] for key in optim_args & optimizer["init_args"].keys()}
        optimizer = bnb.optim.PagedAdamW(model_parameters, **allowed_kwargs)
    return optimizer


def instantiate_torch_optimizer(optimizer, model_parameters, **kwargs):
    # Special care taken where some optimizers do not have some parameters referenced in some of the code, for example "fused" in the pretrain.py script:
    # bnb.optim.AdamW8bit
    # grokadamw.GrokAdamW
    # torch.optim.RMSprop

    if isinstance(optimizer, str):
        if "." in optimizer:
            class_module, class_name = optimizer.rsplit(".", 1)
        else:
            class_module, class_name = "torch.optim", optimizer

        module = __import__(class_module, fromlist=[class_name])
        optimizer_cls = getattr(module, class_name)

        valid_params = set(inspect.signature(optimizer_cls).parameters)
        kwargs = {key: value for key, value in dict(kwargs).items() if key in valid_params}
        optimizer = optimizer_cls(model_parameters, **kwargs)
    elif isinstance(optimizer, dict):
        optimizer = dict(optimizer)
        class_module, class_name = optimizer["class_path"].rsplit(".", 1)
        module = __import__(class_module, fromlist=[class_name])
        optimizer_cls = getattr(module, class_name)

        valid_params = set(inspect.signature(optimizer_cls).parameters)
        kwargs = {key: value for key, value in dict(kwargs).items() if key in valid_params}

        optimizer["init_args"].update(kwargs)
        optimizer = instantiate_class(model_parameters, optimizer)
    else:
        raise ValueError(f'Unrecognized "optimizer" value: {optimizer}')

    return optimizer


def extend_checkpoint_dir(checkpoint_dir: Path) -> Path:
    new_checkpoint_dir = "checkpoints" / checkpoint_dir
    should_return_new_dir = (
        not checkpoint_dir.is_dir()
        and checkpoint_dir.parts[0] != "checkpoints"
        and not checkpoint_dir.is_absolute()
        and new_checkpoint_dir.exists()
    )
    return new_checkpoint_dir if should_return_new_dir else checkpoint_dir


def check_file_size_on_cpu_and_warn(checkpoint_path, device, size_limit=4_509_715_660):
    """
    Checks the file size and raises a warning if it exceeds the size_limit.
    The default size limit is 4.2 GB, the size of TinyLlama 1.1B: 4.2 * 1024 * 1024 * 1024 = 4_509_715_660
    """
    size = 0.0
    if os.path.exists(checkpoint_path):
        size = os.path.getsize(checkpoint_path)
        if size > size_limit and str(device) == "cpu":
            warnings.warn(
                f"The file size of {checkpoint_path} is over {size_limit / 1024 / 1024 / 1024:.1f} GB. Using a model "
                "with more than 1B parameters on a CPU can be slow, it is recommended to switch to a GPU."
            )
    return size
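# Illustrative note (not part of the original file): `instantiate_torch_optimizer` above
# accepts either a class-name string or a jsonargparse-style dict; both sketches below
# resolve to torch.optim.AdamW, and keyword arguments the class does not accept are dropped.
#
#     opt = instantiate_torch_optimizer("AdamW", model.parameters(), lr=3e-4, fused=True)
#     opt = instantiate_torch_optimizer(
#         {"class_path": "torch.optim.AdamW", "init_args": {"lr": 3e-4}}, model.parameters()
#     )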
def auto_download_checkpoint(model_name, access_token=None, ignore_tokenizer_files=False):
    from litgpt.scripts.download import download_from_hub  # moved here due to circular import issue

    checkpoint_dir = extend_checkpoint_dir(Path(model_name))
    try:
        check_valid_checkpoint_dir(
            checkpoint_dir, verbose=False, raise_error=True, ignore_tokenizer_files=ignore_tokenizer_files
        )
    except FileNotFoundError as e:
        if access_token is None:
            access_token = os.getenv("HF_TOKEN")

        if checkpoint_dir.parts[0] != "checkpoints" and not checkpoint_dir.is_absolute():
            download_from_hub(repo_id=str(model_name), access_token=access_token)
            checkpoint_dir = Path("checkpoints") / checkpoint_dir
        else:
            raise e

    return checkpoint_dir


def check_nvlink_connectivity(fabric=None):
    """Checks GPU connectivity for both NVIDIA and AMD GPUs.

    This function delegates to vendor-specific implementations based on
    the detected GPU vendor.
    """
    if fabric is not None:
        custom_print = fabric.print
    else:
        custom_print = print

    if os.getenv("RANK", "0") == "0":
        try:
            if torch.cuda.is_available():
                device_properties = torch.cuda.get_device_properties(0)
                gpu_name = device_properties.name.lower()
                if "nvidia" in gpu_name:
                    _check_nvidia_connectivity(custom_print)
                elif "advanced micro devices" in gpu_name or "amd" in gpu_name:
                    _check_amd_connectivity(custom_print)
                else:
                    custom_print(f"Unrecognized GPU vendor: {device_properties.name}")
            else:
                custom_print("No GPUs available")
        except Exception as e:
            custom_print(f"An error occurred while checking GPU connectivity: {e}")


def _check_nvidia_connectivity(custom_print):
    """Checks NVLink connectivity on NVIDIA GPUs."""
    result = subprocess.run(["nvidia-smi", "topo", "-m"], stdout=subprocess.PIPE, text=True)
    if result.returncode != 0:
        custom_print("Failed to run nvidia-smi")
        return

    lines = result.stdout.strip().split("\n")
    start_index = next((i for i, line in enumerate(lines) if "GPU0" in line), None)
    if start_index is None:
        custom_print("Failed to parse nvidia-smi output")
        return

    headers_line = lines[start_index]
    headers = headers_line.split()
    gpu_regex = re.compile(r"^GPU\d+$")
    gpu_count = len([header for header in headers if gpu_regex.match(header)])

    all_nvlink = True
    for line in lines[start_index + 1 : start_index + 1 + gpu_count]:
        columns = line.split()
        connections = columns[1 : 1 + gpu_count]
        if not all("NV" in conn for conn in connections if conn != "X"):
            all_nvlink = False
            break

    if all_nvlink:
        custom_print("All GPUs are fully connected via NVLink.")
    else:
        custom_print(
            "Warning: Not all GPUs are fully connected via NVLink. Some GPUs are connected via slower interfaces. "
            "It is recommended to switch to a different machine with faster GPU connections for optimal multi-GPU training performance."
        )


def _check_amd_connectivity(custom_print):
    """Checks XGMI connectivity on AMD GPUs."""
    result = subprocess.run(["rocm-smi", "--showtopotype"], stdout=subprocess.PIPE, text=True)
    if result.returncode != 0:
        custom_print("Failed to run rocm-smi")
        return

    lines = result.stdout.strip().split("\n")
    gpu_header_index = next((i for i, line in enumerate(lines) if re.match(r"^\s*GPU0", line)), None)
    if gpu_header_index is None or gpu_header_index == 0:
        custom_print("Failed to parse rocm-smi output (no GPU headers found)")
        return

    header_line = lines[gpu_header_index - 1]
    headers = header_line.strip().split()
    gpu_regex = re.compile(r"^GPU\d+$")
    gpu_count = len([header for header in headers if gpu_regex.match(header)])

    gpu_lines = []
    for line in lines[gpu_header_index : gpu_header_index + gpu_count]:
        if re.match(r"^\s*GPU\d+", line):
            gpu_lines.append(line.strip())
    if len(gpu_lines) != gpu_count:
        custom_print("Mismatch in GPU count when parsing rocm-smi output")
        return

    all_xgmi = True
    for line in gpu_lines:
        columns = line.split()
        connections = columns[1 : 1 + gpu_count]
        for conn in connections:
            if conn not in ("XGMI", "0"):
                all_xgmi = False
                break
        if not all_xgmi:
            break

    if all_xgmi:
        custom_print("All GPUs are fully connected via XGMI.")
    else:
        custom_print(
            "Warning: Not all GPUs are fully connected via XGMI. Some GPUs are connected via slower interfaces. "
            "It is recommended to switch to a different machine with faster GPU connections for optimal multi-GPU training performance."
        )


def fix_and_load_json(s):
    # Remove trailing commas before } or ]
    s = re.sub(r",(\s*[}\]])", r"\1", s)

    # Insert missing commas between properties
    # Match positions where a value is followed by a newline and then a quote without a comma
    pattern = r'(?<=[}\]0-9truefalsenull"])\s*(\n\s*)"'
    replacement = r',\1"'
    s = re.sub(pattern, replacement, s)

    # Now try to parse the JSON
    try:
        return json.loads(s)
    except json.JSONDecodeError as e:
        raise ValueError(f"Failed to parse JSON after fixing: {e}")


def create_finetuning_performance_report(training_time, token_counts, device_type):
    tok_sec = token_counts["raw_tokens_plus_prompt_template_and_padding"] / training_time
    output = f"""
| ------------------------------------------------------
| Token Counts
| - Input Tokens              : {token_counts["raw_tokens"]:>5}
| - Tokens w/ Prompt          : {token_counts["raw_tokens_plus_prompt_template"]:>5}
| - Total Tokens (w/ Padding) : {token_counts["raw_tokens_plus_prompt_template_and_padding"]:>5}
| -----------------------------------------------------
| Performance
| - Training Time             : {training_time:.2f} s
| - Tok/sec                   : {tok_sec:.2f} tok/s
| -----------------------------------------------------
"""

    if device_type == "cuda":
        memory_used = torch.cuda.max_memory_allocated() / 1e9
|
| 815 |
+
output += "| Memory Usage \n"
|
| 816 |
+
output += f"| - Memory Used : {memory_used:.02f} GB \n"
|
| 817 |
+
output += "-------------------------------------------------------\n"
|
| 818 |
+
|
| 819 |
+
return output
|
| 820 |
+
|
| 821 |
+
|
| 822 |
+
def select_sft_generate_example(eval, data):
|
| 823 |
+
if eval.evaluate_example == "first":
|
| 824 |
+
if len(data.test_dataset.data):
|
| 825 |
+
instruction = data.test_dataset.data[0]["instruction"]
|
| 826 |
+
else:
|
| 827 |
+
instruction = data.train_dataset.data[0]["instruction"]
|
| 828 |
+
|
| 829 |
+
elif eval.evaluate_example == "random":
|
| 830 |
+
if len(data.test_dataset.data):
|
| 831 |
+
random_idx = random.randint(0, len(data.test_dataset.data) - 1)
|
| 832 |
+
instruction = data.test_dataset.data[random_idx]["instruction"]
|
| 833 |
+
else:
|
| 834 |
+
random_idx = random.randint(0, len(data.train_dataset.data) - 1)
|
| 835 |
+
instruction = data.train_dataset.data[random_idx]["instruction"]
|
| 836 |
+
|
| 837 |
+
elif isinstance(eval.evaluate_example, int):
|
| 838 |
+
index = eval.evaluate_example
|
| 839 |
+
if len(data.test_dataset.data) > index:
|
| 840 |
+
instruction = data.test_dataset.data[index]["instruction"]
|
| 841 |
+
elif len(data.train_dataset.data) > index:
|
| 842 |
+
instruction = data.train_dataset.data[index]["instruction"]
|
| 843 |
+
else:
|
| 844 |
+
raise IndexError(f"Index {index} is out of range for both test and training datasets.")
|
| 845 |
+
|
| 846 |
+
else:
|
| 847 |
+
raise ValueError(f"Unknown evaluation example type: {eval.evaluate_example}")
|
| 848 |
+
return instruction
|
| 849 |
+
|
| 850 |
+
|
| 851 |
+
def _RunIf(thunder: bool = False, **kwargs):
|
| 852 |
+
import pytest
|
| 853 |
+
from lightning.fabric.utilities.testing import _runif_reasons
|
| 854 |
+
|
| 855 |
+
reasons, marker_kwargs = _runif_reasons(**kwargs)
|
| 856 |
+
|
| 857 |
+
if thunder and not module_available("thunder"):
|
| 858 |
+
# if we require Thunder, but it's not available, we should skip
|
| 859 |
+
reasons.append("Thunder")
|
| 860 |
+
|
| 861 |
+
return pytest.mark.skipif(condition=len(reasons) > 0, reason=f"Requires: [{' + '.join(reasons)}]", **marker_kwargs)
|
| 862 |
+
|
| 863 |
+
|
| 864 |
+
def kill_process_tree(pid: int):
|
| 865 |
+
"""
|
| 866 |
+
Kill a process and all its child processes given the parent PID.
|
| 867 |
+
"""
|
| 868 |
+
try:
|
| 869 |
+
parent = psutil.Process(pid)
|
| 870 |
+
children = parent.children(recursive=True)
|
| 871 |
+
for child in children:
|
| 872 |
+
child.kill()
|
| 873 |
+
parent.kill()
|
| 874 |
+
except psutil.NoSuchProcess:
|
| 875 |
+
pass # Process already exited
|
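A note on the JSON-repair helper above: it only patches two common failure modes (trailing commas and missing commas between properties) before handing the string to json.loads. A minimal usage sketch follows; the malformed payload is hypothetical, not taken from the repository:

from litgpt.utils import fix_and_load_json

# Hypothetical payload: missing comma after the first property, trailing comma before "}".
broken = '{\n    "model": "tiny-llama-1.1b"\n    "epochs": 3,\n}'
print(fix_and_load_json(broken))
# -> {'model': 'tiny-llama-1.1b', 'epochs': 3}
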
out/eval/tinyllama_benches/monthly_metrics.csv
ADDED
|
@@ -0,0 +1,133 @@
month,task,metric,value
2407,arc_challenge,acc,0.26621160409556316
2408,arc_challenge,acc,0.25426621160409557
2409,arc_challenge,acc,0.23890784982935154
2410,arc_challenge,acc,0.24146757679180889
2411,arc_challenge,acc,0.2380546075085324
2412,arc_challenge,acc,0.23464163822525597
2501,arc_challenge,acc,0.22525597269624573
2502,arc_challenge,acc,0.21928327645051193
2503,arc_challenge,acc,0.22013651877133106
2504,arc_challenge,acc,0.22781569965870307
2505,arc_challenge,acc,0.2175767918088737
2506,arc_challenge,acc,0.23976109215017063
2407,arc_challenge,acc_norm,0.3046075085324232
2408,arc_challenge,acc_norm,0.2883959044368601
2409,arc_challenge,acc_norm,0.2764505119453925
2410,arc_challenge,acc_norm,0.26535836177474403
2411,arc_challenge,acc_norm,0.27047781569965873
2412,arc_challenge,acc_norm,0.25170648464163825
2501,arc_challenge,acc_norm,0.2568259385665529
2502,arc_challenge,acc_norm,0.24061433447098976
2503,arc_challenge,acc_norm,0.26023890784982934
2504,arc_challenge,acc_norm,0.24829351535836178
2505,arc_challenge,acc_norm,0.26535836177474403
2506,arc_challenge,acc_norm,0.2687713310580205
2407,arc_easy,acc,0.5757575757575758
2408,arc_easy,acc,0.5391414141414141
2409,arc_easy,acc,0.5260942760942761
2410,arc_easy,acc,0.5244107744107744
2411,arc_easy,acc,0.5071548821548821
2412,arc_easy,acc,0.48569023569023567
2501,arc_easy,acc,0.4941077441077441
2502,arc_easy,acc,0.47095959595959597
2503,arc_easy,acc,0.4852693602693603
2504,arc_easy,acc,0.4659090909090909
2505,arc_easy,acc,0.4722222222222222
2506,arc_easy,acc,0.47769360269360267
2407,arc_easy,acc_norm,0.531986531986532
2408,arc_easy,acc_norm,0.5004208754208754
2409,arc_easy,acc_norm,0.49957912457912457
2410,arc_easy,acc_norm,0.49326599326599324
2411,arc_easy,acc_norm,0.4819023569023569
2412,arc_easy,acc_norm,0.4511784511784512
2501,arc_easy,acc_norm,0.4583333333333333
2502,arc_easy,acc_norm,0.4318181818181818
2503,arc_easy,acc_norm,0.4537037037037037
2504,arc_easy,acc_norm,0.44023569023569026
2505,arc_easy,acc_norm,0.44191919191919193
2506,arc_easy,acc_norm,0.45664983164983164
2407,hellaswag,acc,0.416849233220474
2408,hellaswag,acc,0.3974307906791476
2409,hellaswag,acc,0.38498307110137425
2410,hellaswag,acc,0.3686516630153356
2411,hellaswag,acc,0.35610436168094006
2412,hellaswag,acc,0.3496315475004979
2501,hellaswag,acc,0.34405496912965544
2502,hellaswag,acc,0.33409679346743676
2503,hellaswag,acc,0.33738299143596895
2504,hellaswag,acc,0.3301135232025493
2505,hellaswag,acc,0.3280223063134834
2506,hellaswag,acc,0.3253335988846843
2407,hellaswag,acc_norm,0.5473013343955387
2408,hellaswag,acc_norm,0.5153355905198168
2409,hellaswag,acc_norm,0.489344752041426
2410,hellaswag,acc_norm,0.46484763991236805
2411,hellaswag,acc_norm,0.44343756223859787
2412,hellaswag,acc_norm,0.4257120095598486
2501,hellaswag,acc_norm,0.4239195379406493
2502,hellaswag,acc_norm,0.4017128062139016
2503,hellaswag,acc_norm,0.40201155148376816
2504,hellaswag,acc_norm,0.39543915554670384
2505,hellaswag,acc_norm,0.3903604859589723
2506,hellaswag,acc_norm,0.38697470623381797
2407,mmlu,acc,0.241917105825381
2408,mmlu,acc,0.2318758011679248
2409,mmlu,acc,0.22959692351516878
2410,mmlu,acc,0.23807149978635522
2411,mmlu,acc,0.23508047286711295
2412,mmlu,acc,0.2352229027204102
2501,mmlu,acc,0.2380002848597066
2502,mmlu,acc,0.23593505198689646
2503,mmlu,acc,0.2318758011679248
2504,mmlu,acc,0.23251673550776242
2505,mmlu,acc,0.23678963110667997
2506,mmlu,acc,0.24291411479846176
2407,sciq,acc,0.882
2408,sciq,acc,0.87
2409,sciq,acc,0.882
2410,sciq,acc,0.866
2411,sciq,acc,0.857
2412,sciq,acc,0.855
2501,sciq,acc,0.854
2502,sciq,acc,0.836
2503,sciq,acc,0.82
2504,sciq,acc,0.833
2505,sciq,acc,0.834
2506,sciq,acc,0.847
2407,sciq,acc_norm,0.844
2408,sciq,acc_norm,0.804
2409,sciq,acc_norm,0.819
2410,sciq,acc_norm,0.811
2411,sciq,acc_norm,0.822
2412,sciq,acc_norm,0.811
2501,sciq,acc_norm,0.802
2502,sciq,acc_norm,0.781
2503,sciq,acc_norm,0.771
2504,sciq,acc_norm,0.778
2505,sciq,acc_norm,0.786
2506,sciq,acc_norm,0.795
2407,truthfulqa_mc1,acc,0.2423500611995104
2408,truthfulqa_mc1,acc,0.2594859241126071
2409,truthfulqa_mc1,acc,0.2386780905752754
2410,truthfulqa_mc1,acc,0.2521419828641371
2411,truthfulqa_mc1,acc,0.2607099143206854
2412,truthfulqa_mc1,acc,0.26805385556915545
2501,truthfulqa_mc1,acc,0.27906976744186046
2502,truthfulqa_mc1,acc,0.2974296205630355
2503,truthfulqa_mc1,acc,0.26193390452876375
2504,truthfulqa_mc1,acc,0.27050183598531213
2505,truthfulqa_mc1,acc,0.26438188494492043
2506,truthfulqa_mc1,acc,0.26805385556915545
2407,truthfulqa_mc2,acc,0.41171296428698934
2408,truthfulqa_mc2,acc,0.4127800755882328
2409,truthfulqa_mc2,acc,0.40520433625245705
2410,truthfulqa_mc2,acc,0.42608784023385377
2411,truthfulqa_mc2,acc,0.42431987710612645
2412,truthfulqa_mc2,acc,0.426638224947852
2501,truthfulqa_mc2,acc,0.43294939737693783
2502,truthfulqa_mc2,acc,0.45673716541282944
2503,truthfulqa_mc2,acc,0.4308698731928784
2504,truthfulqa_mc2,acc,0.4490762477503484
2505,truthfulqa_mc2,acc,0.43051964732093856
2506,truthfulqa_mc2,acc,0.4367369684704035
|
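The CSV above is in long format (one row per month/task/metric combination). A minimal sketch for pivoting it into a per-month table, assuming pandas is available; the path is the file added above:

import pandas as pd

df = pd.read_csv("out/eval/tinyllama_benches/monthly_metrics.csv")
# Months as rows, benchmark tasks as columns, raw accuracy as values.
acc = df[df["metric"] == "acc"].pivot(index="month", columns="task", values="value")
print(acc.round(3))
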
out/eval/tinyllama_full_arc_arxiv_mc/2407/config.json
ADDED
|
@@ -0,0 +1,24 @@
{
  "architectures": [
    "LlamaForCausalLM"
  ],
  "bos_token_id": 1,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 2048,
  "initializer_range": 0.02,
  "intermediate_size": 5632,
  "max_position_embeddings": 2048,
  "model_type": "llama",
  "num_attention_heads": 32,
  "num_hidden_layers": 22,
  "num_key_value_heads": 4,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "tie_word_embeddings": false,
  "torch_dtype": "float32",
  "transformers_version": "4.31.0.dev0",
  "use_cache": true,
  "vocab_size": 32000
}
|
out/eval/tinyllama_full_arc_arxiv_mc/2407/generation_config.json
ADDED
|
@@ -0,0 +1,7 @@
{
  "bos_token_id": 1,
  "eos_token_id": 2,
  "pad_token_id": 0,
  "max_length": 2048,
  "transformers_version": "4.31.0.dev0"
}
|
out/eval/tinyllama_full_arc_arxiv_mc/2407/log.txt
ADDED
|
@@ -0,0 +1,29 @@
{'access_token': None,
 'batch_size': 4,
 'checkpoint_dir': PosixPath('out/finetune/tinyllama_full_arc/2407/final'),
 'device': None,
 'dtype': None,
 'force_conversion': False,
 'limit': None,
 'num_fewshot': None,
 'out_dir': PosixPath('out/eval/tinyllama_full_arc_arxiv_mc/2407'),
 'save_filepath': None,
 'seed': 1234,
 'tasks': 'arxiv_mc'}
INFO 09-24 14:45:19 [__init__.py:241] Automatically detected platform cuda.
Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
{'checkpoint_dir': PosixPath('out/finetune/tinyllama_full_arc/2407/final'),
 'output_dir': PosixPath('out/eval/tinyllama_full_arc_arxiv_mc/2407')}
0%| | 0/100 [00:00<?, ?it/s]
|
out/eval/tinyllama_full_arc_arxiv_mc/2407/model_config.yaml
ADDED
|
@@ -0,0 +1,44 @@
attention_logit_softcapping: null
attention_scores_scalar: null
attn_bias: false
bias: false
block_size: 2048
final_logit_softcapping: null
gelu_approximate: none
head_size: 64
hf_config:
  name: TinyLlama-1.1B-intermediate-step-1431k-3T
  org: TinyLlama
intermediate_size: 5632
lm_head_bias: false
mlp_class_name: LLaMAMLP
moe_intermediate_size: null
n_embd: 2048
n_expert: 0
n_expert_per_token: 0
n_head: 32
n_layer: 22
n_query_groups: 4
name: tiny-llama-1.1b
norm_1: true
norm_2: true
norm_class_name: RMSNorm
norm_eps: 1.0e-05
norm_qk: false
norm_qk_type: default
padded_vocab_size: 32000
padding_multiple: 64
parallel_residual: false
post_attention_norm: false
post_mlp_norm: false
rope_adjustments: null
rope_base: 10000
rope_condense_ratio: 1
rope_indices: null
rope_local_base_freq: null
rotary_percentage: 1.0
scale_embeddings: false
shared_attention_norm: false
sliding_window_indices: null
sliding_window_size: null
vocab_size: 32000
|
out/eval/tinyllama_full_arc_arxiv_mc/2407/results.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
out/eval/tinyllama_full_arc_arxiv_mc/2407/tokenizer.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
out/eval/tinyllama_full_arc_arxiv_mc/2407/tokenizer_config.json
ADDED
|
@@ -0,0 +1,35 @@
{
  "add_bos_token": true,
  "add_eos_token": false,
  "bos_token": {
    "__type": "AddedToken",
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "clean_up_tokenization_spaces": false,
  "eos_token": {
    "__type": "AddedToken",
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "legacy": false,
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": null,
  "padding_side": "right",
  "sp_model_kwargs": {},
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": {
    "__type": "AddedToken",
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
|
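The tokenizer files in this directory (tokenizer.json together with the tokenizer_config.json above) are what Hugging Face transformers needs to rebuild the tokenizer. A minimal sketch, assuming the transformers library is installed:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("out/eval/tinyllama_full_arc_arxiv_mc/2407")
# BOS id 1 is prepended because add_bos_token is true in the config above.
print(tokenizer.encode("Hello world"))
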
out/eval/tinyllama_full_arc_arxiv_mc/2407/values.json
ADDED
|
@@ -0,0 +1,286 @@
{
  "tasks": [
    {"task": "arxiv_mc", "metric": "acc", "value": 0.16583333333333333},
    {"task": "arxiv_mc", "metric": "acc_stderr", "value": 0.01074491240245708},
    {"task": "arxiv_mc", "metric": "acc_norm", "value": 0.23916666666666667},
    {"task": "arxiv_mc", "metric": "acc_norm_stderr", "value": 0.012288331311441483},
    {"task": "arxiv_mc_2407", "metric": "acc", "value": 0.2},
    {"task": "arxiv_mc_2407", "metric": "acc_stderr", "value": 0.04020151261036846},
    {"task": "arxiv_mc_2407", "metric": "acc_norm", "value": 0.33},
    {"task": "arxiv_mc_2407", "metric": "acc_norm_stderr", "value": 0.047258156262526045},
    {"task": "arxiv_mc_2408", "metric": "acc", "value": 0.16},
    {"task": "arxiv_mc_2408", "metric": "acc_stderr", "value": 0.036845294917747115},
    {"task": "arxiv_mc_2408", "metric": "acc_norm", "value": 0.18},
    {"task": "arxiv_mc_2408", "metric": "acc_norm_stderr", "value": 0.03861229196653697},
    {"task": "arxiv_mc_2409", "metric": "acc", "value": 0.14},
    {"task": "arxiv_mc_2409", "metric": "acc_stderr", "value": 0.03487350880197772},
    {"task": "arxiv_mc_2409", "metric": "acc_norm", "value": 0.24},
    {"task": "arxiv_mc_2409", "metric": "acc_norm_stderr", "value": 0.04292346959909283},
    {"task": "arxiv_mc_2410", "metric": "acc", "value": 0.2},
    {"task": "arxiv_mc_2410", "metric": "acc_stderr", "value": 0.04020151261036846},
    {"task": "arxiv_mc_2410", "metric": "acc_norm", "value": 0.23},
    {"task": "arxiv_mc_2410", "metric": "acc_norm_stderr", "value": 0.042295258468165044},
    {"task": "arxiv_mc_2411", "metric": "acc", "value": 0.24},
    {"task": "arxiv_mc_2411", "metric": "acc_stderr", "value": 0.04292346959909282},
    {"task": "arxiv_mc_2411", "metric": "acc_norm", "value": 0.34},
    {"task": "arxiv_mc_2411", "metric": "acc_norm_stderr", "value": 0.04760952285695235},
    {"task": "arxiv_mc_2412", "metric": "acc", "value": 0.11},
    {"task": "arxiv_mc_2412", "metric": "acc_stderr", "value": 0.031446603773522035},
    {"task": "arxiv_mc_2412", "metric": "acc_norm", "value": 0.23},
    {"task": "arxiv_mc_2412", "metric": "acc_norm_stderr", "value": 0.04229525846816506},
    {"task": "arxiv_mc_2501", "metric": "acc", "value": 0.14},
    {"task": "arxiv_mc_2501", "metric": "acc_stderr", "value": 0.03487350880197771},
    {"task": "arxiv_mc_2501", "metric": "acc_norm", "value": 0.24},
    {"task": "arxiv_mc_2501", "metric": "acc_norm_stderr", "value": 0.04292346959909284},
    {"task": "arxiv_mc_2502", "metric": "acc", "value": 0.14},
    {"task": "arxiv_mc_2502", "metric": "acc_stderr", "value": 0.034873508801977704},
    {"task": "arxiv_mc_2502", "metric": "acc_norm", "value": 0.2},
    {"task": "arxiv_mc_2502", "metric": "acc_norm_stderr", "value": 0.040201512610368445},
    {"task": "arxiv_mc_2503", "metric": "acc", "value": 0.17},
    {"task": "arxiv_mc_2503", "metric": "acc_stderr", "value": 0.0377525168068637},
    {"task": "arxiv_mc_2503", "metric": "acc_norm", "value": 0.2},
    {"task": "arxiv_mc_2503", "metric": "acc_norm_stderr", "value": 0.04020151261036843},
    {"task": "arxiv_mc_2504", "metric": "acc", "value": 0.15},
    {"task": "arxiv_mc_2504", "metric": "acc_stderr", "value": 0.035887028128263734},
    {"task": "arxiv_mc_2504", "metric": "acc_norm", "value": 0.17},
    {"task": "arxiv_mc_2504", "metric": "acc_norm_stderr", "value": 0.0377525168068637},
    {"task": "arxiv_mc_2505", "metric": "acc", "value": 0.19},
    {"task": "arxiv_mc_2505", "metric": "acc_stderr", "value": 0.03942772444036623},
    {"task": "arxiv_mc_2505", "metric": "acc_norm", "value": 0.24},
    {"task": "arxiv_mc_2505", "metric": "acc_norm_stderr", "value": 0.042923469599092816},
    {"task": "arxiv_mc_2506", "metric": "acc", "value": 0.15},
    {"task": "arxiv_mc_2506", "metric": "acc_stderr", "value": 0.03588702812826371},
    {"task": "arxiv_mc_2506", "metric": "acc_norm", "value": 0.27},
    {"task": "arxiv_mc_2506", "metric": "acc_norm_stderr", "value": 0.0446196043338474}
  ],
  "groups": [
    {"group": "arxiv_mc", "metric": "acc", "value": 0.16583333333333333},
    {"group": "arxiv_mc", "metric": "acc_stderr", "value": 0.01074491240245708},
    {"group": "arxiv_mc", "metric": "acc_norm", "value": 0.23916666666666667},
    {"group": "arxiv_mc", "metric": "acc_norm_stderr", "value": 0.012288331311441483}
  ]
}
|
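values.json stores one record per task/metric pair, with the per-month splits named arxiv_mc_YYMM. A minimal sketch for pulling out the per-month normalized accuracy from the file added above:

import json

with open("out/eval/tinyllama_full_arc_arxiv_mc/2407/values.json") as f:
    values = json.load(f)

# Per-month acc_norm, keyed by the YYMM suffix of the task name.
by_month = {
    t["task"].rsplit("_", 1)[-1]: t["value"]
    for t in values["tasks"]
    if t["task"].startswith("arxiv_mc_") and t["metric"] == "acc_norm"
}
print(by_month)  # {'2407': 0.33, '2408': 0.18, ...}
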
out/eval/tinyllama_full_ppl/2407_full/ppl_metrics.jsonl
ADDED
|
@@ -0,0 +1,48 @@
{"month": "2407", "val_loss": 1.3401238918304443, "val_ppl": 3.8195166829675764, "rank": 2}
{"month": "2407", "val_loss": 1.3308329582214355, "val_ppl": 3.784194150788953, "rank": 1}
{"month": "2407", "val_loss": 1.4430962800979614, "val_ppl": 4.23378452615237, "rank": 3}
{"month": "2407", "val_loss": 1.3418890237808228, "val_ppl": 3.826264587618703, "rank": 0}
{"month": "2408", "val_loss": 1.4649615287780762, "val_ppl": 4.327376757626807, "rank": 2}
{"month": "2408", "val_loss": 1.4553765058517456, "val_ppl": 4.286096902413764, "rank": 1}
{"month": "2408", "val_loss": 1.4550942182540894, "val_ppl": 4.284887161171367, "rank": 3}
{"month": "2408", "val_loss": 1.4382472038269043, "val_ppl": 4.213304277344344, "rank": 0}
{"month": "2409", "val_loss": 1.4172937870025635, "val_ppl": 4.125939646771969, "rank": 2}
{"month": "2409", "val_loss": 1.470693588256836, "val_ppl": 4.352252765856388, "rank": 1}
{"month": "2409", "val_loss": 1.4720611572265625, "val_ppl": 4.35820884343254, "rank": 3}
{"month": "2409", "val_loss": 1.4188635349273682, "val_ppl": 4.132421418012854, "rank": 0}
{"month": "2410", "val_loss": 1.3499130010604858, "val_ppl": 3.8570899533641967, "rank": 2}
{"month": "2410", "val_loss": 1.3595799207687378, "val_ppl": 3.894556935633931, "rank": 1}
{"month": "2410", "val_loss": 1.4495704174041748, "val_ppl": 4.261283548672918, "rank": 3}
{"month": "2410", "val_loss": 1.5745606422424316, "val_ppl": 4.828619660488959, "rank": 0}
{"month": "2411", "val_loss": 1.4264885187149048, "val_ppl": 4.164051500513005, "rank": 2}
{"month": "2411", "val_loss": 1.4281030893325806, "val_ppl": 4.170780086142855, "rank": 1}
{"month": "2411", "val_loss": 1.4030014276504517, "val_ppl": 4.067389640213666, "rank": 3}
{"month": "2411", "val_loss": 1.4369604587554932, "val_ppl": 4.207886315346613, "rank": 0}
{"month": "2412", "val_loss": 1.4747017621994019, "val_ppl": 4.369732359207785, "rank": 1}
{"month": "2412", "val_loss": 1.408434271812439, "val_ppl": 4.089547269247822, "rank": 3}
{"month": "2412", "val_loss": 1.411298155784607, "val_ppl": 4.1012760450356875, "rank": 2}
{"month": "2412", "val_loss": 1.4237630367279053, "val_ppl": 4.152717904926461, "rank": 0}
{"month": "2501", "val_loss": 1.3047839403152466, "val_ppl": 3.6868924188297223, "rank": 1}
{"month": "2501", "val_loss": 1.4822993278503418, "val_ppl": 4.403058124724474, "rank": 2}
{"month": "2501", "val_loss": 1.355212688446045, "val_ppl": 3.8775855865882565, "rank": 3}
{"month": "2501", "val_loss": 1.4138273000717163, "val_ppl": 4.111661892031496, "rank": 0}
{"month": "2502", "val_loss": 1.3723409175872803, "val_ppl": 3.944573818198906, "rank": 1}
{"month": "2502", "val_loss": 1.4143813848495483, "val_ppl": 4.113940732574589, "rank": 2}
{"month": "2502", "val_loss": 1.421377182006836, "val_ppl": 4.142821933177419, "rank": 3}
{"month": "2502", "val_loss": 1.404603362083435, "val_ppl": 4.073910553373853, "rank": 0}
{"month": "2503", "val_loss": 1.426881194114685, "val_ppl": 4.165686942178515, "rank": 1}
{"month": "2503", "val_loss": 1.438934326171875, "val_ppl": 4.216200327715984, "rank": 2}
{"month": "2503", "val_loss": 1.4343088865280151, "val_ppl": 4.196743580268452, "rank": 3}
{"month": "2503", "val_loss": 1.456992745399475, "val_ppl": 4.293029862886088, "rank": 0}
{"month": "2504", "val_loss": 1.4529080390930176, "val_ppl": 4.275529862244869, "rank": 2}
{"month": "2504", "val_loss": 1.4284093379974365, "val_ppl": 4.172057577580683, "rank": 3}
{"month": "2504", "val_loss": 1.4099334478378296, "val_ppl": 4.095682818453564, "rank": 1}
{"month": "2504", "val_loss": 1.4458612203598022, "val_ppl": 4.245506885792738, "rank": 0}
{"month": "2505", "val_loss": 1.4609925746917725, "val_ppl": 4.3102356366241965, "rank": 2}
{"month": "2505", "val_loss": 1.4186663627624512, "val_ppl": 4.131606699858025, "rank": 3}
{"month": "2505", "val_loss": 1.4786559343338013, "val_ppl": 4.387045239634099, "rank": 1}
{"month": "2505", "val_loss": 1.4452773332595825, "val_ppl": 4.243028712645302, "rank": 0}
{"month": "2506", "val_loss": 1.4663327932357788, "val_ppl": 4.333314805956465, "rank": 2}
{"month": "2506", "val_loss": 1.3949086666107178, "val_ppl": 4.0346060615577555, "rank": 3}
{"month": "2506", "val_loss": 1.4446309804916382, "val_ppl": 4.240287105410576, "rank": 1}
{"month": "2506", "val_loss": 1.443738341331482, "val_ppl": 4.23650374792772, "rank": 0}
|
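Each ppl_metrics.jsonl above holds one record per (month, rank) pair: the validation loss and perplexity reported by each of what appear to be four data-parallel ranks. A minimal aggregation sketch (a plain mean of the per-rank perplexities; averaging the losses first and then exponentiating would be the other obvious choice):

import json
from collections import defaultdict

per_month = defaultdict(list)
with open("out/eval/tinyllama_full_ppl/2407_full/ppl_metrics.jsonl") as f:
    for line in f:
        record = json.loads(line)
        per_month[record["month"]].append(record["val_ppl"])

# Average the per-rank perplexities into one value per evaluation month.
monthly_ppl = {month: sum(v) / len(v) for month, v in sorted(per_month.items())}
print(monthly_ppl)
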
out/eval/tinyllama_full_ppl/2408_full/ppl_metrics.jsonl
ADDED
|
@@ -0,0 +1,48 @@
{"month": "2407", "val_loss": 1.362528920173645, "val_ppl": 3.906058933075612, "rank": 2}
{"month": "2407", "val_loss": 1.4777755737304688, "val_ppl": 4.3831847573976574, "rank": 3}
{"month": "2407", "val_loss": 1.3729832172393799, "val_ppl": 3.9471082304289324, "rank": 0}
{"month": "2407", "val_loss": 1.3674103021621704, "val_ppl": 3.9251725111825952, "rank": 1}
{"month": "2408", "val_loss": 1.2508023977279663, "val_ppl": 3.4931447246362146, "rank": 2}
{"month": "2408", "val_loss": 1.2579619884490967, "val_ppl": 3.5182439540845136, "rank": 3}
{"month": "2408", "val_loss": 1.2399872541427612, "val_ppl": 3.4555694202874743, "rank": 0}
{"month": "2408", "val_loss": 1.2464643716812134, "val_ppl": 3.478024192179584, "rank": 1}
{"month": "2409", "val_loss": 1.4675408601760864, "val_ppl": 4.338552903664771, "rank": 3}
{"month": "2409", "val_loss": 1.4120129346847534, "val_ppl": 4.104208598555189, "rank": 2}
{"month": "2409", "val_loss": 1.4647713899612427, "val_ppl": 4.326554033548489, "rank": 1}
{"month": "2409", "val_loss": 1.4133905172348022, "val_ppl": 4.109866380838644, "rank": 0}
{"month": "2410", "val_loss": 1.4405683279037476, "val_ppl": 4.223095237966944, "rank": 3}
{"month": "2410", "val_loss": 1.3448939323425293, "val_ppl": 3.8377794547343513, "rank": 2}
{"month": "2410", "val_loss": 1.3568646907806396, "val_ppl": 3.883996661126899, "rank": 1}
{"month": "2410", "val_loss": 1.573472261428833, "val_ppl": 4.823367142382782, "rank": 0}
{"month": "2411", "val_loss": 1.3975845575332642, "val_ppl": 4.045416684867629, "rank": 3}
{"month": "2411", "val_loss": 1.4199227094650269, "val_ppl": 4.1368006923562195, "rank": 2}
{"month": "2411", "val_loss": 1.4218403100967407, "val_ppl": 4.144741034746768, "rank": 1}
{"month": "2411", "val_loss": 1.4330910444259644, "val_ppl": 4.1916357201597405, "rank": 0}
{"month": "2412", "val_loss": 1.4037216901779175, "val_ppl": 4.070320283845813, "rank": 3}
{"month": "2412", "val_loss": 1.4028061628341675, "val_ppl": 4.066595499659191, "rank": 2}
{"month": "2412", "val_loss": 1.4695839881896973, "val_ppl": 4.347426184178079, "rank": 1}
{"month": "2412", "val_loss": 1.417596459388733, "val_ppl": 4.1271886437789655, "rank": 0}
{"month": "2501", "val_loss": 1.3509228229522705, "val_ppl": 3.8609868945147734, "rank": 3}
{"month": "2501", "val_loss": 1.2983866930007935, "val_ppl": 3.663381738204649, "rank": 1}
{"month": "2501", "val_loss": 1.476736307144165, "val_ppl": 4.378631826202121, "rank": 2}
{"month": "2501", "val_loss": 1.4069976806640625, "val_ppl": 4.083676479811697, "rank": 0}
{"month": "2502", "val_loss": 1.4095300436019897, "val_ppl": 4.094030935866561, "rank": 2}
{"month": "2502", "val_loss": 1.4141857624053955, "val_ppl": 4.113136032144681, "rank": 3}
{"month": "2502", "val_loss": 1.3664177656173706, "val_ppl": 3.9212785667813335, "rank": 1}
{"month": "2502", "val_loss": 1.3973371982574463, "val_ppl": 4.044416137278543, "rank": 0}
{"month": "2503", "val_loss": 1.4294711351394653, "val_ppl": 4.1764898090418745, "rank": 2}
{"month": "2503", "val_loss": 1.4288697242736816, "val_ppl": 4.1739787778462905, "rank": 3}
{"month": "2503", "val_loss": 1.419816255569458, "val_ppl": 4.136360337246501, "rank": 1}
{"month": "2503", "val_loss": 1.4511704444885254, "val_ppl": 4.2681071753028474, "rank": 0}
{"month": "2504", "val_loss": 1.4489697217941284, "val_ppl": 4.2587245830087355, "rank": 2}
{"month": "2504", "val_loss": 1.4048845767974854, "val_ppl": 4.075056358066205, "rank": 1}
{"month": "2504", "val_loss": 1.4212480783462524, "val_ppl": 4.142287114224986, "rank": 3}
{"month": "2504", "val_loss": 1.4392225742340088, "val_ppl": 4.21741581446245, "rank": 0}
{"month": "2505", "val_loss": 1.417108178138733, "val_ppl": 4.125173906868153, "rank": 3}
{"month": "2505", "val_loss": 1.454293131828308, "val_ppl": 4.2814559707540765, "rank": 2}
{"month": "2505", "val_loss": 1.47247314453125, "val_ppl": 4.360004740065115, "rank": 1}
{"month": "2505", "val_loss": 1.438664197921753, "val_ppl": 4.215061566711977, "rank": 0}
{"month": "2506", "val_loss": 1.3888366222381592, "val_ppl": 4.010181981617138, "rank": 3}
{"month": "2506", "val_loss": 1.460412859916687, "val_ppl": 4.307737653470374, "rank": 2}
{"month": "2506", "val_loss": 1.4377959966659546, "val_ppl": 4.211403633107536, "rank": 1}
{"month": "2506", "val_loss": 1.438284993171692, "val_ppl": 4.2134634983607855, "rank": 0}
|
out/eval/tinyllama_full_ppl/2409_full/ppl_metrics.jsonl
ADDED
|
@@ -0,0 +1,48 @@
{"month": "2407", "val_loss": 1.485063910484314, "val_ppl": 4.415247584373104, "rank": 3}
{"month": "2407", "val_loss": 1.3649768829345703, "val_ppl": 3.915632533012995, "rank": 2}
{"month": "2407", "val_loss": 1.3764039278030396, "val_ppl": 3.9606332646741, "rank": 1}
{"month": "2407", "val_loss": 1.379693627357483, "val_ppl": 3.97368401291081, "rank": 0}
{"month": "2408", "val_loss": 1.3427562713623047, "val_ppl": 3.8295843456464147, "rank": 2}
{"month": "2408", "val_loss": 1.3247745037078857, "val_ppl": 3.7613370917958093, "rank": 3}
{"month": "2408", "val_loss": 1.3265122175216675, "val_ppl": 3.7678789014688667, "rank": 0}
{"month": "2408", "val_loss": 1.31806480884552, "val_ppl": 3.7361841462953675, "rank": 1}
{"month": "2409", "val_loss": 1.2037886381149292, "val_ppl": 3.332719502488496, "rank": 2}
{"month": "2409", "val_loss": 1.261081337928772, "val_ppl": 3.5292357212007732, "rank": 1}
{"month": "2409", "val_loss": 1.2550549507141113, "val_ppl": 3.508031137862438, "rank": 3}
{"month": "2409", "val_loss": 1.2141947746276855, "val_ppl": 3.367581310364588, "rank": 0}
{"month": "2410", "val_loss": 1.3304420709609985, "val_ppl": 3.78271524656563, "rank": 2}
{"month": "2410", "val_loss": 1.4265085458755493, "val_ppl": 4.164134895476417, "rank": 3}
{"month": "2410", "val_loss": 1.3427306413650513, "val_ppl": 3.829486194667964, "rank": 1}
{"month": "2410", "val_loss": 1.5633049011230469, "val_ppl": 4.774574696275717, "rank": 0}
{"month": "2411", "val_loss": 1.3834478855133057, "val_ppl": 3.9886302870557864, "rank": 3}
{"month": "2411", "val_loss": 1.4033945798873901, "val_ppl": 4.0689890579359425, "rank": 2}
{"month": "2411", "val_loss": 1.4083776473999023, "val_ppl": 4.089315707592243, "rank": 1}
{"month": "2411", "val_loss": 1.4190096855163574, "val_ppl": 4.133025417973448, "rank": 0}
{"month": "2412", "val_loss": 1.3886818885803223, "val_ppl": 4.009561519494955, "rank": 2}
{"month": "2412", "val_loss": 1.3894152641296387, "val_ppl": 4.012503112391143, "rank": 3}
{"month": "2412", "val_loss": 1.4546624422073364, "val_ppl": 4.2830374488916, "rank": 1}
{"month": "2412", "val_loss": 1.4025143384933472, "val_ppl": 4.065408941249854, "rank": 0}
{"month": "2501", "val_loss": 1.4628865718841553, "val_ppl": 4.318406946594848, "rank": 2}
{"month": "2501", "val_loss": 1.3351476192474365, "val_ppl": 3.8005569403668753, "rank": 3}
{"month": "2501", "val_loss": 1.2839248180389404, "val_ppl": 3.610783619993481, "rank": 1}
{"month": "2501", "val_loss": 1.3923919200897217, "val_ppl": 4.024464747699859, "rank": 0}
{"month": "2502", "val_loss": 1.3984625339508057, "val_ppl": 4.048970024962219, "rank": 3}
{"month": "2502", "val_loss": 1.351839303970337, "val_ppl": 3.8645270377038248, "rank": 1}
{"month": "2502", "val_loss": 1.3960250616073608, "val_ppl": 4.039112790755192, "rank": 2}
{"month": "2502", "val_loss": 1.383058786392212, "val_ppl": 3.987078616413143, "rank": 0}
{"month": "2503", "val_loss": 1.4119216203689575, "val_ppl": 4.1038338426656775, "rank": 3}
{"month": "2503", "val_loss": 1.4145982265472412, "val_ppl": 4.114832903193648, "rank": 2}
{"month": "2503", "val_loss": 1.4040486812591553, "val_ppl": 4.0716514599059, "rank": 1}
{"month": "2503", "val_loss": 1.4355839490890503, "val_ppl": 4.202098103836741, "rank": 0}
{"month": "2504", "val_loss": 1.407296895980835, "val_ppl": 4.084898561186815, "rank": 3}
{"month": "2504", "val_loss": 1.4341951608657837, "val_ppl": 4.196266329963888, "rank": 2}
{"month": "2504", "val_loss": 1.3897756338119507, "val_ppl": 4.013949357438815, "rank": 1}
{"month": "2504", "val_loss": 1.423025131225586, "val_ppl": 4.149654721843654, "rank": 0}
{"month": "2505", "val_loss": 1.4383201599121094, "val_ppl": 4.213611674743316, "rank": 2}
{"month": "2505", "val_loss": 1.406368374824524, "val_ppl": 4.081107406807272, "rank": 3}
{"month": "2505", "val_loss": 1.454991102218628, "val_ppl": 4.284445343374549, "rank": 1}
{"month": "2505", "val_loss": 1.4232839345932007, "val_ppl": 4.150728805442337, "rank": 0}
{"month": "2506", "val_loss": 1.443969964981079, "val_ppl": 4.237485136039307, "rank": 2}
{"month": "2506", "val_loss": 1.3735177516937256, "val_ppl": 3.9492186597714762, "rank": 3}
{"month": "2506", "val_loss": 1.4226653575897217, "val_ppl": 4.148162054004164, "rank": 1}
{"month": "2506", "val_loss": 1.4236834049224854, "val_ppl": 4.152387229668601, "rank": 0}
|
out/eval/tinyllama_full_ppl/2410_full/ppl_metrics.jsonl
ADDED
|
@@ -0,0 +1,48 @@
{"month": "2407", "val_loss": 1.3625130653381348, "val_ppl": 3.9059970036446767, "rank": 2}
{"month": "2407", "val_loss": 1.4842785596847534, "val_ppl": 4.4117814274051845, "rank": 3}
{"month": "2407", "val_loss": 1.377859115600586, "val_ppl": 3.9664009253683554, "rank": 1}
{"month": "2407", "val_loss": 1.3787990808486938, "val_ppl": 3.9701309571737657, "rank": 0}
{"month": "2408", "val_loss": 1.343483805656433, "val_ppl": 3.832371513347234, "rank": 3}
{"month": "2408", "val_loss": 1.3664422035217285, "val_ppl": 3.921374395782834, "rank": 2}
{"month": "2408", "val_loss": 1.344637393951416, "val_ppl": 3.836795043242655, "rank": 1}
{"month": "2408", "val_loss": 1.3435213565826416, "val_ppl": 3.832515425149129, "rank": 0}
{"month": "2409", "val_loss": 1.2931017875671387, "val_ppl": 3.6440721816842823, "rank": 2}
{"month": "2409", "val_loss": 1.3426209688186646, "val_ppl": 3.829066228195459, "rank": 3}
{"month": "2409", "val_loss": 1.3356683254241943, "val_ppl": 3.80253642916214, "rank": 1}
{"month": "2409", "val_loss": 1.298521876335144, "val_ppl": 3.663876999837824, "rank": 0}
{"month": "2410", "val_loss": 1.1201715469360352, "val_ppl": 3.065380014740353, "rank": 1}
{"month": "2410", "val_loss": 1.2227020263671875, "val_ppl": 3.3963523801706224, "rank": 3}
{"month": "2410", "val_loss": 1.1426008939743042, "val_ppl": 3.1349113431069515, "rank": 2}
{"month": "2410", "val_loss": 1.3279122114181519, "val_ppl": 3.7731576031464997, "rank": 0}
{"month": "2411", "val_loss": 1.3699663877487183, "val_ppl": 3.935218421697037, "rank": 3}
{"month": "2411", "val_loss": 1.3956365585327148, "val_ppl": 4.0375438877986864, "rank": 1}
{"month": "2411", "val_loss": 1.3899426460266113, "val_ppl": 4.014619791994358, "rank": 2}
{"month": "2411", "val_loss": 1.4060338735580444, "val_ppl": 4.079742499505393, "rank": 0}
{"month": "2412", "val_loss": 1.3763186931610107, "val_ppl": 3.9602956959020594, "rank": 3}
{"month": "2412", "val_loss": 1.3741707801818848, "val_ppl": 3.951798454310262, "rank": 2}
{"month": "2412", "val_loss": 1.4407111406326294, "val_ppl": 4.223698392790274, "rank": 1}
{"month": "2412", "val_loss": 1.3878103494644165, "val_ppl": 4.006068552143023, "rank": 0}
{"month": "2501", "val_loss": 1.2720528841018677, "val_ppl": 3.5681700884164584, "rank": 1}
{"month": "2501", "val_loss": 1.4463121891021729, "val_ppl": 4.247421908468978, "rank": 2}
{"month": "2501", "val_loss": 1.3237147331237793, "val_ppl": 3.7573530488476545, "rank": 3}
{"month": "2501", "val_loss": 1.3789067268371582, "val_ppl": 3.970558348848071, "rank": 0}
{"month": "2502", "val_loss": 1.3828585147857666, "val_ppl": 3.9862801977265754, "rank": 3}
{"month": "2502", "val_loss": 1.3822216987609863, "val_ppl": 3.983742478733221, "rank": 2}
{"month": "2502", "val_loss": 1.336174726486206, "val_ppl": 3.8044625252956084, "rank": 1}
{"month": "2502", "val_loss": 1.3672521114349365, "val_ppl": 3.9245516343983025, "rank": 0}
{"month": "2503", "val_loss": 1.3872737884521484, "val_ppl": 4.003919628511345, "rank": 1}
{"month": "2503", "val_loss": 1.3932018280029297, "val_ppl": 4.0277255138273516, "rank": 2}
{"month": "2503", "val_loss": 1.3974146842956543, "val_ppl": 4.04472953520371, "rank": 3}
{"month": "2503", "val_loss": 1.4191503524780273, "val_ppl": 4.133606838993904, "rank": 0}
{"month": "2504", "val_loss": 1.4196085929870605, "val_ppl": 4.1355014591586485, "rank": 2}
{"month": "2504", "val_loss": 1.3901379108428955, "val_ppl": 4.0154037825308855, "rank": 3}
{"month": "2504", "val_loss": 1.3766632080078125, "val_ppl": 3.961660311618713, "rank": 1}
{"month": "2504", "val_loss": 1.40700101852417, "val_ppl": 4.083690110575259, "rank": 0}
{"month": "2505", "val_loss": 1.4231395721435547, "val_ppl": 4.150129639313753, "rank": 2}
{"month": "2505", "val_loss": 1.391589879989624, "val_ppl": 4.021238259650132, "rank": 3}
{"month": "2505", "val_loss": 1.4063525199890137, "val_ppl": 4.081042702033581, "rank": 0}
{"month": "2505", "val_loss": 1.4384384155273438, "val_ppl": 4.214109987447828, "rank": 1}
{"month": "2506", "val_loss": 1.4291367530822754, "val_ppl": 4.175093499251175, "rank": 2}
{"month": "2506", "val_loss": 1.357244849205017, "val_ppl": 3.885473475871877, "rank": 3}
{"month": "2506", "val_loss": 1.4066842794418335, "val_ppl": 4.082396851140804, "rank": 1}
{"month": "2506", "val_loss": 1.4085280895233154, "val_ppl": 4.089930959209319, "rank": 0}
|
out/eval/tinyllama_full_ppl/2411_full/ppl_metrics.jsonl
ADDED
|
@@ -0,0 +1,48 @@
{"month": "2407", "val_loss": 1.4872686862945557, "val_ppl": 4.424992954673992, "rank": 3}
{"month": "2407", "val_loss": 1.3803930282592773, "val_ppl": 3.9764641832063625, "rank": 1}
{"month": "2407", "val_loss": 1.380758285522461, "val_ppl": 3.9779168809191128, "rank": 0}
{"month": "2407", "val_loss": 1.3626710176467896, "val_ppl": 3.906614013616793, "rank": 2}
{"month": "2408", "val_loss": 1.3559932708740234, "val_ppl": 3.88061354339132, "rank": 1}
{"month": "2408", "val_loss": 1.353184461593628, "val_ppl": 3.869728933609592, "rank": 3}
{"month": "2408", "val_loss": 1.3515592813491821, "val_ppl": 3.8634450342130005, "rank": 0}
{"month": "2408", "val_loss": 1.377118468284607, "val_ppl": 3.963464308801716, "rank": 2}
{"month": "2409", "val_loss": 1.3564050197601318, "val_ppl": 3.8822117106944525, "rank": 1}
{"month": "2409", "val_loss": 1.313551425933838, "val_ppl": 3.7193593136317187, "rank": 2}
{"month": "2409", "val_loss": 1.3636155128479004, "val_ppl": 3.910305534842863, "rank": 3}
{"month": "2409", "val_loss": 1.317995309829712, "val_ppl": 3.735924494197209, "rank": 0}
{"month": "2410", "val_loss": 1.1867964267730713, "val_ppl": 3.2765676514970967, "rank": 1}
{"month": "2410", "val_loss": 1.307090163230896, "val_ppl": 3.695405026826404, "rank": 3}
{"month": "2410", "val_loss": 1.220895767211914, "val_ppl": 3.3902232246774173, "rank": 2}
{"month": "2410", "val_loss": 1.3971407413482666, "val_ppl": 4.043621661827428, "rank": 0}
{"month": "2411", "val_loss": 1.1956223249435425, "val_ppl": 3.305614296752107, "rank": 1}
{"month": "2411", "val_loss": 1.1755108833312988, "val_ppl": 3.239797679744836, "rank": 3}
{"month": "2411", "val_loss": 1.1895569562911987, "val_ppl": 3.2856252092930816, "rank": 2}
{"month": "2411", "val_loss": 1.2081102132797241, "val_ppl": 3.3471532661552215, "rank": 0}
{"month": "2412", "val_loss": 1.4345154762268066, "val_ppl": 4.197610673823818, "rank": 1}
{"month": "2412", "val_loss": 1.3702863454818726, "val_ppl": 3.9364777267141564, "rank": 3}
{"month": "2412", "val_loss": 1.366951823234558, "val_ppl": 3.9233733147773107, "rank": 2}
{"month": "2412", "val_loss": 1.3812273740768433, "val_ppl": 3.979783313925192, "rank": 0}
{"month": "2501", "val_loss": 1.2640061378479004, "val_ppl": 3.539573139623763, "rank": 1}
{"month": "2501", "val_loss": 1.3158841133117676, "val_ppl": 3.7280455433470294, "rank": 3}
{"month": "2501", "val_loss": 1.4420026540756226, "val_ppl": 4.229156880139874, "rank": 2}
{"month": "2501", "val_loss": 1.3726694583892822, "val_ppl": 3.945869984554801, "rank": 0}
{"month": "2502", "val_loss": 1.3292156457901, "val_ppl": 3.7780788730366375, "rank": 1}
{"month": "2502", "val_loss": 1.3757652044296265, "val_ppl": 3.958104323367374, "rank": 3}
{"month": "2502", "val_loss": 1.37555992603302, "val_ppl": 3.9572918934482804, "rank": 2}
{"month": "2502", "val_loss": 1.3592376708984375, "val_ppl": 3.8932242520962257, "rank": 0}
{"month": "2503", "val_loss": 1.382246494293213, "val_ppl": 3.9838412589728835, "rank": 1}
{"month": "2503", "val_loss": 1.3877980709075928, "val_ppl": 4.006019363704648, "rank": 2}
{"month": "2503", "val_loss": 1.3916492462158203, "val_ppl": 4.021476992476507, "rank": 3}
{"month": "2503", "val_loss": 1.413512110710144, "val_ppl": 4.110366144158445, "rank": 0}
{"month": "2504", "val_loss": 1.3709253072738647, "val_ppl": 3.938993789324889, "rank": 1}
{"month": "2504", "val_loss": 1.3838448524475098, "val_ppl": 3.99021395570376, "rank": 3}
{"month": "2504", "val_loss": 1.412560224533081, "val_ppl": 4.1064554050277025, "rank": 2}
{"month": "2504", "val_loss": 1.4007281064987183, "val_ppl": 4.058153659465043, "rank": 0}
{"month": "2505", "val_loss": 1.4315732717514038, "val_ppl": 4.1852785955573495, "rank": 1}
{"month": "2505", "val_loss": 1.3883154392242432, "val_ppl": 4.008092487437342, "rank": 3}
{"month": "2505", "val_loss": 1.4160114526748657, "val_ppl": 4.120652203588402, "rank": 2}
{"month": "2505", "val_loss": 1.40048086643219, "val_ppl": 4.057150445306773, "rank": 0}
{"month": "2506", "val_loss": 1.399654507637024, "val_ppl": 4.053799168222336, "rank": 1}
{"month": "2506", "val_loss": 1.3464387655258179, "val_ppl": 3.843712765593928, "rank": 3}
{"month": "2506", "val_loss": 1.41997492313385, "val_ppl": 4.137016695536668, "rank": 2}
{"month": "2506", "val_loss": 1.4047600030899048, "val_ppl": 4.07454874480537, "rank": 0}
|