# function-method-detector / requirements.txt
# Source: ejschwartz/function-method-detector, commit f082cb2 ("I guess torch helps")
# This file was autogenerated by uv via the following command:
# uv pip compile requirements.in
aiofiles==24.1.0
# via gradio
annotated-doc==0.0.4
# via
# fastapi
# typer
annotated-types==0.7.0
# via pydantic
anyio==4.12.1
# via
# gradio
# httpx
# starlette
brotli==1.2.0
# via gradio
certifi==2026.1.4
# via
# httpcore
# httpx
click==8.3.1
# via
# typer
# uvicorn
cuda-bindings==12.9.4
# via torch
cuda-pathfinder==1.3.4
# via cuda-bindings
fastapi==0.129.0
# via gradio
ffmpy==1.0.0
# via gradio
filelock==3.24.3
# via
# huggingface-hub
# torch
fsspec==2026.2.0
# via
# gradio-client
# huggingface-hub
# torch
gradio==6.6.0
# via -r requirements.in
gradio-client==2.1.0
# via gradio
groovy==0.1.2
# via gradio
h11==0.16.0
# via
# httpcore
# uvicorn
hf-xet==1.2.0
# via huggingface-hub
httpcore==1.0.9
# via httpx
httpx==0.28.1
# via
# gradio
# gradio-client
# huggingface-hub
# safehttpx
huggingface-hub==1.4.1
# via
# gradio
# gradio-client
# tokenizers
# transformers
idna==3.11
# via
# anyio
# httpx
jinja2==3.1.6
# via
# gradio
# torch
markdown-it-py==4.0.0
# via rich
markupsafe==3.0.3
# via
# gradio
# jinja2
mdurl==0.1.2
# via markdown-it-py
mpmath==1.3.0
# via sympy
networkx==3.6.1
# via torch
numpy==2.4.2
# via
# -r requirements.in
# gradio
# pandas
# transformers
nvidia-cublas-cu12==12.8.4.1
# via
# nvidia-cudnn-cu12
# nvidia-cusolver-cu12
# torch
nvidia-cuda-cupti-cu12==12.8.90
# via torch
nvidia-cuda-nvrtc-cu12==12.8.93
# via torch
nvidia-cuda-runtime-cu12==12.8.90
# via torch
nvidia-cudnn-cu12==9.10.2.21
# via torch
nvidia-cufft-cu12==11.3.3.83
# via torch
nvidia-cufile-cu12==1.13.1.3
# via torch
nvidia-curand-cu12==10.3.9.90
# via torch
nvidia-cusolver-cu12==11.7.3.90
# via torch
nvidia-cusparse-cu12==12.5.8.93
# via
# nvidia-cusolver-cu12
# torch
nvidia-cusparselt-cu12==0.7.1
# via torch
nvidia-nccl-cu12==2.27.5
# via torch
nvidia-nvjitlink-cu12==12.8.93
# via
# nvidia-cufft-cu12
# nvidia-cusolver-cu12
# nvidia-cusparse-cu12
# torch
nvidia-nvshmem-cu12==3.4.5
# via torch
nvidia-nvtx-cu12==12.8.90
# via torch
orjson==3.11.7
# via gradio
packaging==26.0
# via
# gradio
# gradio-client
# huggingface-hub
# transformers
pandas==3.0.1
# via
# -r requirements.in
# gradio
pillow==12.1.1
# via gradio
pydantic==2.12.5
# via
# fastapi
# gradio
pydantic-core==2.41.5
# via pydantic
pydub==0.25.1
# via gradio
pygments==2.19.2
# via rich
python-dateutil==2.9.0.post0
# via pandas
python-multipart==0.0.22
# via gradio
pytz==2025.2
# via gradio
pyyaml==6.0.3
# via
# gradio
# huggingface-hub
# transformers
regex==2026.2.19
# via transformers
rich==14.3.3
# via typer
safehttpx==0.1.7
# via gradio
safetensors==0.7.0
# via transformers
semantic-version==2.10.0
# via gradio
shellingham==1.5.4
# via
# huggingface-hub
# typer
six==1.17.0
# via python-dateutil
starlette==0.52.1
# via
# fastapi
# gradio
sympy==1.14.0
# via torch
tokenizers==0.22.2
# via transformers
tomlkit==0.13.3
# via gradio
torch==2.10.0
# via -r requirements.in
tqdm==4.67.3
# via
# huggingface-hub
# transformers
transformers==5.2.0
# via -r requirements.in
triton==3.6.0
# via torch
typer==0.24.0
# via
# gradio
# typer-slim
typer-slim==0.24.0
# via
# huggingface-hub
# transformers
typing-extensions==4.15.0
# via
# anyio
# fastapi
# gradio
# gradio-client
# huggingface-hub
# pydantic
# pydantic-core
# starlette
# torch
# typing-inspection
typing-inspection==0.4.2
# via
# fastapi
# pydantic
uvicorn==0.41.0
# via gradio