# llama-cpp / Dockerfile
# (The following lines are Hugging Face Spaces page metadata captured with the
#  file — TomatoeAI's picture / Update Dockerfile / cbdbdf6 verified /
#  raw / history blame contribute delete / 646 Bytes — commented out so the
#  file parses as a valid Dockerfile.)
FROM ghcr.io/ggml-org/llama.cpp:full
WORKDIR /app

# Install packages: Python's pip, then the Hugging Face Hub client used below
# to fetch the model at build time.
# - apt-get (not apt): apt's CLI is not stable for scripts (hadolint DL3027)
# - update + install combined in one layer so a cached stale index is never used
# - --no-install-recommends and list cleanup keep the layer small
RUN apt-get update && apt-get install -y --no-install-recommends \
      python3-pip \
    && rm -rf /var/lib/apt/lists/*
# --no-cache-dir: pip's download cache would only bloat the image layer (DL3042).
RUN pip install --no-cache-dir -U huggingface_hub
# Download the model (GGUF quant) at build time so the container can start
# without network access. Baked into the image layer at /app.
# NOTE: local_dir_use_symlinks is deprecated in huggingface_hub (>=0.23 copies
# files into local_dir by default), and this image installs the latest client,
# so the argument is no longer passed.
RUN python3 -c 'from huggingface_hub import hf_hub_download; \
hf_hub_download(repo_id="bartowski/Tesslate_OmniCoder-9B-GGUF", \
filename="Tesslate_OmniCoder-9B-IQ4_NL.gguf", \
local_dir="/app")'
# Runtime configuration.
# EXPOSE is documentation only, but records the serving port contract
# (7860 is the port Hugging Face Spaces routes to).
EXPOSE 7860

# The base image supplies the ENTRYPOINT (presumably its tools launcher, where
# "--server" selects llama-server — TODO confirm against the llama.cpp image);
# CMD in exec/JSON form provides the default arguments so they can be
# overridden per-run:
#   -m          path to the GGUF model downloaded above
#   --host/--port  listen on all interfaces on the Spaces port 7860
#   -t 2        CPU threads
#   -c 128000   context window size in tokens
#   -n 32000    max tokens to generate per request
CMD ["--server", \
    "-m", "/app/Tesslate_OmniCoder-9B-IQ4_NL.gguf", \
    "--host", "0.0.0.0", \
    "--port", "7860", \
    "-t", "2", \
    "-c", "128000", \
    "-n", "32000"]