# classification_AI / Dockerfile
# Author: drrobot9 — Initial commit (86e9c13, verified)
FROM python:3.10-slim

# Unbuffered Python output (logs stream immediately) and all model/cache
# directories redirected under /data — presumably the Space's persistent
# storage path; TODO confirm against the deployment platform.
ENV PYTHONUNBUFFERED=1 \
    HF_HOME=/data/huggingface \
    TRANSFORMERS_CACHE=/data/huggingface \
    TORCH_HOME=/data/torch \
    XDG_CACHE_HOME=/data/cache
# OS-level tools needed for dependency/model fetching. --no-install-recommends
# keeps out suggested extras (smaller image); the apt list cache is removed in
# the same layer so it never persists in a layer of the final image.
RUN apt-get update && apt-get install -y --no-install-recommends \
        ca-certificates \
        curl \
        git \
    && rm -rf /var/lib/apt/lists/*
WORKDIR /app

# Copy only the requirements manifest first so the (slow) dependency layer is
# cached until requirements.txt itself changes, independent of app code edits.
COPY requirements.txt .
# --no-cache-dir on both steps keeps pip's wheel cache out of the image layer.
RUN pip install --no-cache-dir --upgrade pip \
    && pip install --no-cache-dir -r requirements.txt
# Pre-download all model weights at build time so container startup needs no
# network fetch. This heredoc form requires BuildKit (Dockerfile frontend
# >= 1.4). Caches land under /data per the ENV settings above.
# NOTE(review): if the platform mounts a persistent volume over /data at
# runtime, this baked-in cache would be shadowed — confirm before relying on it.
RUN python - <<EOF
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoImageProcessor, AutoModelForImageClassification
import open_clip
# Instruction-tuned Qwen LLM; trust_remote_code=True executes code from the
# model repo at build time — the pinned repo should be trusted/audited.
QWEN_MODEL_ID = "Qwen/Qwen2.5-1.5B-Instruct"
AutoTokenizer.from_pretrained(QWEN_MODEL_ID, trust_remote_code=True)
AutoModelForCausalLM.from_pretrained(
QWEN_MODEL_ID,
trust_remote_code=True,
device_map="cpu"
)
# Fine-tuned plant image classifier (processor + weights).
PLANT_MODEL_ID = "drrobot9/BIONEXUS_PLANT_CLASSIFICATION"
AutoImageProcessor.from_pretrained(PLANT_MODEL_ID)
AutoModelForImageClassification.from_pretrained(PLANT_MODEL_ID)
# BioCLIP vision-language model pulled from the HF Hub via open_clip.
open_clip.create_model_and_transforms("hf-hub:imageomics/bioclip")
print("Models cached successfully for Hugging Face Spaces.")
EOF
# Application source is copied last so code edits never invalidate the
# dependency and model-cache layers above.
COPY app ./app

# Documentation only — the port is actually bound by uvicorn in CMD below.
EXPOSE 7860

# Exec-form CMD: uvicorn runs as PID 1 and receives SIGTERM directly on stop.
# NOTE(review): no USER directive, so the server runs as root; consider a
# non-root user unless the deployment platform enforces one.
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "7860"]