#!/bin/bash
# Fail fast: exit on error, on unset variables, and on pipeline failures.
# (All env reads below use ${VAR:-default}, so -u is safe here.)
set -euo pipefail

# Stack 2.9 vLLM Startup Script
# Handles proper startup, logging, and signal handling
echo "🚀 Starting Stack 2.9 vLLM Server"
echo "================================"

# Configuration — fixed paths inside the container image.
LOG_DIR="/app/logs"
PID_FILE="/app/vllm.pid"
LOG_FILE="${LOG_DIR}/vllm.log"

# Create log directory if it doesn't exist
mkdir -p "${LOG_DIR}"
# Stop the background server (if its PID file exists) and remove the PID
# file, then exit 0. Installed as the trap handler for INT/TERM/EXIT.
cleanup() {
  echo "🛑 Shutting down vLLM server..."
  if [ -f "${PID_FILE}" ]; then
    # Quote the inner path too; ignore kill errors if the process is
    # already gone (intentional best-effort shutdown).
    kill "$(cat "${PID_FILE}")" 2>/dev/null || true
    rm -f "${PID_FILE}"
  fi
  exit 0
}
# Run cleanup on Ctrl+C, TERM, or any exit path.
trap cleanup SIGINT SIGTERM EXIT

# Warn (but do not abort) if /models is missing or empty — the server
# may still be able to download the model from HF at startup.
if [ ! -d "/models" ] || [ -z "$(ls -A /models 2>/dev/null)" ]; then
  echo "⚠️  Warning: No model found in /models"
  echo "   Expected model files in /models directory"
  echo "   Mount a volume with your model or download via HF"
fi

# Report the effective configuration (env vars with their defaults).
echo "🔍 Environment Configuration:"
echo "   MODEL_PATH: ${MODEL_PATH:-/models}"
echo "   MODEL_NAME: ${MODEL_NAME:-meta-llama/Llama-3.1-8B-Instruct}"
echo "   GPU_MEMORY_UTILIZATION: ${GPU_MEMORY_UTILIZATION:-0.9}"
echo "   MAX_MODEL_LEN: ${MAX_MODEL_LEN:-131072}"
echo ""
# Start the server in the background, teeing output to the log file.
echo "Starting vLLM server..."
# Use process substitution instead of a plain pipe: with
# `python … | tee … &`, $! is the PID of *tee*, so the PID file would
# point at the wrong process and cleanup could never stop the server.
python vllm_server.py > >(tee -a "${LOG_FILE}") 2>&1 &
SERVER_PID=$!
echo "${SERVER_PID}" > "${PID_FILE}"

echo "✅ vLLM server started with PID ${SERVER_PID}"
echo " Logs: ${LOG_FILE}"
echo " Health: http://localhost:8000/health"
echo ""
echo "Press Ctrl+C to stop"

# Block until the server exits. The original passed the PID *file path*
# to wait — not a valid job spec — so wait failed silently (masked by
# `|| true`) and the script exited immediately, killing the server.
wait "${SERVER_PID}" 2>/dev/null || true