# 🛡️ CodeSentry Backend Configuration

# ── Server ──────────────────────────────────
PORT=8000
HOST=0.0.0.0
RELOAD=true
# WARNING: "*" allows requests from any origin — restrict to known hosts in production.
CORS_ORIGINS=*

# ── LLM Configuration ───────────────────────
# For Local vLLM (AMD MI300X):
# VLLM_BASE_URL=http://localhost:8080/v1
# MODEL_NAME=Qwen/Qwen2.5-Coder-32B-Instruct
# LLM_API_KEY=not-needed-local

# For Groq:
# VLLM_BASE_URL=https://api.groq.com/openai/v1
# MODEL_NAME=llama-3.3-70b-versatile
# LLM_API_KEY=gsk_your_groq_api_key_here

VLLM_BASE_URL=http://localhost:8080/v1
MODEL_NAME=Qwen/Qwen2.5-Coder-32B-Instruct
LLM_API_KEY=not-needed-local

# ── Analysis Mode ───────────────────────────
# Set to false for static-only scanning (no GPU/API needed)
USE_LLM=true

# ── Privacy & Security ──────────────────────
# HMAC key for cryptographically signing ZDR certificates
# CHANGE THIS IN PRODUCTION!
ZDR_SIGNING_KEY=codesentry-dev-secret-key-12345