Upload generate-responses.py with huggingface_hub
generate-responses.py  CHANGED  (+14, -5)
@@ -47,6 +47,7 @@ from torch import cuda
 from tqdm.auto import tqdm
 from transformers import AutoTokenizer
 from vllm import LLM, SamplingParams
+from vllm.sampling_params import GuidedDecodingParams

 # Enable HF Transfer for faster downloads
 os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
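Context for the unchanged lines in this hunk: setting HF_HUB_ENABLE_HF_TRANSFER=1 switches huggingface_hub downloads to the Rust-based hf_transfer backend. A minimal standalone sketch of the same pattern, assuming the hf_transfer package is installed (the model id is illustrative); the flag is read from the environment when huggingface_hub is imported, so the safe order is to set it before any import that pulls that library in:

import os

# Set before huggingface_hub (or transformers/vllm, which import it) is
# loaded: the flag is read from the environment at import time.
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

from huggingface_hub import snapshot_download

# With hf_transfer enabled, large files are fetched in parallel chunks.
path = snapshot_download("Qwen/Qwen2.5-0.5B-Instruct")
print(path)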
@@ -162,7 +163,7 @@ uv run https://huggingface.co/datasets/uv-scripts/vllm/raw/main/generate-respons
 def main(
     src_dataset_hub_id: str,
     output_dataset_hub_id: str,
-    model_id: str = "Qwen/
+    model_id: str = "Qwen/Qwen3-235B-A22B-Instruct-2507",
     messages_column: str = "messages",
     prompt_column: Optional[str] = None,
     output_column: str = "response",
@@ -233,7 +234,13 @@ def main(
     # Initialize vLLM
     logger.info(f"Loading model: {model_id}")
     if max_model_len is None:
-        max_model_len =
+        max_model_len = 32768  # Qwen2.5-72B default context length
+    # Auto-enable FP8 quantization for large models (>150B) to fit on 4xA100
+    quantization = None
+    if "235B" in model_id or "405B" in model_id:
+        quantization = "fp8"
+        logger.info(f"Auto-enabling FP8 quantization for large model: {model_id}")
+
     vllm_kwargs = {
         "model": model_id,
         "tensor_parallel_size": tensor_parallel_size,
@@ -241,6 +248,8 @@ def main(
         "enable_prefix_caching": True,
         "max_model_len": max_model_len,
     }
+    if quantization:
+        vllm_kwargs["quantization"] = quantization
     logger.info(f"Using max_model_len={max_model_len}")

     llm = LLM(**vllm_kwargs)
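Taken together, this hunk and the previous one add a name-based heuristic: if the model id advertises a roughly 235B or 405B parameter count, the weights are quantized to FP8 at load time so the model fits on the 4xA100 setup mentioned in the comment. A standalone sketch of the selection logic; the helper name and the example ids are illustrative, not from the script:

from typing import Optional

def pick_quantization(model_id: str) -> Optional[str]:
    # Mirrors the script's substring check: only very large models
    # (by their advertised parameter count) get FP8 quantization.
    if "235B" in model_id or "405B" in model_id:
        return "fp8"
    return None

assert pick_quantization("Qwen/Qwen3-235B-A22B-Instruct-2507") == "fp8"
assert pick_quantization("meta-llama/Llama-3.1-405B-Instruct") == "fp8"
assert pick_quantization("Qwen/Qwen2.5-72B-Instruct") is None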
@@ -250,7 +259,7 @@
     logger.info("Loading tokenizer...")
     tokenizer = AutoTokenizer.from_pretrained(model_id)

-    # Create sampling parameters
+    # Create sampling parameters with guided decoding to enforce valid JSON
     sampling_params = SamplingParams(
         temperature=temperature,
         top_p=top_p,
@@ -258,7 +267,7 @@
         min_p=min_p,
         max_tokens=max_tokens,
         repetition_penalty=repetition_penalty,
-
+        guided_decoding=GuidedDecodingParams(json_object=True),
     )

     # Load dataset
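The guided_decoding line added in the hunk above asks vLLM's guided-decoding backend to constrain token sampling so the finished output parses as a JSON object, which is what the updated comment earlier refers to. A minimal sketch of the same wiring; a small model is substituted for the 235B default so it can run on a single GPU, and the model id and prompt are illustrative:

from vllm import LLM, SamplingParams
from vllm.sampling_params import GuidedDecodingParams

# Small illustrative model; running this still requires a GPU.
llm = LLM(model="Qwen/Qwen2.5-0.5B-Instruct", max_model_len=2048)

sampling_params = SamplingParams(
    temperature=0.7,
    max_tokens=256,
    # Constrain decoding so the output text parses as a JSON object.
    guided_decoding=GuidedDecodingParams(json_object=True),
)

outputs = llm.generate(
    ["Describe the Eiffel Tower as JSON with keys name, city, height_m."],
    sampling_params,
)
print(outputs[0].outputs[0].text)  # should parse with json.loads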
@@ -444,7 +453,7 @@ Examples:
     parser.add_argument(
         "--model-id",
         type=str,
-        default=os.environ.get("MODEL_ID", "Qwen/
+        default=os.environ.get("MODEL_ID", "Qwen/Qwen3-235B-A22B-Instruct-2507"),
         help="Model to use for generation (default: Qwen3-235B-A22B, or MODEL_ID env var)",
     )
     parser.add_argument(
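The default=os.environ.get("MODEL_ID", ...) idiom used above gives three levels of precedence: an explicit --model-id flag wins, then the MODEL_ID environment variable, then the hard-coded default. A self-contained sketch of the pattern (the demo.py script name in the comments is hypothetical):

import argparse
import os

parser = argparse.ArgumentParser()
parser.add_argument(
    "--model-id",
    type=str,
    # Env var overrides the hard-coded default; a CLI flag overrides both.
    default=os.environ.get("MODEL_ID", "Qwen/Qwen3-235B-A22B-Instruct-2507"),
)
args = parser.parse_args()
print(args.model_id)

# python demo.py                                      -> hard-coded default
# MODEL_ID=Qwen/Qwen2.5-72B-Instruct python demo.py   -> env var wins
# python demo.py --model-id mistralai/Mistral-7B-v0.1 -> flag wins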