Upload generate-responses.py with huggingface_hub
Browse files — generate-responses.py (+12 −2)
generate-responses.py
CHANGED
|
@@ -47,7 +47,6 @@ from torch import cuda
|
|
| 47 |
from tqdm.auto import tqdm
|
| 48 |
from transformers import AutoTokenizer
|
| 49 |
from vllm import LLM, SamplingParams
|
| 50 |
-
from vllm.sampling_params import GuidedDecodingParams
|
| 51 |
|
| 52 |
# Enable HF Transfer for faster downloads
|
| 53 |
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
|
|
@@ -260,6 +259,17 @@ def main(
|
|
| 260 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
| 261 |
|
| 262 |
# Create sampling parameters with guided decoding to enforce valid JSON
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 263 |
sampling_params = SamplingParams(
|
| 264 |
temperature=temperature,
|
| 265 |
top_p=top_p,
|
|
@@ -267,7 +277,7 @@ def main(
|
|
| 267 |
min_p=min_p,
|
| 268 |
max_tokens=max_tokens,
|
| 269 |
repetition_penalty=repetition_penalty,
|
| 270 |
-
guided_decoding=
|
| 271 |
)
|
| 272 |
|
| 273 |
# Load dataset
|
|
|
|
| 47 |
from tqdm.auto import tqdm
|
| 48 |
from transformers import AutoTokenizer
|
| 49 |
from vllm import LLM, SamplingParams
|
|
|
|
| 50 |
|
| 51 |
# Enable HF Transfer for faster downloads
|
| 52 |
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
|
|
|
|
| 259 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
| 260 |
|
| 261 |
# Create sampling parameters with guided decoding to enforce valid JSON
|
| 262 |
+
try:
|
| 263 |
+
from vllm.sampling_params import GuidedDecodingParams
|
| 264 |
+
guided = GuidedDecodingParams(json_object=True)
|
| 265 |
+
except ImportError:
|
| 266 |
+
try:
|
| 267 |
+
from vllm.guided_decoding import GuidedDecodingParams
|
| 268 |
+
guided = GuidedDecodingParams(json_object=True)
|
| 269 |
+
except ImportError:
|
| 270 |
+
guided = {"json_object": True}
|
| 271 |
+
logger.info("Using dict-based guided decoding config")
|
| 272 |
+
|
| 273 |
sampling_params = SamplingParams(
|
| 274 |
temperature=temperature,
|
| 275 |
top_p=top_p,
|
|
|
|
| 277 |
min_p=min_p,
|
| 278 |
max_tokens=max_tokens,
|
| 279 |
repetition_penalty=repetition_penalty,
|
| 280 |
+
guided_decoding=guided,
|
| 281 |
)
|
| 282 |
|
| 283 |
# Load dataset
|