# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "accelerate",
#     "datasets",
#     "huggingface-hub[hf_transfer]",
#     "torch",
#     "transformers",
# ]
# ///
"""
Generate responses for prompts in a dataset using transformers continuous batching.

Uses transformers' native continuous batching (CB) for efficient GPU inference.
No vLLM dependency required - works with any model supported by transformers,
including newly released architectures.

Example usage:
    # Local execution
    uv run generate-responses.py \\
        username/input-dataset \\
        username/output-dataset \\
        --prompt-column question

    # With custom model and sampling parameters
    uv run generate-responses.py \\
        username/input-dataset \\
        username/output-dataset \\
        --model-id meta-llama/Llama-3.1-8B-Instruct \\
        --messages-column messages \\
        --temperature 0.9 \\
        --max-tokens 2048

    # HF Jobs execution (see script output for full command)
    hf jobs uv run --flavor l4x1 ...
"""
|
|
import argparse
import logging
import os
import sys
from datetime import datetime
from typing import Optional

# Enable fast Hub transfers via hf_transfer; this must be set before
# huggingface_hub (imported below, and by datasets/transformers) is loaded.
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

import torch
from datasets import load_dataset
from huggingface_hub import DatasetCard, get_token, login
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig

logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)
|
|
|
|
| def check_gpu_availability() -> int: |
| """Check if CUDA is available and return the number of GPUs.""" |
| if not torch.cuda.is_available(): |
| logger.error("CUDA is not available. This script requires a GPU.") |
| logger.error( |
| "Please run on a machine with NVIDIA GPU or use HF Jobs with GPU flavor." |
| ) |
| sys.exit(1) |
|
|
| num_gpus = torch.cuda.device_count() |
| for i in range(num_gpus): |
| gpu_name = torch.cuda.get_device_name(i) |
| gpu_memory = torch.cuda.get_device_properties(i).total_memory / 1024**3 |
| logger.info(f"GPU {i}: {gpu_name} with {gpu_memory:.1f} GB memory") |
|
|
| return num_gpus |
|
|
|
|
| def create_dataset_card( |
| source_dataset: str, |
| model_id: str, |
| messages_column: str, |
| prompt_column: Optional[str], |
| generation_config: GenerationConfig, |
| num_gpus: int, |
| num_examples: int, |
| generation_time: str, |
| num_skipped: int = 0, |
| attn_implementation: str = "paged|sdpa", |
| ) -> str: |
| """Create a dataset card documenting the generation process.""" |
| filtering_section = "" |
| if num_skipped > 0: |
| skip_percentage = (num_skipped / num_examples) * 100 |
| processed = num_examples - num_skipped |
| filtering_section = f""" |
| |
| ### Filtering Statistics |
| |
| - **Total Examples**: {num_examples:,} |
| - **Processed**: {processed:,} ({100 - skip_percentage:.1f}%) |
| - **Skipped (too long)**: {num_skipped:,} ({skip_percentage:.1f}%) |
| |
| Note: Prompts exceeding the model's maximum context length were skipped and have empty responses.""" |
|
|
| input_col = prompt_column if prompt_column else messages_column |
| input_type = "plain text prompts" if prompt_column else "chat messages" |
|
|
| return f"""--- |
| tags: |
| - generated |
| - transformers |
| - continuous-batching |
| - uv-script |
| --- |
| |
| # Generated Responses Dataset |
| |
| This dataset contains generated responses for prompts from [{source_dataset}](https://huggingface.co/datasets/{source_dataset}). |
| |
| ## Generation Details |
| |
| - **Source Dataset**: [{source_dataset}](https://huggingface.co/datasets/{source_dataset}) |
| - **Input Column**: `{input_col}` ({input_type}) |
| - **Model**: [{model_id}](https://huggingface.co/{model_id}) |
| - **Backend**: transformers continuous batching |
| - **Number of Examples**: {num_examples:,} |
| - **Generation Date**: {generation_time}{filtering_section} |
| |
| ### Generation Parameters |
| |
| - **Temperature**: {generation_config.temperature} |
| - **Top P**: {generation_config.top_p} |
| - **Top K**: {generation_config.top_k} |
| - **Max New Tokens**: {generation_config.max_new_tokens} |
| - **Max Batch Tokens**: {generation_config.max_batch_tokens} |
| - **Repetition Penalty**: {generation_config.repetition_penalty} |
| |
| ### Hardware Configuration |
| |
| - **GPUs**: {num_gpus} |
| - **Attention Implementation**: {attn_implementation} |
| |
| ## Dataset Structure |
| |
| The dataset contains all columns from the source dataset plus: |
| - `response`: The generated response from the model |
| |
| ## Generation Script |
| |
| Generated using the transformers continuous batching script from [uv-scripts/transformers-inference](https://huggingface.co/datasets/uv-scripts/transformers-inference). |
| |
| To reproduce this generation: |
| |
| ```bash |
| uv run https://huggingface.co/datasets/uv-scripts/transformers-inference/raw/main/generate-responses.py \\ |
| {source_dataset} \\ |
| <output-dataset> \\ |
| --model-id {model_id} \\ |
| {"--prompt-column " + prompt_column if prompt_column else "--messages-column " + messages_column} \\ |
| --temperature {generation_config.temperature} \\ |
| --top-p {generation_config.top_p} \\ |
| --top-k {generation_config.top_k} \\ |
| --max-tokens {generation_config.max_new_tokens} |
| ``` |
| """ |
|
|
|
|
| def main( |
| src_dataset_hub_id: str, |
| output_dataset_hub_id: str, |
| model_id: str = "Qwen/Qwen3-4B-Instruct-2507", |
| messages_column: str = "messages", |
| prompt_column: Optional[str] = None, |
| output_column: str = "response", |
| temperature: float = 0.7, |
| top_p: float = 0.8, |
| top_k: int = 20, |
| max_tokens: int = 4096, |
| repetition_penalty: float = 1.0, |
| max_batch_tokens: int = 512, |
| dtype: str = "bfloat16", |
| attn_implementation: str = "paged|sdpa", |
| subset: Optional[str] = None, |
| split: str = "train", |
| messages_until_role: Optional[str] = None, |
| skip_long_prompts: bool = True, |
| max_samples: Optional[int] = None, |
| hf_token: Optional[str] = None, |
| ): |
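    """Generate a response for each row of the source dataset and push the augmented dataset to the Hub."""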
    generation_start_time = datetime.now().isoformat()

    # GPU check
    num_gpus = check_gpu_availability()

    # Authentication: explicit argument > environment variable > cached token
    HF_TOKEN = hf_token or os.environ.get("HF_TOKEN") or get_token()
    if not HF_TOKEN:
        logger.error("No HuggingFace token found. Please provide token via:")
        logger.error("  1. --hf-token argument")
        logger.error("  2. HF_TOKEN environment variable")
        logger.error("  3. Run 'huggingface-cli login' or use login() in Python")
        sys.exit(1)

    logger.info("HuggingFace token found, authenticating...")
    login(token=HF_TOKEN)

    # Resolve the torch dtype from its string name
    torch_dtype = getattr(torch, dtype, None)
    if torch_dtype is None:
        logger.error(f"Unknown dtype: {dtype}. Use 'bfloat16', 'float16', or 'float32'.")
        sys.exit(1)

    # Continuous batching currently runs on a single device
    if num_gpus > 1:
        logger.warning(
            "Multiple GPUs detected but transformers CB currently works on a single GPU only. "
            "Using cuda:0. Choose a model that fits on one GPU."
        )
    device_map = "cuda"
    logger.info(
        f"Loading model: {model_id} (dtype={dtype}, attn={attn_implementation}, device_map={device_map})"
    )
|
|
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        attn_implementation=attn_implementation,
        device_map=device_map,
        dtype=torch_dtype,
    )
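    # The whole model is placed on a single device (cuda:0); continuous batching does not
    # shard the model across GPUs, so pick a checkpoint that fits in one GPU's memory.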
|
|
    logger.info("Loading tokenizer...")
    tokenizer = AutoTokenizer.from_pretrained(model_id, padding_side="left")

    # Build the generation config; enable sampling whenever any sampling
    # parameter departs from its neutral value.
    do_sample = temperature != 1.0 or top_p != 1.0 or top_k != 0
    generation_config = GenerationConfig(
        max_new_tokens=max_tokens,
        max_batch_tokens=max_batch_tokens,
        do_sample=do_sample,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=repetition_penalty,
        eos_token_id=tokenizer.eos_token_id,
        pad_token_id=tokenizer.pad_token_id,
    )
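    # max_batch_tokens is the continuous-batching scheduler's per-step token budget,
    # not a cap on the number of requests; raising it improves GPU utilization at the
    # cost of additional memory (see --max-batch-tokens).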
|
|
| |
| logger.info(f"Loading dataset: {src_dataset_hub_id}" + (f" ({subset})" if subset else "")) |
| dataset = load_dataset(src_dataset_hub_id, subset, split=split) |
|
|
| if max_samples is not None and max_samples < len(dataset): |
| logger.info(f"Limiting dataset to {max_samples} samples") |
| dataset = dataset.select(range(max_samples)) |
|
|
| total_examples = len(dataset) |
| logger.info(f"Dataset loaded with {total_examples:,} examples") |
|
|
    # Determine which column to read inputs from
    if prompt_column:
        if prompt_column not in dataset.column_names:
            logger.error(
                f"Column '{prompt_column}' not found. Available columns: {dataset.column_names}"
            )
            sys.exit(1)
        logger.info(f"Using prompt column: '{prompt_column}'")
        use_messages = False
    else:
        if messages_column not in dataset.column_names:
            logger.error(
                f"Column '{messages_column}' not found. Available columns: {dataset.column_names}"
            )
            sys.exit(1)
        logger.info(f"Using messages column: '{messages_column}'")
        use_messages = True

    # Maximum context length supported by the model
    effective_max_len = model.config.max_position_embeddings
    logger.info(f"Model max context length: {effective_max_len}")
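    # Prompts longer than this are filtered out below (or, with --no-skip-long-prompts,
    # left in and allowed to fail at generation time).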
|
|
    # Pre-tokenize all prompts so overlong ones can be filtered before generation
    logger.info("Preparing and tokenizing prompts...")
    valid_input_ids = []
    valid_indices = []
    skipped_info = []

    for i, example in enumerate(dataset):
        if use_messages:
            messages = example[messages_column]

            if messages_until_role:
                # Keep messages up to and including the last message with the target role;
                # fall back to the full conversation if that role never appears.
                truncated = []
                last_truncated = messages
                for msg in messages:
                    truncated.append(msg)
                    if msg["role"] == messages_until_role:
                        last_truncated = list(truncated)
                messages = last_truncated
            prompt = tokenizer.apply_chat_template(
                messages, tokenize=False, add_generation_prompt=True
            )
        else:
            user_prompt = example[prompt_column]
            messages = [{"role": "user", "content": user_prompt}]
            prompt = tokenizer.apply_chat_template(
                messages, tokenize=False, add_generation_prompt=True
            )

        # The chat template already contains any special tokens, so don't add them again
        input_ids = tokenizer.encode(prompt, add_special_tokens=False)

        if skip_long_prompts:
            if len(input_ids) <= effective_max_len:
                valid_input_ids.append(input_ids)
                valid_indices.append(i)
            else:
                skipped_info.append((i, len(input_ids)))
        else:
            valid_input_ids.append(input_ids)
            valid_indices.append(i)
|
|
    # Report any prompts that were skipped for exceeding the context window
    if skip_long_prompts and skipped_info:
        logger.warning(
            f"Skipped {len(skipped_info)} prompts exceeding context length ({effective_max_len} tokens)"
        )
        for prompt_idx, token_count in skipped_info[:10]:
            logger.info(
                f"  - Example {prompt_idx}: {token_count} tokens (exceeds by {token_count - effective_max_len})"
            )
        if len(skipped_info) > 10:
            logger.info(f"  ... and {len(skipped_info) - 10} more")

        skip_percentage = (len(skipped_info) / total_examples) * 100
        if skip_percentage > 10:
            logger.warning(f"WARNING: {skip_percentage:.1f}% of prompts were skipped!")

    if not valid_input_ids:
        logger.error("No valid prompts to process after filtering!")
        sys.exit(1)
|
|
| |
| logger.info(f"Starting generation for {len(valid_input_ids):,} prompts using continuous batching...") |
| logger.info(f"max_batch_tokens={max_batch_tokens}, max_new_tokens={max_tokens}") |
|
|
| batch_outputs = model.generate_batch( |
| inputs=valid_input_ids, |
| generation_config=generation_config, |
| progress_bar=True, |
| ) |
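    # generate_batch returns a dict keyed by request id; each value exposes the generated
    # token ids, which are decoded and mapped back to dataset rows below.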
|
|
    # Map results back to their original dataset rows
    logger.info("Extracting generated responses...")
    responses = [""] * total_examples

    for request_id, output in batch_outputs.items():
        # Request ids encode the submission order after the underscore (e.g. "req_3"),
        # which indexes into valid_indices to recover the original row
        idx = int(request_id.split("_", 1)[1])
        original_idx = valid_indices[idx]
        generated_text = tokenizer.decode(
            output.generated_tokens, skip_special_tokens=True
        )
        responses[original_idx] = generated_text.strip()
|
|
    non_empty = sum(1 for r in responses if r)
    logger.info(
        f"Generated {non_empty:,} non-empty responses out of {total_examples:,} total"
    )

    logger.info("Adding responses to dataset...")
    dataset = dataset.add_column(output_column, responses)
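    # add_column returns a new Dataset; rows whose prompts were skipped keep an empty string.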
|
|
    logger.info("Creating dataset card...")
    card_content = create_dataset_card(
        source_dataset=src_dataset_hub_id,
        model_id=model_id,
        messages_column=messages_column,
        prompt_column=prompt_column,
        generation_config=generation_config,
        num_gpus=num_gpus,
        num_examples=total_examples,
        generation_time=generation_start_time,
        num_skipped=len(skipped_info) if skip_long_prompts else 0,
        attn_implementation=attn_implementation,
    )
|
|
    logger.info(f"Pushing dataset to: {output_dataset_hub_id}")
    dataset.push_to_hub(output_dataset_hub_id, token=HF_TOKEN)

    card = DatasetCard(card_content)
    card.push_to_hub(output_dataset_hub_id, token=HF_TOKEN)

    logger.info("Generation complete!")
    logger.info(
        f"Dataset available at: https://huggingface.co/datasets/{output_dataset_hub_id}"
    )
|
|
|
|
| if __name__ == "__main__": |
| if len(sys.argv) > 1: |
| parser = argparse.ArgumentParser( |
| description="Generate responses using transformers continuous batching", |
| formatter_class=argparse.RawDescriptionHelpFormatter, |
| epilog=""" |
| Examples: |
| # Basic usage with default Qwen model |
| uv run generate-responses.py input-dataset output-dataset \\ |
| --prompt-column question |
| |
| # With custom model and parameters |
| uv run generate-responses.py input-dataset output-dataset \\ |
| --model-id meta-llama/Llama-3.1-8B-Instruct \\ |
| --messages-column messages \\ |
| --temperature 0.9 \\ |
| --max-tokens 2048 |
| |
| # Increase batch token budget for better GPU utilization |
| uv run generate-responses.py input-dataset output-dataset \\ |
| --prompt-column text \\ |
| --max-batch-tokens 2048 |
| """, |
| ) |
|
|
        parser.add_argument(
            "src_dataset_hub_id",
            help="Input dataset on Hugging Face Hub (e.g., username/dataset-name)",
        )
        parser.add_argument(
            "output_dataset_hub_id",
            help="Output dataset name on Hugging Face Hub",
        )
        parser.add_argument(
            "--model-id",
            type=str,
            default="Qwen/Qwen3-4B-Instruct-2507",
            help="Model to use for generation (default: Qwen3-4B-Instruct-2507)",
        )
        parser.add_argument(
            "--subset",
            type=str,
            help="Dataset subset/config name (e.g., 'SFT' for HuggingFaceTB/smoltalk2)",
        )
        parser.add_argument(
            "--split",
            type=str,
            default="train",
            help="Dataset split to use (default: train)",
        )
        parser.add_argument(
            "--messages-column",
            type=str,
            default="messages",
            help="Column containing chat messages (default: messages)",
        )
        parser.add_argument(
            "--prompt-column",
            type=str,
            help="Column containing plain text prompts (alternative to --messages-column)",
        )
        parser.add_argument(
            "--messages-until-role",
            type=str,
            help="Truncate messages up to the last message with this role. "
            "Use 'user' to strip existing assistant responses and regenerate them.",
        )
        parser.add_argument(
            "--output-column",
            type=str,
            default="response",
            help="Column name for generated responses (default: response)",
        )
        parser.add_argument(
            "--max-samples",
            type=int,
            help="Maximum number of samples to process (default: all)",
        )
        parser.add_argument(
            "--temperature",
            type=float,
            default=0.7,
            help="Sampling temperature (default: 0.7)",
        )
        parser.add_argument(
            "--top-p",
            type=float,
            default=0.8,
            help="Top-p sampling parameter (default: 0.8)",
        )
        parser.add_argument(
            "--top-k",
            type=int,
            default=20,
            help="Top-k sampling parameter (default: 20)",
        )
        parser.add_argument(
            "--max-tokens",
            type=int,
            default=4096,
            help="Maximum tokens to generate (default: 4096)",
        )
        parser.add_argument(
            "--repetition-penalty",
            type=float,
            default=1.0,
            help="Repetition penalty (default: 1.0)",
        )
        parser.add_argument(
            "--max-batch-tokens",
            type=int,
            default=512,
            help="Token budget per batch for continuous batching scheduler (default: 512). "
            "Increase for better GPU utilization on large GPUs (e.g., 2048-4096 for A100/H100).",
        )
        parser.add_argument(
            "--dtype",
            type=str,
            default="bfloat16",
            choices=["bfloat16", "float16", "float32"],
            help="Model dtype (default: bfloat16)",
        )
        parser.add_argument(
            "--attn-implementation",
            type=str,
            default="paged|sdpa",
            help="Attention implementation (default: paged|sdpa). "
            "Use 'paged|flash_attention_2' if flash-attn is installed.",
        )
        parser.add_argument(
            "--hf-token",
            type=str,
            help="Hugging Face token (can also use HF_TOKEN env var)",
        )
        parser.add_argument(
            "--skip-long-prompts",
            action="store_true",
            default=True,
            help="Skip prompts exceeding context length (default: True)",
        )
        parser.add_argument(
            "--no-skip-long-prompts",
            dest="skip_long_prompts",
            action="store_false",
            help="Fail on prompts that exceed context length",
        )
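        # --skip-long-prompts and --no-skip-long-prompts write to the same
        # skip_long_prompts destination; the last flag on the command line wins (default: skip).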
|
|
        args = parser.parse_args()

        main(
            src_dataset_hub_id=args.src_dataset_hub_id,
            output_dataset_hub_id=args.output_dataset_hub_id,
            model_id=args.model_id,
            messages_column=args.messages_column,
            prompt_column=args.prompt_column,
            messages_until_role=args.messages_until_role,
            output_column=args.output_column,
            temperature=args.temperature,
            top_p=args.top_p,
            top_k=args.top_k,
            max_tokens=args.max_tokens,
            repetition_penalty=args.repetition_penalty,
            max_batch_tokens=args.max_batch_tokens,
            dtype=args.dtype,
            attn_implementation=args.attn_implementation,
            subset=args.subset,
            split=args.split,
            skip_long_prompts=args.skip_long_prompts,
            max_samples=args.max_samples,
            hf_token=args.hf_token,
        )
    else:
        print("""
Transformers Continuous Batching - Response Generation
======================================================

This script requires arguments. For usage information:
  uv run generate-responses.py --help

Why transformers CB instead of vLLM?
- Works with ANY model supported by transformers (new models immediately!)
- No vLLM/flashinfer dependency issues
- Simpler setup - no custom Docker images or wheel indexes needed
- ~95% of vLLM throughput with PagedAttention and continuous scheduling

Example HF Jobs command:
  hf jobs uv run \\
      --flavor l4x1 \\
      -s HF_TOKEN \\
      https://huggingface.co/datasets/uv-scripts/transformers-inference/raw/main/generate-responses.py \\
      username/input-dataset \\
      username/output-dataset \\
      --prompt-column question \\
      --model-id Qwen/Qwen3-4B-Instruct-2507 \\
      --temperature 0.7 \\
      --max-tokens 4096
""")
|
|