#!/usr/bin/env python3
"""
Stack 2.9 Data Preparation Pipeline
Loads, deduplicates, quality-filters, formats, and tokenizes training data for instruction tuning.
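
Usage (argument parsing is defined in the __main__ block at the bottom of this file):
    python <path_to_this_script> [--config PATH_TO_YAML] [--force]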
"""

import json
import sys
import hashlib
from pathlib import Path
from typing import List, Dict, Any, Optional

import numpy as np
import yaml
import pandas as pd
from datasets import Dataset, load_from_disk
from transformers import AutoTokenizer


def load_config(config_path: Optional[str] = None) -> Dict[str, Any]:
    """Load training configuration from YAML file."""
    if config_path is None:
        config_path = Path(__file__).parent / "train_config.yaml"
    
    with open(config_path, 'r') as f:
        return yaml.safe_load(f)
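
# For reference, a minimal train_config.yaml matching the keys this script reads
# (config["model"]["name"] and config["data"][...]) might look like the sketch
# below; the paths and values are illustrative placeholders, not the actual config:
#
#   model:
#     name: Qwen/Qwen2.5-Coder-32B
#   data:
#     input_path: data/raw/train.jsonl
#     train_dir: data/processed/train
#     eval_dir: data/processed/eval
#     max_length: 131072
#     train_split: 0.95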


def load_jsonl(file_path: Path) -> List[Dict[str, Any]]:
    """Load data from JSONL file."""
    if not file_path.exists():
        raise FileNotFoundError(f"Training data file not found: {file_path}")
    
    data = []
    with open(file_path, 'r', encoding='utf-8') as f:
        for line_num, line in enumerate(f, 1):
            line = line.strip()
            if not line:
                continue
            try:
                data.append(json.loads(line))
            except json.JSONDecodeError as e:
                print(f"Warning: Skipping line {line_num} - JSON decode error: {e}")
                continue
    
    if not data:
        raise ValueError(f"No valid data found in {file_path}")
    
    return data


def format_for_instruction_tuning(
    example: Dict[str, Any],
    model_name: str = "Qwen/Qwen2.5-Coder-32B"
) -> str:
    """
    Format a training example into a single instruction-tuning string using "### Role:" headers.
    Handles multiple data formats: messages, instruction/response, prompt/completion, input/output.
    Note: model_name is accepted for interface consistency, but the model's chat template is not applied here.
    """
    # Format 1: OpenAI-style messages (messages field)
    if "messages" in example:
        messages = example["messages"]
        
        # Extract system, user, assistant messages
        system_msg = None
        user_msg = None
        assistant_msg = None
        
        for msg in messages:
            role = msg.get("role", "")
            content = msg.get("content", "")
            
            if role == "system":
                system_msg = content
            elif role == "user":
                user_msg = content
            elif role == "assistant":
                assistant_msg = content
        
        # Build formatted string
        if system_msg:
            return f"### System:\n{system_msg}\n\n### User:\n{user_msg}\n\n### Assistant:\n{assistant_msg}"
        else:
            return f"### User:\n{user_msg}\n\n### Assistant:\n{assistant_msg}"
    
    # Format 2: instruction/response
    if "instruction" in example and "response" in example:
        return f"### Instruction:\n{example['instruction']}\n\n### Response:\n{example['response']}"
    
    # Format 3: prompt/completion
    if "prompt" in example and "completion" in example:
        return f"### Prompt:\n{example['prompt']}\n\n### Completion:\n{example['completion']}"
    
    # Format 4: input/output
    if "input" in example and "output" in example:
        return f"### Input:\n{example['input']}\n\n### Output:\n{example['output']}"
    
    raise ValueError(f"Unknown data format. Expected one of: messages, instruction/response, prompt/completion, input/output. Keys found: {list(example.keys())}")
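
# Illustration of the "messages" branch above: an input like
#   {"messages": [{"role": "user", "content": "Hi"}, {"role": "assistant", "content": "Hello"}]}
# is rendered as the string
#   "### User:\nHi\n\n### Assistant:\nHello"
# (with a leading "### System:" block when a system message is present).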


def deduplicate(data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """
    Remove duplicate examples based on content hash.
    """
    seen_hashes = set()
    unique_data = []
    
    for example in data:
        # Create hash from the formatted content
        content = json.dumps(example, sort_keys=True)
        content_hash = hashlib.md5(content.encode()).hexdigest()
        
        if content_hash not in seen_hashes:
            seen_hashes.add(content_hash)
            unique_data.append(example)
    
    duplicates_removed = len(data) - len(unique_data)
    if duplicates_removed > 0:
        print(f"Removed {duplicates_removed} duplicate examples")
    
    return unique_data
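
# Note: deduplication above is exact-match on the JSON-serialized example, so two
# examples that differ by even one character are both kept. For example:
#   deduplicate([{"a": 1}, {"a": 1}, {"a": 2}])  ->  [{"a": 1}, {"a": 2}]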


def quality_filter(
    data: List[Dict[str, Any]],
    min_length: int = 10,
    max_length: int = 128000,
    require_response: bool = True
) -> List[Dict[str, Any]]:
    """
    Filter training data based on quality criteria.
    
    Args:
        data: List of training examples
        min_length: Minimum response length, in characters
        max_length: Maximum length of the JSON-serialized example, in characters
        require_response: Whether to require non-empty response
    
    Returns:
        Filtered list of examples
    """
    filtered_data = []
    
    for example in data:
        try:
            # Extract response content
            response = ""
            
            if "messages" in example:
                for msg in example["messages"]:
                    if msg.get("role") == "assistant":
                        response = msg.get("content", "")
                        break
            elif "response" in example:
                response = example["response"]
            elif "completion" in example:
                response = example["completion"]
            elif "output" in example:
                response = example["output"]
            
            # Skip if no response
            if require_response and not response:
                continue
            
            # Skip if response too short
            if len(response) < min_length:
                continue
            
            # Skip if total content too long
            if len(json.dumps(example)) > max_length:
                continue
            
            filtered_data.append(example)
            
        except Exception as e:
            print(f"Warning: Skipping example due to error: {e}")
            continue
    
    filtered_count = len(data) - len(filtered_data)
    if filtered_count > 0:
        print(f"Filtered out {filtered_count} low-quality examples")
    
    return filtered_data
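
# Example of the default thresholds above: an entry whose extracted response is
# shorter than 10 characters (e.g. an assistant reply of just "ok") is dropped,
# as is any entry whose JSON serialization exceeds 128,000 characters.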


def tokenize_dataset(
    texts: List[str],
    tokenizer: AutoTokenizer,
    max_length: int = 131072,
    add_special_tokens: bool = True
) -> Dataset:
    """
    Tokenize a list of texts into a Hugging Face Dataset, padding and truncating to max_length.
    """
    def tokenize_batch(batch):
        return tokenizer(
            batch["text"],
            padding="max_length",
            truncation=True,
            max_length=max_length,
            return_tensors=None,
            add_special_tokens=add_special_tokens
        )
    
    # Create dataset from texts
    df = pd.DataFrame({"text": texts})
    dataset = Dataset.from_pandas(df)
    
    # Tokenize
    dataset = dataset.map(
        tokenize_batch,
        batched=True,
        remove_columns=["text"],
        desc="Tokenizing dataset"
    )
    
    return dataset
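
# The Dataset returned above keeps only the tokenizer's outputs (typically
# "input_ids" and "attention_mask"); the raw "text" column is removed. For example:
#   ds = tokenize_dataset(["### User:\nHi\n\n### Assistant:\nHello"], tokenizer, max_length=64)
#   ds[0]["input_ids"]   # list of token ids padded/truncated to 64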


def prepare_data(
    config_path: Optional[str] = None,
    force: bool = False
) -> Dict[str, Any]:
    """
    Main data preparation pipeline.
    
    Args:
        config_path: Path to config file
        force: Force re-creation even if data exists
    
    Returns:
        Dictionary with dataset info
    """
    print("=" * 60)
    print("Stack 2.9 Data Preparation Pipeline")
    print("=" * 60)
    
    # Load config
    config = load_config(config_path)
    data_config = config["data"]
    
    # Set paths
    input_path = Path(data_config["input_path"])
    train_dir = Path(data_config["train_dir"])
    eval_dir = Path(data_config["eval_dir"])
    max_length = data_config["max_length"]
    train_split = data_config["train_split"]
    
    # Check if data already exists
    if not force and train_dir.exists() and eval_dir.exists():
        print(f"Data already exists at {train_dir} and {eval_dir}")
        print("Use force=True to re-create")
        
        # Load and return stats
        train_ds = load_from_disk(str(train_dir))
        eval_ds = load_from_disk(str(eval_dir))
        
        return {
            "train_samples": len(train_ds),
            "eval_samples": len(eval_ds),
            "train_dir": str(train_dir),
            "eval_dir": str(eval_dir)
        }
    
    # Create directories
    train_dir.mkdir(parents=True, exist_ok=True)
    eval_dir.mkdir(parents=True, exist_ok=True)
    
    # Step 1: Load raw data
    print(f"\n📁 Loading data from: {input_path}")
    raw_data = load_jsonl(input_path)
    print(f"   Loaded {len(raw_data)} examples")
    
    # Step 2: Deduplicate (on raw dict examples, before formatting)
    print("\n🔄 Deduplicating...")
    unique_data = deduplicate(raw_data)
    print(f"   Unique examples: {len(unique_data)}")
    
    # Step 3: Quality filter (expects dict examples, so it runs before formatting)
    print("\n🧹 Quality filtering...")
    quality_data = quality_filter(unique_data)
    print(f"   After quality filter: {len(quality_data)}")
    
    # Step 4: Format for instruction tuning
    print("\n📝 Formatting examples for instruction tuning...")
    model_name = config["model"]["name"]
    formatted_texts = []
    
    for i, example in enumerate(quality_data):
        try:
            text = format_for_instruction_tuning(example, model_name)
            formatted_texts.append(text)
        except ValueError as e:
            print(f"   Warning: Skipping example {i}: {e}")
    
    print(f"   Formatted {len(formatted_texts)} examples")
    
    # Step 5: Tokenize
    print("\n🔢 Tokenizing...")
    tokenizer = AutoTokenizer.from_pretrained(
        model_name,
        trust_remote_code=True
    )
    
    # Note: the "### Role:" formatting above is used as-is; the tokenizer's
    # chat template is not applied even when one is defined.
    if tokenizer.chat_template is None:
        print("   Note: tokenizer has no chat template (plain-text formatting is used regardless)")
    
    # Split into train/eval
    print(f"\n📊 Splitting data ({train_split*100:.0f}% train / {(1-train_split)*100:.0f}% eval)...")
    
    indices = np.random.permutation(len(formatted_texts))
    split_idx = int(len(formatted_texts) * train_split)
    
    train_indices = indices[:split_idx]
    eval_indices = indices[split_idx:]
    
    train_texts = [formatted_texts[i] for i in train_indices]
    eval_texts = [formatted_texts[i] for i in eval_indices]
    
    # Tokenize datasets
    train_dataset = tokenize_dataset(train_texts, tokenizer, max_length)
    eval_dataset = tokenize_dataset(eval_texts, tokenizer, max_length)
    
    # Save datasets
    print(f"\n๐Ÿ’พ Saving datasets...")
    train_dataset.save_to_disk(str(train_dir))
    eval_dataset.save_to_disk(str(eval_dir))
    
    print(f"   Train: {len(train_dataset)} examples -> {train_dir}")
    print(f"   Eval: {len(eval_dataset)} examples -> {eval_dir}")
    
    print("\n" + "=" * 60)
    print("โœ… Data preparation completed!")
    print("=" * 60)
    
    return {
        "train_samples": len(train_dataset),
        "eval_samples": len(eval_dataset),
        "train_dir": str(train_dir),
        "eval_dir": str(eval_dir)
    }
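
# prepare_data returns a small summary dict, e.g. (counts are illustrative):
#   {"train_samples": 950, "eval_samples": 50,
#    "train_dir": "data/processed/train", "eval_dir": "data/processed/eval"}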


if __name__ == "__main__":
    import argparse
    
    parser = argparse.ArgumentParser(description="Stack 2.9 Data Preparation")
    parser.add_argument("--config", type=str, default=None, help="Path to config file")
    parser.add_argument("--force", action="store_true", help="Force re-create data")
    args = parser.parse_args()
    
    try:
        result = prepare_data(args.config, args.force)
        
        print(f"\n๐Ÿ“Š Summary:")
        print(f"   Training samples: {result['train_samples']}")
        print(f"   Evaluation samples: {result['eval_samples']}")
        
    except Exception as e:
        print(f"\nโŒ Error: {e}", file=sys.stderr)
        sys.exit(1)