import json
import logging
import datasets
import requests
import math
import re
from datasets import load_dataset, get_dataset_config_names, get_dataset_infos
from huggingface_hub import HfApi, DatasetCard, DatasetCardData
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class DatasetCommandCenter:
    def __init__(self, token=None):
        self.token = token
        self.api = HfApi(token=token)
        self.username = self.api.whoami()['name']
        logger.info(f"Authenticated to the Hub as: {self.username}")
    # ==========================================
    # 1. METADATA & SCHEMA INSPECTION
    # ==========================================
    def get_dataset_metadata(self, dataset_id):
        """
        Fetches the available configs, splits, and detected license for a dataset.
        """
        configs = ['default']
        splits = ['train', 'test', 'validation']
        license_name = "unknown"
        try:
            # 1. Fetch configs
            try:
                found_configs = get_dataset_config_names(dataset_id, token=self.token)
                if found_configs:
                    configs = found_configs
            except Exception:
                pass
            # 2. Fetch metadata (splits & license)
            try:
                selected = configs[0]
                infos = get_dataset_infos(dataset_id, token=self.token)
                info = None
                if selected in infos:
                    info = infos[selected]
                elif 'default' in infos:
                    info = infos['default']
                elif infos:
                    info = list(infos.values())[0]
                if info:
                    splits = list(info.splits.keys())
                    license_name = info.license or "unknown"
            except Exception:
                pass
            return {
                "status": "success",
                "configs": configs,
                "splits": splits,
                "license_detected": license_name
            }
        except Exception as e:
            return {"status": "error", "message": str(e)}
    def get_splits_for_config(self, dataset_id, config_name):
        try:
            infos = get_dataset_infos(dataset_id, config_name=config_name, token=self.token)
            if config_name in infos:
                splits = list(infos[config_name].splits.keys())
            elif len(infos) > 0:
                splits = list(list(infos.values())[0].splits.keys())
            else:
                splits = ['train', 'test']
            return {"status": "success", "splits": splits}
        except Exception:
            return {"status": "success", "splits": ['train', 'test', 'validation']}
    def _sanitize_for_json(self, obj):
        """
        Recursively cleans data for JSON serialization.
        """
        if isinstance(obj, float):
            if math.isnan(obj) or math.isinf(obj):
                return None
            return obj
        elif isinstance(obj, dict):
            return {k: self._sanitize_for_json(v) for k, v in obj.items()}
        elif isinstance(obj, list):
            return [self._sanitize_for_json(v) for v in obj]
        elif isinstance(obj, (str, int, bool, type(None))):
            return obj
        else:
            return str(obj)
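    # Illustrative sanitization (hypothetical values):
    #   self._sanitize_for_json({"score": float("nan"), "vec": [1.0, float("inf")]})
    #   -> {"score": None, "vec": [1.0, None]}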
    def _flatten_object(self, obj, parent_key='', sep='.'):
        """
        Recursively collects dotted key paths for the UI dropdowns.
        """
        items = {}
        # Transparently parse JSON strings
        if isinstance(obj, str):
            s = obj.strip()
            if (s.startswith('{') and s.endswith('}')) or (s.startswith('[') and s.endswith(']')):
                try:
                    obj = json.loads(s)
                except (ValueError, TypeError):
                    pass
        if isinstance(obj, dict):
            for k, v in obj.items():
                new_key = f"{parent_key}{sep}{k}" if parent_key else k
                items.update(self._flatten_object(v, new_key, sep=sep))
        elif isinstance(obj, list):
            new_key = f"{parent_key}" if parent_key else "list_content"
            items[new_key] = "List"
        else:
            items[parent_key] = type(obj).__name__
        return items
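    # Illustrative flattening (hypothetical row):
    #   self._flatten_object({"meta": {"lang": "en"}, "tags": ["a", "b"]})
    #   -> {"meta.lang": "str", "tags": "List"}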
    def inspect_dataset(self, dataset_id, config, split):
        try:
            conf = config if config != 'default' else None
            ds_stream = load_dataset(dataset_id, name=conf, split=split, streaming=True, token=self.token)
            sample_rows = []
            available_paths = set()
            schema_map = {}
            for i, row in enumerate(ds_stream):
                if i >= 10:
                    break
                # Force materialization from LazyRow to a plain dict
                row = dict(row)
                # Clean row for the UI
                clean_row = self._sanitize_for_json(row)
                sample_rows.append(clean_row)
                # Schema discovery
                flattened = self._flatten_object(row)
                available_paths.update(flattened.keys())
                # List-mode detection
                for k, v in row.items():
                    if k not in schema_map:
                        schema_map[k] = {"type": "Object"}
                    val = v
                    if isinstance(val, str):
                        try:
                            val = json.loads(val)
                        except (ValueError, TypeError):
                            pass
                    if isinstance(val, list):
                        schema_map[k]["type"] = "List"
            sorted_paths = sorted(list(available_paths))
            schema_tree = {}
            for path in sorted_paths:
                root = path.split('.')[0]
                if root not in schema_tree:
                    schema_tree[root] = []
                schema_tree[root].append(path)
            return {
                "status": "success",
                "samples": sample_rows,
                "schema_tree": schema_tree,
                "schema": schema_map,
                "dataset_id": dataset_id
            }
        except Exception as e:
            return {"status": "error", "message": str(e)}
    # ==========================================
    # 2. CORE EXTRACTION LOGIC
    # ==========================================
    def _get_value_by_path(self, obj, path):
        """
        Retrieves a value by key or dotted path. Priority: direct key access (fastest).
        """
        # Handle None/empty path edge cases
        if not path:
            return obj
        # 1. Try direct access first (handles simple column names).
        #    This works for dict, UserDict, and LazyRow thanks to duck typing.
        try:
            # For simple paths (no dots), this is all we need
            if '.' not in path:
                return obj[path]
        except (KeyError, TypeError, AttributeError):
            pass
        # 2. If direct access failed OR the path contains dots, walk the dot notation
        keys = path.split('.')
        current = obj
        for i, key in enumerate(keys):
            if current is None:
                return None
            try:
                # List index access support (e.g. solutions.0.code)
                if isinstance(current, list) and key.isdigit():
                    current = current[int(key)]
                else:
                    # Dictionary-style access
                    current = current[key]
            except (KeyError, TypeError, IndexError, AttributeError):
                return None
            # Lazy parsing: only parse a JSON string if we need to go deeper
            is_last_key = (i == len(keys) - 1)
            if not is_last_key and isinstance(current, str):
                s = current.strip()
                if (s.startswith('{') and s.endswith('}')) or (s.startswith('[') and s.endswith(']')):
                    try:
                        current = json.loads(s)
                    except (ValueError, TypeError):
                        return None
        return current
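    # Illustrative path resolution (hypothetical row and field names):
    #   row = {"solutions": [{"lang": "py", "code": "print(1)"}]}
    #   self._get_value_by_path(row, "solutions.0.code")  # -> "print(1)"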
    def _extract_from_list_logic(self, row, source_col, filter_key, filter_val, target_path):
        """
        FROM source_col FIND ITEM WHERE filter_key == filter_val EXTRACT target_path
        """
        data = row.get(source_col)
        if isinstance(data, str):
            try:
                data = json.loads(data)
            except (ValueError, TypeError):
                return None
        if not isinstance(data, list):
            return None
        matched_item = None
        for item in data:
            # Compare as strings for safety; skip non-dict items
            if isinstance(item, dict) and str(item.get(filter_key, '')) == str(filter_val):
                matched_item = item
                break
        if matched_item:
            return self._get_value_by_path(matched_item, target_path)
        return None
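    # Illustrative list search (hypothetical row and field names):
    #   row = {"solutions": [{"lang": "py", "code": "..."}, {"lang": "cpp", "code": "..."}]}
    #   self._extract_from_list_logic(row, "solutions", "lang", "cpp", "code")
    #   -> the "code" value of the item whose "lang" equals "cpp"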
    def _apply_projection(self, row, recipe):
        new_row = {}
        # OPTIMIZATION: only build eval_context if a Python column is present.
        # This avoids expensive row.copy() calls for simple path operations.
        eval_context = None
        for col_def in recipe['columns']:
            t_type = col_def.get('type', 'simple')
            target_col = col_def['name']
            try:
                if t_type == 'simple':
                    # Fast path - no context needed
                    new_row[target_col] = self._get_value_by_path(row, col_def['source'])
                elif t_type == 'list_search':
                    # Fast path - no context needed
                    new_row[target_col] = self._extract_from_list_logic(
                        row,
                        col_def['source'],
                        col_def['filter_key'],
                        col_def['filter_val'],
                        col_def['target_key']
                    )
                elif t_type == 'python':
                    # Lazy context creation: only pay the cost if used
                    if eval_context is None:
                        eval_context = row.copy()
                        eval_context['row'] = row
                        eval_context['json'] = json
                        eval_context['re'] = re
                        eval_context['requests'] = requests
                    # Evaluates the ENTIRE expression as Python
                    val = eval(col_def['expression'], {}, eval_context)
                    new_row[target_col] = val
                elif t_type == 'requests':
                    # Parse the static JSON payload and POST it to the configured endpoint
                    payload = json.loads(col_def['rpay'])
                    new_row[target_col] = requests.post(col_def['rurl'], json=payload).text
            except Exception as e:
                raise ValueError(f"Column '{target_col}' failed: {str(e)}")
        return new_row
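    # Illustrative recipe structure (column names and dataset fields are hypothetical):
    #   recipe = {
    #       "columns": [
    #           {"type": "simple", "name": "question", "source": "prompt"},
    #           {"type": "list_search", "name": "cpp_code", "source": "solutions",
    #            "filter_key": "lang", "filter_val": "cpp", "target_key": "code"},
    #           {"type": "python", "name": "n_words", "expression": "len(str(row['prompt']).split())"},
    #       ],
    #       "filter_rule": "row.get('prompt') is not None",
    #   }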
    # ==========================================
    # 3. DOCUMENTATION (DATASET CARD)
    # ==========================================
    def _generate_card(self, source_id, target_id, recipe, license_name):
        card_data = DatasetCardData(
            language="en",
            license=license_name,
            tags=["dataset-command-center", "etl", "generated-dataset"],
            source_datasets=[source_id],
        )
        content = f"""
# {target_id.split('/')[-1]}

This dataset is a transformation of [{source_id}](https://huggingface.co/datasets/{source_id}).
It was generated using the **Hugging Face Dataset Command Center**.

## Transformation Recipe

The following operations were applied to the source data:

| Target Column | Operation Type | Source / Logic |
|---------------|----------------|----------------|
"""
        for col in recipe['columns']:
            c_type = col.get('type', 'simple')
            c_name = col['name']
            c_src = col.get('source', '-')
            logic = "-"
            if c_type == 'simple':
                logic = f"Mapped from `{c_src}`"
            elif c_type == 'list_search':
                logic = f"Get `{col['target_key']}` where `{col['filter_key']} == {col['filter_val']}`"
            elif c_type == 'python':
                logic = f"Python: `{col.get('expression')}`"
            content += f"| **{c_name}** | {c_type} | {logic} |\n"
        if recipe.get('filter_rule'):
            content += f"\n### Row Filtering\n**Filter Applied:** `{recipe['filter_rule']}`\n"
        content += f"\n## Original License\nThis dataset inherits the license: `{license_name}` from the source."
        card = DatasetCard.from_template(card_data, content=content)
        return card
    # ==========================================
    # 4. EXECUTION
    # ==========================================
    def process_and_push(self, source_id, config, split, target_id, recipe, max_rows=None, new_license=None):
        logger.info(f"Job started: {source_id} -> {target_id}")
        conf = config if config != 'default' else None

        def gen():
            ds_stream = load_dataset(source_id, name=conf, split=split, streaming=True, token=self.token)
            count = 0
            for i, row in enumerate(ds_stream):
                if max_rows and count >= int(max_rows):
                    break
                # Force materialization from LazyRow to a plain dict
                row = dict(row)
                # 1. Filter
                if recipe.get('filter_rule'):
                    try:
                        ctx = row.copy()
                        ctx['row'] = row
                        ctx['json'] = json
                        ctx['re'] = re
                        ctx['requests'] = requests
                        if not eval(recipe['filter_rule'], {}, ctx):
                            continue
                    except Exception as e:
                        raise ValueError(f"Filter crashed on row {i}: {e}")
                # 2. Projection
                try:
                    yield self._apply_projection(row, recipe)
                    count += 1
                except ValueError:
                    raise
                except Exception as e:
                    raise ValueError(f"Unexpected crash on row {i}: {e}")

        try:
            # 1. Process & push the data
            new_dataset = datasets.Dataset.from_generator(gen)
            new_dataset.push_to_hub(target_id, token=self.token)
            # 2. Dataset card
            try:
                # Resolve the full repo id so the card lands on the same repo as the data
                repo_id = target_id if '/' in target_id else f'{self.username}/{target_id}'
                card = self._generate_card(source_id, target_id, recipe, new_license or "unknown")
                card.push_to_hub(repo_id, token=self.token)
            except Exception as e:
                logger.error(f"Failed to push Dataset Card: {e}")
            return {"status": "success", "rows_processed": len(new_dataset)}
        except Exception as e:
            logger.error(f"Job Failed: {e}")
            return {"status": "failed", "error": str(e)}
    # ==========================================
    # 5. PREVIEW
    # ==========================================
    def preview_transform(self, dataset_id, config, split, recipe):
        conf = config if config != 'default' else None
        try:
            # Load the dataset in streaming mode
            ds_stream = load_dataset(dataset_id, name=conf, split=split, streaming=True, token=self.token)
            processed = []
            for i, row in enumerate(ds_stream):
                # Stop after 5 successful rows
                if len(processed) >= 5:
                    break
                # Force materialization from LazyRow to a standard dict.
                # This fixes the interaction between streaming datasets and JSON serialization.
                row = dict(row)
                # --- Filter logic ---
                passed = True
                if recipe.get('filter_rule'):
                    try:
                        # Create a context only for the filter check
                        ctx = row.copy()
                        ctx['row'] = row
                        ctx['json'] = json
                        ctx['re'] = re
                        if not eval(recipe['filter_rule'], {}, ctx):
                            passed = False
                    except Exception:
                        # If the filter errors out (e.g. missing column), treat the row as filtered out
                        passed = False
                if passed:
                    try:
                        # --- Projection logic ---
                        new_row = self._apply_projection(row, recipe)
                        # --- Sanitization ---
                        # Convert NaNs, Infinity, and complex objects to prevent browser/Flask crashes
                        clean_new_row = self._sanitize_for_json(new_row)
                        processed.append(clean_new_row)
                    except Exception as e:
                        # Capture row-specific errors for the UI
                        processed.append({"_preview_error": f"Row {i} Error: {str(e)}"})
            return processed
        except Exception:
            # Re-raise global errors (like 404 Dataset Not Found) so the UI sees them
            raise
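
# Minimal usage sketch (illustrative only): the token, dataset id, and column/field
# names below are placeholders, not values taken from a real configuration.
if __name__ == "__main__":
    center = DatasetCommandCenter(token="hf_xxx")  # hypothetical token
    meta = center.get_dataset_metadata("some-user/some-dataset")  # hypothetical dataset id
    print(meta)
    recipe = {
        "columns": [
            {"type": "simple", "name": "text", "source": "content"},  # hypothetical source field
        ],
        "filter_rule": None,
    }
    preview = center.preview_transform("some-user/some-dataset", "default", "train", recipe)
    print(json.dumps(preview, indent=2))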