| import os |
| import pandas as pd |
| from datasets import load_dataset |
| from tqdm import tqdm |
|
|
| |
# Ensure the local cache/output directory exists before any download.
os.makedirs("./datasets", exist_ok=True)

# ChemPile-code configurations to download and export.
configs = [
    "codeparrot_github-code-chemistry-python-default",
    "starcoder-chemistry-default",
]

# Accumulates one DataFrame per (config, split) for the final merge.
all_dataframes = []
|
| |
# Download every configuration and dump each of its splits to its own CSV.
for cfg in tqdm(configs, desc="Processing configurations"):
    print(f"\nLoading configuration: {cfg}")

    ds = load_dataset("jablonkagroup/chempile-code", cfg, cache_dir="./datasets")

    for split, split_ds in ds.items():
        frame = pd.DataFrame(split_ds)
        # Tag every row with its origin so the merged file stays traceable.
        frame["config"] = cfg
        frame["split"] = split
        all_dataframes.append(frame)

        # Per-split export, alongside the merged output produced later.
        frame.to_csv(f"./datasets/{cfg}_{split}.csv", index=False)
|
|
| |
print("\nMerging all data...")

# Merge every (config, split) frame into one table and export it whole.
merged_df = pd.concat(all_dataframes, ignore_index=True)
merged_output_path = "./datasets/chempile_code_complete.csv"
merged_df.to_csv(merged_output_path, index=False)

# Reload from disk so the chunked output below is derived from the CSV
# representation (i.e. the dtypes a consumer of the file would parse).
merged_df = pd.read_csv(merged_output_path)

# Directory that will hold the size-limited chunk files.
all_data_path = "./datasets/all_chempile_code"
os.makedirs(all_data_path, exist_ok=True)
|
|
| |
print("\nSaving in 500MB chunks...")

# Soft per-file target. Actual files can overshoot: each chunk is written
# first and measured after, so the size feedback only adjusts later chunks.
MAX_SIZE_MB = 500
chunk_num = 1
rows_per_chunk = 50000  # initial guess; re-estimated after every written file

start_idx = 0
while start_idx < len(merged_df):
    # Slice the next window of rows (the last chunk may be shorter).
    end_idx = min(start_idx + rows_per_chunk, len(merged_df))
    chunk_df = merged_df.iloc[start_idx:end_idx]

    output_path = f"{all_data_path}/chempile_code_complete_{chunk_num:03d}.csv"
    chunk_df.to_csv(output_path, index=False)
    size_mb = os.path.getsize(output_path) / (1024 * 1024)

    # Re-estimate rows-per-chunk from the observed bytes/row, with a 5%
    # safety margin. Clamp to at least 1 row: without the clamp, very wide
    # rows could drive the estimate to 0, making end_idx == start_idx and
    # spinning this loop forever (bug in the original).
    if size_mb > 0:
        rows_per_chunk = max(1, int(rows_per_chunk * (MAX_SIZE_MB / size_mb) * 0.95))

    print(f"Saved {output_path}: {size_mb:.1f}MB, {len(chunk_df):,} rows")
    start_idx = end_idx
    chunk_num += 1

print(f"\nTotal: {len(merged_df):,} rows in {chunk_num-1} files")
|
|