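"""
Merge many single-row CSV files from one folder into a single large CSV.

Two strategies are provided: a fully in-memory threaded merge
(merge_single_row_csvs) and a batched variant (merge_with_batch_processing)
that appends each batch to the output file to limit memory usage.
"""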
import pandas as pd
import os
import glob
from pathlib import Path
from concurrent.futures import ThreadPoolExecutor, as_completed
from tqdm import tqdm
import threading
from functools import partial


def read_single_csv(file_path, expected_columns=None):
    """
    Helper that reads a single CSV file.

    Args:
        file_path (str): Path to the CSV file
        expected_columns (list): Expected list of column names

    Returns:
        tuple: (DataFrame or None, file name, error message or None)
    """
    try:
        df = pd.read_csv(file_path)

        # Reject files whose columns do not match the reference structure
        if expected_columns and df.columns.tolist() != expected_columns:
            return None, os.path.basename(file_path), "inconsistent column structure"

        return df, os.path.basename(file_path), None

    except Exception as e:
        return None, os.path.basename(file_path), str(e)


def merge_single_row_csvs(folder_path, output_file='merged_data.csv', max_workers=None):
    """
    Merge all single-row CSV files in a folder into one large CSV file using multiple threads.

    Args:
        folder_path (str): Path to the folder containing the CSV files
        output_file (str): Output file name
        max_workers (int): Maximum number of threads; None uses the system default
    """
    csv_files = glob.glob(os.path.join(folder_path, "*.csv"))

    if not csv_files:
        print("No CSV files found in the folder")
        return

    print(f"Found {len(csv_files)} CSV files")

    # Use the first file as the reference for the expected column structure
    try:
        first_df = pd.read_csv(csv_files[0])
        expected_columns = first_df.columns.tolist()
        print(f"Expected column structure: {expected_columns}")
    except Exception as e:
        print(f"Failed to read the first file: {str(e)}")
        return

    all_data = []
    failed_files = []

    # Bind the expected columns so the executor only has to pass the file path
    read_csv_partial = partial(read_single_csv, expected_columns=expected_columns)

    print("Starting multi-threaded file reading...")

    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        future_to_file = {executor.submit(read_csv_partial, file_path): file_path
                          for file_path in csv_files}

        with tqdm(total=len(csv_files), desc="Reading CSV files") as pbar:
            for future in as_completed(future_to_file):
                df, filename, error = future.result()

                if df is not None:
                    all_data.append(df)
                else:
                    failed_files.append((filename, error))

                pbar.update(1)
                pbar.set_postfix({
                    'succeeded': len(all_data),
                    'failed': len(failed_files)
                })

    print("\nProcessing complete:")
    print(f"Successfully read: {len(all_data)} files")
    print(f"Failed: {len(failed_files)} files")

    if failed_files:
        print("\nFailed files:")
        for filename, error in failed_files[:10]:
            print(f"  {filename}: {error}")
        if len(failed_files) > 10:
            print(f"  ... and {len(failed_files) - 10} more failed files")

    if not all_data:
        print("No data was read successfully")
        return

    print("\nMerging data...")
    with tqdm(desc="Merging data") as pbar:
        merged_df = pd.concat(all_data, ignore_index=True)
        pbar.update(1)

    print("Saving file...")
    with tqdm(desc="Saving file") as pbar:
        merged_df.to_csv(output_file, index=False)
        pbar.update(1)

    print("\n✅ Merge complete!")
    print(f"{len(merged_df)} rows saved to {output_file}")

    print("\n📊 Data overview:")
    print(f"Total rows: {len(merged_df):,}")
    print(f"Total columns: {len(merged_df.columns)}")
    print(f"File size: {os.path.getsize(output_file) / 1024 / 1024:.2f} MB")
    print(f"Columns: {list(merged_df.columns)}")

    print("\n📝 Data preview:")
    print(merged_df.head())


def merge_with_batch_processing(folder_path, output_file='merged_data.csv',
                                batch_size=1000, max_workers=None):
    """
    Merge a large number of CSV files in batches to reduce memory usage.

    Args:
        folder_path (str): Path to the folder containing the CSV files
        output_file (str): Output file name
        batch_size (int): Number of files to process per batch
        max_workers (int): Maximum number of threads
    """
    csv_files = glob.glob(os.path.join(folder_path, "*.csv"))

    if not csv_files:
        print("No CSV files found in the folder")
        return

    print(f"Found {len(csv_files)} CSV files; processing in batches")

    # Use the first file as the reference for the expected column structure
    try:
        first_df = pd.read_csv(csv_files[0])
        expected_columns = first_df.columns.tolist()
    except Exception as e:
        print(f"Failed to read the first file: {str(e)}")
        return

    total_rows = 0
    is_first_batch = True

    with tqdm(total=len(csv_files), desc="Overall progress") as main_pbar:
        for i in range(0, len(csv_files), batch_size):
            batch_files = csv_files[i:i + batch_size]
            batch_data = []

            read_csv_partial = partial(read_single_csv, expected_columns=expected_columns)

            with ThreadPoolExecutor(max_workers=max_workers) as executor:
                future_to_file = {executor.submit(read_csv_partial, file_path): file_path
                                  for file_path in batch_files}

                # Note: files that fail to read are silently skipped in this variant
                for future in as_completed(future_to_file):
                    df, filename, error = future.result()
                    if df is not None:
                        batch_data.append(df)
                    main_pbar.update(1)

            if batch_data:
                batch_df = pd.concat(batch_data, ignore_index=True)

                # Write the header only for the first batch, then append
                mode = 'w' if is_first_batch else 'a'
                header = is_first_batch
                batch_df.to_csv(output_file, mode=mode, header=header, index=False)

                total_rows += len(batch_df)
                is_first_batch = False

                print(f"\nBatch {i//batch_size + 1} complete, added {len(batch_df)} rows")

    print(f"\n✅ All batches complete! {total_rows} rows in total saved to {output_file}")


if __name__ == "__main__":
    folder_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386"
    output_file = "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386.csv"

    merge_single_row_csvs(
        folder_path=folder_path,
        output_file=output_file,
        max_workers=8
    )
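
    # For folders with very many files, the batched variant (sketch below, untested
    # here) keeps memory usage bounded by appending each batch to the output file
    # instead of concatenating everything at once:
    # merge_with_batch_processing(
    #     folder_path=folder_path,
    #     output_file=output_file,
    #     batch_size=1000,
    #     max_workers=8,
    # )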