import pandas as pd
import os
import glob
from concurrent.futures import ThreadPoolExecutor, as_completed
from tqdm import tqdm
from functools import partial


def read_single_csv(file_path, expected_columns=None):
    """
    Helper that reads a single CSV file.

    Args:
        file_path (str): Path to the CSV file.
        expected_columns (list): Expected list of column names.

    Returns:
        tuple: (DataFrame or None, file name, error message or None)
    """
    try:
        df = pd.read_csv(file_path)
        # Reject files whose columns differ from the expected structure
        if expected_columns and df.columns.tolist() != expected_columns:
            return None, os.path.basename(file_path), "column structure mismatch"
        return df, os.path.basename(file_path), None
    except Exception as e:
        return None, os.path.basename(file_path), str(e)


def merge_single_row_csvs(folder_path, output_file='merged_data.csv', max_workers=None):
    """
    Merge all single-row CSV files in a folder into one large CSV file using a thread pool.

    Args:
        folder_path (str): Path to the folder containing the CSV files.
        output_file (str): Name of the output file.
        max_workers (int): Maximum number of threads; None uses the executor's default.
    """
    # Collect all CSV files in the folder
    csv_files = glob.glob(os.path.join(folder_path, "*.csv"))
    if not csv_files:
        print("No CSV files found in the folder")
        return
    print(f"Found {len(csv_files)} CSV files")

    # Read the first file to establish the expected column names
    try:
        first_df = pd.read_csv(csv_files[0])
        expected_columns = first_df.columns.tolist()
        print(f"Expected column structure: {expected_columns}")
    except Exception as e:
        print(f"Could not read the first file: {str(e)}")
        return
    # Containers for successfully read data and for failures
    all_data = []
    failed_files = []

    # Partial function with expected_columns pre-bound
    read_csv_partial = partial(read_single_csv, expected_columns=expected_columns)

    # Read the files concurrently with a thread pool
    print("Reading files with multiple threads...")
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        # Submit one task per file
        future_to_file = {executor.submit(read_csv_partial, file_path): file_path
                          for file_path in csv_files}
        # Collect results as they complete, showing progress with tqdm
        with tqdm(total=len(csv_files), desc="Reading CSV files") as pbar:
            for future in as_completed(future_to_file):
                df, filename, error = future.result()
                if df is not None:
                    all_data.append(df)
                else:
                    failed_files.append((filename, error))
                pbar.update(1)
                # Show success/failure counts in the progress bar postfix
                pbar.set_postfix({
                    'ok': len(all_data),
                    'failed': len(failed_files)
                })
    # Report the results
    print(f"\nProcessing finished:")
    print(f"Successfully read: {len(all_data)} files")
    print(f"Failed: {len(failed_files)} files")
    if failed_files:
        print("\nFailed files:")
        for filename, error in failed_files[:10]:  # show only the first 10 errors
            print(f"  {filename}: {error}")
        if len(failed_files) > 10:
            print(f"  ... plus {len(failed_files) - 10} more failed files")
    if not all_data:
        print("No data was read successfully")
        return

    # Concatenate all data
    print("\nMerging data...")
    with tqdm(desc="Merging data") as pbar:
        merged_df = pd.concat(all_data, ignore_index=True)
        pbar.update(1)

    # Save the merged data
    print("Saving file...")
    with tqdm(desc="Saving file") as pbar:
        merged_df.to_csv(output_file, index=False)
        pbar.update(1)

    print(f"\n✅ Merge complete!")
    print(f"{len(merged_df)} rows saved to {output_file}")

    # Data overview
    print(f"\n📊 Data overview:")
    print(f"Total rows: {len(merged_df):,}")
    print(f"Total columns: {len(merged_df.columns)}")
    print(f"File size: {os.path.getsize(output_file) / 1024 / 1024:.2f} MB")
    print(f"Columns: {list(merged_df.columns)}")

    # Preview the first few rows
    print(f"\n📝 Data preview:")
    print(merged_df.head())
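

# A minimal sketch (illustrative only, never called by this script) of the
# submit/as_completed pattern used in merge_single_row_csvs: submit one task
# per file, then consume results in completion order so a progress bar can
# advance as soon as any file finishes. The function name and the default
# max_workers value are placeholders, not part of the original script.
def _futures_pattern_sketch(paths, max_workers=4):
    results = []
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        futures = {pool.submit(read_single_csv, p): p for p in paths}
        for fut in as_completed(futures):
            results.append(fut.result())  # (DataFrame or None, file name, error or None)
    return results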


def merge_with_batch_processing(folder_path, output_file='merged_data.csv',
                                batch_size=1000, max_workers=None):
    """
    Merge a large number of CSV files in batches to keep memory usage low.

    Args:
        folder_path (str): Path to the folder containing the CSV files.
        output_file (str): Name of the output file.
        batch_size (int): Number of files processed per batch.
        max_workers (int): Maximum number of threads.
    """
    csv_files = glob.glob(os.path.join(folder_path, "*.csv"))
    if not csv_files:
        print("No CSV files found in the folder")
        return
    print(f"Found {len(csv_files)} CSV files; processing them in batches")

    # Read the first file to establish the expected column names
    try:
        first_df = pd.read_csv(csv_files[0])
        expected_columns = first_df.columns.tolist()
    except Exception as e:
        print(f"Could not read the first file: {str(e)}")
        return

    # Process the files batch by batch
    total_rows = 0
    is_first_batch = True
    with tqdm(total=len(csv_files), desc="Overall progress") as main_pbar:
        for i in range(0, len(csv_files), batch_size):
            batch_files = csv_files[i:i + batch_size]
            batch_data = []

            # Read the current batch with a thread pool
            read_csv_partial = partial(read_single_csv, expected_columns=expected_columns)
            with ThreadPoolExecutor(max_workers=max_workers) as executor:
                future_to_file = {executor.submit(read_csv_partial, file_path): file_path
                                  for file_path in batch_files}
                for future in as_completed(future_to_file):
                    df, filename, error = future.result()
                    if df is not None:
                        batch_data.append(df)
                    main_pbar.update(1)

            # Concatenate the current batch and append it to the output file
            if batch_data:
                batch_df = pd.concat(batch_data, ignore_index=True)
                # Create the file with a header on the first batch, append afterwards
                mode = 'w' if is_first_batch else 'a'
                header = is_first_batch
                batch_df.to_csv(output_file, mode=mode, header=header, index=False)
                total_rows += len(batch_df)
                is_first_batch = False
                print(f"\nBatch {i // batch_size + 1} done, added {len(batch_df)} rows")

    print(f"\n✅ All batches processed! {total_rows} rows saved to {output_file}")
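

# A minimal sketch (illustrative only, never called by this script) of the
# write-then-append pattern merge_with_batch_processing relies on: the first
# batch creates the output file and writes the header, every later batch
# appends rows without a header. The function name and "demo.csv" are
# placeholders, not part of the original script.
def _append_mode_sketch(demo_file="demo.csv"):
    first_batch = pd.DataFrame({"a": [1], "b": [2]})
    later_batch = pd.DataFrame({"a": [3], "b": [4]})
    first_batch.to_csv(demo_file, mode="w", header=True, index=False)   # create file + header row
    later_batch.to_csv(demo_file, mode="a", header=False, index=False)  # append data rows only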


# Usage example
if __name__ == "__main__":
    folder_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386"
    output_file = "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386.csv"

    # Option 1: standard multithreaded merge (recommended for medium-sized datasets)
    merge_single_row_csvs(
        folder_path=folder_path,
        output_file=output_file,
        max_workers=8  # adjust to your CPU core count
    )

    # Option 2: batched merge (recommended for large datasets, saves memory)
    # merge_with_batch_processing(
    #     folder_path=folder_path,
    #     output_file=output_file,
    #     batch_size=1000,
    #     max_workers=8
    # )