# dataset-builder / data1 / reporting / visualization.py
# (uploaded via huggingface_hub, revision debcb41)
"""
Visualization module: Generate publication-ready figures (PNG/SVG)
ACL conference style, 1920x1080 or A4 landscape size
"""
import matplotlib
matplotlib.use('Agg') # 非交互式后端
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
import seaborn as sns
import pandas as pd
import numpy as np
from pathlib import Path
import json
import statistics
from collections import Counter
# Font fallback mechanism
# Try Arial, fallback to DejaVu Sans (common on Linux) or sans-serif.
font_families_to_try = ['Arial', 'DejaVu Sans', 'Liberation Sans', 'sans-serif']
# Build the lowercase name set once: O(1) membership tests instead of
# rescanning the whole ttflist for every candidate family.
available_fonts = {f.name.lower() for f in fm.fontManager.ttflist}
# First installed candidate wins (case-insensitive match); the generic
# 'sans-serif' alias never appears in ttflist, so the explicit default
# below covers the "nothing installed" case.
font_found = next(
    (family for family in font_families_to_try
     if family.lower() in available_fonts),
    'sans-serif',
)
# Nature journal style: professional, high-contrast, color-rich.
# Large fonts for axis labels and tick values, smaller for titles/legends
# to avoid overlap; sizes were bumped up for PPT presentations.
plt.rcParams.update({
    'font.family': font_found,
    'font.size': 24,
    'axes.labelsize': 42,        # large axis labels (was 32)
    'axes.titlesize': 30,        # titles (was 20)
    'xtick.labelsize': 36,       # large tick values (was 28)
    'ytick.labelsize': 36,       # large tick values (was 28)
    'legend.fontsize': 20,       # legend (was 16)
    'figure.titlesize': 32,      # figure titles (was 24)
    'axes.linewidth': 1.5,
    'axes.spines.top': False,    # open top/right frame, Nature-style
    'axes.spines.right': False,
    'axes.grid': True,
    'grid.alpha': 0.3,
    'grid.linewidth': 0.5,
    'axes.unicode_minus': False, # render minus as ASCII hyphen
})
# Nature color scheme (high contrast, professional).
# Keys are referenced by name throughout the plotting methods below
# (e.g. NATURE_COLORS['primary'] for the main bar/scatter color).
NATURE_COLORS = {
    'primary': '#2E5090', # Nature blue
    'secondary': '#1A5490',
    'accent': '#4A90E2',
    'success': '#2E7D32',  # used for YES bars
    'warning': '#F57C00',
    'error': '#C62828',    # used for NO bars
    'neutral': '#424242',
    'light': '#E3F2FD'
}
# Nature style palette
# Ordered blues for multi-series plots (kept for callers/extensions).
nature_palette = ['#2E5090', '#4A90E2', '#1A5490', '#6BA3D8', '#94C4E8']
class Visualization:
    """Render publication-style report figures (PNG + SVG).

    Figures are written to ``output_dir`` with sequential
    ``fig_NN_<name>`` file names so they sort in generation order.
    """

    def __init__(self, output_dir, figsize=(19.2, 10.8), dpi=150):
        """Create the output directory and store figure defaults.

        Args:
            output_dir: directory that will receive the figure files
                (created, parents included, if missing).
            figsize: default figure size in inches
                (19.2 x 10.8 == 1920x1080 px at 100 dpi).
            dpi: raster resolution for PNG output.
        """
        self.output_dir = Path(output_dir)
        self.output_dir.mkdir(parents=True, exist_ok=True)
        self.figsize = figsize
        self.dpi = dpi  # raised DPI for sharper PNGs
        self.fig_counter = 1  # sequence number embedded in file names

    @staticmethod
    def _repo_label(repo_top_n):
        """Title scope label: 'Top N Repositories' or 'All Repositories'.

        Centralizes the label logic that was previously duplicated in
        four plotting methods.
        """
        return f'Top {repo_top_n:,} Repositories' if repo_top_n else 'All Repositories'

    def apply_nature_style(self, ax):
        """Apply Nature journal style (open frame, dashed grid) to *ax*."""
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.spines['left'].set_linewidth(1.5)
        ax.spines['bottom'].set_linewidth(1.5)
        ax.grid(True, alpha=0.3, linestyle='--', linewidth=0.5)
        ax.tick_params(width=1.5, length=5)

    def save_fig(self, fig, name):
        """Save *fig* as numbered PNG + SVG, close it, bump the counter."""
        fig_path_png = self.output_dir / f"fig_{self.fig_counter:02d}_{name}.png"
        fig_path_svg = self.output_dir / f"fig_{self.fig_counter:02d}_{name}.svg"
        fig.savefig(fig_path_png, dpi=self.dpi, bbox_inches='tight', facecolor='white')
        fig.savefig(fig_path_svg, bbox_inches='tight', facecolor='white')
        plt.close(fig)
        self.fig_counter += 1
        print(f"Saved: {fig_path_png}")

    def plot_funnel(self, stage_a_dir, stage_b_dir, repo_meta_dir, top_n=None):
        """Funnel chart of pipeline stage counts: search -> filter -> deep analysis.

        Args:
            stage_a_dir: directory containing ``summary_overall.json``.
            stage_b_dir: directory containing ``filter_summary.json``.
            repo_meta_dir: directory containing ``repo_meta_summary.json``
                (optional; used for the deep-analysis count when present).
            top_n: optional explicit deep-analysis repo count for the label.
        """
        stage_a_path = Path(stage_a_dir) / 'summary_overall.json'
        stage_b_path = Path(stage_b_dir) / 'filter_summary.json'
        repo_meta_path = Path(repo_meta_dir) / 'repo_meta_summary.json'
        with open(stage_a_path, 'r') as f:
            stage_a = json.load(f)
        with open(stage_b_path, 'r') as f:
            stage_b = json.load(f)
        # Deep-analysis count comes from repo_meta when readable; otherwise
        # fall back to top_n (or 0).  Was a bare `except:` which also
        # swallowed KeyboardInterrupt/SystemExit — narrowed to the two
        # failure modes that can actually occur here.
        deep_analysis_count = top_n if top_n else 0
        try:
            with open(repo_meta_path, 'r') as f:
                repo_meta = json.load(f)
            deep_analysis_count = repo_meta.get('total_repos', deep_analysis_count)
        except (OSError, json.JSONDecodeError):
            pass  # missing/unreadable summary: keep the fallback count
        # Label prefers the explicit top_n cutoff over the measured count.
        deep_analysis_label = f'Deep Analysis\n({(top_n if top_n else deep_analysis_count):,} repos)'
        stages = ['Search Stage\n(~1.3M repos)', 'Filtered\n(~30K repos)', deep_analysis_label]
        values = [
            stage_a['total_records'],
            stage_b['total'],
            deep_analysis_count
        ]
        fig, ax = plt.subplots(figsize=self.figsize)
        self.apply_nature_style(ax)
        # Draw funnel - Nature blue gradient, one color per stage.
        y_pos = np.arange(len(stages))
        colors = [NATURE_COLORS['primary'], NATURE_COLORS['accent'], NATURE_COLORS['secondary']]
        bars = ax.barh(y_pos, values, color=colors, alpha=0.85, edgecolor='white', linewidth=2)
        ax.set_yticks(y_pos)
        ax.set_yticklabels(stages, fontsize=36)
        ax.set_xlabel('Number of Repositories', fontsize=42, fontweight='bold')
        ax.set_title('Data Pipeline: Search → Filter → Deep Analysis', fontsize=30, fontweight='bold', pad=20)
        # Value labels, centered inside each bar.
        for i, (bar, val) in enumerate(zip(bars, values)):
            ax.text(val * 0.5, i, f'{val:,}', ha='center', va='center',
                    fontsize=24, fontweight='bold', color='white')
        ax.invert_yaxis()  # widest stage on top
        plt.tight_layout()
        self.save_fig(fig, 'funnel')

    def plot_top_keywords(self, csv_path, top_n=20):
        """Horizontal bar chart of the top-N keywords by repository count.

        Expects a CSV with ``keyword`` and ``count`` columns, pre-sorted
        descending (only the first *top_n* rows are plotted).
        """
        df = pd.read_csv(csv_path)
        df_top = df.head(top_n)
        fig, ax = plt.subplots(figsize=self.figsize)
        self.apply_nature_style(ax)
        ax.barh(range(len(df_top)), df_top['count'],
                color=NATURE_COLORS['primary'], alpha=0.85,
                edgecolor='white', linewidth=1.5)
        ax.set_yticks(range(len(df_top)))
        ax.set_yticklabels(df_top['keyword'], fontsize=36, rotation=15, ha='right')
        ax.set_xlabel('Number of Repositories', fontsize=42, fontweight='bold')
        ax.set_title(f'Top {top_n} Keywords (Search Stage)', fontsize=30, fontweight='bold', pad=20)
        # Value labels, centered inside each bar.
        for i, (idx, row) in enumerate(df_top.iterrows()):
            ax.text(row['count'] * 0.5, i, f"{int(row['count']):,}",
                    ha='center', va='center', fontsize=24, fontweight='bold', color='white')
        ax.invert_yaxis()
        plt.tight_layout()
        self.save_fig(fig, 'top_keywords')

    def plot_language_distribution(self, csv_path, top_n=15):
        """Bar chart of the top-N programming languages (search stage).

        Expects a CSV with ``language`` and ``count`` columns; rows whose
        language is the sentinel '<empty>' are dropped before taking head.
        """
        df = pd.read_csv(csv_path)
        df = df[df['language'] != '<empty>'].head(top_n)
        fig, ax = plt.subplots(figsize=self.figsize)
        self.apply_nature_style(ax)
        ax.bar(range(len(df)), df['count'],
               color=NATURE_COLORS['primary'], alpha=0.85,
               edgecolor='white', linewidth=1.5)
        ax.set_xticks(range(len(df)))
        ax.set_xticklabels(df['language'], rotation=45, ha='right', fontsize=36)
        ax.set_ylabel('Number of Repositories', fontsize=42, fontweight='bold')
        ax.set_title(f'Top {top_n} Programming Languages (Search Stage)',
                     fontsize=30, fontweight='bold', pad=20)
        # Value labels, rotated 45° to avoid overlapping neighbors.
        for i, count in enumerate(df['count']):
            ax.text(i, count, f"{int(count):,}", ha='center', va='bottom',
                    fontsize=24, fontweight='bold', rotation=45)
        plt.tight_layout()
        self.save_fig(fig, 'language_distribution')

    def plot_stars_distribution(self, csv_path):
        """Histogram of repository star counts on a log-scaled x axis.

        Expects a CSV with a ``stars`` column.
        """
        df = pd.read_csv(csv_path)
        fig, ax = plt.subplots(figsize=self.figsize)
        self.apply_nature_style(ax)
        # Log-spaced bins (+1 so repos with max stars land inside the range).
        log_bins = np.logspace(0, np.log10(df['stars'].max() + 1), 50)
        ax.hist(df['stars'], bins=log_bins,
                color=NATURE_COLORS['primary'], alpha=0.75,
                edgecolor='white', linewidth=1)
        ax.set_xscale('log')
        ax.set_xlabel('Stars (log scale)', fontsize=42, fontweight='bold')
        ax.set_ylabel('Number of Repositories', fontsize=42, fontweight='bold')
        ax.set_title('Distribution of Repository Stars (Log Scale)',
                     fontsize=30, fontweight='bold', pad=20)
        plt.tight_layout()
        self.save_fig(fig, 'stars_distribution')

    def plot_filter_results(self, csv_path):
        """Grouped bar chart of filter YES/NO counts per keyword.

        Expects a CSV with ``keyword``, ``yes`` and ``no`` columns;
        only the first 15 keywords are shown.
        """
        df = pd.read_csv(csv_path)
        df = df.head(15)  # top 15 keywords
        fig, ax = plt.subplots(figsize=self.figsize)
        self.apply_nature_style(ax)
        x = np.arange(len(df))
        width = 0.4
        ax.bar(x - width/2, df['yes'], width, label='Relevant (YES)',
               color=NATURE_COLORS['success'], alpha=0.85, edgecolor='white', linewidth=1.5)
        ax.bar(x + width/2, df['no'], width, label='Irrelevant (NO)',
               color=NATURE_COLORS['error'], alpha=0.85, edgecolor='white', linewidth=1.5)
        ax.set_xlabel('Keyword', fontsize=42, fontweight='bold')
        ax.set_ylabel('Number of Repositories', fontsize=42, fontweight='bold')
        ax.set_title('Filter Results: YES/NO Distribution by Keyword',
                     fontsize=30, fontweight='bold', pad=20)
        ax.set_xticks(x)
        ax.set_xticklabels(df['keyword'], rotation=45, ha='right', fontsize=36)
        ax.legend(fontsize=20, frameon=True, fancybox=True, shadow=True)
        plt.tight_layout()
        self.save_fig(fig, 'filter_results_by_keyword')

    def plot_reason_length_comparison(self, csv_path):
        """Box plots comparing filter-reason text length for YES vs NO.

        Expects a CSV with ``label`` ('YES'/'NO') and ``length`` columns.
        """
        df = pd.read_csv(csv_path)
        fig, ax = plt.subplots(figsize=self.figsize)
        self.apply_nature_style(ax)
        yes_lengths = df[df['label'] == 'YES']['length']
        no_lengths = df[df['label'] == 'NO']['length']
        # NOTE(review): `labels=` was renamed `tick_labels=` in matplotlib 3.9;
        # kept here for compatibility with older versions.
        ax.boxplot([yes_lengths, no_lengths], labels=['YES', 'NO'],
                   patch_artist=True,
                   widths=0.6,
                   boxprops=dict(facecolor=NATURE_COLORS['primary'], alpha=0.7, linewidth=2),
                   medianprops=dict(color='white', linewidth=3),
                   whiskerprops=dict(linewidth=2),
                   capprops=dict(linewidth=2),
                   flierprops=dict(marker='o', markersize=8, alpha=0.5))
        ax.set_ylabel('Reason Length (Characters)', fontsize=42, fontweight='bold')
        ax.set_title('Comparison of Filter Reason Length: YES vs NO',
                     fontsize=30, fontweight='bold', pad=20)
        ax.set_xticklabels(['Relevant (YES)', 'Irrelevant (NO)'], fontsize=36)
        plt.tight_layout()
        self.save_fig(fig, 'reason_length_comparison')

    def plot_extension_distribution(self, csv_path, top_n=20, repo_top_n=None):
        """Horizontal bar chart of file-extension frequencies.

        Args:
            csv_path: CSV with ``extension`` and ``count`` columns.
            top_n: number of extensions to display.
            repo_top_n: optional repo cutoff, only used in the title.
        """
        df = pd.read_csv(csv_path)
        df = df.head(top_n)
        fig, ax = plt.subplots(figsize=self.figsize)
        self.apply_nature_style(ax)
        ax.barh(range(len(df)), df['count'],
                color=NATURE_COLORS['accent'], alpha=0.85,
                edgecolor='white', linewidth=1.5)
        ax.set_yticks(range(len(df)))
        ax.set_yticklabels(df['extension'], fontsize=36, rotation=15, ha='right')
        ax.set_xlabel('Number of Files', fontsize=42, fontweight='bold')
        ax.set_title(f'Top {top_n} File Extension Distribution ({self._repo_label(repo_top_n)})',
                     fontsize=30, fontweight='bold', pad=20)
        ax.invert_yaxis()
        plt.tight_layout()
        self.save_fig(fig, 'extension_distribution')

    def plot_repo_file_count_distribution(self, csv_path, repo_top_n=None):
        """Histogram of per-repository file counts on a log-scaled x axis.

        Expects a CSV with a ``total_files`` column; *repo_top_n* only
        affects the title.
        """
        df = pd.read_csv(csv_path)
        fig, ax = plt.subplots(figsize=self.figsize)
        self.apply_nature_style(ax)
        # Log-spaced bins: file counts span several orders of magnitude.
        log_bins = np.logspace(0, np.log10(df['total_files'].max() + 1), 50)
        ax.hist(df['total_files'], bins=log_bins,
                color=NATURE_COLORS['primary'], alpha=0.75,
                edgecolor='white', linewidth=1)
        ax.set_xscale('log')
        ax.set_xlabel('Number of Files (log scale)', fontsize=42, fontweight='bold')
        ax.set_ylabel('Number of Repositories', fontsize=42, fontweight='bold')
        ax.set_title(f'Distribution of Repository File Counts ({self._repo_label(repo_top_n)})',
                     fontsize=30, fontweight='bold', pad=20)
        plt.tight_layout()
        self.save_fig(fig, 'repo_file_count_distribution')

    def plot_stars_vs_code_size(self, repos_searched_csv, repo_level_csv, repo_top_n=None):
        """Log-log scatter of stars vs lines of code (inner join on full_name).

        Args:
            repos_searched_csv: CSV with ``full_name`` and ``stars``.
            repo_level_csv: CSV with ``full_name``/``repo_name`` and
                ``total_code_lines`` (repos with 0 LOC are dropped).
            repo_top_n: optional repo cutoff, only used in the title.
        """
        # Stars per repo from the search-stage export.
        df_searched = pd.read_csv(repos_searched_csv, usecols=['full_name', 'stars'])
        df_searched = df_searched.dropna(subset=['stars'])
        df_searched['stars'] = df_searched['stars'].astype(float)
        # Repo-level code metrics; recover full_name from the '___'-mangled
        # repo_name when missing.
        df_repo = pd.read_csv(repo_level_csv)
        df_repo['full_name'] = df_repo['full_name'].fillna(df_repo['repo_name'].str.replace('___', '/'))
        df_merged = df_repo.merge(df_searched, on='full_name', how='inner')
        df_merged = df_merged[df_merged['total_code_lines'] > 0]
        if len(df_merged) == 0:
            print("Warning: No data to plot stars vs code size")
            return
        fig, ax = plt.subplots(figsize=self.figsize)
        self.apply_nature_style(ax)
        ax.scatter(df_merged['total_code_lines'], df_merged['stars'],
                   alpha=0.4, s=30, color=NATURE_COLORS['primary'], edgecolors='white', linewidth=0.5)
        ax.set_xscale('log')
        ax.set_yscale('log')
        ax.set_xlabel('Lines of Code (LOC, log scale)', fontsize=42, fontweight='bold')
        ax.set_ylabel('Stars (log scale)', fontsize=42, fontweight='bold')
        ax.set_title(f'Stars vs Code Size ({self._repo_label(repo_top_n)})',
                     fontsize=30, fontweight='bold', pad=20)
        # Pearson correlation in log-log space, annotated on the axes.
        corr = np.corrcoef(np.log10(df_merged['total_code_lines']),
                           np.log10(df_merged['stars']))[0, 1]
        ax.text(0.05, 0.95, f'Correlation: r = {corr:.3f}', transform=ax.transAxes,
                fontsize=24, verticalalignment='top', fontweight='bold',
                bbox=dict(boxstyle='round', facecolor='white', alpha=0.8, edgecolor=NATURE_COLORS['primary'], linewidth=2))
        plt.tight_layout()
        self.save_fig(fig, 'stars_vs_code_size')

    def plot_repo_stats_by_language(self, repo_level_csv, repo_top_n=None):
        """2x2 panel of per-language code statistics (top-10 languages).

        Panels: mean LOC, mean function count, mean comment ratio, mean
        language entropy, each grouped by ``primary_language``.
        """
        df = pd.read_csv(repo_level_csv)
        df = df[df['primary_language'] != 'unknown']
        # Keep only the 10 most frequent primary languages.
        top_langs = df['primary_language'].value_counts().head(10).index
        df = df[df['primary_language'].isin(top_langs)]
        # Larger canvas than the default (19.2, 10.8) to avoid label overlap.
        larger_figsize = (24, 16)
        fig, axes = plt.subplots(2, 2, figsize=larger_figsize)
        colors_list = [NATURE_COLORS['primary'], NATURE_COLORS['accent'],
                       NATURE_COLORS['success'], NATURE_COLORS['secondary']]

        def _panel(ax, column, color, title, ylabel):
            """One bar panel: per-language mean of *column*, sorted descending."""
            self.apply_nature_style(ax)
            lang_stats = df.groupby('primary_language')[column].mean().sort_values(ascending=False)
            lang_stats.plot(kind='bar', ax=ax, color=color, alpha=0.85, edgecolor='white', linewidth=1.5)
            ax.set_title(title, fontsize=28, fontweight='bold', pad=25)
            ax.set_xlabel('')
            ax.set_ylabel(ylabel, fontsize=42)
            ax.tick_params(axis='x', rotation=45, labelsize=36, pad=10)
            ax.tick_params(axis='y', labelsize=36, pad=10)

        _panel(axes[0, 0], 'total_code_lines', colors_list[0],
               'Average Lines of Code', 'Average LOC')
        _panel(axes[0, 1], 'total_functions', colors_list[1],
               'Average Number of Functions', 'Average Functions')
        _panel(axes[1, 0], 'comment_ratio', colors_list[2],
               'Average Comment Ratio', 'Comment Ratio')
        _panel(axes[1, 1], 'language_entropy', colors_list[3],
               'Average Language Diversity', 'Language Entropy')

        plt.suptitle(f'Repository Code Statistics by Primary Language ({self._repo_label(repo_top_n)})',
                     fontsize=36, fontweight='bold', y=0.995)
        # Generous vertical gap between rows; `top` shrunk so the suptitle
        # does not collide with the upper panel titles.
        plt.subplots_adjust(hspace=0.8, wspace=0.4, top=0.88, bottom=0.08, left=0.08, right=0.95)
        # Save the figure we created (was `plt.gcf()`, which would pick up
        # whatever figure happened to be current).
        self.save_fig(fig, 'repo_stats_by_language')

    def plot_keyword_wordcloud(self, csv_path, max_words=200):
        """Generate a keyword word cloud (blue colormap, Nature style).

        Best-effort: skipped with a message when the optional ``wordcloud``
        package is missing; any other failure is logged with a traceback.

        Args:
            csv_path: CSV with ``keyword`` and ``count`` columns.
            max_words: cap on the number of words rendered.
        """
        try:
            from wordcloud import WordCloud
        except ImportError:
            print("Warning: wordcloud library not installed. Skipping wordcloud generation.")
            print("Install with: pip install wordcloud")
            return
        try:
            df = pd.read_csv(csv_path)
            # Frequency dict; counts coerced to int for WordCloud.
            keyword_freq = {}
            for keyword, count in zip(df['keyword'], df['count']):
                keyword_freq[str(keyword)] = int(count)
            wordcloud = WordCloud(
                width=1920,
                height=1080,
                background_color='white',
                colormap='Blues',  # blue colormap for Nature style
                max_words=max_words,
                relative_scaling=0.5,
                min_font_size=10,
                prefer_horizontal=0.7,
                collocations=False
            )
            wordcloud.generate_from_frequencies(keyword_freq)
            fig, ax = plt.subplots(figsize=self.figsize)
            # Convert WordCloud -> PIL image -> numpy array; avoids a numpy
            # version incompatibility with WordCloud.to_array's copy parameter.
            pil_image = wordcloud.to_image()
            wordcloud_array = np.array(pil_image)
            ax.imshow(wordcloud_array, interpolation='bilinear')
            ax.axis('off')
            ax.set_title('Keyword Word Cloud (Search Stage)',
                         fontsize=30, fontweight='bold', pad=20)
            plt.tight_layout()
            # PNG only (no SVG: the cloud is raster data), so save_fig is
            # not reused here; the counter is still advanced for numbering.
            fig_path_png = self.output_dir / f"fig_{self.fig_counter:02d}_keyword_wordcloud.png"
            fig.savefig(fig_path_png, dpi=self.dpi, bbox_inches='tight', facecolor='white')
            plt.close(fig)
            self.fig_counter += 1
            print(f"Saved: {fig_path_png}")
        except Exception as e:
            print(f"Error generating wordcloud: {e}")
            import traceback
            traceback.print_exc()
def generate_all_visualizations(stage_a_dir, stage_b_dir, repo_meta_dir, code_stats_dir, repos_searched_csv, top_n=None):
    """Generate every report figure, continuing past individual failures.

    Figures land in a ``figures`` directory next to *stage_a_dir*.

    Args:
        stage_a_dir: directory with search-stage summaries/CSVs.
        stage_b_dir: directory with filter-stage summaries/CSVs.
        repo_meta_dir: directory with repository-metadata CSVs.
        code_stats_dir: directory with code-statistics CSVs.
        repos_searched_csv: CSV of searched repos (full_name, stars).
        top_n: optional repo cutoff; selects ``*_topN`` input files and
            is forwarded to the plots for titles/labels.
    """
    viz = Visualization(Path(stage_a_dir).parent / 'figures')
    print("Generating visualizations...")
    # File-name suffix for inputs that were produced with a top-N cutoff.
    top_n_suffix = f"_top{top_n}" if top_n else ""

    def _run(desc, plot_fn, *args, **kwargs):
        """Best-effort call: one failing chart must not abort the rest."""
        try:
            plot_fn(*args, **kwargs)
        except Exception as e:
            print(f"Error {desc}: {e}")

    # Stage A charts
    _run("plotting funnel", viz.plot_funnel,
         stage_a_dir, stage_b_dir, repo_meta_dir, top_n=top_n)
    _run("plotting top keywords", viz.plot_top_keywords,
         Path(stage_a_dir) / 'by_keyword.csv')
    _run("generating keyword wordcloud", viz.plot_keyword_wordcloud,
         Path(stage_a_dir) / 'by_keyword.csv')
    _run("plotting language distribution", viz.plot_language_distribution,
         Path(stage_a_dir) / 'by_language.csv')
    _run("plotting stars distribution", viz.plot_stars_distribution,
         Path(stage_a_dir) / 'stars_distribution.csv')
    # Stage B charts
    _run("plotting filter results", viz.plot_filter_results,
         Path(stage_b_dir) / 'filter_by_keyword.csv')
    _run("plotting reason length comparison", viz.plot_reason_length_comparison,
         Path(stage_b_dir) / 'reason_length_distribution.csv')
    # Repo meta charts
    _run("plotting extension distribution", viz.plot_extension_distribution,
         Path(repo_meta_dir) / 'extension_distribution.csv', repo_top_n=top_n)
    _run("plotting repo file count distribution", viz.plot_repo_file_count_distribution,
         Path(repo_meta_dir) / f'repo_meta_scan{top_n_suffix}.csv', repo_top_n=top_n)
    # Code stats charts
    _run("plotting stars vs code size", viz.plot_stars_vs_code_size,
         repos_searched_csv, Path(code_stats_dir) / f'repo_level_metrics{top_n_suffix}.csv', repo_top_n=top_n)
    _run("plotting repo stats by language", viz.plot_repo_stats_by_language,
         Path(code_stats_dir) / f'repo_level_metrics{top_n_suffix}.csv', repo_top_n=top_n)
    print(f"Visualization complete! All figures saved to {viz.output_dir}")