DouDou committed on
Commit
debcb41
·
verified ·
1 Parent(s): 156430e

Upload data1/reporting/visualization.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. data1/reporting/visualization.py +595 -0
data1/reporting/visualization.py ADDED
@@ -0,0 +1,595 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""
Visualization module: Generate publication-ready figures (PNG/SVG)
ACL conference style, 1920x1080 or A4 landscape size
"""
import matplotlib
matplotlib.use('Agg')  # non-interactive backend: render figures without a display
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
import seaborn as sns
import pandas as pd
import numpy as np
from pathlib import Path
import json
import statistics
from collections import Counter

# Font fallback mechanism:
# try Arial first, then DejaVu Sans (common on Linux), then generic sans-serif.
font_families_to_try = ['Arial', 'DejaVu Sans', 'Liberation Sans', 'sans-serif']
available_fonts = [f.name for f in fm.fontManager.ttflist]

# Case-insensitive membership test against the installed font set.
_available_lower = {name.lower() for name in available_fonts}
font_found = next(
    (family for family in font_families_to_try if family.lower() in _available_lower),
    'sans-serif',  # generic fallback when none of the candidates is installed
)

# Nature journal style: professional, high-contrast, color-rich.
# Large fonts for axis labels and tick values, smaller for titles/legends to
# avoid overlap; sizes were increased across the board for PPT presentation.
plt.rcParams.update({
    'font.family': font_found,
    'font.size': 24,
    'axes.labelsize': 42,       # large axis labels (increased from 32)
    'axes.titlesize': 30,       # titles (increased from 20)
    'xtick.labelsize': 36,      # large tick values (increased from 28)
    'ytick.labelsize': 36,      # large tick values (increased from 28)
    'legend.fontsize': 20,      # legend (increased from 16)
    'figure.titlesize': 32,     # figure titles (increased from 24)
    'axes.linewidth': 1.5,
    'axes.spines.top': False,
    'axes.spines.right': False,
    'axes.grid': True,
    'grid.alpha': 0.3,
    'grid.linewidth': 0.5,
    'axes.unicode_minus': False,
})

# Nature color scheme (high contrast, professional)
NATURE_COLORS = {
    'primary': '#2E5090',    # Nature blue
    'secondary': '#1A5490',
    'accent': '#4A90E2',
    'success': '#2E7D32',
    'warning': '#F57C00',
    'error': '#C62828',
    'neutral': '#424242',
    'light': '#E3F2FD'
}

# Nature style palette
nature_palette = ['#2E5090', '#4A90E2', '#1A5490', '#6BA3D8', '#94C4E8']
+ class Visualization:
70
+ def __init__(self, output_dir, figsize=(19.2, 10.8), dpi=150):
71
+ self.output_dir = Path(output_dir)
72
+ self.output_dir.mkdir(parents=True, exist_ok=True)
73
+ self.figsize = figsize
74
+ self.dpi = dpi # 提高DPI以获得更清晰的图片
75
+ self.fig_counter = 1
76
+
77
+ def apply_nature_style(self, ax):
78
+ """Apply Nature journal style to axes"""
79
+ ax.spines['top'].set_visible(False)
80
+ ax.spines['right'].set_visible(False)
81
+ ax.spines['left'].set_linewidth(1.5)
82
+ ax.spines['bottom'].set_linewidth(1.5)
83
+ ax.grid(True, alpha=0.3, linestyle='--', linewidth=0.5)
84
+ ax.tick_params(width=1.5, length=5)
85
+
86
+ def save_fig(self, fig, name):
87
+ """保存图片"""
88
+ fig_path_png = self.output_dir / f"fig_{self.fig_counter:02d}_{name}.png"
89
+ fig_path_svg = self.output_dir / f"fig_{self.fig_counter:02d}_{name}.svg"
90
+ fig.savefig(fig_path_png, dpi=self.dpi, bbox_inches='tight', facecolor='white')
91
+ fig.savefig(fig_path_svg, bbox_inches='tight', facecolor='white')
92
+ plt.close(fig)
93
+ self.fig_counter += 1
94
+ print(f"Saved: {fig_path_png}")
95
+
96
+ def plot_funnel(self, stage_a_dir, stage_b_dir, repo_meta_dir, top_n=None):
97
+ """绘制漏斗图:搜索->过滤->深度分析"""
98
+ # 读取数据
99
+ stage_a_path = Path(stage_a_dir) / 'summary_overall.json'
100
+ stage_b_path = Path(stage_b_dir) / 'filter_summary.json'
101
+ repo_meta_path = Path(repo_meta_dir) / 'repo_meta_summary.json'
102
+
103
+ with open(stage_a_path, 'r') as f:
104
+ stage_a = json.load(f)
105
+
106
+ with open(stage_b_path, 'r') as f:
107
+ stage_b = json.load(f)
108
+
109
+ # 尝试读取repo_meta数据获取实际数量
110
+ deep_analysis_count = top_n if top_n else 0
111
+ try:
112
+ with open(repo_meta_path, 'r') as f:
113
+ repo_meta = json.load(f)
114
+ deep_analysis_count = repo_meta.get('total_repos', deep_analysis_count)
115
+ except:
116
+ pass
117
+
118
+ # 动态生成标签
119
+ if top_n:
120
+ deep_analysis_label = f'Deep Analysis\n({top_n:,} repos)'
121
+ else:
122
+ deep_analysis_label = f'Deep Analysis\n({deep_analysis_count:,} repos)'
123
+
124
+ stages = ['Search Stage\n(~1.3M repos)', 'Filtered\n(~30K repos)', deep_analysis_label]
125
+ values = [
126
+ stage_a['total_records'],
127
+ stage_b['total'],
128
+ deep_analysis_count
129
+ ]
130
+
131
+ fig, ax = plt.subplots(figsize=self.figsize)
132
+ self.apply_nature_style(ax)
133
+
134
+ # Draw funnel - Use Nature blue gradient
135
+ y_pos = np.arange(len(stages))
136
+ colors = [NATURE_COLORS['primary'], NATURE_COLORS['accent'], NATURE_COLORS['secondary']]
137
+
138
+ bars = ax.barh(y_pos, values, color=colors, alpha=0.85, edgecolor='white', linewidth=2)
139
+ ax.set_yticks(y_pos)
140
+ ax.set_yticklabels(stages, fontsize=36)
141
+ ax.set_xlabel('Number of Repositories', fontsize=42, fontweight='bold')
142
+ ax.set_title('Data Pipeline: Search → Filter → Deep Analysis', fontsize=30, fontweight='bold', pad=20)
143
+
144
+ # Add value labels
145
+ for i, (bar, val) in enumerate(zip(bars, values)):
146
+ ax.text(val * 0.5, i, f'{val:,}', ha='center', va='center',
147
+ fontsize=24, fontweight='bold', color='white')
148
+
149
+ ax.invert_yaxis()
150
+ plt.tight_layout()
151
+
152
+ self.save_fig(fig, 'funnel')
153
+
154
+ def plot_top_keywords(self, csv_path, top_n=20):
155
+ """绘制Top keywords条形图"""
156
+ df = pd.read_csv(csv_path)
157
+ df_top = df.head(top_n)
158
+
159
+ fig, ax = plt.subplots(figsize=self.figsize)
160
+ self.apply_nature_style(ax)
161
+
162
+ # Use Nature blue
163
+ bars = ax.barh(range(len(df_top)), df_top['count'],
164
+ color=NATURE_COLORS['primary'], alpha=0.85,
165
+ edgecolor='white', linewidth=1.5)
166
+ ax.set_yticks(range(len(df_top)))
167
+ ax.set_yticklabels(df_top['keyword'], fontsize=36, rotation=15, ha='right')
168
+ ax.set_xlabel('Number of Repositories', fontsize=42, fontweight='bold')
169
+ ax.set_title(f'Top {top_n} Keywords (Search Stage)', fontsize=30, fontweight='bold', pad=20)
170
+
171
+ # Add value labels
172
+ for i, (idx, row) in enumerate(df_top.iterrows()):
173
+ ax.text(row['count'] * 0.5, i, f"{int(row['count']):,}",
174
+ ha='center', va='center', fontsize=24, fontweight='bold', color='white')
175
+
176
+ ax.invert_yaxis()
177
+ plt.tight_layout()
178
+
179
+ self.save_fig(fig, 'top_keywords')
180
+
181
+ def plot_language_distribution(self, csv_path, top_n=15):
182
+ """绘制语言分布"""
183
+ df = pd.read_csv(csv_path)
184
+ df = df[df['language'] != '<empty>'].head(top_n)
185
+
186
+ fig, ax = plt.subplots(figsize=self.figsize)
187
+ self.apply_nature_style(ax)
188
+
189
+ bars = ax.bar(range(len(df)), df['count'],
190
+ color=NATURE_COLORS['primary'], alpha=0.85,
191
+ edgecolor='white', linewidth=1.5)
192
+ ax.set_xticks(range(len(df)))
193
+ ax.set_xticklabels(df['language'], rotation=45, ha='right', fontsize=36)
194
+ ax.set_ylabel('Number of Repositories', fontsize=42, fontweight='bold')
195
+ ax.set_title(f'Top {top_n} Programming Languages (Search Stage)',
196
+ fontsize=30, fontweight='bold', pad=20)
197
+
198
+ # 添加数值标签(旋转45度避免重叠)
199
+ for i, count in enumerate(df['count']):
200
+ ax.text(i, count, f"{int(count):,}", ha='center', va='bottom',
201
+ fontsize=24, fontweight='bold', rotation=45)
202
+
203
+ plt.tight_layout()
204
+
205
+ self.save_fig(fig, 'language_distribution')
206
+
207
+ def plot_stars_distribution(self, csv_path):
208
+ """绘制stars分布(对数坐标)"""
209
+ df = pd.read_csv(csv_path)
210
+
211
+ fig, ax = plt.subplots(figsize=self.figsize)
212
+ self.apply_nature_style(ax)
213
+
214
+ # Use log bins
215
+ log_bins = np.logspace(0, np.log10(df['stars'].max() + 1), 50)
216
+ ax.hist(df['stars'], bins=log_bins,
217
+ color=NATURE_COLORS['primary'], alpha=0.75,
218
+ edgecolor='white', linewidth=1)
219
+ ax.set_xscale('log')
220
+ ax.set_xlabel('Stars (log scale)', fontsize=42, fontweight='bold')
221
+ ax.set_ylabel('Number of Repositories', fontsize=42, fontweight='bold')
222
+ ax.set_title('Distribution of Repository Stars (Log Scale)',
223
+ fontsize=30, fontweight='bold', pad=20)
224
+
225
+ plt.tight_layout()
226
+
227
+ self.save_fig(fig, 'stars_distribution')
228
+
229
+ def plot_filter_results(self, csv_path):
230
+ """绘制过滤结果(按keyword的YES/NO)"""
231
+ df = pd.read_csv(csv_path)
232
+ df = df.head(15) # Top 15 keywords
233
+
234
+ fig, ax = plt.subplots(figsize=self.figsize)
235
+ self.apply_nature_style(ax)
236
+
237
+ x = np.arange(len(df))
238
+ width = 0.4
239
+
240
+ bars1 = ax.bar(x - width/2, df['yes'], width, label='Relevant (YES)',
241
+ color=NATURE_COLORS['success'], alpha=0.85, edgecolor='white', linewidth=1.5)
242
+ bars2 = ax.bar(x + width/2, df['no'], width, label='Irrelevant (NO)',
243
+ color=NATURE_COLORS['error'], alpha=0.85, edgecolor='white', linewidth=1.5)
244
+
245
+ ax.set_xlabel('Keyword', fontsize=42, fontweight='bold')
246
+ ax.set_ylabel('Number of Repositories', fontsize=42, fontweight='bold')
247
+ ax.set_title('Filter Results: YES/NO Distribution by Keyword',
248
+ fontsize=30, fontweight='bold', pad=20)
249
+ ax.set_xticks(x)
250
+ ax.set_xticklabels(df['keyword'], rotation=45, ha='right', fontsize=36)
251
+ ax.legend(fontsize=20, frameon=True, fancybox=True, shadow=True)
252
+
253
+ plt.tight_layout()
254
+
255
+ self.save_fig(fig, 'filter_results_by_keyword')
256
+
257
+ def plot_reason_length_comparison(self, csv_path):
258
+ """绘制reason长度对比(YES vs NO)"""
259
+ df = pd.read_csv(csv_path)
260
+
261
+ fig, ax = plt.subplots(figsize=self.figsize)
262
+ self.apply_nature_style(ax)
263
+
264
+ yes_lengths = df[df['label'] == 'YES']['length']
265
+ no_lengths = df[df['label'] == 'NO']['length']
266
+
267
+ bp = ax.boxplot([yes_lengths, no_lengths], labels=['YES', 'NO'],
268
+ patch_artist=True,
269
+ widths=0.6,
270
+ boxprops=dict(facecolor=NATURE_COLORS['primary'], alpha=0.7, linewidth=2),
271
+ medianprops=dict(color='white', linewidth=3),
272
+ whiskerprops=dict(linewidth=2),
273
+ capprops=dict(linewidth=2),
274
+ flierprops=dict(marker='o', markersize=8, alpha=0.5))
275
+
276
+ ax.set_ylabel('Reason Length (Characters)', fontsize=42, fontweight='bold')
277
+ ax.set_title('Comparison of Filter Reason Length: YES vs NO',
278
+ fontsize=30, fontweight='bold', pad=20)
279
+ ax.set_xticklabels(['Relevant (YES)', 'Irrelevant (NO)'], fontsize=36)
280
+
281
+ plt.tight_layout()
282
+
283
+ self.save_fig(fig, 'reason_length_comparison')
284
+
285
+ def plot_extension_distribution(self, csv_path, top_n=20, repo_top_n=None):
286
+ """绘制文件扩展名分布"""
287
+ df = pd.read_csv(csv_path)
288
+ df = df.head(top_n)
289
+
290
+ fig, ax = plt.subplots(figsize=self.figsize)
291
+ self.apply_nature_style(ax)
292
+
293
+ bars = ax.barh(range(len(df)), df['count'],
294
+ color=NATURE_COLORS['accent'], alpha=0.85,
295
+ edgecolor='white', linewidth=1.5)
296
+ ax.set_yticks(range(len(df)))
297
+ ax.set_yticklabels(df['extension'], fontsize=36, rotation=15, ha='right')
298
+ ax.set_xlabel('Number of Files', fontsize=42, fontweight='bold')
299
+
300
+ # 动态生成标题
301
+ if repo_top_n:
302
+ repo_label = f'Top {repo_top_n:,} Repositories'
303
+ else:
304
+ repo_label = 'All Repositories'
305
+ ax.set_title(f'Top {top_n} File Extension Distribution ({repo_label})',
306
+ fontsize=30, fontweight='bold', pad=20)
307
+
308
+ ax.invert_yaxis()
309
+ plt.tight_layout()
310
+
311
+ self.save_fig(fig, 'extension_distribution')
312
+
313
+ def plot_repo_file_count_distribution(self, csv_path, repo_top_n=None):
314
+ """绘制仓库文件数分布"""
315
+ df = pd.read_csv(csv_path)
316
+
317
+ fig, ax = plt.subplots(figsize=self.figsize)
318
+ self.apply_nature_style(ax)
319
+
320
+ # Use log bins (file counts may span large ranges)
321
+ log_bins = np.logspace(0, np.log10(df['total_files'].max() + 1), 50)
322
+ ax.hist(df['total_files'], bins=log_bins,
323
+ color=NATURE_COLORS['primary'], alpha=0.75,
324
+ edgecolor='white', linewidth=1)
325
+ ax.set_xscale('log')
326
+ ax.set_xlabel('Number of Files (log scale)', fontsize=42, fontweight='bold')
327
+ ax.set_ylabel('Number of Repositories', fontsize=42, fontweight='bold')
328
+
329
+ # 动态生成标题
330
+ if repo_top_n:
331
+ repo_label = f'Top {repo_top_n:,} Repositories'
332
+ else:
333
+ repo_label = 'All Repositories'
334
+ ax.set_title(f'Distribution of Repository File Counts ({repo_label})',
335
+ fontsize=30, fontweight='bold', pad=20)
336
+
337
+ plt.tight_layout()
338
+
339
+ self.save_fig(fig, 'repo_file_count_distribution')
340
+
341
+ def plot_stars_vs_code_size(self, repos_searched_csv, repo_level_csv, repo_top_n=None):
342
+ """绘制stars vs 代码规模散点图(需要join)"""
343
+ # 读取repos_searched获取stars
344
+ df_searched = pd.read_csv(repos_searched_csv, usecols=['full_name', 'stars'])
345
+ df_searched = df_searched.dropna(subset=['stars'])
346
+ df_searched['stars'] = df_searched['stars'].astype(float)
347
+
348
+ # 读取repo_level统计
349
+ df_repo = pd.read_csv(repo_level_csv)
350
+ df_repo['full_name'] = df_repo['full_name'].fillna(df_repo['repo_name'].str.replace('___', '/'))
351
+
352
+ # Join
353
+ df_merged = df_repo.merge(df_searched, on='full_name', how='inner')
354
+ df_merged = df_merged[df_merged['total_code_lines'] > 0]
355
+
356
+ if len(df_merged) == 0:
357
+ print("Warning: No data to plot stars vs code size")
358
+ return
359
+
360
+ fig, ax = plt.subplots(figsize=self.figsize)
361
+ self.apply_nature_style(ax)
362
+
363
+ # Log scale, use Nature blue, increase transparency
364
+ ax.scatter(df_merged['total_code_lines'], df_merged['stars'],
365
+ alpha=0.4, s=30, color=NATURE_COLORS['primary'], edgecolors='white', linewidth=0.5)
366
+ ax.set_xscale('log')
367
+ ax.set_yscale('log')
368
+ ax.set_xlabel('Lines of Code (LOC, log scale)', fontsize=42, fontweight='bold')
369
+ ax.set_ylabel('Stars (log scale)', fontsize=42, fontweight='bold')
370
+
371
+ # 动态生成标题
372
+ if repo_top_n:
373
+ repo_label = f'Top {repo_top_n:,} Repositories'
374
+ else:
375
+ repo_label = 'All Repositories'
376
+ ax.set_title(f'Stars vs Code Size ({repo_label})',
377
+ fontsize=30, fontweight='bold', pad=20)
378
+
379
+ # 添加相关性
380
+ corr = np.corrcoef(np.log10(df_merged['total_code_lines']),
381
+ np.log10(df_merged['stars']))[0, 1]
382
+ ax.text(0.05, 0.95, f'Correlation: r = {corr:.3f}', transform=ax.transAxes,
383
+ fontsize=24, verticalalignment='top', fontweight='bold',
384
+ bbox=dict(boxstyle='round', facecolor='white', alpha=0.8, edgecolor=NATURE_COLORS['primary'], linewidth=2))
385
+
386
+ plt.tight_layout()
387
+
388
+ self.save_fig(fig, 'stars_vs_code_size')
389
+
390
+ def plot_repo_stats_by_language(self, repo_level_csv, repo_top_n=None):
391
+ """绘制按主语言的代码统计对比"""
392
+ df = pd.read_csv(repo_level_csv)
393
+ df = df[df['primary_language'] != 'unknown']
394
+
395
+ # 选择Top 10语言
396
+ top_langs = df['primary_language'].value_counts().head(10).index
397
+ df = df[df['primary_language'].isin(top_langs)]
398
+
399
+ # 增大图片尺寸以避免字体重叠
400
+ larger_figsize = (24, 16) # 从默认的 (19.2, 10.8) 增大到 (24, 16)
401
+ fig, axes = plt.subplots(2, 2, figsize=larger_figsize)
402
+
403
+ colors_list = [NATURE_COLORS['primary'], NATURE_COLORS['accent'],
404
+ NATURE_COLORS['success'], NATURE_COLORS['secondary']]
405
+
406
+ # 1. 平均代码行数
407
+ ax = axes[0, 0]
408
+ self.apply_nature_style(ax)
409
+ lang_stats = df.groupby('primary_language')['total_code_lines'].mean().sort_values(ascending=False)
410
+ lang_stats.plot(kind='bar', ax=ax, color=colors_list[0], alpha=0.85, edgecolor='white', linewidth=1.5)
411
+ ax.set_title('Average Lines of Code', fontsize=28, fontweight='bold', pad=25)
412
+ ax.set_xlabel('')
413
+ ax.set_ylabel('Average LOC', fontsize=42)
414
+ ax.tick_params(axis='x', rotation=45, labelsize=36, pad=10)
415
+ ax.tick_params(axis='y', labelsize=36, pad=10)
416
+
417
+ # 2. Average number of functions
418
+ ax = axes[0, 1]
419
+ self.apply_nature_style(ax)
420
+ lang_stats = df.groupby('primary_language')['total_functions'].mean().sort_values(ascending=False)
421
+ lang_stats.plot(kind='bar', ax=ax, color=colors_list[1], alpha=0.85, edgecolor='white', linewidth=1.5)
422
+ ax.set_title('Average Number of Functions', fontsize=28, fontweight='bold', pad=25)
423
+ ax.set_xlabel('')
424
+ ax.set_ylabel('Average Functions', fontsize=42)
425
+ ax.tick_params(axis='x', rotation=45, labelsize=36, pad=10)
426
+ ax.tick_params(axis='y', labelsize=36, pad=10)
427
+
428
+ # 3. Average comment ratio
429
+ ax = axes[1, 0]
430
+ self.apply_nature_style(ax)
431
+ lang_stats = df.groupby('primary_language')['comment_ratio'].mean().sort_values(ascending=False)
432
+ lang_stats.plot(kind='bar', ax=ax, color=colors_list[2], alpha=0.85, edgecolor='white', linewidth=1.5)
433
+ ax.set_title('Average Comment Ratio', fontsize=28, fontweight='bold', pad=25)
434
+ ax.set_xlabel('')
435
+ ax.set_ylabel('Comment Ratio', fontsize=42)
436
+ ax.tick_params(axis='x', rotation=45, labelsize=36, pad=10)
437
+ ax.tick_params(axis='y', labelsize=36, pad=10)
438
+
439
+ # 4. Language diversity (entropy)
440
+ ax = axes[1, 1]
441
+ self.apply_nature_style(ax)
442
+ lang_stats = df.groupby('primary_language')['language_entropy'].mean().sort_values(ascending=False)
443
+ lang_stats.plot(kind='bar', ax=ax, color=colors_list[3], alpha=0.85, edgecolor='white', linewidth=1.5)
444
+ ax.set_title('Average Language Diversity', fontsize=28, fontweight='bold', pad=25)
445
+ ax.set_xlabel('')
446
+ ax.set_ylabel('Language Entropy', fontsize=42)
447
+ ax.tick_params(axis='x', rotation=45, labelsize=36, pad=10)
448
+ ax.tick_params(axis='y', labelsize=36, pad=10)
449
+
450
+ # 动态生成标题
451
+ if repo_top_n:
452
+ repo_label = f'Top {repo_top_n:,} Repositories'
453
+ else:
454
+ repo_label = 'All Repositories'
455
+ plt.suptitle(f'Repository Code Statistics by Primary Language ({repo_label})',
456
+ fontsize=36, fontweight='bold', y=0.995)
457
+
458
+ # 大幅增加上下子图之间的垂直间距,并调整其他间距
459
+ # top 调小以给总标题留出更多空间,避免与子图标题重叠
460
+ plt.subplots_adjust(hspace=0.8, wspace=0.4, top=0.88, bottom=0.08, left=0.08, right=0.95)
461
+
462
+ self.save_fig(plt.gcf(), 'repo_stats_by_language')
463
+
464
+ def plot_keyword_wordcloud(self, csv_path, max_words=200):
465
+ """Generate wordcloud for keywords (Nature style: colorful)"""
466
+ try:
467
+ from wordcloud import WordCloud
468
+ except ImportError:
469
+ print("Warning: wordcloud library not installed. Skipping wordcloud generation.")
470
+ print("Install with: pip install wordcloud")
471
+ return
472
+
473
+ try:
474
+ # Read keyword data
475
+ df = pd.read_csv(csv_path)
476
+
477
+ # Create frequency dictionary - ensure values are integers
478
+ keyword_freq = {}
479
+ for keyword, count in zip(df['keyword'], df['count']):
480
+ keyword_freq[str(keyword)] = int(count)
481
+
482
+ # Generate wordcloud with Nature style (colorful)
483
+ wordcloud = WordCloud(
484
+ width=1920,
485
+ height=1080,
486
+ background_color='white',
487
+ colormap='Blues', # Blue colormap for Nature style
488
+ max_words=max_words,
489
+ relative_scaling=0.5,
490
+ min_font_size=10,
491
+ prefer_horizontal=0.7,
492
+ collocations=False
493
+ )
494
+
495
+ # Generate from frequencies
496
+ wordcloud.generate_from_frequencies(keyword_freq)
497
+
498
+ # Create figure
499
+ fig, ax = plt.subplots(figsize=self.figsize)
500
+
501
+ # Convert WordCloud to PIL Image first, then to numpy array
502
+ # This avoids the numpy version compatibility issue with copy parameter
503
+ pil_image = wordcloud.to_image()
504
+ wordcloud_array = np.array(pil_image)
505
+
506
+ ax.imshow(wordcloud_array, interpolation='bilinear')
507
+ ax.axis('off')
508
+ ax.set_title('Keyword Word Cloud (Search Stage)',
509
+ fontsize=30, fontweight='bold', pad=20)
510
+
511
+ plt.tight_layout()
512
+
513
+ # Save wordcloud
514
+ fig_path_png = self.output_dir / f"fig_{self.fig_counter:02d}_keyword_wordcloud.png"
515
+ fig.savefig(fig_path_png, dpi=self.dpi, bbox_inches='tight', facecolor='white')
516
+ plt.close(fig)
517
+ self.fig_counter += 1
518
+ print(f"Saved: {fig_path_png}")
519
+
520
+ except Exception as e:
521
+ print(f"Error generating wordcloud: {e}")
522
+ import traceback
523
+ traceback.print_exc()
524
+
525
+
def generate_all_visualizations(stage_a_dir, stage_b_dir, repo_meta_dir, code_stats_dir, repos_searched_csv, top_n=None):
    """Generate every report figure.

    Each figure is produced best-effort: a failure in one plot is printed and
    the remaining plots still run. Figures are written to a sibling 'figures'
    directory next to stage_a_dir.
    """
    viz = Visualization(Path(stage_a_dir).parent / 'figures')

    print("Generating visualizations...")

    # Suffix embedded in per-run artifact file names
    top_n_suffix = f"_top{top_n}" if top_n else ""

    stage_a = Path(stage_a_dir)
    stage_b = Path(stage_b_dir)
    repo_meta = Path(repo_meta_dir)
    code_stats = Path(code_stats_dir)

    # (error-message prefix, zero-arg thunk) pairs, executed in pipeline order:
    # Stage A charts, Stage B charts, repo-meta charts, then code-stats charts.
    tasks = [
        ("Error plotting funnel",
         lambda: viz.plot_funnel(stage_a_dir, stage_b_dir, repo_meta_dir, top_n=top_n)),
        ("Error plotting top keywords",
         lambda: viz.plot_top_keywords(stage_a / 'by_keyword.csv')),
        ("Error generating keyword wordcloud",
         lambda: viz.plot_keyword_wordcloud(stage_a / 'by_keyword.csv')),
        ("Error plotting language distribution",
         lambda: viz.plot_language_distribution(stage_a / 'by_language.csv')),
        ("Error plotting stars distribution",
         lambda: viz.plot_stars_distribution(stage_a / 'stars_distribution.csv')),
        ("Error plotting filter results",
         lambda: viz.plot_filter_results(stage_b / 'filter_by_keyword.csv')),
        ("Error plotting reason length comparison",
         lambda: viz.plot_reason_length_comparison(stage_b / 'reason_length_distribution.csv')),
        ("Error plotting extension distribution",
         lambda: viz.plot_extension_distribution(repo_meta / 'extension_distribution.csv', repo_top_n=top_n)),
        ("Error plotting repo file count distribution",
         lambda: viz.plot_repo_file_count_distribution(repo_meta / f'repo_meta_scan{top_n_suffix}.csv', repo_top_n=top_n)),
        ("Error plotting stars vs code size",
         lambda: viz.plot_stars_vs_code_size(repos_searched_csv, code_stats / f'repo_level_metrics{top_n_suffix}.csv', repo_top_n=top_n)),
        ("Error plotting repo stats by language",
         lambda: viz.plot_repo_stats_by_language(code_stats / f'repo_level_metrics{top_n_suffix}.csv', repo_top_n=top_n)),
    ]

    for error_prefix, run_plot in tasks:
        try:
            run_plot()
        except Exception as e:
            # Best-effort: report the failure and keep generating the rest.
            print(f"{error_prefix}: {e}")

    print(f"Visualization complete! All figures saved to {viz.output_dir}")