"""
Create metadata table for the elaborated GitHub Python dataset.

This script parses the python_files_elaborated.txt file containing GitHub URLs
and extracts repository metadata (owner, repo name, file path).
It generates a CSV file with this information and prints statistics.

The elaborated dataset contains more files than the licensed subset and
may include repositories with various licenses (not just permissive ones).
"""
| |
|
| | import csv |
| | import os |
| | import re |
| | import pandas as pd |
| | from collections import Counter, defaultdict |
| | from tqdm import tqdm |
| | from urllib.parse import urlparse |
| |
|
| | |
# Input: full (elaborated) list of GitHub raw-content URLs, one per line.
ELABORATED_FILES_LIST = "python_files_elaborated.txt"
# Input: smaller licensed subset used for comparison (optional at runtime).
LICENSED_FILES_LIST = "python_files.txt"
# Output: CSV with owner / repo_name / file_path / url columns.
OUTPUT_CSV = "python_files_elaborated_metadata.csv"


# Captures (owner, repo, in-repo file path) from a raw.githubusercontent.com
# URL; the branch/ref segment between repo and path is matched but not captured.
GITHUB_RAW_PATTERN = r"https://raw\.githubusercontent\.com/([^/]+)/([^/]+)/[^/]+/(.*)"
| |
|
| |
|
def parse_github_url(url):
    """
    Extract repository coordinates from a GitHub raw-content URL.

    Args:
        url (str): URL of the form
            https://raw.githubusercontent.com/<owner>/<repo>/<ref>/<path>

    Returns:
        tuple: (owner, repo_name, file_path), or None when the URL does not
        match the expected raw.githubusercontent.com layout.
    """
    match = re.match(GITHUB_RAW_PATTERN, url)
    # .groups() is exactly the (owner, repo_name, file_path) triple.
    return match.groups() if match is not None else None
| |
|
| |
|
def create_metadata_table(file_list_path):
    """
    Parse every GitHub URL in a list file into a metadata record.

    Args:
        file_list_path (str): Text file with one GitHub raw URL per line.

    Returns:
        list: Dicts with "owner", "repo_name", "file_path" and "url" keys.
        URLs that do not match the expected pattern are silently skipped.
    """
    # Read the URL list up front, dropping blank lines and whitespace.
    with open(file_list_path, "r") as handle:
        urls = [stripped for stripped in (line.strip() for line in handle) if stripped]

    print(f"Processing URLs from {file_list_path}...")

    records = []
    for url in tqdm(urls, desc="Parsing URLs"):
        parts = parse_github_url(url)
        if parts is None:
            continue
        owner, repo_name, file_path = parts
        records.append(
            {
                "owner": owner,
                "repo_name": repo_name,
                "file_path": file_path,
                "url": url,
            }
        )

    return records
| |
|
| |
|
def generate_statistics(metadata, dataset_name):
    """
    Print summary statistics for a parsed dataset and return them.

    Args:
        metadata (list): Dicts with "owner", "repo_name", "file_path" keys.
        dataset_name (str): Label used in the printed section heading.

    Returns:
        dict: Aggregates — total_files, unique_repos, unique_owners,
        top_repos (top 10), top_owners (top 5), extensions (Counter).
    """
    # Single pass over the metadata accumulates all three frequency tables.
    repo_counts = Counter()
    owner_counts = Counter()
    extensions = Counter()
    for item in metadata:
        repo_counts[(item["owner"], item["repo_name"])] += 1
        owner_counts[item["owner"]] += 1
        extensions[os.path.splitext(item["file_path"])[1]] += 1

    top_repos = repo_counts.most_common(10)
    top_owners = owner_counts.most_common(5)

    print(f"\n=== {dataset_name} Statistics ===")
    print(f"Total files: {len(metadata)}")
    # Counter keys are exactly the distinct repos/owners.
    print(f"Unique repositories: {len(repo_counts)}")
    print(f"Unique repository owners: {len(owner_counts)}")

    print("\nTop 10 repositories by file count:")
    for (owner, repo), count in top_repos:
        print(f"  {owner}/{repo}: {count} files")

    print("\nFile extensions:")
    for ext, count in extensions.most_common():
        # Skip extension-less paths (empty-string key).
        if ext:
            print(f"  {ext}: {count} files")

    print("\nTop 5 repository owners:")
    for owner, count in top_owners:
        print(f"  {owner}: {count} files")

    return {
        "total_files": len(metadata),
        "unique_repos": len(repo_counts),
        "unique_owners": len(owner_counts),
        "top_repos": top_repos,
        "top_owners": top_owners,
        "extensions": extensions,
    }
| |
|
| |
|
def compare_datasets(elaborated_stats, licensed_stats):
    """
    Compare statistics between elaborated and licensed datasets.

    Args:
        elaborated_stats (dict): Statistics for elaborated dataset
            (as returned by generate_statistics).
        licensed_stats (dict): Statistics for licensed dataset.
    """
    print("\n=== Dataset Comparison ===")
    print(f"Elaborated dataset: {elaborated_stats['total_files']} files")
    print(f"Licensed dataset: {licensed_stats['total_files']} files")
    print(f"Additional files in elaborated dataset: {elaborated_stats['total_files'] - licensed_stats['total_files']} files")

    # Guard against an empty licensed dataset: the original computation
    # raised ZeroDivisionError when licensed total_files was 0.
    licensed_total = licensed_stats['total_files']
    if licensed_total:
        pct_increase = ((elaborated_stats['total_files'] / licensed_total) - 1) * 100
        print(f"Percentage increase: {pct_increase:.1f}%")
    else:
        print("Percentage increase: n/a (licensed dataset is empty)")

    print(f"\nElaborated dataset: {elaborated_stats['unique_repos']} repositories")
    print(f"Licensed dataset: {licensed_stats['unique_repos']} repositories")

    print(f"\nElaborated dataset: {elaborated_stats['unique_owners']} repository owners")
    print(f"Licensed dataset: {licensed_stats['unique_owners']} repository owners")

    # Compare only the top-10 lists (full repo sets are not in the stats dicts).
    elaborated_repos = set((owner, repo) for (owner, repo), _ in elaborated_stats['top_repos'])
    licensed_repos = set((owner, repo) for (owner, repo), _ in licensed_stats['top_repos'])
    unique_to_elaborated = elaborated_repos - licensed_repos

    if unique_to_elaborated:
        print("\nTop repositories unique to elaborated dataset:")
        for owner, repo in list(unique_to_elaborated)[:5]:
            print(f"  {owner}/{repo}")
| |
|
| |
|
def main():
    """Build the elaborated-dataset metadata CSV and print statistics."""
    # Parse the elaborated URL list and persist it as CSV.
    elaborated_metadata = create_metadata_table(ELABORATED_FILES_LIST)

    fieldnames = ["owner", "repo_name", "file_path", "url"]
    with open(OUTPUT_CSV, "w", newline="") as out:
        writer = csv.DictWriter(out, fieldnames=fieldnames,
                                quoting=csv.QUOTE_MINIMAL)
        writer.writeheader()
        writer.writerows(elaborated_metadata)

    print(f"Metadata saved to {OUTPUT_CSV}")

    elaborated_stats = generate_statistics(elaborated_metadata, "Elaborated Dataset")

    # The licensed list is optional; compare only when it exists on disk.
    if not os.path.exists(LICENSED_FILES_LIST):
        print(f"Warning: {LICENSED_FILES_LIST} not found. Cannot compare datasets.")
        return

    licensed_metadata = create_metadata_table(LICENSED_FILES_LIST)
    licensed_stats = generate_statistics(licensed_metadata, "Licensed Dataset")
    compare_datasets(elaborated_stats, licensed_stats)
| |
|
| |
|
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()
| |
|