File size: 2,965 Bytes
a88e206
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
import pandas as pd
from sklearn.metrics import accuracy_score
import argparse
import os

def process_tsv_files(parent_dir, output_csv, is_qa=None):
    """Score prediction TSVs under *parent_dir* and write accuracies to a CSV.

    Recursively walks ``parent_dir``; for every ``.tsv`` file that contains
    both a ``gold_label`` and a ``prediction`` column, computes accuracy
    (fraction of rows where prediction exactly matches gold) and records
    ``[file_path, accuracy]``. A summary CSV with columns
    ``"File Path"`` / ``"Accuracy"`` is written to ``output_csv``.

    Args:
        parent_dir: Root directory searched for ``.tsv`` files.
        output_csv: Path of the summary CSV written at the end.
        is_qa: ``None`` for plain evaluation. ``"y"`` to first remap gold
            labels for QA-style negated predictions: any prediction value
            ``not_<label>`` causes every gold label other than ``<label>``
            to be rewritten to ``not_<label>``. Any other value skips
            scoring entirely (matches the original behavior).
    """
    results = []

    # Walk through all subdirectories
    for root, _, files in os.walk(parent_dir):
        for file in files:
            if not file.endswith(".tsv"):  # Check for TSV files
                continue
            file_path = os.path.join(root, file)

            try:
                df = pd.read_csv(file_path, sep="\t")

                # Both modes need these columns. Checking up front fixes a
                # bug where the QA branch read df["prediction"] before the
                # column check and raised KeyError instead of skipping.
                if "gold_label" not in df.columns or "prediction" not in df.columns:
                    print(f"Skipping {file_path}: Required columns not found.")
                    continue
                if df.empty:
                    # An empty frame has no meaningful accuracy; skip it.
                    print(f"Skipping {file_path}: no rows.")
                    continue

                if is_qa == "y":
                    # Evaluate qa inference results.
                    # NOTE(review): if a file contains several distinct
                    # "not_<label>" predictions, these passes overwrite each
                    # other and the result depends on iteration order —
                    # presumably each file has one negated label; confirm.
                    for neg_value in df["prediction"].unique():
                        # isinstance guard: non-string cells (e.g. NaN) would
                        # otherwise raise AttributeError on .startswith.
                        if isinstance(neg_value, str) and neg_value.startswith("not_"):
                            evaluating_label = neg_value.replace("not_", "")
                            # Replace all other labels in gold_label with the
                            # negated version.
                            df["gold_label"] = df["gold_label"].apply(
                                lambda x: x if x == evaluating_label else neg_value
                            )
                elif is_qa is not None:
                    # Unrecognized is_qa value: the original code silently
                    # produced no result for this file; keep that behavior.
                    continue

                # Accuracy = mean of exact matches (equivalent to
                # sklearn.metrics.accuracy_score on these lists).
                accuracy = float((df["gold_label"] == df["prediction"]).mean())
                results.append([file_path, accuracy])

            except Exception as e:
                print(f"Error processing {file_path}: {e}")

    # Save results to a CSV
    results_df = pd.DataFrame(results, columns=["File Path", "Accuracy"])
    results_df.to_csv(output_csv, index=False)
    print(f"Results saved to {output_csv}")

def main():
    """CLI entry point: parse arguments and run the TSV evaluation."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--parent_dir", type=str, required=True, help="Path to the parent folder"
    )
    arg_parser.add_argument(
        "--output_csv", type=str, required=True, help="Path to the output CSV file"
    )
    arg_parser.add_argument(
        "--is_qa", type=str, help="Are we evaluating qa inference?"
    )
    parsed = arg_parser.parse_args()

    process_tsv_files(parsed.parent_dir, parsed.output_csv, parsed.is_qa)


if __name__ == "__main__":
    main()