"""Compute per-token phyloP statistics (mean and variance) for several DNA tokenizers."""
import os
import pickle

import numpy as np
from tokenizers import Tokenizer

# --- 1. CONFIGURATION ---
DATA_PATH = "/home/n5huang/dna_token/tokenizer_evaluation/eval_data.pkl"
RESULTS_PATH = "/home/n5huang/dna_token/tokenizer_evaluation/evaluation_results.pkl"

# Ensure these match the filenames you upload to the server.
VOCAB_PATHS = {
    "Merged_uni": "/home/n5huang/dna_token/tokenizer_evaluation/merge_bpe/merge_tokenizer_unigram.json",
    "Merged_word": "/home/n5huang/dna_token/tokenizer_evaluation/merge_bpe/merge_tokenizer_wordPiece.json",
    "Weighted": "/home/n5huang/dna_token/tokenizer_evaluation/weighted_bpe/tokenizer.json",  # Adjust filename if needed
    "SeqOnly": "/home/n5huang/dna_token/tokenizer_evaluation/baseline_bpe/tokenizer.json",   # Adjust filename if needed
    "DNAbert2": "/home/n5huang/dna_token/pretrain/models/DNAbert2_Pretrained/tokenizer.json",
    "Grover": "/home/n5huang/dna_token/pretrain/models/Grover_Pretrained/tokenizer.json",
}
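
# Hypothetical helper (never called here; the real training code lives
# elsewhere in the repo): a minimal sketch of how a "SeqOnly"-style BPE
# tokenizer.json referenced above could be produced with the same
# `tokenizers` library. The vocab_size and [UNK] token are assumptions.
def train_baseline_bpe(sequence_iter, out_path, vocab_size=4096):
    from tokenizers import models, trainers
    tok = Tokenizer(models.BPE(unk_token="[UNK]"))
    trainer = trainers.BpeTrainer(vocab_size=vocab_size, special_tokens=["[UNK]"])
    tok.train_from_iterator(sequence_iter, trainer)
    tok.save(out_path)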

# --- 2. EVALUATION ---
def evaluate_tokenizer_on_phyloP(tokenizer, sequences, phyloPs):
    """
    Score a single tokenizer against per-base phyloP conservation.

    Returns a dict of three parallel collections, one entry per token
    occurrence:
      - "mean":  mean phyloP over the bases the token spans
      - "var":   phyloP variance over those bases
      - "token": the token string
    """
    token_means = []
    token_vars = []
    token_names = []

    total_tokens = 0

    for seq, scores in zip(sequences, phyloPs):
        # Skip chunks that are too short (e.g. the truncated end of a chromosome).
        if len(seq) < 100:
            continue

        # upper() preserves length, so token offsets still index into `scores`.
        enc = tokenizer.encode(seq.upper())
        total_tokens += len(enc.ids)

        # enc.offsets holds (start, end) character positions for each token,
        # which align one-to-one with the per-base phyloP array.
        for tok, (start, end) in zip(enc.tokens, enc.offsets):
            region = scores[start:end]

            # Special tokens report empty (0, 0) spans; skip them.
            if len(region) == 0:
                continue

            token_means.append(region.mean())
            token_vars.append(region.var())
            token_names.append(tok)

    print(f"  -> Processed {total_tokens:,} tokens.")
    return {
        "mean": np.array(token_means),
        "var": np.array(token_vars),
        "token": token_names,
    }
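
# Illustrative helper (an assumption, not part of the original pipeline):
# collapse the per-occurrence output of evaluate_tokenizer_on_phyloP into one
# score per unique vocabulary entry, i.e. the average phyloP mean of a token
# across all of its occurrences.
def summarize_by_token(result):
    by_token = {}
    for tok, m in zip(result["token"], result["mean"]):
        by_token.setdefault(tok, []).append(m)
    return {tok: float(np.mean(vals)) for tok, vals in by_token.items()}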


# --- 3. MAIN EXECUTION ---
if __name__ == "__main__":
    print("Loading data from pickle...")
    with open(DATA_PATH, "rb") as f:
        data = pickle.load(f)
        
    sequences = data["test_sequences"]
    phyloPs = data["test_phyloP"]
    print(f"Loaded {len(sequences)} genomic windows.")

    # Load Tokenizers
    tokenizers = {}
    print("Loading tokenizers...")
    for name, path in VOCAB_PATHS.items():
        if os.path.exists(path):
            tokenizers[name] = Tokenizer.from_file(path)
            print(f"✅ Loaded {name}")
        else:
            print(f"❌ Warning: File {path} not found. Skipping.")

    # Run the evaluation for every successfully loaded tokenizer.
    results = {}
    print("\nStarting benchmark (recording token names)...")
    
    for name, tok in tokenizers.items():
        print(f"Evaluating {name}...")
        results[name] = evaluate_tokenizer_on_phyloP(tok, sequences, phyloPs)

    # Save Results
    print(f"\nSaving results to {RESULTS_PATH}...")
    with open(RESULTS_PATH, "wb") as f:
        pickle.dump(results, f)
        
    print("Success! Download 'evaluation_results.pkl' (Note: File size will be larger).")