import os
import random
from os.path import join

import pysam
from tokenizers import Tokenizer
def writetsv(data, label, savefile):
    """Write parallel sequence/label lists to a two-column TSV with a header row."""
    with open(savefile, 'w') as f:
        f.write('sequence\tlabels\n')
        for seq, lab in zip(data, label):
            f.write(f'{seq}\t{lab}\n')
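# A minimal usage sketch (hypothetical toy data, not part of the pipeline):
#   writetsv(["ACGT GGTA", "TTAA CCGG"], ["chr1", "chr1"], "demo.tsv")
# would produce:
#   sequence<TAB>labels
#   ACGT GGTA<TAB>chr1
#   TTAA CCGG<TAB>chr1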
def nonoverlap_split(tokens, maxlen, tolerance=0.5):
    """Chop a token stream into non-overlapping windows of `maxlen` tokens,
    skipping windows where the fraction of N-containing tokens reaches `tolerance`."""
    seqs = []
    skipped = 0
    num_windows = len(tokens) // maxlen
    for i in range(num_windows):
        window = tokens[i * maxlen:(i + 1) * maxlen]
        # Token-aware N detection: count tokens containing at least one 'N'
        num_N = sum('N' in tok for tok in window)
        if num_N / maxlen < tolerance:
            seqs.append(" ".join(window))
        else:
            skipped += 1
    print(f"In this chromosome, skipped sequences: {skipped}")
    return seqs
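# Windowing sketch (toy numbers, assuming maxlen=3 and the default tolerance=0.5):
#   nonoverlap_split(["AC", "GN", "TT", "NN", "NA", "GG"], maxlen=3)
# keeps window 1 ("AC GN TT": 1/3 N-tokens < 0.5) and skips window 2
# ("NN NA GG": 2/3 N-tokens >= 0.5); any trailing partial window is dropped.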
def tokenize_full_sequence_collect(tokenizer, sequence, chunk_size=1_000_000):
    """Tokenize a long sequence in fixed-size chunks and collect the raw token strings."""
    raw_tokens = []
    for i in range(0, len(sequence), chunk_size):
        chunk = sequence[i:i + chunk_size]
        encoded = tokenizer.encode(chunk)
        raw_tokens.extend(encoded.tokens)
        # Progress report every 10 chunks
        if i % (10 * chunk_size) == 0:
            print(f"Processed {i:,} bp")
    return raw_tokens
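# Caveat worth noting: tokenizing fixed-size chunks independently can split what
# the tokenizer would otherwise emit as a single token at each chunk boundary,
# so the collected stream is a close approximation of, not byte-identical to,
# tokenizing the whole chromosome in one pass.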
# --- 1. CONFIG AND CHROMOSOME LOADING ---
maxlen = 512            # tokens per training window
tolerance = 0.5         # max fraction of N-containing tokens per window
chrm = 'chr1'
CHUNK_SIZE = 1_000_000  # bp per tokenization chunk
fasta_path = '/home/n5huang/dna_token/hg38.fa'
args_token_path = '/home/n5huang/dna_token/output_tokens'
os.makedirs(args_token_path, exist_ok=True)
with pysam.FastaFile(fasta_path) as genome:
    full_sequence = genome.fetch(reference=chrm)

print(f"Chromosome: {chrm}")
print(f"Total length: {len(full_sequence):,} bases")
print(f"First 100 bases:\n{full_sequence[:100]}")
# --- 2. LOAD YOUR TOKENIZERS ---
VOCAB_PATHS = {
    "Merged_uni_tfidf": "/home/n5huang/dna_token/tokenizer_evaluation/merge_bpe/merge_tokenizer_unigram_tf_idf.json",
    # "Merged_uni_len": "/home/n5huang/dna_token/tokenizer_evaluation/merge_bpe/merge_tokenizer_unigram_len.json",
    # "Merged_uni_len2": "/home/n5huang/dna_token/tokenizer_evaluation/merge_bpe/merge_tokenizer_unigram_len2.json",
    # "Weighted": "/home/n5huang/dna_token/tokenizer_evaluation/weighted_bpe/tokenizer.json",
    # "SeqOnly": "/home/n5huang/dna_token/tokenizer_evaluation/baseline_bpe/tokenizer.json",
}
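# Optional fail-fast guard (a minimal sketch; the paths above are machine-specific):
for _name, _path in VOCAB_PATHS.items():
    assert os.path.isfile(_path), f"Missing tokenizer file for {_name}: {_path}"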
tokenizers = {}
for name, path in VOCAB_PATHS.items():
    tokenizers[name] = Tokenizer.from_file(path)
# --- 3. TOKENIZE, SPLIT, AND SAVE PER TOKENIZER ---
for tok_name, tok in tokenizers.items():
    print(f"\n=== Processing tokenizer: {tok_name} ===")
    print(tok.pre_tokenizer)
    print(tok.model)

    # 1. Tokenize the full chromosome
    raw_tokens = tokenize_full_sequence_collect(tok, full_sequence, chunk_size=CHUNK_SIZE)
    print(f"Total raw tokens: {len(raw_tokens):,}")

    # 2. Build fixed-length, non-overlapping sequences
    final_seqs = nonoverlap_split(
        tokens=raw_tokens,
        maxlen=maxlen,
        tolerance=tolerance,
    )
    print(f"Total sequences for pretrain: {len(final_seqs):,}")

    # 3. Labels (one per sequence, all from the same chromosome)
    labels = [chrm] * len(final_seqs)

    # 4. Shuffle with a fixed seed for reproducibility
    combined = list(zip(final_seqs, labels))
    random.seed(42)
    random.shuffle(combined)
    shuffle_data, shuffle_labels = zip(*combined)

    # 5. Train / val split (90/10)
    train_num = int(0.9 * len(shuffle_data))
    train_data = shuffle_data[:train_num]
    train_labels = shuffle_labels[:train_num]
    val_data = shuffle_data[train_num:]
    val_labels = shuffle_labels[train_num:]

    # 6. Save TSVs
    train_path = join(args_token_path, f"{tok_name}_{chrm}_all_tokenized_train.tsv")
    val_path = join(args_token_path, f"{tok_name}_{chrm}_all_tokenized_val.tsv")
    writetsv(train_data, train_labels, train_path)
    writetsv(val_data, val_labels, val_path)
    print(f"Saved:\n  {train_path}\n  {val_path}")