import os
import random
from os.path import join

import pysam
from tokenizers import Tokenizer
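# This script prepares pretraining data: it tokenizes one hg38 chromosome with
# each saved tokenizer, cuts the token stream into fixed-length windows with few
# ambiguous (N) bases, and writes shuffled train/validation TSV files.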
def writetsv(data, label, savefile):
    """Write parallel lists of sequences and labels to a two-column TSV file."""
    with open(savefile, 'w') as f:
        f.write('sequence\tlabels\n')
        for seq, lab in zip(data, label):
            f.write(f'{seq}\t{lab}\n')
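# Resulting file layout (values illustrative):
#   sequence<TAB>labels
#   GGTCA TTTGCA AACC ...<TAB>chr1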
def nonoverlap_split(tokens, maxlen, tolerance=0.5):
    """Cut a token list into non-overlapping windows of `maxlen` tokens.

    Windows in which the fraction of tokens containing 'N' reaches `tolerance`
    are skipped; the trailing partial window is discarded.
    """
    seqs = []
    skipped = 0

    num_windows = len(tokens) // maxlen

    for i in range(num_windows):
        window = tokens[i*maxlen:(i+1)*maxlen]

        # Count tokens that contain at least one ambiguous base ('N').
        num_N = sum('N' in tok for tok in window)

        if num_N / maxlen < tolerance:
            seqs.append(" ".join(window))
        else:
            skipped += 1

    print(f"In this chromosome, skipped sequences: {skipped}")
    return seqs
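# Toy example (not real genome data): with maxlen=2 and tolerance=0.5,
# nonoverlap_split(['AC', 'GT', 'NN', 'AC'], maxlen=2) keeps 'AC GT' and skips
# the second window, because half of its tokens contain 'N'.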
def tokenize_full_sequence_collect(tokenizer, sequence, chunk_size=1_000_000):
    """Encode a long sequence in `chunk_size`-bp chunks and collect all tokens."""
    raw_tokens = []

    for i in range(0, len(sequence), chunk_size):
        chunk = sequence[i:i + chunk_size]
        encoded = tokenizer.encode(chunk)
        raw_tokens.extend(encoded.tokens)

        # Progress report every 10 chunks.
        if i % (10 * chunk_size) == 0:
            print(f"Processed {i:,} bp")

    return raw_tokens
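# Note: encoding in chunks means merges cannot cross a chunk boundary, so the
# tokens near each boundary may differ slightly from what a single pass over the
# whole chromosome would produce.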
# Windowing and I/O configuration.
maxlen = 512              # tokens per pretraining sequence
tolerance = 0.5           # maximum fraction of N-containing tokens per window
chrm = 'chr1'
CHUNK_SIZE = 1_000_000    # bp encoded per tokenizer call
fasta_path = '/home/n5huang/dna_token/hg38.fa'
args_token_path = '/home/n5huang/dna_token/output_tokens'
os.makedirs(args_token_path, exist_ok=True)
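# pysam.FastaFile relies on a .fai index next to hg38.fa; it is normally built
# automatically on first open, or can be created with `samtools faidx hg38.fa`.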
# Load the full chromosome sequence from the indexed FASTA.
with pysam.FastaFile(fasta_path) as genome:
    full_sequence = genome.fetch(reference=chrm)
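# hg38 FASTAs are commonly soft-masked (repeats in lowercase). If the tokenizers
# were trained on uppercase sequence only, the case may need to be normalised:
# full_sequence = full_sequence.upper()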
| print(f"Chromosome: {chrm}") |
| print(f"Total length: {len(full_sequence):,} bases") |
| print(f"First 100 bases:\n{full_sequence[:100]}") |
|
|
|
|
|
|
# Serialized tokenizers to evaluate, keyed by the name used in output filenames.
VOCAB_PATHS = {
    "Merged_uni_tfidf": "/home/n5huang/dna_token/tokenizer_evaluation/merge_bpe/merge_tokenizer_unigram_tf_idf.json",
}
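# Further tokenizers can be compared by adding more name -> JSON path entries,
# e.g. "BPE_baseline": "/path/to/bpe_tokenizer.json" (hypothetical path).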
tokenizers = {}
for name, path in VOCAB_PATHS.items():
    tokenizers[name] = Tokenizer.from_file(path)
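# Tokenizer.from_file loads a tokenizer serialized to JSON by the Hugging Face
# `tokenizers` library (i.e. the output of Tokenizer.save).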
for tok_name, tok in tokenizers.items():
    # Log the tokenizer's pre-tokenizer and model so the run records its setup.
    print(tok.pre_tokenizer)
    print(tok.model)

    print(f"\n=== Processing tokenizer: {tok_name} ===")

    # Tokenize the whole chromosome in chunks.
    raw_tokens = tokenize_full_sequence_collect(
        tok, full_sequence, chunk_size=CHUNK_SIZE
    )
    print(f"Total raw tokens: {len(raw_tokens):,}")

    # Cut the token stream into non-overlapping, N-filtered windows.
    final_seqs = nonoverlap_split(
        tokens=raw_tokens,
        maxlen=maxlen,
        tolerance=tolerance
    )

    print(f"Total sequences for pretrain: {len(final_seqs):,}")
    labels = [chrm] * len(final_seqs)

    combined = list(zip(final_seqs, labels))
    random.seed(42)
    random.shuffle(combined)
    shuffle_data, shuffle_labels = zip(*combined)

    train_num = int(0.9 * len(shuffle_data))

    train_data = shuffle_data[:train_num]
    train_labels = shuffle_labels[:train_num]
    val_data = shuffle_data[train_num:]
    val_labels = shuffle_labels[train_num:]

    # One train/val TSV pair per tokenizer and chromosome.
    train_path = join(
        args_token_path,
        f"{tok_name}_{chrm}_all_tokenized_train.tsv"
    )
    val_path = join(
        args_token_path,
        f"{tok_name}_{chrm}_all_tokenized_val.tsv"
    )

    writetsv(train_data, train_labels, train_path)
    writetsv(val_data, val_labels, val_path)

    print(f"Saved:\n {train_path}\n {val_path}")