"""
Word2Vec embedding for fMRI language encoding.
Uses the pre-trained Google News Word2Vec model (300-d).
Download from:
https://drive.google.com/file/d/0B7XkCwpI5KDYNlNUTTlSS21pQmM/edit?usp=sharing
(GoogleNews-vectors-negative300.bin.gz, ~1.5 GB)
Place the decompressed .bin file at:
lab3/data/raw/GoogleNews-vectors-negative300.bin
The pipeline mirrors bow.py: embed each word token → downsample to TR-rate
via Lanczos interpolation → trim edges → add temporal lags.
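
Example CLI invocation (file and story names are illustrative):

    python word2vec.py wordseqs.pkl storyA,storyB storyC out/lab3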
"""
import sys
import os
import numpy as np
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
from preprocessing import downsample_word_vectors, make_delayed
W2V_DIM = 300
DEFAULT_W2V_PATH = os.path.join(
os.path.dirname(__file__), "../../data/raw/GoogleNews-vectors-negative300.bin"
)


def load_word2vec(model_path: str = DEFAULT_W2V_PATH):
    """Load the binary Word2Vec model via gensim."""
    try:
        from gensim.models import KeyedVectors
    except ImportError as exc:
        raise ImportError("Install gensim: pip install gensim") from exc
    print(f"Loading Word2Vec from {model_path} …")
    # Loads the full GoogleNews vocabulary (~3 million words) into RAM
    model = KeyedVectors.load_word2vec_format(model_path, binary=True)
    return model
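
# If memory is a concern, gensim's loader can read only the first N entries
# (the GoogleNews file is ordered by descending word frequency), at the cost
# of more OOV zeros below. A sketch, not used by default:
#   model = KeyedVectors.load_word2vec_format(
#       DEFAULT_W2V_PATH, binary=True, limit=200_000)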


def get_word2vec_vectors(wordseqs: dict, model) -> dict:
    """Look up each word token in the Word2Vec model.

    OOV words receive a zero vector.

    Returns:
        {story: np.ndarray of shape (num_words, 300)}
    """
    word_vectors = {}
    for story, ds in wordseqs.items():
        vecs = []
        for word in ds.data:
            # Normalize case; note the GoogleNews vocabulary also contains
            # cased entries, so lowercasing can turn some of them into OOV.
            w = word.lower()
            if w in model:
                vecs.append(model[w])
            else:
                # Out-of-vocabulary token: fall back to an all-zero vector
                vecs.append(np.zeros(W2V_DIM, dtype=np.float32))
        word_vectors[story] = np.array(vecs, dtype=np.float32)
    return word_vectors
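
# Usage sketch (story key and word-sequence object are hypothetical):
#   wv = get_word2vec_vectors({"story_a": ws}, model)
#   wv["story_a"].shape  # (len(ws.data), 300); OOV rows are all zeros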


def process_word2vec(stories_train, stories_test, wordseqs,
                     model_path=DEFAULT_W2V_PATH,
                     trim_start=5, trim_end=10, delays=range(1, 5)):
    """Full Word2Vec pipeline: embed → downsample → trim → lag."""
    model = load_word2vec(model_path)
    all_stories = list(set(stories_train) | set(stories_test))
    word_vectors = get_word2vec_vectors(
        {s: wordseqs[s] for s in all_stories}, model
    )
    # Resample word-rate vectors to the fMRI TR rate (Lanczos interpolation)
    downsampled = downsample_word_vectors(all_stories, word_vectors, wordseqs)

    def _trim_and_lag(stories):
        mats = []
        for story in stories:
            ds = downsampled[story]
            # Drop unstable TRs at the start and end of each story
            trimmed = ds[trim_start: len(ds) - trim_end]
            # Add lagged copies of the features to model the hemodynamic delay
            lagged = make_delayed(trimmed, list(delays))
            mats.append(lagged)
        return np.vstack(mats)

    X_train = _trim_and_lag(stories_train)
    X_test = _trim_and_lag(stories_test)
    return X_train, X_test
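
# With the defaults (delays 1..4), each returned matrix has 4 * 300 = 1200
# columns, assuming make_delayed concatenates one time-shifted copy of its
# input per delay, as its name and use in this pipeline suggest.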


if __name__ == "__main__":
    # Usage: python <this_script> WORDSEQS_PKL TRAIN_STORIES TEST_STORIES
    #        OUT_PREFIX [MODEL_PATH]
    # where TRAIN_STORIES / TEST_STORIES are comma-separated story names.
    import pickle

    with open(sys.argv[1], "rb") as f:
        wordseqs = pickle.load(f)
    train_list = sys.argv[2].split(",")
    test_list = sys.argv[3].split(",")
    out_prefix = sys.argv[4]
    model_path = sys.argv[5] if len(sys.argv) > 5 else DEFAULT_W2V_PATH
    X_train, X_test = process_word2vec(train_list, test_list, wordseqs, model_path)
    np.save(f"{out_prefix}_train_word2vec_embeddings.npy", X_train)
    np.save(f"{out_prefix}_test_word2vec_embeddings.npy", X_test)
    print(f"Saved Word2Vec embeddings: train {X_train.shape}, test {X_test.shape}")