Dataset fields: `desc` (docstring; string lengths 3 – 26.7k), `decl` (function declaration; string lengths 11 – 7.89k), `bodies` (function body; string lengths 8 – 553k).
'Reset all projection weights to an initial (untrained) state, but keep the existing vocabulary.'
def reset_weights(self):
logger.info('resetting layer weights') self.wv.syn0 = empty((len(self.wv.vocab), self.vector_size), dtype=REAL) for i in xrange(len(self.wv.vocab)): self.wv.syn0[i] = self.seeded_vector((self.wv.index2word[i] + str(self.seed))) if self.hs: self.syn1 = zeros((len(self.wv.vocab), sel...
'Create one \'random\' vector (but deterministic by seed_string)'
def seeded_vector(self, seed_string):
once = random.RandomState(self.hashfxn(seed_string) & 4294967295)
return (once.rand(self.vector_size) - 0.5) / self.vector_size
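The hash-based seeding means every word's initial vector is reproducible from the word plus the model seed. A minimal standalone sketch of the same idea, assuming a 100-dimensional model and Python's built-in hash as `hashfxn` (reproducible within a single process):

import numpy as np

def seeded_vector(seed_string, vector_size=100, hashfxn=hash):
    # Mask to 32 bits so the hash is a valid RandomState seed.
    once = np.random.RandomState(hashfxn(seed_string) & 0xffffffff)
    # Small values centred on zero, shrinking with dimensionality.
    return (once.rand(vector_size) - 0.5) / vector_size

v1 = seeded_vector('dog' + '1')   # word + str(seed), as in reset_weights()
v2 = seeded_vector('dog' + '1')
assert np.allclose(v1, v2)        # same word + seed -> same initial vector (within one process)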
'Merge the input-hidden weight matrix from the original C word2vec-tool format given, where it intersects with the current vocabulary. (No words are added to the existing vocabulary, but intersecting words adopt the file\'s weights, and non-intersecting words are left alone.) `binary` is a boolean indicating whether th...
def intersect_word2vec_format(self, fname, lockf=0.0, binary=False, encoding='utf8', unicode_errors='strict'):
overlap_count = 0 logger.info(('loading projection weights from %s' % fname)) with utils.smart_open(fname) as fin: header = utils.to_unicode(fin.readline(), encoding=encoding) (vocab_size, vector_size) = map(int, header.split()) if (not (vector_size == self.vector_size)):...
'Deprecated. Use self.wv.most_similar() instead. Refer to the documentation for `gensim.models.KeyedVectors.most_similar`'
def most_similar(self, positive=[], negative=[], topn=10, restrict_vocab=None, indexer=None):
return self.wv.most_similar(positive, negative, topn, restrict_vocab, indexer)
'Deprecated. Use self.wv.wmdistance() instead. Refer to the documentation for `gensim.models.KeyedVectors.wmdistance`'
def wmdistance(self, document1, document2):
return self.wv.wmdistance(document1, document2)
'Deprecated. Use self.wv.most_similar_cosmul() instead. Refer to the documentation for `gensim.models.KeyedVectors.most_similar_cosmul`'
def most_similar_cosmul(self, positive=[], negative=[], topn=10):
return self.wv.most_similar_cosmul(positive, negative, topn)
'Deprecated. Use self.wv.similar_by_word() instead. Refer to the documentation for `gensim.models.KeyedVectors.similar_by_word`'
def similar_by_word(self, word, topn=10, restrict_vocab=None):
return self.wv.similar_by_word(word, topn, restrict_vocab)
'Deprecated. Use self.wv.similar_by_vector() instead. Refer to the documentation for `gensim.models.KeyedVectors.similar_by_vector`'
def similar_by_vector(self, vector, topn=10, restrict_vocab=None):
return self.wv.similar_by_vector(vector, topn, restrict_vocab)
'Deprecated. Use self.wv.doesnt_match() instead. Refer to the documentation for `gensim.models.KeyedVectors.doesnt_match`'
def doesnt_match(self, words):
return self.wv.doesnt_match(words)
'Deprecated. Use self.wv.__getitem__() instead. Refer to the documentation for `gensim.models.KeyedVectors.__getitem__`'
def __getitem__(self, words):
return self.wv.__getitem__(words)
'Deprecated. Use self.wv.__contains__() instead. Refer to the documentation for `gensim.models.KeyedVectors.__contains__`'
def __contains__(self, word):
return self.wv.__contains__(word)
'Deprecated. Use self.wv.similarity() instead. Refer to the documentation for `gensim.models.KeyedVectors.similarity`'
def similarity(self, w1, w2):
return self.wv.similarity(w1, w2)
'Deprecated. Use self.wv.n_similarity() instead. Refer to the documentation for `gensim.models.KeyedVectors.n_similarity`'
def n_similarity(self, ws1, ws2):
return self.wv.n_similarity(ws1, ws2)
'Report the probability distribution of the center word given the context words as input to the trained model.'
def predict_output_word(self, context_words_list, topn=10):
if (not self.negative): raise RuntimeError('We have currently only implemented predict_output_word for the negative sampling scheme, so you need to have run word2vec with negative > 0 for this to work.') if ((not hasattr(self.wv,...
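A hypothetical usage sketch (corpus and words are illustrative; the call only works on a model trained with negative sampling, for context words present in its vocabulary):

from gensim.models import Word2Vec

sentences = [['the', 'cat', 'sat', 'on', 'the', 'mat'],
             ['the', 'dog', 'sat', 'on', 'the', 'rug']]
model = Word2Vec(sentences, size=50, negative=5, min_count=1, iter=50)
# Probability distribution over likely centre words for the given context:
print(model.predict_output_word(['the', 'cat', 'on'], topn=3))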
'init_sims() resides in KeyedVectors because it deals mainly with syn0. However, syn1 is not an attribute of KeyedVectors, so it has to be deleted in this class; the normalization of syn0 itself happens inside KeyedVectors.'
def init_sims(self, replace=False):
if replace and hasattr(self, 'syn1'):
    del self.syn1
return self.wv.init_sims(replace)
'Estimate required memory for a model using current settings and provided vocabulary size.'
def estimate_memory(self, vocab_size=None, report=None):
vocab_size = (vocab_size or len(self.wv.vocab)) report = (report or {}) report['vocab'] = (vocab_size * (700 if self.hs else 500)) report['syn0'] = ((vocab_size * self.vector_size) * dtype(REAL).itemsize) if self.hs: report['syn1'] = ((vocab_size * self.layer1_size) * dtype(REAL).itemsize) ...
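A back-of-the-envelope version of the same estimate, assuming float32 (4-byte) weights and negative sampling, where the output matrix mirrors syn0 in shape (the numbers are illustrative):

vocab_size, vector_size = 100000, 100
syn0 = vocab_size * vector_size * 4      # input vectors: ~40 MB
syn1neg = vocab_size * vector_size * 4   # negative-sampling output vectors: ~40 MB
vocab = vocab_size * 500                 # per-word Python vocab objects: ~50 MB
print('roughly %.0f MB total' % ((syn0 + syn1neg + vocab) / 1024.0 ** 2))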
'Deprecated. Use self.wv.log_evaluate_word_pairs() instead. Refer to the documentation for `gensim.models.KeyedVectors.log_evaluate_word_pairs`'
@staticmethod
def log_evaluate_word_pairs(pearson, spearman, oov, pairs):
return KeyedVectors.log_evaluate_word_pairs(pearson, spearman, oov, pairs)
'Deprecated. Use self.wv.evaluate_word_pairs() instead. Refer to the documentation for `gensim.models.KeyedVectors.evaluate_word_pairs`'
def evaluate_word_pairs(self, pairs, delimiter=' DCTB ', restrict_vocab=300000, case_insensitive=True, dummy4unknown=False):
return self.wv.evaluate_word_pairs(pairs, delimiter, restrict_vocab, case_insensitive, dummy4unknown)
'Discard parameters that are used only in training and scoring. Use this if you\'re sure you\'re done training a model. If `replace_word_vectors_with_normalized` is set, forget the original vectors and only keep the normalized ones, which saves a lot of memory!'
def delete_temporary_training_data(self, replace_word_vectors_with_normalized=False):
if replace_word_vectors_with_normalized:
    self.init_sims(replace=True)
self._minimize_model()
'Deprecated. Use gensim.models.KeyedVectors.load_word2vec_format instead.'
@classmethod
def load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict', limit=None, datatype=REAL):
raise DeprecationWarning('Deprecated. Use gensim.models.KeyedVectors.load_word2vec_format instead.')
'Deprecated. Use model.wv.save_word2vec_format instead.'
def save_word2vec_format(self, fname, fvocab=None, binary=False):
raise DeprecationWarning('Deprecated. Use model.wv.save_word2vec_format instead.')
'`source` can be either a string or a file object. Clip the file to the first `limit` lines (or no clipping if limit is None, the default). Example:: sentences = LineSentence(\'myfile.txt\') Or for compressed files:: sentences = LineSentence(\'compressed_text.txt.bz2\') sentences = LineSentence(\'compressed_text.txt.gz\...
def __init__(self, source, max_sentence_length=MAX_WORDS_IN_BATCH, limit=None):
self.source = source
self.max_sentence_length = max_sentence_length
self.limit = limit
'Iterate through the lines in the source.'
def __iter__(self):
try: self.source.seek(0) for line in itertools.islice(self.source, self.limit): line = utils.to_unicode(line).split() i = 0 while (i < len(line)): (yield line[i:(i + self.max_sentence_length)]) i += self.max_sentence_length exce...
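A typical streaming setup, shown here with an illustrative file name and the pre-4.0 gensim API used throughout this code (syn0, `size`):

from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence

sentences = LineSentence('corpus.txt')              # one sentence per line, whitespace-tokenized
model = Word2Vec(sentences, size=100, min_count=5)  # streams the file on every training pass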
'`source` should be a path to a directory (as a string) where all files can be opened by the LineSentence class. Each file will be read up to `limit` lines (or no clipping if limit is None, the default). Example:: sentences = LineSentencePath(os.getcwd() + \'\corpus\\') The files in the directory should be either text f...
def __init__(self, source, max_sentence_length=MAX_WORDS_IN_BATCH, limit=None):
self.source = source self.max_sentence_length = max_sentence_length self.limit = limit if os.path.isfile(self.source): logging.warning('single file read, better to use models.word2vec.LineSentence') self.input_files = [self.source] elif os.path.isdir(self.source): ...
'Iterate through the files.'
def __iter__(self):
for file_name in self.input_files: logging.info(('reading file ' + file_name)) with utils.smart_open(file_name) as fin: for line in itertools.islice(fin, self.limit): line = utils.to_unicode(line).split() i = 0 while (i < len(line)): ...
'`corpus` is any iterable gensim corpus `time_slice` as described above is a list which contains the number of documents in each time-slice `id2word` is a mapping from word ids (integers) to words (strings). It is used to determine the vocabulary size and printing topics. `alphas` is a prior of your choice and should ...
def __init__(self, corpus=None, time_slice=None, id2word=None, alphas=0.01, num_topics=10, initialize='gensim', sstats=None, lda_model=None, obs_variance=0.5, chain_variance=0.005, passes=10, random_state=None, lda_inference_max_iter=25, em_min_iter=6, em_max_iter=20, chunksize=100):
self.id2word = id2word if ((corpus is None) and (self.id2word is None)): raise ValueError('at least one of corpus/id2word must be specified, to establish input space dimensionality') if (self.id2word is None): logger.warning('no word id mapping ...
'Method to initialize State Space Language Model, topic wise.'
def init_ldaseq_ss(self, topic_chain_variance, topic_obs_variance, alpha, init_suffstats):
self.alphas = alpha
for k, chain in enumerate(self.topic_chains):
    sstats = init_suffstats[:, k]
    sslm.sslm_counts_init(chain, topic_obs_variance, topic_chain_variance, sstats)
'Fit an LDA sequence model: for each time period, set up an LDA model with E[log p(w|z)] and alpha; for each document, perform posterior inference and update the sufficient statistics/likelihood; then maximize the topics.'
def fit_lda_seq(self, corpus, lda_inference_max_iter, em_min_iter, em_max_iter, chunksize):
LDASQE_EM_THRESHOLD = 0.0001 LOWER_ITER = 10 ITER_MULT_LOW = 2 MAX_ITER = 500 num_topics = self.num_topics vocab_len = self.vocab_len data_len = self.num_time_slices corpus_len = self.corpus_len bound = 0 convergence = (LDASQE_EM_THRESHOLD + 1) iter_ = 0 while ((iter_ < e...
'Inference or E-step. This is used to set up the gensim LdaModel to be used for each time-slice. It also allows for Document Influence Model code to be written in.'
def lda_seq_infer(self, corpus, topic_suffstats, gammas, lhoods, iter_, lda_inference_max_iter, chunksize):
num_topics = self.num_topics vocab_len = self.vocab_len bound = 0.0 lda = ldamodel.LdaModel(num_topics=num_topics, alpha=self.alphas, id2word=self.id2word) lda.topics = np.array(np.split(np.zeros((vocab_len * num_topics)), vocab_len)) ldapost = LdaPost(max_doc_len=self.max_doc_len, num_topics=nu...
'Computes the likelihood of a sequential corpus under an LDA seq model, and returns the likelihood bound. Needs to be passed the LdaSeq model, corpus, sufficient stats, the gammas and lhoods matrices previously created, and the LdaModel and LdaPost class objects.'
def inferDTMseq(self, corpus, topic_suffstats, gammas, lhoods, lda, ldapost, iter_, bound, lda_inference_max_iter, chunksize):
doc_index = 0 time = 0 doc_num = 0 num_topics = self.num_topics lda = self.make_lda_seq_slice(lda, time) time_slice = np.cumsum(np.array(self.time_slice)) for (chunk_no, chunk) in enumerate(utils.grouper(corpus, chunksize)): for doc in chunk: if (doc_index > time_slice[ti...
'Set up the LDA model topic-word values with those of ldaseq.'
def make_lda_seq_slice(self, lda, time):
for k in range(0, self.num_topics):
    lda.topics[:, k] = np.copy(self.topic_chains[k].e_log_prob[:, time])
lda.alpha = np.copy(self.alphas)
return lda
'Fit the LDA sequence model, topic-wise.'
def fit_lda_seq_topics(self, topic_suffstats):
lhood = 0
lhood_term = 0
for k, chain in enumerate(self.topic_chains):
    logger.info('Fitting topic number %i', k)
    lhood_term = sslm.fit_sslm(chain, topic_suffstats[k])
    lhood += lhood_term
return lhood
'Prints one topic showing each time-slice.'
def print_topic_times(self, topic, top_terms=20):
topics = []
for time in range(0, self.num_time_slices):
    topics.append(self.print_topic(topic, time, top_terms))
return topics
'Prints all topics in a particular time-slice.'
def print_topics(self, time=0, top_terms=20):
topics = []
for topic in range(0, self.num_topics):
    topics.append(self.print_topic(topic, time, top_terms))
return topics
'`topic` is the topic number, `time` selects a particular time_slice, and `top_terms` is the number of terms to display.'
def print_topic(self, topic, time=0, top_terms=20):
topic = self.topic_chains[topic].e_log_prob
topic = np.transpose(topic)
topic = np.exp(topic[time])
topic = topic / topic.sum()
bestn = matutils.argsort(topic, top_terms, reverse=True)
beststr = [(self.id2word[id_], round(topic[id_], 3)) for id_ in bestn]
return beststr
'Given a trained LdaSeqModel object and the doc_number of a document in the training corpus, returns the doc-topic probabilities of that document.'
def doc_topics(self, doc_number):
doc_topic = np.copy(self.gammas)
doc_topic /= doc_topic.sum(axis=1)[:, np.newaxis]
return doc_topic[doc_number]
'Returns term_frequency, vocab, doc_lengths, topic-term distributions and doc-topic distributions in the format expected by pyLDAvis. All of these are needed to visualise the topics of a DTM time-slice via pyLDAvis. The input parameter is the time-slice (e.g. the year) to visualise.'
def dtm_vis(self, time, corpus):
doc_topic = np.copy(self.gammas) doc_topic /= doc_topic.sum(axis=1)[:, np.newaxis] topic_term = [(np.exp(np.transpose(chain.e_log_prob)[time]) / np.exp(np.transpose(chain.e_log_prob)[time]).sum()) for (k, chain) in enumerate(self.topic_chains)] doc_lengths = [len(doc) for (doc_no, doc) in enumerate(corp...
'Returns all topics of a particular time-slice without probability values, so they can be used for either "u_mass" or "c_v" coherence.'
def dtm_coherence(self, time):
coherence_topics = []
for topics in self.print_topics(time):
    coherence_topic = []
    for word, dist in topics:
        coherence_topic.append(word)
    coherence_topics.append(coherence_topic)
return coherence_topics
'Similar to the LdaModel __getitem__ function: returns the topic proportions of the document passed in.'
def __getitem__(self, doc):
lda_model = ldamodel.LdaModel(num_topics=self.num_topics, alpha=self.alphas, id2word=self.id2word) lda_model.topics = np.array(np.split(np.zeros((self.vocab_len * self.num_topics)), self.vocab_len)) ldapost = LdaPost(num_topics=self.num_topics, max_doc_len=len(doc), lda=lda_model, doc=doc) time_lhoods =...
'Updates the Zeta variational parameter. Zeta is described in the appendix and, for each time-slice, equals sum(exp(mean[word] + Variance[word] / 2)) over every word. It is the value of the variational parameter zeta which maximizes the lower bound.'
def update_zeta(self):
for j, val in enumerate(self.zeta):
    self.zeta[j] = np.sum(np.exp(self.mean[:, j + 1] + self.variance[:, j + 1] / 2))
return self.zeta
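An equivalent vectorized sketch of the same update, assuming `mean` and `variance` are W x (T+1) arrays as in this class (dummy values below just to make the snippet runnable):

import numpy as np

W, T = 5, 3
mean = np.zeros((W, T + 1))
variance = np.ones((W, T + 1))
# zeta[t] = sum over words w of exp(mean[w, t+1] + variance[w, t+1] / 2)
zeta = np.sum(np.exp(mean[:, 1:] + variance[:, 1:] / 2.0), axis=0)  # shape (T,)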
'Based on the Variational Kalman Filtering approach for Approximate Inference [https://www.cs.princeton.edu/~blei/papers/BleiLafferty2006a.pdf] This function accepts the word to compute variance for, along with the associated sslm class object, and returns variance and fwd_variance Computes Var[eta_{t,w}] for t = 1:T ...
def compute_post_variance(self, word, chain_variance):
INIT_VARIANCE_CONST = 1000 T = self.num_time_slices variance = self.variance[word] fwd_variance = self.fwd_variance[word] fwd_variance[0] = (chain_variance * INIT_VARIANCE_CONST) for t in range(1, (T + 1)): if self.obs_variance: c = (self.obs_variance / ((fwd_variance[(t - 1)...
'Based on the Variational Kalman Filtering approach for Approximate Inference [https://www.cs.princeton.edu/~blei/papers/BleiLafferty2006a.pdf] This function accepts the word to compute mean for, along with the associated sslm class object, and returns mean and fwd_mean Essentially a forward-backward to compute E[eta_...
def compute_post_mean(self, word, chain_variance):
T = self.num_time_slices obs = self.obs[word] fwd_variance = self.fwd_variance[word] mean = self.mean[word] fwd_mean = self.fwd_mean[word] fwd_mean[0] = 0 for t in range(1, (T + 1)): c = (self.obs_variance / ((fwd_variance[(t - 1)] + chain_variance) + self.obs_variance)) fwd_...
'Compute the expected log probability given values of m. The appendix describes the Expectation of log-probabilities in equation 5 of the DTM paper; The below implementation is the result of solving the equation and is as implemented in the original Blei DTM code.'
def compute_expected_log_prob(self):
for (w, t), val in np.ndenumerate(self.e_log_prob):
    self.e_log_prob[w][t] = self.mean[w][t + 1] - np.log(self.zeta[t])
return self.e_log_prob
'Initialize the State Space Language Model with LDA sufficient statistics. Called for each topic-chain; initializes the initial mean, variance and topic-word probabilities for the first time-slice.'
def sslm_counts_init(self, obs_variance, chain_variance, sstats):
W = self.vocab_len T = self.num_time_slices log_norm_counts = np.copy(sstats) log_norm_counts = (log_norm_counts / sum(log_norm_counts)) log_norm_counts = (log_norm_counts + (1.0 / W)) log_norm_counts = (log_norm_counts / sum(log_norm_counts)) log_norm_counts = np.log(log_norm_counts) se...
'Fits variational distribution. This is essentially the m-step. Accepts the sstats for a particular topic for input and maximizes values for that topic. Updates the values in the update_obs() and compute_expected_log_prob methods.'
def fit_sslm(self, sstats):
W = self.vocab_len bound = 0 old_bound = 0 sslm_fit_threshold = 1e-06 sslm_max_iter = 2 converged = (sslm_fit_threshold + 1) totals = np.zeros(sstats.shape[1]) (self.variance, self.fwd_variance) = map(np.array, list(zip(*[self.compute_post_variance(w, self.chain_variance) for w in range(...
'Compute the log probability bound. The formula is as described in the appendix of the DTM paper by Blei (formula no. 5).'
def compute_bound(self, sstats, totals):
W = self.vocab_len T = self.num_time_slices term_1 = 0 term_2 = 0 term_3 = 0 val = 0 ent = 0 chain_variance = self.chain_variance (self.mean, self.fwd_mean) = map(np.array, zip(*[self.compute_post_mean(w, self.chain_variance) for w in range(0, W)])) self.zeta = self.update_zeta()...
'Function to perform optimization of obs. Parameters are suff_stats set up in the fit_sslm method. TODO: This is by far the slowest function in the whole algorithm. Replacing or improving the performance of this would greatly speed things up.'
def update_obs(self, sstats, totals):
OBS_NORM_CUTOFF = 2 STEP_SIZE = 0.01 TOL = 0.001 W = self.vocab_len T = self.num_time_slices runs = 0 mean_deriv_mtx = np.resize(np.zeros((T * (T + 1))), (T, (T + 1))) norm_cutoff_obs = None for w in range(0, W): w_counts = sstats[w] counts_norm = 0 for i in r...
'Used in helping find the optimum function. Computes the derivative of E[eta_{t,w}] / d obs_{s,w} for t = 1:T. Puts the result in `deriv`, a preallocated vector of length T+1.'
def compute_mean_deriv(self, word, time, deriv):
T = self.num_time_slices fwd_variance = self.variance[word] deriv[0] = 0 for t in range(1, (T + 1)): if (self.obs_variance > 0.0): w = (self.obs_variance / ((fwd_variance[(t - 1)] + self.chain_variance) + self.obs_variance)) else: w = 0.0 val = (w * deriv[...
'Derivation of obs which is used in derivative function [df_obs] while optimizing.'
def compute_obs_deriv(self, word, word_counts, totals, mean_deriv_mtx, deriv):
init_mult = 1000 T = self.num_time_slices mean = self.mean[word] variance = self.variance[word] self.temp_vect = np.zeros(T) for u in range(0, T): self.temp_vect[u] = np.exp((mean[(u + 1)] + (variance[(u + 1)] / 2))) for t in range(0, T): mean_deriv = mean_deriv_mtx[t] ...
'Update variational multinomial parameters, based on a document and a time-slice. This is done based on the original Blei-LDA paper, where: log_phi := beta * exp(Ψ(gamma)), over every topic for every word. TODO: incorporate lee-sueng trick used in **Lee, Seung: Algorithms for non-negative matrix factorization, NIPS 20...
def update_phi(self, doc_number, time):
num_topics = self.lda.num_topics dig = np.zeros(num_topics) for k in range(0, num_topics): dig[k] = digamma(self.gamma[k]) n = 0 for (word_id, count) in self.doc: for k in range(0, num_topics): self.log_phi[n][k] = (dig[k] + self.lda.topics[word_id][k]) log_phi_ro...
'Update variational Dirichlet parameters as described in the original Blei LDA paper: gamma = alpha + sum(phi), over every topic for every word.'
def update_gamma(self):
self.gamma = np.copy(self.lda.alpha)
n = 0
for word_id, count in self.doc:
    phi_row = self.phi[n]
    for k in range(0, self.lda.num_topics):
        self.gamma[k] += phi_row[k] * count
    n += 1
return self.gamma
'Initialize variational posterior, does not return anything.'
def init_lda_post(self):
total = sum(count for word_id, count in self.doc)
self.gamma.fill(self.lda.alpha[0] + float(total) / self.lda.num_topics)
self.phi[:len(self.doc), :] = 1.0 / self.lda.num_topics
'Compute the likelihood bound.'
def compute_lda_lhood(self):
num_topics = self.lda.num_topics gamma_sum = np.sum(self.gamma) lhood = (gammaln(np.sum(self.lda.alpha)) - gammaln(gamma_sum)) self.lhood[num_topics] = lhood digsum = digamma(gamma_sum) model = 'DTM' for k in range(0, num_topics): e_log_theta_k = (digamma(self.gamma[k]) - digsum) ...
'Posterior inference for LDA. g, g3, g4 and g5 are matrices used in the Document Influence Model and are not used currently.'
def fit_lda_post(self, doc_number, time, ldaseq, LDA_INFERENCE_CONVERGED=1e-08, lda_inference_max_iter=25, g=None, g3_matrix=None, g4_matrix=None, g5_matrix=None):
self.init_lda_post() total = sum((count for (word_id, count) in self.doc)) model = 'DTM' if (model == 'DIM'): pass lhood = self.compute_lda_lhood() lhood_old = 0 converged = 0 iter_ = 0 iter_ += 1 lhood_old = lhood self.gamma = self.update_gamma() model = 'DTM' ...
'Update lda sequence sufficient statistics from an lda posterior. This is very similar to the update_gamma method and uses the same formula.'
def update_lda_seq_ss(self, time, doc, topic_suffstats):
num_topics = self.lda.num_topics
for k in range(0, num_topics):
    topic_ss = topic_suffstats[k]
    n = 0
    for word_id, count in self.doc:
        topic_ss[word_id][time] += count * self.phi[n][k]
        n += 1
    topic_suffstats[k] = topic_ss
return topic_suffstats
'Return representation with the ids transformed.'
def __getitem__(self, bow):
is_corpus, bow = utils.is_corpus(bow)
if is_corpus:
    return self._apply(bow)
return sorted((self.old2new[oldid], weight) for oldid, weight in bow if oldid in self.old2new)
'Compute tf-idf by multiplying a local component (term frequency) with a global component (inverse document frequency), and normalizing the resulting documents to unit length. Formula for unnormalized weight of term `i` in document `j` in a corpus of D documents:: weight_{i,j} = frequency_{i,j} * log_2(D / document_fre...
def __init__(self, corpus=None, id2word=None, dictionary=None, wlocal=utils.identity, wglobal=df2idf, normalize=True):
self.normalize = normalize self.id2word = id2word (self.wlocal, self.wglobal) = (wlocal, wglobal) (self.num_docs, self.num_nnz, self.idfs) = (None, None, None) if (dictionary is not None): if (corpus is not None): logger.warning('constructor received both corpus and ...
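A worked example of the unnormalized weight formula from the docstring above (the numbers are illustrative; the default `wglobal`, df2idf, uses log base 2):

import math

tf, D, df = 2, 100, 25           # term freq in the doc, corpus size, document frequency of the term
weight = tf * math.log2(D / df)  # 2 * log2(4) = 4.0
print(weight)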
'Compute inverse document weights, which will be used to modify term frequencies for documents.'
def initialize(self, corpus):
logger.info('collecting document frequencies') dfs = {} (numnnz, docno) = (0, (-1)) for (docno, bow) in enumerate(corpus): if ((docno % 10000) == 0): logger.info('PROGRESS: processing document #%i', docno) numnnz += len(bow) for (termid, _) in bow: ...
'Return tf-idf representation of the input vector and/or corpus.'
def __getitem__(self, bow, eps=1e-12):
(is_corpus, bow) = utils.is_corpus(bow) if is_corpus: return self._apply(bow) vector = [(termid, (self.wlocal(tf) * self.idfs.get(termid))) for (termid, tf) in bow if (self.idfs.get(termid, 0.0) != 0.0)] if (self.normalize is True): vector = matutils.unitvec(vector) elif self.normali...
'Initialize the model from an iterable of `sentences`. Each sentence must be a list of words (unicode strings) that will be used for training. The `sentences` iterable can be simply a list, but for larger corpora, consider a generator that streams the sentences directly from disk/network, without storing everything in ...
def __init__(self, sentences=None, min_count=5, threshold=10.0, max_vocab_size=40000000, delimiter='_', progress_per=10000, scoring='default'):
if (min_count <= 0): raise ValueError('min_count should be at least 1') if ((threshold <= 0) and (scoring == 'default')): raise ValueError('threshold should be positive for default scoring') if ((scoring == 'npmi') and ((threshold < (-1)) or (threshold > 1)))...
'Get short string representation of this phrase detector.'
def __str__(self):
return ('%s<%i vocab, min_count=%s, threshold=%s, max_vocab_size=%s>' % (self.__class__.__name__, len(self.vocab), self.min_count, self.threshold, self.max_vocab_size))
'Collect unigram/bigram counts from the `sentences` iterable.'
@staticmethod
def learn_vocab(sentences, max_vocab_size, delimiter='_', progress_per=10000):
sentence_no = (-1) total_words = 0 logger.info('collecting all words and their counts') vocab = defaultdict(int) min_reduce = 1 for (sentence_no, sentence) in enumerate(sentences): if ((sentence_no % progress_per) == 0): logger.info(('PROGRESS: at sentenc...
'Merge the collected counts `vocab` into this phrase detector.'
def add_vocab(self, sentences):
(min_reduce, vocab, total_words) = self.learn_vocab(sentences, self.max_vocab_size, self.delimiter, self.progress_per) self.corpus_word_count += total_words if (len(self.vocab) > 0): logger.info('merging %i counts into %s', len(vocab), self) self.min_reduce = max(self.min_reduce,...
'Generate an iterator that contains all phrases in given \'sentences\' Example:: >>> sentences = Text8Corpus(path_to_corpus) >>> bigram = Phrases(sentences, min_count=5, threshold=100) >>> for phrase, score in bigram.export_phrases(sentences): ... print(u\'{0} {1}\'.format(phrase, score)) then you can debug the t...
def export_phrases(self, sentences, out_delimiter=' ', as_tuples=False):
vocab = self.vocab threshold = self.threshold delimiter = self.delimiter min_count = self.min_count scoring = self.scoring corpus_word_count = self.corpus_word_count if (scoring == 'default'): scoring_function = partial(self.original_scorer, len_vocab=float(len(vocab)), min_count=flo...
'Convert the input tokens `sentence` (=list of unicode strings) into phrase tokens (=list of unicode strings, where detected phrases are joined by u\'_\'). If `sentence` is an entire corpus (iterable of sentences rather than a single sentence), return an iterable that converts each of the corpus\' sentences into phrase...
def __getitem__(self, sentence):
warnings.warn('For a faster implementation, use the gensim.models.phrases.Phraser class') (is_single, sentence) = _is_single(sentence) if (not is_single): return self._apply(sentence) (s, new_s) = ([utils.any2utf8(w) for w in sentence], []) last_bigram = False vocab ...
'Convert the input tokens `sentence` (=list of unicode strings) into phrase tokens (=list of unicode strings, where detected phrases are joined by u\'_\' (or other configured delimiter-character). If `sentence` is an entire corpus (iterable of sentences rather than a single sentence), return an iterable that converts e...
def __getitem__(self, sentence):
(is_single, sentence) = _is_single(sentence) if (not is_single): return self._apply(sentence) (s, new_s) = ([utils.any2utf8(w) for w in sentence], []) last_bigram = False phrasegrams = self.phrasegrams delimiter = self.delimiter for (word_a, word_b) in zip(s, s[1:]): bigram_t...
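A sketch of the faster path that the warning in Phrases.__getitem__ points to; the corpus and words are illustrative, and whether a bigram gets joined depends on its counts and the threshold:

from gensim.models.phrases import Phrases, Phraser

bigram = Phrases(sentences, min_count=5, threshold=10.0)  # `sentences` assumed to be an iterable of token lists
phraser = Phraser(bigram)                 # discards raw counts, keeps only the scored phrasegrams
print(phraser[['new', 'york', 'times']])  # e.g. ['new_york', 'times'] if the pair scores above the threshold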
'Construct the (U, S) projection from a corpus `docs`. The projection can be later updated by merging it with another Projection via `self.merge()`. This is the class taking care of the \'core math\'; interfacing with corpora, splitting large corpora into chunks and merging them etc. is done through the higher-level `L...
def __init__(self, m, k, docs=None, use_svdlibc=False, power_iters=P2_EXTRA_ITERS, extra_dims=P2_EXTRA_DIMS):
(self.m, self.k) = (m, k) self.power_iters = power_iters self.extra_dims = extra_dims if (docs is not None): if (not use_svdlibc): (u, s) = stochastic_svd(docs, k, chunksize=sys.maxsize, num_terms=m, power_iters=self.power_iters, extra_dims=self.extra_dims) else: ...
'Merge this Projection with another. The content of `other` is destroyed in the process, so pass this function a copy of `other` if you need it further.'
def merge(self, other, decay=1.0):
if (other.u is None): return if (self.u is None): self.u = other.u.copy() self.s = other.s.copy() return if (self.m != other.m): raise ValueError(('vector space mismatch: update is using %s features, expected %s' % (other.m, self.m))) lo...
'`num_topics` is the number of requested factors (latent dimensions). After the model has been trained, you can estimate topics for an arbitrary, unseen document, using the ``topics = self[document]`` dictionary notation. You can also add new training documents, with ``self.add_documents``, so that training can be stop...
def __init__(self, corpus=None, num_topics=200, id2word=None, chunksize=20000, decay=1.0, distributed=False, onepass=True, power_iters=P2_EXTRA_ITERS, extra_samples=P2_EXTRA_DIMS):
self.id2word = id2word self.num_topics = int(num_topics) self.chunksize = int(chunksize) self.decay = float(decay) if distributed: if (not onepass): logger.warning('forcing the one-pass algorithm for distributed LSA') onepass = True self.onepass ...
'Update singular value decomposition to take into account a new corpus of documents. Training proceeds in chunks of `chunksize` documents at a time. The size of `chunksize` is a tradeoff between increased speed (bigger `chunksize`) vs. lower memory footprint (smaller `chunksize`). If the distributed mode is on, each ch...
def add_documents(self, corpus, chunksize=None, decay=None):
logger.info('updating model with new documents') if (chunksize is None): chunksize = self.chunksize if (decay is None): decay = self.decay if (not scipy.sparse.issparse(corpus)): if (not self.onepass): update = Projection(self.num_terms, self.num_topics, N...
'Return latent representation, as a list of (topic_id, topic_value) 2-tuples. This is done by folding input document into the latent topic space. If `scaled` is set, scale topics by the inverse of singular values (default: no scaling).'
def __getitem__(self, bow, scaled=False, chunksize=512):
assert (self.projection.u is not None), 'decomposition not initialized yet' (is_corpus, bow) = utils.is_corpus(bow) if (is_corpus and chunksize): return self._apply(bow, chunksize=chunksize) if (not is_corpus): bow = [bow] vec = matutils.corpus2csc(bow, num_terms=self.num_te...
'Return a specified topic (=left singular vector), 0 <= `topicno` < `self.num_topics`, as a string. Return only the `topn` words which contribute the most to the direction of the topic (both negative and positive). >>> lsimodel.show_topic(10, topn=5) [("category", -0.340), ("$M$", 0.298), ("algebra", 0.183), ("functor"...
def show_topic(self, topicno, topn=10):
if topicno >= len(self.projection.u.T):
    return ''
c = np.asarray(self.projection.u.T[topicno, :]).flatten()
norm = np.sqrt(np.sum(np.dot(c, c)))
most = matutils.argsort(np.abs(c), topn, reverse=True)
return [(self.id2word[val], 1.0 * c[val] / norm) for val in most]
'Return `num_topics` most significant topics (return all by default). For each topic, show `num_words` most significant words (10 words by default). The topics are returned as a list -- a list of strings if `formatted` is True, or a list of `(word, probability)` 2-tuples if False. If `log` is True, also output this res...
def show_topics(self, num_topics=(-1), num_words=10, log=False, formatted=True):
shown = [] if (num_topics < 0): num_topics = self.num_topics for i in xrange(min(num_topics, self.num_topics)): if (i < len(self.projection.s)): if formatted: topic = self.print_topic(i, topn=num_words) else: topic = self.show_topic(i, ...
'Print (to log) the most salient words of the first `num_topics` topics. Unlike `print_topics()`, this looks for words that are significant for a particular topic *and* not for others. This *should* result in a more human-interpretable description of topics.'
def print_debug(self, num_topics=5, num_words=10):
print_debug(self.id2word, self.projection.u, self.projection.s, range(min(num_topics, len(self.projection.u.T))), num_words=num_words)
'Save the model to file. Large internal arrays may be stored into separate files, with `fname` as prefix. Note: do not save as a compressed file if you intend to load the file back with `mmap`.'
def save(self, fname, *args, **kwargs):
if self.projection is not None:
    self.projection.save(utils.smart_extension(fname, '.projection'), *args, **kwargs)
super(LsiModel, self).save(fname, ignore=['projection', 'dispatcher'], *args, **kwargs)
'Load a previously saved object from file (also see `save`). Large arrays can be memmap\'ed back as read-only (shared memory) by setting `mmap=\'r\'`: >>> LsiModel.load(fname, mmap=\'r\')'
@classmethod
def load(cls, fname, *args, **kwargs):
kwargs['mmap'] = kwargs.get('mmap', None) result = super(LsiModel, cls).load(fname, *args, **kwargs) projection_fname = utils.smart_extension(fname, '.projection') try: result.projection = super(LsiModel, cls).load(projection_fname, *args, **kwargs) except Exception as e: logging.war...
'`gamma`: first level concentration `alpha`: second level concentration `eta`: the topic Dirichlet `T`: top level truncation level `K`: second level truncation level `kappa`: learning rate `tau`: slow down parameter `max_time`: stop training after this many seconds `max_chunks`: stop after having processed this many ch...
def __init__(self, corpus, id2word, max_chunks=None, max_time=None, chunksize=256, kappa=1.0, tau=64.0, K=15, T=150, alpha=1, gamma=1, eta=0.01, scale=1.0, var_converge=0.0001, outputdir=None, random_state=None):
self.corpus = corpus self.id2word = id2word self.chunksize = chunksize self.max_chunks = max_chunks self.max_time = max_time self.outputdir = outputdir self.random_state = utils.get_random_state(random_state) self.lda_alpha = None self.lda_beta = None self.m_W = len(id2word) ...
'E-step for a single document.'
def doc_e_step(self, doc, ss, Elogsticks_1st, word_list, unique_words, doc_word_ids, doc_word_counts, var_converge):
chunkids = [unique_words[id] for id in doc_word_ids] Elogbeta_doc = self.m_Elogbeta[:, doc_word_ids] v = np.zeros((2, (self.m_K - 1))) v[0] = 1.0 v[1] = self.m_alpha phi = ((np.ones((len(doc_word_ids), self.m_K)) * 1.0) / self.m_K) likelihood = 0.0 old_likelihood = (-1e+200) converge...
'Order the topics by their lambda sums, in descending order.'
def optimal_ordering(self):
idx = matutils.argsort(self.m_lambda_sum, reverse=True)
self.m_varphi_ss = self.m_varphi_ss[idx]
self.m_lambda = self.m_lambda[idx, :]
self.m_lambda_sum = self.m_lambda_sum[idx]
self.m_Elogbeta = self.m_Elogbeta[idx, :]
'Since we\'re doing lazy updates on lambda, at any given moment the current state of lambda may not be accurate. This function updates all of the elements of lambda and Elogbeta so that if (for example) we want to print out the topics we\'ve learned we\'ll get the correct behavior.'
def update_expectations(self):
for w in xrange(self.m_W): self.m_lambda[:, w] *= np.exp((self.m_r[(-1)] - self.m_r[self.m_timestamp[w]])) self.m_Elogbeta = (psi((self.m_eta + self.m_lambda)) - psi(((self.m_W * self.m_eta) + self.m_lambda_sum[:, np.newaxis]))) self.m_timestamp[:] = self.m_updatect self.m_status_up_to_date = Tr...
'Print the `num_words` most probable words for topic `topic_id`. Set `formatted=True` to return the topics as a list of strings, or `False` as lists of (weight, word) pairs.'
def show_topic(self, topic_id, topn=20, log=False, formatted=False, num_words=None):
if (num_words is not None): logger.warning('The parameter num_words for show_topic() would be deprecated in the updated version.') logger.warning('Please use topn instead.') topn = num_words if (not self.m_status_up_to_date): self.update_...
'Print the `num_words` most probable words for `num_topics` number of topics. Set `num_topics=-1` to print all topics. Set `formatted=True` to return the topics as a list of strings, or `False` as lists of (weight, word) pairs.'
def show_topics(self, num_topics=20, num_words=20, log=False, formatted=True):
if not self.m_status_up_to_date:
    self.update_expectations()
betas = self.m_lambda + self.m_eta
hdp_formatter = HdpTopicFormatter(self.id2word, betas)
return hdp_formatter.show_topics(num_topics, num_words, log, formatted)
'legacy method; use `self.save()` instead'
def save_topics(self, doc_count=None):
if (not self.outputdir): logger.error('cannot store topics without having specified an output directory') if (doc_count is None): fname = 'final' else: fname = ('doc-%i' % doc_count) fname = ('%s/%s.topics' % (self.outputdir, fname)) logger.info(('savi...
'legacy method; use `self.save()` instead'
def save_options(self):
if (not self.outputdir): logger.error('cannot store options without having specified an output directory') return fname = ('%s/options.dat' % self.outputdir) with utils.smart_open(fname, 'wb') as fout: fout.write(('tau: %s\n' % str((self.m_tau - 1)))) ...
'Compute the LDA model that is almost equivalent to this HDP model, returning its alpha and beta parameters.'
def hdp_to_lda(self):
sticks = (self.m_var_sticks[0] / (self.m_var_sticks[0] + self.m_var_sticks[1])) alpha = np.zeros(self.m_T) left = 1.0 for i in xrange(0, (self.m_T - 1)): alpha[i] = (sticks[i] * left) left = (left - alpha[i]) alpha[(self.m_T - 1)] = left alpha = (alpha * self.m_alpha) beta = ...
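The alpha loop above is a stick-breaking construction: each topic takes a fraction of the stick that remains, and the last topic keeps the remainder. A small illustrative sketch (the stick proportions are made up):

import numpy as np

sticks = np.array([0.5, 0.5, 0.5])    # expected stick proportions from the variational parameters
alpha, left = np.zeros(4), 1.0
for i, s in enumerate(sticks):
    alpha[i] = s * left               # take a fraction of the remaining stick
    left -= alpha[i]
alpha[-1] = left                      # last topic keeps whatever is left
assert np.isclose(alpha.sum(), 1.0)   # the weights form a proper distribution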
'Returns the closest corresponding ldamodel object for the current hdp model. The hdp_to_lda method only returns the corresponding alpha and beta values, whereas this method returns a trained ldamodel. num_topics is m_T (default 150) so as to preserve the matrix shapes when we assign alpha and beta.'
def suggested_lda_model(self):
alpha, beta = self.hdp_to_lda()
ldam = ldamodel.LdaModel(num_topics=self.m_T, alpha=alpha, id2word=self.id2word, random_state=self.random_state)
ldam.expElogbeta[:] = beta
return ldam
'Request jobs from the dispatcher, in a perpetual loop until `getstate()` is called.'
@Pyro4.expose
@Pyro4.oneway
def requestjob(self):
if (self.model is None): raise RuntimeError('worker must be initialized before receiving jobs') job = None while ((job is None) and (not self.finished)): try: job = self.dispatcher.getjob(self.myid) except Queue.Empty: continue if (job is...
'Store the input-hidden weight matrix in the same format used by the original C word2vec-tool, for compatibility. `fname` is the file used to save the vectors in. `fvocab` is an optional file used to save the vocabulary. `binary` is an optional boolean indicating whether the data is to be saved in binary word2vec format ...
def save_word2vec_format(self, fname, fvocab=None, binary=False, total_vec=None):
if (total_vec is None): total_vec = len(self.vocab) vector_size = self.syn0.shape[1] if (fvocab is not None): logger.info(('storing vocabulary in %s' % fvocab)) with utils.smart_open(fvocab, 'wb') as vout: for (word, vocab) in sorted(iteritems(self.vocab), key=(l...
'Load the input-hidden weight matrix from the original C word2vec-tool format. Note that the information stored in the file is incomplete (the binary tree is missing), so while you can query for word similarity etc., you cannot continue training with a model loaded this way. `binary` is a boolean indicating whether the...
@classmethod
def load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict', limit=None, datatype=REAL):
counts = None if (fvocab is not None): logger.info('loading word counts from %s', fvocab) counts = {} with utils.smart_open(fvocab) as fin: for line in fin: (word, count) = utils.to_unicode(line).strip().split() counts[word] = int(c...
'Accept a single word as input. Returns the word\'s representations in vector space, as a 1D numpy array. If `use_norm` is True, returns the normalized word vector. Example:: >>> trained_model[\'office\'] array([ -1.40128313e-02, ...])'
def word_vec(self, word, use_norm=False):
if word in self.vocab:
    if use_norm:
        return self.syn0norm[self.vocab[word].index]
    else:
        return self.syn0[self.vocab[word].index]
else:
    raise KeyError("word '%s' not in vocabulary" % word)
'Find the top-N most similar words. Positive words contribute positively towards the similarity, negative words negatively. This method computes cosine similarity between a simple mean of the projection weight vectors of the given words and the vectors for each word in the model. The method corresponds to the `word-ana...
def most_similar(self, positive=[], negative=[], topn=10, restrict_vocab=None, indexer=None):
self.init_sims() if (isinstance(positive, string_types) and (not negative)): positive = [positive] positive = [((word, 1.0) if isinstance(word, (string_types + (ndarray,))) else word) for word in positive] negative = [((word, (-1.0)) if isinstance(word, (string_types + (ndarray,))) else word) fo...
'Compute the Word Mover\'s Distance between two documents. When using this code, please consider citing the following papers: .. Ofir Pele and Michael Werman, "A linear time histogram metric for improved SIFT matching". .. Ofir Pele and Michael Werman, "Fast and robust earth mover\'s distances". .. Matt Kusner et al. "...
def wmdistance(self, document1, document2):
if (not PYEMD_EXT): raise ImportError('Please install pyemd Python package to compute WMD.') len_pre_oov1 = len(document1) len_pre_oov2 = len(document2) document1 = [token for token in document1 if (token in self)] document2 = [token for token in document2 if (token in s...
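A usage sketch with sentences adapted from the WMD paper's example; `word_vectors` stands for an already-trained KeyedVectors instance (an assumption here), and the optional pyemd package must be installed:

distance = word_vectors.wmdistance('obama speaks to the media'.split(),
                                   'the president greets the press'.split())
# Smaller distance = more similar documents; comparing a document with itself gives 0.0.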
'Find the top-N most similar words, using the multiplicative combination objective proposed by Omer Levy and Yoav Goldberg in [4]_. Positive words still contribute positively towards the similarity, negative words negatively, but with less susceptibility to one large distance dominating the calculation. In the common a...
def most_similar_cosmul(self, positive=[], negative=[], topn=10):
self.init_sims() if (isinstance(positive, string_types) and (not negative)): positive = [positive] all_words = set([self.vocab[word].index for word in (positive + negative) if ((not isinstance(word, ndarray)) and (word in self.vocab))]) positive = [(self.word_vec(word, use_norm=True) if isinstan...
'Find the top-N most similar words. If topn is False, similar_by_word returns the vector of similarity scores. `restrict_vocab` is an optional integer which limits the range of vectors which are searched for most-similar values. For example, restrict_vocab=10000 would only check the first 10000 word vectors in the voca...
def similar_by_word(self, word, topn=10, restrict_vocab=None):
return self.most_similar(positive=[word], topn=topn, restrict_vocab=restrict_vocab)
'Find the top-N most similar words by vector. If topn is False, similar_by_vector returns the vector of similarity scores. `restrict_vocab` is an optional integer which limits the range of vectors which are searched for most-similar values. For example, restrict_vocab=10000 would only check the first 10000 word vectors...
def similar_by_vector(self, vector, topn=10, restrict_vocab=None):
return self.most_similar(positive=[vector], topn=topn, restrict_vocab=restrict_vocab)
'Which word from the given list doesn\'t go with the others? Example:: >>> trained_model.doesnt_match("breakfast cereal dinner lunch".split()) \'cereal\''
def doesnt_match(self, words):
self.init_sims() used_words = [word for word in words if (word in self)] if (len(used_words) != len(words)): ignored_words = (set(words) - set(used_words)) logger.warning('vectors for words %s are not present in the model, ignoring these words', ignored_wo...
'Accept a single word or a list of words as input. If a single word: returns the word\'s representations in vector space, as a 1D numpy array. Multiple words: return the words\' representations in vector space, as a 2d numpy array: #words x #vector_size. Matrix rows are in the same order as in input. Example:: >>> trai...
def __getitem__(self, words):
if isinstance(words, string_types):
    return self.word_vec(words)
return vstack([self.word_vec(word) for word in words])
'Compute cosine similarity between two words. Example:: >>> trained_model.similarity(\'woman\', \'man\') 0.73723527 >>> trained_model.similarity(\'woman\', \'woman\') 1.0'
def similarity(self, w1, w2):
return dot(matutils.unitvec(self[w1]), matutils.unitvec(self[w2]))
'Compute cosine similarity between two sets of words. Example:: >>> trained_model.n_similarity([\'sushi\', \'shop\'], [\'japanese\', \'restaurant\']) 0.61540466561049689 >>> trained_model.n_similarity([\'restaurant\', \'japanese\'], [\'japanese\', \'restaurant\']) 1.0000000000000004 >>> trained_model.n_similarity([\'su...
def n_similarity(self, ws1, ws2):
if not (len(ws1) and len(ws2)):
    raise ZeroDivisionError('At least one of the passed lists is empty.')
v1 = [self[word] for word in ws1]
v2 = [self[word] for word in ws2]
return dot(matutils.unitvec(array(v1).mean(axis=0)), matutils.unitvec(array(v2).mean(axis=0)))
'Compute accuracy of the model. `questions` is a filename where lines are 4-tuples of words, split into sections by ": SECTION NAME" lines. See questions-words.txt in https://storage.googleapis.com/google-code-archive-source/v2/code.google.com/word2vec/source-archive.zip for an example. The accuracy is reported (=print...
def accuracy(self, questions, restrict_vocab=30000, most_similar=most_similar, case_insensitive=True):
ok_vocab = [(w, self.vocab[w]) for w in self.index2word[:restrict_vocab]] ok_vocab = (dict(((w.upper(), v) for (w, v) in reversed(ok_vocab))) if case_insensitive else dict(ok_vocab)) (sections, section) = ([], None) for (line_no, line) in enumerate(utils.smart_open(questions)): line = utils.to_u...
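A usage sketch showing the expected file layout and a typical call; the file path and section names are illustrative, and `word_vectors` stands for a trained KeyedVectors instance (an assumption here):

# questions-words.txt layout: ": SECTION NAME" lines followed by 4-tuples of words, e.g.
#
#   : capital-common-countries
#   Athens Greece Baghdad Iraq
#   Athens Greece Bangkok Thailand
#
sections = word_vectors.accuracy('questions-words.txt', restrict_vocab=30000)
for section in sections:
    print(section['section'], len(section['correct']), len(section['incorrect']))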